diff --git a/Makefile b/Makefile
index a88b75358..b54a44867 100644
--- a/Makefile
+++ b/Makefile
@@ -65,7 +65,7 @@ ALL_OBJ := $(MASTER_OBJ) $(TABLETNODE_OBJ) $(IO_OBJ) $(SDK_OBJ) $(PROTO_OBJ) \
            $(JNI_TERA_OBJ) $(OTHER_OBJ) $(COMMON_OBJ) $(SERVER_OBJ) $(CLIENT_OBJ) \
            $(TEST_CLIENT_OBJ) $(TERA_C_OBJ) $(MONITOR_OBJ) $(MARK_OBJ) $(TEST_OBJ)
 LEVELDB_LIB := src/leveldb/libleveldb.a
-LEVELDB_UTIL := src/leveldb/util/histogram.o
+LEVELDB_UTIL := src/leveldb/util/histogram.o src/leveldb/port/port_posix.o
 
 PROGRAM = tera_main teracli teramo tera_test
 LIBRARY = libtera.a
diff --git a/src/leveldb/util/histogram.cc b/src/leveldb/util/histogram.cc
index 12fc1e0b8..d636f9c73 100644
--- a/src/leveldb/util/histogram.cc
+++ b/src/leveldb/util/histogram.cc
@@ -35,6 +35,7 @@ const double Histogram::kBucketLimit[kNumBuckets] = {
 };
 
 void Histogram::Clear() {
+  MutexLock lock(&mutex_);
   min_ = kBucketLimit[kNumBuckets-1];
   max_ = 0;
   num_ = 0;
@@ -46,6 +47,7 @@ void Histogram::Clear() {
 }
 
 void Histogram::Add(double value) {
+  MutexLock lock(&mutex_);
   // Linear search is fast enough for our usage in db_bench
   int b = 0;
   while (b < kNumBuckets - 1 && kBucketLimit[b] <= value) {
@@ -60,6 +62,7 @@ void Histogram::Add(double value) {
 }
 
 void Histogram::Merge(const Histogram& other) {
+  MutexLock lock(&mutex_);
   if (other.min_ < min_) min_ = other.min_;
   if (other.max_ > max_) max_ = other.max_;
   num_ += other.num_;
@@ -75,6 +78,7 @@ double Histogram::Median() const {
 }
 
 double Histogram::Percentile(double p) const {
+  MutexLock lock(&mutex_);
   double threshold = num_ * (p / 100.0);
   double sum = 0;
   for (int b = 0; b < kNumBuckets; b++) {
@@ -96,11 +100,13 @@ double Histogram::Percentile(double p) const {
 }
 
 double Histogram::Average() const {
+  MutexLock lock(&mutex_);
   if (num_ == 0.0) return 0;
   return sum_ / num_;
 }
 
 double Histogram::StandardDeviation() const {
+  MutexLock lock(&mutex_);
   if (num_ == 0.0) return 0;
   double variance = (sum_squares_ * num_ - sum_ * sum_) / (num_ * num_);
   return sqrt(variance);
diff --git a/src/leveldb/util/histogram.h b/src/leveldb/util/histogram.h
index ecdc726e6..389a4ab41 100644
--- a/src/leveldb/util/histogram.h
+++ b/src/leveldb/util/histogram.h
@@ -10,6 +10,8 @@
 #define STORAGE_LEVELDB_UTIL_HISTOGRAM_H_
 
 #include <string>
+#include "port/port_posix.h"
+#include "util/mutexlock.h"
 
 namespace leveldb {
 
@@ -25,6 +27,7 @@ class Histogram {
   std::string ToString() const;
 
  private:
+  mutable port::Mutex mutex_;
   double min_;
   double max_;
   double num_;
diff --git a/src/sdk/table_impl.cc b/src/sdk/table_impl.cc
index a47776691..66cf5e86f 100644
--- a/src/sdk/table_impl.cc
+++ b/src/sdk/table_impl.cc
@@ -11,6 +11,7 @@
 #include
 #include
 #include
+#include <iomanip>
 #include
 #include
@@ -1096,6 +1097,7 @@ void TableImpl::CommitReaders(const std::string server_addr,
         row_reader->AddCommitTimes();
         row_reader->DecRef();
     }
+    VLOG(20) << "commit " << reader_list.size() << " reads to " << server_addr;
     request->set_timestamp(common::timer::get_micros());
 
     Closure* done = NewClosure(this, &TableImpl::ReaderCallBack, reader_id_list);
@@ -1982,14 +1984,6 @@ std::string TableImpl::GetCookieFileName(const std::string& tablename,
     return fname.str();
 }
 
-static int64_t CalcAverage(Counter& sum, Counter& cnt, int64_t interval) {
-    if (cnt.Get() == 0 || interval == 0) {
-        return 0;
-    } else {
-        return sum.Clear() * 1000 / cnt.Clear() / interval / 1000;
-    }
-}
-
 void TableImpl::DumpPerfCounterLogDelay() {
     DoDumpPerfCounterLog();
     ThreadPool::Task task =
@@ -2005,14 +1999,12 @@ void TableImpl::DoDumpPerfCounterLog() {
 }
 
 void TableImpl::PerfCounter::DoDumpPerfCounterLog(const std::string& log_prefix) {
-    int64_t ts = common::timer::get_micros();
-    int64_t interval = (ts - start_time) / 1000;
     LOG(INFO) << log_prefix << "[delay](ms)"
-        << " get meta: " << CalcAverage(get_meta, get_meta_cnt, interval)
-        << " callback: " << CalcAverage(user_callback, user_callback_cnt, interval)
-        << " rpc_r: " << CalcAverage(rpc_r, rpc_r_cnt, interval)
-        << " rpc_w: " << CalcAverage(rpc_w, rpc_w_cnt, interval)
-        << " rpc_s: " << CalcAverage(rpc_s, rpc_s_cnt, interval);
+        << " get meta: " << (get_meta_cnt.Get() > 0 ? get_meta.Clear() / get_meta_cnt.Clear() / 1000 : 0)
+        << " callback: " << (user_callback_cnt.Get() > 0 ? user_callback.Clear() / user_callback_cnt.Clear() / 1000 : 0)
+        << " rpc_r: " << (rpc_r_cnt.Get() > 0 ? rpc_r.Clear() / rpc_r_cnt.Clear() / 1000 : 0)
+        << " rpc_w: " << (rpc_w_cnt.Get() > 0 ? rpc_w.Clear() / rpc_w_cnt.Clear() / 1000 : 0)
+        << " rpc_s: " << (rpc_s_cnt.Get() > 0 ? rpc_s.Clear() / rpc_s_cnt.Clear() / 1000 : 0);
     LOG(INFO) << log_prefix << "[mutation]"
         << " all: " << mutate_cnt.Clear()
@@ -2034,11 +2026,11 @@ void TableImpl::PerfCounter::DoDumpPerfCounterLog(const std::string& log_prefix)
         << " cnt: " << user_mu_cnt.Clear()
         << " suc: " << user_mu_suc.Clear()
         << " fail: " << user_mu_fail.Clear();
-    LOG(INFO) << log_prefix << "[user_mu_cost]" << std::fixed
+    LOG(INFO) << log_prefix << "[user_mu_cost]" << std::fixed << std::setprecision(2)
         << " cost_ave: " << hist_mu_cost.Average()
-        << " cost_50: " << hist_mu_cost.Percentile(0.5)
-        << " cost_90: " << hist_mu_cost.Percentile(0.9)
-        << " cost_99: " << hist_mu_cost.Percentile(0.99);
+        << " cost_50: " << hist_mu_cost.Percentile(50)
+        << " cost_90: " << hist_mu_cost.Percentile(90)
+        << " cost_99: " << hist_mu_cost.Percentile(99);
     hist_mu_cost.Clear();
 
     LOG(INFO) << log_prefix << "[user_rd]"
@@ -2046,11 +2038,11 @@ void TableImpl::PerfCounter::DoDumpPerfCounterLog(const std::string& log_prefix)
         << " cnt: " << user_read_cnt.Clear()
         << " suc: " << user_read_suc.Clear()
         << " notfound: " << user_read_notfound.Clear()
        << " fail: " << user_read_fail.Clear();
-    LOG(INFO) << log_prefix << "[user_rd_cost]" << std::fixed
+    LOG(INFO) << log_prefix << "[user_rd_cost]" << std::fixed << std::setprecision(2)
        << " cost_ave: " << hist_read_cost.Average()
-        << " cost_50: " << hist_read_cost.Percentile(0.5)
-        << " cost_90: " << hist_read_cost.Percentile(0.9)
-        << " cost_99: " << hist_read_cost.Percentile(0.99);
+        << " cost_50: " << hist_read_cost.Percentile(50)
+        << " cost_90: " << hist_read_cost.Percentile(90)
+        << " cost_99: " << hist_read_cost.Percentile(99);
     hist_read_cost.Clear();
 }
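
Illustration (not part of the patch): with the port::Mutex added to Histogram above, leveldb::Histogram can be fed from several threads at once, and Percentile(p) interprets p as a percentage (the context above shows threshold = num_ * (p / 100.0)), which is why the call sites move from Percentile(0.99) to Percentile(99). A minimal standalone sketch of that usage follows; the file name, the build command, and the use of C++11 std::thread are assumptions made for this example only.

// histogram_concurrency_example.cc -- illustration only, not part of the patch.
// Build sketch (assumption):
//   g++ -std=c++11 -Isrc/leveldb histogram_concurrency_example.cc \
//       src/leveldb/util/histogram.o src/leveldb/port/port_posix.o -lpthread
#include <cstdio>
#include <thread>
#include <vector>

#include "util/histogram.h"  // the header patched above

int main() {
  leveldb::Histogram hist;
  hist.Clear();  // initializes min_/max_/num_/buckets_, now done under the mutex

  // Concurrent Add() calls are serialized by the new port::Mutex member.
  std::vector<std::thread> workers;
  for (int t = 0; t < 4; ++t) {
    workers.emplace_back([&hist, t] {
      for (int i = 0; i < 1000; ++i) {
        hist.Add(static_cast<double>(i % 100 + t));  // fake latency samples (ms)
      }
    });
  }
  for (std::thread& w : workers) {
    w.join();
  }

  // Percentile() takes 0-100, matching the Percentile(50/90/99) calls above.
  std::printf("avg=%.2f p50=%.2f p99=%.2f\n",
              hist.Average(), hist.Percentile(50), hist.Percentile(99));
  return 0;
}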