Commit 5b825d6

[RocksDB] Use raw pointer instead of shared pointer when passing Statistics object internally
Summary: The liveness of the statistics object is already ensured by the shared pointer held in the DB options, so there is no reason to pass the shared pointer again between internal functions. A raw pointer is sufficient and more efficient.

Test Plan: make check

Reviewers: dhruba, MarkCallaghan, igor

Reviewed By: dhruba

CC: leveldb, reconnect.grayhat

Differential Revision: https://reviews.facebook.net/D14289
1 parent 0c93df9 commit 5b825d6

14 files changed: +117 -90 lines
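
The ownership pattern this commit relies on, as a minimal sketch. The struct definitions below are illustrative stand-ins, not RocksDB's actual declarations; only the shape (shared_ptr ownership in the options, raw pointers inside) mirrors the diff.

    #include <cstdint>
    #include <memory>

    // Minimal stand-ins for rocksdb::Statistics and rocksdb::Options.
    struct Statistics {
      uint64_t tick_count = 0;
    };

    struct Options {
      // The shared_ptr in the options owns the Statistics object for as long
      // as the DB (and therefore every internal helper) is alive.
      std::shared_ptr<Statistics> statistics;
    };

    // Internal helper modeled on RecordTick: it takes a raw pointer because it
    // never outlives the Options that owns the object, so copying a shared_ptr
    // (and paying for atomic refcount updates) buys nothing.
    void RecordTick(Statistics* stats, uint64_t count = 1) {
      if (stats != nullptr) {  // statistics collection may be disabled
        stats->tick_count += count;
      }
    }

    int main() {
      Options options;
      options.statistics = std::make_shared<Statistics>();
      // Call sites hand out the raw pointer, mirroring
      // options_.statistics.get() throughout the diffs below.
      RecordTick(options.statistics.get());
      return 0;
    }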

db/builder.cc

+3 -2

@@ -112,6 +112,7 @@ Status BuildTable(const std::string& dbname,
 
       if (this_ikey.type == kTypeMerge) {
         // Handle merge-type keys using the MergeHelper
+        // TODO: pass statistics to MergeUntil
         merge.MergeUntil(iter, 0 /* don't worry about snapshot */);
         iterator_at_next = true;
         if (merge.IsSuccess()) {
@@ -188,10 +189,10 @@ Status BuildTable(const std::string& dbname,
     // Finish and check for file errors
     if (s.ok() && !options.disableDataSync) {
       if (options.use_fsync) {
-        StopWatch sw(env, options.statistics, TABLE_SYNC_MICROS);
+        StopWatch sw(env, options.statistics.get(), TABLE_SYNC_MICROS);
         s = file->Fsync();
       } else {
-        StopWatch sw(env, options.statistics, TABLE_SYNC_MICROS);
+        StopWatch sw(env, options.statistics.get(), TABLE_SYNC_MICROS);
         s = file->Sync();
       }
     }

db/db_impl.cc

+42 -33

@@ -404,7 +404,7 @@ const Status DBImpl::CreateArchivalDirectory() {
 }
 
 void DBImpl::PrintStatistics() {
-  auto dbstats = options_.statistics;
+  auto dbstats = options_.statistics.get();
   if (dbstats) {
     Log(options_.info_log,
         "STATISTCS:\n %s",
@@ -860,7 +860,7 @@ Status DBImpl::Recover(VersionEdit* edit, MemTable* external_table,
     if (versions_->LastSequence() < max_sequence) {
       versions_->SetLastSequence(max_sequence);
     }
-    SetTickerCount(options_.statistics, SEQUENCE_NUMBER,
+    SetTickerCount(options_.statistics.get(), SEQUENCE_NUMBER,
                    versions_->LastSequence());
   }
 }
@@ -1297,7 +1297,7 @@ SequenceNumber DBImpl::GetLatestSequenceNumber() const {
 Status DBImpl::GetUpdatesSince(SequenceNumber seq,
                                unique_ptr<TransactionLogIterator>* iter) {
 
-  RecordTick(options_.statistics, GET_UPDATES_SINCE_CALLS);
+  RecordTick(options_.statistics.get(), GET_UPDATES_SINCE_CALLS);
   if (seq > versions_->LastSequence()) {
     return Status::IOError("Requested sequence not yet written in the db");
   }
@@ -1971,10 +1971,12 @@ Status DBImpl::FinishCompactionOutputFile(CompactionState* compact,
   // Finish and check for file errors
   if (s.ok() && !options_.disableDataSync) {
     if (options_.use_fsync) {
-      StopWatch sw(env_, options_.statistics, COMPACTION_OUTFILE_SYNC_MICROS);
+      StopWatch sw(env_, options_.statistics.get(),
+                   COMPACTION_OUTFILE_SYNC_MICROS);
      s = compact->outfile->Fsync();
    } else {
-      StopWatch sw(env_, options_.statistics, COMPACTION_OUTFILE_SYNC_MICROS);
+      StopWatch sw(env_, options_.statistics.get(),
+                   COMPACTION_OUTFILE_SYNC_MICROS);
      s = compact->outfile->Sync();
    }
  }
@@ -2212,7 +2214,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact,
          ParseInternalKey(key, &ikey);
          // no value associated with delete
          value.clear();
-          RecordTick(options_.statistics, COMPACTION_KEY_DROP_USER);
+          RecordTick(options_.statistics.get(), COMPACTION_KEY_DROP_USER);
        } else if (value_changed) {
          value = compaction_filter_value;
        }
@@ -2238,7 +2240,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact,
        // TODO: why not > ?
        assert(last_sequence_for_key >= ikey.sequence);
        drop = true;    // (A)
-        RecordTick(options_.statistics, COMPACTION_KEY_DROP_NEWER_ENTRY);
+        RecordTick(options_.statistics.get(), COMPACTION_KEY_DROP_NEWER_ENTRY);
      } else if (ikey.type == kTypeDeletion &&
                 ikey.sequence <= earliest_snapshot &&
                 compact->compaction->IsBaseLevelForKey(ikey.user_key)) {
@@ -2250,7 +2252,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact,
        // few iterations of this loop (by rule (A) above).
        // Therefore this deletion marker is obsolete and can be dropped.
        drop = true;
-        RecordTick(options_.statistics, COMPACTION_KEY_DROP_OBSOLETE);
+        RecordTick(options_.statistics.get(), COMPACTION_KEY_DROP_OBSOLETE);
      } else if (ikey.type == kTypeMerge) {
        // We know the merge type entry is not hidden, otherwise we would
        // have hit (A)
@@ -2259,7 +2261,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact,
        // logic could also be nicely re-used for memtable flush purge
        // optimization in BuildTable.
        merge.MergeUntil(input.get(), prev_snapshot, bottommost_level,
-                         options_.statistics);
+                         options_.statistics.get());
        current_entry_is_merging = true;
        if (merge.IsSuccess()) {
          // Successfully found Put/Delete/(end-of-key-range) while merging
@@ -2412,8 +2414,8 @@ Status DBImpl::DoCompactionWork(CompactionState* compact,
 
  CompactionStats stats;
  stats.micros = env_->NowMicros() - start_micros - imm_micros;
-  if (options_.statistics) {
-    options_.statistics->measureTime(COMPACTION_TIME, stats.micros);
+  if (options_.statistics.get()) {
+    options_.statistics.get()->measureTime(COMPACTION_TIME, stats.micros);
  }
  stats.files_in_leveln = compact->compaction->num_input_files(0);
  stats.files_in_levelnp1 = compact->compaction->num_input_files(1);
@@ -2554,7 +2556,7 @@ Status DBImpl::GetImpl(const ReadOptions& options,
                       bool* value_found) {
  Status s;
 
-  StopWatch sw(env_, options_.statistics, DB_GET);
+  StopWatch sw(env_, options_.statistics.get(), DB_GET);
  SequenceNumber snapshot;
  mutex_.Lock();
  if (options.snapshot != nullptr) {
@@ -2605,16 +2607,16 @@ Status DBImpl::GetImpl(const ReadOptions& options,
 
  LogFlush(options_.info_log);
  // Note, tickers are atomic now - no lock protection needed any more.
-  RecordTick(options_.statistics, NUMBER_KEYS_READ);
-  RecordTick(options_.statistics, BYTES_READ, value->size());
+  RecordTick(options_.statistics.get(), NUMBER_KEYS_READ);
+  RecordTick(options_.statistics.get(), BYTES_READ, value->size());
  return s;
 }
 
 std::vector<Status> DBImpl::MultiGet(const ReadOptions& options,
                                      const std::vector<Slice>& keys,
                                      std::vector<std::string>* values) {
 
-  StopWatch sw(env_, options_.statistics, DB_MULTIGET);
+  StopWatch sw(env_, options_.statistics.get(), DB_MULTIGET);
  SequenceNumber snapshot;
  mutex_.Lock();
  if (options.snapshot != nullptr) {
@@ -2683,9 +2685,9 @@ std::vector<Status> DBImpl::MultiGet(const ReadOptions& options,
  mutex_.Unlock();
 
  LogFlush(options_.info_log);
-  RecordTick(options_.statistics, NUMBER_MULTIGET_CALLS);
-  RecordTick(options_.statistics, NUMBER_MULTIGET_KEYS_READ, numKeys);
-  RecordTick(options_.statistics, NUMBER_MULTIGET_BYTES_READ, bytesRead);
+  RecordTick(options_.statistics.get(), NUMBER_MULTIGET_CALLS);
+  RecordTick(options_.statistics.get(), NUMBER_MULTIGET_KEYS_READ, numKeys);
+  RecordTick(options_.statistics.get(), NUMBER_MULTIGET_BYTES_READ, bytesRead);
 
  return statList;
 }
@@ -2760,7 +2762,7 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) {
  w.disableWAL = options.disableWAL;
  w.done = false;
 
-  StopWatch sw(env_, options_.statistics, DB_WRITE);
+  StopWatch sw(env_, options_.statistics.get(), DB_WRITE);
  MutexLock l(&mutex_);
  writers_.push_back(&w);
  while (!w.done && &w != writers_.front()) {
@@ -2793,8 +2795,9 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) {
    int my_batch_count = WriteBatchInternal::Count(updates);
    last_sequence += my_batch_count;
    // Record statistics
-    RecordTick(options_.statistics, NUMBER_KEYS_WRITTEN, my_batch_count);
-    RecordTick(options_.statistics,
+    RecordTick(options_.statistics.get(),
+               NUMBER_KEYS_WRITTEN, my_batch_count);
+    RecordTick(options_.statistics.get(),
               BYTES_WRITTEN,
               WriteBatchInternal::ByteSize(updates));
    if (options.disableWAL) {
@@ -2808,10 +2811,10 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) {
      BumpPerfTime(&perf_context.wal_write_time, &timer);
      if (status.ok() && options.sync) {
        if (options_.use_fsync) {
-          StopWatch(env_, options_.statistics, WAL_FILE_SYNC_MICROS);
+          StopWatch(env_, options_.statistics.get(), WAL_FILE_SYNC_MICROS);
          status = log_->file()->Fsync();
        } else {
-          StopWatch(env_, options_.statistics, WAL_FILE_SYNC_MICROS);
+          StopWatch(env_, options_.statistics.get(), WAL_FILE_SYNC_MICROS);
          status = log_->file()->Sync();
        }
      }
@@ -2826,7 +2829,8 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) {
        // have succeeded in memtable but Status reports error for all writes.
        throw std::runtime_error("In memory WriteBatch corruption!");
      }
-      SetTickerCount(options_.statistics, SEQUENCE_NUMBER, last_sequence);
+      SetTickerCount(options_.statistics.get(),
+                     SEQUENCE_NUMBER, last_sequence);
    }
    LogFlush(options_.info_log);
    mutex_.Lock();
@@ -2975,15 +2979,15 @@ Status DBImpl::MakeRoomForWrite(bool force) {
      mutex_.Unlock();
      uint64_t delayed;
      {
-        StopWatch sw(env_, options_.statistics, STALL_L0_SLOWDOWN_COUNT);
+        StopWatch sw(env_, options_.statistics.get(), STALL_L0_SLOWDOWN_COUNT);
        env_->SleepForMicroseconds(
          SlowdownAmount(versions_->NumLevelFiles(0),
                         options_.level0_slowdown_writes_trigger,
                         options_.level0_stop_writes_trigger)
        );
        delayed = sw.ElapsedMicros();
      }
-      RecordTick(options_.statistics, STALL_L0_SLOWDOWN_MICROS, delayed);
+      RecordTick(options_.statistics.get(), STALL_L0_SLOWDOWN_MICROS, delayed);
      stall_level0_slowdown_ += delayed;
      stall_level0_slowdown_count_++;
      allow_delay = false;  // Do not delay a single write more than once
@@ -3003,12 +3007,13 @@ Status DBImpl::MakeRoomForWrite(bool force) {
      Log(options_.info_log, "wait for memtable compaction...\n");
      uint64_t stall;
      {
-        StopWatch sw(env_, options_.statistics,
+        StopWatch sw(env_, options_.statistics.get(),
                     STALL_MEMTABLE_COMPACTION_COUNT);
        bg_cv_.Wait();
        stall = sw.ElapsedMicros();
      }
-      RecordTick(options_.statistics, STALL_MEMTABLE_COMPACTION_MICROS, stall);
+      RecordTick(options_.statistics.get(),
+                 STALL_MEMTABLE_COMPACTION_MICROS, stall);
      stall_memtable_compaction_ += stall;
      stall_memtable_compaction_count_++;
    } else if (versions_->NumLevelFiles(0) >=
@@ -3018,11 +3023,12 @@ Status DBImpl::MakeRoomForWrite(bool force) {
      Log(options_.info_log, "wait for fewer level0 files...\n");
      uint64_t stall;
      {
-        StopWatch sw(env_, options_.statistics, STALL_L0_NUM_FILES_COUNT);
+        StopWatch sw(env_, options_.statistics.get(),
+                     STALL_L0_NUM_FILES_COUNT);
        bg_cv_.Wait();
        stall = sw.ElapsedMicros();
      }
-      RecordTick(options_.statistics, STALL_L0_NUM_FILES_MICROS, stall);
+      RecordTick(options_.statistics.get(), STALL_L0_NUM_FILES_MICROS, stall);
      stall_level0_num_files_ += stall;
      stall_level0_num_files_count_++;
    } else if (
@@ -3034,7 +3040,8 @@ Status DBImpl::MakeRoomForWrite(bool force) {
      mutex_.Unlock();
      uint64_t delayed;
      {
-        StopWatch sw(env_, options_.statistics, HARD_RATE_LIMIT_DELAY_COUNT);
+        StopWatch sw(env_, options_.statistics.get(),
+                     HARD_RATE_LIMIT_DELAY_COUNT);
        env_->SleepForMicroseconds(1000);
        delayed = sw.ElapsedMicros();
      }
@@ -3043,7 +3050,8 @@ Status DBImpl::MakeRoomForWrite(bool force) {
      // Make sure the following value doesn't round to zero.
      uint64_t rate_limit = std::max((delayed / 1000), (uint64_t) 1);
      rate_limit_delay_millis += rate_limit;
-      RecordTick(options_.statistics, RATE_LIMIT_DELAY_MILLIS, rate_limit);
+      RecordTick(options_.statistics.get(),
+                 RATE_LIMIT_DELAY_MILLIS, rate_limit);
      if (options_.rate_limit_delay_max_milliseconds > 0 &&
          rate_limit_delay_millis >=
          (unsigned)options_.rate_limit_delay_max_milliseconds) {
@@ -3058,7 +3066,8 @@ Status DBImpl::MakeRoomForWrite(bool force) {
      // TODO: add statistics
      mutex_.Unlock();
      {
-        StopWatch sw(env_, options_.statistics, SOFT_RATE_LIMIT_DELAY_COUNT);
+        StopWatch sw(env_, options_.statistics.get(),
+                     SOFT_RATE_LIMIT_DELAY_COUNT);
        env_->SleepForMicroseconds(SlowdownAmount(
          score,
          options_.soft_rate_limit,

db/db_iter.cc

+2 -2

@@ -69,7 +69,7 @@ class DBIter: public Iterator {
        direction_(kForward),
        valid_(false),
        current_entry_is_merged_(false),
-        statistics_(options.statistics) {
+        statistics_(options.statistics.get()) {
    RecordTick(statistics_, NO_ITERATORS, 1);
    max_skip_ = options.max_sequential_skip_in_iterations;
  }
@@ -135,7 +135,7 @@ class DBIter: public Iterator {
  Direction direction_;
  bool valid_;
  bool current_entry_is_merged_;
-  std::shared_ptr<Statistics> statistics_;
+  Statistics* statistics_;
  uint64_t max_skip_;
 
  // No copying allowed

db/memtable.cc

+3 -2

@@ -19,6 +19,7 @@
 #include "util/coding.h"
 #include "util/mutexlock.h"
 #include "util/murmurhash.h"
+#include "util/statistics_imp.h"
 
 namespace std {
 template <>
@@ -203,7 +204,7 @@ bool MemTable::Get(const LookupKey& key, std::string* value, Status* s,
      assert(merge_operator);
      if (!merge_operator->FullMerge(key.user_key(), &v, *operands,
                                     value, logger.get())) {
-        RecordTick(options.statistics, NUMBER_MERGE_FAILURES);
+        RecordTick(options.statistics.get(), NUMBER_MERGE_FAILURES);
        *s = Status::Corruption("Error: Could not perform merge.");
      }
    } else {
@@ -220,7 +221,7 @@ bool MemTable::Get(const LookupKey& key, std::string* value, Status* s,
      *s = Status::OK();
      if (!merge_operator->FullMerge(key.user_key(), nullptr, *operands,
                                     value, logger.get())) {
-        RecordTick(options.statistics, NUMBER_MERGE_FAILURES);
+        RecordTick(options.statistics.get(), NUMBER_MERGE_FAILURES);
        *s = Status::Corruption("Error: Could not perform merge.");
      }
    } else {

db/merge_helper.cc

+2 -1

@@ -8,6 +8,7 @@
 #include "rocksdb/comparator.h"
 #include "rocksdb/db.h"
 #include "rocksdb/merge_operator.h"
+#include "util/statistics_imp.h"
 #include <string>
 #include <stdio.h>
 
@@ -20,7 +21,7 @@ namespace rocksdb {
 // operands_ stores the list of merge operands encountered while merging.
 // keys_[i] corresponds to operands_[i] for each i.
 void MergeHelper::MergeUntil(Iterator* iter, SequenceNumber stop_before,
-                             bool at_bottom, shared_ptr<Statistics> stats) {
+                             bool at_bottom, Statistics* stats) {
  // Get a copy of the internal key, before it's invalidated by iter->Next()
  // Also maintain the list of merge operands seen.
  keys_.clear();

db/merge_helper.h

+2 -2

@@ -8,7 +8,6 @@
 
 #include "db/dbformat.h"
 #include "rocksdb/slice.h"
-#include "rocksdb/statistics.h"
 #include <string>
 #include <deque>
 
@@ -18,6 +17,7 @@ class Comparator;
 class Iterator;
 class Logger;
 class MergeOperator;
+class Statistics;
 
 class MergeHelper {
  public:
@@ -46,7 +46,7 @@ class MergeHelper {
  // at_bottom: (IN) true if the iterator covers the bottem level, which means
  //            we could reach the start of the history of this user key.
  void MergeUntil(Iterator* iter, SequenceNumber stop_before = 0,
-                  bool at_bottom = false, shared_ptr<Statistics> stats=nullptr);
+                  bool at_bottom = false, Statistics* stats = nullptr);
 
  // Query the merge result
  // These are valid until the next MergeUntil call
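
A side benefit visible in this header: once MergeUntil takes only a Statistics*, the full rocksdb/statistics.h include can be replaced by a forward declaration, because declaring a pointer parameter does not require the complete type. A minimal sketch of that pattern follows; the file and class names are hypothetical, not the actual RocksDB header.

    // sketch_merge_helper.h -- illustrative only
    #pragma once

    class Statistics;  // forward declaration: a raw-pointer parameter needs no definition

    class MergeHelperSketch {
     public:
      // Compiles against the forward declaration alone; only the .cc file that
      // actually dereferences stats must include the full statistics header.
      void MergeUntil(Statistics* stats = nullptr);
    };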

db/table_cache.cc

+3 -3

@@ -65,20 +65,20 @@ Status TableCache::FindTable(const EnvOptions& toptions,
    unique_ptr<RandomAccessFile> file;
    unique_ptr<TableReader> table_reader;
    s = env_->NewRandomAccessFile(fname, &file, toptions);
-    RecordTick(options_->statistics, NO_FILE_OPENS);
+    RecordTick(options_->statistics.get(), NO_FILE_OPENS);
    if (s.ok()) {
      if (options_->advise_random_on_open) {
        file->Hint(RandomAccessFile::RANDOM);
      }
-      StopWatch sw(env_, options_->statistics, TABLE_OPEN_IO_MICROS);
+      StopWatch sw(env_, options_->statistics.get(), TABLE_OPEN_IO_MICROS);
      s = options_->table_factory->GetTableReader(*options_, toptions,
                                                  std::move(file), file_size,
                                                  &table_reader);
    }
 
    if (!s.ok()) {
      assert(table_reader == nullptr);
-      RecordTick(options_->statistics, NO_FILE_ERRORS);
+      RecordTick(options_->statistics.get(), NO_FILE_ERRORS);
      // We do not cache error results so that if the error is transient,
      // or somebody repairs the file, we recover automatically.
    } else {
