@@ -294,24 +294,24 @@ Status SanitizeDBOptionsByCFOptions(
   return Status::OK();
 }
 
-CompressionType GetCompressionFlush(const Options& options) {
+CompressionType GetCompressionFlush(const ImmutableCFOptions& ioptions) {
   // Compressing memtable flushes might not help unless the sequential load
   // optimization is used for leveled compaction. Otherwise the CPU and
   // latency overhead is not offset by saving much space.
 
   bool can_compress;
 
-  if (options.compaction_style == kCompactionStyleUniversal) {
+  if (ioptions.compaction_style == kCompactionStyleUniversal) {
     can_compress =
-        (options.compaction_options_universal.compression_size_percent < 0);
+        (ioptions.compaction_options_universal.compression_size_percent < 0);
   } else {
     // For leveled compress when min_level_to_compress == 0.
-    can_compress = options.compression_per_level.empty() ||
-                   options.compression_per_level[0] != kNoCompression;
+    can_compress = ioptions.compression_per_level.empty() ||
+                   ioptions.compression_per_level[0] != kNoCompression;
   }
 
   if (can_compress) {
-    return options.compression;
+    return ioptions.compression;
   } else {
     return kNoCompression;
   }
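The hunk above only changes the parameter type; the flush-compression rule itself is unchanged. For readers skimming the diff, a minimal standalone sketch of that rule follows, using simplified stand-in types rather than the real ImmutableCFOptions.

#include <vector>

// Simplified stand-ins for the RocksDB types touched above (hypothetical).
enum CompressionType { kNoCompression, kSnappyCompression };
enum CompactionStyle { kCompactionStyleLevel, kCompactionStyleUniversal };

struct ToyOptions {
  CompactionStyle compaction_style = kCompactionStyleLevel;
  int universal_compression_size_percent = -1;  // < 0 means "compress everything"
  std::vector<CompressionType> compression_per_level;
  CompressionType compression = kSnappyCompression;
};

// Mirrors the logic of GetCompressionFlush(): compress a memtable flush only
// when the compaction configuration would not immediately recompress it anyway.
CompressionType ToyGetCompressionFlush(const ToyOptions& opts) {
  bool can_compress;
  if (opts.compaction_style == kCompactionStyleUniversal) {
    can_compress = (opts.universal_compression_size_percent < 0);
  } else {
    // Leveled: compress only if level 0 itself is configured to be compressed.
    can_compress = opts.compression_per_level.empty() ||
                   opts.compression_per_level[0] != kNoCompression;
  }
  return can_compress ? opts.compression : kNoCompression;
}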
@@ -1424,8 +1424,8 @@ Status DBImpl::WriteLevel0TableForRecovery(ColumnFamilyData* cfd, MemTable* mem,
     s = BuildTable(
         dbname_, env_, *cfd->ioptions(), env_options_, cfd->table_cache(),
         iter.get(), &meta, cfd->internal_comparator(), newest_snapshot,
-        earliest_seqno_in_memtable, GetCompressionFlush(*cfd->options()),
-        cfd->options()->compression_opts, Env::IO_HIGH);
+        earliest_seqno_in_memtable, GetCompressionFlush(*cfd->ioptions()),
+        cfd->ioptions()->compression_opts, Env::IO_HIGH);
     LogFlush(db_options_.info_log);
     mutex_.Lock();
   }
@@ -1498,8 +1498,8 @@ Status DBImpl::WriteLevel0Table(ColumnFamilyData* cfd,
     s = BuildTable(
         dbname_, env_, *cfd->ioptions(), env_options_, cfd->table_cache(),
         iter.get(), &meta, cfd->internal_comparator(), newest_snapshot,
-        earliest_seqno_in_memtable, GetCompressionFlush(*cfd->options()),
-        cfd->options()->compression_opts, Env::IO_HIGH);
+        earliest_seqno_in_memtable, GetCompressionFlush(*cfd->ioptions()),
+        cfd->ioptions()->compression_opts, Env::IO_HIGH);
     LogFlush(db_options_.info_log);
   }
   Log(db_options_.info_log,
@@ -1537,7 +1537,7 @@ Status DBImpl::WriteLevel0Table(ColumnFamilyData* cfd,
     // threads could be concurrently producing compacted files for
     // that key range.
     if (base != nullptr && db_options_.max_background_compactions <= 1 &&
-        cfd->options()->compaction_style == kCompactionStyleLevel) {
+        cfd->ioptions()->compaction_style == kCompactionStyleLevel) {
       level = base->PickLevelForMemTableOutput(min_user_key, max_user_key);
     }
     edit->AddFile(level, meta.fd.GetNumber(), meta.fd.GetPathId(),
@@ -1666,8 +1666,8 @@ Status DBImpl::CompactRange(ColumnFamilyHandle* column_family,
   // bottom-most level, the output level will be the same as input one.
   // level 0 can never be the bottommost level (i.e. if all files are in level
   // 0, we will compact to level 1)
-  if (cfd->options()->compaction_style == kCompactionStyleUniversal ||
-      cfd->options()->compaction_style == kCompactionStyleFIFO ||
+  if (cfd->ioptions()->compaction_style == kCompactionStyleUniversal ||
+      cfd->ioptions()->compaction_style == kCompactionStyleFIFO ||
       (level == max_level_with_files && level > 0)) {
     s = RunManualCompaction(cfd, level, level, target_path_id, begin, end);
   } else {
@@ -1828,16 +1828,16 @@ Status DBImpl::RunManualCompaction(ColumnFamilyData* cfd, int input_level,
   // For universal compaction, we enforce every manual compaction to compact
   // all files.
   if (begin == nullptr ||
-      cfd->options()->compaction_style == kCompactionStyleUniversal ||
-      cfd->options()->compaction_style == kCompactionStyleFIFO) {
+      cfd->ioptions()->compaction_style == kCompactionStyleUniversal ||
+      cfd->ioptions()->compaction_style == kCompactionStyleFIFO) {
     manual.begin = nullptr;
   } else {
     begin_storage = InternalKey(*begin, kMaxSequenceNumber, kValueTypeForSeek);
     manual.begin = &begin_storage;
   }
   if (end == nullptr ||
-      cfd->options()->compaction_style == kCompactionStyleUniversal ||
-      cfd->options()->compaction_style == kCompactionStyleFIFO) {
+      cfd->ioptions()->compaction_style == kCompactionStyleUniversal ||
+      cfd->ioptions()->compaction_style == kCompactionStyleFIFO) {
     manual.end = nullptr;
   } else {
     end_storage = InternalKey(*end, 0, static_cast<ValueType>(0));
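For context on the hunk above: with universal or FIFO compaction a manual compaction ignores the requested key range and compacts all files, which is why manual.begin and manual.end are forced to nullptr. Below is a hedged caller-side sketch; the two-argument CompactRange overload is assumed to match this code vintage and is not guaranteed by the diff itself.

#include "rocksdb/db.h"

// Sketch only: under kCompactionStyleUniversal or kCompactionStyleFIFO the
// begin/end hints below are effectively ignored and the whole key space is
// compacted, exactly what RunManualCompaction enforces above.
void CompactUserRange(rocksdb::DB* db) {
  rocksdb::Slice begin("user_key_aaa");
  rocksdb::Slice end("user_key_zzz");
  // Under leveled compaction only [begin, end] is considered; under universal
  // or FIFO compaction the range collapses to "all files".
  db->CompactRange(&begin, &end);  // signature assumed for this era of the API
}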
@@ -2288,7 +2288,7 @@ Status DBImpl::BackgroundCompaction(bool* madeProgress,
     // file if there is alive snapshot pointing to it
     assert(c->num_input_files(1) == 0);
     assert(c->level() == 0);
-    assert(c->column_family_data()->options()->compaction_style ==
+    assert(c->column_family_data()->ioptions()->compaction_style ==
            kCompactionStyleFIFO);
     for (const auto& f : *c->inputs(0)) {
       c->edit()->DeleteFile(c->level(), f->fd.GetNumber());
@@ -2371,8 +2371,8 @@ Status DBImpl::BackgroundCompaction(bool* madeProgress,
     // We only compacted part of the requested range. Update *m
     // to the range that is left to be compacted.
     // Universal and FIFO compactions should always compact the whole range
-    assert(m->cfd->options()->compaction_style != kCompactionStyleUniversal);
-    assert(m->cfd->options()->compaction_style != kCompactionStyleFIFO);
+    assert(m->cfd->ioptions()->compaction_style != kCompactionStyleUniversal);
+    assert(m->cfd->ioptions()->compaction_style != kCompactionStyleFIFO);
     m->tmp_storage = *manual_end;
     m->begin = &m->tmp_storage;
   }
@@ -2465,7 +2465,7 @@ Status DBImpl::OpenCompactionOutputFile(CompactionState* compact) {
     compact->builder.reset(NewTableBuilder(
         *cfd->ioptions(), cfd->internal_comparator(), compact->outfile.get(),
         compact->compaction->OutputCompressionType(),
-        cfd->options()->compression_opts));
+        cfd->ioptions()->compression_opts));
   }
   LogFlush(db_options_.info_log);
   return s;
@@ -2640,7 +2640,7 @@ Status DBImpl::ProcessKeyValueCompaction(
   SequenceNumber visible_in_snapshot = kMaxSequenceNumber;
   ColumnFamilyData* cfd = compact->compaction->column_family_data();
   MergeHelper merge(
-      cfd->user_comparator(), cfd->options()->merge_operator.get(),
+      cfd->user_comparator(), cfd->ioptions()->merge_operator,
       db_options_.info_log.get(), cfd->options()->min_partial_merge_operands,
       false /* internal key corruption is expected */);
   auto compaction_filter = cfd->options()->compaction_filter;
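One detail worth calling out in the hunk above: the .get() disappears because, as this refactor appears to assume, ImmutableCFOptions caches a plain const MergeOperator* taken from the shared_ptr held in the column family options, so MergeHelper now receives the raw pointer directly. A small sketch of that ownership pattern, with hypothetical names standing in for the real types:

#include <memory>

// Hypothetical mirror of the ownership split assumed by the hunk above.
struct MergeOperatorLike { virtual ~MergeOperatorLike() = default; };

struct MutableStyleOptions {                         // like ColumnFamilyOptions
  std::shared_ptr<MergeOperatorLike> merge_operator; // owning handle
};

struct ImmutableStyleOptions {                       // like ImmutableCFOptions
  const MergeOperatorLike* merge_operator;           // non-owning, fixed for the CF lifetime
  explicit ImmutableStyleOptions(const MutableStyleOptions& o)
      : merge_operator(o.merge_operator.get()) {}
};

// Before: the callee took options.merge_operator.get(); after: it takes
// ioptions.merge_operator, which is already the raw pointer.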
@@ -3673,30 +3673,31 @@ bool DBImpl::KeyMayExist(const ReadOptions& options,
   return s.ok() || s.IsIncomplete();
 }
 
-Iterator* DBImpl::NewIterator(const ReadOptions& options,
+Iterator* DBImpl::NewIterator(const ReadOptions& read_options,
                               ColumnFamilyHandle* column_family) {
   auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
   auto cfd = cfh->cfd();
 
-  if (options.tailing) {
+  if (read_options.tailing) {
 #ifdef ROCKSDB_LITE
     // not supported in lite version
     return nullptr;
 #else
-    // TODO(ljin): remove tailing iterator
-    auto iter = new ForwardIterator(this, options, cfd);
-    return NewDBIterator(env_, *cfd->options(), cfd->user_comparator(), iter,
-                         kMaxSequenceNumber, options.iterate_upper_bound);
-    // return new TailingIterator(env_, this, options, cfd);
+    auto iter = new ForwardIterator(this, read_options, cfd);
+    return NewDBIterator(env_, *cfd->ioptions(), cfd->user_comparator(), iter,
+                         kMaxSequenceNumber,
+                         cfd->options()->max_sequential_skip_in_iterations,
+                         read_options.iterate_upper_bound);
 #endif
   } else {
     SequenceNumber latest_snapshot = versions_->LastSequence();
     SuperVersion* sv = nullptr;
    sv = cfd->GetReferencedSuperVersion(&mutex_);
 
     auto snapshot =
-        options.snapshot != nullptr
-            ? reinterpret_cast<const SnapshotImpl*>(options.snapshot)->number_
+        read_options.snapshot != nullptr
+            ? reinterpret_cast<const SnapshotImpl*>(
+                read_options.snapshot)->number_
             : latest_snapshot;
 
     // Try to generate a DB iterator tree in continuous memory area to be
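As a usage note for the NewIterator changes above (the tailing branch now builds a ForwardIterator plus NewDBIterator directly, with max_sequential_skip_in_iterations passed through explicitly): a hedged caller-side sketch using the public API of this era. Error handling is trimmed and the path name is illustrative only.

#include <cassert>
#include "rocksdb/db.h"
#include "rocksdb/options.h"

int main() {
  rocksdb::DB* db = nullptr;
  rocksdb::Options options;
  options.create_if_missing = true;
  rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/tailing_demo", &db);
  assert(s.ok());

  // Tailing read path exercised by the first branch of NewIterator() above.
  rocksdb::ReadOptions ro;
  ro.tailing = true;  // not available in ROCKSDB_LITE builds
  rocksdb::Iterator* it = db->NewIterator(ro);
  for (it->SeekToFirst(); it->Valid(); it->Next()) {
    // consume it->key() / it->value(); new writes become visible on re-seek
  }
  delete it;
  delete db;
  return 0;
}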
@@ -3742,19 +3743,22 @@ Iterator* DBImpl::NewIterator(const ReadOptions& options,
     // likely that any iterator pointer is close to the iterator it points to so
     // that they are likely to be in the same cache line and/or page.
     ArenaWrappedDBIter* db_iter = NewArenaWrappedDbIterator(
-        env_, *cfd->options(), cfd->user_comparator(),
-        snapshot, options.iterate_upper_bound);
+        env_, *cfd->ioptions(), cfd->user_comparator(),
+        snapshot, cfd->options()->max_sequential_skip_in_iterations,
+        read_options.iterate_upper_bound);
 
     Iterator* internal_iter =
-        NewInternalIterator(options, cfd, sv, db_iter->GetArena());
+        NewInternalIterator(read_options, cfd, sv, db_iter->GetArena());
     db_iter->SetIterUnderDBIter(internal_iter);
 
     return db_iter;
   }
+  // To stop compiler from complaining
+  return nullptr;
 }
 
 Status DBImpl::NewIterators(
-    const ReadOptions& options,
+    const ReadOptions& read_options,
     const std::vector<ColumnFamilyHandle*>& column_families,
     std::vector<Iterator*>* iterators) {
   iterators->clear();
@@ -3763,7 +3767,7 @@ Status DBImpl::NewIterators(
   std::vector<SuperVersion*> super_versions;
   super_versions.reserve(column_families.size());
 
-  if (!options.tailing) {
+  if (!read_options.tailing) {
     mutex_.Lock();
    latest_snapshot = versions_->LastSequence();
     for (auto cfh : column_families) {
@@ -3773,17 +3777,18 @@ Status DBImpl::NewIterators(
     mutex_.Unlock();
   }
 
-  if (options.tailing) {
+  if (read_options.tailing) {
 #ifdef ROCKSDB_LITE
     return Status::InvalidArgument(
         "Tailing interator not supported in RocksDB lite");
 #else
     for (auto cfh : column_families) {
       auto cfd = reinterpret_cast<ColumnFamilyHandleImpl*>(cfh)->cfd();
-      auto iter = new ForwardIterator(this, options, cfd);
+      auto iter = new ForwardIterator(this, read_options, cfd);
       iterators->push_back(
-          NewDBIterator(env_, *cfd->options(), cfd->user_comparator(), iter,
-                        kMaxSequenceNumber));
+          NewDBIterator(env_, *cfd->ioptions(), cfd->user_comparator(), iter,
+                        kMaxSequenceNumber,
+                        cfd->options()->max_sequential_skip_in_iterations));
     }
 #endif
   } else {
@@ -3838,7 +3845,7 @@ Status DBImpl::Put(const WriteOptions& o, ColumnFamilyHandle* column_family,
 Status DBImpl::Merge(const WriteOptions& o, ColumnFamilyHandle* column_family,
                      const Slice& key, const Slice& val) {
   auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
-  if (!cfh->cfd()->options()->merge_operator) {
+  if (!cfh->cfd()->ioptions()->merge_operator) {
     return Status::NotSupported("Provide a merge_operator when opening DB");
   } else {
     return DB::Merge(o, column_family, key, val);
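The guard above is the user-visible contract: calling DB::Merge without a merge_operator configured returns NotSupported. Below is a hedged sketch of satisfying that requirement with an AssociativeMergeOperator subclass; the concatenation logic is just an example, and the exact virtual signature should be checked against the headers of this code vintage.

#include <memory>
#include <string>
#include "rocksdb/db.h"
#include "rocksdb/merge_operator.h"

// Toy example: treat values as strings and append on merge.
class AppendOperator : public rocksdb::AssociativeMergeOperator {
 public:
  bool Merge(const rocksdb::Slice& /*key*/, const rocksdb::Slice* existing,
             const rocksdb::Slice& value, std::string* new_value,
             rocksdb::Logger* /*logger*/) const override {
    new_value->clear();
    if (existing != nullptr) {
      new_value->assign(existing->data(), existing->size());
      new_value->append(",");
    }
    new_value->append(value.data(), value.size());
    return true;
  }
  const char* Name() const override { return "AppendOperator"; }
};

// Without options.merge_operator set, DBImpl::Merge() above returns
// Status::NotSupported("Provide a merge_operator when opening DB").
void OpenWithMergeOperator(rocksdb::DB** db) {
  rocksdb::Options options;
  options.create_if_missing = true;
  options.merge_operator = std::make_shared<AppendOperator>();
  rocksdb::DB::Open(options, "/tmp/merge_demo", db);
}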
@@ -4814,8 +4821,8 @@ Status DB::Open(const DBOptions& db_options, const std::string& dbname,
 
   if (s.ok()) {
     for (auto cfd : *impl->versions_->GetColumnFamilySet()) {
-      if (cfd->options()->compaction_style == kCompactionStyleUniversal ||
-          cfd->options()->compaction_style == kCompactionStyleFIFO) {
+      if (cfd->ioptions()->compaction_style == kCompactionStyleUniversal ||
+          cfd->ioptions()->compaction_style == kCompactionStyleFIFO) {
         Version* current = cfd->current();
         for (int i = 1; i < current->NumberLevels(); ++i) {
           int num_files = current->NumLevelFiles(i);
@@ -4827,7 +4834,7 @@ Status DB::Open(const DBOptions& db_options, const std::string& dbname,
         }
       }
     }
-      if (cfd->options()->merge_operator != nullptr &&
+      if (cfd->ioptions()->merge_operator != nullptr &&
           !cfd->mem()->IsMergeOperatorSupported()) {
         s = Status::InvalidArgument(
             "The memtable of column family %s does not support merge operator "