@@ -156,7 +156,13 @@ class LRUCache {
   Cache::Handle* Lookup(const Slice& key, uint32_t hash);
   void Release(Cache::Handle* handle);
   void Erase(const Slice& key, uint32_t hash);
-  size_t GetUsage() const { return usage_.load(std::memory_order_relaxed); }
+  // Although on some platforms updates of size_t are atomic, to make sure
+  // GetUsage() works correctly on any platform, we'll protect this
+  // function with a mutex.
+  size_t GetUsage() const {
+    MutexLock l(&mutex_);
+    return usage_;
+  }
 
  private:
   void LRU_Remove(LRUHandle* e);
@@ -172,8 +178,10 @@ class LRUCache {
   uint32_t remove_scan_count_limit_;
 
   // mutex_ protects the following state.
-  port::Mutex mutex_;
-  std::atomic_size_t usage_;
+  // We don't count mutex_ as part of the cache's internal state, so
+  // semantically we don't mind const functions locking (mutating) it.
+  mutable port::Mutex mutex_;
+  size_t usage_;
 
   // Dummy head of LRU list.
   // lru.prev is newest entry, lru.next is oldest entry.
@@ -215,7 +223,7 @@ void LRUCache::FreeEntry(LRUHandle* e) {
 void LRUCache::LRU_Remove(LRUHandle* e) {
   e->next->prev = e->prev;
   e->prev->next = e->next;
-  usage_.fetch_sub(e->charge, std::memory_order_relaxed);
+  usage_ -= e->charge;
 }
 
 void LRUCache::LRU_Append(LRUHandle* e) {
@@ -224,7 +232,7 @@ void LRUCache::LRU_Append(LRUHandle* e) {
   e->prev = lru_.prev;
   e->prev->next = e;
   e->next->prev = e;
-  usage_.fetch_add(e->charge, std::memory_order_relaxed);
+  usage_ += e->charge;
 }
 
 Cache::Handle* LRUCache::Lookup(const Slice& key, uint32_t hash) {
@@ -283,7 +291,7 @@ Cache::Handle* LRUCache::Insert(
   // referenced by the cache first.
   LRUHandle* cur = lru_.next;
   for (unsigned int scanCount = 0;
-       GetUsage() > capacity_ && cur != &lru_
+       usage_ > capacity_ && cur != &lru_
        && scanCount < remove_scan_count_limit_; scanCount++) {
     LRUHandle* next = cur->next;
     if (cur->refs <= 1) {
@@ -299,7 +307,7 @@ Cache::Handle* LRUCache::Insert(
 
   // Free the space following strict LRU policy until enough space
   // is freed.
-  while (GetUsage() > capacity_ && lru_.next != &lru_) {
+  while (usage_ > capacity_ && lru_.next != &lru_) {
     LRUHandle* old = lru_.next;
     LRU_Remove(old);
     table_.Remove(old->key(), old->hash);
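The diff swaps a lock-free counter for a mutex-protected one. Below is a minimal, self-contained sketch of the resulting pattern, using std::mutex and std::lock_guard as stand-ins for RocksDB's port::Mutex and MutexLock; the UsageTracker class and its method names are illustrative, not part of the actual code.

// Sketch only: UsageTracker is a hypothetical class demonstrating the
// pattern above, with std::mutex standing in for port::Mutex.
#include <cstddef>
#include <mutex>

class UsageTracker {
 public:
  // Writers take the lock, so plain reads and writes of usage_ never race.
  void Add(size_t charge) {
    std::lock_guard<std::mutex> l(mutex_);
    usage_ += charge;
  }
  void Sub(size_t charge) {
    std::lock_guard<std::mutex> l(mutex_);
    usage_ -= charge;
  }
  // The getter is const yet can still lock: mutex_ is declared `mutable`
  // because locking it is not part of the object's logical state.
  size_t GetUsage() const {
    std::lock_guard<std::mutex> l(mutex_);
    return usage_;
  }

 private:
  mutable std::mutex mutex_;
  size_t usage_ = 0;
};

This also explains why the Insert() hunks switch from GetUsage() to reading usage_ directly: Insert() already holds mutex_, so calling the now-locking GetUsage() from under the same non-recursive mutex would self-deadlock, while the plain read is safe there precisely because the lock is held.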