
Commit 5f5e5fc

Revert atomic_size_t usage
Summary: By disassembling the function, we found that updates to the atomic variable do emit the `lock` prefix, which locks the memory bus. As a tradeoff, we protect GetUsage() with the mutex and leave usage_ as a plain size_t.

Test Plan: passed `cache_test`

Reviewers: dhruba

CC: leveldb

Differential Revision: https://reviews.facebook.net/D14667
1 parent 5090316 commit 5f5e5fc
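
The tradeoff described in the summary can be sketched minimally as follows. This is an illustrative comparison only, using std::mutex and std::lock_guard rather than RocksDB's port::Mutex and MutexLock, and the AtomicUsage/MutexUsage names are hypothetical, not part of the patch: an atomic counter whose relaxed fetch_add still compiles to a lock-prefixed instruction on x86, versus a plain size_t that is only updated while the cache mutex is already held and only read under that same mutex in the const accessor.

#include <atomic>
#include <cstddef>
#include <mutex>

// Variant the commit reverts away from: lock-free atomic updates.
// On x86, the relaxed fetch_add below still emits a `lock`-prefixed instruction.
struct AtomicUsage {
  std::atomic<size_t> usage{0};
  void Add(size_t charge) { usage.fetch_add(charge, std::memory_order_relaxed); }
  size_t Get() const { return usage.load(std::memory_order_relaxed); }
};

// Variant the commit moves to: plain size_t, updated while the cache
// mutex is already held, read under the same mutex in the accessor.
struct MutexUsage {
  mutable std::mutex mu;                              // mutable so the const Get() can lock it
  size_t usage = 0;
  void AddLocked(size_t charge) { usage += charge; }  // caller already holds mu
  size_t Get() const {
    std::lock_guard<std::mutex> l(mu);
    return usage;
  }
};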

1 file changed (+15 -7 lines)

util/cache.cc

+15 -7
@@ -156,7 +156,13 @@ class LRUCache {
   Cache::Handle* Lookup(const Slice& key, uint32_t hash);
   void Release(Cache::Handle* handle);
   void Erase(const Slice& key, uint32_t hash);
-  size_t GetUsage() const { return usage_.load(std::memory_order_relaxed); }
+  // Although in some platforms the update of size_t is atomic, to make sure
+  // GetUsage() works correctly under any platforms, we'll protect this
+  // function with mutex.
+  size_t GetUsage() const {
+    MutexLock l(&mutex_);
+    return usage_;
+  }
 
  private:
   void LRU_Remove(LRUHandle* e);
@@ -172,8 +178,10 @@ class LRUCache {
   uint32_t remove_scan_count_limit_;
 
   // mutex_ protects the following state.
-  port::Mutex mutex_;
-  std::atomic_size_t usage_;
+  // We don't count mutex_ as the cache's internal state so semantically we
+  // don't mind mutex_ invoking the non-const actions.
+  mutable port::Mutex mutex_;
+  size_t usage_;
 
   // Dummy head of LRU list.
   // lru.prev is newest entry, lru.next is oldest entry.
@@ -215,7 +223,7 @@ void LRUCache::FreeEntry(LRUHandle* e) {
 void LRUCache::LRU_Remove(LRUHandle* e) {
   e->next->prev = e->prev;
   e->prev->next = e->next;
-  usage_.fetch_sub(e->charge, std::memory_order_relaxed);
+  usage_ -= e->charge;
 }
 
 void LRUCache::LRU_Append(LRUHandle* e) {
@@ -224,7 +232,7 @@ void LRUCache::LRU_Append(LRUHandle* e) {
   e->prev = lru_.prev;
   e->prev->next = e;
   e->next->prev = e;
-  usage_.fetch_add(e->charge, std::memory_order_relaxed);
+  usage_ += e->charge;
 }
 
 Cache::Handle* LRUCache::Lookup(const Slice& key, uint32_t hash) {
@@ -283,7 +291,7 @@ Cache::Handle* LRUCache::Insert(
   // referenced by the cache first.
   LRUHandle* cur = lru_.next;
   for (unsigned int scanCount = 0;
-       GetUsage() > capacity_ && cur != &lru_
+       usage_ > capacity_ && cur != &lru_
        && scanCount < remove_scan_count_limit_; scanCount++) {
     LRUHandle* next = cur->next;
     if (cur->refs <= 1) {
@@ -299,7 +307,7 @@ Cache::Handle* LRUCache::Insert(
 
   // Free the space following strict LRU policy until enough space
   // is freed.
-  while (GetUsage() > capacity_ && lru_.next != &lru_) {
+  while (usage_ > capacity_ && lru_.next != &lru_) {
     LRUHandle* old = lru_.next;
     LRU_Remove(old);
     table_.Remove(old->key(), old->hash);
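
One consequence of the new locking, not spelled out in the summary: the eviction loops inside LRUCache::Insert now compare the plain usage_ field instead of calling GetUsage(). Insert() runs with mutex_ already held, and GetUsage() now acquires that same non-recursive mutex, so going through the accessor there would self-deadlock. A minimal sketch of the hazard, again with std::mutex and hypothetical names rather than the RocksDB types:

#include <cstddef>
#include <mutex>

struct CacheSketch {
  mutable std::mutex mu;
  size_t usage = 0;
  size_t capacity = 100;

  size_t GetUsage() const {
    std::lock_guard<std::mutex> l(mu);  // acquires mu
    return usage;
  }

  void Insert(size_t charge) {
    std::lock_guard<std::mutex> l(mu);  // mu is held for the whole insert
    // `while (GetUsage() > capacity)` here would re-acquire mu and deadlock,
    // so the locked path reads the field directly.
    while (usage > capacity) {
      --usage;                          // stand-in for evicting an LRU entry
    }
    usage += charge;
  }
};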
