Add a blob-specific cache priority (facebook#10461) (tikv#354)
* Add a blob-specific cache priority (facebook#10461)

Summary:
RocksDB's `Cache` abstraction currently supports two priority levels for items: high (used for frequently accessed/highly valuable SST metablocks like index/filter blocks) and low (used for SST data blocks). Blobs are typically lower-value targets for caching than data blocks, since 1) with BlobDB, data blocks containing blob references conceptually form an index structure which has to be consulted before we can read the blob value, and 2) cached blobs represent only a single key-value, while cached data blocks generally contain multiple KVs. Since we would like to make it possible to use the same backing cache for the block cache and the blob cache, it would make sense to add a new, lower-than-low cache priority level (bottom level) for blobs so data blocks are prioritized over them.

This task is a part of facebook#10156

Pull Request resolved: facebook#10461

Reviewed By: siying

Differential Revision: D38672823

Pulled By: ltamasi

fbshipit-source-id: 90cf7362036563d79891f47be2cc24b827482743

* make format

Signed-off-by: Connor1996 <[email protected]>

* make format

Signed-off-by: Connor1996 <[email protected]>

---------

Signed-off-by: Connor1996 <[email protected]>
Co-authored-by: Gang Liao <[email protected]>
Connor1996 and gangliao authored Feb 1, 2024
1 parent dcf2f8d commit 3dba9fa
Showing 21 changed files with 1,249 additions and 534 deletions.
395 changes: 379 additions & 16 deletions HISTORY.md

Large diffs are not rendered by default.

4 changes: 4 additions & 0 deletions cache/cache.cc
@@ -33,6 +33,10 @@ static std::unordered_map<std::string, OptionTypeInfo>
{offsetof(struct LRUCacheOptions, high_pri_pool_ratio),
OptionType::kDouble, OptionVerificationType::kNormal,
OptionTypeFlags::kMutable}},
{"low_pri_pool_ratio",
{offsetof(struct LRUCacheOptions, low_pri_pool_ratio),
OptionType::kDouble, OptionVerificationType::kNormal,
OptionTypeFlags::kMutable}},
};
#endif // ROCKSDB_LITE

104 changes: 87 additions & 17 deletions cache/lru_cache.cc
@@ -111,14 +111,17 @@ void LRUHandleTable::Resize() {

LRUCacheShard::LRUCacheShard(
size_t capacity, bool strict_capacity_limit, double high_pri_pool_ratio,
bool use_adaptive_mutex, CacheMetadataChargePolicy metadata_charge_policy,
int max_upper_hash_bits,
double low_pri_pool_ratio, bool use_adaptive_mutex,
CacheMetadataChargePolicy metadata_charge_policy, int max_upper_hash_bits,
const std::shared_ptr<SecondaryCache>& secondary_cache)
: capacity_(0),
high_pri_pool_usage_(0),
low_pri_pool_usage_(0),
strict_capacity_limit_(strict_capacity_limit),
high_pri_pool_ratio_(high_pri_pool_ratio),
high_pri_pool_capacity_(0),
low_pri_pool_ratio_(low_pri_pool_ratio),
low_pri_pool_capacity_(0),
table_(max_upper_hash_bits),
usage_(0),
lru_usage_(0),
@@ -129,6 +132,7 @@ LRUCacheShard::LRUCacheShard(
lru_.next = &lru_;
lru_.prev = &lru_;
lru_low_pri_ = &lru_;
lru_bottom_pri_ = &lru_;
SetCapacity(capacity);
}

@@ -191,10 +195,12 @@ void LRUCacheShard::ApplyToSomeEntries(
index_begin, index_end);
}

void LRUCacheShard::TEST_GetLRUList(LRUHandle** lru, LRUHandle** lru_low_pri) {
void LRUCacheShard::TEST_GetLRUList(LRUHandle** lru, LRUHandle** lru_low_pri,
LRUHandle** lru_bottom_pri) {
MutexLock l(&mutex_);
*lru = &lru_;
*lru_low_pri = lru_low_pri_;
*lru_bottom_pri = lru_bottom_pri_;
}

size_t LRUCacheShard::TEST_GetLRUSize() {
@@ -213,12 +219,20 @@ double LRUCacheShard::GetHighPriPoolRatio() {
return high_pri_pool_ratio_;
}

double LRUCacheShard::GetLowPriPoolRatio() {
MutexLock l(&mutex_);
return low_pri_pool_ratio_;
}

void LRUCacheShard::LRU_Remove(LRUHandle* e) {
assert(e->next != nullptr);
assert(e->prev != nullptr);
if (lru_low_pri_ == e) {
lru_low_pri_ = e->prev;
}
if (lru_bottom_pri_ == e) {
lru_bottom_pri_ = e->prev;
}
e->next->prev = e->prev;
e->prev->next = e->next;
e->prev = e->next = nullptr;
@@ -228,6 +242,9 @@ void LRUCacheShard::LRU_Remove(LRUHandle* e) {
if (e->InHighPriPool()) {
assert(high_pri_pool_usage_ >= total_charge);
high_pri_pool_usage_ -= total_charge;
} else if (e->InLowPriPool()) {
assert(low_pri_pool_usage_ >= total_charge);
low_pri_pool_usage_ -= total_charge;
}
}

@@ -242,17 +259,34 @@ void LRUCacheShard::LRU_Insert(LRUHandle* e) {
e->prev->next = e;
e->next->prev = e;
e->SetInHighPriPool(true);
e->SetInLowPriPool(false);
high_pri_pool_usage_ += total_charge;
MaintainPoolSize();
} else {
// Insert "e" to the head of low-pri pool. Note that when
// high_pri_pool_ratio is 0, head of low-pri pool is also head of LRU list.
} else if (low_pri_pool_ratio_ > 0 &&
(e->IsHighPri() || e->IsLowPri() || e->HasHit())) {
// Insert "e" to the head of low-pri pool.
e->next = lru_low_pri_->next;
e->prev = lru_low_pri_;
e->prev->next = e;
e->next->prev = e;
e->SetInHighPriPool(false);
e->SetInLowPriPool(true);
low_pri_pool_usage_ += total_charge;
MaintainPoolSize();
lru_low_pri_ = e;
} else {
// Insert "e" to the head of bottom-pri pool.
e->next = lru_bottom_pri_->next;
e->prev = lru_bottom_pri_;
e->prev->next = e;
e->next->prev = e;
e->SetInHighPriPool(false);
e->SetInLowPriPool(false);
// if the low-pri pool is empty, lru_low_pri_ also needs to be updated.
if (lru_bottom_pri_ == lru_low_pri_) {
lru_low_pri_ = e;
}
lru_bottom_pri_ = e;
}
lru_usage_ += total_charge;
}
@@ -263,10 +297,24 @@ void LRUCacheShard::MaintainPoolSize() {
lru_low_pri_ = lru_low_pri_->next;
assert(lru_low_pri_ != &lru_);
lru_low_pri_->SetInHighPriPool(false);
lru_low_pri_->SetInLowPriPool(true);
size_t total_charge =
lru_low_pri_->CalcTotalCharge(metadata_charge_policy_);
assert(high_pri_pool_usage_ >= total_charge);
high_pri_pool_usage_ -= total_charge;
low_pri_pool_usage_ += total_charge;
}

while (low_pri_pool_usage_ > low_pri_pool_capacity_) {
// Overflow last entry in low-pri pool to bottom-pri pool.
lru_bottom_pri_ = lru_bottom_pri_->next;
assert(lru_bottom_pri_ != &lru_);
lru_bottom_pri_->SetInHighPriPool(false);
lru_bottom_pri_->SetInLowPriPool(false);
size_t total_charge =
lru_bottom_pri_->CalcTotalCharge(metadata_charge_policy_);
assert(low_pri_pool_usage_ >= total_charge);
low_pri_pool_usage_ -= total_charge;
}
}

@@ -292,6 +340,7 @@ void LRUCacheShard::SetCapacity(size_t capacity) {
MutexLock l(&mutex_);
capacity_ = capacity;
high_pri_pool_capacity_ = capacity_ * high_pri_pool_ratio_;
low_pri_pool_capacity_ = capacity_ * low_pri_pool_ratio_;
EvictFromLRU(0, &last_reference_list);
}

@@ -507,6 +556,13 @@ void LRUCacheShard::SetHighPriorityPoolRatio(double high_pri_pool_ratio) {
MaintainPoolSize();
}

void LRUCacheShard::SetLowPriorityPoolRatio(double low_pri_pool_ratio) {
MutexLock l(&mutex_);
low_pri_pool_ratio_ = low_pri_pool_ratio;
low_pri_pool_capacity_ = capacity_ * low_pri_pool_ratio_;
MaintainPoolSize();
}

bool LRUCacheShard::Release(Cache::Handle* handle, bool force_erase) {
if (handle == nullptr) {
return false;
@@ -640,12 +696,15 @@ std::string LRUCacheShard::GetPrintableOptions() const {
MutexLock l(&mutex_);
snprintf(buffer, kBufferSize, " high_pri_pool_ratio: %.3lf\n",
high_pri_pool_ratio_);
snprintf(buffer + strlen(buffer), kBufferSize - strlen(buffer),
" low_pri_pool_ratio: %.3lf\n", low_pri_pool_ratio_);
}
return std::string(buffer);
}

LRUCache::LRUCache(size_t capacity, int num_shard_bits,
bool strict_capacity_limit, double high_pri_pool_ratio,
double low_pri_pool_ratio,
std::shared_ptr<MemoryAllocator> allocator,
bool use_adaptive_mutex,
CacheMetadataChargePolicy metadata_charge_policy,
@@ -659,7 +718,7 @@ LRUCache::LRUCache(size_t capacity, int num_shard_bits,
for (int i = 0; i < num_shards_; i++) {
new (&shards_[i]) LRUCacheShard(
per_shard, strict_capacity_limit, high_pri_pool_ratio,
use_adaptive_mutex, metadata_charge_policy,
low_pri_pool_ratio, use_adaptive_mutex, metadata_charge_policy,
/* max_upper_hash_bits */ 32 - num_shard_bits, secondary_cache);
}
secondary_cache_ = secondary_cache;
@@ -763,38 +822,49 @@ std::shared_ptr<Cache> NewLRUCache(
double high_pri_pool_ratio,
std::shared_ptr<MemoryAllocator> memory_allocator, bool use_adaptive_mutex,
CacheMetadataChargePolicy metadata_charge_policy,
const std::shared_ptr<SecondaryCache>& secondary_cache) {
const std::shared_ptr<SecondaryCache>& secondary_cache,
double low_pri_pool_ratio) {
if (num_shard_bits >= 20) {
return nullptr; // the cache cannot be sharded into too many fine pieces
}
if (high_pri_pool_ratio < 0.0 || high_pri_pool_ratio > 1.0) {
// invalid high_pri_pool_ratio
return nullptr;
}
if (low_pri_pool_ratio < 0.0 || low_pri_pool_ratio > 1.0) {
// Invalid low_pri_pool_ratio
return nullptr;
}
if (low_pri_pool_ratio + high_pri_pool_ratio > 1.0) {
// Invalid high_pri_pool_ratio and low_pri_pool_ratio combination
return nullptr;
}
if (num_shard_bits < 0) {
num_shard_bits = GetDefaultCacheShardBits(capacity);
}
return std::make_shared<LRUCache>(
capacity, num_shard_bits, strict_capacity_limit, high_pri_pool_ratio,
std::move(memory_allocator), use_adaptive_mutex, metadata_charge_policy,
secondary_cache);
low_pri_pool_ratio, std::move(memory_allocator), use_adaptive_mutex,
metadata_charge_policy, secondary_cache);
}

std::shared_ptr<Cache> NewLRUCache(const LRUCacheOptions& cache_opts) {
return NewLRUCache(
cache_opts.capacity, cache_opts.num_shard_bits,
cache_opts.strict_capacity_limit, cache_opts.high_pri_pool_ratio,
cache_opts.memory_allocator, cache_opts.use_adaptive_mutex,
cache_opts.metadata_charge_policy, cache_opts.secondary_cache);
return NewLRUCache(cache_opts.capacity, cache_opts.num_shard_bits,
cache_opts.strict_capacity_limit,
cache_opts.high_pri_pool_ratio,
cache_opts.memory_allocator, cache_opts.use_adaptive_mutex,
cache_opts.metadata_charge_policy,
cache_opts.secondary_cache, cache_opts.low_pri_pool_ratio);
}

std::shared_ptr<Cache> NewLRUCache(
size_t capacity, int num_shard_bits, bool strict_capacity_limit,
double high_pri_pool_ratio,
std::shared_ptr<MemoryAllocator> memory_allocator, bool use_adaptive_mutex,
CacheMetadataChargePolicy metadata_charge_policy) {
CacheMetadataChargePolicy metadata_charge_policy,
double low_pri_pool_ratio) {
return NewLRUCache(capacity, num_shard_bits, strict_capacity_limit,
high_pri_pool_ratio, memory_allocator, use_adaptive_mutex,
metadata_charge_policy, nullptr);
metadata_charge_policy, nullptr, low_pri_pool_ratio);
}
} // namespace ROCKSDB_NAMESPACE
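
For illustration, a minimal sketch of inserting a blob with the new bottom priority, assuming the `Cache::Priority::BOTTOM` enumerator introduced by the upstream change and the classic `Cache::Insert(key, value, charge, deleter, handle, priority)` signature used in this branch; the value type, deleter, and charge calculation are illustrative only.

// Sketch: cache a blob value with bottom priority so cached data blocks
// (low) and index/filter blocks (high) are preferred under memory pressure.
#include <memory>
#include <string>

#include "rocksdb/cache.h"

namespace {
void DeleteStringValue(const rocksdb::Slice& /*key*/, void* value) {
  delete static_cast<std::string*>(value);
}
}  // namespace

rocksdb::Status CacheBlob(const std::shared_ptr<rocksdb::Cache>& cache,
                          const rocksdb::Slice& cache_key, std::string blob) {
  auto* value = new std::string(std::move(blob));
  // Approximate the charge by the blob size; a real integration would also
  // account for key and metadata overhead.
  const size_t charge = value->size();
  // With a null handle, the cache invokes the deleter itself if the insert
  // fails, so no extra cleanup is needed on error.
  return cache->Insert(cache_key, value, charge, &DeleteStringValue,
                       /*handle=*/nullptr, rocksdb::Cache::Priority::BOTTOM);
}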
