File: tiered_secondary_cache.cc

//  Copyright (c) Meta Platforms, Inc. and affiliates.
//  This source code is licensed under both the GPLv2 (found in the
//  COPYING file in the root directory) and Apache 2.0 License
//  (found in the LICENSE.Apache file in the root directory).

#include "cache/tiered_secondary_cache.h"

#include "monitoring/statistics_impl.h"

namespace ROCKSDB_NAMESPACE {

// Creation callback for use in the lookup path. It calls the upper layer
// create_cb to create the object, and optionally calls the compressed
// secondary cache InsertSaved to save the compressed block. If
// advise_erase is set, it means the primary cache wants the block to be
// erased in the secondary cache, so we skip calling InsertSaved.
//
// For the time being, we assume that all blocks in the nvm tier belong to
// the primary block cache (i.e. CacheTier::kVolatileTier). That can be changed
// if we implement demotion from the compressed secondary cache to the nvm
// cache in the future.
Status TieredSecondaryCache::MaybeInsertAndCreate(
    const Slice& data, CompressionType type, CacheTier source,
    Cache::CreateContext* ctx, MemoryAllocator* allocator,
    Cache::ObjectPtr* out_obj, size_t* out_charge) {
  TieredSecondaryCache::CreateContext* context =
      static_cast<TieredSecondaryCache::CreateContext*>(ctx);
  assert(source == CacheTier::kVolatileTier);
  if (!context->advise_erase && type != kNoCompression) {
    // Attempt to insert into compressed secondary cache
    // TODO: Don't hardcode the source
    context->comp_sec_cache->InsertSaved(*context->key, data, type, source)
        .PermitUncheckedError();
    RecordTick(context->stats, COMPRESSED_SECONDARY_CACHE_PROMOTIONS);
  } else {
    RecordTick(context->stats, COMPRESSED_SECONDARY_CACHE_PROMOTION_SKIPS);
  }
  // Primary cache will accept the object, so call its helper to create
  // the object
  return context->helper->create_cb(data, type, source, context->inner_ctx,
                                    allocator, out_obj, out_charge);
}
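
// For example (illustrative): a compressed block returned by the nvm tier
// with type == kSnappyCompression and advise_erase == false is first saved
// to the compressed secondary cache via InsertSaved() and counted as a
// promotion; the same block with advise_erase == true (or with
// type == kNoCompression) skips InsertSaved() and is only parsed into an
// uncompressed object for the primary cache.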

// The lookup first looks in the compressed secondary cache. If it's a miss,
// the nvm cache lookup is called. The cache item helper and create context
// are wrapped in order to intercept the creation callback and decide
// whether to promote the block to the compressed secondary cache.
std::unique_ptr<SecondaryCacheResultHandle> TieredSecondaryCache::Lookup(
    const Slice& key, const Cache::CacheItemHelper* helper,
    Cache::CreateContext* create_context, bool wait, bool advise_erase,
    Statistics* stats, bool& kept_in_sec_cache) {
  bool dummy = false;
  std::unique_ptr<SecondaryCacheResultHandle> result =
      target()->Lookup(key, helper, create_context, wait, advise_erase, stats,
                       /*kept_in_sec_cache=*/dummy);
  // Reporting the item as kept prevents the primary cache from spilling it
  // back into the secondary cache later.
  kept_in_sec_cache = true;
  if (result) {
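    // A hit in the compressed secondary cache completes synchronously,
    // since the data is already in memory.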
    assert(result->IsReady());
    return result;
  }

  // If wait is true, then we can be a bit more efficient and avoid a memory
  // allocation for the CreateContext.
  const Cache::CacheItemHelper* outer_helper =
      TieredSecondaryCache::GetHelper();
  if (wait) {
    TieredSecondaryCache::CreateContext ctx;
    ctx.key = &key;
    ctx.advise_erase = advise_erase;
    ctx.helper = helper;
    ctx.inner_ctx = create_context;
    ctx.comp_sec_cache = target();
    ctx.stats = stats;

    return nvm_sec_cache_->Lookup(key, outer_helper, &ctx, wait, advise_erase,
                                  stats, kept_in_sec_cache);
  }

  // If wait is false, i.e. it's an async lookup, we have to allocate a result
  // handle for tracking purposes. Embed the CreateContext inside the handle
  // so we need only allocate memory once instead of twice.
  std::unique_ptr<ResultHandle> handle(new ResultHandle());
  handle->ctx()->key = &key;
  handle->ctx()->advise_erase = advise_erase;
  handle->ctx()->helper = helper;
  handle->ctx()->inner_ctx = create_context;
  handle->ctx()->comp_sec_cache = target();
  handle->ctx()->stats = stats;
  handle->SetInnerHandle(
      nvm_sec_cache_->Lookup(key, outer_helper, handle->ctx(), wait,
                             advise_erase, stats, kept_in_sec_cache));
  if (!handle->inner_handle()) {
    handle.reset();
  } else {
    result.reset(handle.release());
  }

  return result;
}
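
// Example usage (an illustrative sketch; tiered_sec_cache, key, helper,
// create_ctx, and stats are hypothetical caller-side variables):
//
//   bool kept_in_sec_cache = false;
//   std::unique_ptr<SecondaryCacheResultHandle> handle =
//       tiered_sec_cache->Lookup(key, helper, create_ctx, /*wait=*/false,
//                                /*advise_erase=*/false, stats,
//                                kept_in_sec_cache);
//   if (handle && !handle->IsReady()) {
//     // Async path: complete the pending nvm lookup.
//     tiered_sec_cache->WaitAll({handle.get()});
//   }
//   Cache::ObjectPtr obj = handle ? handle->Value() : nullptr;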

// Call the nvm cache WaitAll to complete the lookups
void TieredSecondaryCache::WaitAll(
    std::vector<SecondaryCacheResultHandle*> handles) {
  std::vector<SecondaryCacheResultHandle*> nvm_handles;
  std::vector<ResultHandle*> my_handles;
  nvm_handles.reserve(handles.size());
  for (auto handle : handles) {
    // The handle could belong to the compressed secondary cache. Skip if
    // that's the case.
    if (handle->IsReady()) {
      continue;
    }
    ResultHandle* hdl = static_cast<ResultHandle*>(handle);
    nvm_handles.push_back(hdl->inner_handle());
    my_handles.push_back(hdl);
  }
  nvm_sec_cache_->WaitAll(nvm_handles);
  for (auto handle : my_handles) {
    assert(handle->inner_handle()->IsReady());
    handle->Complete();
  }
}
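
// For example (illustrative): given two handles from Lookup() above, h1
// already ready because it was served by the compressed secondary cache and
// h2 still pending on the nvm cache, WaitAll({h1, h2}) skips h1, forwards
// h2's inner handle to nvm_sec_cache_->WaitAll(), and then calls
// h2->Complete(), which (per the ResultHandle definition in the header) is
// expected to copy the created object and charge out of the inner handle.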

}  // namespace ROCKSDB_NAMESPACE