File: CacheTest.cpp

package info (click to toggle)
watchman 4.9.0-9
  • links: PTS, VCS
  • area: main
  • in suites: forky, sid
  • size: 9,992 kB
  • sloc: cpp: 27,459; python: 6,538; java: 3,404; php: 3,257; ansic: 2,803; javascript: 1,116; makefile: 671; ruby: 364; sh: 124; xml: 102; lisp: 4
file content (185 lines) | stat: -rw-r--r-- 5,377 bytes parent folder | download | duplicates (3)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
/* Copyright 2017-present Facebook, Inc.
 * Licensed under the Apache License, Version 2.0 */
#include "watchman_system.h"
#include <deque>
#include <string>
#include "Future.h"
#include "LRUCache.h"
#include "thirdparty/tap.h"

using namespace watchman;

static constexpr const std::chrono::milliseconds kErrorTTL(1000);

// Exercises the synchronous portion of the LRUCache API: insert,
// lookup, replacement, erasure, capacity-bounded LRU eviction and
// clear().  Null-checks are spelled out explicitly (`!= nullptr`)
// throughout for consistency.
void test_basics() {
  LRUCache<std::string, bool> cache(5, kErrorTTL);

  ok(cache.size() == 0, "initially empty");
  ok(cache.get("foo") == nullptr, "nullptr for non-existent item");

  ok(cache.set("foo", true)->value(), "inserted true");
  ok(cache.size() == 1, "size is now one");
  ok(cache.get("foo")->value(), "looked up item");

  // Replacing the value for an existing key must not grow the cache.
  ok(cache.set("foo", false)->value() == false, "replaced true with false");
  ok(cache.get("foo")->value() == false, "looked up new false item");
  ok(cache.size() == 1, "replacement didn't change size");

  // erase() hands back the removed node; a second erase of the same
  // key yields nullptr rather than an error.
  ok(cache.erase("foo")->value() == false, "erased and returned false foo");
  ok(cache.erase("foo") == nullptr, "double erase doesn't return anything");
  ok(cache.get("foo") == nullptr, "nullptr for non-existent item");

  // Overfill the capacity-5 cache with 6 entries; the least recently
  // used one ("0") must be evicted.
  for (size_t i = 0; i < 6; ++i) {
    ok(cache.set(std::to_string(i), true) != nullptr, "inserted");
  }

  ok(cache.size() == 5,
     "limited to 5 items, despite inserting 6 total. size=%" PRIsize_t,
     cache.size());

  ok(cache.get("0") == nullptr, "we expect 0 to have been evicted");
  for (size_t i = 1; i < 6; ++i) {
    ok(cache.get(std::to_string(i)) != nullptr,
       "found later node %" PRIsize_t,
       i);
  }

  // get() counts as a "use": the loop above refreshed 1..5 in order,
  // so "1" is now least-recently-used and goes first, while touching
  // "2" below protects it from the eviction caused by inserting "baz".
  ok(cache.set("bar", true) != nullptr, "added new item");
  ok(cache.get("1") == nullptr, "we expect 1 to be evicted");
  ok(cache.get("2") != nullptr, "2 should be there, and we just touched it");
  ok(cache.set("baz", true) != nullptr, "added new item");
  ok(cache.size() == 5, "max size still respected");
  ok(cache.get("2") != nullptr, "2 should still be there; not evicted");
  ok(cache.get("3") == nullptr, "we expect 3 to be evicted");

  cache.clear();
  ok(cache.size() == 0, "cleared out and have zero items");
}

// Helper for deferring execution of a future during tests
class ManualExecutor : public Executor {
 public:
  void run(std::function<void()>&& func) override {
    funcs_.emplace_back(std::move(func));
  }

  void runNext() {
    auto& f = funcs_.front();
    f();
    funcs_.pop_front();
  }

  size_t size() const {
    return funcs_.size();
  }

  void runAll() {
    while (!funcs_.empty()) {
      runNext();
    }
  }

 private:
  std::deque<std::function<void()>> funcs_;
};

// Exercises the asynchronous getter API of LRUCache: coalescing of
// concurrent lookups for the same key, error propagation from a
// throwing getter, the pending-capacity limit, and TTL-based eviction
// of error nodes.  A ManualExecutor is used so the test controls
// exactly when each getter runs.
void test_future() {
  using Cache = LRUCache<int, int>;
  using Node = typename Cache::NodeType;
  Cache cache(5, kErrorTTL);
  ManualExecutor exec;

  // Fixed "now" passed to the cache so TTL behavior is deterministic.
  auto now = std::chrono::steady_clock::now();

  // Getter that succeeds, producing (1 + key) * 2.
  auto okGetter = [&exec](int k) {
    return makeFuture(k).via(&exec).then(
        [](Result<int>&& key) { return (1 + key.value()) * 2; });
  };

  // Getter that always throws; used to populate error nodes below.
  auto failGetter = [&exec](int k) {
    return makeFuture(k).via(&exec).then(
        [](Result<int> &&) -> int { throw std::runtime_error("bleet"); });
  };

  // Queue up a get via a getter that will succeed
  auto f = cache.get(0, okGetter, now);
  ok(!f.isReady(), "future didn't finish yet");

  // While a getter is pending for key 0, a plain (getter-less) get for
  // the same key is rejected.
  try {
    cache.get(0);
    ok(false, "should throw");
  } catch (const std::runtime_error&) {
    ok(true, "should throw runtime_error for mixing getters");
  }

  // Queue up a second get using the same getter
  auto f2 = cache.get(0, okGetter, now);
  ok(!f2.isReady(), "also not ready");

  // Both futures must be satisfied by one underlying getter call.
  ok(exec.size() == 1, "only scheduled a single call to getter");
  exec.runNext();

  ok(f.isReady(), "first is ready");
  ok(f2.isReady(), "second is ready");

  // (1 + 0) * 2 == 2, and the coalesced lookups share the same node.
  ok(f.get()->value() == 2, "got correct value for first");
  ok(f2.get() == f.get(), "got same value for second");

  // Now to saturate the cache with failed lookups

  cache.clear();
  std::vector<Future<std::shared_ptr<const Node>>> futures;
  // Stagger each entry's timestamp by i ms so their error TTLs differ.
  for (size_t i = 1; i < 6; ++i) {
    futures.emplace_back(
        cache.get(i, failGetter, now + std::chrono::milliseconds(i)));
  }

  // And one more to check that we'll throw for exceeding capacity
  try {
    cache.get(7, failGetter, now);
    ok(false, "should throw");
  } catch (const std::runtime_error&) {
    ok(true, "threw error for exceeding pending capacity");
  }

  ok(exec.size() == 5, "should be 5 things pending");

  // Let them make progress
  exec.runAll();

  ok(cache.size() == 5, "cache should be full");

  // All five lookups must have completed as error nodes (failGetter
  // always throws).
  collectAll(futures.begin(), futures.end())
      .then([](
          Result<std::vector<Result<std::shared_ptr<const Node>>>>&& result) {

        for (auto& r : result.value()) {
          ok(r.value()->result().hasError(), "should be an error node");
        }
      })
      .wait();

  ok(cache.size() == 5, "cache should still be full");

  ok(cache.get(42, now) == nullptr, "we don't have 42 yet");

  // Because we are saturated with error results, an explicit set
  // should fail
  try {
    cache.set(42, 42, now);
    ok(false, "should throw");
  } catch (const std::runtime_error&) {
    ok(true, "threw error for exceeding pending capacity");
  }

  // Now if we "sleep" for long enough, we should be able to evict
  // the error nodes and allow the insert to happen.
  ok(cache.get(42, now) == nullptr, "we don't have 42 yet");
  ok(cache.set(42, 42, now + kErrorTTL + std::chrono::milliseconds(1)),
     "inserted");
}

int main() {
  // TAP plan: must equal the total number of ok() calls performed by
  // the two test functions below; update it when adding assertions.
  plan_tests(53);
  test_basics();
  test_future();
  return exit_status();
}