package lazycache
import (
"sync"
"github.com/hashicorp/golang-lru/v2/simplelru"
)
// New creates a new Cache configured by options.
//
// It panics if the underlying LRU rejects options.MaxEntries
// (simplelru.NewLRU returns an error for a non-positive size), treating
// an invalid size as a programmer error.
func New[K comparable, V any](options Options[K, V]) *Cache[K, V] {
	// Wrap the user-supplied callback so it never fires before the value
	// is ready, and only fires for successfully created values.
	var onEvict simplelru.EvictCallback[K, *valueWrapper[V]]
	if options.OnEvict != nil {
		onEvict = func(key K, value *valueWrapper[V]) {
			// Block until the prime function has finished for this entry.
			value.wait()
			if value.found {
				options.OnEvict(key, value.value)
			}
		}
	}
	// MaxEntries is already an int; no conversion needed.
	lru, err := simplelru.NewLRU[K, *valueWrapper[V]](options.MaxEntries, onEvict)
	if err != nil {
		panic(err)
	}
	return &Cache[K, V]{
		lru: lru,
	}
}
// Options holds the cache options.
type Options[K comparable, V any] struct {
	// MaxEntries is the maximum number of entries that the cache should hold.
	// Note that this can also be adjusted after the cache is created with Resize.
	// NOTE(review): New panics when the underlying LRU rejects this size —
	// presumably for non-positive values; confirm against simplelru.NewLRU.
	MaxEntries int
	// OnEvict is an optional callback that is called when an entry is evicted.
	// It is only invoked for entries whose value was successfully created,
	// and only after that value is ready (see New).
	OnEvict func(key K, value V)
}
// Cache is a thread-safe resizable LRU cache.
type Cache[K comparable, V any] struct {
	// lru holds the entries; values are wrapped so readers can wait until
	// a value has been created (see valueWrapper and GetOrCreate).
	lru *simplelru.LRU[K, *valueWrapper[V]]
	// mu guards lru. The wrapped values synchronize separately via their
	// ready channel, so waiting on a value never holds this lock.
	mu sync.RWMutex
	// zerov is a zero value of V, returned on cache misses.
	zerov V
}
// Delete deletes the item with given key from the cache, returning if the
// key was contained.
func (c *Cache[K, V]) Delete(key K) bool {
	c.mu.Lock()
	removed := c.lru.Remove(key)
	c.mu.Unlock()
	return removed
}
// DeleteFunc deletes all entries for which the given function returns true.
// It returns the number of entries deleted.
//
// Note that matches is called with the read lock held, so it must not call
// back into this cache.
func (c *Cache[K, V]) DeleteFunc(matches func(key K, item V) bool) int {
	// Phase 1: under the read lock, collect the keys to delete. The read
	// lock prevents concurrent Add/Remove, so every key from Keys() is
	// still present when we Peek it.
	c.mu.RLock()
	keys := c.lru.Keys()
	var keysToDelete []K
	for _, key := range keys {
		// Peek rather than Get so recency order is not disturbed.
		w, _ := c.lru.Peek(key)
		// Skip entries whose create function failed or has not finished;
		// wait() blocks until the value is ready.
		if !w.wait().found {
			continue
		}
		if matches(key, w.value) {
			keysToDelete = append(keysToDelete, key)
		}
	}
	c.mu.RUnlock()
	// Phase 2: re-acquire as a write lock to delete. Entries may have been
	// removed in the window between the two locks, so count only the keys
	// Remove reports as actually present.
	c.mu.Lock()
	defer c.mu.Unlock()
	var deleteCount int
	for _, key := range keysToDelete {
		if c.lru.Remove(key) {
			deleteCount++
		}
	}
	return deleteCount
}
// Get returns the value associated with key and whether it was found.
// If the value is still being created by a concurrent GetOrCreate, this
// blocks (without holding the cache lock) until it is ready.
func (c *Cache[K, V]) Get(key K) (V, bool) {
	c.mu.Lock()
	w := c.get(key)
	c.mu.Unlock()
	if w == nil {
		return c.zerov, false
	}
	ready := w.wait()
	return ready.value, ready.found
}
// GetOrCreate returns the value associated with key, or creates it if it doesn't.
// It also returns a bool indicating if the value was found in the cache.
// Note that create, the cache prime function, is called once and then not called again for a given key
// unless the cache entry is evicted; it does not block other goroutines from calling GetOrCreate,
// it is not called with the cache lock held.
// Note that any error returned by create will be returned by GetOrCreate and repeated calls with the same key will
// receive the same error.
func (c *Cache[K, V]) GetOrCreate(key K, create func(key K) (V, error)) (V, bool, error) {
	c.mu.Lock()
	w := c.get(key)
	if w != nil {
		// Another goroutine has created (or is currently creating) the
		// value; release the lock before blocking on it.
		c.mu.Unlock()
		w.wait()
		// If w.ready is nil, we will repeat any error from the create function to concurrent callers.
		return w.value, true, w.err
	}
	// Cache miss: install a placeholder while still holding the lock so
	// concurrent callers for the same key find it and wait on w.ready.
	w = &valueWrapper[V]{
		ready: make(chan struct{}),
	}
	// Concurrent access to the same key will see w, but needs to wait for w.ready
	// to get the value.
	c.lru.Add(key, w)
	c.mu.Unlock()
	// Create the value with the lock released.
	v, err := create(key)
	w.err = err
	w.value = v
	w.found = err == nil
	// Publish the result to any goroutines blocked in w.wait().
	close(w.ready)
	if err != nil {
		// Remove the failed entry so a later call can retry. Goroutines
		// already blocked on w.ready still observe this error via w.err.
		c.Delete(key)
		return c.zerov, false, err
	}
	return v, false, nil
}
// Resize changes the cache size and returns the number of entries evicted.
func (c *Cache[K, V]) Resize(size int) (evicted int) {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.lru.Resize(size)
}
// Set associates value with key.
func (c *Cache[K, V]) Set(key K, value V) {
	c.mu.Lock()
	defer c.mu.Unlock()
	// A nil ready channel marks the value as immediately available.
	c.lru.Add(key, &valueWrapper[V]{value: value, found: true})
}
// get looks up key in the LRU, returning nil on a miss.
// The caller must hold c.mu.
func (c *Cache[K, V]) get(key K) *valueWrapper[V] {
	if w, ok := c.lru.Get(key); ok {
		return w
	}
	return nil
}
// contains returns true if the given key is in the cache.
// Note that this will also return true if the key is present but its
// value is not yet ready.
func (c *Cache[K, V]) contains(key K) bool {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.lru.Contains(key)
}
// keys returns a slice of the keys in the cache, oldest first.
// Note that this will also include keys whose values are not yet ready.
func (c *Cache[K, V]) keys() []K {
	c.mu.RLock()
	ks := c.lru.Keys()
	c.mu.RUnlock()
	return ks
}
// len returns the number of items in the cache.
// Note that this will also count values that are not yet ready.
func (c *Cache[K, V]) len() int {
	c.mu.RLock()
	n := c.lru.Len()
	c.mu.RUnlock()
	return n
}
// valueWrapper holds a cache value that is not available until the ready
// channel is nil or closed.
// This construct makes more sense if you look at the code in GetOrCreate.
type valueWrapper[V any] struct {
	value V
	found bool
	err   error
	ready chan struct{}
}

// wait blocks until the wrapped value is ready, then returns the receiver
// to allow call chaining. A nil ready channel means the value was stored
// directly (see Set) and is available immediately.
func (w *valueWrapper[V]) wait() *valueWrapper[V] {
	if ch := w.ready; ch != nil {
		<-ch
	}
	return w
}