1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304
|
package sievecache
import (
"sync"
)
// SyncSieveCache is a thread-safe wrapper around SieveCache.
// It provides the same functionality but with thread safety guarantees.
//
// A SyncSieveCache must not be copied after first use, since it contains
// a sync.RWMutex. Construct instances with NewSync, DefaultSync, or
// FromSieveCache; the zero value is not usable (cache would be nil).
type SyncSieveCache[K comparable, V any] struct {
	// cache is the wrapped, non-thread-safe cache. All access must go
	// through mutex.
	cache *SieveCache[K, V]
	// mutex guards cache. Read-only operations take RLock; operations
	// that mutate cache state (including Get, which updates internal
	// bookkeeping) take the full write lock.
	mutex sync.RWMutex
}
// NewSync creates a new thread-safe cache with the given capacity.
// It returns whatever error the underlying SieveCache constructor
// reports (e.g. for an invalid capacity).
func NewSync[K comparable, V any](capacity int) (*SyncSieveCache[K, V], error) {
	inner, err := New[K, V](capacity)
	if err != nil {
		return nil, err
	}
	// The zero value of sync.RWMutex is ready to use, so only the
	// wrapped cache needs explicit initialization.
	return &SyncSieveCache[K, V]{cache: inner}, nil
}
// DefaultSync creates a new thread-safe cache with a default capacity of 100.
//
// It panics if construction fails. With a positive, hard-coded capacity
// this is not expected to happen; the panic exists only to satisfy the
// error path of NewSync.
func DefaultSync[K comparable, V any]() *SyncSieveCache[K, V] {
	cache, err := NewSync[K, V](100)
	if err != nil {
		// Should never happen with a non-zero capacity. Include the
		// underlying error instead of discarding it, so an unexpected
		// failure is actually diagnosable.
		panic("sievecache: failed to create cache with default capacity: " + err.Error())
	}
	return cache
}
// FromSieveCache creates a new thread-safe cache from an existing SieveCache.
// After wrapping, the caller should stop using the passed-in cache
// directly, since direct access bypasses the mutex.
func FromSieveCache[K comparable, V any](cache *SieveCache[K, V]) *SyncSieveCache[K, V] {
	// sync.RWMutex is usable in its zero state; no explicit init needed.
	return &SyncSieveCache[K, V]{cache: cache}
}
// Capacity returns the maximum number of entries the cache can hold.
func (c *SyncSieveCache[K, V]) Capacity() int {
	c.mutex.RLock()
	capacity := c.cache.Capacity()
	c.mutex.RUnlock()
	return capacity
}
// Len returns the number of cached values.
func (c *SyncSieveCache[K, V]) Len() int {
	c.mutex.RLock()
	n := c.cache.Len()
	c.mutex.RUnlock()
	return n
}
// IsEmpty returns true when no values are currently cached.
func (c *SyncSieveCache[K, V]) IsEmpty() bool {
	c.mutex.RLock()
	empty := c.cache.IsEmpty()
	c.mutex.RUnlock()
	return empty
}
// ContainsKey returns true if there is a value in the cache mapped to by key.
func (c *SyncSieveCache[K, V]) ContainsKey(key K) bool {
	c.mutex.RLock()
	found := c.cache.ContainsKey(key)
	c.mutex.RUnlock()
	return found
}
// Get returns the value in the cache mapped to by key.
// Unlike the unwrapped SieveCache, this returns a copy of the value
// rather than a reference, since the mutex guard is released after this
// method returns.
//
// NOTE: the full write lock is taken here (not RLock), presumably
// because the underlying Get updates internal cache state.
func (c *SyncSieveCache[K, V]) Get(key K) (V, bool) {
	c.mutex.Lock()
	value, ok := c.cache.Get(key)
	c.mutex.Unlock()
	return value, ok
}
// GetMut gets a mutable reference to the value in the cache mapped to by key via a callback function.
// Returns true if the key exists and the callback was invoked, false otherwise.
//
// Locking protocol: the value is copied out under the lock, the callback
// runs on the copy WITHOUT the lock held, and the result is written back
// under the lock afterwards. Consequences:
//   - f may be slow, or safely call back into this cache, without
//     holding up other goroutines or deadlocking;
//   - if the key is evicted or removed while f runs, the write-back is
//     dropped and false is returned even though f was invoked;
//   - a concurrent write to the same key between the two locked
//     sections can be overwritten (last-writer-wins).
func (c *SyncSieveCache[K, V]) GetMut(key K, f func(*V)) bool {
	// First get a copy of the value to avoid holding the lock during callback
	c.mutex.Lock()
	var valueCopy V
	var exists bool
	ptr := c.cache.GetPointer(key)
	if ptr != nil {
		valueCopy = *ptr
		exists = true
	}
	c.mutex.Unlock()
	if !exists {
		return false
	}
	// Execute callback on the copy
	f(&valueCopy)
	// Update the value back in the cache
	c.mutex.Lock()
	defer c.mutex.Unlock()
	// Check if the key still exists — it may have been evicted or removed
	// while the lock was released above.
	ptr = c.cache.GetPointer(key)
	if ptr != nil {
		*ptr = valueCopy
		return true
	}
	return false
}
// Insert maps key to value in the cache, possibly evicting old entries.
func (c *SyncSieveCache[K, V]) Insert(key K, value V) bool {
	c.mutex.Lock()
	inserted := c.cache.Insert(key, value)
	c.mutex.Unlock()
	return inserted
}
// Remove removes the cache entry mapped to by key.
func (c *SyncSieveCache[K, V]) Remove(key K) (V, bool) {
	c.mutex.Lock()
	value, ok := c.cache.Remove(key)
	c.mutex.Unlock()
	return value, ok
}
// Evict removes and returns a value from the cache that was not recently accessed.
func (c *SyncSieveCache[K, V]) Evict() (V, bool) {
	c.mutex.Lock()
	value, ok := c.cache.Evict()
	c.mutex.Unlock()
	return value, ok
}
// Clear removes all entries from the cache.
func (c *SyncSieveCache[K, V]) Clear() {
	c.mutex.Lock()
	c.cache.Clear()
	c.mutex.Unlock()
}
// Keys returns a slice of all keys in the cache.
func (c *SyncSieveCache[K, V]) Keys() []K {
	c.mutex.RLock()
	keys := c.cache.Keys()
	c.mutex.RUnlock()
	return keys
}
// Values returns a slice of all values in the cache.
func (c *SyncSieveCache[K, V]) Values() []V {
	c.mutex.RLock()
	values := c.cache.Values()
	c.mutex.RUnlock()
	return values
}
// Items returns a slice of all key-value pairs in the cache.
func (c *SyncSieveCache[K, V]) Items() []struct {
	Key   K
	Value V
} {
	c.mutex.RLock()
	items := c.cache.Items()
	c.mutex.RUnlock()
	return items
}
// ForEachValue applies a function to all values in the cache.
// The function receives and can modify a copy of each value, and changes
// will be saved back to the cache.
//
// This is ForEachEntry with the key ignored; the same snapshot /
// unlocked-callback / conditional write-back protocol applies.
func (c *SyncSieveCache[K, V]) ForEachValue(f func(*V)) {
	c.ForEachEntry(func(_ K, v *V) {
		f(v)
	})
}
// ForEachEntry applies a function to all key-value pairs in the cache.
// The function receives the key and can modify a copy of each value, and
// changes will be saved back to the cache.
//
// The callback runs without the lock held; entries evicted or removed
// concurrently are not written back.
func (c *SyncSieveCache[K, V]) ForEachEntry(f func(K, *V)) {
	// Snapshot the entries under the read lock so the callback can run
	// without blocking other goroutines.
	c.mutex.RLock()
	snapshot := c.cache.Items()
	c.mutex.RUnlock()

	// Run the callback on copies and collect the (possibly modified)
	// results. Pre-sized to avoid map growth.
	updated := make(map[K]V, len(snapshot))
	for _, entry := range snapshot {
		v := entry.Value
		f(entry.Key, &v)
		updated[entry.Key] = v
	}

	// Write results back under the write lock, skipping keys that
	// disappeared while the lock was released.
	c.mutex.Lock()
	defer c.mutex.Unlock()
	for k, v := range updated {
		if c.cache.ContainsKey(k) {
			c.cache.Insert(k, v)
		}
	}
}
// WithLock gets exclusive access to the underlying cache to perform multiple operations atomically.
// This is useful when you need to perform a series of operations that depend on each other.
//
// WARNING: f must not call any method on the SyncSieveCache itself —
// the write lock is already held for the duration of f, so doing so
// would deadlock. Operate only on the *SieveCache passed to f.
func (c *SyncSieveCache[K, V]) WithLock(f func(*SieveCache[K, V])) {
	c.mutex.Lock()
	// defer (rather than an explicit unlock) so the lock is released
	// even if f panics.
	defer c.mutex.Unlock()
	f(c.cache)
}
// Retain only keeps elements specified by the predicate.
// Removes all entries for which f returns false.
//
// The predicate runs without the lock held, on a snapshot of the
// entries; matching keys are then removed under the write lock.
func (c *SyncSieveCache[K, V]) Retain(f func(K, V) bool) {
	// Snapshot the entries under the read lock.
	c.mutex.RLock()
	snapshot := c.cache.Items()
	c.mutex.RUnlock()

	// Size the removal list assuming roughly 25% of entries go away,
	// clamped to [8, 1024] to avoid silly allocations at either extreme.
	guess := len(snapshot) / 4
	switch {
	case guess < 8:
		guess = 8
	case guess > 1024:
		guess = 1024
	}

	// Evaluate the predicate without holding the lock.
	doomed := make([]K, 0, guess)
	for _, entry := range snapshot {
		if !f(entry.Key, entry.Value) {
			doomed = append(doomed, entry.Key)
		}
	}

	// Remove the rejected entries under the write lock.
	c.mutex.Lock()
	defer c.mutex.Unlock()
	for _, key := range doomed {
		c.cache.Remove(key)
	}
}
// RetainBatch is an optimized version of Retain that collects all keys to
// remove first, then removes them in a single batch operation with a single
// lock acquisition. Unlike Retain, the write lock is skipped entirely when
// nothing needs removing.
func (c *SyncSieveCache[K, V]) RetainBatch(f func(K, V) bool) {
	// Snapshot the entries under the read lock.
	c.mutex.RLock()
	snapshot := c.cache.Items()
	c.mutex.RUnlock()

	// Size the removal list assuming roughly 25% of entries go away,
	// clamped to [8, 1024].
	guess := len(snapshot) / 4
	switch {
	case guess < 8:
		guess = 8
	case guess > 1024:
		guess = 1024
	}

	// Evaluate the predicate without holding the lock.
	doomed := make([]K, 0, guess)
	for _, entry := range snapshot {
		if !f(entry.Key, entry.Value) {
			doomed = append(doomed, entry.Key)
		}
	}

	// Only take the write lock if there is actually work to do.
	if len(doomed) == 0 {
		return
	}
	c.mutex.Lock()
	defer c.mutex.Unlock()
	for _, key := range doomed {
		c.cache.Remove(key)
	}
}
// RecommendedCapacity analyzes the current cache utilization and recommends a new capacity.
// The computation is delegated to the underlying cache under the read lock.
func (c *SyncSieveCache[K, V]) RecommendedCapacity(minFactor, maxFactor, lowThreshold, highThreshold float64) int {
	c.mutex.RLock()
	recommended := c.cache.RecommendedCapacity(minFactor, maxFactor, lowThreshold, highThreshold)
	c.mutex.RUnlock()
	return recommended
}
|