File: cache_with_err.go

package info
gitlab-agent 16.1.3-2
  • links: PTS, VCS
  • area: contrib
  • in suites: forky, sid, trixie
  • size: 6,324 kB
  • sloc: makefile: 175; sh: 52; ruby: 3
file content (91 lines) | stat: -rw-r--r-- 2,721 bytes
package cache

import (
	"context"
	"time"

	"go.opentelemetry.io/otel/trace"
)

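// GetItemDirectly fetches a fresh value from the underlying source, bypassing the cache.
// CacheWithErr.GetItem calls it when caching is disabled or the cached entry needs a refresh.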
type GetItemDirectly[V any] func() (V, error)

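// ErrCacher caches errors by key so that a recent failure can be returned for a limited
// time without retrying the underlying call.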
type ErrCacher[K any] interface {
	// GetError retrieves a cached error.
	// Returns nil if no cached error is found or if there was a problem accessing the cache.
	GetError(ctx context.Context, key K) error
	// CacheError puts the error into the cache.
	CacheError(ctx context.Context, key K, err error, errTtl time.Duration)
}

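// CacheWithErr wraps Cache[K, V] and additionally caches selected errors via an ErrCacher,
// so repeated failures for the same key are short-circuited while the cached error is still valid.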
type CacheWithErr[K comparable, V any] struct {
	cache     *Cache[K, V]
	ttl       time.Duration
	errTtl    time.Duration
	errCacher ErrCacher[K]
	tracer    trace.Tracer
	// isCacheable determines whether an error is cacheable or not.
	// Returns true if cacheable and false otherwise.
	isCacheable func(error) bool
}

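// NewWithError constructs a CacheWithErr. ttl controls how long successful items are kept,
// errTtl how long cacheable errors are kept in errCacher, and isCacheableFunc decides which
// errors are worth caching.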
func NewWithError[K comparable, V any](ttl, errTtl time.Duration, errCacher ErrCacher[K], tracer trace.Tracer,
	isCacheableFunc func(error) bool) *CacheWithErr[K, V] {
	return &CacheWithErr[K, V]{
		cache:       New[K, V](ttl),
		ttl:         ttl,
		errTtl:      errTtl,
		errCacher:   errCacher,
		tracer:      tracer,
		isCacheable: isCacheableFunc,
	}
}

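// GetItem returns the value for key, calling f to refresh it when the cached entry is missing
// or expired. A ttl of 0 disables caching and calls f directly. A previously cached error for
// key is returned without calling f; errors from f that isCacheable reports as cacheable are
// stored in the ErrCacher for errTtl.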
func (c *CacheWithErr[K, V]) GetItem(ctx context.Context, key K, f GetItemDirectly[V]) (V, error) {
	ctx, span := c.tracer.Start(ctx, "cache.GetItem", trace.WithSpanKind(trace.SpanKindInternal))
	defer span.End()
	if c.ttl == 0 {
		return f()
	}
	c.cache.EvictExpiredEntries()
	lockCtx, lockSpan := c.tracer.Start(ctx, "cache.Lock", trace.WithSpanKind(trace.SpanKindInternal))
	entry := c.cache.GetOrCreateCacheEntry(key)
	locked := entry.Lock(lockCtx) // a concurrent caller may be refreshing the entry. Block until exclusive access is available.
	lockSpan.End()
	if !locked { // the lock was not acquired because the context was done
		var v V
		return v, ctx.Err()
	}
	evictEntry := false
	defer func() {
		entry.Unlock()
		if evictEntry {
			// Currently the cache (e.g. in EvictExpiredEntries()) grabs the cache lock and then an entry's lock,
			// but only via TryLock(). Some other method may need to use Lock() rather than TryLock() in the
			// future. That could deadlock if such a method ran concurrently while we were holding an entry's
			// lock. Hence, to future-proof the code, EvictEntry() is called only after the entry's lock has
			// been released here.
			c.cache.EvictEntry(key, entry)
		}
	}()
	if entry.IsNeedRefreshLocked() {
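		// Check whether a recent failure for this key has been cached before hitting the origin.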
		err := c.errCacher.GetError(ctx, key)
		if err != nil {
			evictEntry = true
			var v V
			return v, err
		}
		item, err := f()
		if err != nil {
			if c.isCacheable(err) {
				// cacheable error
				c.errCacher.CacheError(ctx, key, err, c.errTtl)
			}
			var v V
			return v, err
		}
		entry.Item = item
		entry.HasItem = true
		entry.Expires = time.Now().Add(c.ttl)
	}
	return entry.Item, nil
}
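
Below is a hypothetical usage sketch, not part of the packaged file. It assumes the code sits alongside cache_with_err.go in package cache; mapErrCacher, fetchConfig and exampleUsage are illustrative names, and the tracer comes from OpenTelemetry's global provider via otel.Tracer.

package cache

import (
	"context"
	"sync"
	"time"

	"go.opentelemetry.io/otel"
)

// mapErrCacher is a toy in-memory ErrCacher; it ignores errTtl and never expires entries.
type mapErrCacher[K comparable] struct {
	mu   sync.Mutex
	errs map[K]error
}

func (m *mapErrCacher[K]) GetError(ctx context.Context, key K) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	return m.errs[key]
}

func (m *mapErrCacher[K]) CacheError(ctx context.Context, key K, err error, errTtl time.Duration) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.errs[key] = err
}

// fetchConfig stands in for a real lookup against an upstream service.
func fetchConfig() (string, error) {
	return "config-data", nil
}

func exampleUsage(ctx context.Context) (string, error) {
	c := NewWithError[string, string](
		time.Minute,    // ttl for successful items
		10*time.Second, // errTtl for cacheable errors
		&mapErrCacher[string]{errs: map[string]error{}},
		otel.Tracer("cache-example"),
		func(err error) bool { return true }, // treat every error as cacheable
	)
	return c.GetItem(ctx, "project-1", fetchConfig)
}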