File: janitor.go

Package: golang-github-twin-gocache 2.4.0-1

package gocache

import (
	"log"
	"time"
)

const (
	// JanitorShiftTarget is the target number of expired keys to find during passive clean-up duty
	// before pausing the passive expired-key eviction process
	JanitorShiftTarget = 25

	// JanitorMaxIterationsPerShift is the maximum number of nodes to traverse before pausing
	//
	// This is to prevent the janitor from traversing the entire cache, which could take a long time
	// to complete depending on the size of the cache.
	//
	// By limiting it to a small number, we effectively reduce the performance impact of passive eviction.
	JanitorMaxIterationsPerShift = 1000

	// JanitorMinShiftBackOff is the minimum interval between each shift of up to
	// JanitorMaxIterationsPerShift traversal steps
	JanitorMinShiftBackOff = 50 * time.Millisecond

	// JanitorMaxShiftBackOff is the maximum interval between each shift of up to
	// JanitorMaxIterationsPerShift traversal steps
	JanitorMaxShiftBackOff = 500 * time.Millisecond
)
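
// Taken together, these constants mean that a single janitor shift visits at most 1000 nodes and
// stops early once it has found 25 expired keys; between shifts, the janitor sleeps for between
// 50ms and 500ms, backing off toward the maximum while there is nothing to evict.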

// StartJanitor starts the janitor on a different goroutine
// The janitor's job is to delete expired keys in the background; in other words, it takes care of passive eviction.
// It can be stopped by calling Cache.StopJanitor.
// If you do not start the janitor, expired keys will only be deleted when they are accessed through Get, GetByKeys, or
// GetAll.
func (cache *Cache) StartJanitor() error {
	if cache.stopJanitor != nil {
		return ErrJanitorAlreadyRunning
	}
	cache.stopJanitor = make(chan bool)
	go func() {
		// rather than starting from the tail on every run, we can try to start from the last traversed entry
		var lastTraversedNode *Entry
		totalNumberOfExpiredKeysInPreviousRunFromTailToHead := 0
		backOff := JanitorMinShiftBackOff
		for {
			select {
			case <-time.After(backOff):
				// Passive clean-up duty
				cache.mutex.Lock()
				if cache.tail != nil {
					start := time.Now()
					steps := 0
					expiredEntriesFound := 0
					current := cache.tail
					if lastTraversedNode != nil {
						// Make sure the lastTraversedNode is still in the cache, otherwise we might be traversing nodes that were already deleted.
						// Furthermore, we need to make sure that the entry from the cache has the same pointer as the lastTraversedNode
						// to verify that there isn't just a new cache entry with the same key (i.e. in case lastTraversedNode got evicted)
						if entryFromCache, isInCache := cache.get(lastTraversedNode.Key); isInCache && entryFromCache == lastTraversedNode {
							current = lastTraversedNode
						}
					}
					if current == cache.tail {
						if Debug {
							log.Printf("There are currently %d entries in the cache. The last walk resulted in finding %d expired keys", len(cache.entries), totalNumberOfExpiredKeysInPreviousRunFromTailToHead)
						}
						totalNumberOfExpiredKeysInPreviousRunFromTailToHead = 0
					}
					for current != nil {
						// since we're walking from the tail to the head, we get the previous reference
						var previous *Entry
						steps++
						if current.Expired() {
							expiredEntriesFound++
							// Because delete will remove the previous reference from the entry, we need to store the
							// previous reference before we delete it
							previous = current.previous
							cache.delete(current.Key)
							cache.stats.ExpiredKeys++
						}
						if current == cache.head {
							lastTraversedNode = nil
							break
						}
						// If the current node was just deleted, move to the previous reference we stored before
						// deleting it; otherwise, follow the current node's previous pointer
						if previous != nil {
							current = previous
						} else {
							current = current.previous
						}
						lastTraversedNode = current
						if steps == JanitorMaxIterationsPerShift || expiredEntriesFound >= JanitorShiftTarget {
							if expiredEntriesFound > 0 {
								backOff = JanitorMinShiftBackOff
							} else {
								if backOff*2 <= JanitorMaxShiftBackOff {
									backOff *= 2
								} else {
									backOff = JanitorMaxShiftBackOff
								}
							}
							break
						}
					}
					if Debug {
						log.Printf("traversed %d nodes and found %d expired entries in %s before stopping\n", steps, expiredEntriesFound, time.Since(start))
					}
					totalNumberOfExpiredKeysInPreviousRunFromTailToHead += expiredEntriesFound
				} else {
					if backOff*2 < JanitorMaxShiftBackOff {
						backOff *= 2
					} else {
						backOff = JanitorMaxShiftBackOff
					}
				}
				cache.mutex.Unlock()
			case <-cache.stopJanitor:
				cache.stopJanitor <- true
				return
			}
		}
	}()
	//if Debug {
	//	go func() {
	//		var m runtime.MemStats
	//		for {
	//			runtime.ReadMemStats(&m)
	//			log.Printf("Alloc=%vMB; HeapReleased=%vMB; Sys=%vMB; HeapInUse=%vMB; HeapObjects=%v; HeapObjectsFreed=%v; GC=%v; cache.memoryUsage=%vMB; cacheSize=%d\n", m.Alloc/1024/1024, m.HeapReleased/1024/1024, m.Sys/1024/1024, m.HeapInuse/1024/1024, m.HeapObjects, m.Frees, m.NumGC, cache.memoryUsage/1024/1024, cache.Count())
	//			time.Sleep(3 * time.Second)
	//		}
	//	}()
	//}
	return nil
}

// StopJanitor stops the janitor
func (cache *Cache) StopJanitor() {
	if cache.stopJanitor != nil {
		// Tell the janitor to stop, then wait for it to confirm on the same channel that it has stopped.
		// This may seem a bit odd, but it allows us to avoid a data race when setting
		// cache.stopJanitor to nil
		cache.stopJanitor <- true
		<-cache.stopJanitor
		cache.stopJanitor = nil
	}
}
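
For context, here is a minimal usage sketch. It is not part of janitor.go: the import path, the NewCache constructor, and the SetWithTTL/Get methods are assumed from the rest of the gocache library, and the sketch only illustrates starting the janitor, letting it evict an expired key, and stopping it.

package main

import (
	"fmt"
	"time"

	"github.com/TwiN/gocache/v2"
)

func main() {
	cache := gocache.NewCache()
	// Start passive (background) eviction; without the janitor, expired keys are only
	// removed when they are accessed through Get, GetByKeys, or GetAll.
	if err := cache.StartJanitor(); err != nil {
		panic(err)
	}
	defer cache.StopJanitor()

	// Store a key that expires almost immediately.
	cache.SetWithTTL("session", "value", 100*time.Millisecond)

	// Give the janitor a few shifts (50ms-500ms back-off) to evict the expired key.
	time.Sleep(2 * time.Second)

	if _, exists := cache.Get("session"); !exists {
		fmt.Println("expired key was evicted by the janitor")
	}
}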