1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170
|
// SPDX-License-Identifier: GPL-2.0-or-later
#include "cache.h"
#include "backing_dev.h"
#include "cache_dev.h"
#include "dm_pcache.h"
/**
 * cache_key_gc - Releases the reference of a cache key segment.
 * @cache: Pointer to the pcache_cache structure.
 * @key: Pointer to the cache key to be garbage collected.
 *
 * This function decrements the reference count of the cache segment
 * associated with the given key. If the reference count drops to zero,
 * the segment may be invalidated and reused.
 */
static void cache_key_gc(struct pcache_cache *cache, struct pcache_cache_key *key)
{
	struct pcache_cache_segment *seg = key->cache_pos.cache_seg;

	/* Drop the segment reference that was held on behalf of this key. */
	cache_seg_put(seg);
}
static bool need_gc(struct pcache_cache *cache, struct pcache_cache_pos *dirty_tail, struct pcache_cache_pos *key_tail)
{
struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
struct pcache_cache_kset_onmedia *kset_onmedia;
void *dirty_addr, *key_addr;
u32 segs_used, segs_gc_threshold, to_copy;
int ret;
dirty_addr = cache_pos_addr(dirty_tail);
key_addr = cache_pos_addr(key_tail);
if (dirty_addr == key_addr) {
pcache_dev_debug(pcache, "key tail is equal to dirty tail: %u:%u\n",
dirty_tail->cache_seg->cache_seg_id,
dirty_tail->seg_off);
return false;
}
kset_onmedia = (struct pcache_cache_kset_onmedia *)cache->gc_kset_onmedia_buf;
to_copy = min(PCACHE_KSET_ONMEDIA_SIZE_MAX, PCACHE_SEG_SIZE - key_tail->seg_off);
ret = copy_mc_to_kernel(kset_onmedia, key_addr, to_copy);
if (ret) {
pcache_dev_err(pcache, "error to read kset: %d", ret);
return false;
}
/* Check if kset_onmedia is corrupted */
if (kset_onmedia->magic != PCACHE_KSET_MAGIC) {
pcache_dev_debug(pcache, "gc error: magic is not as expected. key_tail: %u:%u magic: %llx, expected: %llx\n",
key_tail->cache_seg->cache_seg_id, key_tail->seg_off,
kset_onmedia->magic, PCACHE_KSET_MAGIC);
return false;
}
/* Verify the CRC of the kset_onmedia */
if (kset_onmedia->crc != cache_kset_crc(kset_onmedia)) {
pcache_dev_debug(pcache, "gc error: crc is not as expected. crc: %x, expected: %x\n",
cache_kset_crc(kset_onmedia), kset_onmedia->crc);
return false;
}
segs_used = bitmap_weight(cache->seg_map, cache->n_segs);
segs_gc_threshold = cache->n_segs * pcache_cache_get_gc_percent(cache) / 100;
if (segs_used < segs_gc_threshold) {
pcache_dev_debug(pcache, "segs_used: %u, segs_gc_threshold: %u\n", segs_used, segs_gc_threshold);
return false;
}
return true;
}
/**
 * last_kset_gc - Advances the garbage collection for the last kset.
 * @cache: Pointer to the pcache_cache structure.
 * @kset_onmedia: Pointer to the kset_onmedia structure for the last kset.
 *
 * The last kset of a segment carries the id of the next kset segment
 * (next_cache_seg_id) rather than more keys: move key_tail to the start
 * of that segment and release the fully collected one back to the
 * segment map.
 */
static void last_kset_gc(struct pcache_cache *cache, struct pcache_cache_kset_onmedia *kset_onmedia)
{
	struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
	struct pcache_cache_segment *cur_seg, *next_seg;

	cur_seg = cache->key_tail.cache_seg;
	next_seg = &cache->segments[kset_onmedia->next_cache_seg_id];

	mutex_lock(&cache->key_tail_lock);
	cache->key_tail.cache_seg = next_seg;
	cache->key_tail.seg_off = 0;
	/* Persist the new key_tail before the old segment can be reused. */
	cache_encode_key_tail(cache);
	mutex_unlock(&cache->key_tail_lock);

	pcache_dev_debug(pcache, "gc advance kset seg: %u\n", cur_seg->cache_seg_id);

	/* Mark the fully collected segment free for reallocation. */
	spin_lock(&cache->seg_map_lock);
	__clear_bit(cur_seg->cache_seg_id, cache->seg_map);
	spin_unlock(&cache->seg_map_lock);
}
/**
 * pcache_cache_gc_fn - Delayed-work handler that advances cache GC.
 * @work: Embedded work item (cache->gc_work.work).
 *
 * Repeatedly snapshots dirty_tail/key_tail, lets need_gc() validate and
 * copy the kset at key_tail into cache->gc_kset_onmedia_buf, drops the
 * segment reference held by every key in that kset, then advances
 * key_tail past it. Re-arms itself when there is nothing (more) to
 * collect; returns without re-arming when the device is stopping or a
 * previous GC error was recorded in cache->gc_errors.
 */
void pcache_cache_gc_fn(struct work_struct *work)
{
	struct pcache_cache *cache = container_of(work, struct pcache_cache, gc_work.work);
	struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
	struct pcache_cache_pos dirty_tail, key_tail;
	struct pcache_cache_kset_onmedia *kset_onmedia;
	struct pcache_cache_key_onmedia *key_onmedia;
	struct pcache_cache_key *key;
	int ret;
	int i;

	kset_onmedia = (struct pcache_cache_kset_onmedia *)cache->gc_kset_onmedia_buf;

	while (true) {
		/* Stop permanently on teardown or after an unrecoverable GC error. */
		if (pcache_is_stopping(pcache) || atomic_read(&cache->gc_errors))
			return;

		/* Get new tail positions */
		mutex_lock(&cache->dirty_tail_lock);
		cache_pos_copy(&dirty_tail, &cache->dirty_tail);
		mutex_unlock(&cache->dirty_tail_lock);

		mutex_lock(&cache->key_tail_lock);
		cache_pos_copy(&key_tail, &cache->key_tail);
		mutex_unlock(&cache->key_tail_lock);

		/* need_gc() also copies the kset at key_tail into kset_onmedia. */
		if (!need_gc(cache, &dirty_tail, &key_tail))
			break;

		if (kset_onmedia->flags & PCACHE_KSET_FLAGS_LAST) {
			/* Don't move to the next segment if dirty_tail has not moved */
			if (dirty_tail.cache_seg == key_tail.cache_seg)
				break;

			last_kset_gc(cache, kset_onmedia);
			continue;
		}

		/* Release the segment reference pinned by each key in the kset. */
		for (i = 0; i < kset_onmedia->key_num; i++) {
			struct pcache_cache_key key_tmp = { 0 };

			key_onmedia = &kset_onmedia->data[i];

			key = &key_tmp;
			cache_key_init(&cache->req_key_tree, key);

			ret = cache_key_decode(cache, key_onmedia, key);
			if (ret) {
				/* return without re-arm gc work, and prevent future
				 * gc, because we can't retry the partial-gc-ed kset
				 */
				atomic_inc(&cache->gc_errors);
				pcache_dev_err(pcache, "failed to decode cache key in gc\n");
				return;
			}

			cache_key_gc(cache, key);
		}

		pcache_dev_debug(pcache, "gc advance: %u:%u %u\n",
				 key_tail.cache_seg->cache_seg_id,
				 key_tail.seg_off,
				 get_kset_onmedia_size(kset_onmedia));

		/* Advance and persist key_tail past the fully collected kset. */
		mutex_lock(&cache->key_tail_lock);
		cache_pos_advance(&cache->key_tail, get_kset_onmedia_size(kset_onmedia));
		cache_encode_key_tail(cache);
		mutex_unlock(&cache->key_tail_lock);
	}

	queue_delayed_work(cache_get_wq(cache), &cache->gc_work, PCACHE_CACHE_GC_INTERVAL);
}
|