// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2024 Google */
#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/seq_file.h>

#include "../../mm/slab.h" /* kmem_cache, slab_caches and slab_mutex */

/* open-coded version */
struct bpf_iter_kmem_cache {
        __u64 __opaque[1];
} __attribute__((aligned(8)));

struct bpf_iter_kmem_cache_kern {
        struct kmem_cache *pos;
} __attribute__((aligned(8)));

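/* Sentinel pos value: the iterator is initialized but has not returned an
 * entry yet. A NULL pos, in contrast, means the iteration is finished.
 */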
#define KMEM_CACHE_POS_START ((void *)1L)

__bpf_kfunc_start_defs();

__bpf_kfunc int bpf_iter_kmem_cache_new(struct bpf_iter_kmem_cache *it)
{
        struct bpf_iter_kmem_cache_kern *kit = (void *)it;

        BUILD_BUG_ON(sizeof(*kit) > sizeof(*it));
        BUILD_BUG_ON(__alignof__(*kit) != __alignof__(*it));

        kit->pos = KMEM_CACHE_POS_START;
        return 0;
}
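
/*
 * Advance to the next kmem_cache. The iterator pins the cache it returns
 * by bumping its refcount under slab_mutex, and drops the pin on the
 * previously returned cache; if that drop removes the last reference,
 * the cache is destroyed after the mutex is released.
 */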
__bpf_kfunc struct kmem_cache *bpf_iter_kmem_cache_next(struct bpf_iter_kmem_cache *it)
{
        struct bpf_iter_kmem_cache_kern *kit = (void *)it;
        struct kmem_cache *prev = kit->pos;
        struct kmem_cache *next;
        bool destroy = false;

        if (!prev)
                return NULL;

        mutex_lock(&slab_mutex);

        if (list_empty(&slab_caches)) {
                mutex_unlock(&slab_mutex);
                return NULL;
        }

        if (prev == KMEM_CACHE_POS_START)
                next = list_first_entry(&slab_caches, struct kmem_cache, list);
        else if (list_last_entry(&slab_caches, struct kmem_cache, list) == prev)
                next = NULL;
        else
                next = list_next_entry(prev, list);

        /* boot_caches have negative refcount, don't touch them */
        if (next && next->refcount > 0)
                next->refcount++;

        /* Skip kmem_cache_destroy() for active entries */
        if (prev && prev != KMEM_CACHE_POS_START) {
                if (prev->refcount > 1)
                        prev->refcount--;
                else if (prev->refcount == 1)
                        destroy = true;
        }

        mutex_unlock(&slab_mutex);

        if (destroy)
                kmem_cache_destroy(prev);

        kit->pos = next;
        return next;
}

__bpf_kfunc void bpf_iter_kmem_cache_destroy(struct bpf_iter_kmem_cache *it)
{
        struct bpf_iter_kmem_cache_kern *kit = (void *)it;
        struct kmem_cache *s = kit->pos;
        bool destroy = false;

        if (s == NULL || s == KMEM_CACHE_POS_START)
                return;

        mutex_lock(&slab_mutex);

        /* Skip kmem_cache_destroy() for active entries */
        if (s->refcount > 1)
                s->refcount--;
        else if (s->refcount == 1)
                destroy = true;

        mutex_unlock(&slab_mutex);

        if (destroy)
                kmem_cache_destroy(s);
}

__bpf_kfunc_end_defs();
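
/*
 * A minimal usage sketch of the open-coded iterator (an illustration,
 * not part of this file; it assumes the bpf_for_each() convenience
 * macro from the selftests' bpf_experimental.h, and the program name
 * and section are made up):
 *
 *	SEC("syscall")
 *	int count_caches(void *ctx)
 *	{
 *		struct kmem_cache *s;
 *		int cnt = 0;
 *
 *		bpf_for_each(kmem_cache, s)
 *			cnt++;
 *		return cnt;
 *	}
 *
 * The macro expands to calls to the bpf_iter_kmem_cache_new()/_next()/
 * _destroy() kfuncs defined above.
 */
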
struct bpf_iter__kmem_cache {
        __bpf_md_ptr(struct bpf_iter_meta *, meta);
        __bpf_md_ptr(struct kmem_cache *, s);
};

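/* The seq_file-based iterator below reuses the open-coded iterator state:
 * the union lets the seq_file callbacks hand their private data to the
 * kfuncs defined above.
 */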
union kmem_cache_iter_priv {
        struct bpf_iter_kmem_cache it;
        struct bpf_iter_kmem_cache_kern kit;
};

static void *kmem_cache_iter_seq_start(struct seq_file *seq, loff_t *pos)
{
        loff_t cnt = 0;
        bool found = false;
        struct kmem_cache *s;
        union kmem_cache_iter_priv *p = seq->private;

        mutex_lock(&slab_mutex);

        /* Find an entry at the given position in the slab_caches list instead
         * of keeping a reference (of the last visited entry, if any) out of
         * slab_mutex. It might miss something if one is deleted in the middle
         * while it releases the lock. But it should be rare and there's not
         * much we can do about it.
         */
        list_for_each_entry(s, &slab_caches, list) {
                if (cnt == *pos) {
                        /* Make sure this entry remains in the list by getting
                         * a new reference count. Note that boot_cache entries
                         * have a negative refcount, so don't touch them.
                         */
                        if (s->refcount > 0)
                                s->refcount++;
                        found = true;
                        break;
                }
                cnt++;
        }
        mutex_unlock(&slab_mutex);

        if (!found)
                s = NULL;

        p->kit.pos = s;
        return s;
}

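/* When the traversal completed (v == NULL), run the BPF program one last
 * time with a NULL kmem_cache so it can emit trailing output, then drop
 * any reference still held in p->kit.pos via the destroy kfunc.
 */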
static void kmem_cache_iter_seq_stop(struct seq_file *seq, void *v)
{
        struct bpf_iter_meta meta;
        struct bpf_iter__kmem_cache ctx = {
                .meta = &meta,
                .s = v,
        };
        union kmem_cache_iter_priv *p = seq->private;
        struct bpf_prog *prog;

        meta.seq = seq;
        prog = bpf_iter_get_info(&meta, true);
        if (prog && !ctx.s)
                bpf_iter_run_prog(prog, &ctx);

        bpf_iter_kmem_cache_destroy(&p->it);
}

static void *kmem_cache_iter_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        union kmem_cache_iter_priv *p = seq->private;

        ++*pos;
        return bpf_iter_kmem_cache_next(&p->it);
}

static int kmem_cache_iter_seq_show(struct seq_file *seq, void *v)
{
        struct bpf_iter_meta meta;
        struct bpf_iter__kmem_cache ctx = {
                .meta = &meta,
                .s = v,
        };
        struct bpf_prog *prog;
        int ret = 0;

        meta.seq = seq;
        prog = bpf_iter_get_info(&meta, false);
        if (prog)
                ret = bpf_iter_run_prog(prog, &ctx);

        return ret;
}

static const struct seq_operations kmem_cache_iter_seq_ops = {
        .start = kmem_cache_iter_seq_start,
        .next  = kmem_cache_iter_seq_next,
        .stop  = kmem_cache_iter_seq_stop,
        .show  = kmem_cache_iter_seq_show,
};

BTF_ID_LIST_GLOBAL_SINGLE(bpf_kmem_cache_btf_id, struct, kmem_cache)

static const struct bpf_iter_seq_info kmem_cache_iter_seq_info = {
        .seq_ops = &kmem_cache_iter_seq_ops,
        .seq_priv_size = sizeof(union kmem_cache_iter_priv),
};

static void bpf_iter_kmem_cache_show_fdinfo(const struct bpf_iter_aux_info *aux,
                                            struct seq_file *seq)
{
        seq_puts(seq, "kmem_cache iter\n");
}

DEFINE_BPF_ITER_FUNC(kmem_cache, struct bpf_iter_meta *meta,
                     struct kmem_cache *s)

static struct bpf_iter_reg bpf_kmem_cache_reg_info = {
        .target            = "kmem_cache",
        .feature           = BPF_ITER_RESCHED,
        .show_fdinfo       = bpf_iter_kmem_cache_show_fdinfo,
        .ctx_arg_info_size = 1,
        .ctx_arg_info      = {
                { offsetof(struct bpf_iter__kmem_cache, s),
                  PTR_TO_BTF_ID_OR_NULL | PTR_TRUSTED },
        },
        .seq_info          = &kmem_cache_iter_seq_info,
};

static int __init bpf_kmem_cache_iter_init(void)
{
        bpf_kmem_cache_reg_info.ctx_arg_info[0].btf_id = bpf_kmem_cache_btf_id[0];
        return bpf_iter_reg_target(&bpf_kmem_cache_reg_info);
}

late_initcall(bpf_kmem_cache_iter_init);
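
/*
 * Userspace usage sketch (an illustration, not part of this file; the
 * object and pin names are made up): attach a program with section
 * "iter/kmem_cache", pin it, and read the pinned file to drive the
 * seq_file operations above, running the program once per kmem_cache:
 *
 *	# bpftool iter pin iter.bpf.o /sys/fs/bpf/slab_caches
 *	# cat /sys/fs/bpf/slab_caches
 */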