1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
* Copyright (c) 2024 Tejun Heo <tj@kernel.org>
* Copyright (c) 2024 David Vernet <dvernet@meta.com>
*/
#ifndef __SCX_COMPAT_BPF_H
#define __SCX_COMPAT_BPF_H
/*
 * Resolve to the value of enum entry @__ent if the running kernel's BTF
 * knows about it, or 0 otherwise. Lets schedulers reference enum entries
 * that may not exist on older kernels without failing to load.
 */
#define __COMPAT_ENUM_OR_ZERO(__type, __ent) \
({ \
__type __ret = 0; \
if (bpf_core_enum_value_exists(__type, __ent)) \
__ret = __ent; \
__ret; \
})
/* v6.12: 819513666966 ("sched_ext: Add cgroup support") */
struct cgroup *scx_bpf_task_cgroup___new(struct task_struct *p) __ksym __weak;
/* Evaluates to NULL on kernels that lack the kfunc (no cgroup support). */
#define scx_bpf_task_cgroup(p) \
(bpf_ksym_exists(scx_bpf_task_cgroup___new) ? \
scx_bpf_task_cgroup___new((p)) : NULL)
/*
* v6.13: The verb `dispatch` was too overloaded and confusing. kfuncs are
* renamed to unload the verb.
*
* scx_bpf_dispatch_from_dsq() and friends were added during v6.12 by
* 4c30f5ce4f7a ("sched_ext: Implement scx_bpf_dispatch[_vtime]_from_dsq()").
*/
bool scx_bpf_dsq_move_to_local___new(u64 dsq_id) __ksym __weak;
void scx_bpf_dsq_move_set_slice___new(struct bpf_iter_scx_dsq *it__iter, u64 slice) __ksym __weak;
void scx_bpf_dsq_move_set_vtime___new(struct bpf_iter_scx_dsq *it__iter, u64 vtime) __ksym __weak;
bool scx_bpf_dsq_move___new(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
bool scx_bpf_dsq_move_vtime___new(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
bool scx_bpf_consume___old(u64 dsq_id) __ksym __weak;
void scx_bpf_dispatch_from_dsq_set_slice___old(struct bpf_iter_scx_dsq *it__iter, u64 slice) __ksym __weak;
void scx_bpf_dispatch_from_dsq_set_vtime___old(struct bpf_iter_scx_dsq *it__iter, u64 vtime) __ksym __weak;
bool scx_bpf_dispatch_from_dsq___old(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
bool scx_bpf_dispatch_vtime_from_dsq___old(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
/*
 * Move to the local DSQ under the post-v6.13 name, falling back to the
 * pre-rename consume kfunc. No existence check is needed for the old name
 * as scx_bpf_consume() predates the rename.
 */
#define scx_bpf_dsq_move_to_local(dsq_id) \
(bpf_ksym_exists(scx_bpf_dsq_move_to_local___new) ? \
scx_bpf_dsq_move_to_local___new((dsq_id)) : \
scx_bpf_consume___old((dsq_id)))
/*
 * Set the slice of tasks moved out of a DSQ iterator. Becomes a no-op on
 * kernels that predate both names (the *_from_dsq family was only added in
 * v6.12, so both existence checks are required).
 */
#define scx_bpf_dsq_move_set_slice(it__iter, slice) \
(bpf_ksym_exists(scx_bpf_dsq_move_set_slice___new) ? \
scx_bpf_dsq_move_set_slice___new((it__iter), (slice)) : \
(bpf_ksym_exists(scx_bpf_dispatch_from_dsq_set_slice___old) ? \
scx_bpf_dispatch_from_dsq_set_slice___old((it__iter), (slice)) : \
(void)0))
/* As above but for the vtime key used by vtime-ordered DSQs. */
#define scx_bpf_dsq_move_set_vtime(it__iter, vtime) \
(bpf_ksym_exists(scx_bpf_dsq_move_set_vtime___new) ? \
scx_bpf_dsq_move_set_vtime___new((it__iter), (vtime)) : \
(bpf_ksym_exists(scx_bpf_dispatch_from_dsq_set_vtime___old) ? \
scx_bpf_dispatch_from_dsq_set_vtime___old((it__iter), (vtime)) : \
(void)0))
/* Move @p from the iterated DSQ; evaluates to false if neither kfunc exists. */
#define scx_bpf_dsq_move(it__iter, p, dsq_id, enq_flags) \
(bpf_ksym_exists(scx_bpf_dsq_move___new) ? \
scx_bpf_dsq_move___new((it__iter), (p), (dsq_id), (enq_flags)) : \
(bpf_ksym_exists(scx_bpf_dispatch_from_dsq___old) ? \
scx_bpf_dispatch_from_dsq___old((it__iter), (p), (dsq_id), (enq_flags)) : \
false))
/* vtime-queue variant of scx_bpf_dsq_move(). */
#define scx_bpf_dsq_move_vtime(it__iter, p, dsq_id, enq_flags) \
(bpf_ksym_exists(scx_bpf_dsq_move_vtime___new) ? \
scx_bpf_dsq_move_vtime___new((it__iter), (p), (dsq_id), (enq_flags)) : \
(bpf_ksym_exists(scx_bpf_dispatch_vtime_from_dsq___old) ? \
scx_bpf_dispatch_vtime_from_dsq___old((it__iter), (p), (dsq_id), (enq_flags)) : \
false))
/*
* v6.15: 950ad93df2fc ("bpf: add kfunc for populating cpumask bits")
*
* Compat macro will be dropped on v6.19 release.
*/
int bpf_cpumask_populate(struct cpumask *dst, void *src, size_t src__sz) __ksym __weak;
/* Evaluates to -EOPNOTSUPP on kernels that lack the kfunc. */
#define __COMPAT_bpf_cpumask_populate(cpumask, src, size__sz) \
(bpf_ksym_exists(bpf_cpumask_populate) ? \
(bpf_cpumask_populate(cpumask, src, size__sz)) : -EOPNOTSUPP)
/*
* v6.19: Introduce lockless peek API for user DSQs.
*
* Preserve the following macro until v6.21.
*/
/*
 * Return the task at the head of DSQ @dsq_id without consuming it, or NULL
 * if the DSQ is empty or cannot be iterated. Prefers the dedicated peek
 * kfunc; on older kernels, falls back to taking a single step of a DSQ
 * iterator, which is not the lockless path and may return a stale head.
 */
static inline struct task_struct *__COMPAT_scx_bpf_dsq_peek(u64 dsq_id)
{
struct task_struct *p = NULL;
struct bpf_iter_scx_dsq it;
if (bpf_ksym_exists(scx_bpf_dsq_peek))
return scx_bpf_dsq_peek(dsq_id);
/* bpf_iter_scx_dsq_new() returns 0 on success */
if (!bpf_iter_scx_dsq_new(&it, dsq_id, 0))
p = bpf_iter_scx_dsq_next(&it);
/* the iterator must always be destroyed, even if init failed */
bpf_iter_scx_dsq_destroy(&it);
return p;
}
/**
 * __COMPAT_is_enq_cpu_selected - Test if SCX_ENQ_CPU_SELECTED is on
 * @enq_flags: enqueue flags from ops.enqueue()
 *
 * Works whether or not the vmlinux.h the scheduler was compiled against and
 * the running kernel know about SCX_ENQ_CPU_SELECTED. We will preserve this
 * __COMPAT helper until v6.16.
 *
 * Return: True if SCX_ENQ_CPU_SELECTED is turned on in @enq_flags
 */
static inline bool __COMPAT_is_enq_cpu_selected(u64 enq_flags)
{
#ifdef HAVE_SCX_ENQ_CPU_SELECTED
/*
 * This is the case that a BPF code compiled against vmlinux.h
 * where the enum SCX_ENQ_CPU_SELECTED exists.
 */
/*
 * We should temporarily suspend the macro expansion of
 * 'SCX_ENQ_CPU_SELECTED'. This avoids 'SCX_ENQ_CPU_SELECTED' being
 * rewritten to '__SCX_ENQ_CPU_SELECTED' when 'SCX_ENQ_CPU_SELECTED'
 * is defined in 'scripts/gen_enums.py'. The following references
 * must be the raw enum-entry identifier for CO-RE relocation.
 */
#pragma push_macro("SCX_ENQ_CPU_SELECTED")
#undef SCX_ENQ_CPU_SELECTED
u64 flag;
/*
 * When the kernel did not have SCX_ENQ_CPU_SELECTED,
 * select_task_rq_scx() has never been skipped. Thus, this case
 * should be considered that the CPU has already been selected.
 */
if (!bpf_core_enum_value_exists(enum scx_enq_flags,
SCX_ENQ_CPU_SELECTED))
return true;
/* resolve the flag's value from the running kernel's BTF */
flag = bpf_core_enum_value(enum scx_enq_flags, SCX_ENQ_CPU_SELECTED);
return enq_flags & flag;
/*
 * Once done, resume the macro expansion of 'SCX_ENQ_CPU_SELECTED'.
 */
#pragma pop_macro("SCX_ENQ_CPU_SELECTED")
#else
/*
 * This is the case that a BPF code compiled against vmlinux.h
 * where the enum SCX_ENQ_CPU_SELECTED does NOT exist.
 */
return true;
#endif /* HAVE_SCX_ENQ_CPU_SELECTED */
}
/*
 * Fall back to bpf_ktime_get_ns() on kernels without scx_bpf_now(). The
 * self-references below are safe: a function-like macro is not re-expanded
 * inside its own replacement, so both mentions of scx_bpf_now resolve to
 * the kfunc.
 */
#define scx_bpf_now() \
(bpf_ksym_exists(scx_bpf_now) ? \
scx_bpf_now() : \
bpf_ktime_get_ns())
/*
* v6.15: Introduce event counters.
*
* Preserve the following macro until v6.17.
*/
/* No-op on kernels without event counters; @events is left unmodified. */
#define __COMPAT_scx_bpf_events(events, size) \
(bpf_ksym_exists(scx_bpf_events) ? \
scx_bpf_events(events, size) : ({}))
/*
* v6.15: Introduce NUMA-aware kfuncs to operate with per-node idle
* cpumasks.
*
* Preserve the following __COMPAT_scx_*_node macros until v6.17.
*/
/* Pretend a single NUMA node on kernels without the NUMA-aware kfuncs. */
#define __COMPAT_scx_bpf_nr_node_ids() \
(bpf_ksym_exists(scx_bpf_nr_node_ids) ? \
scx_bpf_nr_node_ids() : 1U)
/* Map every CPU to node 0 on kernels without the NUMA-aware kfuncs. */
#define __COMPAT_scx_bpf_cpu_node(cpu) \
(bpf_ksym_exists(scx_bpf_cpu_node) ? \
scx_bpf_cpu_node(cpu) : 0)
/*
 * Fall back to the global idle cpumask on kernels without the per-node
 * kfunc. Note: the macro must NOT end with a line-continuation backslash;
 * backslash-newline splicing happens before directives are recognized, so a
 * trailing backslash would swallow the following #define into this macro's
 * replacement list, leaving that macro undefined and this one malformed.
 */
#define __COMPAT_scx_bpf_get_idle_cpumask_node(node) \
(bpf_ksym_exists(scx_bpf_get_idle_cpumask_node) ? \
scx_bpf_get_idle_cpumask_node(node) : \
scx_bpf_get_idle_cpumask())
/* Fall back to the global idle SMT mask on kernels without the per-node kfunc. */
#define __COMPAT_scx_bpf_get_idle_smtmask_node(node) \
(bpf_ksym_exists(scx_bpf_get_idle_smtmask_node) ? \
scx_bpf_get_idle_smtmask_node(node) : \
scx_bpf_get_idle_smtmask())
/* Fall back to the node-agnostic idle-CPU picker, dropping @node. */
#define __COMPAT_scx_bpf_pick_idle_cpu_node(cpus_allowed, node, flags) \
(bpf_ksym_exists(scx_bpf_pick_idle_cpu_node) ? \
scx_bpf_pick_idle_cpu_node(cpus_allowed, node, flags) : \
scx_bpf_pick_idle_cpu(cpus_allowed, flags))
/* Fall back to the node-agnostic any-CPU picker, dropping @node. */
#define __COMPAT_scx_bpf_pick_any_cpu_node(cpus_allowed, node, flags) \
(bpf_ksym_exists(scx_bpf_pick_any_cpu_node) ? \
scx_bpf_pick_any_cpu_node(cpus_allowed, node, flags) : \
scx_bpf_pick_any_cpu(cpus_allowed, flags))
/*
* v6.18: Add a helper to retrieve the current task running on a CPU.
*
* Keep this helper available until v6.20 for compatibility.
*/
/*
 * Return the task currently running on @cpu. Prefers the dedicated kfunc;
 * the fallback fetches the CPU's rq and reads rq->curr directly, returning
 * NULL when the rq cannot be obtained.
 */
static inline struct task_struct *__COMPAT_scx_bpf_cpu_curr(int cpu)
{
struct rq *rq;
if (bpf_ksym_exists(scx_bpf_cpu_curr))
return scx_bpf_cpu_curr(cpu);
rq = scx_bpf_cpu_rq(cpu);
return rq ? rq->curr : NULL;
}
/*
* v6.19: To work around BPF maximum parameter limit, the following kfuncs are
* replaced with variants that pack scalar arguments in a struct. Wrappers are
* provided to maintain source compatibility.
*
* v6.13: scx_bpf_dsq_insert_vtime() renaming is also handled here. See the
* block on dispatch renaming above for more details.
*
* The kernel will carry the compat variants until v6.23 to maintain binary
* compatibility. After v6.23 release, remove the compat handling and move the
* wrappers to common.bpf.h.
*/
s32 scx_bpf_select_cpu_and___compat(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
const struct cpumask *cpus_allowed, u64 flags) __ksym __weak;
void scx_bpf_dispatch_vtime___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags) __ksym __weak;
void scx_bpf_dsq_insert_vtime___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags) __ksym __weak;
/**
 * scx_bpf_select_cpu_and - Pick an idle CPU usable by task @p
 * @p: task_struct to select a CPU for
 * @prev_cpu: CPU @p was on previously
 * @wake_flags: %SCX_WAKE_* flags
 * @cpus_allowed: cpumask of allowed CPUs
 * @flags: %SCX_PICK_IDLE* flags
 *
 * Inline wrapper that packs scalar arguments into a struct and calls
 * __scx_bpf_select_cpu_and(). See __scx_bpf_select_cpu_and() for details.
 */
static inline s32
scx_bpf_select_cpu_and(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
const struct cpumask *cpus_allowed, u64 flags)
{
/*
 * The args struct being present in the kernel's BTF indicates the
 * packed-argument variant exists; otherwise fall back to the older
 * flat-argument compat kfunc.
 */
if (bpf_core_type_exists(struct scx_bpf_select_cpu_and_args)) {
struct scx_bpf_select_cpu_and_args args = {
.prev_cpu = prev_cpu,
.wake_flags = wake_flags,
.flags = flags,
};
return __scx_bpf_select_cpu_and(p, cpus_allowed, &args);
} else {
return scx_bpf_select_cpu_and___compat(p, prev_cpu, wake_flags,
cpus_allowed, flags);
}
}
/**
 * scx_bpf_dsq_insert_vtime - Insert a task into the vtime priority queue of a DSQ
 * @p: task_struct to insert
 * @dsq_id: DSQ to insert into
 * @slice: duration @p can run for in nsecs, 0 to keep the current value
 * @vtime: @p's ordering inside the vtime-sorted queue of the target DSQ
 * @enq_flags: SCX_ENQ_*
 *
 * Inline wrapper that packs scalar arguments into a struct and calls
 * __scx_bpf_dsq_insert_vtime(). See __scx_bpf_dsq_insert_vtime() for details.
 *
 * Return: __scx_bpf_dsq_insert_vtime()'s result on kernels that have it.
 * The compat paths call void kfuncs and always return %true.
 */
static inline bool
scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime,
u64 enq_flags)
{
if (bpf_core_type_exists(struct scx_bpf_dsq_insert_vtime_args)) {
/* v6.19+: packed-argument variant */
struct scx_bpf_dsq_insert_vtime_args args = {
.dsq_id = dsq_id,
.slice = slice,
.vtime = vtime,
.enq_flags = enq_flags,
};
return __scx_bpf_dsq_insert_vtime(p, &args);
} else if (bpf_ksym_exists(scx_bpf_dsq_insert_vtime___compat)) {
/* post-rename flat-argument kfunc */
scx_bpf_dsq_insert_vtime___compat(p, dsq_id, slice, vtime,
enq_flags);
return true;
} else {
/* pre-v6.13 dispatch name */
scx_bpf_dispatch_vtime___compat(p, dsq_id, slice, vtime,
enq_flags);
return true;
}
}
/*
* v6.19: scx_bpf_dsq_insert() now returns bool instead of void. Move
* scx_bpf_dsq_insert() decl to common.bpf.h and drop compat helper after v6.22.
* The extra ___compat suffix is to work around libbpf not ignoring __SUFFIX on
* kernel side. The entire suffix can be dropped later.
*
* v6.13: scx_bpf_dsq_insert() renaming is also handled here. See the block on
* dispatch renaming above for more details.
*/
bool scx_bpf_dsq_insert___v2___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym __weak;
void scx_bpf_dsq_insert___v1(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym __weak;
void scx_bpf_dispatch___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym __weak;
/**
 * scx_bpf_dsq_insert - Insert a task into a DSQ
 * @p: task_struct to insert
 * @dsq_id: DSQ to insert into
 * @slice: duration @p can run for in nsecs, 0 to keep the current value
 * @enq_flags: SCX_ENQ_*
 *
 * Return: the v2 kfunc's result on kernels that have it. The older v1 and
 * pre-rename dispatch kfuncs return void, so those paths always return %true.
 */
static inline bool
scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags)
{
if (bpf_ksym_exists(scx_bpf_dsq_insert___v2___compat)) {
return scx_bpf_dsq_insert___v2___compat(p, dsq_id, slice, enq_flags);
} else if (bpf_ksym_exists(scx_bpf_dsq_insert___v1)) {
scx_bpf_dsq_insert___v1(p, dsq_id, slice, enq_flags);
return true;
} else {
scx_bpf_dispatch___compat(p, dsq_id, slice, enq_flags);
return true;
}
}
/*
* v6.19: scx_bpf_task_set_slice() and scx_bpf_task_set_dsq_vtime() added to for
* sub-sched authority checks. Drop the wrappers and move the decls to
* common.bpf.h after v6.22.
*/
bool scx_bpf_task_set_slice___new(struct task_struct *p, u64 slice) __ksym __weak;
bool scx_bpf_task_set_dsq_vtime___new(struct task_struct *p, u64 vtime) __ksym __weak;
/*
 * Set @p's slice, going through the authority-checking kfunc when available
 * and writing p->scx.slice directly on older kernels.
 */
static inline void scx_bpf_task_set_slice(struct task_struct *p, u64 slice)
{
if (bpf_ksym_exists(scx_bpf_task_set_slice___new))
scx_bpf_task_set_slice___new(p, slice);
else
p->scx.slice = slice;
}
/*
 * Set @p's DSQ vtime, going through the authority-checking kfunc when
 * available and writing p->scx.dsq_vtime directly on older kernels.
 */
static inline void scx_bpf_task_set_dsq_vtime(struct task_struct *p, u64 vtime)
{
if (bpf_ksym_exists(scx_bpf_task_set_dsq_vtime___new))
scx_bpf_task_set_dsq_vtime___new(p, vtime);
else
p->scx.dsq_vtime = vtime;
}
/*
* v6.19: The new void variant can be called from anywhere while the older v1
* variant can only be called from ops.cpu_release(). The double ___ prefixes on
* the v2 variant need to be removed once libbpf is updated to ignore ___ prefix
* on kernel side. Drop the wrapper and move the decl to common.bpf.h after
* v6.22.
*/
u32 scx_bpf_reenqueue_local___v1(void) __ksym __weak;
void scx_bpf_reenqueue_local___v2___compat(void) __ksym __weak;
/* True iff the kernel has the v2 kfunc which is callable from any context. */
static inline bool __COMPAT_scx_bpf_reenqueue_local_from_anywhere(void)
{
return bpf_ksym_exists(scx_bpf_reenqueue_local___v2___compat);
}
/*
 * Re-enqueue tasks on the local DSQ, preferring the v2 kfunc. The v1
 * fallback returns a u32 count which is intentionally discarded here; v1 is
 * only legal from ops.cpu_release(), so gate other call sites on
 * __COMPAT_scx_bpf_reenqueue_local_from_anywhere().
 */
static inline void scx_bpf_reenqueue_local(void)
{
if (__COMPAT_scx_bpf_reenqueue_local_from_anywhere())
scx_bpf_reenqueue_local___v2___compat();
else
scx_bpf_reenqueue_local___v1();
}
/*
* Define sched_ext_ops. This may be expanded to define multiple variants for
* backward compatibility. See compat.h::SCX_OPS_LOAD/ATTACH().
*/
/*
 * Places the ops struct in the ".struct_ops.link" section so it is attached
 * via a BPF link. Invoke without a trailing semicolon; the macro supplies it.
 */
#define SCX_OPS_DEFINE(__name, ...) \
SEC(".struct_ops.link") \
struct sched_ext_ops __name = { \
__VA_ARGS__, \
};
#endif /* __SCX_COMPAT_BPF_H */
|