// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#include <test_progs.h>
#include "uretprobe_stack.skel.h"
#include "../sdt.h"
/* We set up a target_1() -> target_2() -> target_3() -> target_4() -> USDT
 * call chain, each function being traced by our BPF program. On entry to or
 * return from each target_*() we capture a user stack trace and record it in
 * a global variable, so that the user space part of the test can validate it.
 *
 * Note, we put each target function into a custom section to get the
 * __start_XXX/__stop_XXX symbols the linker generates for us, which let us
 * determine the address range of each function.
 */
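/* For reference, the BPF side (progs/uretprobe_stack.c, compiled into
 * uretprobe_stack.skel.h) captures each stack with bpf_get_stack() and
 * BPF_F_USER_STACK into globals that this file reads via skel->bss. Below is
 * a minimal sketch of one such entry handler; it is illustrative only, and
 * assumes the SEC() path and handler name, though entry_stack1 and
 * entry1_len are the actual globals validated later in this file:
 *
 *	#include "vmlinux.h"
 *	#include <bpf/bpf_helpers.h>
 *	#include <bpf/bpf_tracing.h>
 *
 *	__u64 entry_stack1[32];
 *	int entry1_len;
 *
 *	SEC("uprobe//proc/self/exe:target_1")
 *	int BPF_UPROBE(uprobe_1)
 *	{
 *		// bpf_get_stack() returns the number of bytes written,
 *		// hence the division by 8 in validate_stack() below
 *		entry1_len = bpf_get_stack(ctx, entry_stack1,
 *					   sizeof(entry_stack1),
 *					   BPF_F_USER_STACK);
 *		return 0;
 *	}
 *
 *	char _license[] SEC("license") = "GPL";
 */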
__attribute__((section("uprobe__target_4")))
__weak int target_4(void)
{
STAP_PROBE1(uretprobe_stack, target, 42);
return 42;
}
extern const void *__start_uprobe__target_4;
extern const void *__stop_uprobe__target_4;
__attribute__((section("uprobe__target_3")))
__weak int target_3(void)
{
return target_4();
}
extern const void *__start_uprobe__target_3;
extern const void *__stop_uprobe__target_3;
__attribute__((section("uprobe__target_2")))
__weak int target_2(void)
{
return target_3();
}
extern const void *__start_uprobe__target_2;
extern const void *__stop_uprobe__target_2;
__attribute__((section("uprobe__target_1")))
__weak int target_1(int depth)
{
if (depth < 1)
return 1 + target_1(depth + 1);
else
return target_2();
}
extern const void *__start_uprobe__target_1;
extern const void *__stop_uprobe__target_1;
extern const void *__start_uretprobe_stack_sec;
extern const void *__stop_uretprobe_stack_sec;
struct range {
long start;
long stop;
};
static struct range targets[] = {
{}, /* we want target_1 to map to targets[1], so we need 1-based indexing */
{ (long)&__start_uprobe__target_1, (long)&__stop_uprobe__target_1 },
{ (long)&__start_uprobe__target_2, (long)&__stop_uprobe__target_2 },
{ (long)&__start_uprobe__target_3, (long)&__stop_uprobe__target_3 },
{ (long)&__start_uprobe__target_4, (long)&__stop_uprobe__target_4 },
};
static struct range caller = {
(long)&__start_uretprobe_stack_sec,
(long)&__stop_uretprobe_stack_sec,
};
static void validate_stack(__u64 *ips, int stack_len, int cnt, ...)
{
int i, j;
va_list args;
if (!ASSERT_GT(stack_len, 0, "stack_len"))
return;
stack_len /= 8; /* stack length is in bytes, each entry (IP) is 8 bytes */
/* check if we have enough entries to satisfy test expectations */
if (!ASSERT_GE(stack_len, cnt, "stack_len2"))
return;
if (env.verbosity >= VERBOSE_NORMAL) {
printf("caller: %#lx - %#lx\n", caller.start, caller.stop);
for (i = 1; i < ARRAY_SIZE(targets); i++)
printf("target_%d: %#lx - %#lx\n", i, targets[i].start, targets[i].stop);
for (i = 0; i < stack_len; i++) {
for (j = 1; j < ARRAY_SIZE(targets); j++) {
if (ips[i] >= targets[j].start && ips[i] < targets[j].stop)
break;
}
if (j < ARRAY_SIZE(targets)) { /* found target match */
printf("ENTRY #%d: %#lx (in target_%d)\n", i, (long)ips[i], j);
} else if (ips[i] >= caller.start && ips[i] < caller.stop) {
printf("ENTRY #%d: %#lx (in caller)\n", i, (long)ips[i]);
} else {
printf("ENTRY #%d: %#lx\n", i, (long)ips[i]);
}
}
}
va_start(args, cnt);
/* the stack trace is stored most recent (deepest) frame first, while
 * expected ranges are passed outermost frame first, so walk ips[] backwards
 */
for (i = cnt - 1; i >= 0; i--) {
const struct range *t = va_arg(args, const struct range *);
ASSERT_GE(ips[i], t->start, "addr_start");
ASSERT_LT(ips[i], t->stop, "addr_stop");
}
va_end(args);
}
/* __weak prevents inlining */
__attribute__((section("uretprobe_stack_sec")))
__weak void test_uretprobe_stack(void)
{
LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
struct uretprobe_stack *skel;
int err;
skel = uretprobe_stack__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
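/* auto-attaches all entry uprobes, uretprobes, and the USDT handler based
 * on their SEC() declarations in the BPF-side program
 */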
err = uretprobe_stack__attach(skel);
if (!ASSERT_OK(err, "skel_attach"))
goto cleanup;
/* trigger the call chain; target_1(0) recurses once, adding 1 to
 * target_4()'s return value of 42
 */
ASSERT_EQ(target_1(0), 42 + 1, "trigger_return");
/*
* Stacks captured on ENTRY uprobes
*/
/* (uprobe 1) caller -> target_1 */
validate_stack(skel->bss->entry_stack1, skel->bss->entry1_len,
2, &caller, &targets[1]);
/* (uprobe 1, recursed) caller -> target_1 -> target_1 */
validate_stack(skel->bss->entry_stack1_recur, skel->bss->entry1_recur_len,
3, &caller, &targets[1], &targets[1]);
/* (uprobe 2) caller -> target_1 -> target_1 -> target_2 */
validate_stack(skel->bss->entry_stack2, skel->bss->entry2_len,
4, &caller, &targets[1], &targets[1], &targets[2]);
/* (uprobe 3) caller -> target_1 -> target_1 -> target_2 -> target_3 */
validate_stack(skel->bss->entry_stack3, skel->bss->entry3_len,
5, &caller, &targets[1], &targets[1], &targets[2], &targets[3]);
/* (uprobe 4) caller -> target_1 -> target_1 -> target_2 -> target_3 -> target_4 */
validate_stack(skel->bss->entry_stack4, skel->bss->entry4_len,
6, &caller, &targets[1], &targets[1], &targets[2], &targets[3], &targets[4]);
/* (USDT): full caller -> target_1 -> target_1 -> target_2 -> target_3
 * -> target_4 chain; target_1 and target_4 also have pending uretprobes
 * at this point (see the EXIT checks below)
 */
validate_stack(skel->bss->usdt_stack, skel->bss->usdt_len,
6, &caller, &targets[1], &targets[1], &targets[2], &targets[3], &targets[4]);
/*
* Now stacks captured on the way out in EXIT uprobes
*/
/* (uretprobe 4) everything up to target_4, but excluding it */
validate_stack(skel->bss->exit_stack4, skel->bss->exit4_len,
5, &caller, &targets[1], &targets[1], &targets[2], &targets[3]);
/* we didn't install uretprobes on target_2 and target_3 */
/* (uretprobe 1, recursed) inner target_1 has returned; only the first
 * (outer) target_1 call remains on the stack
 */
validate_stack(skel->bss->exit_stack1_recur, skel->bss->exit1_recur_len,
2, &caller, &targets[1]);
/* (uretprobe 1) just the caller in the stack trace */
validate_stack(skel->bss->exit_stack1, skel->bss->exit1_len,
1, &caller);
cleanup:
uretprobe_stack__destroy(skel);
}