// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023 SUSE LLC */
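
/*
 * Tests for the verifier's precision backtracking (mark_chain_precision).
 * Each program is verified with log_level 2 and the __msg() annotations
 * match the expected "mark_precise:" log lines for the instruction class
 * under test.
 */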
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "../../../include/linux/filter.h"
#include "bpf_misc.h"
SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
__msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0xfffffff8 goto pc+2")
__msg("mark_precise: frame0: regs=r2 stack= before 1: (87) r2 = -r2")
__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 8")
__naked int bpf_neg(void)
{
asm volatile (
"r2 = 8;"
"r2 = -r2;"
"if r2 != -8 goto 1f;"
"r1 = r10;"
"r1 += r2;"
"1:"
"r0 = 0;"
"exit;"
::: __clobber_all);
}
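
/* BPF_END (to_le): backtracking must step over "r2 = le16 r2". */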
SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
__msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0x0 goto pc+2")
__msg("mark_precise: frame0: regs=r2 stack= before 1: (d4) r2 = le16 r2")
__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 0")
__naked int bpf_end_to_le(void)
{
asm volatile (
"r2 = 0;"
"r2 = le16 r2;"
"if r2 != 0 goto 1f;"
"r1 = r10;"
"r1 += r2;"
"1:"
"r0 = 0;"
"exit;"
::: __clobber_all);
}
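
/* BPF_END (to_be): as above, but for the big-endian conversion. */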
SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
__msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0x0 goto pc+2")
__msg("mark_precise: frame0: regs=r2 stack= before 1: (dc) r2 = be16 r2")
__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 0")
__naked int bpf_end_to_be(void)
{
asm volatile (
"r2 = 0;"
"r2 = be16 r2;"
"if r2 != 0 goto 1f;"
"r1 = r10;"
"r1 += r2;"
"1:"
"r0 = 0;"
"exit;"
::: __clobber_all);
}
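
/*
 * The bswap form of BPF_END is a BPF ISA v4 (cpuv4) instruction, so the
 * test below is only built with Clang 18+ and on the architectures listed
 * in the guard.
 */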
#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
(defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390)) && \
__clang_major__ >= 18
SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
__msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0x0 goto pc+2")
__msg("mark_precise: frame0: regs=r2 stack= before 1: (d7) r2 = bswap16 r2")
__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 0")
__naked int bpf_end_bswap(void)
{
asm volatile (
"r2 = 0;"
"r2 = bswap16 r2;"
"if r2 != 0 goto 1f;"
"r1 = r10;"
"r1 += r2;"
"1:"
"r0 = 0;"
"exit;"
::: __clobber_all);
}
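
/*
 * Load-acquire/store-release: backtracking must treat a load-acquire like
 * a load (the register mark becomes a stack slot mark) and a store-release
 * like a store (the stack slot mark becomes a register mark).
 */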
#ifdef CAN_USE_LOAD_ACQ_STORE_REL
SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r3 = r10")
__msg("mark_precise: frame0: regs=r2 stack= before 2: (db) r2 = load_acquire((u64 *)(r10 -8))")
__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1")
__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
__naked int bpf_load_acquire(void)
{
asm volatile (
"r1 = 8;"
"*(u64 *)(r10 - 8) = r1;"
".8byte %[load_acquire_insn];" /* r2 = load_acquire((u64 *)(r10 - 8)); */
"r3 = r10;"
"r3 += r2;" /* mark_precise */
"r0 = 0;"
"exit;"
:
: __imm_insn(load_acquire_insn,
BPF_ATOMIC_OP(BPF_DW, BPF_LOAD_ACQ, BPF_REG_2, BPF_REG_10, -8))
: __clobber_all);
}
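
/* As above, but the precision mark has to cross a store-release. */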
SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r1 stack= before 3: (bf) r2 = r10")
__msg("mark_precise: frame0: regs=r1 stack= before 2: (79) r1 = *(u64 *)(r10 -8)")
__msg("mark_precise: frame0: regs= stack=-8 before 1: (db) store_release((u64 *)(r10 -8), r1)")
__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
__naked int bpf_store_release(void)
{
asm volatile (
"r1 = 8;"
".8byte %[store_release_insn];" /* store_release((u64 *)(r10 - 8), r1); */
"r1 = *(u64 *)(r10 - 8);"
"r2 = r10;"
"r2 += r1;" /* mark_precise */
"r0 = 0;"
"exit;"
:
: __imm_insn(store_release_insn,
BPF_ATOMIC_OP(BPF_DW, BPF_STORE_REL, BPF_REG_10, BPF_REG_1, -8))
: __clobber_all);
}
#endif /* CAN_USE_LOAD_ACQ_STORE_REL */
#endif /* v4 instruction */

SEC("?raw_tp")
__success __log_level(2)
/*
* Without the bug fix there will be no history between "last_idx 3 first_idx 3"
* and "parent state regs=" lines. "R0_w=6" parts are here to help anchor
* expected log messages to the one specific mark_chain_precision operation.
*
* This is quite fragile: if verifier checkpointing heuristic changes, this
* might need adjusting.
*/
__msg("2: (07) r0 += 1 ; R0_w=6")
__msg("3: (35) if r0 >= 0xa goto pc+1")
__msg("mark_precise: frame0: last_idx 3 first_idx 3 subseq_idx -1")
__msg("mark_precise: frame0: regs=r0 stack= before 2: (07) r0 += 1")
__msg("mark_precise: frame0: regs=r0 stack= before 1: (07) r0 += 1")
__msg("mark_precise: frame0: regs=r0 stack= before 4: (05) goto pc-4")
__msg("mark_precise: frame0: regs=r0 stack= before 3: (35) if r0 >= 0xa goto pc+1")
__msg("mark_precise: frame0: parent state regs= stack=: R0_rw=P4")
__msg("3: R0_w=6")
__naked int state_loop_first_last_equal(void)
{
asm volatile (
"r0 = 0;"
"l0_%=:"
"r0 += 1;"
"r0 += 1;"
/* every few iterations we'll have a checkpoint here with
* first_idx == last_idx, potentially confusing precision
* backtracking logic
*/
"if r0 >= 10 goto l1_%=;" /* checkpoint + mark_precise */
"goto l0_%=;"
"l1_%=:"
"exit;"
::: __clobber_common
);
}
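
/*
 * Conditional jumps with r10 as an operand: the helper below runs as a
 * subprog, so the expected mark_precise lines are reported for frame1.
 * The precision mark on r2 must survive backtracking through
 * "if r2 <= r10".
 */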
__used __naked static void __bpf_cond_op_r10(void)
{
asm volatile (
"r2 = 2314885393468386424 ll;"
"goto +0;"
"if r2 <= r10 goto +3;"
"if r1 >= -1835016 goto +0;"
"if r2 <= 8 goto +0;"
"if r3 <= 0 goto +0;"
"exit;"
::: __clobber_all);
}
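
/* Entry point that calls the subprog above. */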
SEC("?raw_tp")
__success __log_level(2)
__msg("8: (bd) if r2 <= r10 goto pc+3")
__msg("9: (35) if r1 >= 0xffe3fff8 goto pc+0")
__msg("10: (b5) if r2 <= 0x8 goto pc+0")
__msg("mark_precise: frame1: last_idx 10 first_idx 0 subseq_idx -1")
__msg("mark_precise: frame1: regs=r2 stack= before 9: (35) if r1 >= 0xffe3fff8 goto pc+0")
__msg("mark_precise: frame1: regs=r2 stack= before 8: (bd) if r2 <= r10 goto pc+3")
__msg("mark_precise: frame1: regs=r2 stack= before 7: (05) goto pc+0")
__naked void bpf_cond_op_r10(void)
{
asm volatile (
"r3 = 0 ll;"
"call __bpf_cond_op_r10;"
"r0 = 0;"
"exit;"
::: __clobber_all);
}
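
/*
 * As above, but the frame pointer value is first copied into r3, so the
 * comparison uses an ordinary register and the expected lines are in
 * frame0.
 */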
SEC("?raw_tp")
__success __log_level(2)
__msg("3: (bf) r3 = r10")
__msg("4: (bd) if r3 <= r2 goto pc+1")
__msg("5: (b5) if r2 <= 0x8 goto pc+2")
__msg("mark_precise: frame0: last_idx 5 first_idx 0 subseq_idx -1")
__msg("mark_precise: frame0: regs=r2 stack= before 4: (bd) if r3 <= r2 goto pc+1")
__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r3 = r10")
__naked void bpf_cond_op_not_r10(void)
{
asm volatile (
"r0 = 0;"
"r2 = 2314885393468386424 ll;"
"r3 = r10;"
"if r3 <= r2 goto +1;"
"if r2 <= 8 goto +2;"
"r0 = 2 ll;"
"exit;"
::: __clobber_all);
}

char _license[] SEC("license") = "GPL";