/*
Copyright (C) 2021 The Falco Authors.
This file is dual licensed under either the MIT or GPL 2. See MIT.txt
or GPL2.txt for full copies of the license.
*/
#ifndef __TYPES_H
#define __TYPES_H
#ifdef __KERNEL__
#define __bpf_section(NAME) __attribute__((section(NAME), used))
#ifndef __always_inline
#define __always_inline inline __attribute__((always_inline))
#endif

/*
 * Tracepoint programs land in a different ELF section prefix depending
 * on whether the kernel supports raw tracepoints.
 */
#ifdef BPF_SUPPORTS_RAW_TRACEPOINTS
#define TP_NAME "raw_tracepoint/"
#else
#define TP_NAME "tracepoint/"
#endif
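
/*
 * Illustrative sketch (disabled): TP_NAME is pasted in front of the
 * event name when placing a program in its ELF section, so the same
 * source attaches as either flavor of tracepoint. The program below is
 * a hypothetical example, not part of this header.
 */
#if 0
__bpf_section(TP_NAME "sys_enter")
int example_sys_enter(struct sys_enter_args *ctx)
{
	return 0;
}
#endif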

#ifdef BPF_SUPPORTS_RAW_TRACEPOINTS
/* Raw tracepoint context: regs holds the address of the task's
 * struct pt_regs, id is the syscall number. */
struct sys_enter_args {
	unsigned long regs;
	unsigned long id;
};
#else
/* Classic tracepoint layout: pad skips the common trace event fields. */
struct sys_enter_args {
	__u64 pad;
	long id;
	unsigned long args[6];
};
#endif

#ifdef BPF_SUPPORTS_RAW_TRACEPOINTS
struct sys_exit_args {
	unsigned long regs;
	unsigned long ret;
};
#else
struct sys_exit_args {
	__u64 pad;
	long id;
	long ret;
};
#endif
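
/*
 * Illustrative sketch (disabled): how the two layouts differ in
 * practice. With raw tracepoints, regs is only the address of the
 * task's registers and must be read with bpf_probe_read(); the classic
 * tracepoint has already copied the arguments. The x86_64 register name
 * is an assumption of this sketch.
 */
#if 0
static __always_inline unsigned long first_syscall_arg(struct sys_enter_args *ctx)
{
#ifdef BPF_SUPPORTS_RAW_TRACEPOINTS
	struct pt_regs *regs = (struct pt_regs *)ctx->regs;
	unsigned long arg = 0;

	/* on x86_64 the first syscall argument lives in di */
	bpf_probe_read(&arg, sizeof(arg), &regs->di);
	return arg;
#else
	return ctx->args[0];
#endif
}
#endif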

#ifdef BPF_SUPPORTS_RAW_TRACEPOINTS
struct sched_process_exit_args {
	unsigned long p;
};
#else
struct sched_process_exit_args {
	__u64 pad;
	char comm[16];
	pid_t pid;
	int prio;
};
#endif

#ifdef BPF_SUPPORTS_RAW_TRACEPOINTS
struct sched_switch_args {
	unsigned long preempt;
	unsigned long prev;
	unsigned long next;
};
#else
struct sched_switch_args {
	__u64 pad;
	char prev_comm[TASK_COMM_LEN];
	pid_t prev_pid;
	int prev_prio;
	long prev_state;
	char next_comm[TASK_COMM_LEN];
	pid_t next_pid;
	int next_prio;
};
#endif

#ifndef BPF_SUPPORTS_RAW_TRACEPOINTS
/* With raw tracepoints the fork event carries the task pointers
 * directly, so this layout is only needed for classic tracepoints. */
struct sched_process_fork_args {
	__u64 pad;
	char parent_comm[TASK_COMM_LEN];
	pid_t parent_pid;
	char child_comm[TASK_COMM_LEN];
	pid_t child_pid;
};
#endif

#ifdef BPF_SUPPORTS_RAW_TRACEPOINTS
struct page_fault_args {
	unsigned long address;
	unsigned long regs;
	unsigned long error_code;
};
#else
struct page_fault_args {
	__u64 pad;
	unsigned long address;
	unsigned long ip;
	unsigned long error_code;
};
#endif

#ifdef BPF_SUPPORTS_RAW_TRACEPOINTS
struct signal_deliver_args {
	unsigned long sig;
	unsigned long info;
	unsigned long ka;
};
#else
struct signal_deliver_args {
	__u64 pad;
	int sig;
	int errno;
	int code;
	unsigned long sa_handler;
	unsigned long sa_flags;
};
#endif

#ifndef BPF_SUPPORTS_RAW_TRACEPOINTS
/* Without raw tracepoints the syscall arguments are stashed at
 * sys_enter so they can still be read at sys_exit. */
struct sys_stash_args {
	unsigned long args[6];
};
#endif

/* State handed to each event filler while it builds an event. */
struct filler_data {
	void *ctx;
	struct scap_bpf_settings *settings;
	struct scap_bpf_per_cpu_state *state;
	char *tmp_scratch;
	const struct ppm_event_info *evt;
	const struct ppm_event_entry *filler_info;
	bool curarg_already_on_frame;
	char *buf;
#ifndef BPF_SUPPORTS_RAW_TRACEPOINTS
	unsigned long *args;
#endif
	int fd;
};
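
/*
 * Illustrative sketch (disabled): each filler receives a filler_data and
 * appends one argument at a time to the frame under construction.
 * bpf_val_to_ring() stands for the helper doing that in the surrounding
 * code; its exact name and signature are an assumption of this sketch.
 */
#if 0
static __always_inline int example_filler(struct filler_data *data)
{
	/* emit the file descriptor stashed in the filler state */
	return bpf_val_to_ring(data, data->fd);
}
#endif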

/* Layout of the records written to the perf ring buffer; header.size
 * being a __u16 is what caps the event size below. */
struct perf_event_header {
	__u32 type;
	__u16 misc;
	__u16 size;
};

struct perf_event_sample {
	struct perf_event_header header;
	__u32 size;
	char data[];
};

/*
 * Unfortunately the entire perf event length must fit in a u16
 * (perf_event_header.size above).
 */
#define PERF_EVENT_MAX_SIZE (0xffff - sizeof(struct perf_event_sample))

/*
 * Due to the way the verifier checks accesses to variable memory, the
 * scratch size needs to be a power of two larger than
 * PERF_EVENT_MAX_SIZE * 2, so offsets can be bounded with a simple mask.
 */
#define SCRATCH_SIZE (1 << 18)
#define SCRATCH_SIZE_MAX (SCRATCH_SIZE - 1)
#define SCRATCH_SIZE_HALF (SCRATCH_SIZE_MAX >> 1)
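
/*
 * Illustrative sketch (disabled): masking an offset with
 * SCRATCH_SIZE_HALF is how a filler can convince the verifier that a
 * variable-length access of up to PERF_EVENT_MAX_SIZE bytes starting at
 * that offset cannot run past the end of the scratch buffer. The helper
 * name is hypothetical.
 */
#if 0
static __always_inline char *scratch_at(struct filler_data *data, unsigned long off)
{
	/* after the mask, off <= SCRATCH_SIZE_HALF, and
	 * SCRATCH_SIZE_HALF + PERF_EVENT_MAX_SIZE < SCRATCH_SIZE */
	return &data->buf[off & SCRATCH_SIZE_HALF];
}
#endif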
#endif /* __KERNEL__ */

/*
 * Indices of the maps created for the probe; userspace uses them to
 * reference each map when loading and configuring the programs.
 */
enum scap_map_types {
	SCAP_PERF_MAP = 0,
	SCAP_TAIL_MAP = 1,
	SCAP_SYSCALL_CODE_ROUTING_TABLE = 2,
	SCAP_SYSCALL_TABLE = 3,
	SCAP_EVENT_INFO_TABLE = 4,
	SCAP_FILLERS_TABLE = 5,
	SCAP_FRAME_SCRATCH_MAP = 6,
	SCAP_TMP_SCRATCH_MAP = 7,
	SCAP_SETTINGS_MAP = 8,
	SCAP_LOCAL_STATE_MAP = 9,
#ifndef BPF_SUPPORTS_RAW_TRACEPOINTS
	SCAP_STASH_MAP = 10,
#endif
};
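
/*
 * Illustrative sketch (disabled, userspace side): the enum gives each
 * map a stable index, so a loader can keep the file descriptors in a
 * plain array. The array parameter and single-entry-at-key-0 layout are
 * assumptions of this sketch; bpf_map_update_elem() is the standard
 * libbpf call.
 */
#if 0
static int write_settings(int bpf_map_fds[], const struct scap_bpf_settings *settings)
{
	uint32_t zero = 0;	/* assuming a single-entry array map */

	return bpf_map_update_elem(bpf_map_fds[SCAP_SETTINGS_MAP], &zero,
				   settings, BPF_ANY);
}
#endif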

/* Capture settings written by userspace and read by every probe program. */
struct scap_bpf_settings {
	uint64_t boot_time;
	void *socket_file_ops;
	uint32_t snaplen;
	uint32_t sampling_ratio;
	bool capture_enabled;
	bool do_dynamic_snaplen;
	bool page_faults;
	bool dropping_mode;
	bool is_dropping;
	bool tracers_enabled;
	uint16_t fullcapture_port_range_start;
	uint16_t fullcapture_port_range_end;
	uint16_t statsd_port;
} __attribute__((packed));
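
/*
 * Illustrative sketch (disabled): how probe code might consult the
 * settings before emitting an event. keep_this_sample() is a
 * hypothetical stand-in for the real sampling decision, which also
 * feeds back through is_dropping.
 */
#if 0
static __always_inline bool capture_allowed(struct scap_bpf_settings *settings)
{
	if (!settings->capture_enabled)
		return false;
	/* a sampling_ratio of 1 means no sampling at all */
	if (settings->dropping_mode && settings->sampling_ratio > 1)
		return keep_this_sample(settings->sampling_ratio);	/* hypothetical */
	return true;
}
#endif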

/* Event-building state that survives across tail calls. */
struct tail_context {
	enum ppm_event_type evt_type;
	unsigned long long ts;
	unsigned long curarg;
	unsigned long curoff;
	unsigned long len;
	int prev_res;
} __attribute__((packed));

/* Per-CPU state: the current tail context plus event/drop counters. */
struct scap_bpf_per_cpu_state {
	struct tail_context tail_ctx;
	unsigned long long n_evts;
	unsigned long long n_drops_buffer;
	unsigned long long n_drops_scratch_map;
	unsigned long long n_drops_pf;
	unsigned long long n_drops_bug;
	unsigned int hotplug_cpu;
	bool in_use;
} __attribute__((packed));
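
/*
 * Illustrative sketch (disabled): each CPU owns one entry of this state,
 * keyed by CPU id; in_use flags re-entrancy on the same CPU.
 * local_state_map is assumed to be the array map behind
 * SCAP_LOCAL_STATE_MAP.
 */
#if 0
static __always_inline struct scap_bpf_per_cpu_state *get_local_state(void)
{
	uint32_t cpu = bpf_get_smp_processor_id();

	/* returns NULL if cpu is out of range, so callers must check */
	return bpf_map_lookup_elem(&local_state_map, &cpu);
}
#endif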

#endif /* __TYPES_H */