/*
* SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
*/
#ifdef USE_BPF
#include <linux/bpf.h>
#include <linux/filter.h>
#include "arch.h"
#include "bpf.h"
#include "net.h"
#include "random.h"
#include "sanitise.h"
static unsigned long bpf_prog_types[] = {
	BPF_PROG_TYPE_UNSPEC,
	BPF_PROG_TYPE_SOCKET_FILTER,
	BPF_PROG_TYPE_KPROBE,
	BPF_PROG_TYPE_SCHED_CLS,
	BPF_PROG_TYPE_SCHED_ACT,
	BPF_PROG_TYPE_TRACEPOINT,
	BPF_PROG_TYPE_XDP,
	BPF_PROG_TYPE_PERF_EVENT,
};
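
/*
 * Note: the kernel's license_is_gpl_compatible() recognises "GPL" and
 * "GPL v2" (with a space) but not "GPLv2", so programs loaded with this
 * string are treated as non-GPL and cannot call GPL-only helpers.
 */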
static const char license[] = "GPLv2";
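
/*
 * Build the attr for BPF_PROG_LOAD: pick a program type, generate an
 * instruction buffer for it, and fill in the license/logging fields.
 */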
static void bpf_prog_load(union bpf_attr *attr)
{
	unsigned long *insns = NULL, len = 0;

	attr->prog_type = RAND_ARRAY(bpf_prog_types);

	switch (attr->prog_type) {
	case BPF_PROG_TYPE_SOCKET_FILTER:
		bpf_gen_filter(&insns, &len);
		break;
	default:
		// this will go away when all the other cases are enumerated
		// fill a whole page with random bytes; len must be set too,
		// otherwise insn_cnt stays 0 and the buffer is never read.
		insns = zmalloc(page_size);
		generate_rand_bytes((unsigned char *) insns, page_size);
		len = page_size / sizeof(struct bpf_insn);
		break;
	}

	attr->insn_cnt = len;
	attr->insns = (u64) insns;
	attr->license = (u64) license;
	attr->log_level = 0;
	attr->log_size = rnd() % page_size;
	attr->log_buf = (u64) get_writable_address(page_size);
	attr->kern_version = rnd();	// TODO: stick uname in here.
}
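
/* Fallbacks for kernel headers that predate BPF_OBJ_PIN/BPF_OBJ_GET (added in Linux 4.4). */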
#ifndef BPF_OBJ_PIN
#define BPF_OBJ_PIN 6
#define BPF_OBJ_GET 7
#endif
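
/*
 * sanitise: runs before the syscall fires.  Allocate a union bpf_attr,
 * fill in the fields the chosen command reads, and set the size
 * argument (a3) to cover exactly those fields.
 */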
static void sanitise_bpf(struct syscallrecord *rec)
{
	union bpf_attr *attr;
	unsigned long bpf_map_types[] = {
		BPF_MAP_TYPE_HASH, BPF_MAP_TYPE_ARRAY, BPF_MAP_TYPE_PROG_ARRAY, BPF_MAP_TYPE_PERF_EVENT_ARRAY,
		BPF_MAP_TYPE_PERCPU_HASH, BPF_MAP_TYPE_PERCPU_ARRAY, BPF_MAP_TYPE_STACK_TRACE, BPF_MAP_TYPE_CGROUP_ARRAY,
		BPF_MAP_TYPE_LRU_HASH, BPF_MAP_TYPE_LRU_PERCPU_HASH, BPF_MAP_TYPE_LPM_TRIE,
	};

	attr = zmalloc(sizeof(union bpf_attr));
	rec->a2 = (unsigned long) attr;

	switch (rec->a1) {
	case BPF_MAP_CREATE:
		attr->map_type = RAND_ARRAY(bpf_map_types);
		attr->key_size = rnd() % 1024;
		attr->value_size = rnd() % (1024 * 64);
		attr->max_entries = rnd() % 1024;
		attr->map_flags = RAND_RANGE(0, 4);	// creation flags live in map_flags; ->flags belongs to the *_ELEM commands
		rec->a3 = 20;	// size of the BPF_MAP_CREATE fields
		break;
	case BPF_MAP_LOOKUP_ELEM:
		attr->map_fd = get_rand_bpf_fd();
		attr->key = RAND_RANGE(0, 10);
		attr->value = rnd();	// random (usually bogus) user pointer the kernel writes the value through
		rec->a3 = 32;	// size of the BPF_MAP_*_ELEM fields
		break;
	case BPF_MAP_UPDATE_ELEM:
		attr->map_fd = get_rand_bpf_fd();
		attr->key = RAND_RANGE(0, 10);
		attr->value = rnd();	// next_key aliases value in the union, so only value is set here
		attr->flags = RAND_RANGE(0, 4);
		rec->a3 = 32;
		break;
	case BPF_MAP_DELETE_ELEM:
		attr->map_fd = get_rand_bpf_fd();
		attr->key = RAND_RANGE(0, 10);
		rec->a3 = 32;
		break;
	case BPF_MAP_GET_NEXT_KEY:
		attr->map_fd = get_rand_bpf_fd();
		attr->key = RAND_RANGE(0, 10);
		attr->next_key = rnd();	// the field the kernel writes through for GET_NEXT_KEY
		rec->a3 = 32;
		break;
	case BPF_OBJ_PIN:
	case BPF_OBJ_GET:
		// these commands really take a pathname/bpf_fd pair; map_fd
		// aliases the low bits of pathname, which is close enough for
		// fuzzing and keeps this building against older headers.
		attr->map_fd = get_rand_bpf_fd();
		rec->a3 = 32;
		break;
	case BPF_PROG_LOAD:
		bpf_prog_load(attr);
		rec->a3 = 48;	// size of the BPF_PROG_LOAD fields
		break;
	default:
		break;
	}
}
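
/*
 * post: runs after the syscall returns.  Frees the buffers that the
 * sanitise routine and bpf_prog_load() allocated.
 */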
static void post_bpf(struct syscallrecord *rec)
{
	union bpf_attr *attr = (union bpf_attr *) rec->a2;

	switch (rec->a1) {
	case BPF_MAP_CREATE:
		//TODO: add fd to local object cache
		break;
	case BPF_PROG_LOAD:
		//TODO: add fd to local object cache
		// every path through bpf_prog_load() allocates insns (the kernel
		// copies the program during the syscall), so always free it.
		free((void *)(unsigned long) attr->insns);
		break;
	default:
		break;
	}

	freeptr(&rec->a2);
}
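
/* bpf(2) commands for arg1; ARG_OP makes the fuzzer pick one at random per call. */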
static unsigned long bpf_cmds[] = {
	BPF_MAP_CREATE, BPF_MAP_LOOKUP_ELEM, BPF_MAP_UPDATE_ELEM, BPF_MAP_DELETE_ELEM,
	BPF_MAP_GET_NEXT_KEY, BPF_PROG_LOAD, BPF_OBJ_PIN, BPF_OBJ_GET,
};
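
/*
 * Table entry wiring bpf(2) into the fuzzer: cmd comes from bpf_cmds,
 * and uattr/size are filled in by sanitise_bpf().
 */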
struct syscallentry syscall_bpf = {
	.name = "bpf",
	.num_args = 3,
	.arg1name = "cmd",
	.arg1type = ARG_OP,
	.arg1list = ARGLIST(bpf_cmds),
	.arg2name = "uattr",
	.arg3name = "size",
	.sanitise = sanitise_bpf,
	.post = post_bpf,
};
#endif	/* USE_BPF */