#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include "stp_helper_lock.h"
// When tracing the mmap()/munmap()/mprotect() syscalls to notice
// memory map changes, we need to cache the syscall entry parameter
// values for processing at syscall exit.  (A usage sketch at the end
// of this file illustrates the intended entry/exit pairing.)

// __stp_tf_map_lock protects the hash table.
// Documentation/spinlocks.txt suggests we can be a bit more clever
// if we guarantee that in interrupt context we only read, never
// write, the data structures.  We should never change the hash table
// or its contents in interrupt context (which should only ever call
// stap_find_map_map_info to retrieve stored info).  So we might want
// to look into that if this lock turns out to be a bottleneck.
static STP_DEFINE_RWLOCK(__stp_tf_map_lock);
#define __STP_TF_HASH_BITS 4
#define __STP_TF_TABLE_SIZE (1 << __STP_TF_HASH_BITS)
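
// Number of preallocated map entries; the default of 100 can be
// overridden at module build time by defining
// TASK_FINDER_MAP_ENTRY_ITEMS.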
#ifndef TASK_FINDER_MAP_ENTRY_ITEMS
#define TASK_FINDER_MAP_ENTRY_ITEMS 100
#endif
struct __stp_tf_map_entry {
/* private: */
struct hlist_node hlist;
int usage;
/* public: */
pid_t pid;
long syscall_no;
unsigned long arg0;
unsigned long arg1;
unsigned long arg2;
};
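
// Preallocated entry storage.  The free list is a single hlist
// chaining the entries not currently in use; the hash table holds
// the live entries, bucketed by pid.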
static struct __stp_tf_map_entry
__stp_tf_map_free_list_items[TASK_FINDER_MAP_ENTRY_ITEMS];
static struct hlist_head __stp_tf_map_free_list[1];
static struct hlist_head __stp_tf_map_table[__STP_TF_TABLE_SIZE];
// __stp_tf_map_initialize(): Initialize the free list.  Grabs the
// write lock.
static void
__stp_tf_map_initialize(void)
{
int i;
struct hlist_head *head = &__stp_tf_map_free_list[0];
unsigned long flags;
stp_write_lock_irqsave(&__stp_tf_map_lock, flags);
for (i = 0; i < TASK_FINDER_MAP_ENTRY_ITEMS; i++) {
hlist_add_head(&__stp_tf_map_free_list_items[i].hlist, head);
}
stp_write_unlock_irqrestore(&__stp_tf_map_lock, flags);
}
// __stp_tf_map_get_free_entry(): Returns an entry from the free list
// or NULL. The __stp_tf_map_lock must be write locked before calling this
// function.
static struct __stp_tf_map_entry *
__stp_tf_map_get_free_entry(void)
{
struct hlist_head *head = &__stp_tf_map_free_list[0];
struct hlist_node *node;
struct __stp_tf_map_entry *entry = NULL;
if (hlist_empty(head))
return NULL;
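	// Take the first entry off the free list.
	// (stap_hlist_for_each_entry is the runtime's hlist iteration
	// compatibility wrapper, hence the iterate-and-break form.)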
stap_hlist_for_each_entry(entry, node, head, hlist) {
break;
}
if (entry != NULL)
hlist_del(&entry->hlist);
return entry;
}
// __stp_tf_map_put_free_entry(): Puts an entry back on the free
// list. The __stp_tf_map_lock must be write locked before calling this
// function.
static void
__stp_tf_map_put_free_entry(struct __stp_tf_map_entry *entry)
{
struct hlist_head *head = &__stp_tf_map_free_list[0];
hlist_add_head(&entry->hlist, head);
}
// __stp_tf_map_hash(): Compute the hash table bucket for a task.
// Masking with (__STP_TF_TABLE_SIZE - 1) works because the table
// size is a power of two.
static inline u32
__stp_tf_map_hash(struct task_struct *tsk)
{
return (jhash_1word(tsk->pid, 0) & (__STP_TF_TABLE_SIZE - 1));
}
// Get the map entry if one is present for the task in the map hash
// table.  Returns NULL if not present.  Takes (and releases) a read
// lock on __stp_tf_map_lock.  Note that the entry is returned after
// the lock is dropped; the usage pattern here is that a task only
// adds and removes its own entry, so the entry cannot vanish under
// the caller.
static struct __stp_tf_map_entry *
__stp_tf_get_map_entry(struct task_struct *tsk)
{
struct hlist_head *head;
struct hlist_node *node;
struct __stp_tf_map_entry *entry;
unsigned long flags;
stp_read_lock_irqsave(&__stp_tf_map_lock, flags);
head = &__stp_tf_map_table[__stp_tf_map_hash(tsk)];
stap_hlist_for_each_entry(entry, node, head, hlist) {
if (tsk->pid == entry->pid) {
stp_read_unlock_irqrestore(&__stp_tf_map_lock, flags);
return entry;
}
}
stp_read_unlock_irqrestore(&__stp_tf_map_lock, flags);
return NULL;
}
// Add the map info to the map hash table. Takes a write lock on
// __stp_tf_map_lock.
static int
__stp_tf_add_map(struct task_struct *tsk, long syscall_no, unsigned long arg0,
unsigned long arg1, unsigned long arg2)
{
struct hlist_head *head;
struct hlist_node *node;
struct __stp_tf_map_entry *entry;
unsigned long flags;
stp_write_lock_irqsave(&__stp_tf_map_lock, flags);
head = &__stp_tf_map_table[__stp_tf_map_hash(tsk)];
stap_hlist_for_each_entry(entry, node, head, hlist) {
// If we find an existing entry, just increment the
// usage count.
if (tsk->pid == entry->pid) {
entry->usage++;
stp_write_unlock_irqrestore(&__stp_tf_map_lock, flags);
return 0;
}
}
// Get an element from the free list.
entry = __stp_tf_map_get_free_entry();
if (!entry) {
stp_write_unlock_irqrestore(&__stp_tf_map_lock, flags);
return -ENOMEM;
}
entry->usage = 1;
entry->pid = tsk->pid;
entry->syscall_no = syscall_no;
entry->arg0 = arg0;
entry->arg1 = arg1;
entry->arg2 = arg2;
hlist_add_head(&entry->hlist, head);
stp_write_unlock_irqrestore(&__stp_tf_map_lock, flags);
return 0;
}
// Remove the map entry from the map hash table. Takes a write lock on
// __stp_tf_map_lock.
static int
__stp_tf_remove_map_entry(struct __stp_tf_map_entry *entry)
{
if (entry != NULL) {
unsigned long flags;
stp_write_lock_irqsave(&__stp_tf_map_lock, flags);
// Decrement the usage count.
entry->usage--;
		// If the entry is now unused, remove it from the hash
		// table and put it back on the free list.
if (entry->usage == 0) {
hlist_del(&entry->hlist);
__stp_tf_map_put_free_entry(entry);
}
stp_write_unlock_irqrestore(&__stp_tf_map_lock, flags);
}
return 0;
}
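
// A minimal usage sketch, not part of the runtime proper: how a
// syscall entry/exit handler pair might use the cache above.  The
// handler names and signatures below are hypothetical; real callers
// obtain the task, syscall number, and arguments from their tracing
// hooks.  Kept under "#if 0" since it is illustrative only.
#if 0
static void
__stp_tf_mmap_entry_sketch(struct task_struct *tsk, long syscall_no,
			   unsigned long arg0, unsigned long arg1,
			   unsigned long arg2)
{
	// Cache the entry arguments; if the free list is exhausted,
	// the exit handler will simply find no entry and do nothing.
	(void) __stp_tf_add_map(tsk, syscall_no, arg0, arg1, arg2);
}

static void
__stp_tf_mmap_exit_sketch(struct task_struct *tsk, unsigned long rv)
{
	struct __stp_tf_map_entry *entry = __stp_tf_get_map_entry(tsk);
	if (entry == NULL)
		return;
	// ... use entry->syscall_no and entry->arg0/arg1/arg2 together
	// with the return value rv to record the memory map change ...
	__stp_tf_remove_map_entry(entry);
}
#endif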