/* stp_task_work.c - wrappers around the kernel's task_work_add() /
 * task_work_cancel() API for the SystemTap runtime.
 *
 * Part of systemtap 5.1-5 (file is 136 lines, 3,999 bytes).
 */
#ifndef _STP_TASK_WORK_C
#define _STP_TASK_WORK_C

#include "linux/task_work_compatibility.h"

// Handle kernel commit 68cbd415dd4b9c5b9df69f0f091879e56bf5907a
// task_work: s/task_work_cancel()/task_work_cancel_func()/
// TASK_WORK_CANCEL_FN names whichever by-function cancel entry point
// this kernel provides; used both for the typedef and for the
// kallsyms lookup string below.
#if defined(STAPCONF_TASK_WORK_CANCEL_FUNC)
#define TASK_WORK_CANCEL_FN task_work_cancel_func
#else
#define TASK_WORK_CANCEL_FN task_work_cancel
#endif

// Two-level stringification so TOSTRING(TASK_WORK_CANCEL_FN) expands
// the macro first, yielding "task_work_cancel_func" or
// "task_work_cancel" for the kallsyms lookup.
#define STRINGIFY(x) #x
#define TOSTRING(x) STRINGIFY(x)

// When the kernel doesn't export these symbols, call them through
// function pointers resolved at init time by kallsyms_lookup_name()
// (see stp_task_work_init()).  ibt_wrapper presumably handles
// indirect-branch-tracking constraints on such calls -- defined in
// the compatibility header, not visible here.
#if !defined(STAPCONF_TASK_WORK_ADD_EXPORTED)
// First typedef from the original decls, then #define as typecasted calls.
typedef typeof(&task_work_add) task_work_add_fn;
#define task_work_add(a,b,c) ibt_wrapper(int, (* (task_work_add_fn)kallsyms_task_work_add)((a), (b), (c)))
#endif
#if !defined(STAPCONF_TASK_WORK_CANCEL_EXPORTED)
typedef typeof(&TASK_WORK_CANCEL_FN) task_work_cancel_fn;
#define task_work_cancel(a,b) ibt_wrapper(struct callback_head *, (* (task_work_cancel_fn)kallsyms_task_work_cancel_fn)((a), (b)))
#endif

/* To avoid a crash when a task_work callback gets called after the
 * module is unloaded, keep track of the number of current callbacks. */
static atomic_t stp_task_work_callbacks = ATOMIC_INIT(0);
/* stp_task_work_exit() sleeps here until the count drops to zero. */
static DECLARE_WAIT_QUEUE_HEAD(stp_task_work_waitq);

/*
 * stp_task_work_init() should be called before any other
 * stp_task_work_* functions are called to do setup.
 *
 * Resolves the addresses of any task_work_* kernel functions that are
 * not exported, so the wrapper macros above can call them indirectly.
 * Returns 0 on success, -ENOENT if a required symbol can't be found.
 */
static int
stp_task_work_init(void)
{
#if !defined(STAPCONF_TASK_WORK_ADD_EXPORTED)
	/* task_work_add() isn't exported on this kernel; look up its
	 * address through kallsyms instead. */
	kallsyms_task_work_add = (void *)kallsyms_lookup_name("task_work_add");
	if (kallsyms_task_work_add == NULL) {
		_stp_error("Can't resolve task_work_add!");
		return -ENOENT;
	}
#endif
#if !defined(STAPCONF_TASK_WORK_CANCEL_EXPORTED)
	/* Same for the cancel entry point; TOSTRING() yields whichever
	 * name (task_work_cancel / task_work_cancel_func) this kernel
	 * uses. */
	kallsyms_task_work_cancel_fn =
		(void *)kallsyms_lookup_name(TOSTRING(TASK_WORK_CANCEL_FN));
	if (kallsyms_task_work_cancel_fn == NULL) {
		_stp_error("Can't resolve %s!", TOSTRING(TASK_WORK_CANCEL_FN));
		return -ENOENT;
	}
#endif
	return 0;
}

/*
 * stp_task_work_exit() should be called when no more
 * stp_task_work_* functions will be called (before module exit).
 *
 * This function makes sure that all the callbacks are finished before
 * letting the module unload.  If the module unloads before a callback
 * is called, the kernel will try to make a function call to an
 * invalid address.
 */
static void
stp_task_work_exit(void)
{
	/* Sleep until every outstanding callback has dropped its
	 * reference via stp_task_work_put(), i.e. the counter hits 0.
	 * The final put() does the matching wake_up(). */
	wait_event(stp_task_work_waitq, !atomic_read(&stp_task_work_callbacks));
}

/* Take a reference on the outstanding-callback count.  Called once
 * for each task worker successfully queued (see stp_task_work_add());
 * paired with stp_task_work_put(). */
static void
stp_task_work_get(void)
{
	/*
	 * We use atomic_inc_return() here instead of atomic_inc() because
	 * atomic_inc_return() implies a full memory barrier and we need the
	 * updates to stp_task_work_callbacks to be ordered correctly, otherwise
	 * there could still be a task worker active after stp_task_work_exit()
	 * returns (assuming that no task workers are added *after*
	 * stp_task_work_exit() returns).
	 */
	atomic_inc_return(&stp_task_work_callbacks);
}

/* Drop a reference on the outstanding-callback count; when it reaches
 * zero, wake stp_task_work_exit().  atomic_dec_and_test() implies a
 * full memory barrier, mirroring the ordering argument in
 * stp_task_work_get(). */
static void
stp_task_work_put(void)
{
	if (atomic_dec_and_test(&stp_task_work_callbacks))
		wake_up(&stp_task_work_waitq);
}

/*
 * Our task_work_add() wrapper that remembers that we've got a pending
 * callback.
 *
 * Queues @twork on @task with TWA_RESUME notification and, on
 * success, takes a reference so module unload waits for the callback.
 * Returns 0 on success or task_work_add()'s error code.
 */
static int
stp_task_work_add(struct task_struct *task, struct task_work *twork)
{
	int rc = task_work_add(task, twork, TWA_RESUME);

	if (rc != 0)
		return rc;	/* not queued; nothing to account for */

	stp_task_work_get();
	return 0;
}

/*
 * Our task_work_cancel() wrapper that remembers that a callback has
 * been cancelled.
 *
 * Attempts to dequeue the pending work on @task whose callback is
 * @func.  If something was actually cancelled, drop its accounting
 * reference.  Returns the cancelled work item, or NULL when no
 * matching work was pending.
 */
static struct task_work *
stp_task_work_cancel(struct task_struct *task, task_work_func_t func)
{
	struct task_work *cancelled = task_work_cancel(task, func);

	if (cancelled)
		stp_task_work_put();

	return cancelled;
}

/*
 * stp_task_work_func_done() should be called at the very end of a
 * task_work callback function so that we can keep up with callback
 * accounting.
 */
static void
stp_task_work_func_done(void)
{
	/* Drop the reference taken by stp_task_work_add(); this may
	 * wake a waiting stp_task_work_exit(). */
	stp_task_work_put();
}


#endif /* _STP_TASK_WORK_C */