/* -*- linux-c -*-
 * stp_task_work.c - systemtap wrappers around the kernel task_work API
 */
      #ifndef _STP_TASK_WORK_C
#define _STP_TASK_WORK_C
#include "linux/task_work_compatibility.h"
#if !defined(STAPCONF_TASK_WORK_ADD_EXPORTED)
// First typedef from the original decls, then #define as typecasted calls.
// task_work_add()/task_work_cancel() aren't exported on this kernel, so
// calls are redirected through function pointers resolved via kallsyms
// (the kallsyms_* variables are filled in by stp_task_work_init() below;
// presumably they are declared in task_work_compatibility.h — TODO confirm).
typedef typeof(&task_work_add) task_work_add_fn;
#define task_work_add (* (task_work_add_fn)kallsyms_task_work_add)
typedef typeof(&task_work_cancel) task_work_cancel_fn;
#define task_work_cancel (* (task_work_cancel_fn)kallsyms_task_work_cancel)
#endif
/* To avoid a crash when a task_work callback gets called after the
 * module is unloaded, keep track of the number of current callbacks.
 * Incremented in stp_task_work_add(), decremented in
 * stp_task_work_cancel()/stp_task_work_func_done(); drained by
 * stp_task_work_exit() before module unload. */
static atomic_t stp_task_work_callbacks = ATOMIC_INIT(0);
/*
 * stp_task_work_init() must be called (and succeed) before any other
 * stp_task_work_* function is used.
 *
 * Returns 0 on success, -ENOENT when a required symbol can't be
 * resolved.
 */
static int
stp_task_work_init(void)
{
#if !defined(STAPCONF_TASK_WORK_ADD_EXPORTED)
	/* task_work_add()/task_work_cancel() aren't exported by this
	 * kernel; resolve their addresses through kallsyms so the
	 * wrapper macros above can call them. */
	kallsyms_task_work_add = (void *)kallsyms_lookup_name("task_work_add");
	if (kallsyms_task_work_add == NULL) {
		_stp_error("Can't resolve task_work_add!");
		return -ENOENT;
	}

	kallsyms_task_work_cancel = (void *)kallsyms_lookup_name("task_work_cancel");
	if (kallsyms_task_work_cancel == NULL) {
		_stp_error("Can't resolve task_work_cancel!");
		return -ENOENT;
	}
#endif
	return 0;
}
/*
 * stp_task_work_exit() should be called once no more stp_task_work_*
 * functions will be invoked (i.e. just before module exit).
 *
 * It waits until every outstanding task_work callback has finished,
 * so the module cannot be unloaded while a callback is still pending.
 * If the module were unloaded first, the kernel would end up calling
 * through a stale function pointer.
 */
static void
stp_task_work_exit(void)
{
	/* Busy-wait (yielding one tick at a time) for the callback
	 * count to drain to zero. */
	while (atomic_read(&stp_task_work_callbacks) != 0)
		schedule_timeout_uninterruptible(1);
}
/*
 * Wrapper around task_work_add() that bumps the pending-callback
 * count on success, so stp_task_work_exit() can wait for the
 * callback to run before module unload.
 *
 * Returns task_work_add()'s result (0 on success).
 */
static int
stp_task_work_add(struct task_struct *task, struct task_work *twork)
{
	int rc = task_work_add(task, twork, true);

	if (rc == 0)
		atomic_inc(&stp_task_work_callbacks);
	return rc;
}
/*
 * Wrapper around task_work_cancel() that drops the pending-callback
 * count when a queued work item was actually cancelled (i.e. its
 * callback will never run).
 *
 * Returns the cancelled work item, or NULL if none was pending.
 */
static struct task_work *
stp_task_work_cancel(struct task_struct *task, task_work_func_t func)
{
	struct task_work *twork = task_work_cancel(task, func);

	if (twork)
		atomic_dec(&stp_task_work_callbacks);
	return twork;
}
/*
 * stp_task_work_func_done() should be called at the very end of a
 * task_work callback function so that we can keep up with callback
 * accounting.
 */
static void
stp_task_work_func_done(void)
{
	/* Pairs with the atomic_inc() in stp_task_work_add(); once the
	 * count drains to zero, stp_task_work_exit() lets the module
	 * unload proceed. */
	atomic_dec(&stp_task_work_callbacks);
}
#endif /* _STP_TASK_WORK_C */