File: smplock.h

Package: kernel-source-2.4.14 2.4.14-1 (Debian woody)
/*
 * <asm/smplock.h>
 *
 * Default SMP lock implementation
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>

#include <asm/current.h>
#include <asm/hardirq.h>

extern spinlock_t kernel_flag;

#define kernel_locked()		spin_is_locked(&kernel_flag)
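/*
 * The lock itself is defined once in the arch-independent
 * scheduler core; this header only declares it. A minimal
 * sketch of that definition, assuming the plain 2.4-era
 * spinlock initializer (the exact form and alignment
 * attribute vary between 2.4 releases):
 */
#if 0	/* illustrative sketch, not part of this header */
spinlock_t kernel_flag __cacheline_aligned = SPIN_LOCK_UNLOCKED;
#endif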

/*
 * Release the global kernel lock and the global
 * interrupt lock (for this CPU), then re-enable
 * interrupts.
 */
static __inline__ void
release_kernel_lock(struct task_struct *task, int cpu)
{
	/* lock_depth >= 0 means this task holds the BKL */
	if (task->lock_depth >= 0)
		spin_unlock(&kernel_flag);
	release_irqlock(cpu);
	__sti();
}

/*
 * Re-acquire the kernel lock if this task was
 * holding it when it was scheduled out
 */
static __inline__ void
reacquire_kernel_lock(struct task_struct *task)
{
	if (task->lock_depth >= 0)
		spin_lock(&kernel_flag);
}
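
/*
 * These two helpers exist for the scheduler: a task holding
 * the BKL must drop it across a context switch and take it
 * back when it runs again, with lock_depth preserved in
 * between. A simplified, hypothetical sketch of the caller
 * in schedule() -- not the literal 2.4 scheduler code:
 */
#if 0	/* illustrative sketch only */
asmlinkage void schedule(void)
{
	struct task_struct *prev = current;
	int this_cpu = prev->processor;

	/* drop the BKL without touching prev->lock_depth,
	 * which keeps recording the nesting level */
	release_kernel_lock(prev, this_cpu);

	/* ... choose the next task and context-switch ... */

	/* back in this task: retake the BKL if lock_depth
	 * says it was held when we were switched out */
	reacquire_kernel_lock(current);
}
#endif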

/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously, so
 * we only need to worry about other CPUs.
 */
static __inline__ void
lock_kernel(void)
{
	/* lock_depth starts at -1; the first increment
	 * brings it to 0 and takes the spinlock */
	if (!++current->lock_depth)
		spin_lock(&kernel_flag);
}

static __inline__ void
unlock_kernel(void)
{
	/* drop the spinlock only when the outermost unlock
	 * brings lock_depth back down to -1 */
	if (--current->lock_depth < 0)
		spin_unlock(&kernel_flag);
}
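
/*
 * Taken together, lock_depth makes the BKL recursive per
 * task: nested lock_kernel() calls only bump the counter,
 * and just the outermost pair touches the spinlock. A
 * hypothetical trace (bkl_user() is made up to illustrate):
 */
#if 0	/* illustrative sketch only */
static void bkl_user(void)
{
	lock_kernel();		/* depth -1 -> 0: spin_lock taken  */
	lock_kernel();		/* depth  0 -> 1: counter only     */

	/* kernel_locked() is true here */

	unlock_kernel();	/* depth  1 -> 0: still held       */
	unlock_kernel();	/* depth  0 -> -1: spin_unlock     */
}
#endif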