File: spinlock.h

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/system.h>

/* Note that PA-RISC has to use `1' to mean unlocked and `0' to mean locked,
 * since its only atomic read-modify-write primitive is load-and-zero (LDCW).
 */
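/* For reference: __ldcw() is provided by <asm/system.h>, included above.
 * A plausible sketch of the LDCW wrapper it supplies (an assumption for
 * illustration, not the authoritative definition) would be:
 *
 *	#define __ldcw(a) ({ \
 *		unsigned int __ret; \
 *		__asm__ __volatile__("ldcw 0(%1),%0" \
 *			: "=r" (__ret) : "r" (a)); \
 *		__ret; \
 *	})
 *
 * LDCW atomically loads the word at a suitably aligned address and clears
 * it to zero, so a return value of 1 means we just took the lock and 0
 * means somebody else already holds it.
 */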

#undef SPIN_LOCK_UNLOCKED
#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 }

#define spin_lock_init(x)	do { (x)->lock = 1; } while(0)

#define spin_is_locked(x) ((x)->lock == 0)

/* Spin until the current holder releases the lock; does not acquire it. */
#define spin_unlock_wait(x)	do { barrier(); } while(((volatile spinlock_t *)(x))->lock == 0)

#if 1
/* Preferred variant: after a failed atomic attempt, spin on plain reads of
 * the lock word and only retry the atomic LDCW once the lock looks free,
 * so a waiting CPU doesn't bounce the cache line on every iteration
 * ("test and test-and-set"). */
#define spin_lock(x) do { \
	while (__ldcw (&(x)->lock) == 0) \
		while (((x)->lock) == 0) ; } while (0)

#else
/* Simpler variant: retry the atomic LDCW directly. */
#define spin_lock(x) \
	do { while(__ldcw(&(x)->lock) == 0); } while(0)
#endif

#define spin_unlock(x) \
	do { (x)->lock = 1; } while(0)

/* Make a single attempt to take the lock; non-zero return means success. */
#define spin_trylock(x) (__ldcw(&(x)->lock) != 0)
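/* Usage sketch (illustrative only, not part of the original header):
 *
 *	static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
 *
 *	spin_lock(&my_lock);
 *	...critical section...
 *	spin_unlock(&my_lock);
 *
 *	if (spin_trylock(&my_lock)) {
 *		...lock acquired without spinning...
 *		spin_unlock(&my_lock);
 *	}
 */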

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 */
typedef struct {
	spinlock_t lock;
	volatile int counter;
} rwlock_t;

#define RW_LOCK_UNLOCKED (rwlock_t) { SPIN_LOCK_UNLOCKED, 0 }

#define rwlock_init(lp)	do { *(lp) = RW_LOCK_UNLOCKED; } while (0)

/* read_lock and read_unlock are pretty straightforward.  It is somewhat
 * unfortunate that we end up saving/restoring the flags twice for
 * read_lock_irqsave and so on. */

static inline void read_lock(rwlock_t *rw)
{
	unsigned long flags;
	spin_lock_irqsave(&rw->lock, flags);

	rw->counter++;

	spin_unlock_irqrestore(&rw->lock, flags);
}

static inline void read_unlock(rwlock_t *rw)
{
	unsigned long flags;
	spin_lock_irqsave(&rw->lock, flags);

	rw->counter--;

	spin_unlock_irqrestore(&rw->lock, flags);
}

/* write_lock is less trivial.  We optimistically grab the lock and check
 * if we surprised any readers.  If so we release the lock and wait till
 * they're all gone before trying again.
 *
 * Also note that we don't use the _irqsave / _irqrestore suffixes here.
 * If we're called with interrupts enabled and there are readers (or other
 * writers) in interrupt handlers, the caller has made a locking error and
 * we'd deadlock sooner or later anyway.   prumpf */

static inline void write_lock(rwlock_t *rw)
{
retry:
	spin_lock(&rw->lock);

	if(rw->counter != 0) {
		/* this basically never happens */
		spin_unlock(&rw->lock);

		while(rw->counter != 0);

		goto retry;
	}

	/* got it.  now leave without unlocking */
}

/* write_unlock is absolutely trivial - we don't have to wait for anything */

static inline void write_unlock(rwlock_t *rw)
{
	spin_unlock(&rw->lock);
}
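
/* Usage sketch for the read-write lock (illustrative only, not part of
 * the original header).  Any number of readers may hold the lock at once;
 * a writer waits until the reader count drops to zero:
 *
 *	static rwlock_t my_rwlock = RW_LOCK_UNLOCKED;
 *
 *	read_lock(&my_rwlock);
 *	...read shared data...
 *	read_unlock(&my_rwlock);
 *
 *	write_lock(&my_rwlock);
 *	...modify shared data...
 *	write_unlock(&my_rwlock);
 */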

#endif /* __ASM_SPINLOCK_H */