#ifndef _ALPHA_SEMAPHORE_H
#define _ALPHA_SEMAPHORE_H
/*
* SMP- and interrupt-safe semaphores..
*
* (C) Copyright 1996 Linus Torvalds
* (C) Copyright 1996, 2000 Richard Henderson
*/
#include <asm/current.h>
#include <asm/system.h>
#include <asm/atomic.h>
#include <linux/compiler.h>
#include <linux/wait.h>
#include <linux/rwsem.h>
/*
 * Counting semaphore.
 *
 * count > 0:  the semaphore is free (count slots available).
 * count == 0: held, no waiters.
 * count < 0:  held, -count tasks are sleeping on "wait"
 *             (see __down/__up below for the accounting).
 */
struct semaphore {
	atomic_t count;		/* available slots; negative = waiters */
	wait_queue_head_t wait;	/* tasks blocked in the contention path */
};
#define __SEMAPHORE_INITIALIZER(name, n) \
{ \
.count = ATOMIC_INIT(n), \
.wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \
}
#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
/*
 * Run-time initialization of a semaphore to "val" available slots.
 *
 * Logically this is
 *	*sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem), val);
 * but initializing the members individually lets gcc generate
 * better code than a whole-struct assignment would.
 */
static inline void sema_init(struct semaphore *sem, int val)
{
	init_waitqueue_head(&sem->wait);
	atomic_set(&sem->count, val);
}
/* Initialize a semaphore as an unlocked mutex (one slot free). */
static inline void init_MUTEX (struct semaphore *sem)
{
	sema_init(sem, 1);
}
/* Initialize a semaphore as an already-locked mutex (no slots free). */
static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{
	sema_init(sem, 0);
}
/*
 * Public entry points.  When CONFIG_DEBUG_SEMAPHORE is off they are
 * provided as the extern inline wrappers at the bottom of this file;
 * the out-of-line definitions and the __*_failed/__up_wakeup slow
 * paths live in arch/alpha/kernel/semaphore.c.
 */
extern void down(struct semaphore *);
extern void __down_failed(struct semaphore *);

extern int down_interruptible(struct semaphore *);
extern int __down_failed_interruptible(struct semaphore *);

extern int down_trylock(struct semaphore *);

extern void up(struct semaphore *);
extern void __up_wakeup(struct semaphore *);
/*
* Hidden out of line code is fun, but extremely messy. Rely on newer
* compilers to do a respectable job with this. The contention cases
* are handled out of line in arch/alpha/kernel/semaphore.c.
*/
/*
 * Fast path of down(): atomically take one slot.  If the count goes
 * negative the semaphore was already held, so fall into the out-of-line
 * sleeping path.  May sleep; must not be called from atomic context.
 */
static inline void __down(struct semaphore *sem)
{
	might_sleep();
	if (unlikely(atomic_dec_return(&sem->count) < 0))
		__down_failed(sem);
}
/*
 * Interruptible fast path of down().  Returns 0 on success; on
 * contention the out-of-line path sleeps and returns non-zero if the
 * wait was interrupted by a signal.
 */
static inline int __down_interruptible(struct semaphore *sem)
{
	might_sleep();
	if (unlikely(atomic_dec_return(&sem->count) < 0))
		return __down_failed_interruptible(sem);
	return 0;
}
/*
* down_trylock returns 0 on success, 1 if we failed to get the lock.
*/
/*
 * Try to take one slot without sleeping, using a hand-rolled Alpha
 * load-locked/store-conditional sequence.  Returns 0 on success,
 * 1 if the semaphore was already held (note: inverted vs. down()).
 */
static inline int __down_trylock(struct semaphore *sem)
{
	long ret;

	/* "Equivalent" C:

	   do {
		ret = ldl_l;
		--ret;
		if (ret < 0)
			break;
		ret = stl_c = ret;
	   } while (ret == 0);
	*/
	/* On the success path %0 holds the stl_c status (1), so the
	   final "ret < 0" is false; on the would-go-negative path we
	   branch to 2: with the negative count still in %0, yielding 1.
	   The stl_c retry branch lives in .subsection 2 to keep the
	   failure-predicted branch out of the hot cache line; the mb
	   orders the acquire against later accesses.  */
	__asm__ __volatile__(
		"1:	ldl_l	%0,%1\n"
		"	subl	%0,1,%0\n"
		"	blt	%0,2f\n"
		"	stl_c	%0,%1\n"
		"	beq	%0,3f\n"
		"	mb\n"
		"2:\n"
		".subsection 2\n"
		"3:	br	1b\n"
		".previous"
		: "=&r" (ret), "=m" (sem->count)
		: "m" (sem->count));

	return ret < 0;
}
/*
 * Fast path of up(): release one slot.  A post-increment count <= 0
 * means at least one task was sleeping on the old negative count, so
 * take the out-of-line path to wake it.
 */
static inline void __up(struct semaphore *sem)
{
	long newcount;

	newcount = atomic_inc_return(&sem->count);
	if (unlikely(newcount <= 0))
		__up_wakeup(sem);
}
/*
 * Without semaphore debugging the public entry points are just the
 * inline fast paths above.  With CONFIG_DEBUG_SEMAPHORE the real
 * (instrumented) definitions come from arch/alpha/kernel/semaphore.c
 * instead, so these wrappers are compiled out.
 */
#if !defined(CONFIG_DEBUG_SEMAPHORE)
extern inline void down(struct semaphore *sem)
{
	__down(sem);
}
extern inline int down_interruptible(struct semaphore *sem)
{
	return __down_interruptible(sem);
}
extern inline int down_trylock(struct semaphore *sem)
{
	return __down_trylock(sem);
}
extern inline void up(struct semaphore *sem)
{
	__up(sem);
}
#endif
#endif