|
#ifndef _SPARC_SEMAPHORE_HELPER_H
#define _SPARC_SEMAPHORE_HELPER_H
/*
* (barely) SMP- and interrupt-safe semaphore helper functions, sparc version.
*
* (C) Copyright 1999 David S. Miller (davem@redhat.com)
* (C) Copyright 1999 Jakub Jelinek (jj@ultra.linux.cz)
*/
/* Grant one wakeup credit: atomically bump the semaphore's waking count. */
#define wake_one_more(sem) atomic_inc(&(sem)->waking)
/*
 * waking_non_zero - try to consume one wakeup credit from sem->waking.
 *
 * Returns 1 if the waking count was non-zero (and decrements it),
 * 0 otherwise.  Both variants first mask interrupts by OR-ing PSR_PIL
 * into the PSR (the three nops cover the write-PSR delay slots) and
 * restore the saved PSR from %g1 before returning.
 *
 * NOTE(review): the SMP path appears to use the sparc32 packed
 * atomic_t layout -- low byte of the word is a spinlock acquired via
 * ldstub on [waking + 3], count kept in the upper 24 bits; the
 * "subcc %0, 0x1ff" subtracts both the 0xff lock byte and one count
 * unit (0x100) in a single step.  Confirm against the sparc atomic_t
 * definition of this kernel version.
 */
static __inline__ int waking_non_zero(struct semaphore *sem)
{
int ret;
#ifdef __SMP__
int tmp;
/*
 * Spin on the embedded lock byte (ldstub/tst/bne), load the word,
 * and compute count-1.  If the count was zero, the annulled "bl,a"
 * executes "mov 0, %0" (ret = 0) and the word is stored back with
 * only the lock byte cleared (count unchanged); otherwise the
 * decremented count is stored and ret = 1.  The store at "1:" also
 * releases the lock byte.
 */
__asm__ __volatile__("
rd %%psr, %%g1
or %%g1, %3, %0
wr %0, 0x0, %%psr
nop; nop; nop;
1: ldstub [%2 + 3], %0
tst %0
bne 1b
ld [%2], %0
andn %0, 0xff, %1
subcc %0, 0x1ff, %0
bl,a 1f
mov 0, %0
mov %0, %1
mov 1, %0
1: st %1, [%2]
wr %%g1, 0x0, %%psr
nop; nop; nop\n"
: "=&r" (ret), "=&r" (tmp)
: "r" (&sem->waking), "i" (PSR_PIL)
: "g1", "memory", "cc");
#else
/*
 * UP path: with interrupts masked no lock byte is needed.  Load the
 * count, decrement; if it would go negative, ret = 0 via the annulled
 * delay slot and nothing is stored.  Otherwise the decremented count
 * is written back and ret = 1.
 */
__asm__ __volatile__("
rd %%psr, %%g1
or %%g1, %2, %0
wr %0, 0x0, %%psr
nop; nop; nop;
ld [%1], %0
subcc %0, 1, %0
bl,a 1f
mov 0, %0
st %0, [%1]
mov 1, %0
1: wr %%g1, 0x0, %%psr
nop; nop; nop\n"
: "=&r" (ret)
: "r" (&sem->waking), "i" (PSR_PIL)
: "g1", "memory", "cc");
#endif
return ret;
}
/*
 * waking_non_zero_interruptible - consume a wakeup credit, or bail out
 * on a pending signal.
 *
 * Returns 1 if a waking credit was consumed, -EINTR if none was
 * available and tsk has a signal pending (in which case the credit
 * this sleeper holds on sem->count is handed back via atomic_inc),
 * and 0 if neither -- caller should keep sleeping.
 *
 * The inline asm is identical to waking_non_zero(): mask interrupts
 * via PSR_PIL, atomically decrement sem->waking if positive (SMP path
 * additionally spins on the lock byte embedded at [waking + 3]), then
 * restore the PSR saved in %g1.
 */
static __inline__ int waking_non_zero_interruptible(struct semaphore *sem,
struct task_struct *tsk)
{
int ret;
#ifdef __SMP__
int tmp;
__asm__ __volatile__("
rd %%psr, %%g1
or %%g1, %3, %0
wr %0, 0x0, %%psr
nop; nop; nop;
1: ldstub [%2 + 3], %0
tst %0
bne 1b
ld [%2], %0
andn %0, 0xff, %1
subcc %0, 0x1ff, %0
bl,a 1f
mov 0, %0
mov %0, %1
mov 1, %0
1: st %1, [%2]
wr %%g1, 0x0, %%psr
nop; nop; nop\n"
: "=&r" (ret), "=&r" (tmp)
: "r" (&sem->waking), "i" (PSR_PIL)
: "g1", "memory", "cc");
#else
__asm__ __volatile__("
rd %%psr, %%g1
or %%g1, %2, %0
wr %0, 0x0, %%psr
nop; nop; nop;
ld [%1], %0
subcc %0, 1, %0
bl,a 1f
mov 0, %0
st %0, [%1]
mov 1, %0
1: wr %%g1, 0x0, %%psr
nop; nop; nop\n"
: "=&r" (ret)
: "r" (&sem->waking), "i" (PSR_PIL)
: "g1", "memory", "cc");
#endif
/* No credit and a signal pending: give our count back and abort. */
if(ret == 0 && signal_pending(tsk)) {
atomic_inc(&sem->count);
ret = -EINTR;
}
return ret;
}
/*
 * waking_non_zero_trylock - down_trylock() helper.
 *
 * Note the inverted return convention relative to the other helpers:
 * returns 0 when a waking credit was successfully consumed (trylock
 * succeeded), 1 when none was available -- in which case the count
 * taken optimistically by the trylock fast path is handed back via
 * atomic_inc(&sem->count).
 *
 * The inline asm is identical to waking_non_zero(): mask interrupts
 * via PSR_PIL, atomically decrement sem->waking if positive (SMP path
 * additionally spins on the lock byte embedded at [waking + 3]), then
 * restore the PSR saved in %g1.
 */
static __inline__ int waking_non_zero_trylock(struct semaphore *sem)
{
int ret;
#ifdef __SMP__
int tmp;
__asm__ __volatile__("
rd %%psr, %%g1
or %%g1, %3, %0
wr %0, 0x0, %%psr
nop; nop; nop;
1: ldstub [%2 + 3], %0
tst %0
bne 1b
ld [%2], %0
andn %0, 0xff, %1
subcc %0, 0x1ff, %0
bl,a 1f
mov 0, %0
mov %0, %1
mov 1, %0
1: st %1, [%2]
wr %%g1, 0x0, %%psr
nop; nop; nop\n"
: "=&r" (ret), "=&r" (tmp)
: "r" (&sem->waking), "i" (PSR_PIL)
: "g1", "memory", "cc");
#else
__asm__ __volatile__("
rd %%psr, %%g1
or %%g1, %2, %0
wr %0, 0x0, %%psr
nop; nop; nop;
ld [%1], %0
subcc %0, 1, %0
bl,a 1f
mov 0, %0
st %0, [%1]
mov 1, %0
1: wr %%g1, 0x0, %%psr
nop; nop; nop\n"
: "=&r" (ret)
: "r" (&sem->waking), "i" (PSR_PIL)
: "g1", "memory", "cc");
#endif
/* Invert: asm yields 1 on success, trylock convention is 0. */
ret = !ret;
if(ret == 1)
atomic_inc(&sem->count);
return ret;
}
#endif /* !(_SPARC_SEMAPHORE_HELPER_H) */
|