/* atomic.h: These still suck, but the I-cache hit rate is higher.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com.au)
*/
#ifndef __ARCH_SPARC_ATOMIC__
#define __ARCH_SPARC_ATOMIC__
#include <linux/config.h>
typedef struct { volatile int counter; } atomic_t;
#ifdef __KERNEL__
#ifndef CONFIG_SMP
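/* On UP the counter is stored unshifted and plain loads and stores
 * suffice for read/set; the add/sub routines below still serialize
 * by running with interrupts disabled, see arch/sparc/lib/atomic.S.
 */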
#define ATOMIC_INIT(i) { (i) }
#define atomic_read(v) ((v)->counter)
#define atomic_set(v, i) (((v)->counter) = (i))
#else
/* We do the bulk of the actual work out of line in two common
* routines in assembler, see arch/sparc/lib/atomic.S for the
* "fun" details.
*
* For SMP the trick is you embed the spin lock byte within
* the word, use the low byte so signedness is easily retained
* via a quick arithmetic shift. It looks like this:
*
 *	----------------------------------------
 *	| signed 24-bit counter value |  lock  |  atomic_t
 *	----------------------------------------
 *	 31                          8 7      0
*/
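/* For example, atomic_set(v, 5) stores 0x00000500 and atomic_set(v, -1)
 * stores 0xffffff00; the arithmetic shift in atomic_read() recovers
 * 5 and -1 respectively while discarding the lock byte.
 */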
#define ATOMIC_INIT(i) { ((i) << 8) }
static __inline__ int atomic_read(atomic_t *v)
{
	int ret = v->counter;

	/* A non-zero low byte means some cpu holds the lock and an
	 * update is in flight; re-read until the lock is released,
	 * then recover the signed counter with an arithmetic shift.
	 */
	while (ret & 0xff)
		ret = v->counter;

	return ret >> 8;
}
#define atomic_set(v, i) (((v)->counter) = ((i) << 8))
#endif
/* The out-of-line routine takes the pointer in %g1 and the increment
 * in %g2, and hands back the new counter value in %g2.  The caller's
 * %o7 is saved in %g4 and pre-biased by 8 in the call delay slot, so
 * ___atomic_add can return with a plain "jmpl %o7, %g0" while
 * restoring %o7 from %g4 in its own delay slot.
 */
static __inline__ int __atomic_add(int i, atomic_t *v)
{
	register volatile int *ptr asm("g1");
	register int increment asm("g2");

	ptr = &v->counter;
	increment = i;

	__asm__ __volatile__(
	"mov %%o7, %%g4\n\t"
	"call ___atomic_add\n\t"
	" add %%o7, 8, %%o7\n"
	: "=&r" (increment)
	: "0" (increment), "r" (ptr)
	: "g3", "g4", "g7", "memory", "cc");

	return increment;
}
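/* In rough C, the SMP path of ___atomic_add looks like the sketch
 * below (interrupts are also disabled around the sequence; see
 * arch/sparc/lib/atomic.S for the real code):
 *
 *	while (ldstub(&((char *)ptr)[3]))	// grab the low lock byte
 *		;				// spin until it was clear
 *	val = (*ptr >> 8) + increment;		// signed 24-bit add
 *	*ptr = val << 8;			// low byte 0: also unlocks
 *	return val;
 */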
/* Same register-calling convention as __atomic_add above. */
static __inline__ int __atomic_sub(int i, atomic_t *v)
{
	register volatile int *ptr asm("g1");
	register int increment asm("g2");

	ptr = &v->counter;
	increment = i;

	__asm__ __volatile__(
	"mov %%o7, %%g4\n\t"
	"call ___atomic_sub\n\t"
	" add %%o7, 8, %%o7\n"
	: "=&r" (increment)
	: "0" (increment), "r" (ptr)
	: "g3", "g4", "g7", "memory", "cc");

	return increment;
}
#define atomic_add(i, v) ((void)__atomic_add((i), (v)))
#define atomic_sub(i, v) ((void)__atomic_sub((i), (v)))
#define atomic_dec_return(v) __atomic_sub(1, (v))
#define atomic_inc_return(v) __atomic_add(1, (v))
#define atomic_sub_and_test(i, v) (__atomic_sub((i), (v)) == 0)
#define atomic_dec_and_test(v) (__atomic_sub(1, (v)) == 0)
#define atomic_inc(v) ((void)__atomic_add(1, (v)))
#define atomic_dec(v) ((void)__atomic_sub(1, (v)))
#define atomic_add_negative(i, v) (__atomic_add((i), (v)) < 0)
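/* Typical usage, e.g. a reference count (obj and obj_destroy() are
 * hypothetical):
 *
 *	static atomic_t refcnt = ATOMIC_INIT(1);
 *
 *	atomic_inc(&refcnt);			// take a reference
 *	if (atomic_dec_and_test(&refcnt))	// drop one; was it the last?
 *		obj_destroy(obj);		// hypothetical cleanup
 */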
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
#endif /* !(__KERNEL__) */
#endif /* !(__ARCH_SPARC_ATOMIC__) */