/* $Id: fpm_atomic.h,v 1.3 2008/09/18 23:34:11 anight Exp $ */
/* (c) 2007,2008 Andrei Nigmatulin */
#ifndef FPM_ATOMIC_H
#define FPM_ATOMIC_H 1
#if defined(__m68k__)
#define _GNU_SOURCE
#include <unistd.h>
#include <sys/syscall.h>
#endif
#if HAVE_INTTYPES_H
# include <inttypes.h>
#else
# include <stdint.h>
#endif
#include <sched.h>
#ifdef HAVE_BUILTIN_ATOMIC
/**
* all the cases below (as provided by upstream) define:
* word as atomic_int_t, and
* unsigned word as atomic_uint_t
* and only use volatile atomic_uint_t as atomic_t
*/
/* Compiler provides the __sync builtins: use a plain unsigned long cell and
 * map atomic_cmp_set() onto __sync_bool_compare_and_swap(), which returns
 * non-zero when the swap succeeded. */
typedef volatile unsigned long atomic_t;
#define atomic_cmp_set(a,b,c) __sync_bool_compare_and_swap(a,b,c)
#elif ( __i386__ || __i386 )
/* 32-bit x86: the atomic word is 4 bytes. */
typedef int32_t atomic_int_t;
typedef uint32_t atomic_uint_t;
typedef volatile atomic_uint_t atomic_t;
/* Atomically add `add` to *value and return the value *before* the add
 * (lock xadd exchanges the register operand with the old memory value). */
static inline atomic_int_t atomic_fetch_add(atomic_t *value, atomic_int_t add) /* {{{ */
{
/* NOTE(review): *value appears only as an input ("m") operand; the "memory"
 * clobber is what tells the compiler it was modified - the classic
 * nginx-style idiom this code derives from. */
__asm__ volatile ( "lock;" "xaddl %0, %1;" :
"+r" (add) : "m" (*value) : "memory");
return add;
}
/* }}} */
/* Compare-and-swap: if *lock == old, store `set`. Returns non-zero
 * (sete captures ZF) when the swap happened, 0 otherwise. */
static inline atomic_uint_t atomic_cmp_set(atomic_t *lock, atomic_uint_t old, atomic_uint_t set) /* {{{ */
{
unsigned char res;
__asm__ volatile ( "lock;" "cmpxchgl %3, %1;" "sete %0;" :
"=a" (res) : "m" (*lock), "a" (old), "r" (set) : "memory");
return res;
}
/* }}} */
/* }}} */
#elif ( __amd64__ || __amd64 || __x86_64__ )
/* x86-64: the atomic word is 8 bytes. */
typedef int64_t atomic_int_t;
typedef uint64_t atomic_uint_t;
typedef volatile atomic_uint_t atomic_t;
/* Atomically add `add` to *value and return the value *before* the add
 * (lock xadd exchanges the register operand with the old memory value). */
static inline atomic_int_t atomic_fetch_add(atomic_t *value, atomic_int_t add) /* {{{ */
{
/* NOTE(review): as in the i386 variant, *value is an input operand and the
 * "memory" clobber conveys the store - inherited nginx-style idiom. */
__asm__ volatile ( "lock;" "xaddq %0, %1;" :
"+r" (add) : "m" (*value) : "memory");
return add;
}
/* }}} */
/* Compare-and-swap: if *lock == old, store `set`. Returns non-zero
 * (sete captures ZF) when the swap happened, 0 otherwise. */
static inline atomic_uint_t atomic_cmp_set(atomic_t *lock, atomic_uint_t old, atomic_uint_t set) /* {{{ */
{
unsigned char res;
__asm__ volatile ( "lock;" "cmpxchgq %3, %1;" "sete %0;" :
"=a" (res) : "m" (*lock), "a" (old), "r" (set) : "memory");
return res;
}
/* }}} */
/* NOTE(review): this inner #if/#elif pair sits inside the x86-64 branch
 * above, so the ARM leg below can never be selected on an actual ARM
 * build (which falls through to the #error at the bottom instead).
 * Looks like a structural quirk inherited from upstream - verify against
 * the original fpm/nginx header before relying on ARM support here. */
#if (__GNUC__) && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 2))
#elif ( __arm__ || __arm ) /* W-Mark Kubacki */
/* ARM: 64-bit word on 64-bit builds, 32-bit otherwise. */
#if (__arch64__ || __arch64)
typedef int64_t atomic_int_t;
typedef uint64_t atomic_uint_t;
#else
typedef int32_t atomic_int_t;
typedef uint32_t atomic_uint_t;
#endif
/* GCC builtin CAS; non-zero return means the swap succeeded. */
#define atomic_cmp_set(a,b,c) __sync_bool_compare_and_swap(a,b,c)
#endif /* defined (__GNUC__) &&... */
#elif ( __sparc__ || __sparc ) /* Marcin Ochab */
#if (__sparcv9 || __sparcv9__)
#if (__arch64__ || __arch64)
typedef uint64_t atomic_uint_t;
typedef volatile atomic_uint_t atomic_t;

/* SPARC v9 `casx`: compare the 64-bit word at *lock with `old` and, when
 * equal, store `new`; the previous contents of *lock come back in the
 * `new` register operand and are returned. */
static inline atomic_uint_t atomic_cas_64(atomic_t *lock, atomic_uint_t old, atomic_uint_t new) /* {{{ */
{
	__asm__ __volatile__("casx [%2], %3, %0 " : "=&r"(new) : "0"(new), "r"(lock), "r"(old): "memory");
	return new;
}
/* }}} */

/* Returns non-zero when the CAS succeeded (previous value matched `old`).
 * Fix: atomic_cas_64() previously returned plain `int`, truncating the
 * 64-bit previous value before this comparison, so the result could be
 * wrong whenever the upper 32 bits of *lock differed from `old`. */
static inline atomic_uint_t atomic_cmp_set(atomic_t *lock, atomic_uint_t old, atomic_uint_t set) /* {{{ */
{
	return (atomic_cas_64(lock, old, set) == old);
}
/* }}} */
#else
typedef uint32_t atomic_uint_t;
typedef volatile atomic_uint_t atomic_t;
/* SPARC `cas`: compare the 32-bit word at *lock with `old` and, when
 * equal, store `new`; the previous contents of *lock are returned.
 * (The `int` return is safe here: the value is 32 bits wide.) */
static inline int atomic_cas_32(atomic_t *lock, atomic_uint_t old, atomic_uint_t new) /* {{{ */
{
__asm__ __volatile__("cas [%2], %3, %0 " : "=&r"(new) : "0"(new), "r"(lock), "r"(old): "memory");
return new;
}
/* }}} */
/* }}} */
/* Non-zero when the swap happened, i.e. the previous value equalled `old`. */
static inline atomic_uint_t atomic_cmp_set(atomic_t *lock, atomic_uint_t old, atomic_uint_t set) /* {{{ */
{
	atomic_uint_t prev = atomic_cas_32(lock, old, set);
	return prev == old;
}
/* }}} */
#endif
#else /* #if (__sparcv9 || __sparcv9__) */
#error Sparc v8 and predecessors are not and will not be supported (see bug report 53310)
#endif /* #if (__sparcv9 || __sparcv9__) */
#elif defined(__m68k__) && defined(__linux__)
/* m68k/Linux: no universally available user-space CAS instruction, so the
 * kernel exposes one via the atomic_cmpxchg_32 syscall. Force 4-byte
 * alignment on the atomic cells. */
typedef signed int atomic_int_t __attribute__((__aligned__(4)));
typedef unsigned int atomic_uint_t __attribute__((__aligned__(4)));
typedef volatile unsigned int atomic_t __attribute__((__aligned__(4)));
/* Fallback syscall number for libc headers that predate it. */
#ifndef SYS_atomic_cmpxchg_32
#define SYS_atomic_cmpxchg_32 335
#endif
/* Raw syscall: registers pinned per the m68k syscall convention
 * (d0 = syscall number / return value, d1 = new value, d2 = expected old
 * value, a0 = target address); `trap #0` enters the kernel, a1 is
 * clobbered. NOTE(review): d0 presumably carries back the previous value
 * of *lock - the `== old` test in atomic_cmp_set() below relies on that;
 * confirm against the kernel's atomic_cmpxchg_32 implementation. */
static inline atomic_uint_t atomic_cas_32(atomic_t *lock, atomic_uint_t old, atomic_uint_t new) /* {{{ */
{
register atomic_t *a0 asm("a0") = lock;
register atomic_uint_t d2 asm("d2") = old;
register atomic_uint_t d1 asm("d1") = new;
register atomic_uint_t d0 asm("d0") = SYS_atomic_cmpxchg_32;
asm volatile("trap #0" : "+r" (d0), "+r" (d1), "+r" (a0) : "r" (d2) : "memory", "a1");
return (d0);
}
/* }}} */
/* }}} */
/* Succeeds (non-zero) when *lock still held `old` and was replaced by `set`. */
static inline atomic_uint_t atomic_cmp_set(atomic_t *lock, atomic_uint_t old, atomic_uint_t set) /* {{{ */
{
	atomic_uint_t prev = atomic_cas_32(lock, old, set);
	return prev == old ? 1 : 0;
}
/* }}} */
#else
#error Unsupported processor. Please open a bug report (bugs.php.net).
#endif
/* Acquire the spinlock. With try_once set, make a single attempt and
 * report success (1) or failure (0); otherwise spin - yielding the CPU
 * between attempts - until the lock is taken, then return 1. */
static inline int fpm_spinlock(atomic_t *lock, int try_once) /* {{{ */
{
	if (try_once) {
		return atomic_cmp_set(lock, 0, 1) ? 1 : 0;
	}

	while (!atomic_cmp_set(lock, 0, 1)) {
		sched_yield();
	}

	return 1;
}
/* }}} */
/* }}} */
/* Release the lock by storing 0. Parameter and expansion are parenthesized
 * so the macro stays correct for any lvalue argument and in any expression
 * context (CERT PRE01-C / PRE02-C). */
#define fpm_unlock(lock) ((lock) = 0)
#endif