//===-- atomic.c - Implement support functions for atomic operations.------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// atomic.c defines a set of functions for performing atomic accesses on
// arbitrary-sized memory locations. This design uses locks that should
// be fast in the uncontended case, for two reasons:
//
// 1) This code must work with C programs that do not link to anything
// (including pthreads) and so it should not depend on any pthread
// functions.
// 2) Atomic operations, rather than explicit mutexes, are most commonly used
// in code where contended operations are rare.
//
// To avoid needing a per-object lock, this code allocates an array of
// locks and hashes the object pointers to find the one that it should use.
// For operations that must be atomic on two locations, the lower lock is
// always acquired first, to avoid deadlock.
//
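// As an illustration only (the exact lowering is compiler-dependent), an
// atomic access to a type too large to be handled lock-free, e.g.
//
//   struct Big { char bytes[32]; };
//   _Atomic struct Big b;
//
// is typically lowered to a call to one of the generic entry points defined
// below, roughly __atomic_load(32, &b, &dest, __ATOMIC_SEQ_CST), which then
// takes the locked path.
//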
//===----------------------------------------------------------------------===//
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include "assembly.h"
// We use __builtin_mem* here to avoid dependencies on libc-provided headers.
#define memcpy __builtin_memcpy
#define memcmp __builtin_memcmp
// Clang objects if you redefine a builtin. This little hack allows us to
// define a function with the same name as an intrinsic.
#pragma redefine_extname __atomic_load_c SYMBOL_NAME(__atomic_load)
#pragma redefine_extname __atomic_store_c SYMBOL_NAME(__atomic_store)
#pragma redefine_extname __atomic_exchange_c SYMBOL_NAME(__atomic_exchange)
#pragma redefine_extname __atomic_compare_exchange_c SYMBOL_NAME( \
__atomic_compare_exchange)
#pragma redefine_extname __atomic_is_lock_free_c SYMBOL_NAME( \
__atomic_is_lock_free)
/// Number of locks. This allocates one page on 32-bit platforms, two on
/// 64-bit. This can be specified externally if a different trade-off between
/// memory usage and contention probability is required for a given platform.
#ifndef SPINLOCK_COUNT
#define SPINLOCK_COUNT (1 << 10)
#endif
static const long SPINLOCK_MASK = SPINLOCK_COUNT - 1;
////////////////////////////////////////////////////////////////////////////////
// Platform-specific lock implementation. Falls back to spinlocks if none is
// defined. Each platform should define the Lock type, and corresponding
// lock() and unlock() functions.
////////////////////////////////////////////////////////////////////////////////
#if defined(__FreeBSD__) || defined(__DragonFly__)
#include <errno.h>
// clang-format off
#include <sys/types.h>
#include <machine/atomic.h>
#include <sys/umtx.h>
// clang-format on
typedef struct _usem Lock;
__inline static void unlock(Lock *l) {
__c11_atomic_store((_Atomic(uint32_t) *)&l->_count, 1, __ATOMIC_RELEASE);
__c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
if (l->_has_waiters)
_umtx_op(l, UMTX_OP_SEM_WAKE, 1, 0, 0);
}
__inline static void lock(Lock *l) {
uint32_t old = 1;
while (!__c11_atomic_compare_exchange_weak((_Atomic(uint32_t) *)&l->_count,
&old, 0, __ATOMIC_ACQUIRE,
__ATOMIC_RELAXED)) {
_umtx_op(l, UMTX_OP_SEM_WAIT, 0, 0, 0);
old = 1;
}
}
/// locks for atomic operations
static Lock locks[SPINLOCK_COUNT] = {[0 ... SPINLOCK_COUNT - 1] = {0, 1, 0}};
#elif defined(__APPLE__)
#include <libkern/OSAtomic.h>
typedef OSSpinLock Lock;
__inline static void unlock(Lock *l) { OSSpinLockUnlock(l); }
/// Locks a lock. In the current implementation, this is potentially
/// unbounded in the contended case.
__inline static void lock(Lock *l) { OSSpinLockLock(l); }
static Lock locks[SPINLOCK_COUNT]; // initialized to OS_SPINLOCK_INIT which is 0
#else
_Static_assert(__atomic_always_lock_free(sizeof(uintptr_t), 0),
"Implementation assumes lock-free pointer-size cmpxchg");
typedef _Atomic(uintptr_t) Lock;
/// Unlock a lock. This is a release operation.
__inline static void unlock(Lock *l) {
__c11_atomic_store(l, 0, __ATOMIC_RELEASE);
}
/// Locks a lock. In the current implementation, this is potentially
/// unbounded in the contended case.
__inline static void lock(Lock *l) {
uintptr_t old = 0;
while (!__c11_atomic_compare_exchange_weak(l, &old, 1, __ATOMIC_ACQUIRE,
__ATOMIC_RELAXED))
old = 0;
}
/// locks for atomic operations
static Lock locks[SPINLOCK_COUNT];
#endif
/// Returns a lock to use for a given pointer.
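///
/// Illustrative behaviour: two addresses that differ only in their low 4 bits
/// map to the same lock, e.g. lock_for_pointer((void *)0x1000) and
/// lock_for_pointer((void *)0x100f) both select index 0x100, while the xor
/// with higher address bits spreads unrelated objects across the lock array.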
static __inline Lock *lock_for_pointer(void *ptr) {
intptr_t hash = (intptr_t)ptr;
// Disregard the lowest 4 bits. We want all values that may be part of the
// same memory operation to hash to the same value and therefore use the same
// lock.
hash >>= 4;
// Use the next bits as the basis for the hash
intptr_t low = hash & SPINLOCK_MASK;
// Now use the high(er) set of bits to perturb the hash, so that we don't
// get collisions from atomic fields in a single object
hash >>= 16;
hash ^= low;
// Return a pointer to the word to use
return locks + (hash & SPINLOCK_MASK);
}
/// Macros for determining whether a size is lock free.
#define ATOMIC_ALWAYS_LOCK_FREE_OR_ALIGNED_LOCK_FREE(size, p) \
(__atomic_always_lock_free(size, p) || \
(__atomic_always_lock_free(size, 0) && ((uintptr_t)p % size) == 0))
#define IS_LOCK_FREE_1(p) ATOMIC_ALWAYS_LOCK_FREE_OR_ALIGNED_LOCK_FREE(1, p)
#define IS_LOCK_FREE_2(p) ATOMIC_ALWAYS_LOCK_FREE_OR_ALIGNED_LOCK_FREE(2, p)
#define IS_LOCK_FREE_4(p) ATOMIC_ALWAYS_LOCK_FREE_OR_ALIGNED_LOCK_FREE(4, p)
#define IS_LOCK_FREE_8(p) ATOMIC_ALWAYS_LOCK_FREE_OR_ALIGNED_LOCK_FREE(8, p)
#define IS_LOCK_FREE_16(p) ATOMIC_ALWAYS_LOCK_FREE_OR_ALIGNED_LOCK_FREE(16, p)
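// For example, on a typical 64-bit target __atomic_always_lock_free(8, 0) is
// true, so IS_LOCK_FREE_8(p) reduces to a check that p is 8-byte aligned; on a
// target with no lock-free 8-byte atomics the expression folds to false at
// compile time.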
/// Macro that calls the compiler-generated lock-free versions of functions
/// when they exist.
#define TRY_LOCK_FREE_CASE(n, type, ptr) \
case n: \
if (IS_LOCK_FREE_##n(ptr)) { \
LOCK_FREE_ACTION(type); \
} \
break;
#ifdef __SIZEOF_INT128__
#define TRY_LOCK_FREE_CASE_16(p) TRY_LOCK_FREE_CASE(16, __uint128_t, p)
#else
#define TRY_LOCK_FREE_CASE_16(p) /* __uint128_t not available */
#endif
#define LOCK_FREE_CASES(ptr) \
do { \
switch (size) { \
TRY_LOCK_FREE_CASE(1, uint8_t, ptr) \
TRY_LOCK_FREE_CASE(2, uint16_t, ptr) \
TRY_LOCK_FREE_CASE(4, uint32_t, ptr) \
TRY_LOCK_FREE_CASE(8, uint64_t, ptr) \
TRY_LOCK_FREE_CASE_16(ptr) /* __uint128_t may not be supported */ \
default: \
break; \
} \
} while (0)
/// Whether atomic operations for the given size (and alignment) are lock-free.
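///
/// Illustrative use: a C11 atomic_is_lock_free(&obj) call that the compiler
/// cannot fold at compile time becomes roughly
///   __atomic_is_lock_free(sizeof(obj), &obj);
/// which resolves to this function through the redefine_extname pragma above.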
bool __atomic_is_lock_free_c(size_t size, void *ptr) {
#define LOCK_FREE_ACTION(type) return true;
LOCK_FREE_CASES(ptr);
#undef LOCK_FREE_ACTION
return false;
}
/// An atomic load operation. This is atomic with respect to the source
/// pointer only.
void __atomic_load_c(int size, void *src, void *dest, int model) {
#define LOCK_FREE_ACTION(type) \
*((type *)dest) = __c11_atomic_load((_Atomic(type) *)src, model); \
return;
LOCK_FREE_CASES(src);
#undef LOCK_FREE_ACTION
Lock *l = lock_for_pointer(src);
lock(l);
memcpy(dest, src, size);
unlock(l);
}
/// An atomic store operation. This is atomic with respect to the destination
/// pointer only.
void __atomic_store_c(int size, void *dest, void *src, int model) {
#define LOCK_FREE_ACTION(type) \
__c11_atomic_store((_Atomic(type) *)dest, *(type *)src, model); \
return;
LOCK_FREE_CASES(dest);
#undef LOCK_FREE_ACTION
Lock *l = lock_for_pointer(dest);
lock(l);
memcpy(dest, src, size);
unlock(l);
}
/// Atomic compare and exchange operation. If the value at *ptr is identical
/// to the value at *expected, then this copies the value at *desired to *ptr.
/// If they differ, then this stores the current value from *ptr in *expected.
///
/// This function returns 1 if the exchange takes place or 0 if it fails.
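///
/// Illustrative lowering (compiler-dependent): for an over-sized type,
///   atomic_compare_exchange_strong(&obj, &expected, desired)
/// becomes roughly
///   __atomic_compare_exchange(sizeof(obj), &obj, &expected, &desired,
///                             __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
/// Note that the locked fallback below compares the whole object
/// representation with memcmp, so padding bytes participate in the comparison.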
int __atomic_compare_exchange_c(int size, void *ptr, void *expected,
void *desired, int success, int failure) {
#define LOCK_FREE_ACTION(type) \
return __c11_atomic_compare_exchange_strong( \
(_Atomic(type) *)ptr, (type *)expected, *(type *)desired, success, \
failure)
LOCK_FREE_CASES(ptr);
#undef LOCK_FREE_ACTION
Lock *l = lock_for_pointer(ptr);
lock(l);
if (memcmp(ptr, expected, size) == 0) {
memcpy(ptr, desired, size);
unlock(l);
return 1;
}
memcpy(expected, ptr, size);
unlock(l);
return 0;
}
/// Performs an atomic exchange operation: the previous value at *ptr is copied
/// to *old and the value at *val is stored to *ptr. This is atomic with
/// respect to the target address (ptr) only.
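///
/// Illustrative lowering (compiler-dependent): for an over-sized type,
///   old = atomic_exchange(&obj, newval)
/// becomes roughly
///   __atomic_exchange(sizeof(obj), &obj, &newval, &old, __ATOMIC_SEQ_CST);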
void __atomic_exchange_c(int size, void *ptr, void *val, void *old, int model) {
#define LOCK_FREE_ACTION(type) \
*(type *)old = \
__c11_atomic_exchange((_Atomic(type) *)ptr, *(type *)val, model); \
return;
LOCK_FREE_CASES(ptr);
#undef LOCK_FREE_ACTION
Lock *l = lock_for_pointer(ptr);
lock(l);
memcpy(old, ptr, size);
memcpy(ptr, val, size);
unlock(l);
}
////////////////////////////////////////////////////////////////////////////////
// Where the size is known at compile time, the compiler may emit calls to
// specialised versions of the above functions.
////////////////////////////////////////////////////////////////////////////////
#ifdef __SIZEOF_INT128__
#define OPTIMISED_CASES \
OPTIMISED_CASE(1, IS_LOCK_FREE_1, uint8_t) \
OPTIMISED_CASE(2, IS_LOCK_FREE_2, uint16_t) \
OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t) \
OPTIMISED_CASE(8, IS_LOCK_FREE_8, uint64_t) \
OPTIMISED_CASE(16, IS_LOCK_FREE_16, __uint128_t)
#else
#define OPTIMISED_CASES \
OPTIMISED_CASE(1, IS_LOCK_FREE_1, uint8_t) \
OPTIMISED_CASE(2, IS_LOCK_FREE_2, uint16_t) \
OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t) \
OPTIMISED_CASE(8, IS_LOCK_FREE_8, uint64_t)
#endif
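// Illustrative expansion: each OPTIMISED_CASE definition below, combined with
// OPTIMISED_CASES, generates the sized entry points for widths 1, 2, 4, 8 and
// (when __uint128_t is available) 16 bytes, e.g. __atomic_load_4,
// __atomic_store_4, __atomic_exchange_4 and __atomic_compare_exchange_4.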
#define OPTIMISED_CASE(n, lockfree, type) \
type __atomic_load_##n(type *src, int model) { \
if (lockfree(src)) \
return __c11_atomic_load((_Atomic(type) *)src, model); \
Lock *l = lock_for_pointer(src); \
lock(l); \
type val = *src; \
unlock(l); \
return val; \
}
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) \
void __atomic_store_##n(type *dest, type val, int model) { \
if (lockfree(dest)) { \
__c11_atomic_store((_Atomic(type) *)dest, val, model); \
return; \
} \
Lock *l = lock_for_pointer(dest); \
lock(l); \
*dest = val; \
unlock(l); \
return; \
}
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) \
type __atomic_exchange_##n(type *dest, type val, int model) { \
if (lockfree(dest)) \
return __c11_atomic_exchange((_Atomic(type) *)dest, val, model); \
Lock *l = lock_for_pointer(dest); \
lock(l); \
type tmp = *dest; \
*dest = val; \
unlock(l); \
return tmp; \
}
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) \
bool __atomic_compare_exchange_##n(type *ptr, type *expected, type desired, \
int success, int failure) { \
if (lockfree(ptr)) \
return __c11_atomic_compare_exchange_strong( \
(_Atomic(type) *)ptr, expected, desired, success, failure); \
Lock *l = lock_for_pointer(ptr); \
lock(l); \
if (*ptr == *expected) { \
*ptr = desired; \
unlock(l); \
return true; \
} \
*expected = *ptr; \
unlock(l); \
return false; \
}
OPTIMISED_CASES
#undef OPTIMISED_CASE
////////////////////////////////////////////////////////////////////////////////
// Atomic read-modify-write operations for integers of various sizes.
////////////////////////////////////////////////////////////////////////////////
#define ATOMIC_RMW(n, lockfree, type, opname, op) \
type __atomic_fetch_##opname##_##n(type *ptr, type val, int model) { \
if (lockfree(ptr)) \
return __c11_atomic_fetch_##opname((_Atomic(type) *)ptr, val, model); \
Lock *l = lock_for_pointer(ptr); \
lock(l); \
type tmp = *ptr; \
*ptr = tmp op val; \
unlock(l); \
return tmp; \
}
#define ATOMIC_RMW_NAND(n, lockfree, type) \
type __atomic_fetch_nand_##n(type *ptr, type val, int model) { \
if (lockfree(ptr)) \
return __c11_atomic_fetch_nand((_Atomic(type) *)ptr, val, model); \
Lock *l = lock_for_pointer(ptr); \
lock(l); \
type tmp = *ptr; \
*ptr = ~(tmp & val); \
unlock(l); \
return tmp; \
}
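// For example, ATOMIC_RMW(4, IS_LOCK_FREE_4, uint32_t, add, +) defines
// __atomic_fetch_add_4, which the compiler may call for code such as
//   _Atomic unsigned counter;
//   atomic_fetch_add_explicit(&counter, 1, memory_order_relaxed);
// when it chooses a libcall instead of an inline atomic instruction.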
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, add, +)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, sub, -)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, and, &)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, or, |)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, xor, ^)
OPTIMISED_CASES
#undef OPTIMISED_CASE
// Allow building with clang versions that lack the __c11_atomic_fetch_nand
// builtin (before clang 14).
#if __has_builtin(__c11_atomic_fetch_nand)
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW_NAND(n, lockfree, type)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#endif