/*
* Copyright (C) 2007, 2008, 2010, 2012 Apple Inc. All rights reserved.
* Copyright (C) 2007 Justin Haygood (jhaygood@reaktix.com)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef Atomics_h
#define Atomics_h
#include "wtf/AddressSanitizer.h"
#include "wtf/Assertions.h"
#include "wtf/CPU.h"
#include <stdint.h>
#if COMPILER(MSVC)
#include <windows.h>
#endif
#if defined(THREAD_SANITIZER)
#include <sanitizer/tsan_interface_atomic.h>
#endif
#if defined(ADDRESS_SANITIZER)
#include <sanitizer/asan_interface.h>
#endif
namespace WTF {
#if COMPILER(MSVC)
// atomicAdd returns the result of the addition.
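// Note that InterlockedExchangeAdd returns the value the addend had before
// the addition, so the increment is added back to produce the new value.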
ALWAYS_INLINE int atomicAdd(int volatile* addend, int increment) {
return InterlockedExchangeAdd(reinterpret_cast<long volatile*>(addend),
static_cast<long>(increment)) +
increment;
}
ALWAYS_INLINE unsigned atomicAdd(unsigned volatile* addend,
unsigned increment) {
return InterlockedExchangeAdd(reinterpret_cast<long volatile*>(addend),
static_cast<long>(increment)) +
increment;
}
#if defined(_WIN64)
ALWAYS_INLINE unsigned long long atomicAdd(unsigned long long volatile* addend,
unsigned long long increment) {
return InterlockedExchangeAdd64(reinterpret_cast<long long volatile*>(addend),
static_cast<long long>(increment)) +
increment;
}
#endif
// atomicSubtract returns the result of the subtraction.
ALWAYS_INLINE int atomicSubtract(int volatile* addend, int decrement) {
return InterlockedExchangeAdd(reinterpret_cast<long volatile*>(addend),
static_cast<long>(-decrement)) -
decrement;
}
ALWAYS_INLINE unsigned atomicSubtract(unsigned volatile* addend,
unsigned decrement) {
return InterlockedExchangeAdd(reinterpret_cast<long volatile*>(addend),
-static_cast<long>(decrement)) -
decrement;
}
#if defined(_WIN64)
ALWAYS_INLINE unsigned long long atomicSubtract(
unsigned long long volatile* addend,
unsigned long long decrement) {
return InterlockedExchangeAdd64(reinterpret_cast<long long volatile*>(addend),
-static_cast<long long>(decrement)) -
decrement;
}
#endif
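// atomicIncrement and atomicDecrement return the resulting (new) value;
// InterlockedIncrement and InterlockedDecrement already behave that way.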
ALWAYS_INLINE int atomicIncrement(int volatile* addend) {
return InterlockedIncrement(reinterpret_cast<long volatile*>(addend));
}
ALWAYS_INLINE int atomicDecrement(int volatile* addend) {
return InterlockedDecrement(reinterpret_cast<long volatile*>(addend));
}
ALWAYS_INLINE int64_t atomicIncrement(int64_t volatile* addend) {
return InterlockedIncrement64(reinterpret_cast<long long volatile*>(addend));
}
ALWAYS_INLINE int64_t atomicDecrement(int64_t volatile* addend) {
return InterlockedDecrement64(reinterpret_cast<long long volatile*>(addend));
}
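// atomicTestAndSetToOne returns the previous value (0 or 1). On MSVC,
// InterlockedExchange acts as a full memory barrier.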
ALWAYS_INLINE int atomicTestAndSetToOne(int volatile* ptr) {
int ret = InterlockedExchange(reinterpret_cast<long volatile*>(ptr), 1);
DCHECK(!ret || ret == 1);
return ret;
}
ALWAYS_INLINE void atomicSetOneToZero(int volatile* ptr) {
DCHECK_EQ(*ptr, 1);
InterlockedExchange(reinterpret_cast<long volatile*>(ptr), 0);
}
#else
// atomicAdd returns the result of the addition.
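// The __sync_add_and_fetch and __sync_sub_and_fetch builtins used below
// return the updated value and act as full memory barriers.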
ALWAYS_INLINE int atomicAdd(int volatile* addend, int increment) {
return __sync_add_and_fetch(addend, increment);
}
ALWAYS_INLINE unsigned atomicAdd(unsigned volatile* addend,
unsigned increment) {
return __sync_add_and_fetch(addend, increment);
}
ALWAYS_INLINE unsigned long atomicAdd(unsigned long volatile* addend,
unsigned long increment) {
return __sync_add_and_fetch(addend, increment);
}
// atomicSubtract returns the result of the subtraction.
ALWAYS_INLINE int atomicSubtract(int volatile* addend, int decrement) {
return __sync_sub_and_fetch(addend, decrement);
}
ALWAYS_INLINE unsigned atomicSubtract(unsigned volatile* addend,
unsigned decrement) {
return __sync_sub_and_fetch(addend, decrement);
}
ALWAYS_INLINE unsigned long atomicSubtract(unsigned long volatile* addend,
unsigned long decrement) {
return __sync_sub_and_fetch(addend, decrement);
}
ALWAYS_INLINE int atomicIncrement(int volatile* addend) {
return atomicAdd(addend, 1);
}
ALWAYS_INLINE int atomicDecrement(int volatile* addend) {
return atomicSubtract(addend, 1);
}
ALWAYS_INLINE int64_t atomicIncrement(int64_t volatile* addend) {
return __sync_add_and_fetch(addend, 1);
}
ALWAYS_INLINE int64_t atomicDecrement(int64_t volatile* addend) {
return __sync_sub_and_fetch(addend, 1);
}
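// Per the GCC documentation, __sync_lock_test_and_set is only an acquire
// barrier and __sync_lock_release is only a release barrier, which matches
// the lock-style acquire/release pairing these two helpers provide.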
ALWAYS_INLINE int atomicTestAndSetToOne(int volatile* ptr) {
int ret = __sync_lock_test_and_set(ptr, 1);
DCHECK(!ret || ret == 1);
return ret;
}
ALWAYS_INLINE void atomicSetOneToZero(int volatile* ptr) {
DCHECK_EQ(*ptr, 1);
__sync_lock_release(ptr);
}
#endif
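// Illustrative usage sketch (the s_initialized flag is hypothetical, not part
// of this header): a one-shot guard built from the test-and-set pair above.
//
//   static int s_initialized = 0;  // only ever holds 0 or 1
//   if (!atomicTestAndSetToOne(&s_initialized)) {
//     // The previous value was 0, so this caller won the race and may
//     // perform the one-time initialization here.
//   }
//   // To re-arm the guard later (the flag must currently be 1):
//   atomicSetOneToZero(&s_initialized);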
#if defined(THREAD_SANITIZER)
// The definitions below assume an LP64 data model. This is fine because
// TSan is only supported on x86_64 Linux.
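// Under LP64, long, unsigned long, and pointers are all 64 bits wide, which
// is why the overloads for those types below map to __tsan_atomic64.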
#if CPU(64BIT) && OS(LINUX)
ALWAYS_INLINE void releaseStore(volatile int* ptr, int value) {
__tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
}
ALWAYS_INLINE void releaseStore(volatile unsigned* ptr, unsigned value) {
__tsan_atomic32_store(reinterpret_cast<volatile int*>(ptr),
static_cast<int>(value), __tsan_memory_order_release);
}
ALWAYS_INLINE void releaseStore(volatile long* ptr, long value) {
__tsan_atomic64_store(reinterpret_cast<volatile __tsan_atomic64*>(ptr),
static_cast<__tsan_atomic64>(value),
__tsan_memory_order_release);
}
ALWAYS_INLINE void releaseStore(volatile unsigned long* ptr,
unsigned long value) {
__tsan_atomic64_store(reinterpret_cast<volatile __tsan_atomic64*>(ptr),
static_cast<__tsan_atomic64>(value),
__tsan_memory_order_release);
}
ALWAYS_INLINE void releaseStore(volatile unsigned long long* ptr,
unsigned long long value) {
__tsan_atomic64_store(reinterpret_cast<volatile __tsan_atomic64*>(ptr),
static_cast<__tsan_atomic64>(value),
__tsan_memory_order_release);
}
ALWAYS_INLINE void releaseStore(void* volatile* ptr, void* value) {
__tsan_atomic64_store(reinterpret_cast<volatile __tsan_atomic64*>(ptr),
reinterpret_cast<__tsan_atomic64>(value),
__tsan_memory_order_release);
}
ALWAYS_INLINE int acquireLoad(volatile const int* ptr) {
return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
}
ALWAYS_INLINE unsigned acquireLoad(volatile const unsigned* ptr) {
return static_cast<unsigned>(__tsan_atomic32_load(
reinterpret_cast<volatile const int*>(ptr), __tsan_memory_order_acquire));
}
ALWAYS_INLINE long acquireLoad(volatile const long* ptr) {
return static_cast<long>(__tsan_atomic64_load(
reinterpret_cast<volatile const __tsan_atomic64*>(ptr),
__tsan_memory_order_acquire));
}
ALWAYS_INLINE unsigned long acquireLoad(volatile const unsigned long* ptr) {
return static_cast<unsigned long>(__tsan_atomic64_load(
reinterpret_cast<volatile const __tsan_atomic64*>(ptr),
__tsan_memory_order_acquire));
}
ALWAYS_INLINE void* acquireLoad(void* volatile const* ptr) {
return reinterpret_cast<void*>(__tsan_atomic64_load(
reinterpret_cast<volatile const __tsan_atomic64*>(ptr),
__tsan_memory_order_acquire));
}
// Do not use noBarrierStore/noBarrierLoad for synchronization.
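// They are relaxed atomics: the 32-bit access itself is atomic under TSan,
// but no ordering with other memory operations is implied. The unions below
// reinterpret the float bits as a 32-bit integer for the TSan interface.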
ALWAYS_INLINE void noBarrierStore(volatile float* ptr, float value) {
static_assert(sizeof(int) == sizeof(float),
"int and float are different sizes");
union {
int ivalue;
float fvalue;
} u;
u.fvalue = value;
__tsan_atomic32_store(reinterpret_cast<volatile __tsan_atomic32*>(ptr),
u.ivalue, __tsan_memory_order_relaxed);
}
ALWAYS_INLINE float noBarrierLoad(volatile const float* ptr) {
static_assert(sizeof(int) == sizeof(float),
"int and float are different sizes");
union {
int ivalue;
float fvalue;
} u;
u.ivalue = __tsan_atomic32_load(reinterpret_cast<volatile const int*>(ptr),
__tsan_memory_order_relaxed);
return u.fvalue;
}
#endif
#else // defined(THREAD_SANITIZER)
#if CPU(X86) || CPU(X86_64)
// On x86 and x86_64 the hardware memory model already gives plain aligned
// stores release semantics and plain aligned loads acquire semantics, so only
// a compiler barrier is needed to stop the compiler from reordering.
#if COMPILER(MSVC)
// Starting with Visual Studio 2005, the compiler guarantees acquire and
// release semantics for operations on volatile variables. See the MSDN entry
// for the MemoryBarrier macro.
#define MEMORY_BARRIER()
#else
#define MEMORY_BARRIER() __asm__ __volatile__("" : : : "memory")
#endif
#elif CPU(ARM) && OS(ANDROID)
// On ARM, __sync_synchronize generates a dmb instruction, which is very
// expensive on single-core devices that don't actually need it. Avoid the
// cost by calling into the kuser_memory_barrier kernel helper.
inline void memoryBarrier() {
// Note: This is a function call, which is also an implicit compiler barrier.
typedef void (*KernelMemoryBarrierFunc)();
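// 0xffff0fa0 is the fixed address of the kuser_memory_barrier helper that the
// ARM Linux kernel maps into every process; the kernel provides the cheapest
// barrier the underlying hardware actually needs.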
((KernelMemoryBarrierFunc)0xffff0fa0)();
}
#define MEMORY_BARRIER() memoryBarrier()
#else
// Fall back to the compiler intrinsic on all other platforms.
#define MEMORY_BARRIER() __sync_synchronize()
#endif
ALWAYS_INLINE void releaseStore(volatile int* ptr, int value) {
MEMORY_BARRIER();
*ptr = value;
}
ALWAYS_INLINE void releaseStore(volatile unsigned* ptr, unsigned value) {
MEMORY_BARRIER();
*ptr = value;
}
ALWAYS_INLINE void releaseStore(volatile long* ptr, long value) {
MEMORY_BARRIER();
*ptr = value;
}
ALWAYS_INLINE void releaseStore(volatile unsigned long* ptr,
unsigned long value) {
MEMORY_BARRIER();
*ptr = value;
}
#if CPU(64BIT)
ALWAYS_INLINE void releaseStore(volatile unsigned long long* ptr,
unsigned long long value) {
MEMORY_BARRIER();
*ptr = value;
}
#endif
ALWAYS_INLINE void releaseStore(void* volatile* ptr, void* value) {
MEMORY_BARRIER();
*ptr = value;
}
ALWAYS_INLINE int acquireLoad(volatile const int* ptr) {
int value = *ptr;
MEMORY_BARRIER();
return value;
}
ALWAYS_INLINE unsigned acquireLoad(volatile const unsigned* ptr) {
unsigned value = *ptr;
MEMORY_BARRIER();
return value;
}
ALWAYS_INLINE long acquireLoad(volatile const long* ptr) {
long value = *ptr;
MEMORY_BARRIER();
return value;
}
ALWAYS_INLINE unsigned long acquireLoad(volatile const unsigned long* ptr) {
unsigned long value = *ptr;
MEMORY_BARRIER();
return value;
}
#if CPU(64BIT)
ALWAYS_INLINE unsigned long long acquireLoad(
volatile const unsigned long long* ptr) {
unsigned long long value = *ptr;
MEMORY_BARRIER();
return value;
}
#endif
ALWAYS_INLINE void* acquireLoad(void* volatile const* ptr) {
void* value = *ptr;
MEMORY_BARRIER();
return value;
}
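// Illustrative pairing sketch (s_ready and s_data are hypothetical, not part
// of this header): a releaseStore publishes all writes made before it, and a
// matching acquireLoad in another thread makes those writes visible.
//
//   // Producer thread:
//   s_data = computeData();
//   releaseStore(&s_ready, 1);
//
//   // Consumer thread:
//   if (acquireLoad(&s_ready))
//     use(s_data);  // Sees the value stored before the releaseStore.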
// Do not use noBarrierStore/noBarrierLoad for synchronization.
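// In this branch they compile to plain volatile accesses; atomicity relies on
// the platform's naturally aligned 32-bit accesses being atomic, and no
// ordering with other memory operations is implied.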
ALWAYS_INLINE void noBarrierStore(volatile float* ptr, float value) {
*ptr = value;
}
ALWAYS_INLINE float noBarrierLoad(volatile const float* ptr) {
float value = *ptr;
return value;
}
#if defined(ADDRESS_SANITIZER)
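// NO_SANITIZE_ADDRESS disables ASan instrumentation for these functions so
// they can access memory that has been poisoned without triggering a
// use-after-poison report (see the note at the bottom of this header).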
NO_SANITIZE_ADDRESS ALWAYS_INLINE void asanUnsafeReleaseStore(
volatile unsigned* ptr,
unsigned value) {
MEMORY_BARRIER();
*ptr = value;
}
NO_SANITIZE_ADDRESS ALWAYS_INLINE unsigned asanUnsafeAcquireLoad(
volatile const unsigned* ptr) {
unsigned value = *ptr;
MEMORY_BARRIER();
return value;
}
#endif // defined(ADDRESS_SANITIZER)
#undef MEMORY_BARRIER
#endif
#if !defined(ADDRESS_SANITIZER)
ALWAYS_INLINE void asanUnsafeReleaseStore(volatile unsigned* ptr,
unsigned value) {
releaseStore(ptr, value);
}
ALWAYS_INLINE unsigned asanUnsafeAcquireLoad(volatile const unsigned* ptr) {
return acquireLoad(ptr);
}
#endif
} // namespace WTF
using WTF::atomicAdd;
using WTF::atomicSubtract;
using WTF::atomicDecrement;
using WTF::atomicIncrement;
using WTF::atomicTestAndSetToOne;
using WTF::atomicSetOneToZero;
using WTF::acquireLoad;
using WTF::releaseStore;
using WTF::noBarrierLoad;
using WTF::noBarrierStore;
// These methods allow loading from and storing to poisoned memory. Only use
// them if you know what you are doing, since they will silence
// use-after-poison errors from ASan.
using WTF::asanUnsafeAcquireLoad;
using WTF::asanUnsafeReleaseStore;
#endif // Atomics_h