//===-- sanitizer_atomic_test.cpp -----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_atomic.h"
#include "gtest/gtest.h"
#ifndef __has_extension
#define __has_extension(x) 0
#endif
#ifndef ATOMIC_LLONG_LOCK_FREE
#  if __has_extension(c_atomic) || __has_extension(cxx_atomic)
#    define ATOMIC_LLONG_LOCK_FREE __CLANG_ATOMIC_LLONG_LOCK_FREE
#  elif __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7)
#    define ATOMIC_LLONG_LOCK_FREE __GCC_ATOMIC_LLONG_LOCK_FREE
#  else
#    error Unsupported compiler.
#  endif
#endif
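// Per the C11/C++11 convention, a *_LOCK_FREE value of 2 means the type is
// always lock-free, 1 means sometimes lock-free, and 0 means never lock-free.
// The 64-bit cases below are compiled only when the value is 2.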
namespace __sanitizer {
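// Places the atomic under test between two "magic" canary fields so the tests
// can detect out-of-bounds writes to adjacent memory. The static sink pointer
// keeps the object addressable so the compiler cannot scalarize it away.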
template<typename T>
struct ValAndMagic {
  typename T::Type magic0;
  T a;
  typename T::Type magic1;

  static ValAndMagic<T> *sink;
};
template<typename T>
ValAndMagic<T> *ValAndMagic<T>::sink;
template<typename T, memory_order load_mo, memory_order store_mo>
void CheckStoreLoad() {
  typedef typename T::Type Type;
  ValAndMagic<T> val;
  // Prevent the compiler from scalarizing the struct.
  ValAndMagic<T>::sink = &val;
  // Ensure that surrounding memory is not overwritten.
  val.magic0 = val.magic1 = (Type)-3;
  for (u64 i = 0; i < 100; i++) {
    // Generate a value that occupies all bytes of the variable.
    u64 v = i;
    v |= v << 8;
    v |= v << 16;
    v |= v << 32;
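    // For example, i == 0x2A yields v == 0x2A2A2A2A2A2A2A2A, which is then
    // truncated to the width of Type.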
    val.a.val_dont_use = (Type)v;
    EXPECT_EQ(atomic_load(&val.a, load_mo), (Type)v);
    val.a.val_dont_use = (Type)-1;
    atomic_store(&val.a, (Type)v, store_mo);
    EXPECT_EQ(val.a.val_dont_use, (Type)v);
  }

  EXPECT_EQ(val.magic0, (Type)-3);
  EXPECT_EQ(val.magic1, (Type)-3);
}
TEST(SanitizerCommon, AtomicStoreLoad) {
  CheckStoreLoad<atomic_uint8_t, memory_order_relaxed, memory_order_relaxed>();
  CheckStoreLoad<atomic_uint8_t, memory_order_consume, memory_order_relaxed>();
  CheckStoreLoad<atomic_uint8_t, memory_order_acquire, memory_order_relaxed>();
  CheckStoreLoad<atomic_uint8_t, memory_order_relaxed, memory_order_release>();
  CheckStoreLoad<atomic_uint8_t, memory_order_seq_cst, memory_order_seq_cst>();
  CheckStoreLoad<atomic_uint16_t, memory_order_relaxed, memory_order_relaxed>();
  CheckStoreLoad<atomic_uint16_t, memory_order_consume, memory_order_relaxed>();
  CheckStoreLoad<atomic_uint16_t, memory_order_acquire, memory_order_relaxed>();
  CheckStoreLoad<atomic_uint16_t, memory_order_relaxed, memory_order_release>();
  CheckStoreLoad<atomic_uint16_t, memory_order_seq_cst, memory_order_seq_cst>();
  CheckStoreLoad<atomic_uint32_t, memory_order_relaxed, memory_order_relaxed>();
  CheckStoreLoad<atomic_uint32_t, memory_order_consume, memory_order_relaxed>();
  CheckStoreLoad<atomic_uint32_t, memory_order_acquire, memory_order_relaxed>();
  CheckStoreLoad<atomic_uint32_t, memory_order_relaxed, memory_order_release>();
  CheckStoreLoad<atomic_uint32_t, memory_order_seq_cst, memory_order_seq_cst>();
  // Avoid falling back to the software-emulated compiler atomics that are
  // usually provided by libatomic, which is not always present.
#if ATOMIC_LLONG_LOCK_FREE == 2
  CheckStoreLoad<atomic_uint64_t, memory_order_relaxed, memory_order_relaxed>();
  CheckStoreLoad<atomic_uint64_t, memory_order_consume, memory_order_relaxed>();
  CheckStoreLoad<atomic_uint64_t, memory_order_acquire, memory_order_relaxed>();
  CheckStoreLoad<atomic_uint64_t, memory_order_relaxed, memory_order_release>();
  CheckStoreLoad<atomic_uint64_t, memory_order_seq_cst, memory_order_seq_cst>();
#endif
  CheckStoreLoad<atomic_uintptr_t, memory_order_relaxed, memory_order_relaxed>
      ();
  CheckStoreLoad<atomic_uintptr_t, memory_order_consume, memory_order_relaxed>
      ();
  CheckStoreLoad<atomic_uintptr_t, memory_order_acquire, memory_order_relaxed>
      ();
  CheckStoreLoad<atomic_uintptr_t, memory_order_relaxed, memory_order_release>
      ();
  CheckStoreLoad<atomic_uintptr_t, memory_order_seq_cst, memory_order_seq_cst>
      ();
}
// Clang crashes while compiling this test for Android:
// http://llvm.org/bugs/show_bug.cgi?id=15587
#if !SANITIZER_ANDROID
template<typename T>
void CheckAtomicCompareExchange() {
  typedef typename T::Type Type;
  {
    Type old_val = 42;
    Type new_val = 24;
    Type var = old_val;
    EXPECT_TRUE(atomic_compare_exchange_strong((T*)&var, &old_val, new_val,
                                               memory_order_relaxed));
    EXPECT_FALSE(atomic_compare_exchange_strong((T*)&var, &old_val, new_val,
                                                memory_order_relaxed));
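    // A failed compare-exchange stores the current value of var (which is now
    // new_val) into old_val; the check below relies on that.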
    EXPECT_EQ(new_val, old_val);
  }
  {
    Type old_val = 42;
    Type new_val = 24;
    Type var = old_val;
    EXPECT_TRUE(atomic_compare_exchange_weak((T*)&var, &old_val, new_val,
                                             memory_order_relaxed));
    EXPECT_FALSE(atomic_compare_exchange_weak((T*)&var, &old_val, new_val,
                                              memory_order_relaxed));
    EXPECT_EQ(new_val, old_val);
  }
}
TEST(SanitizerCommon, AtomicCompareExchangeTest) {
  CheckAtomicCompareExchange<atomic_uint8_t>();
  CheckAtomicCompareExchange<atomic_uint16_t>();
  CheckAtomicCompareExchange<atomic_uint32_t>();
#if ATOMIC_LLONG_LOCK_FREE == 2
  CheckAtomicCompareExchange<atomic_uint64_t>();
#endif
  CheckAtomicCompareExchange<atomic_uintptr_t>();
}
#endif //!SANITIZER_ANDROID
} // namespace __sanitizer