//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef TEST_STD_ATOMICS_ATOMIC_REF_TEST_HELPER_H
#define TEST_STD_ATOMICS_ATOMIC_REF_TEST_HELPER_H

#include <atomic>
#include <cassert>
#include <cmath>
#include <type_traits>
#include <vector>

#include "test_macros.h"

#ifndef TEST_HAS_NO_THREADS
#  include "make_test_thread.h"
#  include <thread>
#endif

template <class T>
bool equals(T x, T y) {
  return x == y;
}

template <class T>
T make_value(int i) {
  assert(i == 0 || i == 1);
  if constexpr (std::is_pointer_v<T>) {
    // So that pointers returned can be subtracted from one another
    static std::remove_const_t<std::remove_pointer_t<T>> d[2];
    return &d[i];
  } else {
    return T(i);
  }
}
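
// A minimal illustration (hypothetical, not used by the tests themselves):
// for pointer types, both calls share the one static array above, so the two
// values returned are adjacent and can be subtracted from one another.
inline void example_make_value_pointers() {
  int* p0 = make_value<int*>(0);
  int* p1 = make_value<int*>(1);
  assert(p1 - p0 == 1);
}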

// Test that all threads see the same order for the two stores.
// The test always passes if store_op and load_op access the memory
// with seq_cst ordering; a weaker ordering can allow the two
// observer threads to disagree about which store happened first.
template <class T, class StoreOp, class LoadOp>
void test_seq_cst(StoreOp store_op, LoadOp load_op) {
#ifndef TEST_HAS_NO_THREADS
  for (int i = 0; i < 100; ++i) {
    T old_value(make_value<T>(0));
    T new_value(make_value<T>(1));

    T copy_x = old_value;
    std::atomic_ref<T> const x(copy_x);
    T copy_y = old_value;
    std::atomic_ref<T> const y(copy_y);

    std::atomic_bool x_updated_first(false);
    std::atomic_bool y_updated_first(false);

    // t1 and t2 perform the two stores; t3 and t4 each wait for one of
    // the stores and then check whether the other is visible.
    auto t1 = support::make_test_thread([&] { store_op(x, old_value, new_value); });
    auto t2 = support::make_test_thread([&] { store_op(y, old_value, new_value); });

    auto t3 = support::make_test_thread([&] {
      while (!equals(load_op(x), new_value)) {
        std::this_thread::yield();
      }
      if (!equals(load_op(y), new_value)) {
        x_updated_first.store(true, std::memory_order_relaxed);
      }
    });

    auto t4 = support::make_test_thread([&] {
      while (!equals(load_op(y), new_value)) {
        std::this_thread::yield();
      }
      if (!equals(load_op(x), new_value)) {
        y_updated_first.store(true, std::memory_order_relaxed);
      }
    });

    t1.join();
    t2.join();
    t3.join();
    t4.join();

    // Threads 3 and 4 must not see the stores to x and y in different orders.
    assert(!(x_updated_first && y_updated_first));
  }
#else
  (void)store_op;
  (void)load_op;
#endif
}
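
// Example usage (a sketch, not part of the helper API; the function name is
// hypothetical): exercising test_seq_cst with plain loads and stores, whose
// default memory order is seq_cst.
inline void example_test_seq_cst_int() {
  test_seq_cst<int>([](std::atomic_ref<int> const& a, int, int new_val) { a.store(new_val); },
                    [](std::atomic_ref<int> const& a) { return a.load(); });
}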

// Test that all writes made before the store are visible to other
// threads after the load. The test always passes if store_op and
// load_op access the memory with acquire-release (or stronger) ordering.
template <class T, class StoreOp, class LoadOp>
void test_acquire_release(StoreOp store_op, LoadOp load_op) {
#ifndef TEST_HAS_NO_THREADS
  for (auto i = 0; i < 100; ++i) {
    T old_value(make_value<T>(0));
    T new_value(make_value<T>(1));

    T copy = old_value;
    std::atomic_ref<T> const at(copy);
    int non_atomic = 5;

    constexpr auto number_of_threads = 8;
    std::vector<std::thread> threads;
    threads.reserve(number_of_threads);

    for (auto j = 0; j < number_of_threads; ++j) {
      threads.push_back(support::make_test_thread([&at, &non_atomic, load_op, new_value] {
        while (!equals(load_op(at), new_value)) {
          std::this_thread::yield();
        }
        // The main thread's write to non_atomic, made before the release
        // store, must be visible to this thread's read after the acquire load.
        assert(non_atomic == 6);
      }));
    }

    non_atomic = 6;
    store_op(at, old_value, new_value);

    for (auto& thread : threads) {
      thread.join();
    }
  }
#else
  (void)store_op;
  (void)load_op;
#endif
}
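
// Example usage (a sketch, not part of the helper API; the function name is
// hypothetical): a release store paired with an acquire load is the weakest
// ordering for which test_acquire_release is guaranteed to pass.
inline void example_test_acquire_release_int() {
  test_acquire_release<int>(
      [](std::atomic_ref<int> const& a, int, int new_val) { a.store(new_val, std::memory_order_release); },
      [](std::atomic_ref<int> const& a) { return a.load(std::memory_order_acquire); });
}
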
#endif // TEST_STD_ATOMICS_ATOMIC_REF_TEST_HELPER_H