#ifndef __MCSLOCK_H__
#define __MCSLOCK_H__
#include <atomic>
#include <sched.h>
/*
 * Exponential backoff for spin loops.
 * Based on TBB's atomic_backoff:
 * https://github.com/oneapi-src/oneTBB/blob/60b7d0a78f8910976678ba63a19fdaee22c0ef65/include/tbb/tbb_machine.h
 */
class cpu_backoff {
public:
    cpu_backoff() : count(1) {}

    // Spin for an exponentially growing number of short pauses; once the
    // budget is exhausted, yield the CPU to the scheduler instead.
    inline void pause() {
        if (count <= LOOPS_BEFORE_YIELD) {
            for (int32_t i = 0; i < count; i++) {
#if defined(__aarch64__)
                __asm__ __volatile__("yield" ::: "memory");
#elif defined(__ppc__)
                __asm__ __volatile__("or 27,27,27" ::: "memory");
#elif defined(__x86_64__)
                __asm__ __volatile__("pause" ::: "memory");
#else
                // No architecture-specific pause instruction available; just spin.
#endif
            }
            count *= 2;
        } else {
            sched_yield();
        }
    }

private:
    static const int32_t LOOPS_BEFORE_YIELD = 16;
    int32_t count;
};
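/*
 * Illustrative usage sketch (not part of this header; try_acquire() is a
 * hypothetical predicate standing in for any spin-until-true condition):
 *
 *   cpu_backoff backoff;
 *   while (!try_acquire())
 *       backoff.pause();   // short pauses at first, sched_yield() once count exceeds 16
 */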
// Simple test-and-set spin lock; lock()/unlock() are defined out of line.
class spin_lock {
    std::atomic_flag flag = ATOMIC_FLAG_INIT;  // clear state == unlocked

public:
    spin_lock() = default;
    void lock();
    void unlock();
};
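/*
 * A minimal sketch of how lock()/unlock() could be implemented in the
 * accompanying source file (the real definitions are not shown in this header):
 *
 *   void spin_lock::lock() {
 *       cpu_backoff backoff;
 *       while (flag.test_and_set(std::memory_order_acquire))
 *           backoff.pause();
 *   }
 *
 *   void spin_lock::unlock() {
 *       flag.clear(std::memory_order_release);
 *   }
 */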
class mcs_lock {
public:
    mcs_lock() : q(nullptr) {}

    // Per-thread queue node; the caller owns it (typically on its stack) and
    // must keep it alive between the matching lock() and unlock() calls.
    struct mcs_node {
        std::atomic<mcs_node *> next;
        std::atomic_bool unlocked;
    };

    void lock(mcs_node &node);
    void unlock(mcs_node &node);

    typedef std::atomic<mcs_node *> mcs_node_ptr;

private:
    // Spin (with exponential backoff) while `value` still compares equal to `expected`.
    void spin_while_eq(const volatile mcs_node_ptr &value, const volatile mcs_node *expected) {
        cpu_backoff backoff;
        while (value.load(std::memory_order_acquire) == expected)
            backoff.pause();
    }

    void spin_while_eq(const volatile std::atomic_bool &value, bool expected) {
        cpu_backoff backoff;
        while (value.load(std::memory_order_acquire) == expected)
            backoff.pause();
    }

    // Tail of the waiter queue; nullptr means the lock is free.
    std::atomic<mcs_node *> q;
};
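/*
 * Usage sketch (illustrative only). Each thread supplies its own mcs_node,
 * typically on its stack, and must pass the same node to the matching
 * unlock(). The `guarded_counter` and `critical_section()` names below are
 * hypothetical:
 *
 *   mcs_lock m;
 *   int guarded_counter = 0;
 *
 *   void critical_section() {
 *       mcs_lock::mcs_node node;
 *       m.lock(node);      // append node at the queue tail `q`, then spin on
 *                          // node.unlocked until the predecessor hands over
 *       ++guarded_counter;
 *       m.unlock(node);    // pass the lock to node.next, or reset q to nullptr
 *   }
 *
 * Because each waiter spins on a flag in its own node rather than on a shared
 * word, an MCS lock avoids the cache-line ping-pong a plain spin lock suffers
 * under contention.
 */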
#endif // __MCSLOCK_H__