//===-- scudo_tsd.h ---------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Scudo thread specific data definition.
/// Implementation will differ based on the thread local storage primitives
/// offered by the underlying platform.
///
//===----------------------------------------------------------------------===//
#ifndef SCUDO_TSD_H_
#define SCUDO_TSD_H_
#include "scudo_allocator.h"
#include "scudo_utils.h"
#include <pthread.h>
namespace __scudo {
// Per-thread data for the Scudo allocator: a thread-local allocator cache
// plus a placeholder for the quarantine cache. The spin mutex allows a TSD
// to be safely shared between threads in the shared TSD model.
struct ALIGNED(64) ScudoTSD {
  AllocatorCache Cache;
  uptr QuarantineCachePlaceHolder[4];

  void init(bool Shared);
  void commitBack();

  // Attempts to acquire the mutex without blocking. On contention, records a
  // "precedence" timestamp (first contended attempt only) so callers can
  // prefer the least recently contended TSD among several candidates.
  INLINE bool tryLock() {
    if (Mutex.TryLock()) {
      atomic_store_relaxed(&Precedence, 0);
      return true;
    }
    if (atomic_load_relaxed(&Precedence) == 0)
      atomic_store_relaxed(&Precedence, MonotonicNanoTime());
    return false;
  }

  // Blocking acquire; clears the contention timestamp once held.
  INLINE void lock() {
    Mutex.Lock();
    atomic_store_relaxed(&Precedence, 0);
  }

  // Releases the mutex. This must be unconditional: lock() and tryLock()
  // always acquire Mutex, so an early-out here (the previous UnlockRequired
  // check) would leak the lock and deadlock the next locker.
  INLINE void unlock() {
    Mutex.Unlock();
  }

  // Timestamp of the first contended tryLock() since the last successful
  // acquisition, or 0 if uncontended.
  INLINE u64 getPrecedence() {
    return atomic_load_relaxed(&Precedence);
  }

private:
  // NOTE(review): no longer read by unlock(); kept only because init()
  // (defined out of view) may still write it -- confirm and remove if dead.
  bool UnlockRequired;
  StaticSpinMutex Mutex;
  atomic_uint64_t Precedence;
};
void initThread(bool MinimalInit);
// TSD model specific fastpath functions definitions.
#include "scudo_tsd_exclusive.inc"
#include "scudo_tsd_shared.inc"
} // namespace __scudo
#endif // SCUDO_TSD_H_