//===-- scudo_tsd.h ---------------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Scudo thread specific data definition.
/// Implementation will differ based on the thread local storage primitives
/// offered by the underlying platform.
///
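/// Two TSD models are provided: an exclusive model where each thread owns its
/// own TSD (scudo_tsd_exclusive.inc), and a shared model where threads draw
/// from a pool of TSDs (scudo_tsd_shared.inc); the model is selected at
/// compile time.
///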
//===----------------------------------------------------------------------===//

#ifndef SCUDO_TSD_H_
#define SCUDO_TSD_H_

#include "scudo_allocator.h"
#include "scudo_utils.h"

#include <pthread.h>

namespace __scudo {

struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) ScudoTSD {
  AllocatorCacheT Cache;  // Thread-local cache for the backend allocator.
  // Opaque storage reinterpreted by the allocator as its quarantine cache.
  uptr QuarantineCachePlaceHolder[4];

  void init();
  void commitBack();
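
  // Attempts to acquire the TSD without blocking. On failure, Precedence
  // records a coarse timestamp of when contention on this TSD began (set only
  // once per contention episode, and cleared again on lock() or a successful
  // tryLock()), so that the shared TSD model's slow path can compare contended
  // TSDs when deciding which one to block on.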
  INLINE bool tryLock() {
    if (Mutex.TryLock()) {
      atomic_store_relaxed(&Precedence, 0);
      return true;
    }
    if (atomic_load_relaxed(&Precedence) == 0)
      atomic_store_relaxed(&Precedence, static_cast<uptr>(
          MonotonicNanoTime() >> FIRST_32_SECOND_64(16, 0)));
    return false;
  }

  INLINE void lock() {
    atomic_store_relaxed(&Precedence, 0);
    Mutex.Lock();
  }

  INLINE void unlock() { Mutex.Unlock(); }

  INLINE uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }

 private:
  StaticSpinMutex Mutex;
  atomic_uintptr_t Precedence;
};

// Per-thread initialization, implemented by the active TSD model.
void initThread(bool MinimalInit);

// TSD model specific fastpath functions definitions.
#include "scudo_tsd_exclusive.inc"
#include "scudo_tsd_shared.inc"

}  // namespace __scudo

#endif  // SCUDO_TSD_H_