#ifndef Py_INTERNAL_CRITICAL_SECTION_H
#define Py_INTERNAL_CRITICAL_SECTION_H
#ifndef Py_BUILD_CORE
#  error "this header requires Py_BUILD_CORE define"
#endif

#include "pycore_lock.h"        // PyMutex
#include "pycore_pystate.h"     // _PyThreadState_GET()
#include <stdint.h>

#ifdef __cplusplus
extern "C" {
#endif

// Tagged pointers to critical sections use the two least significant bits to
// mark if the pointed-to critical section is inactive and whether it is a
// PyCriticalSection2 object.
#define _Py_CRITICAL_SECTION_INACTIVE 0x1
#define _Py_CRITICAL_SECTION_TWO_MUTEXES 0x2
#define _Py_CRITICAL_SECTION_MASK 0x3
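
// For example (illustrative sketch), a suspended two-mutex critical section
// is stored in PyThreadState.critical_section as
//
//     uintptr_t tag = (uintptr_t)cs                       // aligned pointer
//                     | _Py_CRITICAL_SECTION_TWO_MUTEXES  // PyCriticalSection2
//                     | _Py_CRITICAL_SECTION_INACTIVE;    // currently suspended
//
// and the pointer is recovered by masking off the tag bits:
//
//     PyCriticalSection2 *cs2 =
//         (PyCriticalSection2 *)(tag & ~_Py_CRITICAL_SECTION_MASK);
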
#ifdef Py_GIL_DISABLED
# define Py_BEGIN_CRITICAL_SECTION_MUT(mutex)                           \
    {                                                                   \
        PyCriticalSection _py_cs;                                       \
        _PyCriticalSection_BeginMutex(&_py_cs, mutex)

# define Py_BEGIN_CRITICAL_SECTION2_MUT(m1, m2)                         \
    {                                                                   \
        PyCriticalSection2 _py_cs2;                                     \
        _PyCriticalSection2_BeginMutex(&_py_cs2, m1, m2)
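
// Example usage (illustrative sketch; Py_END_CRITICAL_SECTION() from the
// public critical section header closes the block opened above):
//
//     Py_BEGIN_CRITICAL_SECTION_MUT(&mutex);
//     // ... read or mutate the state guarded by `mutex` ...
//     Py_END_CRITICAL_SECTION();
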
// Specialized version of critical section locking to safely use
// PySequence_Fast APIs without the GIL. For performance, the argument *to*
// PySequence_Fast() is provided to the macro, not the *result* of
// PySequence_Fast(), which would require an extra test to determine if the
// lock must be acquired.
# define Py_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(original)              \
    {                                                                   \
        PyObject *_orig_seq = _PyObject_CAST(original);                 \
        const bool _should_lock_cs = PyList_CheckExact(_orig_seq);      \
        PyCriticalSection _cs;                                          \
        if (_should_lock_cs) {                                          \
            _PyCriticalSection_Begin(&_cs, _orig_seq);                  \
        }

# define Py_END_CRITICAL_SECTION_SEQUENCE_FAST()                        \
        if (_should_lock_cs) {                                          \
            PyCriticalSection_End(&_cs);                                \
        }                                                               \
    }
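
// Example usage (illustrative sketch; `seq` is the object that will be
// passed to PySequence_Fast(), not its result; error handling omitted):
//
//     Py_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(seq);
//     PyObject *fast = PySequence_Fast(seq, "argument must be iterable");
//     Py_ssize_t n = PySequence_Fast_GET_SIZE(fast);
//     // ... use PySequence_Fast_GET_ITEM(fast, i) for i < n ...
//     Py_DECREF(fast);
//     Py_END_CRITICAL_SECTION_SEQUENCE_FAST();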

// Asserts that the mutex is locked. The mutex must be held by the
// top-most critical section; otherwise the mutex could be swapped out
// (temporarily released while the section is suspended) on some code paths.
#define _Py_CRITICAL_SECTION_ASSERT_MUTEX_LOCKED(mutex) \
    _PyCriticalSection_AssertHeld(mutex)

// Asserts that the mutex for the given object is locked. The mutex must
// be held by the top-most critical section; otherwise the mutex could be
// swapped out (temporarily released while the section is suspended) on
// some code paths.
#ifdef Py_DEBUG
# define _Py_CRITICAL_SECTION_ASSERT_OBJECT_LOCKED(op)                            \
    if (Py_REFCNT(op) != 1) {                                                     \
        _Py_CRITICAL_SECTION_ASSERT_MUTEX_LOCKED(&_PyObject_CAST(op)->ob_mutex);  \
    }
#else /* Py_DEBUG */
# define _Py_CRITICAL_SECTION_ASSERT_OBJECT_LOCKED(op)
#endif /* Py_DEBUG */
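
// Example (illustrative sketch): a hypothetical helper whose callers are
// required to hold the per-object lock can document and check that
// precondition in debug builds:
//
//     static void
//     set_flag_locked(PyObject *op)   // hypothetical helper
//     {
//         _Py_CRITICAL_SECTION_ASSERT_OBJECT_LOCKED(op);
//         // ... mutate state guarded by op->ob_mutex ...
//     }
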
#else /* !Py_GIL_DISABLED */
// The critical section APIs are no-ops with the GIL.
# define Py_BEGIN_CRITICAL_SECTION_MUT(mut) {
# define Py_BEGIN_CRITICAL_SECTION2_MUT(m1, m2) {
# define Py_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(original) {
# define Py_END_CRITICAL_SECTION_SEQUENCE_FAST() }
# define _Py_CRITICAL_SECTION_ASSERT_MUTEX_LOCKED(mutex)
# define _Py_CRITICAL_SECTION_ASSERT_OBJECT_LOCKED(op)
#endif /* !Py_GIL_DISABLED */

// Resumes the top-most critical section.
PyAPI_FUNC(void)
_PyCriticalSection_Resume(PyThreadState *tstate);

// (private) slow path for locking the mutex
PyAPI_FUNC(void)
_PyCriticalSection_BeginSlow(PyCriticalSection *c, PyMutex *m);

PyAPI_FUNC(void)
_PyCriticalSection2_BeginSlow(PyCriticalSection2 *c, PyMutex *m1, PyMutex *m2,
                              int is_m1_locked);

PyAPI_FUNC(void)
_PyCriticalSection_SuspendAll(PyThreadState *tstate);

#ifdef Py_GIL_DISABLED

static inline int
_PyCriticalSection_IsActive(uintptr_t tag)
{
    return tag != 0 && (tag & _Py_CRITICAL_SECTION_INACTIVE) == 0;
}

static inline void
_PyCriticalSection_BeginMutex(PyCriticalSection *c, PyMutex *m)
{
    if (PyMutex_LockFast(&m->_bits)) {
        PyThreadState *tstate = _PyThreadState_GET();
        c->_cs_mutex = m;
        c->_cs_prev = tstate->critical_section;
        tstate->critical_section = (uintptr_t)c;
    }
    else {
        _PyCriticalSection_BeginSlow(c, m);
    }
}

static inline void
_PyCriticalSection_Begin(PyCriticalSection *c, PyObject *op)
{
    _PyCriticalSection_BeginMutex(c, &op->ob_mutex);
}
#define PyCriticalSection_Begin _PyCriticalSection_Begin

// Removes the top-most critical section from the thread's stack of critical
// sections. If the new top-most critical section is inactive, then it is
// resumed.
static inline void
_PyCriticalSection_Pop(PyCriticalSection *c)
{
    PyThreadState *tstate = _PyThreadState_GET();
    uintptr_t prev = c->_cs_prev;
    tstate->critical_section = prev;
    if ((prev & _Py_CRITICAL_SECTION_INACTIVE) != 0) {
        _PyCriticalSection_Resume(tstate);
    }
}

static inline void
_PyCriticalSection_End(PyCriticalSection *c)
{
    PyMutex_Unlock(c->_cs_mutex);
    _PyCriticalSection_Pop(c);
}
#define PyCriticalSection_End _PyCriticalSection_End
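
// Nesting example (illustrative sketch): beginning a second critical
// section pushes the active one onto the thread's stack. If the inner
// Begin blocks, the outer section is suspended (its mutex released) and
// later resumed by _PyCriticalSection_Pop() when the inner section ends:
//
//     PyCriticalSection outer, inner;
//     _PyCriticalSection_Begin(&outer, obj_a);
//     _PyCriticalSection_Begin(&inner, obj_b);  // may suspend `outer`
//     // obj_b is locked here, but obj_a is not guaranteed to still be
//     // locked: `outer` may have been suspended while acquiring `inner`.
//     _PyCriticalSection_End(&inner);           // resumes `outer` if needed
//     _PyCriticalSection_End(&outer);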

static inline void
_PyCriticalSection2_BeginMutex(PyCriticalSection2 *c, PyMutex *m1, PyMutex *m2)
{
    if (m1 == m2) {
        // If the two mutex arguments are the same, treat this as a critical
        // section with a single mutex.
        c->_cs_mutex2 = NULL;
        _PyCriticalSection_BeginMutex(&c->_cs_base, m1);
        return;
    }

    if ((uintptr_t)m2 < (uintptr_t)m1) {
        // Sort the mutexes so that the lower address is locked first.
        // The exact order does not matter, but we need to acquire the mutexes
        // in a consistent order to avoid lock-ordering deadlocks.
        PyMutex *tmp = m1;
        m1 = m2;
        m2 = tmp;
    }

    if (PyMutex_LockFast(&m1->_bits)) {
        if (PyMutex_LockFast(&m2->_bits)) {
            PyThreadState *tstate = _PyThreadState_GET();
            c->_cs_base._cs_mutex = m1;
            c->_cs_mutex2 = m2;
            c->_cs_base._cs_prev = tstate->critical_section;

            uintptr_t p = (uintptr_t)c | _Py_CRITICAL_SECTION_TWO_MUTEXES;
            tstate->critical_section = p;
        }
        else {
            _PyCriticalSection2_BeginSlow(c, m1, m2, 1);
        }
    }
    else {
        _PyCriticalSection2_BeginSlow(c, m1, m2, 0);
    }
}

static inline void
_PyCriticalSection2_Begin(PyCriticalSection2 *c, PyObject *a, PyObject *b)
{
    _PyCriticalSection2_BeginMutex(c, &a->ob_mutex, &b->ob_mutex);
}
#define PyCriticalSection2_Begin _PyCriticalSection2_Begin

static inline void
_PyCriticalSection2_End(PyCriticalSection2 *c)
{
    // _cs_mutex2 is NULL if both mutex arguments were the same mutex.
    if (c->_cs_mutex2) {
        PyMutex_Unlock(c->_cs_mutex2);
    }
    PyMutex_Unlock(c->_cs_base._cs_mutex);
    _PyCriticalSection_Pop(&c->_cs_base);
}
#define PyCriticalSection2_End _PyCriticalSection2_End
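
// Example usage (illustrative sketch): lock two objects at once, e.g. to
// copy state from one to the other. The mutexes are acquired in address
// order regardless of argument order, so two threads locking the same pair
// of objects cannot deadlock against each other:
//
//     PyCriticalSection2 cs;
//     _PyCriticalSection2_Begin(&cs, obj_a, obj_b);
//     // ... both obj_a and obj_b are locked here ...
//     _PyCriticalSection2_End(&cs);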

static inline void
_PyCriticalSection_AssertHeld(PyMutex *mutex)
{
#ifdef Py_DEBUG
    PyThreadState *tstate = _PyThreadState_GET();
    uintptr_t prev = tstate->critical_section;
    if (prev & _Py_CRITICAL_SECTION_TWO_MUTEXES) {
        PyCriticalSection2 *cs =
            (PyCriticalSection2 *)(prev & ~_Py_CRITICAL_SECTION_MASK);
        assert(cs != NULL &&
               (cs->_cs_base._cs_mutex == mutex || cs->_cs_mutex2 == mutex));
    }
    else {
        PyCriticalSection *cs =
            (PyCriticalSection *)(prev & ~_Py_CRITICAL_SECTION_MASK);
        assert(cs != NULL && cs->_cs_mutex == mutex);
    }
#endif
}

#endif /* Py_GIL_DISABLED */
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_CRITICAL_SECTION_H */