File: spinlock.h

// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)

#ifndef CDSLIB_SYNC_SPINLOCK_H
#define CDSLIB_SYNC_SPINLOCK_H

#include <cds/algo/atomic.h>
#include <cds/os/thread.h>
#include <cds/algo/backoff_strategy.h>

namespace cds {
    /// Synchronization primitives
    namespace sync {
        /// Spin lock
        /**
            Simple and lightweight spin-lock critical section.
            It is useful for protecting small (short-lived) pieces of code.

            Algorithm:

                TATAS (test-and-test-and-set)
                [1984] L. Rudolph, Z. Segall. Dynamic Decentralized Cache Schemes for MIMD Parallel Processors.

            No fairness is guaranteed: any of the waiting threads may acquire the spin-lock.
            This spin-lock is NOT recursive: the thread that owns the lock cannot call \p lock() again without deadlocking.
            The \p unlock() method may be called by any thread.

            DEBUG version: the spin-lock stores the owner thread id. An assertion is raised on:
                - a double lock attempt by the same thread (deadlock)
                - an unlock by a thread other than the owner

            While the spin-lock is busy, the \p Backoff algorithm is invoked. The predefined \p backoff::LockDefault class
            yields the current thread and repeats the lock attempt later.

            Template parameters:
                - \p Backoff - backoff strategy, used while the spin-lock is busy
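
            Usage sketch (illustrative only; \p cds::sync::spin, defined below, is a convenience
            typedef for this template instantiated with the default back-off strategy):
            \code
            #include <cds/sync/spinlock.h>

            cds::sync::spin_lock< cds::backoff::LockDefault > theLock;

            void short_critical_section()
            {
                theLock.lock();
                // ... short, non-recursive critical section ...
                theLock.unlock();
            }
            \endcode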
        */
        template <typename Backoff >
        class spin_lock
        {
        public:
            typedef Backoff backoff_strategy;   ///< back-off strategy type
        private:
            atomics::atomic<bool>    m_spin;    ///< Spin
#    ifdef CDS_DEBUG
            typename OS::ThreadId    m_dbgOwnerId; ///< Owner thread id (only for debug mode)
#    endif

        public:
            /// Construct free (unlocked) spin-lock
            spin_lock() noexcept
#    ifdef CDS_DEBUG
                : m_dbgOwnerId( OS::c_NullThreadId )
#    endif
            {
                m_spin.store( false, atomics::memory_order_release );
            }

            /// Construct spin-lock in specified state
            /**
                In debug mode: if \p bLocked = true then the spin-lock is created as owned by the current thread
            */
            explicit spin_lock( bool bLocked ) noexcept
#    ifdef CDS_DEBUG
                : m_dbgOwnerId( bLocked ? cds::OS::get_current_thread_id() : cds::OS::c_NullThreadId )
#    endif
            {
                m_spin.store( bLocked, atomics::memory_order_release );
            }

            /// Dummy copy constructor
            /**
                The ctor initializes the spin to the free (unlocked) state, like the default ctor.
            */
            spin_lock(const spin_lock<Backoff>& ) noexcept
                : m_spin( false )
#   ifdef CDS_DEBUG
                , m_dbgOwnerId( cds::OS::c_NullThreadId )
#   endif
            {
                CDS_TSAN_ANNOTATE_MUTEX_CREATE( this );
            }

            /// Destructor. In debug mode it checks that the spin-lock is free
            ~spin_lock()
            {
                assert( !m_spin.load( atomics::memory_order_relaxed ));
                CDS_TSAN_ANNOTATE_MUTEX_DESTROY( this );
            }

            /// Check if the spin is locked
            bool is_locked() const noexcept
            {
                return m_spin.load( atomics::memory_order_relaxed );
            }

            /// Try to lock the object
            /**
                Returns \p true if locking succeeded,
                otherwise (if the spin is already locked) returns \p false.

                Debug version: a deadlock can be detected
            */
            bool try_lock() noexcept
            {
#           ifdef CDS_THREAD_SANITIZER_ENABLED
                bool bCurrent = m_spin.exchange( true, atomics::memory_order_acq_rel );
                if ( !bCurrent )
                    CDS_TSAN_ANNOTATE_MUTEX_ACQUIRED( this );
#           else
                bool bCurrent = m_spin.exchange( true, atomics::memory_order_acquire );
#           endif

                CDS_DEBUG_ONLY(
                    if ( !bCurrent ) {
                        m_dbgOwnerId = OS::get_current_thread_id();
                    }
                )
                return !bCurrent;
            }

            /// Try to lock the object, making up to \p nTryCount attempts
            /**
                Returns \p true if locking succeeded,
                otherwise (if all attempts failed because the spin is locked) returns \p false
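
                Usage sketch (illustrative only):
                \code
                cds::sync::spin sl;
                if ( sl.try_lock( 100 )) {  // at most 100 attempts, with back-off between failures
                    // ... critical section ...
                    sl.unlock();
                }
                else {
                    // the lock could not be acquired; take a fallback path
                }
                \endcode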
            */
            bool try_lock( unsigned int nTryCount ) noexcept( noexcept( backoff_strategy()()))
            {
                backoff_strategy backoff;
                while ( nTryCount-- ) {
                    if ( try_lock())
                        return true;
                    backoff();
                }
                return false;
            }

            /// Lock the spin-lock. Waits indefinitely while the spin-lock is busy. Debug version: deadlock may be detected
            void lock() noexcept(noexcept( backoff_strategy()()))
            {
                backoff_strategy backoff;

                // Deadlock detection (debug mode): the current thread must not already own the lock
                CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
                assert( m_dbgOwnerId != OS::get_current_thread_id());
                CDS_TSAN_ANNOTATE_IGNORE_READS_END;

                // TATAS algorithm
                while ( !try_lock()) {
                    while ( m_spin.load( atomics::memory_order_acquire ))
                        backoff();
                }

                assert( m_dbgOwnerId == OS::get_current_thread_id());
            }

            /// Unlock the spin-lock. Debug version: unlocking by a thread other than the owner is detected
            void unlock() noexcept
            {
                assert( m_spin.load( atomics::memory_order_relaxed ));
                assert( m_dbgOwnerId == OS::get_current_thread_id());
                CDS_DEBUG_ONLY( m_dbgOwnerId = OS::c_NullThreadId; )

                CDS_TSAN_ANNOTATE_MUTEX_RELEASED( this );
                m_spin.store( false, atomics::memory_order_release );
            }
        };

        /// Default spin-lock implementation for the current platform
        typedef spin_lock<backoff::LockDefault > spin;
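
        // Usage sketch (illustrative only): \p spin provides \p lock(), \p unlock() and \p try_lock(),
        // so it satisfies the standard Lockable requirements and works with RAII guards:
        //
        //      #include <mutex>    // std::lock_guard
        //
        //      static cds::sync::spin s_lock;
        //      static int s_counter = 0;   // some shared data (hypothetical)
        //
        //      void increment()
        //      {
        //          std::lock_guard<cds::sync::spin> g( s_lock );
        //          ++s_counter;            // protected by the spin-lock
        //      }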

        /// Recursive spin lock.
        /**
            Allows recursive calls: the owner thread may recursively enter the critical section guarded by the spin-lock.

            Template parameters:
                - \p Integral       one of the integral atomic types: <tt>unsigned int</tt>, \p int, and others
                - \p Backoff        backoff strategy. Used when spin lock is locked
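
            Usage sketch (illustrative only): the owner thread may acquire the lock recursively;
            each \p lock() must be balanced by a matching \p unlock():
            \code
            cds::sync::reentrant_spin32 rsl;    // typedef defined below in this file

            void inner()
            {
                rsl.lock();     // recursive acquire: the lock counter becomes 2
                // ...
                rsl.unlock();   // the counter drops back to 1
            }

            void outer()
            {
                rsl.lock();     // first acquire: the counter becomes 1
                inner();
                rsl.unlock();   // the counter drops to 0 and the lock is released
            }
            \endcode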
        */
        template <typename Integral, class Backoff>
        class reentrant_spin_lock
        {
            typedef OS::ThreadId    thread_id;          ///< The type of thread id

        public:
            typedef Integral        integral_type;      ///< The integral type
            typedef Backoff         backoff_strategy;   ///< The backoff type

        private:
            //@cond
            atomics::atomic<integral_type>  m_spin;    ///< spin-lock atomic
            atomics::atomic<thread_id>      m_OwnerId; ///< Owner thread id. If the spin-lock is not locked, it usually equals \p OS::c_NullThreadId
            //@endcond

        private:
            //@cond
            void take( thread_id tid ) noexcept
            {
                m_OwnerId.store( tid, atomics::memory_order_relaxed );
            }

            void free() noexcept
            {
                m_OwnerId.store( OS::c_NullThreadId, atomics::memory_order_relaxed );
            }

            bool is_taken( thread_id tid ) const noexcept
            {
                return m_OwnerId.load( atomics::memory_order_relaxed ) == tid;
            }

            bool try_taken_lock( thread_id tid ) noexcept
            {
                if ( is_taken( tid )) {
                    m_spin.fetch_add( 1, atomics::memory_order_relaxed );
                    return true;
                }
                return false;
            }

            bool try_acquire() noexcept
            {
                integral_type nCurrent = 0;
                bool bRet = m_spin.compare_exchange_weak( nCurrent, 1, atomics::memory_order_acquire, atomics::memory_order_acquire );

#           ifdef CDS_THREAD_SANITIZER_ENABLED
                if ( bRet )
                    CDS_TSAN_ANNOTATE_MUTEX_ACQUIRED( this );
#           endif

                return bRet;
            }

            bool try_acquire( unsigned int nTryCount ) noexcept( noexcept( backoff_strategy()()))
            {
                backoff_strategy bkoff;

                while ( nTryCount-- ) {
                    if ( try_acquire())
                        return true;
                    bkoff();
                }
                return false;
            }

            void acquire() noexcept( noexcept( backoff_strategy()()))
            {
                // TATAS algorithm
                backoff_strategy bkoff;
                while ( !try_acquire()) {
                    while ( m_spin.load( atomics::memory_order_acquire ))
                        bkoff();
                }
            }
            //@endcond

        public:
            /// Default constructor initializes the spin to the free (unlocked) state
            reentrant_spin_lock() noexcept
                : m_spin(0)
                , m_OwnerId( OS::c_NullThreadId )
            {
                CDS_TSAN_ANNOTATE_MUTEX_CREATE( this );
            }

            /// Dummy copy constructor
            /**
                Strictly speaking, a spin-lock should not be copyable; in practice, however,
                that restriction is inconvenient. Therefore, we provide a dummy copy constructor
                that performs no actual copying: it initializes the spin to the free (unlocked)
                state, like the default ctor.
            */
            reentrant_spin_lock( const reentrant_spin_lock<Integral, Backoff>& ) noexcept
                : m_spin(0)
                , m_OwnerId( OS::c_NullThreadId )
            {
                CDS_TSAN_ANNOTATE_MUTEX_CREATE( this );
            }

            /// Construct object in specified state
            explicit reentrant_spin_lock( bool bLocked )
                : m_spin(0)
                , m_OwnerId( OS::c_NullThreadId )
            {
                CDS_TSAN_ANNOTATE_MUTEX_CREATE( this );
                if ( bLocked )
                    lock();
            }

            /// Dtor. Spin-lock must be unlocked
            ~reentrant_spin_lock()
            {
                assert( m_spin.load( atomics::memory_order_acquire ) == 0 );
                assert( m_OwnerId.load( atomics::memory_order_relaxed ) == OS::c_NullThreadId );

                CDS_TSAN_ANNOTATE_MUTEX_DESTROY( this );
            }

            /// Checks if the spin is locked
            /**
                The spin is locked if the lock count > 0 and the current thread is not the owner of the lock.
                Otherwise (i.e. the lock count == 0 or the current thread owns the spin) the spin is unlocked.
            */
            bool is_locked() const noexcept
            {
                return !( m_spin.load( atomics::memory_order_relaxed ) == 0 || is_taken( cds::OS::get_current_thread_id()));
            }

            /// Try to lock the spin-lock
            bool try_lock() noexcept( noexcept( std::declval<reentrant_spin_lock>().try_acquire()))
            {
                thread_id tid = OS::get_current_thread_id();
                if ( try_taken_lock( tid ))
                    return true;
                if ( try_acquire()) {
                    take( tid );
                    return true;
                }
                return false;
            }

            /// Try to lock, making up to \p nTryCount attempts
            bool try_lock( unsigned int nTryCount ) noexcept( noexcept( std::declval<reentrant_spin_lock>().try_acquire( nTryCount )))
            {
                thread_id tid = OS::get_current_thread_id();
                if ( try_taken_lock( tid ))
                    return true;
                if ( try_acquire( nTryCount )) {
                    take( tid );
                    return true;
                }
                return false;
            }

            /// Lock the object; waits if it is busy
            void lock() noexcept( noexcept( std::declval<reentrant_spin_lock>().acquire()))
            {
                thread_id tid = OS::get_current_thread_id();
                if ( !try_taken_lock( tid )) {
                    acquire();
                    take( tid );
                }
            }

            /// Unlock the spin-lock
            void unlock() noexcept
            {
                assert( is_taken( OS::get_current_thread_id()));

                integral_type n = m_spin.load( atomics::memory_order_relaxed );
                if ( n > 1 )
                    m_spin.store( n - 1, atomics::memory_order_relaxed );
                else {
                    free();
                    CDS_TSAN_ANNOTATE_MUTEX_RELEASED( this );
                    m_spin.store( 0, atomics::memory_order_release );
                }
            }

            /// Change the owner of the locked spin-lock. May only be called by the thread that owns the spin-lock
            void change_owner( OS::ThreadId newOwnerId ) noexcept
            {
                assert( is_taken( OS::get_current_thread_id()));
                assert( newOwnerId != OS::c_NullThreadId );

                m_OwnerId.store( newOwnerId, atomics::memory_order_relaxed );
            }
        };

        /// Recursive 32-bit spin-lock
        typedef reentrant_spin_lock<uint32_t, backoff::LockDefault> reentrant_spin32;

        /// Default recursive spin-lock
        typedef reentrant_spin32 reentrant_spin;

        /// Recursive 64-bit spin-lock
        typedef reentrant_spin_lock<uint64_t, backoff::LockDefault> reentrant_spin64;
    }    // namespace sync
} // namespace cds

#endif  // #ifndef CDSLIB_SYNC_SPINLOCK_H