File: pool_monitor.h

// Copyright (c) 2006-2018 Maxim Khizhinsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)

#ifndef CDSLIB_SYNC_POOL_MONITOR_H
#define CDSLIB_SYNC_POOL_MONITOR_H

#include <cds/sync/monitor.h>
#include <cds/algo/atomic.h>
#include <cds/algo/backoff_strategy.h>
#include <cds/opt/options.h> // opt::none

namespace cds { namespace sync {

    /// \p pool_monitor traits
    struct pool_monitor_traits {

        /// Dummy internal statistics if \p Stat template parameter is \p false
        struct empty_stat
        {
            //@cond
            void onLock()              const {}
            void onUnlock()            const {}
            void onLockContention()    const {}
            void onUnlockContention()  const {}
            void onLockAllocation()    const {}
            void onLockDeallocation()  const {}
            //@endcond
        };

        /// Monitor's internal statistics, used if \p Stat template parameter is \p true
        template <typename Counter = cds::atomicity::event_counter >
        struct stat
        {
            typedef Counter event_counter; ///< event counter type

            event_counter m_nLockCount;         ///< Number of monitor \p lock() calls
            event_counter m_nUnlockCount;       ///< Number of monitor \p unlock() calls
            event_counter m_nMaxLocked;         ///< Maximum number of simultaneously locked mutexes
            event_counter m_nLockContention;    ///< Number of \p lock() contentions
            event_counter m_nUnlockContention;  ///< Number of \p unlock() contentions
            event_counter m_nLockAllocation;    ///< Number of lock allocations from the pool
            event_counter m_nLockDeallocation;  ///< Number of lock deallocations
            event_counter m_nMaxAllocated;      ///< Maximum number of simultaneously allocated mutexes

            //@cond
            void onLock()
            {
                ++m_nLockCount;
                int nDiff = static_cast<int>( m_nLockCount.get() - m_nUnlockCount.get());
                if ( nDiff > 0 && m_nMaxLocked.get() < static_cast<typename event_counter::value_type>( nDiff ))
                    m_nMaxLocked = static_cast<typename event_counter::value_type>( nDiff );
            }
            void onUnlock()             { ++m_nUnlockCount;     }
            void onLockContention()     { ++m_nLockContention;  }
            void onUnlockContention()   { ++m_nUnlockContention;}
            void onLockAllocation()
            {
                ++m_nLockAllocation;
                int nDiff = static_cast<int>( m_nLockAllocation.get() - m_nLockDeallocation.get());
                if ( nDiff > 0 && m_nMaxAllocated.get() < static_cast<typename event_counter::value_type>( nDiff ))
                    m_nMaxAllocated = static_cast<typename event_counter::value_type>( nDiff );
            }
            void onLockDeallocation()   { ++m_nLockDeallocation;}
            //@endcond
        };
    };


    /// @ref cds_sync_monitor "Monitor" that allocates a node's lock on demand
    /**
        The monitor is intended to reduce the number of system mutexes needed by
        huge containers such as trees. The monitor allocates a mutex from the pool
        \p LockPool only when a container node has to be locked. The lifetime of a
        node's mutex is managed by a reference counter: when the counter for the
        node's mutex drops to zero, the mutex is returned to the pool.

        The monitor is blocking: access to the node's mutex pointer is protected by
        a per-node spin-lock. However, locking and unlocking the node's mutex itself
        are performed outside the spin-lock.

        Template arguments:
        - \p LockPool - the @ref cds_memory_pool "pool type". The pool must maintain
            objects of type \p std::mutex or similar. The monitor does not synchronize
            access to the pool, so the pool itself must be safe for concurrent use.
        - \p BackOff - back-off strategy for spinning, default is \p cds::backoff::Default
        - \p Stat - enable (\p true) or disable (\p false, the default) monitor's internal statistics.

        <b>How to use</b>
        \code
        typedef cds::memory::vyukov_queue_pool< std::mutex > pool_type;
        typedef cds::sync::pool_monitor< pool_type > sync_monitor;
        \endcode
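
        A hedged usage sketch (\p node_type below is a hypothetical node type that
        embeds \p node_injection as its \p m_SyncMonitorInjection member, as the
        monitor expects):
        \code
        sync_monitor monitor;
        node_type    node;
        {
            sync_monitor::scoped_lock<node_type> guard( monitor, node );  // calls lock() on the node
            // ... exclusive access to node ...
        }   // guard's destructor calls unlock(); the mutex returns to the pool when unreferenced
        \endcode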
    */
    template <class LockPool, typename BackOff = cds::backoff::Default, bool Stat = false >
    class pool_monitor
    {
    public:
        typedef LockPool pool_type; ///< Pool type
        typedef typename pool_type::value_type lock_type; ///< node lock type
        typedef typename std::conditional<
            std::is_same< BackOff, cds::opt::none >::value,
            cds::backoff::yield,
            BackOff
        >::type  back_off;  ///< back-off strategy for spinning
        typedef uint32_t refspin_type;  ///< Reference counter (upper bits) combined with the spin-lock flag (bit 0)

        /// Internal statistics
        typedef typename std::conditional<
            Stat,
            typename pool_monitor_traits::stat<>,
            typename pool_monitor_traits::empty_stat
        >::type internal_stat;

        /// Pool's default capacity
        static constexpr size_t const c_nDefaultCapacity = 256;

    private:
        //@cond
        static constexpr refspin_type const c_nSpinBit = 1;        // bit 0 - the spin-lock flag
        static constexpr refspin_type const c_nRefIncrement = 2;   // reference-counter increment; the counter occupies the upper bits
        mutable pool_type      m_Pool;
        mutable internal_stat  m_Stat;
        //@endcond

    public:

        /// Node injection
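        /// The container's node embeds this structure; \p lock() and \p unlock()
        /// access it through the node's \p m_SyncMonitorInjection member.
        /// A minimal sketch of a hypothetical node type (real containers define their own nodes):
        /// \code
        /// struct my_node {
        ///     int nValue;                                          // user data
        ///     sync_monitor::node_injection m_SyncMonitorInjection; // the member name the monitor expects
        /// };
        /// \endcode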
        struct node_injection
        {
            mutable atomics::atomic<refspin_type>   m_RefSpin;  ///< Spin-lock for \p m_pLock (bit 0) + reference counter
            mutable lock_type *                     m_pLock;    ///< Node-level lock

            //@cond
            node_injection()
                : m_pLock( nullptr )
            {
                m_RefSpin.store( 0, atomics::memory_order_release );
            }

            ~node_injection()
            {
                assert( m_pLock == nullptr );
                assert( m_RefSpin.load( atomics::memory_order_relaxed ) == 0 );
            }

            bool check_free() const
            {
                return m_pLock == nullptr && m_RefSpin.load( atomics::memory_order_relaxed ) == 0;
            }
            //@endcond
        };

        /// Initializes the pool of 256 preallocated mutexes
        pool_monitor()
            : m_Pool( c_nDefaultCapacity )
        {}

        /// Initializes the pool of \p nPoolCapacity preallocated mutexes (if zero, \p c_nDefaultCapacity is used)
        pool_monitor( size_t nPoolCapacity )
            : m_Pool( nPoolCapacity ? nPoolCapacity : c_nDefaultCapacity )
        {}

        /// Acquires exclusive access to node \p p
        template <typename Node>
        void lock( Node const& p ) const
        {
            lock_type * pLock;

            m_Stat.onLock();

            // try lock spin and increment reference counter
            refspin_type cur = p.m_SyncMonitorInjection.m_RefSpin.load( atomics::memory_order_relaxed ) & ~c_nSpinBit;
            if ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur + c_nRefIncrement + c_nSpinBit,
                atomics::memory_order_acq_rel, atomics::memory_order_acquire ))
            {
                back_off bkoff;
                do {
                    m_Stat.onLockContention();
                    bkoff();
                    cur &= ~c_nSpinBit;
                } while ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur + c_nRefIncrement + c_nSpinBit,
                    atomics::memory_order_acq_rel, atomics::memory_order_acquire ));
            }

            // spin locked
            // If the node has no lock, allocate it from pool
            pLock = p.m_SyncMonitorInjection.m_pLock;
            if ( !pLock ) {
                assert( cur == 0 );
                pLock = p.m_SyncMonitorInjection.m_pLock = m_Pool.allocate( 1 );
                assert( pLock != nullptr );
                m_Stat.onLockAllocation();
            }

            // unlock spin
            p.m_SyncMonitorInjection.m_RefSpin.store( cur + c_nRefIncrement, atomics::memory_order_release );

            // lock the node
            pLock->lock();
        }

        /// Unlocks the node \p p
        template <typename Node>
        void unlock( Node const& p ) const
        {
            lock_type * pLock = nullptr;

            m_Stat.onUnlock();

            assert( p.m_SyncMonitorInjection.m_pLock != nullptr );
            p.m_SyncMonitorInjection.m_pLock->unlock();

            // try lock spin
            refspin_type cur = p.m_SyncMonitorInjection.m_RefSpin.load( atomics::memory_order_relaxed ) & ~c_nSpinBit;
            if ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur | c_nSpinBit,
                atomics::memory_order_acquire, atomics::memory_order_acquire ))
            {
                back_off bkoff;
                do {
                    m_Stat.onUnlockContention();
                    bkoff();
                    cur &= ~c_nSpinBit;
                } while ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur | c_nSpinBit,
                    atomics::memory_order_acquire, atomics::memory_order_acquire ));
            }

            // spin locked now

            // If we are the last owner, return the lock to the pool
            if ( cur == c_nRefIncrement ) {
                pLock = p.m_SyncMonitorInjection.m_pLock;
                p.m_SyncMonitorInjection.m_pLock = nullptr;
            }

            // unlock spin
            p.m_SyncMonitorInjection.m_RefSpin.store( cur - c_nRefIncrement, atomics::memory_order_release );

            // free pLock
            if ( pLock ) {
                m_Pool.deallocate( pLock, 1 );
                m_Stat.onLockDeallocation();
            }
        }

        /// Scoped lock
        template <typename Node>
        using scoped_lock = monitor_scoped_lock< pool_monitor, Node >;

        /// Returns a reference to the internal statistics
        /**
            If the class's template argument \p Stat is \p false,
            the function returns \ref pool_monitor_traits::empty_stat "dummy statistics".
            Otherwise, it returns a reference to the monitor's internal statistics
            of type \ref pool_monitor_traits::stat.
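
            A minimal sketch of reading the counters (assumes \p Stat is \p true, so the
            members of \p pool_monitor_traits::stat are available; \p monitor is a
            hypothetical \p pool_monitor instance):
            \code
            auto const& st = monitor.statistics();
            std::cout << "locks: "         << st.m_nLockCount.get()
                      << ", allocations: " << st.m_nLockAllocation.get() << '\n';
            \endcode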
        */
        internal_stat const& statistics() const
        {
            return m_Stat;
        }
    };

}} // namespace cds::sync

#endif // #ifndef CDSLIB_SYNC_POOL_MONITOR_H