File: SequenceLocked.h

//
// Copyright 2020 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <array>
#include <atomic>
#include <type_traits>
#include <wtf/Compiler.h>
#include <wtf/StdLibExtras.h>
#include <wtf/text/ParsingUtilities.h>

namespace WTF {

// A SequenceLock implements lock-free reads. A sequence counter is incremented
// before and after each write, and readers access the counter before and after
// accessing the protected data. If the counter is verified not to have changed
// during the access, and the sequence counter value was even, then the reader
// knows that the read was race-free and valid.
//
// Works best with types T for which an all-zero byte pattern is a valid value:
// the backing storage is zero-initialized, so a load() that happens before the
// first store() returns a zero-filled T.
//
// The memory reads and writes protected by this lock must use the provided
// `load()` and `store()` functions.
// As an implementation detail, the protected data must be an array of
// `std::atomic<uint64_t>`. This is to comply with the C++ standard, which
// considers data races on non-atomic objects to be undefined behavior. See "Can
// Seqlocks Get Along With Programming Language Memory Models?"[1] by Hans J.
// Boehm for more details.
//
// [1] https://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf
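//
// Example usage (an illustrative sketch; `Counters` is a hypothetical
// trivially copyable struct, not part of this header):
//
//     struct Counters { uint64_t hits; uint64_t misses; };
//     static SequenceLocked<Counters> sharedCounters;
//
//     // Writer (calls to store() must be externally synchronized):
//     Counters updated = sharedCounters.load();
//     updated.hits++;
//     sharedCounters.store(updated);
//
//     // Readers (lock-free; safe to run concurrently with the writer):
//     Counters snapshot = sharedCounters.load();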
template<typename T>
class SequenceLocked {
public:
    static_assert(std::atomic<uint64_t>::is_always_lock_free);
    static_assert(std::is_trivially_copyable_v<T>);
    // The word-wise copy helpers below have no tail handling, so T must occupy
    // a whole number of 64-bit words (their ASSERTs check this in debug builds).
    static_assert(!(sizeof(T) % sizeof(uint64_t)));

    // Copy the protected T out of storage and return it, performed as a
    // read-side critical section of the sequence lock.
    T load() const
    {
        T result;
        for (;;) {
            // Acquire barrier ensures that no loads are reordered
            // above the first load of the sequence counter.
            uint64_t versionBefore = m_version.load(std::memory_order_acquire);
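            // An odd counter value means a writer is mid-update; retry.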
            if (UNLIKELY((versionBefore & 1) == 1))
                continue;
            relaxedCopyFromAtomic(asMutableByteSpan(result), std::span { m_storage });
            // An acquire fence ensures that the load of 'm_version' below is
            // strictly ordered after the relaxedCopyFromAtomic() call above.
            std::atomic_thread_fence(std::memory_order_acquire);
            uint64_t versionAfter = m_version.load(std::memory_order_relaxed);
            if (LIKELY(versionBefore == versionAfter))
                break;
        }
        return result;
    }

    // Copy the given value into storage as a write-side critical section
    // of the sequence lock. Any concurrent readers will be forced to retry
    // until they get a read that does not conflict with this write.
    //
    // This call must be externally synchronized against other calls to write,
    // but may proceed concurrently with reads.
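    //
    // For example, writers might serialize through a WTF::Lock (an
    // illustrative sketch reusing the hypothetical `sharedCounters` and
    // `updated` from the example above):
    //
    //     Lock writeLock;
    //     {
    //         Locker locker { writeLock };
    //         sharedCounters.store(updated);
    //     }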
    void store(const T& value)
    {
        // We can use relaxed operations to increment the counter since we
        // are externally synchronized. The std::atomic_thread_fence below
        // ensures that the counter update doesn't get interleaved with the
        // copy to the data.
        uint64_t version = m_version.load(std::memory_order_relaxed);
        m_version.store(version + 1, std::memory_order_relaxed);

        // We put a release fence between the update to m_version and the writes to
        // the shared data. Thus all stores to the shared data are effectively release
        // operations, and the update to m_version above cannot be reordered past any
        // of them. Note that this fence is not for the counter increment above; a
        // release barrier for the increment would have to be before it, not after.
        std::atomic_thread_fence(std::memory_order_release);
        relaxedCopyToAtomic(std::span { m_storage }, asByteSpan(value));
        // "Release" semantics ensure that none of the writes done by
        // relaxedCopyToAtomic() can be reordered after the following modification.
        m_version.store(version + 2, std::memory_order_release);
    }

private:
    // Number of 64-bit words needed to hold a T, rounding up.
    static constexpr size_t storageSize = (sizeof(T) + sizeof(uint64_t) - 1) / sizeof(std::atomic<uint64_t>);

    // Perform the equivalent of "memcpy(dst, src, size)", but using relaxed
    // atomics.
    static void relaxedCopyFromAtomic(std::span<uint8_t> destination, std::span<const std::atomic<uint64_t>, storageSize> source)
    {
        size_t sourceIndex = 0;
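        // consumeSpan() returns the next sizeof(word) bytes of 'destination'
        // and advances it past them, so the loop walks the output word by word.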
        while (destination.size() >= sizeof(uint64_t)) {
            uint64_t word = source[sourceIndex++].load(std::memory_order_relaxed);
            memcpySpan(consumeSpan(destination, sizeof(word)), asByteSpan(word));
        }
        ASSERT(destination.empty());
    }

    // Perform the equivalent of "memcpy(dst, src, size)", but using relaxed
    // atomics.
    static void relaxedCopyToAtomic(std::span<std::atomic<uint64_t>, storageSize> destination, std::span<const uint8_t> source)
    {
        size_t destinationIndex = 0;
        while (source.size() >= sizeof(uint64_t)) {
            uint64_t word;
            memcpySpan(asMutableByteSpan(word), consumeSpan(source, sizeof(word)));
            destination[destinationIndex++].store(word, std::memory_order_relaxed);
        }
        ASSERT(source.empty());
    }

    // The sequence counter: even when no write is in progress, odd while a
    // writer is mid-update.
    std::atomic<uint64_t> m_version { 0 };
    using Storage = std::array<std::atomic<uint64_t>, storageSize>;
    Storage m_storage { };
};

} // namespace WTF

using WTF::SequenceLocked;