File: atomicops.cc

chromium 139.0.7258.127-1
// Copyright 2024 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/atomicops.h"

#include <atomic>

#include "base/memory/aligned_memory.h"

namespace base::subtle {

void RelaxedAtomicWriteMemcpy(base::span<uint8_t> dst,
                              base::span<const uint8_t> src) {
  CHECK_EQ(dst.size(), src.size());
  size_t bytes = dst.size();
  uint8_t* dst_byte_ptr = dst.data();
  const uint8_t* src_byte_ptr = src.data();
  // Make sure that we can at least copy byte by byte with atomics.
  static_assert(std::atomic_ref<uint8_t>::required_alignment == 1);

  // Alignment for uintmax_t atomics that we use in the happy case.
  constexpr size_t kDesiredAlignment =
      std::atomic_ref<uintmax_t>::required_alignment;

  // Copy byte-by-byte until `dst_byte_ptr` is properly aligned for the
  // happy case (or until we run out of bytes).
  while (bytes > 0 && !IsAligned(dst_byte_ptr, kDesiredAlignment)) {
    std::atomic_ref<uint8_t>(*dst_byte_ptr)
        .store(*src_byte_ptr, std::memory_order_relaxed);
    // SAFETY: We check above that `dst_byte_ptr` and `src_byte_ptr` point
    // to spans of sufficient size.
    UNSAFE_BUFFERS(++dst_byte_ptr);
    UNSAFE_BUFFERS(++src_byte_ptr);
    --bytes;
  }

  // Happy case where both `src_byte_ptr` and `dst_byte_ptr` are properly
  // aligned and the largest possible atomic is used for copying.
  if (IsAligned(src_byte_ptr, kDesiredAlignment)) {
    while (bytes >= sizeof(uintmax_t)) {
      std::atomic_ref<uintmax_t>(*reinterpret_cast<uintmax_t*>(dst_byte_ptr))
          .store(*reinterpret_cast<const uintmax_t*>(src_byte_ptr),
                 std::memory_order_relaxed);
      // SAFETY: We check above that `dst_byte_ptr` and `src_byte_ptr` point
      // to spans of sufficient size.
      UNSAFE_BUFFERS(dst_byte_ptr += sizeof(uintmax_t));
      UNSAFE_BUFFERS(src_byte_ptr += sizeof(uintmax_t));
      bytes -= sizeof(uintmax_t);
    }
  }

  // Copy whatever is left after the happy case byte by byte.
  while (bytes > 0) {
    std::atomic_ref<uint8_t>(*dst_byte_ptr)
        .store(*src_byte_ptr, std::memory_order_relaxed);
    // SAFETY: We check above that `dst_byte_ptr` and `src_byte_ptr` point
    // to spans of sufficient size.
    UNSAFE_BUFFERS(++dst_byte_ptr);
    UNSAFE_BUFFERS(++src_byte_ptr);
    --bytes;
  }
}

}  // namespace base::subtle
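
A minimal usage sketch, not part of the file above: `PublishSample`, `g_shared`, and the 64-byte buffer size are hypothetical, and the snippet assumes only the `RelaxedAtomicWriteMemcpy` declaration from base/atomicops.h plus `base::span` and `CHECK_LE` from base. It shows a writer publishing bytes into a buffer that another thread is assumed to read concurrently with relaxed atomic loads; readers may observe a mix of old and new bytes, but never a data race.

#include <cstdint>

#include "base/atomicops.h"
#include "base/check_op.h"
#include "base/containers/span.h"

// Hypothetical shared buffer; another thread is assumed to read it with
// relaxed atomic loads rather than plain (non-atomic) reads.
uint8_t g_shared[64];

void PublishSample(base::span<const uint8_t> sample) {
  CHECK_LE(sample.size(), sizeof(g_shared));
  // Destination and source spans must be the same size; this is enforced by
  // the CHECK_EQ inside RelaxedAtomicWriteMemcpy.
  base::subtle::RelaxedAtomicWriteMemcpy(
      base::span<uint8_t>(g_shared).first(sample.size()), sample);
}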