File: allocator_state.cc

package info (click to toggle)
chromium 139.0.7258.127-1
  • links: PTS, VCS
  • area: main
  • in suites:
  • size: 6,122,068 kB
  • sloc: cpp: 35,100,771; ansic: 7,163,530; javascript: 4,103,002; python: 1,436,920; asm: 946,517; xml: 746,709; pascal: 187,653; perl: 88,691; sh: 88,436; objc: 79,953; sql: 51,488; cs: 44,583; fortran: 24,137; makefile: 22,147; tcl: 15,277; php: 13,980; yacc: 8,984; ruby: 7,485; awk: 3,720; lisp: 3,096; lex: 1,327; ada: 727; jsp: 228; sed: 36
file content (178 lines) | stat: -rw-r--r-- 6,111 bytes parent folder | download | duplicates (6)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40285824): Remove this and convert code to safer constructs.
#pragma allow_unsafe_buffers
#endif

#include "components/gwp_asan/common/allocator_state.h"

#include <algorithm>

#include "base/bits.h"
#include "base/logging.h"
#include "base/memory/page_size.h"
#include "base/strings/stringprintf.h"
#include "base/threading/platform_thread.h"
#include "build/build_config.h"

namespace gwp_asan {
namespace internal {

// Defaulted: members take the initializers given at their declarations in
// allocator_state.h.
AllocatorState::AllocatorState() = default;

// Maps a faulting address inside the GWP-ASan region to the metadata entry
// describing the allocation occupying the nearest slot. On success writes the
// entry's index to |*metadata_idx| and returns kGwpAsanCrash; on an internal
// inconsistency fills |*error| with a diagnostic and returns an error code.
AllocatorState::GetMetadataReturnType AllocatorState::GetMetadataForAddress(
    uintptr_t exception_address,
    const SlotMetadata* metadata_arr,
    const MetadataIdx* slot_to_metadata,
    MetadataIdx* metadata_idx,
    std::string* error) const {
  // Callers must only invoke this on a sane state and on addresses that fall
  // inside this allocator's reservation.
  CHECK(IsValid());
  CHECK(PointerIsMine(exception_address));

  AllocatorState::SlotIdx slot_idx = GetNearestSlot(exception_address);
  if (slot_idx >= total_reserved_pages) {
    *error = base::StringPrintf("Bad slot index %u >= %zu", slot_idx,
                                total_reserved_pages);
    return GetMetadataReturnType::kErrorBadSlot;
  }

  AllocatorState::MetadataIdx index = slot_to_metadata[slot_idx];
  // kInvalidMetadataIdx marks a slot with no recorded metadata; this is a
  // GWP-ASan crash we cannot attribute to a specific allocation.
  if (index == kInvalidMetadataIdx)
    return GetMetadataReturnType::kGwpAsanCrashWithMissingMetadata;

  if (index >= num_metadata) {
    *error =
        base::StringPrintf("Bad metadata index %u >= %zu", index, num_metadata);
    return GetMetadataReturnType::kErrorBadMetadataIndex;
  }

  // Metadata entries can be reused for newer allocations; verify this entry's
  // allocation pointer still resolves to the slot we computed, otherwise the
  // entry describes a different (older) allocation.
  if (GetNearestSlot(metadata_arr[index].alloc_ptr) != slot_idx) {
    *error = base::StringPrintf(
        "Outdated metadata index %u: slot for %zx does not match %zx", index,
        metadata_arr[index].alloc_ptr, exception_address);
    return GetMetadataReturnType::kErrorOutdatedMetadataIndex;
  }

  *metadata_idx = index;
  return GetMetadataReturnType::kGwpAsanCrash;
}

// Sanity-checks every field of the (potentially corrupted, crash-handler
// supplied) allocator state. All conditions are evaluated left to right with
// short-circuiting, so the page-alignment checks never run with a zero
// page_size.
bool AllocatorState::IsValid() const {
  return
      // The recorded page size must match this process's page size.
      page_size && page_size == base::GetPageSize() &&
      // Slot counts must be non-zero, within compile-time bounds, and
      // mutually consistent (reserved >= requested, metadata <= requested).
      total_requested_pages != 0 &&
      total_requested_pages <= kMaxRequestedSlots &&
      total_reserved_pages != 0 &&
      total_reserved_pages <= kMaxReservedSlots &&
      total_reserved_pages >= total_requested_pages &&
      num_metadata != 0 &&
      num_metadata <= std::min(kMaxMetadata, total_requested_pages) &&
      // All region boundaries must be page aligned and correctly ordered.
      pages_base_addr % page_size == 0 &&
      pages_end_addr % page_size == 0 &&
      first_page_addr % page_size == 0 &&
      pages_base_addr < pages_end_addr &&
      // The region layout is: one leading guard page, then an alternating
      // sequence of allocation and guard pages (two pages per reserved slot).
      first_page_addr == pages_base_addr + page_size &&
      pages_end_addr - pages_base_addr ==
          page_size * (total_reserved_pages * 2 + 1) &&
      // The out-of-line tables must exist.
      metadata_addr && slot_to_metadata_addr;
}

// Rounds |addr| down to the base address of the page containing it.
// page_size is expected to be a power of two (it mirrors the system page
// size), so clearing the low-order bits yields the page base.
uintptr_t AllocatorState::GetPageAddr(uintptr_t addr) const {
  const uintptr_t low_bits = page_size - 1ULL;
  return addr & ~low_bits;
}

// Returns an address on the allocation page nearest to |addr|. Allocation
// pages alternate with guard pages starting at first_page_addr, so even page
// indices (relative to first_page_addr) are valid and odd ones are guards.
uintptr_t AllocatorState::GetNearestValidPage(uintptr_t addr) const {
  // Clamp addresses outside the pool to its first/last allocation page.
  if (addr < first_page_addr)
    return first_page_addr;
  const uintptr_t last_valid_page = pages_end_addr - 2 * page_size;
  if (last_valid_page < addr)
    return last_valid_page;

  const uintptr_t offset = addr - first_page_addr;
  const int page_shift = base::bits::Log2Floor(page_size);
  // An even page index means addr already lies on an allocation page.
  if (((offset >> page_shift) & 1) == 0)
    return addr;

  // addr is inside a guard page: move half a page toward whichever adjacent
  // allocation page is closer (down from the lower half, up from the upper).
  const size_t half_page = page_size / 2;
  const bool lower_half =
      ((offset >> base::bits::Log2Floor(half_page)) & 1) == 0;
  return lower_half ? addr - half_page : addr + half_page;
}

// Returns the index of the allocation slot nearest to |addr|: snap to the
// closest allocation page, round to its base, then convert to a slot index.
AllocatorState::SlotIdx AllocatorState::GetNearestSlot(uintptr_t addr) const {
  const uintptr_t nearest_page = GetNearestValidPage(addr);
  const uintptr_t page_base = GetPageAddr(nearest_page);
  return AddrToSlot(page_base);
}

// Classifies the error behind a crash on |addr|, given whether the slot's
// metadata records the allocation as ever allocated/deallocated. Flags the
// allocator recorded explicitly (free_invalid_address, double_free_address)
// take priority over address-based classification.
AllocatorState::ErrorType AllocatorState::GetErrorType(uintptr_t addr,
                                                       bool allocated,
                                                       bool deallocated) const {
  // A free() of an address the allocator never handed out.
  if (free_invalid_address)
    return ErrorType::kFreeInvalidAddress;
  // No allocation was ever tracked for this slot; nothing to classify.
  if (!allocated)
    return ErrorType::kUnknown;
  if (double_free_address)
    return ErrorType::kDoubleFree;
  if (deallocated)
    return ErrorType::kUseAfterFree;
  // Accesses before the first or after the last allocation page under/over-
  // flowed the pool boundary itself.
  if (addr < first_page_addr)
    return ErrorType::kBufferUnderflow;
  const uintptr_t last_page_addr = pages_end_addr - 2 * page_size;
  if (addr > last_page_addr)
    return ErrorType::kBufferOverflow;
  const uintptr_t offset = addr - first_page_addr;

  // If we hit this condition, it means we crashed while accessing an
  // allocation page even though the slot is currently allocated (there is an
  // if (deallocated) return earlier). This can happen when a use-after-free
  // causes a crash and another thread manages to reallocate the page before
  // the process is stopped. This can happen with low sampling frequencies and
  // high parallel allocator usage.
  if ((offset >> base::bits::Log2Floor(page_size)) % 2 == 0) {
    LOG(WARNING) << "Hit impossible error condition, likely caused by a racy "
                    "use-after-free";
    return ErrorType::kUnknown;
  }

  // addr is inside a guard page: the lower half implicates an overflow of the
  // preceding allocation, the upper half an underflow of the following one.
  const size_t kHalfPageSize = page_size / 2;
  return (offset >> base::bits::Log2Floor(kHalfPageSize)) % 2 == 0
             ? ErrorType::kBufferOverflow
             : ErrorType::kBufferUnderflow;
}

// Returns the base address of the allocation page for |slot|. Each slot
// occupies an allocation page plus a trailing guard page, so consecutive
// slots are two pages apart starting at first_page_addr.
uintptr_t AllocatorState::SlotToAddr(AllocatorState::SlotIdx slot) const {
  // NOTE(review): LE permits slot == kMaxReservedSlots, one past the last
  // reservable slot — callers appear to always pass smaller indices.
  DCHECK_LE(slot, kMaxReservedSlots);
  const uintptr_t slot_offset = 2 * slot * page_size;
  return first_page_addr + slot_offset;
}

// Converts a page-aligned allocation-page address back into its slot index.
// Inverse of SlotToAddr().
AllocatorState::SlotIdx AllocatorState::AddrToSlot(uintptr_t addr) const {
  DCHECK_EQ(addr % page_size, 0ULL);
  const uintptr_t offset = addr - first_page_addr;
  const int page_shift = base::bits::Log2Floor(page_size);
  // Valid (non-guard) pages sit at even page indices from first_page_addr.
  DCHECK_EQ((offset >> page_shift) % 2, 0ULL);
  // Two pages (allocation + guard) per slot.
  const size_t slot = (offset >> page_shift) / 2;
  DCHECK_LE(slot, kMaxReservedSlots);
  return static_cast<SlotIdx>(slot);
}

// Defaulted: members take the initializers given at their declarations in
// allocator_state.h.
AllocatorState::SlotMetadata::SlotMetadata() = default;

}  // namespace internal
}  // namespace gwp_asan