// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// The AllocatorState class is the subset of the GuardedPageAllocator that is
// required by the crash handler to analyze crashes and provide debug
// information. The crash handler initializes an instance of this class from
// the crashed process's memory. Because the out-of-process allocator could be
// corrupted or maliciously tampered with, this class is security sensitive and
// needs to be modified with care. It has been purposefully designed to be:
// - Minimal: This is the minimum set of methods and members required by the
// crash handler.
// - Trivially copyable: An instance of this object is copied from another
// process's memory. Ensuring it is trivially copyable means that the crash
// handler will not accidentally trigger a complex destructor on objects
// initialized from another process's memory.
// - Free of pointers: Pointers are all uintptr_t since none of these pointers
// need to be directly dereferenced. This encourages users like the crash
// handler to treat them as addresses instead of pointers.
// - Validatable: The IsValid() method is intended to sanity check the internal
// fields such that it's safe to call any method on a valid object. All
// additional methods and fields need to be audited to ensure they maintain
// this invariant!
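//
// A minimal sketch of the intended consumption pattern (how the bytes are
// transported out of the crashed process is assumed here, not defined by this
// header; |bytes_copied_from_crashed_process| is a hypothetical buffer):
//
//   gwp_asan::internal::AllocatorState state;
//   // Safe only because AllocatorState is trivially copyable.
//   memcpy(&state, bytes_copied_from_crashed_process, sizeof(state));
//   if (!state.IsValid())
//     return;  // Never act on fields that have not been validated.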
#ifndef COMPONENTS_GWP_ASAN_COMMON_ALLOCATOR_STATE_H_
#define COMPONENTS_GWP_ASAN_COMMON_ALLOCATOR_STATE_H_

#include <stddef.h>
#include <stdint.h>

#include <atomic>
#include <limits>
#include <string>
#include <type_traits>

#include "components/gwp_asan/common/allocation_info.h"

namespace gwp_asan {
namespace internal {

class GuardedPageAllocator;
class AllocatorState {
public:
using MetadataIdx = uint16_t;
using SlotIdx = uint16_t;
// Maximum number of virtual memory slots (guard-page buffered pages) this
// class can allocate.
static constexpr size_t kMaxRequestedSlots = 32767;
// When PartitionAlloc is used as the backing allocator, we might have to
// reserve extra slots to store PA metadata. Therefore, the number of reserved
// slots might be higher than the number of requested slots. Note that the
// current value is just a reasonable upper bound; the actual "slot overhead"
// from PA is significantly smaller.
static constexpr size_t kMaxReservedSlots = 2 * kMaxRequestedSlots;
// Maximum number of concurrent allocations/metadata this class can allocate.
static constexpr size_t kMaxMetadata = 16384;
// Invalid metadata index.
static constexpr MetadataIdx kInvalidMetadataIdx = kMaxMetadata;
// Maximum number of stack trace frames to collect for an allocation or
// deallocation.
static constexpr size_t kMaxStackFrames = 100;
// Number of bytes to allocate for both allocation and deallocation packed
// stack traces. (Stack trace entries take ~3.5 bytes on average.)
static constexpr size_t kMaxPackedTraceLength = 400;
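// (At that average, 400 bytes corresponds to roughly 400 / 3.5 ≈ 114 packed
// entries shared between the two traces.)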
static_assert(sizeof(SlotIdx) >= sizeof(MetadataIdx),
"Should not need more metadata slots than allocation "
"slots.");
static_assert(std::numeric_limits<SlotIdx>::max() >= kMaxReservedSlots,
"SlotIdx can hold all possible slot index values");
static_assert(std::numeric_limits<MetadataIdx>::max() >= kMaxMetadata - 1,
"MetadataIdx can hold all possible metadata index values");
static_assert(kInvalidMetadataIdx >= kMaxMetadata,
"kInvalidMetadataIdx can not reference a real index");
// These should not be renumbered and should be kept in sync with
// Crash::ErrorType in crash.proto
enum class ErrorType {
kUseAfterFree = 0,
kBufferUnderflow = 1,
kBufferOverflow = 2,
kDoubleFree = 3,
kUnknown = 4,
kFreeInvalidAddress = 5,
};
enum class GetMetadataReturnType {
kGwpAsanCrash = 0,
kGwpAsanCrashWithMissingMetadata = 1,
kErrorBadSlot = 2,
kErrorBadMetadataIndex = 3,
kErrorOutdatedMetadataIndex = 4,
};
// Structure for storing data about a slot.
struct SlotMetadata {
SlotMetadata();
// Size of the allocation.
size_t alloc_size = 0;
// The allocation address.
uintptr_t alloc_ptr = 0;
// Used to synchronize whether a deallocation has occurred (e.g. whether a
// double free has occurred) between threads.
std::atomic<bool> deallocation_occurred{false};
// Holds the combined allocation/deallocation stack traces. The deallocation
// stack trace is stored immediately after the allocation stack trace to
// save space.
uint8_t stack_trace_pool[kMaxPackedTraceLength];
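// Note: given the layout described above, the packed allocation trace
// occupies bytes [0, alloc.trace_len) of |stack_trace_pool| and the packed
// deallocation trace occupies the dealloc.trace_len bytes immediately after.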
static_assert(
std::numeric_limits<decltype(AllocationInfo::trace_len)>::max() >=
kMaxPackedTraceLength,
"AllocationInfo::trace_len can hold all possible length values.");
AllocationInfo alloc;
AllocationInfo dealloc;
};
AllocatorState();
AllocatorState(const AllocatorState&) = delete;
AllocatorState& operator=(const AllocatorState&) = delete;
// Returns true if address is in memory managed by this class.
inline bool PointerIsMine(uintptr_t addr) const {
return pages_base_addr <= addr && addr < pages_end_addr;
}
// Sanity check allocator internals. This method is used to verify that the
// allocator base state is well formed when the crash handler analyzes the
// allocator from a crashing process. This method is security-sensitive: it
// must validate the object's fields to ensure that an attacker with the
// ability to modify the allocator internals cannot cause the crash handler to
// misbehave or cause memory errors.
bool IsValid() const;
// This method is meant to be called from the crash handler with a validated
// AllocatorState object read from the crashed process and an exception
// address known to be in the GWP-ASan allocator region. Given the metadata
// and slot to metadata arrays for the allocator, this method returns an enum
// indicating an error or a GWP-ASan exception with or without metadata. If
// metadata is available, the |metadata_idx| parameter stores the index of the
// relevant metadata in the given array. If an error occurs, the |error|
// parameter is filled out with an error string.
GetMetadataReturnType GetMetadataForAddress(
uintptr_t exception_address,
const SlotMetadata* metadata_arr,
const MetadataIdx* slot_to_metadata,
MetadataIdx* metadata_idx,
std::string* error) const;
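//
// Illustrative call pattern (the local variables are hypothetical; it assumes
// the crash handler has already copied the metadata and slot-to-metadata
// arrays out of the crashed process into |metadata_array| and
// |slot_to_metadata_array|):
//
//   AllocatorState::MetadataIdx metadata_idx;
//   std::string error;
//   auto result = state.GetMetadataForAddress(
//       exception_address, metadata_array, slot_to_metadata_array,
//       &metadata_idx, &error);
//   if (result == AllocatorState::GetMetadataReturnType::kGwpAsanCrash) {
//     // metadata_array[metadata_idx] describes the faulting allocation.
//   }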
// Returns the likely error type given an exception address and whether it has
// previously been allocated and deallocated.
ErrorType GetErrorType(uintptr_t addr,
bool allocated,
bool deallocated) const;
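//
// For example, with a live allocation (allocated == true, deallocated ==
// false), a fault on the guard page below the allocation's page is typically
// classified as kBufferUnderflow and a fault on the guard page above it as
// kBufferOverflow. This is illustrative rather than an exhaustive
// specification of the classification logic.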
// Returns the address of the page that addr resides on.
uintptr_t GetPageAddr(uintptr_t addr) const;
// Returns an address somewhere on the valid page nearest to addr.
uintptr_t GetNearestValidPage(uintptr_t addr) const;
// Returns the slot number for the page nearest to addr.
SlotIdx GetNearestSlot(uintptr_t addr) const;
// Returns the base address of the allocation page for the given slot index.
uintptr_t SlotToAddr(SlotIdx slot) const;
// Returns the slot index for the allocation page containing addr.
SlotIdx AddrToSlot(uintptr_t addr) const;
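// Illustrative invariants for any in-range slot index i in a valid state
// (a reasoning aid rather than an exhaustive contract):
//
//   AddrToSlot(SlotToAddr(i)) == i
//   GetPageAddr(SlotToAddr(i)) == SlotToAddr(i)  // slot pages are page-aligned
//   PointerIsMine(SlotToAddr(i))                 // slots lie within the region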
uintptr_t pages_base_addr = 0; // Points to start of mapped region.
uintptr_t pages_end_addr = 0; // Points to the end of mapped region.
uintptr_t first_page_addr = 0; // Points to first allocatable page.
size_t num_metadata = 0; // Number of entries in |metadata_addr|.
size_t total_requested_pages = 0; // Virtual memory page pool size.
size_t total_reserved_pages = 0; // |total_requested_pages| plus zero or
// more pages to store allocator metadata.
size_t page_size = 0; // Page size.
// Pointer to an array of metadata about every allocation, including its size,
// offset, and pointers to the allocation/deallocation stack traces (if
// present).
uintptr_t metadata_addr = 0;
// Pointer to an array that maps a slot index to a metadata index (or
// kInvalidMetadataIdx if no such mapping exists) in |metadata_addr|.
uintptr_t slot_to_metadata_addr = 0;
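// For example, once the crash handler has copied this array out of the
// crashed process into a hypothetical local buffer |slot_to_metadata|, a
// lookup is simply:
//
//   MetadataIdx idx = slot_to_metadata[slot];
//   if (idx != kInvalidMetadataIdx) {
//     // |idx| indexes the metadata entry recorded for this slot.
//   }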
// Set to the address of a double freed allocation if a double free occurred.
uintptr_t double_free_address = 0;
// If an invalid pointer has been free()d, this is the address of that invalid
// pointer.
uintptr_t free_invalid_address = 0;
};
// Ensure that the allocator state is a plain-old-data (POD) type. That way we
// can safely initialize it by copying memory from out-of-process without
// worrying about destructors operating on the fields in an unexpected way.
static_assert(std::is_trivially_copyable<AllocatorState>(),
"AllocatorState must be POD");
static_assert(std::is_trivially_copyable<AllocatorState::SlotMetadata>(),
"AllocatorState::SlotMetadata must be POD");
} // namespace internal
} // namespace gwp_asan
#endif // COMPONENTS_GWP_ASAN_COMMON_ALLOCATOR_STATE_H_