//===-- scudo_allocator_secondary.h -----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// Scudo Secondary Allocator.
/// This services allocations that are too large to be serviced by the Primary
/// Allocator. It is directly backed by the memory mapping functions of the
/// operating system.
///
//===----------------------------------------------------------------------===//

#ifndef SCUDO_ALLOCATOR_SECONDARY_H_
#define SCUDO_ALLOCATOR_SECONDARY_H_

#ifndef SCUDO_ALLOCATOR_H_
# error "This file must be included inside scudo_allocator.h."
#endif

// Secondary backed allocations are standalone chunks that contain extra
// information stored in a LargeChunk::Header prior to the frontend's header.
//
// The secondary takes care of alignment requirements (so that it can release
// unnecessary pages in the rare event of larger alignments), and as such must
// know about the frontend's header size.
//
// Since Windows doesn't support partial releasing of a reserved memory region,
// we have to keep track of both the reserved and the committed memory.
//
// The resulting chunk resembles the following:
//
//   +--------------------+
//   | Guard page(s)      |
//   +--------------------+
//   | Unused space*      |
//   +--------------------+
//   | LargeChunk::Header |
//   +--------------------+
//   | {Unp,P}ackedHeader |
//   +--------------------+
//   | Data (aligned)     |
//   +--------------------+
//   | Unused space**     |
//   +--------------------+
//   | Guard page(s)      |
//   +--------------------+
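//
// For instance (a sketch, assuming 4 KiB pages and a default-aligned request):
// for a frontend Size of 1 MiB, the secondary reserves
// RoundUpTo(1 MiB + LargeChunk::getHeaderSize(), 4 KiB) + 2 * 4 KiB bytes,
// commits everything between the two guard pages, and returns a pointer
// located LargeChunk::getHeaderSize() bytes past the committed beginning,
// which is where the frontend's header will be placed.
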
namespace LargeChunk {
struct Header {
  ReservedAddressRange StoredRange;
  uptr CommittedSize;
  uptr Size;
};
constexpr uptr getHeaderSize() {
  return RoundUpTo(sizeof(Header), MinAlignment);
}
static Header *getHeader(uptr Ptr) {
  return reinterpret_cast<Header *>(Ptr - getHeaderSize());
}
static Header *getHeader(const void *Ptr) {
  return getHeader(reinterpret_cast<uptr>(Ptr));
}
} // namespace LargeChunk
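
// A brief usage sketch: Deallocate() and GetActuallyAllocatedSize() below
// recover the secondary header from the pointer returned by Allocate() (which
// points at the frontend's header) as follows:
//   LargeChunk::Header *H = LargeChunk::getHeader(Ptr);
//   uptr ChunkSize = H->Size;  // From Ptr to the end of the committed region.
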
class LargeMmapAllocator {
 public:
  void Init() {
    internal_memset(this, 0, sizeof(*this));
  }

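  // Reserves a memory region large enough for Size bytes (which already
  // include the frontend's header) plus the secondary header, the guard pages
  // and the eventual alignment padding, commits the part that will actually
  // be used, and returns a pointer to the location of the frontend's header.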
  void *Allocate(AllocatorStats *Stats, uptr Size, uptr Alignment) {
    const uptr UserSize = Size - Chunk::getHeaderSize();
    // The Scudo frontend prevents us from allocating more than
    // MaxAllowedMallocSize, so integer overflow checks would be superfluous.
    uptr ReservedSize = Size + LargeChunk::getHeaderSize();
    if (UNLIKELY(Alignment > MinAlignment))
      ReservedSize += Alignment;
    const uptr PageSize = GetPageSizeCached();
    ReservedSize = RoundUpTo(ReservedSize, PageSize);
    // Account for 2 guard pages, one before and one after the chunk.
    ReservedSize += 2 * PageSize;
    ReservedAddressRange AddressRange;
    uptr ReservedBeg = AddressRange.Init(ReservedSize, SecondaryAllocatorName);
    if (UNLIKELY(ReservedBeg == ~static_cast<uptr>(0)))
      return nullptr;
    // A page-aligned pointer is assumed after that, so check it now.
    DCHECK(IsAligned(ReservedBeg, PageSize));
    uptr ReservedEnd = ReservedBeg + ReservedSize;
    // The beginning of the user area for that allocation comes after the
    // initial guard page, and both headers. This is the pointer that has to
    // abide by alignment requirements.
    uptr CommittedBeg = ReservedBeg + PageSize;
    uptr UserBeg = CommittedBeg + HeadersSize;
    uptr UserEnd = UserBeg + UserSize;
    uptr CommittedEnd = RoundUpTo(UserEnd, PageSize);
    // In the rare event of larger alignments, we will attempt to fit the mmap
    // area better and unmap extraneous memory. This will also ensure that the
    // offset and unused bytes field of the header stay small.
    if (UNLIKELY(Alignment > MinAlignment)) {
      if (!IsAligned(UserBeg, Alignment)) {
        UserBeg = RoundUpTo(UserBeg, Alignment);
        CommittedBeg = RoundDownTo(UserBeg - HeadersSize, PageSize);
        const uptr NewReservedBeg = CommittedBeg - PageSize;
        DCHECK_GE(NewReservedBeg, ReservedBeg);
        if (!SANITIZER_WINDOWS && NewReservedBeg != ReservedBeg) {
          AddressRange.Unmap(ReservedBeg, NewReservedBeg - ReservedBeg);
          ReservedBeg = NewReservedBeg;
        }
        UserEnd = UserBeg + UserSize;
        CommittedEnd = RoundUpTo(UserEnd, PageSize);
      }
      const uptr NewReservedEnd = CommittedEnd + PageSize;
      DCHECK_LE(NewReservedEnd, ReservedEnd);
      if (!SANITIZER_WINDOWS && NewReservedEnd != ReservedEnd) {
        AddressRange.Unmap(NewReservedEnd, ReservedEnd - NewReservedEnd);
        ReservedEnd = NewReservedEnd;
      }
    }
    DCHECK_LE(UserEnd, CommittedEnd);
    const uptr CommittedSize = CommittedEnd - CommittedBeg;
    // Actually mmap the memory, preserving the guard pages on either side.
    CHECK_EQ(CommittedBeg, AddressRange.Map(CommittedBeg, CommittedSize));
    const uptr Ptr = UserBeg - Chunk::getHeaderSize();
    LargeChunk::Header *H = LargeChunk::getHeader(Ptr);
    H->StoredRange = AddressRange;
    H->Size = CommittedEnd - Ptr;
    H->CommittedSize = CommittedSize;
    // The primary adds the whole class size to the stats when allocating a
    // chunk, so we will do something similar here. But we will not account
    // for the guard pages.
    {
      SpinMutexLock l(&StatsMutex);
      Stats->Add(AllocatorStatAllocated, CommittedSize);
      Stats->Add(AllocatorStatMapped, CommittedSize);
      AllocatedBytes += CommittedSize;
      if (LargestSize < CommittedSize)
        LargestSize = CommittedSize;
      NumberOfAllocs++;
    }
    return reinterpret_cast<void *>(Ptr);
  }

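  // Unmaps the entire reserved range associated with the chunk (guard pages
  // included) and updates the statistics.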
  void Deallocate(AllocatorStats *Stats, void *Ptr) {
    LargeChunk::Header *H = LargeChunk::getHeader(Ptr);
    // We are about to unmap the memory that the ReservedAddressRange itself
    // lives in (it is stored in the chunk's header), so copy it onto the
    // stack beforehand.
    ReservedAddressRange AddressRange = H->StoredRange;
    const uptr Size = H->CommittedSize;
    {
      SpinMutexLock l(&StatsMutex);
      Stats->Sub(AllocatorStatAllocated, Size);
      Stats->Sub(AllocatorStatMapped, Size);
      FreedBytes += Size;
      NumberOfFrees++;
    }
    AddressRange.Unmap(reinterpret_cast<uptr>(AddressRange.base()),
                       AddressRange.size());
  }

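  // Returns the size stored in the secondary header, that is, the distance
  // between the pointer returned by Allocate() and the end of the committed
  // region.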
  static uptr GetActuallyAllocatedSize(void *Ptr) {
    return LargeChunk::getHeader(Ptr)->Size;
  }

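  // Prints the cumulative secondary statistics gathered since Init().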
  void PrintStats() {
    Printf("Stats: LargeMmapAllocator: allocated %zd times (%zd K), "
           "freed %zd times (%zd K), remains %zd (%zd K) max %zd M\n",
           NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees,
           FreedBytes >> 10, NumberOfAllocs - NumberOfFrees,
           (AllocatedBytes - FreedBytes) >> 10, LargestSize >> 20);
  }

 private:
  static constexpr uptr HeadersSize =
      LargeChunk::getHeaderSize() + Chunk::getHeaderSize();

  StaticSpinMutex StatsMutex;
  // Internal statistics, updated under StatsMutex.
  u32 NumberOfAllocs;
  u32 NumberOfFrees;
  uptr AllocatedBytes;
  uptr FreedBytes;
  uptr LargestSize;
};

#endif // SCUDO_ALLOCATOR_SECONDARY_H_