// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "third_party/blink/renderer/platform/heap/heap_allocator.h"
namespace blink {
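
// Attempts to promptly free a collection backing store. Freeing only happens
// when it is safe to do so: sweeping must be allowed, the backing must live on
// a normal page owned by the current thread, and it must not already be
// marked.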
void HeapAllocator::BackingFree(void* address) {
  if (!address)
    return;

  ThreadState* state = ThreadState::Current();
  if (state->SweepForbidden())
    return;
  DCHECK(!state->in_atomic_pause());

  // Don't promptly free large objects because their page is never reused.
  // Don't free backings allocated on other threads.
  BasePage* page = PageFromObject(address);
  if (page->IsLargeObjectPage() || page->Arena()->GetThreadState() != state)
    return;

  HeapObjectHeader* header = HeapObjectHeader::FromPayload(address);
  // Don't promptly free marked backings, as they may be registered on the
  // marking callback stack. The effect on GCs without incremental marking is
  // that prompt freeing is disabled for surviving backings during lazy
  // sweeping.
  if (header->IsMarked())
    return;

  state->Heap().PromptlyFreed(header->GcInfoIndex());
  static_cast<NormalPage*>(page)->ArenaForNormalPage()->PromptlyFreeObject(
      header);
}

void HeapAllocator::FreeVectorBacking(void* address) {
  BackingFree(address);
}

void HeapAllocator::FreeInlineVectorBacking(void* address) {
  BackingFree(address);
}

void HeapAllocator::FreeHashTableBacking(void* address) {
  // When incremental marking is enabled, weak callbacks may have been
  // registered for the backing, so it must not be freed while marking is in
  // progress.
  if (!ThreadState::Current()->IsMarkingInProgress())
    BackingFree(address);
}
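
// Attempts to grow a backing store in place via
// NormalPageArena::ExpandObject. Expansion is only attempted for backings on
// normal pages owned by the current thread.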
bool HeapAllocator::BackingExpand(void* address, size_t new_size) {
  if (!address)
    return false;

  ThreadState* state = ThreadState::Current();
  if (state->SweepForbidden())
    return false;
  DCHECK(!state->in_atomic_pause());
  DCHECK(state->IsAllocationAllowed());
  DCHECK_EQ(&state->Heap(), &ThreadState::FromObject(address)->Heap());

  // FIXME: Support expand for large objects.
  // Don't expand backings allocated on other threads.
  BasePage* page = PageFromObject(address);
  if (page->IsLargeObjectPage() || page->Arena()->GetThreadState() != state)
    return false;

  HeapObjectHeader* header = HeapObjectHeader::FromPayload(address);
  NormalPageArena* arena =
      static_cast<NormalPage*>(page)->ArenaForNormalPage();
  bool succeeded = arena->ExpandObject(header, new_size);
  if (succeeded)
    state->Heap().AllocationPointAdjusted(arena->ArenaIndex());
  return succeeded;
}

bool HeapAllocator::ExpandVectorBacking(void* address, size_t new_size) {
  return BackingExpand(address, new_size);
}

bool HeapAllocator::ExpandInlineVectorBacking(void* address, size_t new_size) {
  return BackingExpand(address, new_size);
}

bool HeapAllocator::ExpandHashTableBacking(void* address, size_t new_size) {
  return BackingExpand(address, new_size);
}
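
// Shrinks a backing store in place, promptly freeing the tail when the freed
// region is large enough to be worth reclaiming. Returns false only when the
// shrink cannot be attempted here (sweeping is forbidden, the backing lives on
// a large-object page, or it is owned by another thread).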
bool HeapAllocator::BackingShrink(void* address,
                                  size_t quantized_current_size,
                                  size_t quantized_shrunk_size) {
  if (!address || quantized_shrunk_size == quantized_current_size)
    return true;

  DCHECK_LT(quantized_shrunk_size, quantized_current_size);

  ThreadState* state = ThreadState::Current();
  if (state->SweepForbidden())
    return false;
  DCHECK(!state->in_atomic_pause());
  DCHECK(state->IsAllocationAllowed());
  DCHECK_EQ(&state->Heap(), &ThreadState::FromObject(address)->Heap());

  // FIXME: Support shrink for large objects.
  // Don't shrink backings allocated on other threads.
  BasePage* page = PageFromObject(address);
  if (page->IsLargeObjectPage() || page->Arena()->GetThreadState() != state)
    return false;

  HeapObjectHeader* header = HeapObjectHeader::FromPayload(address);
  NormalPageArena* arena =
      static_cast<NormalPage*>(page)->ArenaForNormalPage();
  // Shrink the object only if the freed tail would form a block that is large
  // enough to be worth promptly freeing.
  // FIXME: Optimize the threshold size.
  if (quantized_current_size <= quantized_shrunk_size +
                                    sizeof(HeapObjectHeader) +
                                    sizeof(void*) * 32 &&
      !arena->IsObjectAllocatedAtAllocationPoint(header))
    return true;

  bool succeeded_at_allocation_point =
      arena->ShrinkObject(header, quantized_shrunk_size);
  if (succeeded_at_allocation_point)
    state->Heap().AllocationPointAdjusted(arena->ArenaIndex());
  return true;
}

bool HeapAllocator::ShrinkVectorBacking(void* address,
                                        size_t quantized_current_size,
                                        size_t quantized_shrunk_size) {
  return BackingShrink(address, quantized_current_size, quantized_shrunk_size);
}

bool HeapAllocator::ShrinkInlineVectorBacking(void* address,
                                              size_t quantized_current_size,
                                              size_t quantized_shrunk_size) {
  return BackingShrink(address, quantized_current_size, quantized_shrunk_size);
}

}  // namespace blink