// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "third_party/blink/renderer/platform/heap/gc_info.h"
#include "base/bits.h"
#include "third_party/blink/renderer/platform/heap/heap.h"
#include "third_party/blink/renderer/platform/wtf/std_lib_extras.h"
namespace blink {
namespace {
constexpr size_t kEntrySize = sizeof(GCInfo*);

// Allocation and resizing are built around the following invariants.
static_assert(base::bits::IsPowerOfTwo(kEntrySize),
              "GCInfoTable entries size must be power of "
              "two");
static_assert(
    0 == base::kPageAllocationGranularity % base::kSystemPageSize,
    "Page allocation granularity must be a multiple of the system page size");

constexpr size_t ComputeInitialTableLimit() {
  // (Light) experimentation suggests that Blink doesn't need more than this
  // while handling content on popular web properties.
  constexpr size_t kInitialWantedLimit = 512;

  // Different OSes have different page sizes, so we have to choose the maximum
  // of memory wanted and OS page size.
  constexpr size_t memory_wanted = kInitialWantedLimit * kEntrySize;
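  // For example, with 8-byte GCInfo pointers the wanted limit corresponds to
  // 512 * 8 = 4096 bytes; on a platform whose allocation granularity is 64 KiB
  // this rounds up to 65536 bytes, i.e. an initial limit of 8192 entries.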
  return base::RoundUpToPageAllocationGranularity(memory_wanted) / kEntrySize;
}

constexpr size_t MaxTableSize() {
  constexpr size_t kMaxTableSize = base::RoundUpToPageAllocationGranularity(
      GCInfoTable::kMaxIndex * kEntrySize);
  return kMaxTableSize;
}

}  // namespace

GCInfoTable* GCInfoTable::global_table_ = nullptr;

void GCInfoTable::CreateGlobalTable() {
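  // DEFINE_STATIC_LOCAL creates a function-local static that is intentionally
  // leaked, so the table stays alive for the remainder of the process.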
  DEFINE_STATIC_LOCAL(GCInfoTable, table, ());
  global_table_ = &table;
}

void GCInfoTable::EnsureGCInfoIndex(const GCInfo* gc_info,
                                    uint32_t* gc_info_index_slot) {
  DCHECK(gc_info);
  DCHECK(gc_info_index_slot);

  // Ensuring a new index involves current index adjustment as well
  // as potentially resizing the table, both operations that require
  // a lock.
  MutexLocker locker(table_mutex_);

  // If more than one thread ends up allocating a slot for
  // the same GCInfo, have later threads reuse the slot
  // allocated by the first.
  if (*gc_info_index_slot)
    return;

  uint32_t gc_info_index = ++current_index_;
  CHECK_LT(gc_info_index, GCInfoTable::kMaxIndex);
  if (current_index_ >= limit_)
    Resize();

  table_[gc_info_index] = gc_info;
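  // Publish the index with a release store so that the table entry written
  // above is visible to threads that acquire-load the slot without holding
  // table_mutex_.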
  ReleaseStore(gc_info_index_slot, gc_info_index);
}

void GCInfoTable::Resize() {
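  // Only the table's committed size changes here: the full address range was
  // reserved up front in the constructor, and each call doubles the committed
  // portion (or commits the initial chunk on first use).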
  const size_t new_limit = (limit_) ? 2 * limit_ : ComputeInitialTableLimit();
  const size_t old_committed_size = limit_ * kEntrySize;
  const size_t new_committed_size = new_limit * kEntrySize;
  CHECK(table_);
  CHECK_EQ(0u, new_committed_size % base::kPageAllocationGranularity);
  CHECK_GE(MaxTableSize(), limit_ * kEntrySize);

  // Recommitting and zapping assumes byte-addressable storage.
  uint8_t* const current_table_end =
      reinterpret_cast<uint8_t*>(table_) + old_committed_size;
  const size_t table_size_delta = new_committed_size - old_committed_size;

  // Commit the new size and allow read/write.
  // TODO(ajwong): SetSystemPagesAccess should be part of RecommitSystemPages
  // to avoid having two calls here.
  bool ok = base::SetSystemPagesAccess(current_table_end, table_size_delta,
                                       base::PageReadWrite);
  CHECK(ok);
  ok = base::RecommitSystemPages(current_table_end, table_size_delta,
                                 base::PageReadWrite);
  CHECK(ok);

#if DCHECK_IS_ON()
  // Check that newly-committed memory is zero-initialized.
  for (size_t i = 0; i < (table_size_delta / sizeof(uintptr_t)); ++i) {
    DCHECK(!reinterpret_cast<uintptr_t*>(current_table_end)[i]);
  }
#endif  // DCHECK_IS_ON()

  limit_ = new_limit;
}

GCInfoTable::GCInfoTable() {
  CHECK(!table_);
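  // Reserve enough address space up front to hold the table at its maximum
  // size, but leave it inaccessible; Resize() commits pages on demand. Since
  // the full range is reserved, the table never has to be reallocated or
  // moved as it grows.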
  table_ = reinterpret_cast<GCInfo const**>(base::AllocPages(
      nullptr, MaxTableSize(), base::kPageAllocationGranularity,
      base::PageInaccessible));
  CHECK(table_);
  Resize();
}

#if DCHECK_IS_ON()
void AssertObjectHasGCInfo(const void* payload, size_t gc_info_index) {
  HeapObjectHeader::CheckFromPayload(payload);
#if !defined(COMPONENT_BUILD)
  // On component builds we cannot compare the gcInfos as they are statically
  // defined in each of the components and hence will not match.
  DCHECK_EQ(HeapObjectHeader::FromPayload(payload)->GcInfoIndex(),
            gc_info_index);
#endif
}
#endif

}  // namespace blink