// Copyright 2016 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/metrics/persistent_sample_map.h"
#include <stddef.h>
#include <stdint.h>
#include <atomic>
#include <type_traits>
#include "base/check_op.h"
#include "base/containers/contains.h"
#include "base/metrics/histogram_base.h"
#include "base/metrics/histogram_macros.h"
#include "base/metrics/histogram_samples.h"
#include "base/metrics/persistent_histogram_allocator.h"
#include "base/metrics/sample_map_iterator.h"
#include "base/notreached.h"
#include "build/buildflag.h"
#if !BUILDFLAG(IS_NACL)
#include "base/debug/crash_logging.h"
#endif
namespace base {
using Count32 = HistogramBase::Count32;
using Sample32 = HistogramBase::Sample32;
namespace {
// This structure holds an entry for a PersistentSampleMap within a persistent
// memory allocator. The "id" must be unique across all maps held by an
// allocator or they will get attached to the wrong sample map.
//
// NOTE: This layout is persisted to (and shared via) memory owned by the
// allocator, potentially across processes and versions. Any change to the
// field layout must be accompanied by a bump of kPersistentTypeId below.
struct SampleRecord {
  // SHA1(SampleRecord): Increment this if structure changes!
  static constexpr uint32_t kPersistentTypeId = 0x8FE6A69F + 1;

  // Expected size for 32/64-bit check. Locks in the padded layout so a
  // mismatch between builds is caught at allocation time.
  static constexpr size_t kExpectedInstanceSize = 16;

  uint64_t id;                 // Unique identifier of owner.
  Sample32 value;              // The value for which this record holds a count.
  std::atomic<Count32> count;  // The count associated with the above value.

  // `count` may operate inter-process and so must be lock-free.
  static_assert(std::atomic<Count32>::is_always_lock_free);

  // For backwards compatibility, `std::atomic<Count>` and `Count` must have
  // the same memory layouts. If this ever changes, make sure to increment
  // `kPersistentTypeId` above.
  static_assert(std::is_standard_layout_v<std::atomic<Count32>>);
  static_assert(sizeof(std::atomic<Count32>) == sizeof(Count32));
  static_assert(alignof(std::atomic<Count32>) == alignof(Count32));
};
} // namespace
// Constructs a sample map identified by |id| whose per-sample records are
// stored through |allocator|. |meta| holds the shared sum/redundant-count
// metadata passed through to the HistogramSamples base class.
PersistentSampleMap::PersistentSampleMap(
    uint64_t id,
    PersistentHistogramAllocator* allocator,
    Metadata* meta)
    : HistogramSamples(id, meta), allocator_(allocator) {}
// Default destructor: persistent records outlive this object by design; only
// the local bookkeeping (|records_|, |sample_counts_|) is released.
PersistentSampleMap::~PersistentSampleMap() = default;
// Adds |count| occurrences of |value| to this map, updating the shared
// sum/count metadata as well.
void PersistentSampleMap::Accumulate(Sample32 value, Count32 count) {
  // The increment must be atomic: even if the caller holds a lock, a
  // separate process (unaware of that lock) may modify the same counter
  // concurrently through the shared persistent storage.
  std::atomic<Count32>* const counter = GetOrCreateSampleCountStorage(value);
  counter->fetch_add(count, std::memory_order_relaxed);
  IncreaseSumAndCount(int64_t{count} * value, count);
}
// Returns the current count recorded for |value|, or 0 if no record exists.
Count32 PersistentSampleMap::GetCount(Sample32 value) const {
  if (const std::atomic<Count32>* storage = GetSampleCountStorage(value)) {
    return storage->load(std::memory_order_relaxed);
  }
  return 0;
}
// Sums the counts of every known sample value.
Count32 PersistentSampleMap::TotalCount() const {
  // Make sure all samples have been loaded before trying to iterate over the
  // map.
  ImportSamples();

  Count32 total = 0;
  for (const auto& [value, count_ptr] : sample_counts_) {
    total += count_ptr->load(std::memory_order_relaxed);
  }
  return total;
}
std::unique_ptr<SampleCountIterator> PersistentSampleMap::Iterator() const {
// Make sure all samples have been loaded before trying to iterate over the
// map.
ImportSamples();
return std::make_unique<SampleMapIterator<SampleToCountMap, false>>(
sample_counts_);
}
std::unique_ptr<SampleCountIterator> PersistentSampleMap::ExtractingIterator() {
// Make sure all samples have been loaded before trying to iterate over the
// map.
ImportSamples();
return std::make_unique<SampleMapIterator<SampleToCountMap, true>>(
sample_counts_);
}
bool PersistentSampleMap::IsDefinitelyEmpty() const {
  // Not implemented.
  // NOTE(review): deliberately fatal — presumably callers never query
  // emptiness on persistent maps, since records may exist in shared memory
  // that this object has not yet imported. Confirm against callers before
  // implementing.
  NOTREACHED();
}
// static
// Advances |iterator| to the next SampleRecord in the allocator, reporting
// the owning map's id and the sample value through the out-params. Returns
// the record's reference, or 0 when no records remain.
PersistentMemoryAllocator::Reference
PersistentSampleMap::GetNextPersistentRecord(
    PersistentMemoryAllocator::Iterator& iterator,
    uint64_t* sample_map_id,
    Sample32* value) {
  const SampleRecord* const rec = iterator.GetNextOfObject<SampleRecord>();
  if (rec == nullptr) {
    return 0;
  }
  // Surface the identifying fields so the caller can route this record to
  // the sample map that owns it.
  *sample_map_id = rec->id;
  *value = rec->value;
  return iterator.GetAsReference(rec);
}
// static
// Allocates and publishes a new zero-count record for (|sample_map_id|,
// |value|) in |allocator|. Returns the record's reference, or 0 on failure.
PersistentMemoryAllocator::Reference
PersistentSampleMap::CreatePersistentRecord(
    PersistentMemoryAllocator* allocator,
    uint64_t sample_map_id,
    Sample32 value) {
  SampleRecord* const record = allocator->New<SampleRecord>();
  if (!record) {
    // Allocation failed. Running out of space is expected eventually; any
    // other failure implies corruption, which is reported.
    if (!allocator->IsFull()) {
      const bool corrupt = allocator->IsCorrupt();
#if !BUILDFLAG(IS_NACL)
      // TODO(crbug.com/40064026): Remove.
      SCOPED_CRASH_KEY_BOOL("PersistentSampleMap", "corrupted", corrupt);
#endif  // !BUILDFLAG(IS_NACL)
      DUMP_WILL_BE_NOTREACHED() << "corrupt=" << corrupt;
    }
    return 0;
  }

  record->id = sample_map_id;
  record->value = value;
  record->count = 0;
  const PersistentMemoryAllocator::Reference ref =
      allocator->GetAsReference(record);
  // Making the record iterable is what publishes it to other processes.
  allocator->MakeIterable(ref);
  return ref;
}
// Adds (or subtracts, per |op|) every bucket from |iter| into this map.
// Returns false if |iter| contains a bucket wider than a single value, which
// a sparse histogram cannot represent.
bool PersistentSampleMap::AddSubtractImpl(SampleCountIterator* iter,
                                          Operator op) {
  Sample32 min;
  int64_t max;
  Count32 count;
  for (; !iter->Done(); iter->Next()) {
    iter->Get(&min, &max, &count);
    if (count == 0) {
      continue;
    }
    if (int64_t{min} + 1 != max) {
      return false;  // SparseHistogram only supports bucket with size 1.
    }
    // We have to do the following atomically, because even if the caller is
    // using a lock, a separate process (that is not aware of this lock) may
    // concurrently modify the value. Relaxed ordering suffices — and matches
    // Accumulate()/GetCount()/TotalCount(), which all access these counters
    // relaxed: only the atomicity of the read-modify-write matters here, as
    // no other data is published through these counters, and a lone seq_cst
    // RMW cannot synchronize with the relaxed accesses anyway.
    GetOrCreateSampleCountStorage(min)->fetch_add(
        (op == HistogramSamples::ADD) ? count : -count,
        std::memory_order_relaxed);
  }
  return true;
}
// Returns the counter storage for |value|, importing it from persistent
// memory if necessary. Returns null if no record for |value| exists.
std::atomic<Count32>* PersistentSampleMap::GetSampleCountStorage(
    Sample32 value) const {
  // Fast path: |value| has already been seen and cached locally.
  const auto found = sample_counts_.find(value);
  if (found != sample_counts_.end()) {
    return found->second.get();
  }
  // Slow path: scan persistent storage for a record holding |value|.
  return ImportSamples(value);
}
// Returns the counter storage for |value|, creating a persistent record for
// it if none exists yet. Never returns null.
std::atomic<Count32>* PersistentSampleMap::GetOrCreateSampleCountStorage(
    Sample32 value) {
  // Reuse any existing storage.
  if (std::atomic<Count32>* existing = GetSampleCountStorage(value)) {
    return existing;
  }

  // No record exists; create one in persistent memory. |records_| will have
  // been initialized by the GetSampleCountStorage() call above.
  CHECK(records_);
  const PersistentMemoryAllocator::Reference ref = records_->CreateNew(value);
  if (!ref) {
    // A new record could not be created: the underlying allocator is full or
    // corrupt. Fall back to a heap-allocated counter instead. This sample
    // will not be persistent, will not be shared, and will leak... but it's
    // better than crashing.
    std::atomic<Count32>* const heap_counter = new std::atomic<Count32>(0);
    sample_counts_[value] = heap_counter;
    return heap_counter;
  }

  // A race condition between two independent processes (i.e. two independent
  // histogram objects sharing the same sample data) could cause two of the
  // above records to be created. The allocator, however, forces a strict
  // ordering on iterable objects so use the import method to actually add the
  // just-created record. This ensures that all PersistentSampleMap objects
  // will always use the same record, whichever was first made iterable.
  // Thread-safety within a process where multiple threads use the same
  // histogram object is delegated to the controlling histogram object which,
  // for sparse histograms, is a lock object.
  std::atomic<Count32>* const imported = ImportSamples(value);
  DCHECK(imported);
  return imported;
}
// Returns the records object for this map, lazily fetching it from
// |allocator_| on first use.
PersistentSampleMapRecords* PersistentSampleMap::GetRecords() const {
  // Lazy fetch avoids conflicts when duplicate histograms are created by race
  // conditions: use of a histogram, and thus a call to this method, won't
  // occur until after the histogram has been de-dup'd, so only the surviving
  // object ever grabs the records.
  if (records_) {
    return records_.get();
  }
  records_ = allocator_->CreateSampleMapRecords(id());
  return records_.get();
}
// Imports unseen sample records from persistent memory into |sample_counts_|.
// If |until_value| is given, stops as soon as a record for that value is
// found and returns its counter; otherwise imports everything and returns
// null. Marked const because it only mutates the mutable caching members.
std::atomic<Count32>* PersistentSampleMap::ImportSamples(
    std::optional<Sample32> until_value) const {
  std::vector<PersistentMemoryAllocator::Reference> refs;
  PersistentSampleMapRecords* records = GetRecords();
  while (!(refs = records->GetNextRecords(until_value)).empty()) {
    // GetNextRecords() returns a list of new unseen records belonging to this
    // map. Iterate through them all and store them internally. Note that if
    // |until_value| was found, it will be the last element in |refs|.
    for (auto ref : refs) {
      SampleRecord* const record = records->GetAsObject<SampleRecord>(ref);
      if (!record) {
        // Reference did not resolve to a valid record; skip it.
        continue;
      }
      DCHECK_EQ(id(), record->id);
      // Check if the record's value is already known; insert() is a no-op if
      // the key is present.
      const auto ret = sample_counts_.insert({record->value, &record->count});
      if (!ret.second) {
        // Yes: Ignore it; it's a duplicate caused by a race condition -- see
        // code & comment in GetOrCreateSampleCountStorage() for details.
        // Check that nothing ever operated on the duplicate record.
        DCHECK_EQ(0, record->count);
      }
      // Check if it's the value being searched for and, if so, stop here.
      // Because race conditions can cause multiple records for a single value,
      // be sure to return the first one found.
      if (until_value.has_value() && record->value == until_value.value()) {
        // Ensure that this was the last value in |refs|.
        CHECK_EQ(refs.back(), ref);
        return &record->count;
      }
    }
  }
  // Either everything was imported, or |until_value| was never found.
  return nullptr;
}
} // namespace base