1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312
|
// Copyright 2017 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "gpu/command_buffer/client/client_discardable_manager.h"
#include "base/atomic_sequence_num.h"
#include "base/containers/contains.h"
#include "base/containers/flat_set.h"
#include "base/numerics/safe_conversions.h"
#include "base/system/sys_info.h"
#include "build/build_config.h"
namespace gpu {
namespace {
// Stores a set of offsets, initially 0 to |element_count_|. Allows callers to
// take and return offsets from the set. Internally stores the offsets as a set
// of ranges. This means that in the worst case (every other offset taken), the
// set will use |element_count_| uints, but should typically use fewer.
class FreeOffsetSet {
 public:
  // Creates a new set, containing 0 to |element_count|.
  explicit FreeOffsetSet(uint32_t element_count);

  FreeOffsetSet(const FreeOffsetSet&) = delete;
  FreeOffsetSet& operator=(const FreeOffsetSet&) = delete;

  // Returns true if the set contains at least one element.
  bool HasFreeOffset() const;

  // Returns true if any element from the set has been taken.
  bool HasUsedOffset() const;

  // Takes a free offset from the set. Should only be called if HasFreeOffset().
  uint32_t TakeFreeOffset();

  // Returns an offset to the set.
  void ReturnFreeOffset(uint32_t offset);

 private:
  // A half-open range [start, end) of free offsets.
  struct FreeRange {
    uint32_t start;
    uint32_t end;
  };
  // Orders ranges by their start offset; ranges never overlap, so this is a
  // strict total order over the stored ranges.
  struct CompareFreeRanges {
    bool operator()(const FreeRange& a, const FreeRange& b) const {
      return a.start < b.start;
    }
  };

  const uint32_t element_count_;
  base::flat_set<FreeRange, CompareFreeRanges> free_ranges_;
};
FreeOffsetSet::FreeOffsetSet(uint32_t element_count)
    : element_count_(element_count) {
  // Initially every offset in [0, element_count) is free, represented as a
  // single contiguous range.
  free_ranges_.insert(FreeRange{0, element_count_});
}
bool FreeOffsetSet::HasFreeOffset() const {
  // At least one stored range means at least one free offset.
  return free_ranges_.begin() != free_ranges_.end();
}
bool FreeOffsetSet::HasUsedOffset() const {
  // The set is fully free iff it consists of exactly one range spanning
  // [0, element_count_); anything else means some offset was taken.
  const bool fully_free = free_ranges_.size() == 1 &&
                          free_ranges_.begin()->start == 0 &&
                          free_ranges_.begin()->end == element_count_;
  return !fully_free;
}
uint32_t FreeOffsetSet::TakeFreeOffset() {
  DCHECK(HasFreeOffset());

  // Take the lowest free offset, which is the start of the first range.
  auto first = free_ranges_.begin();
  const uint32_t taken = first->start;
  const uint32_t remaining_start = taken + 1;
  const uint32_t remaining_end = first->end;
  free_ranges_.erase(first);
  // Re-insert whatever is left of the range, if anything.
  if (remaining_start < remaining_end)
    free_ranges_.insert(FreeRange{remaining_start, remaining_end});
  return taken;
}
// Returns a previously-taken |offset| to the set, merging it with any
// adjacent free range(s) so the internal representation stays minimal.
void FreeOffsetSet::ReturnFreeOffset(uint32_t offset) {
  // Represent the returned offset as the single-element range
  // [offset, offset + 1).
  FreeRange new_range{offset, offset + 1};

  // Find the FreeRange directly before/after our new range.
  // lower_bound yields the first range starting at or after |offset|;
  // assumes |offset| is not already free, so no range starts exactly there.
  auto next_range = free_ranges_.lower_bound(new_range);
  auto prev_range = free_ranges_.end();
  if (next_range != free_ranges_.begin()) {
    prev_range = std::prev(next_range);
  }

  // Collapse ranges if possible.
  // If the previous range ends exactly where ours starts, absorb it.
  if (prev_range != free_ranges_.end() && prev_range->end == new_range.start) {
    new_range.start = prev_range->start;
    // Erase invalidates the next_range iterator, so re-acquire it.
    next_range = free_ranges_.erase(prev_range);
  }
  // If the following range starts exactly where ours ends, absorb it too.
  if (next_range != free_ranges_.end() && next_range->start == new_range.end) {
    new_range.end = next_range->end;
    free_ranges_.erase(next_range);
  }

  free_ranges_.insert(new_range);
}
// Returns the size of the allocation which ClientDiscardableManager will
// sub-allocate from. This should be at least as big as the minimum shared
// memory allocation size.
size_t AllocationSize() {
  // Never sub-allocate from a pool smaller than 2K.
  const size_t kMinAllocationSize = 2048;
#if BUILDFLAG(IS_NACL)
  // base::SysInfo isn't available under NaCl.
  const size_t granularity = getpagesize();
#else
  const size_t granularity = base::SysInfo::VMAllocationGranularity();
#endif
  return std::max(kMinAllocationSize, granularity);
}
// Produces a process-unique, monotonically increasing handle Id.
ClientDiscardableHandle::Id GetNextHandleId() {
  static base::AtomicSequenceNumber g_next_handle_id;
  // AtomicSequenceNumber is 0-based; shift by one so that 0 stays reserved
  // as the invalid Id.
  const auto raw_id = g_next_handle_id.GetNext() + 1;
  return ClientDiscardableHandle::Id::FromUnsafeValue(raw_id);
}
} // namespace
// One shared-memory allocation, sub-divided into |element_count| fixed-size
// slots tracked by |free_offsets|.
struct ClientDiscardableManager::Allocation {
  // Marked explicit: single-argument constructors should not allow implicit
  // conversion from uint32_t.
  explicit Allocation(uint32_t element_count) : free_offsets(element_count) {}

  // The shared-memory buffer backing this allocation's handles.
  scoped_refptr<Buffer> buffer;
  // Transfer buffer id for |buffer|; 0 until the buffer is created.
  int32_t shm_id = 0;
  // Tracks which slots within |buffer| are currently free.
  FreeOffsetSet free_offsets;
};
// Sizes the backing allocations once at construction, based on the system's
// shared-memory allocation granularity.
ClientDiscardableManager::ClientDiscardableManager()
    : allocation_size_(AllocationSize()) {}

ClientDiscardableManager::~ClientDiscardableManager() = default;
// Creates a new discardable handle backed by shared memory sub-allocated from
// |command_buffer|. Returns an invalid Id on failure (e.g. lost context).
ClientDiscardableHandle::Id ClientDiscardableManager::CreateHandle(
    CommandBuffer* command_buffer) {
  scoped_refptr<Buffer> buffer;
  int32_t shm_id = 0;
  uint32_t offset = 0;
  if (!FindAllocation(command_buffer, &buffer, &shm_id, &offset)) {
    // This can fail if we've lost context, return an invalid Id.
    return ClientDiscardableHandle::Id();
  }

  // Convert the element offset into a byte offset within the buffer.
  const uint32_t byte_offset =
      base::checked_cast<uint32_t>(offset * element_size_);
  ClientDiscardableHandle handle(std::move(buffer), byte_offset, shm_id);
  const ClientDiscardableHandle::Id handle_id = GetNextHandleId();
  handles_.emplace(handle_id, std::move(handle));
  return handle_id;
}
// Attempts to lock the handle; returns false for unknown ids or when the
// handle itself refuses the lock.
bool ClientDiscardableManager::LockHandle(
    ClientDiscardableHandle::Id handle_id) {
  auto it = handles_.find(handle_id);
  return it != handles_.end() && it->second.Lock();
}
// Removes the handle from the live set. The handle's backing storage is not
// reclaimed immediately; it is queued until the service side is done with it.
void ClientDiscardableManager::FreeHandle(
    ClientDiscardableHandle::Id handle_id) {
  auto it = handles_.find(handle_id);
  if (it == handles_.end())
    return;

  pending_handles_.push(it->second);
  handles_.erase(it);
}
// True iff |handle_id| refers to a live (non-freed) handle.
bool ClientDiscardableManager::HandleIsValid(
    ClientDiscardableHandle::Id handle_id) const {
  return handles_.find(handle_id) != handles_.end();
}
// Looks up the handle for |handle_id|; returns a default-constructed
// (invalid) handle when the id is unknown.
ClientDiscardableHandle ClientDiscardableManager::GetHandle(
    ClientDiscardableHandle::Id handle_id) {
  auto it = handles_.find(handle_id);
  if (it != handles_.end())
    return it->second;
  return ClientDiscardableHandle();
}
// Returns true if the handle is gone (unknown id) or reusable on the service
// side; in the latter case the local bookkeeping entry is dropped as well.
bool ClientDiscardableManager::HandleIsDeleted(
    ClientDiscardableHandle::Id handle_id) {
  auto it = handles_.find(handle_id);
  if (it == handles_.end())
    return true;
  if (!it->second.CanBeReUsed())
    return false;

  // The service is done with this handle; erase our record of it.
  handles_.erase(it);
  return true;
}
// Tracing-only variant of HandleIsDeleted that never mutates |handles_|.
bool ClientDiscardableManager::HandleIsDeletedForTracing(
    ClientDiscardableHandle::Id handle_id) const {
  auto it = handles_.find(handle_id);
  return it == handles_.end() || it->second.IsDeletedForTracing();
}
// Finds (or creates) space for one handle. On success fills |buffer|,
// |shm_id| and |offset| and returns true; returns false only when a new
// transfer buffer cannot be created (e.g. lost context).
bool ClientDiscardableManager::FindAllocation(CommandBuffer* command_buffer,
                                              scoped_refptr<Buffer>* buffer,
                                              int32_t* shm_id,
                                              uint32_t* offset) {
  // First recycle any freed handles whose service-side use has finished.
  CheckPending(command_buffer);

  if (FindExistingAllocation(command_buffer, buffer, shm_id, offset))
    return true;

  // Before allocating more space, sweep the live handles for entries the
  // service has already deleted. Even if the sweep frees something, the freed
  // slot may have been in an allocation that got destroyed entirely, so a
  // second search is still not guaranteed to succeed.
  if (CheckDeleted(command_buffer) &&
      FindExistingAllocation(command_buffer, buffer, shm_id, offset)) {
    return true;
  }

  // Nothing free anywhere: create a fresh backing allocation.
  auto new_allocation = std::make_unique<Allocation>(elements_per_allocation_);
  new_allocation->buffer = command_buffer->CreateTransferBuffer(
      base::checked_cast<uint32_t>(allocation_size_), &new_allocation->shm_id);
  if (!new_allocation->buffer)
    return false;

  *offset = new_allocation->free_offsets.TakeFreeOffset();
  *shm_id = new_allocation->shm_id;
  *buffer = new_allocation->buffer;
  allocations_.push_back(std::move(new_allocation));
  return true;
}
// Scans the existing allocations for a free slot. On success fills the out
// parameters and returns true; returns false when every allocation is full.
bool ClientDiscardableManager::FindExistingAllocation(
    CommandBuffer* command_buffer,
    scoped_refptr<Buffer>* buffer,
    int32_t* shm_id,
    uint32_t* offset) {
  for (auto& candidate : allocations_) {
    if (!candidate->free_offsets.HasFreeOffset())
      continue;

    *offset = candidate->free_offsets.TakeFreeOffset();
    *shm_id = candidate->shm_id;
    *buffer = candidate->buffer;
    return true;
  }
  return false;
}
// Returns |handle|'s slot to its owning allocation. If that allocation
// becomes entirely unused, its transfer buffer is destroyed and the
// allocation is dropped.
void ClientDiscardableManager::ReturnAllocation(
    CommandBuffer* command_buffer,
    const ClientDiscardableHandle& handle) {
  for (auto it = allocations_.begin(); it != allocations_.end(); ++it) {
    Allocation* allocation = it->get();
    if (allocation->shm_id != handle.shm_id())
      continue;

    // Convert the handle's byte offset back into an element offset.
    allocation->free_offsets.ReturnFreeOffset(
        static_cast<uint32_t>(handle.byte_offset() / element_size_));

    if (!allocation->free_offsets.HasUsedOffset()) {
      // No slot in this allocation is in use any more; release it.
      command_buffer->DestroyTransferBuffer(allocation->shm_id);
      allocations_.erase(it);
    }
    // Each shm_id identifies exactly one allocation, so stop scanning once it
    // has been handled (previously the loop kept iterating needlessly when
    // the allocation still had used slots).
    return;
  }
}
// Drains the front of the pending queue, returning to their allocations all
// freed handles that the service has finished with. Stops at the first
// handle that is still in use (the queue preserves free order).
void ClientDiscardableManager::CheckPending(CommandBuffer* command_buffer) {
  while (!pending_handles_.empty() &&
         pending_handles_.front().CanBeReUsed()) {
    ReturnAllocation(command_buffer, pending_handles_.front());
    pending_handles_.pop();
  }
}
// Sweeps all live handles and reclaims any the service has already deleted.
// Returns true if at least one handle was reclaimed.
bool ClientDiscardableManager::CheckDeleted(CommandBuffer* command_buffer) {
  bool any_freed = false;
  auto it = handles_.begin();
  while (it != handles_.end()) {
    if (!it->second.CanBeReUsed()) {
      ++it;
      continue;
    }
    ReturnAllocation(command_buffer, it->second);
    // erase() returns the iterator following the removed element.
    it = handles_.erase(it);
    any_freed = true;
  }
  return any_freed;
}
} // namespace gpu
|