// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_
#define GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_

#include <stdint.h>

#include "base/bind.h"
#include "base/macros.h"
#include "base/memory/scoped_vector.h"
#include "gpu/command_buffer/client/fenced_allocator.h"
#include "gpu/command_buffer/common/buffer.h"
#include "gpu/gpu_export.h"
namespace gpu {
class CommandBufferHelper;
// Manages a shared memory segment.
class GPU_EXPORT MemoryChunk {
public:
MemoryChunk(int32_t shm_id,
scoped_refptr<gpu::Buffer> shm,
CommandBufferHelper* helper,
const base::Closure& poll_callback);
~MemoryChunk();
// Gets the size of the largest free block that is available without waiting.
unsigned int GetLargestFreeSizeWithoutWaiting() {
return allocator_.GetLargestFreeSize();
}
// Gets the size of the largest free block that can be allocated if the
// caller can wait.
unsigned int GetLargestFreeSizeWithWaiting() {
return allocator_.GetLargestFreeOrPendingSize();
}
// Gets the size of the chunk.
unsigned int GetSize() const {
return static_cast<unsigned int>(shm_->size());
}
// The shared memory id for this chunk.
int32_t shm_id() const {
return shm_id_;
}
// Allocates a block of memory. If the buffer is out of directly available
// memory, this function may wait until memory that was freed "pending a
// token" can be re-used.
//
// Parameters:
// size: the size of the memory block to allocate.
//
// Returns:
// the pointer to the allocated memory block, or NULL if out of
// memory.
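//
// Illustrative sketch, not taken from a real call site (|chunk| and |size|
// are placeholders): a caller that must not block can consult the free
// sizes before allocating.
//
//   void* ptr = NULL;
//   if (chunk->GetLargestFreeSizeWithoutWaiting() >= size) {
//     ptr = chunk->Alloc(size);  // Can be satisfied without waiting.
//   } else if (chunk->GetLargestFreeSizeWithWaiting() >= size) {
//     ptr = chunk->Alloc(size);  // May wait for freed-pending-token memory.
//   }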
void* Alloc(unsigned int size) {
return allocator_.Alloc(size);
}
// Gets the offset to a memory block given the base memory and the address.
// It translates NULL to FencedAllocator::kInvalidOffset.
unsigned int GetOffset(void* pointer) {
return allocator_.GetOffset(pointer);
}
// Frees a block of memory.
//
// Parameters:
// pointer: the pointer to the memory block to free.
void Free(void* pointer) {
allocator_.Free(pointer);
}
// Frees a block of memory, pending the passage of a token. That memory won't
// be re-allocated until the token has passed through the command stream.
//
// Parameters:
// pointer: the pointer to the memory block to free.
// token: the token value to wait for before re-using the memory.
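//
// Illustrative sketch (assumes the token is obtained from the chunk's
// CommandBufferHelper via InsertToken(); |chunk|, |helper| and |size| are
// placeholders):
//
//   void* ptr = chunk->Alloc(size);
//   // ... issue commands that read from |ptr| ...
//   chunk->FreePendingToken(ptr, helper->InsertToken());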
void FreePendingToken(void* pointer, unsigned int token) {
allocator_.FreePendingToken(pointer, token);
}
// Frees any blocks whose tokens have passed.
void FreeUnused() {
allocator_.FreeUnused();
}
// Returns true if the pointer is in the range of this chunk.
bool IsInChunk(void* pointer) const {
return pointer >= shm_->memory() &&
pointer <
reinterpret_cast<const int8_t*>(shm_->memory()) + shm_->size();
}
// Returns true if any memory in this chunk is in use.
bool InUse() {
return allocator_.InUse();
}
size_t bytes_in_use() const {
return allocator_.bytes_in_use();
}
private:
int32_t shm_id_;
scoped_refptr<gpu::Buffer> shm_;
FencedAllocatorWrapper allocator_;
DISALLOW_COPY_AND_ASSIGN(MemoryChunk);
};

// Manages MemoryChunks.
class GPU_EXPORT MappedMemoryManager {
public:
enum MemoryLimit {
kNoLimit = 0,
};
// |unused_memory_reclaim_limit|: when this limit is exceeded, pending memory
// is reclaimed before more memory is allocated.
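//
// Illustrative sketch (the limit is an arbitrary example; |helper| and
// |poll_callback| are placeholders):
//
//   MappedMemoryManager manager(helper, poll_callback, 1024 * 1024);
//   // Once roughly 1 MiB of memory freed pending tokens has accumulated,
//   // later allocations reclaim it before requesting more shared memory.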
MappedMemoryManager(CommandBufferHelper* helper,
const base::Closure& poll_callback,
size_t unused_memory_reclaim_limit);
~MappedMemoryManager();
unsigned int chunk_size_multiple() const {
return chunk_size_multiple_;
}
void set_chunk_size_multiple(unsigned int multiple) {
chunk_size_multiple_ = multiple;
}
// Allocates a block of memory.
// Parameters:
// size: size of memory to allocate.
// shm_id: pointer to variable to receive the shared memory id.
// shm_offset: pointer to variable to receive the shared memory offset.
// Returns:
// pointer to the allocated block of memory, or NULL on failure.
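//
// Illustrative sketch of the out-parameters (|mapped_memory| is a
// placeholder and the size is an arbitrary example):
//
//   int32_t shm_id = 0;
//   unsigned int shm_offset = 0;
//   void* ptr = mapped_memory->Alloc(64, &shm_id, &shm_offset);
//   // On success, |ptr| points |shm_offset| bytes into the shared memory
//   // buffer identified by |shm_id|.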
void* Alloc(
unsigned int size, int32_t* shm_id, unsigned int* shm_offset);
// Frees a block of memory.
//
// Parameters:
// pointer: the pointer to the memory block to free.
void Free(void* pointer);
// Frees a block of memory, pending the passage of a token. That memory won't
// be re-allocated until the token has passed through the command stream.
//
// Parameters:
// pointer: the pointer to the memory block to free.
// token: the token value to wait for before re-using the memory.
void FreePendingToken(void* pointer, int32_t token);
// Frees any shared memory that is not in use.
void FreeUnused();
// Used for testing
size_t num_chunks() const {
return chunks_.size();
}
size_t bytes_in_use() const {
size_t bytes_in_use = 0;
for (size_t ii = 0; ii < chunks_.size(); ++ii) {
MemoryChunk* chunk = chunks_[ii];
bytes_in_use += chunk->bytes_in_use();
}
return bytes_in_use;
}
// Used for testing
size_t allocated_memory() const {
return allocated_memory_;
}
private:
typedef ScopedVector<MemoryChunk> MemoryChunkVector;
// The size a chunk is rounded up to.
unsigned int chunk_size_multiple_;
CommandBufferHelper* helper_;
base::Closure poll_callback_;
MemoryChunkVector chunks_;
size_t allocated_memory_;
size_t max_free_bytes_;
DISALLOW_COPY_AND_ASSIGN(MappedMemoryManager);
};
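
// Example usage, as a sketch only: |helper| is an existing
// CommandBufferHelper, |poll_callback| is a client-supplied base::Closure,
// the allocation size is illustrative, and error handling is omitted.
//
//   MappedMemoryManager manager(helper, poll_callback,
//                               MappedMemoryManager::kNoLimit);
//   int32_t shm_id = 0;
//   unsigned int shm_offset = 0;
//   void* mem = manager.Alloc(1024, &shm_id, &shm_offset);
//   if (mem) {
//     // Fill |mem|, then issue commands that reference it through
//     // |shm_id| and |shm_offset|.
//     manager.FreePendingToken(mem, helper->InsertToken());
//   }
//   manager.FreeUnused();  // Optionally reclaim memory no longer in use.
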
}  // namespace gpu

#endif  // GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_