// Copyright 2025 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef GPU_COMMAND_BUFFER_SERVICE_GRAPHITE_SHARED_CONTEXT_H_
#define GPU_COMMAND_BUFFER_SERVICE_GRAPHITE_SHARED_CONTEXT_H_
#include <atomic>
#include <chrono>
#include <cstddef>
#include <memory>
#include <optional>

#include "base/functional/callback.h"
#include "base/memory/raw_ptr.h"
#include "base/synchronization/lock.h"
#include "base/threading/platform_thread.h"
#include "gpu/gpu_gles2_export.h"
#include "third_party/skia/include/core/SkImage.h"
#include "third_party/skia/include/core/SkSurface.h"
#include "third_party/skia/include/gpu/graphite/GraphiteTypes.h"
#include "third_party/skia/include/gpu/graphite/Recorder.h"
class SkTraceMemoryDump;
// Forward declarations for Skia Graphite types that are only used by
// pointer/reference in this header. `Context` is forward-declared here as
// well: it is named at lines below (constructor parameter and the
// `graphite_context_` member) but was previously only available through
// transitive includes.
namespace skgpu::graphite {
class BackendTexture;
class Context;
struct InsertRecordingInfo;
class PrecompileContext;
class Recorder;
struct RecorderOptions;
}  // namespace skgpu::graphite
namespace gpu {
class GpuProcessShmCount;
// This is a thread safe wrapper class to skgpu::graphite::Context. In order to
// support multi-threading, locks are used to ensure thread safety in
// skgpu::graphite::Context. All clients need to call the wrapper functions in
// GraphiteSharedContext. Only GraphiteSharedContext can communicate with
// skgpu::graphite::Context directly. If |is_thread_safe| is false, the locks
// are equivalent to no-ops.
class GPU_GLES2_EXPORT GraphiteSharedContext {
public:
// Invoked when an async readback below completes; matches the signature of
// the callback Skia expects for its asyncRescaleAndReadPixels* family.
using SkImageReadPixelsCallback = base::OnceCallback<
void(void* ctx, std::unique_ptr<const SkSurface::AsyncReadResult>)>;
using FlushCallback = base::RepeatingCallback<void()>;
// |graphite_context| is owned by this wrapper (stored as a const
// unique_ptr member). |use_shader_cache_shm_count| is borrowed, not owned.
// When |is_thread_safe| is true, an internal base::Lock is created (see
// |lock_|) and IsThreadSafe() returns true. |backend_flush_callback|, when
// non-null, is the hook run by submitAndFlushBackend().
GraphiteSharedContext(
std::unique_ptr<skgpu::graphite::Context> graphite_context,
GpuProcessShmCount* use_shader_cache_shm_count,
bool is_thread_safe,
FlushCallback backend_flush_callback = FlushCallback());
// Not copyable or movable: the instance owns the lock and the underlying
// context that all clients share.
GraphiteSharedContext(const GraphiteSharedContext&) = delete;
GraphiteSharedContext(GraphiteSharedContext&&) = delete;
GraphiteSharedContext& operator=(const GraphiteSharedContext&) = delete;
GraphiteSharedContext& operator=(GraphiteSharedContext&&) = delete;
~GraphiteSharedContext();
// True iff the constructor was passed |is_thread_safe| == true, i.e. the
// optional |lock_| was created.
bool IsThreadSafe() const { return !!lock_; }
// Wrapper function implementations for skgpu::graphite::Context
skgpu::BackendApi backend() const;
std::unique_ptr<skgpu::graphite::Recorder> makeRecorder(
const skgpu::graphite::RecorderOptions& = {});
std::unique_ptr<skgpu::graphite::PrecompileContext> makePrecompileContext();
bool insertRecording(const skgpu::graphite::InsertRecordingInfo& info);
void submit(skgpu::graphite::SyncToCpu = skgpu::graphite::SyncToCpu::kNo);
// The difference between this and submit() is that it will trigger the
// provided backend_flush_callback in addition to calling submit(). This is
// needed because on some backend such as D3D11 we could enable a delayed
// flush toggle. In that case, submit() won't send the commands to the GPU
// immediately and require an explicit flush.
void submitAndFlushBackend(
skgpu::graphite::SyncToCpu = skgpu::graphite::SyncToCpu::kNo);
bool hasUnfinishedGpuWork() const;
// Async readback wrappers. Each mirrors the corresponding
// skgpu::graphite::Context entry point, taking either an SkImage or an
// SkSurface source; |callback| is run with |context| when the pixels are
// available.
void asyncRescaleAndReadPixels(const SkImage* src,
const SkImageInfo& dstImageInfo,
const SkIRect& srcRect,
SkImage::RescaleGamma rescaleGamma,
SkImage::RescaleMode rescaleMode,
SkImageReadPixelsCallback callback,
SkImage::ReadPixelsContext context);
void asyncRescaleAndReadPixels(const SkSurface* src,
const SkImageInfo& dstImageInfo,
const SkIRect& srcRect,
SkImage::RescaleGamma rescaleGamma,
SkImage::RescaleMode rescaleMode,
SkImageReadPixelsCallback callback,
SkImage::ReadPixelsContext context);
// As above, but also submits; the bool return presumably reports whether
// the readback could be issued — confirm against the .cc implementation.
bool asyncRescaleAndReadPixelsAndSubmit(const SkImage* src,
const SkImageInfo& dstImageInfo,
const SkIRect& srcRect,
SkImage::RescaleGamma rescaleGamma,
SkImage::RescaleMode rescaleMode,
SkImageReadPixelsCallback callback,
SkImage::ReadPixelsContext context);
bool asyncRescaleAndReadPixelsAndSubmit(const SkSurface* src,
const SkImageInfo& dstImageInfo,
const SkIRect& srcRect,
SkImage::RescaleGamma rescaleGamma,
SkImage::RescaleMode rescaleMode,
SkImageReadPixelsCallback callback,
SkImage::ReadPixelsContext context);
// Planar (YUV420 / YUVA420) variants of the async readback above.
void asyncRescaleAndReadPixelsYUV420(const SkImage* src,
SkYUVColorSpace yuvColorSpace,
sk_sp<SkColorSpace> dstColorSpace,
const SkIRect& srcRect,
const SkISize& dstSize,
SkImage::RescaleGamma rescaleGamma,
SkImage::RescaleMode rescaleMode,
SkImageReadPixelsCallback callback,
SkImage::ReadPixelsContext context);
void asyncRescaleAndReadPixelsYUV420(const SkSurface* src,
SkYUVColorSpace yuvColorSpace,
sk_sp<SkColorSpace> dstColorSpace,
const SkIRect& srcRect,
const SkISize& dstSize,
SkImage::RescaleGamma rescaleGamma,
SkImage::RescaleMode rescaleMode,
SkImageReadPixelsCallback callback,
SkImage::ReadPixelsContext context);
void asyncRescaleAndReadPixelsYUVA420(const SkImage* src,
SkYUVColorSpace yuvColorSpace,
sk_sp<SkColorSpace> dstColorSpace,
const SkIRect& srcRect,
const SkISize& dstSize,
SkImage::RescaleGamma rescaleGamma,
SkImage::RescaleMode rescaleMode,
SkImageReadPixelsCallback callback,
SkImage::ReadPixelsContext context);
void asyncRescaleAndReadPixelsYUVA420(const SkSurface* src,
SkYUVColorSpace yuvColorSpace,
sk_sp<SkColorSpace> dstColorSpace,
const SkIRect& srcRect,
const SkISize& dstSize,
SkImage::RescaleGamma rescaleGamma,
SkImage::RescaleMode rescaleMode,
SkImageReadPixelsCallback callback,
SkImage::ReadPixelsContext context);
void checkAsyncWorkCompletion();
void deleteBackendTexture(const skgpu::graphite::BackendTexture&);
void freeGpuResources();
void performDeferredCleanup(std::chrono::milliseconds msNotUsed);
// TODO(crbug.com/407874799): Some of the methods below (maybe above as well)
// may be safe to use without locks. Review and delete unneeded locks in
// those methods once we start to run with |is_thread_safe| set to true.
size_t currentBudgetedBytes() const;
size_t currentPurgeableBytes() const;
size_t maxBudgetedBytes() const;
void setMaxBudgetedBytes(size_t bytes);
void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const;
bool isDeviceLost() const;
int maxTextureSize() const;
bool supportsProtectedContent() const;
skgpu::GpuStatsFlags supportedGpuStats() const;
private:
// RAII helper (defined in the .cc) that acquires |lock_| when present.
class AutoLock;
// Lock-free bodies shared by the public submit/insert entry points.
bool InsertRecordingImpl(const skgpu::graphite::InsertRecordingInfo&);
bool SubmitImpl(skgpu::graphite::SyncToCpu);
void SubmitAndFlushBackendImpl(skgpu::graphite::SyncToCpu);
// The lock for protecting skgpu::graphite::Context.
// Valid only when |is_thread_safe| is set to true in Ctor.
mutable std::optional<base::Lock> lock_;
// This is the id of the thread where |lock_| is acquired, used for detecting
// recursive lock. |locked_thread_id_| is set to kInvalidThreadId when
// is_thread_safe is not enabled or when |lock_| is released.
mutable std::atomic<base::PlatformThreadId> locked_thread_id_{
base::kInvalidThreadId};
// The wrapped context; owned for the lifetime of this object.
const std::unique_ptr<skgpu::graphite::Context> graphite_context_;
// Borrowed; may be null (see constructor).
raw_ptr<GpuProcessShmCount> use_shader_cache_shm_count_ = nullptr;
// Count of recordings inserted but not yet flushed — maintained by the .cc
// implementation; confirm exact semantics there.
size_t num_pending_recordings_ = 0;
// Run by submitAndFlushBackend(); may be null.
FlushCallback backend_flush_callback_;
};
} // namespace gpu
#endif // GPU_COMMAND_BUFFER_SERVICE_GRAPHITE_SHARED_CONTEXT_H_