// Copyright 2012 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef GPU_COMMAND_BUFFER_SERVICE_SYNC_POINT_MANAGER_H_
#define GPU_COMMAND_BUFFER_SERVICE_SYNC_POINT_MANAGER_H_

#include <stdint.h>

#include <functional>
#include <memory>
#include <queue>
#include <tuple>
#include <vector>

#include "base/atomic_sequence_num.h"
#include "base/check.h"
#include "base/containers/flat_map.h"
#include "base/containers/queue.h"
#include "base/functional/callback.h"
#include "base/memory/raw_ptr.h"
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_refptr.h"
#include "base/synchronization/lock.h"
#include "base/thread_annotations.h"
#include "base/threading/thread_checker.h"
#include "gpu/command_buffer/common/command_buffer_id.h"
#include "gpu/command_buffer/common/constants.h"
#include "gpu/command_buffer/common/sync_token.h"
#include "gpu/command_buffer/service/sequence_id.h"
#include "gpu/gpu_export.h"
namespace base {
class SingleThreadTaskRunner;
}  // namespace base

namespace gpu {

class SyncPointClient;
class SyncPointClientState;
class SyncPointManager;

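// Tracks the processing order of tasks on a single sequence. Order numbers
// are generated globally by SyncPointManager and are used to validate fence
// sync waits and to auto-release fence syncs that would otherwise deadlock.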
class GPU_EXPORT SyncPointOrderData
    : public base::RefCountedThreadSafe<SyncPointOrderData> {
 public:
  SyncPointOrderData(const SyncPointOrderData&) = delete;
  SyncPointOrderData& operator=(const SyncPointOrderData&) = delete;
  // Helper function that calls SyncPointManager::RemoveSyncPointOrderData.
  void Destroy() LOCKS_EXCLUDED(lock_);
  SequenceId sequence_id() { return sequence_id_; }
  uint32_t processed_order_num() const {
    base::AutoLock auto_lock(lock_);
    return processed_order_num_;
  }
  uint32_t unprocessed_order_num() const {
    base::AutoLock auto_lock(lock_);
    return last_unprocessed_order_num_;
  }
  uint32_t current_order_num() const {
    DCHECK(processing_thread_checker_.CalledOnValidThread());
    return current_order_num_;
  }
  bool IsProcessingOrderNumber() {
    DCHECK(processing_thread_checker_.CalledOnValidThread());
    return !paused_ && current_order_num_ > processed_order_num();
  }
  uint32_t GenerateUnprocessedOrderNumber();
  void BeginProcessingOrderNumber(uint32_t order_num);
  void PauseProcessingOrderNumber(uint32_t order_num);
  void FinishProcessingOrderNumber(uint32_t order_num);
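  // A minimal sketch of the order number lifecycle, assuming a hypothetical
  // scheduler (RunTask and the surrounding code are illustrative only):
  //
  //   uint32_t order_num = order_data->GenerateUnprocessedOrderNumber();
  //   // ... later, on the order number processing thread:
  //   order_data->BeginProcessingOrderNumber(order_num);
  //   RunTask();  // May wait on or release fence syncs.
  //   order_data->FinishProcessingOrderNumber(order_num);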
 private:
  friend class base::RefCountedThreadSafe<SyncPointOrderData>;
  friend class SyncPointManager;
  friend class SyncPointClientState;
  struct OrderFence {
    uint32_t order_num;
    uint64_t fence_release;
    scoped_refptr<SyncPointClientState> client_state;
    // ID that is unique to the particular SyncPointOrderData.
    uint64_t callback_id;
    OrderFence(uint32_t order,
               uint64_t release,
               scoped_refptr<SyncPointClientState> state,
               uint64_t callback_id);
    OrderFence(const OrderFence& other);
    ~OrderFence();
    bool operator>(const OrderFence& rhs) const {
      return std::tie(order_num, fence_release) >
             std::tie(rhs.order_num, rhs.fence_release);
    }
  };
  typedef std::
      priority_queue<OrderFence, std::vector<OrderFence>, std::greater<>>
          OrderFenceQueue;
  SyncPointOrderData(SyncPointManager* sync_point_manager,
                     SequenceId sequence_id);
  ~SyncPointOrderData();
  // Called by SyncPointManager after it has removed this SyncPointOrderData
  // from its order_data_map_.
  void DestroyInternal() LOCKS_EXCLUDED(lock_);
  // Returns callback_id for created OrderFence on success, 0 on failure.
  uint64_t ValidateReleaseOrderNumber(
      scoped_refptr<SyncPointClientState> client_state,
      uint32_t wait_order_num,
      uint64_t fence_release) LOCKS_EXCLUDED(lock_);
  const raw_ptr<SyncPointManager> sync_point_manager_;
  const SequenceId sequence_id_;
  uint64_t current_callback_id_ GUARDED_BY(lock_) = 0;
  // Non-thread-safe functions must be called from a single thread.
  base::ThreadChecker processing_thread_checker_;
  // Current IPC order number being processed (only used on processing thread).
  uint32_t current_order_num_ = 0;
  // Whether processing of the current order number is paused.
  bool paused_ = false;
  mutable base::Lock lock_;
  bool destroyed_ GUARDED_BY(lock_) = false;
  // Last finished IPC order number.
  uint32_t processed_order_num_ GUARDED_BY(lock_) = 0;
  // Last unprocessed order number. Updated in GenerateUnprocessedOrderNumber.
  uint32_t last_unprocessed_order_num_ GUARDED_BY(lock_) = 0;
  // Queue of unprocessed order numbers. Order numbers are enqueued in
  // GenerateUnprocessedOrderNumber, and dequeued in
  // FinishProcessingOrderNumber.
  base::queue<uint32_t> unprocessed_order_nums_ GUARDED_BY(lock_);
  // In situations where we are waiting on fence syncs that do not exist, we
  // validate by making sure the order number does not pass the order number
  // at which the wait command was issued. If processing reaches the wait
  // command's order number, we automatically release up to the expected
  // release count. Note that this also releases all lower release counts,
  // so a single misbehaved fence sync is enough to invalidate/signal all
  // previous fence syncs. All order numbers (n) in order_fence_queue_ must
  // follow the invariant:
  //   unprocessed_order_nums_.front() < n <= unprocessed_order_nums_.back().
  OrderFenceQueue order_fence_queue_ GUARDED_BY(lock_);
};

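// Tracks fence sync releases and pending wait callbacks for a single client,
// identified by its command buffer namespace and command buffer id.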
class GPU_EXPORT SyncPointClientState
    : public base::RefCountedThreadSafe<SyncPointClientState> {
 public:
  SyncPointClientState(const SyncPointClientState&) = delete;
  SyncPointClientState& operator=(const SyncPointClientState&) = delete;
  // Calls SyncPointManager::DestroySyncPointClientState.
  void Destroy() LOCKS_EXCLUDED(fence_sync_lock_);
  CommandBufferNamespace namespace_id() const { return namespace_id_; }
  CommandBufferId command_buffer_id() const { return command_buffer_id_; }
  SequenceId sequence_id() const { return order_data_->sequence_id(); }
  // This behaves similarly to SyncPointManager::Wait but uses the order data
  // to guarantee no deadlocks with other clients. Must be called on the order
  // number processing thread.
  bool Wait(const SyncToken& sync_token, base::OnceClosure callback)
      LOCKS_EXCLUDED(fence_sync_lock_);
  // Like Wait but runs the callback on the given task runner's thread. Must
  // be called on the order number processing thread.
  // TODO(elgarawany): Rename this method to instead make it explicit that the
  // callback is going to run on |task_runner|.
  bool WaitNonThreadSafe(
      const SyncToken& sync_token,
      scoped_refptr<base::SingleThreadTaskRunner> task_runner,
      base::OnceClosure callback) LOCKS_EXCLUDED(fence_sync_lock_);
  // Releases the fence sync and runs queued callbacks. Must be called on the
  // order number processing thread.
  void ReleaseFenceSync(uint64_t release) LOCKS_EXCLUDED(fence_sync_lock_);
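  // A hedged sketch of the wait/release pairing between two clients; the
  // scheduler context and OnReleased are illustrative, not part of this API:
  //
  //   // On the waiting client's order number processing thread:
  //   bool valid =
  //       waiting_state->Wait(sync_token, base::BindOnce(&OnReleased));
  //   // On the releasing client's order number processing thread:
  //   releasing_state->ReleaseFenceSync(sync_token.release_count());
  //   // If |valid| was true, OnReleased runs once the release count is
  //   // reached (or when the wait is force-released to avoid deadlock).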
 private:
  friend class base::RefCountedThreadSafe<SyncPointClientState>;
  friend class SyncPointManager;
  friend class SyncPointOrderData;
  struct ReleaseCallback {
    uint64_t release_count;
    base::OnceClosure callback_closure;
    uint64_t callback_id;
    ReleaseCallback(uint64_t release,
                    base::OnceClosure callback,
                    uint64_t callback_id);
    ReleaseCallback(ReleaseCallback&& other);
    ~ReleaseCallback();
    ReleaseCallback& operator=(ReleaseCallback&& other) = default;
    bool operator>(const ReleaseCallback& rhs) const {
      return release_count > rhs.release_count;
    }
  };
  typedef std::priority_queue<ReleaseCallback,
                              std::vector<ReleaseCallback>,
                              std::greater<>>
      ReleaseCallbackQueue;
  SyncPointClientState(SyncPointManager* sync_point_manager,
                       scoped_refptr<SyncPointOrderData> order_data,
                       CommandBufferNamespace namespace_id,
                       CommandBufferId command_buffer_id);
  ~SyncPointClientState();
  std::vector<base::OnceClosure> DestroyAndReturnCallbacks()
      LOCKS_EXCLUDED(fence_sync_lock_);
  // Returns true if the fence sync has been released.
  bool IsFenceSyncReleased(uint64_t release) LOCKS_EXCLUDED(fence_sync_lock_);
  // Queues the callback to be called if the release is valid. If the release
  // is invalid, this function returns false and the callback is never run.
  bool WaitForRelease(uint64_t release,
                      uint32_t wait_order_num,
                      base::OnceClosure callback)
      LOCKS_EXCLUDED(fence_sync_lock_);
  // Does not release the fence sync, but releases callbacks waiting on that
  // fence sync.
  void EnsureWaitReleased(uint64_t release, uint64_t callback_id)
      LOCKS_EXCLUDED(fence_sync_lock_);
  void ReleaseFenceSyncHelper(uint64_t release)
      LOCKS_EXCLUDED(fence_sync_lock_);
  // The sync point manager is guaranteed to outlive this client.
  raw_ptr<SyncPointManager> sync_point_manager_ = nullptr;
  // Global order data where releases will originate from.
  const scoped_refptr<SyncPointOrderData> order_data_;
  // Unique namespace/client id pair for this sync point client.
  const CommandBufferNamespace namespace_id_;
  const CommandBufferId command_buffer_id_;
  // Protects fence_sync_release_ and release_callback_queue_.
  base::Lock fence_sync_lock_;
  // Current fence sync release that has been signaled.
  uint64_t fence_sync_release_ GUARDED_BY(fence_sync_lock_) = 0;
  // In well-defined fence sync operations, fence syncs are released in order,
  // so a priority queue of callbacks is sufficient.
  ReleaseCallbackQueue release_callback_queue_ GUARDED_BY(fence_sync_lock_);
};

// This class manages the sync points, which allow cross-channel
// synchronization.
class GPU_EXPORT SyncPointManager {
 public:
  SyncPointManager();
  SyncPointManager(const SyncPointManager&) = delete;
  SyncPointManager& operator=(const SyncPointManager&) = delete;
  ~SyncPointManager();
  scoped_refptr<SyncPointOrderData> CreateSyncPointOrderData();
  scoped_refptr<SyncPointClientState> CreateSyncPointClientState(
      CommandBufferNamespace namespace_id,
      CommandBufferId command_buffer_id,
      SequenceId sequence_id);
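  // Typical setup, sketched with illustrative names (the namespace value and
  // command_buffer_id are assumptions; use your client's actual identifiers):
  //
  //   scoped_refptr<SyncPointOrderData> order_data =
  //       manager->CreateSyncPointOrderData();
  //   scoped_refptr<SyncPointClientState> client_state =
  //       manager->CreateSyncPointClientState(
  //           CommandBufferNamespace::GPU_IO, command_buffer_id,
  //           order_data->sequence_id());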
  // Returns true if the sync token has been released or if the command
  // buffer does not exist.
  bool IsSyncTokenReleased(const SyncToken& sync_token) LOCKS_EXCLUDED(lock_);
  // Returns the sequence ID that will release this sync token.
  SequenceId GetSyncTokenReleaseSequenceId(const SyncToken& sync_token)
      LOCKS_EXCLUDED(lock_);
  // Returns the global last processed order number.
  uint32_t GetProcessedOrderNum() const LOCKS_EXCLUDED(lock_);
  // Returns the global last unprocessed order number.
  uint32_t GetUnprocessedOrderNum() const LOCKS_EXCLUDED(lock_);
  // If the wait is valid (the sync token has not been released yet and the
  // releasing command buffer still exists), the callback is queued to run
  // when the sync point is released. If the wait is invalid, the callback is
  // NOT run. The callback runs on the thread on which the sync point is
  // released. Clients should use SyncPointClientState::Wait because that uses
  // order data to prevent deadlocks.
  bool Wait(const SyncToken& sync_token,
            SequenceId sequence_id,
            uint32_t wait_order_num,
            base::OnceClosure callback) LOCKS_EXCLUDED(lock_);
  // Like Wait but runs the callback on the given task runner's thread.
  // TODO(elgarawany): Rename this method to instead make it explicit that the
  // callback is going to run on |task_runner|.
  bool WaitNonThreadSafe(
      const SyncToken& sync_token,
      SequenceId sequence_id,
      uint32_t wait_order_num,
      scoped_refptr<base::SingleThreadTaskRunner> task_runner,
      base::OnceClosure callback) LOCKS_EXCLUDED(lock_);
  // WaitOutOfOrder allows waiting for a sync token indefinitely, so it should
  // be used only with trusted sync tokens.
  bool WaitOutOfOrder(const SyncToken& trusted_sync_token,
                      base::OnceClosure callback) LOCKS_EXCLUDED(lock_);
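  // Sketch (illustrative): a privileged caller waiting on a trusted token
  // without holding an order number (OnTrustedRelease is hypothetical):
  //
  //   manager->WaitOutOfOrder(trusted_sync_token,
  //                           base::BindOnce(&OnTrustedRelease));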
  // Used by SyncPointOrderData.
  uint32_t GenerateOrderNumber();
  // Called by SyncPointOrderData::Destroy to remove `order_data` from
  // order_data_map_.
  void RemoveSyncPointOrderData(scoped_refptr<SyncPointOrderData> order_data)
      LOCKS_EXCLUDED(lock_);
  // Grabs any remaining callbacks in |client_state|'s release queue, destroys
  // |client_state|, then runs those remaining callbacks.
  void DestroySyncPointClientState(
      scoped_refptr<SyncPointClientState> client_state)
      LOCKS_EXCLUDED(lock_, client_state->fence_sync_lock_);
 private:
  using ClientStateMap =
      base::flat_map<CommandBufferId, scoped_refptr<SyncPointClientState>>;
  using OrderDataMap =
      base::flat_map<SequenceId, scoped_refptr<SyncPointOrderData>>;
  scoped_refptr<SyncPointOrderData> GetSyncPointOrderData(
      SequenceId sequence_id) EXCLUSIVE_LOCKS_REQUIRED(lock_);
  scoped_refptr<SyncPointClientState> GetSyncPointClientState(
      CommandBufferNamespace namespace_id,
      CommandBufferId command_buffer_id) EXCLUSIVE_LOCKS_REQUIRED(lock_);
  // Internal version of GetSyncTokenReleaseSequenceId that requires lock to be
  // acquired.
  SequenceId GetSyncTokenReleaseSequenceIdInternal(const SyncToken& sync_token)
      EXCLUSIVE_LOCKS_REQUIRED(lock_);
  // Order numbers are global across all clients.
  base::AtomicSequenceNumber order_num_generator_;
  // The following are protected by |lock_|.
  // Map of command buffer id to client state for each namespace.
  ClientStateMap client_state_maps_[NUM_COMMAND_BUFFER_NAMESPACES] GUARDED_BY(
      lock_);
  // Map of sequence id to order data.
  OrderDataMap order_data_map_ GUARDED_BY(lock_);
  SequenceId::Generator sequence_id_generator_ GUARDED_BY(lock_);
  mutable base::Lock lock_;
};

}  // namespace gpu

#endif  // GPU_COMMAND_BUFFER_SERVICE_SYNC_POINT_MANAGER_H_