File: chunked_queue.h

package info (click to toggle)
abseil 20260107.0-3
  • links: PTS, VCS
  • area: main
  • in suites: experimental
  • size: 15,092 kB
  • sloc: cpp: 174,898; ansic: 1,748; pascal: 1,591; sh: 805; python: 535; makefile: 59
file content (755 lines) | stat: -rw-r--r-- 25,195 bytes parent folder | download | duplicates (2)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
// Copyright 2025 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// -----------------------------------------------------------------------------
// File: chunked_queue.h
// -----------------------------------------------------------------------------
//
// `std::deque` provides random access and fast push/pop back/front. It is
// implemented as an array of fixed blocks. It provides no control of block size
// and implementations differ; libstdc++ tries to allocate blocks of ~512 bytes
// and libc++ tries for blocks of ~4k bytes.
//
// `absl::chunked_queue` provides the same minus random access. It is
// implemented as a double-linked list of fixed or variable sized blocks.
//
// `absl::chunked_queue` is useful when memory usage is paramount, as it
// provides fine-grained and configurable block sizing.
//
// The interface supported by this class is limited to:
//
//   empty()
//   size()
//   max_size()
//   shrink_to_fit()
//   resize()
//   assign()
//   push_back()
//   emplace_back()
//   pop_front()
//   front()
//   back()
//   swap()
//   clear()
//   begin(), end()
//   cbegin(), cend()
//
// === ADVANCED USAGE
//
// == clear()
//
// As an optimization clear() leaves the first block of the chunked_queue
// allocated (but empty). So clear will not delete all memory of the container.
// In order to do so, call shrink_to_fit() or swap the container with an empty
// one.
//
//   absl::chunked_queue<int64> q = {1, 2, 3};
//   q.clear();
//   q.shrink_to_fit();
//
// == block size customization
//
// chunked_queue allows customization of the block size for each block. By
// default the block size is set to 1 element and the size doubles for the next
// block until it reaches the default max block size, which is 128 elements.
//
// = fixed size
//
// When only the first block size parameter is specified, it sets a fixed block
// size for all blocks:
//
//   chunked_queue<T, 32>: 32 elements per block
//
// The smaller the block size, the less the memory usage for small queues at the
// cost of performance. Caveat: For large queues, a smaller block size will
// increase memory usage, and reduce performance.
//
// = variable size
//
// When both block size parameters are specified, they set the min and max block
// sizes for the blocks. Initially the queue starts with the min block size and
// as it grows, the size of each block grows until it reaches the max block
// size.
// New blocks are double the size of the tail block (so they at least
// double the size of the queue).
//
//   chunked_queue<T, 4, 64>: first block 4 elements, second block 8 elements,
//                            third block 16 elements, fourth block 32 elements,
//                            all other blocks 64 elements
//
// One can specify a min and max such that small queues will not waste memory
// and large queues will not have too many blocks.

#ifndef ABSL_CONTAINER_CHUNKED_QUEUE_H_
#define ABSL_CONTAINER_CHUNKED_QUEUE_H_

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <initializer_list>
#include <iterator>
#include <memory>
#include <new>
#include <tuple>
#include <type_traits>
#include <utility>

#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/iterator_traits.h"
#include "absl/base/macros.h"
#include "absl/container/internal/chunked_queue.h"
#include "absl/container/internal/layout.h"

namespace absl {
ABSL_NAMESPACE_BEGIN

template <typename T, size_t BLo = 0, size_t BHi = BLo,
          typename Allocator = std::allocator<T>>
class chunked_queue {
 public:
  // Resolved block-size bounds: when neither template bound is given
  // (BLo == BHi == 0), blocks start at 1 element and grow up to 128 elements.
  static constexpr size_t kBlockSizeMin = (BLo == 0 && BHi == 0) ? 1 : BLo;
  static constexpr size_t kBlockSizeMax = (BLo == 0 && BHi == 0) ? 128 : BHi;

 private:
  static_assert(kBlockSizeMin > 0, "Min block size cannot be zero");
  static_assert(kBlockSizeMin <= kBlockSizeMax, "Invalid block size bounds");

  using Block = container_internal::ChunkedQueueBlock<T, Allocator>;
  using AllocatorTraits = std::allocator_traits<Allocator>;

  // State shared by iterator and const_iterator: the current block, the
  // current position within it (ptr), and the end of that block's element
  // storage (limit).
  class iterator_common {
   public:
    friend bool operator==(const iterator_common& a, const iterator_common& b) {
      return a.ptr == b.ptr;
    }

    friend bool operator!=(const iterator_common& a, const iterator_common& b) {
      return !(a == b);
    }

   protected:
    iterator_common() = default;
    explicit iterator_common(Block* b)
        : block(b), ptr(b->start()), limit(b->limit()) {}

    void Incr() {
      // If we do not have a next block, make ptr point one past the end of this
      // block. If we do have a next block, make ptr point to the first element
      // of the next block.
      ++ptr;
      if (ptr == limit && block->next()) *this = iterator_common(block->next());
    }

    // Advances the iterator by `n` elements, hopping over whole blocks as
    // needed. Must not be used to advance past the queue's last block.
    void IncrBy(size_t n) {
      while (ptr + n > limit) {
        n -= limit - ptr;
        *this = iterator_common(block->next());
      }
      ptr += n;
    }

    Block* block = nullptr;
    T* ptr = nullptr;
    T* limit = nullptr;
  };

  // CT can be either T or const T.
  template <typename CT>
  class basic_iterator : public iterator_common {
   public:
    using iterator_category = std::forward_iterator_tag;
    using value_type = typename AllocatorTraits::value_type;
    using difference_type = typename AllocatorTraits::difference_type;
    using pointer =
        typename std::conditional<std::is_const<CT>::value,
                                  typename AllocatorTraits::const_pointer,
                                  typename AllocatorTraits::pointer>::type;
    using reference = CT&;

    basic_iterator() = default;

    // Copy ctor if CT is T.
    // Otherwise it's a conversion of iterator to const_iterator.
    basic_iterator(const basic_iterator<T>& it)  // NOLINT(runtime/explicit)
        : iterator_common(it) {}

    basic_iterator& operator=(const basic_iterator& other) = default;

    reference operator*() const { return *this->ptr; }
    pointer operator->() const { return this->ptr; }
    basic_iterator& operator++() {
      this->Incr();
      return *this;
    }
    basic_iterator operator++(int) {
      basic_iterator t = *this;
      ++*this;
      return t;
    }

   private:
    explicit basic_iterator(Block* b) : iterator_common(b) {}

    friend chunked_queue;
  };

 public:
  using allocator_type = typename AllocatorTraits::allocator_type;
  using value_type = typename AllocatorTraits::value_type;
  using size_type = typename AllocatorTraits::size_type;
  using difference_type = typename AllocatorTraits::difference_type;
  using reference = value_type&;
  using const_reference = const value_type&;
  using iterator = basic_iterator<T>;
  using const_iterator = basic_iterator<const T>;

  // Constructs an empty queue.
  chunked_queue() : chunked_queue(allocator_type()) {}

  // Constructs an empty queue with a custom allocator.
  explicit chunked_queue(const allocator_type& alloc)
      : alloc_and_size_(alloc) {}

  // Constructs a queue with `count` default-inserted elements.
  explicit chunked_queue(size_type count,
                         const allocator_type& alloc = allocator_type())
      : alloc_and_size_(alloc) {
    resize(count);
  }

  // Constructs a queue with `count` copies of `value`.
  chunked_queue(size_type count, const T& value,
                const allocator_type& alloc = allocator_type())
      : alloc_and_size_(alloc) {
    assign(count, value);
  }

  // Constructs a queue with the contents of the range [first, last).
  template <typename Iter,
            typename = std::enable_if_t<
                base_internal::IsAtLeastInputIterator<Iter>::value>>
  chunked_queue(Iter first, Iter last,
                const allocator_type& alloc = allocator_type())
      : alloc_and_size_(alloc) {
    using Tag = typename std::iterator_traits<Iter>::iterator_category;
    RangeInit(first, last, Tag());
  }

  // Constructs a queue with the contents of the initializer list `list`.
  chunked_queue(std::initializer_list<T> list,
                const allocator_type& alloc = allocator_type())
      : chunked_queue(list.begin(), list.end(), alloc) {}

  // Destroys all elements and deallocates all blocks.
  ~chunked_queue();

  // Copy constructor.
  chunked_queue(const chunked_queue& other)
      : chunked_queue(other,
                      AllocatorTraits::select_on_container_copy_construction(
                          other.alloc_and_size_.allocator())) {}

  // Copy constructor with specific allocator.
  chunked_queue(const chunked_queue& other, const allocator_type& alloc)
      : alloc_and_size_(alloc) {
    for (const_reference item : other) {
      push_back(item);
    }
  }

  // Move constructor. Steals `other`'s blocks and leaves it empty.
  chunked_queue(chunked_queue&& other) noexcept
      : head_(other.head_),
        tail_(other.tail_),
        alloc_and_size_(std::move(other.alloc_and_size_)) {
    other.head_ = {};
    other.tail_ = {};
    other.alloc_and_size_.size = 0;
  }

  // Replaces contents with those from initializer list `il`.
  chunked_queue& operator=(std::initializer_list<T> il) {
    assign(il.begin(), il.end());
    return *this;
  }

  // Copy assignment operator.
  chunked_queue& operator=(const chunked_queue& other) {
    if (this == &other) {
      return *this;
    }
    if (AllocatorTraits::propagate_on_container_copy_assignment::value &&
        (alloc_and_size_.allocator() != other.alloc_and_size_.allocator())) {
      // Destroy all current elements and blocks with the current allocator,
      // before switching this to use the allocator propagated from "other".
      DestroyAndDeallocateAll();
      alloc_and_size_ = AllocatorAndSize(other.alloc_and_size_.allocator());
    }
    assign(other.begin(), other.end());
    return *this;
  }

  // Move assignment operator.
  chunked_queue& operator=(chunked_queue&& other) noexcept;

  // Returns true if the queue contains no elements.
  bool empty() const { return alloc_and_size_.size == 0; }

  // Returns the number of elements in the queue.
  size_t size() const { return alloc_and_size_.size; }

  // Returns the maximum number of elements the queue is able to hold.
  size_type max_size() const noexcept {
    return AllocatorTraits::max_size(alloc_and_size_.allocator());
  }

  // Resizes the container to contain `new_size` elements.
  // If `new_size > size()`, additional default-inserted elements are appended.
  // If `new_size < size()`, elements are removed from the end.
  void resize(size_t new_size);

  // Resizes the container to contain `new_size` elements.
  // If `new_size > size()`, additional copies of `value` are appended.
  // If `new_size < size()`, elements are removed from the end.
  void resize(size_type new_size, const T& value) {
    if (new_size > size()) {
      size_t to_add = new_size - size();
      for (size_t i = 0; i < to_add; ++i) {
        push_back(value);
      }
    } else {
      resize(new_size);
    }
  }

  // Requests the removal of unused capacity.
  void shrink_to_fit() {
    // As an optimization clear() leaves the first block of the chunked_queue
    // allocated (but empty). When empty, shrink_to_fit() deallocates the first
    // block by swapping it with a newly constructed container that has no
    // first block.
    if (empty()) {
      chunked_queue(alloc_and_size_.allocator()).swap(*this);
    }
  }

  // Replaces the contents with copies of those in the range [first, last).
  template <typename Iter,
            typename = std::enable_if_t<
                base_internal::IsAtLeastInputIterator<Iter>::value>>
  void assign(Iter first, Iter last) {
    auto out = begin();
    Block* prev_block = nullptr;

    // Overwrite existing elements.
    for (; out != end() && first != last; ++first) {
      // Track the previous block so we can correctly update tail_ if we stop
      // exactly at a block boundary.
      if (out.ptr + 1 == out.block->limit()) {
        prev_block = out.block;
      }
      *out = *first;
      ++out;
    }

    // If we stopped exactly at the start of a block (meaning the previous block
    // was full), we must ensure tail_ points to the end of the previous block,
    // not the start of the current (now empty and to be deleted) block.
    // This maintains the invariant required by back() which assumes tail_
    // never points to the start of a block (unless it's the only block).
    if (!empty() && out.block != nullptr && out.ptr == out.block->start() &&
        prev_block != nullptr) {
      // Delete the current block and all subsequent blocks.
      //
      // NOTE: Calling EraseAllFrom on an iterator that points to the limit of
      // the previous block will not delete any element from the previous block.
      iterator prev_block_end(prev_block);
      prev_block_end.ptr = prev_block->limit();
      EraseAllFrom(prev_block_end);

      // Update tail_ to point to the end of the previous block.
      tail_ = prev_block_end;
      prev_block->set_next(nullptr);
    } else {
      // Standard erase from the current position to the end.
      EraseAllFrom(out);
    }

    // Append any remaining new elements.
    for (; first != last; ++first) {
      push_back(*first);
    }
  }

  // Replaces the contents with `count` copies of `value`.
  void assign(size_type count, const T& value) {
    clear();
    for (size_type i = 0; i < count; ++i) {
      push_back(value);
    }
  }

  // Replaces the contents with the elements from the initializer list `il`.
  void assign(std::initializer_list<T> il) { assign(il.begin(), il.end()); }

  // Appends the given element value to the end of the container.
  // Invalidates `end()` iterator. References to other elements remain valid.
  void push_back(const T& val) { emplace_back(val); }
  void push_back(T&& val) { emplace_back(std::move(val)); }

  // Appends a new element to the end of the container.
  // The element is constructed in-place with `args`.
  // Returns a reference to the new element.
  // Invalidates `end()` iterator. References to other elements remain valid.
  template <typename... A>
  T& emplace_back(A&&... args) {
    T* storage = AllocateBack();
    AllocatorTraits::construct(alloc_and_size_.allocator(), storage,
                               std::forward<A>(args)...);
    return *storage;
  }

  // Removes the first element of the container.
  // Invalidates iterators to the removed element.
  // REQUIRES: !empty()
  void pop_front();

  // Returns a reference to the first element in the container.
  // REQUIRES: !empty()
  T& front() {
    ABSL_HARDENING_ASSERT(!empty());
    return *head_;
  }
  const T& front() const {
    ABSL_HARDENING_ASSERT(!empty());
    return *head_;
  }

  // Returns a reference to the last element in the container.
  // REQUIRES: !empty()
  T& back() {
    ABSL_HARDENING_ASSERT(!empty());
    return *(&*tail_ - 1);
  }
  const T& back() const {
    ABSL_HARDENING_ASSERT(!empty());
    return *(&*tail_ - 1);
  }

  // Swaps the contents of this queue with `other`.
  void swap(chunked_queue& other) noexcept {
    using std::swap;
    swap(head_, other.head_);
    swap(tail_, other.tail_);
    if (AllocatorTraits::propagate_on_container_swap::value) {
      swap(alloc_and_size_, other.alloc_and_size_);
    } else {
      // Swap only the sizes; each object keeps its allocator.
      //
      // (It is undefined behavior to swap between two containers with unequal
      // allocators if propagate_on_container_swap is false, so we don't have to
      // handle that here like we do in the move-assignment operator.)
      ABSL_HARDENING_ASSERT(get_allocator() == other.get_allocator());
      swap(alloc_and_size_.size, other.alloc_and_size_.size);
    }
  }

  // Erases all elements from the container.
  // Note: Leaves one empty block allocated as an optimization.
  // To free all memory, call shrink_to_fit() after calling clear().
  void clear();

  iterator begin() { return head_; }
  iterator end() { return tail_; }

  const_iterator begin() const { return head_; }
  const_iterator end() const { return tail_; }

  const_iterator cbegin() const { return head_; }
  const_iterator cend() const { return tail_; }

  // Returns the allocator associated with the container.
  allocator_type get_allocator() const { return alloc_and_size_.allocator(); }

 private:
  // Empty base-class optimization: bundle storage for our allocator together
  // with a field we had to store anyway (size), via inheriting from the
  // allocator, so this allocator instance doesn't consume any storage
  // when its type has no data members.
  struct AllocatorAndSize : private allocator_type {
    explicit AllocatorAndSize(const allocator_type& alloc)
        : allocator_type(alloc) {}
    const allocator_type& allocator() const { return *this; }
    allocator_type& allocator() { return *this; }
    size_t size = 0;
  };

  // Appends copies of [first, last), filling the tail block before
  // allocating the next one. Used by the range constructor.
  template <typename Iter>
  void RangeInit(Iter first, Iter last, std::input_iterator_tag) {
    while (first != last) {
      AddTailBlock();
      for (; first != last && tail_.ptr != tail_.limit;
           ++alloc_and_size_.size, ++tail_.ptr, ++first) {
        AllocatorTraits::construct(alloc_and_size_.allocator(), tail_.ptr,
                                   *first);
      }
    }
  }

  // Default-constructs elements in the raw storage [start, limit).
  void Construct(T* start, T* limit) {
    ABSL_ASSERT(start <= limit);
    for (; start != limit; ++start) {
      AllocatorTraits::construct(alloc_and_size_.allocator(), start);
    }
  }

  // Destroys the elements in [start, limit); returns how many were destroyed.
  size_t Destroy(T* start, T* limit) {
    ABSL_ASSERT(start <= limit);
    const size_t n = limit - start;
    for (; start != limit; ++start) {
      AllocatorTraits::destroy(alloc_and_size_.allocator(), start);
    }
    return n;
  }

  // Returns the first live element of block `b` (head_.ptr for the head
  // block, which may have had elements popped from its front).
  T* block_begin(Block* b) const {
    return b == head_.block ? head_.ptr : b->start();
  }
  T* block_end(Block* b) const {
    // We have the choice of !b->next or b == tail_.block to determine if b is
    // the tail or not. !b->next is usually faster because the caller of
    // block_end() is most likely traversing the list of blocks so b->next is
    // already fetched into some register.
    return !b->next() ? tail_.ptr : b->limit();
  }

  void AddTailBlock();
  size_t NewBlockSize() {
    // Double the last block size and bound to [kBlockSizeMin, kBlockSizeMax].
    if (!tail_.block) return kBlockSizeMin;
    return (std::min)(kBlockSizeMax, 2 * tail_.block->size());
  }

  T* AllocateBack();
  void EraseAllFrom(iterator i);

  // Destroys any contained elements and destroys all allocated storage.
  // (Like clear(), except this doesn't leave any empty blocks behind.)
  void DestroyAndDeallocateAll();

  // The set of elements in the queue is the following:
  //
  // (1) When we have just one block:
  //      [head_.ptr .. tail_.ptr-1]
  // (2) When we have multiple blocks:
  //      [head_.ptr .. head_.limit-1]
  //      ... concatenation of all elements from interior blocks ...
  //      [tail_.ptr .. tail_.limit-1]
  //
  // Rep invariants:
  // When we have just one block:
  //   head_.limit == tail_.limit == &head_.block->element[kBlockSize]
  // Always:
  //   head_.ptr <= head_.limit
  //   tail_.ptr <= tail_.limit

  iterator head_;
  iterator tail_;
  AllocatorAndSize alloc_and_size_;
};

// Out-of-line definitions for the static constexpr members. Prior to C++17,
// ODR-uses of static constexpr data members require a namespace-scope
// definition; under C++17 these are redundant (the members are implicitly
// inline) but harmless.
template <typename T, size_t BLo, size_t BHi, typename Allocator>
constexpr size_t chunked_queue<T, BLo, BHi, Allocator>::kBlockSizeMin;

template <typename T, size_t BLo, size_t BHi, typename Allocator>
constexpr size_t chunked_queue<T, BLo, BHi, Allocator>::kBlockSizeMax;

template <typename T, size_t BLo, size_t BHi, typename Allocator>
inline void swap(chunked_queue<T, BLo, BHi, Allocator>& a,
                 chunked_queue<T, BLo, BHi, Allocator>& b) noexcept {
  a.swap(b);
}

// Move assignment: steals "other"'s block list when the allocator rules allow
// it; otherwise falls back to move-assigning the elements one by one.
template <typename T, size_t BLo, size_t BHi, typename Allocator>
chunked_queue<T, BLo, BHi, Allocator>&
chunked_queue<T, BLo, BHi, Allocator>::operator=(
    chunked_queue&& other) noexcept {
  if (this == &other) {
    return *this;
  }
  // Drop all current elements and blocks before adopting the new contents.
  DestroyAndDeallocateAll();

  if constexpr (AllocatorTraits::propagate_on_container_move_assignment::
                    value) {
    // Take over the storage of "other", along with its allocator.
    head_ = other.head_;
    tail_ = other.tail_;
    alloc_and_size_ = std::move(other.alloc_and_size_);
    other.head_ = {};
    other.tail_ = {};
    other.alloc_and_size_.size = 0;
  } else if (get_allocator() == other.get_allocator()) {
    // Take over the storage of "other", with which we share an allocator.
    head_ = other.head_;
    tail_ = other.tail_;
    alloc_and_size_.size = other.alloc_and_size_.size;
    other.head_ = {};
    other.tail_ = {};
    other.alloc_and_size_.size = 0;
  } else {
    // We cannot take over the storage from "other", since it has a different
    // allocator; we're stuck move-assigning elements individually.
    for (auto& elem : other) {
      push_back(std::move(elem));
    }
  }
  return *this;
}

// Destructor: walks the block list, destroying the live elements of each
// block and then releasing the block's storage through the allocator.
template <typename T, size_t BLo, size_t BHi, typename Allocator>
inline chunked_queue<T, BLo, BHi, Allocator>::~chunked_queue() {
  for (Block* b = head_.block; b != nullptr;) {
    // Fetch the successor before deleting the current block.
    Block* const following = b->next();
    Destroy(block_begin(b), block_end(b));
    Block::Delete(b, &alloc_and_size_.allocator());
    b = following;
  }
}

// Grows the queue by default-constructing elements block by block, or shrinks
// it by destroying the trailing elements past `new_size`.
template <typename T, size_t BLo, size_t BHi, typename Allocator>
void chunked_queue<T, BLo, BHi, Allocator>::resize(size_t new_size) {
  while (new_size > size()) {
    ptrdiff_t to_add = new_size - size();
    if (tail_.ptr == tail_.limit) {
      AddTailBlock();
    }
    // Fill the tail block up to its limit or up to `to_add` new elements,
    // whichever comes first; the outer loop appends more blocks as needed.
    T* start = tail_.ptr;
    T* limit = (std::min)(tail_.limit, start + to_add);
    Construct(start, limit);
    tail_.ptr = limit;
    alloc_and_size_.size += limit - start;
  }
  if (size() == new_size) {
    return;
  }
  // Shrink: destroy everything from position `new_size` to the end.
  ABSL_ASSERT(new_size < size());
  auto new_end = begin();
  new_end.IncrBy(new_size);
  ABSL_ASSERT(new_end != end());
  EraseAllFrom(new_end);
}

// Allocates a fresh block (sized by NewBlockSize()) and appends it to the
// block chain, making it the new tail. Only called when the current tail
// block is full, or when no block exists yet.
template <typename T, size_t BLo, size_t BHi, typename Allocator>
inline void chunked_queue<T, BLo, BHi, Allocator>::AddTailBlock() {
  ABSL_ASSERT(tail_.ptr == tail_.limit);
  Block* const fresh = Block::New(NewBlockSize(), &alloc_and_size_.allocator());
  if (head_.block) {
    // Link the new block after the current tail.
    ABSL_ASSERT(tail_.block);
    tail_.block->set_next(fresh);
  } else {
    // Very first block: it becomes the head as well.
    ABSL_ASSERT(!tail_.block);
    head_ = iterator(fresh);
  }
  tail_ = iterator(fresh);
}

// Reserves raw storage for one new element at the back of the queue and
// returns a pointer to it; the caller constructs the element in place.
template <typename T, size_t BLo, size_t BHi, typename Allocator>
inline T* chunked_queue<T, BLo, BHi, Allocator>::AllocateBack() {
  // Grow the block chain if the tail block has no room left.
  if (tail_.ptr == tail_.limit) {
    AddTailBlock();
  }
  T* const slot = tail_.ptr;
  ++tail_.ptr;
  ++alloc_and_size_.size;
  return slot;
}

// Destroys all elements from iterator `i` (inclusive) through the end of the
// queue, deallocates every block after i's block, and makes `i` the new tail.
template <typename T, size_t BLo, size_t BHi, typename Allocator>
inline void chunked_queue<T, BLo, BHi, Allocator>::EraseAllFrom(iterator i) {
  if (!i.block) {
    // Default-constructed iterator: no storage, nothing to erase.
    return;
  }
  ABSL_ASSERT(i.ptr);
  ABSL_ASSERT(i.limit);
  // Destroy the trailing elements of i's own block...
  alloc_and_size_.size -= Destroy(i.ptr, block_end(i.block));
  // ...then destroy and deallocate every subsequent block outright.
  Block* b = i.block->next();
  while (b) {
    Block* next = b->next();
    alloc_and_size_.size -= Destroy(b->start(), block_end(b));
    Block::Delete(b, &alloc_and_size_.allocator());
    b = next;
  }
  tail_ = i;
  tail_.block->set_next(nullptr);
}

// Destroys every element and releases every block (unlike clear(), which
// keeps the first block), then resets the container to the freshly
// constructed empty state.
template <typename T, size_t BLo, size_t BHi, typename Allocator>
inline void chunked_queue<T, BLo, BHi, Allocator>::DestroyAndDeallocateAll() {
  for (Block* b = head_.block; b != nullptr;) {
    // Fetch the successor before deleting the current block.
    Block* const following = b->next();
    Destroy(block_begin(b), block_end(b));
    Block::Delete(b, &alloc_and_size_.allocator());
    b = following;
  }
  head_ = iterator();
  tail_ = iterator();
  alloc_and_size_.size = 0;
}

// Destroys the front element and advances head_. Deallocates the head block
// once it has been fully consumed (when more elements remain behind it).
template <typename T, size_t BLo, size_t BHi, typename Allocator>
inline void chunked_queue<T, BLo, BHi, Allocator>::pop_front() {
  ABSL_HARDENING_ASSERT(!empty());
  ABSL_ASSERT(head_.block);
  AllocatorTraits::destroy(alloc_and_size_.allocator(), head_.ptr);
  ++head_.ptr;
  --alloc_and_size_.size;
  if (empty()) {
    // Reset head and tail to the start of the (only) block.
    ABSL_ASSERT(head_.block == tail_.block);
    head_.ptr = tail_.ptr = head_.block->start();
    return;
  }
  // Non-empty and the head block is exhausted: delete it and move to the
  // next block.
  if (head_.ptr == head_.limit) {
    Block* n = head_.block->next();
    Block::Delete(head_.block, &alloc_and_size_.allocator());
    head_ = iterator(n);
  }
}

// Destroys all elements. Keeps the first block allocated (empty) so that
// subsequent push_back calls need not reallocate; see shrink_to_fit().
template <typename T, size_t BLo, size_t BHi, typename Allocator>
void chunked_queue<T, BLo, BHi, Allocator>::clear() {
  // NOTE: As an optimization we leave one block allocated.
  Block* b = head_.block;
  if (!b) {
    // No storage was ever allocated; nothing to do.
    ABSL_ASSERT(empty());
    return;
  }
  // Destroy the elements of every block, deallocating all blocks except the
  // first one.
  while (b) {
    Block* next = b->next();
    Destroy(block_begin(b), block_end(b));
    if (head_.block != b) {
      Block::Delete(b, &alloc_and_size_.allocator());
    }
    b = next;
  }
  // Re-point head_ and tail_ at the start of the retained (now empty) block.
  b = head_.block;
  b->set_next(nullptr);
  head_ = tail_ = iterator(b);
  alloc_and_size_.size = 0;
}

ABSL_NAMESPACE_END
}  // namespace absl

#endif  // ABSL_CONTAINER_CHUNKED_QUEUE_H_