//===- ArrayList.h ----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_DWARFLINKER_PARALLEL_ARRAYLIST_H
#define LLVM_LIB_DWARFLINKER_PARALLEL_ARRAYLIST_H

#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/PerThreadBumpPtrAllocator.h"
#include <algorithm>
#include <array>
#include <atomic>
#include <cassert>
#include <cstddef>

namespace llvm {
namespace dwarf_linker {
namespace parallel {

/// A simple, append-only list of T structures. Elements are stored in
/// pre-allocated groups of \p ItemsGroupSize entries, which avoids keeping a
/// separate next pointer per element. Internal data is allocated from the
/// given per-thread BumpPtrAllocator. The add() method may be called
/// concurrently from multiple threads.
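///
/// A minimal usage sketch (illustrative; the Entry type, its Key field and
/// the consume() call are hypothetical and not part of this header):
/// \code
///   llvm::parallel::PerThreadBumpPtrAllocator Allocator;
///   ArrayList<Entry> List(&Allocator);
///
///   // add() may be called concurrently from several threads.
///   Entry &Stored = List.add(Entry{/*Key=*/42});
///
///   // forEach()/sort()/size() are typically used once additions are done.
///   List.sort([](const Entry &LHS, const Entry &RHS) {
///     return LHS.Key < RHS.Key;
///   });
///   List.forEach([](Entry &Item) { consume(Item); });
/// \endcode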
template <typename T, size_t ItemsGroupSize = 512> class ArrayList {
public:
  ArrayList(llvm::parallel::PerThreadBumpPtrAllocator *Allocator)
      : Allocator(Allocator) {}

  /// Add specified \p Item to the list.
  T &add(const T &Item) {
    assert(Allocator);

    // Allocate head group if it is not allocated yet.
    while (!LastGroup) {
      if (allocateNewGroup(GroupsHead))
        LastGroup = GroupsHead.load();
    }

    // Reserve a slot in the current group. If the group turns out to be full,
    // advance LastGroup to its successor (allocating it first if necessary)
    // and retry.
    ItemsGroup *CurGroup;
    size_t CurItemsCount;
    do {
      CurGroup = LastGroup;
      CurItemsCount = CurGroup->ItemsCount.fetch_add(1);

      // Check whether current group is full.
      if (CurItemsCount < ItemsGroupSize)
        break;

      // Allocate next group if necessary.
      if (!CurGroup->Next)
        allocateNewGroup(CurGroup->Next);

      LastGroup.compare_exchange_weak(CurGroup, CurGroup->Next);
    } while (true);

    // Store item into the reserved slot of the current group.
    CurGroup->Items[CurItemsCount] = Item;
    return CurGroup->Items[CurItemsCount];
  }

  using ItemHandlerTy = function_ref<void(T &)>;

  /// Enumerate all items and apply specified \p Handler to each.
  void forEach(ItemHandlerTy Handler) {
    for (ItemsGroup *CurGroup = GroupsHead; CurGroup;
         CurGroup = CurGroup->Next) {
      for (T &Item : *CurGroup)
        Handler(Item);
    }
  }

  /// Check whether the list is empty.
  bool empty() { return !GroupsHead; }

  /// Erase the list. The groups themselves are not freed; they stay owned by
  /// the allocator.
  void erase() {
    GroupsHead = nullptr;
    LastGroup = nullptr;
  }

  /// Sort list items using the specified \p Comparator. Items are copied into
  /// a temporary buffer, sorted, and written back into the existing groups.
  void sort(function_ref<bool(const T &LHS, const T &RHS)> Comparator) {
    SmallVector<T> SortedItems;
    forEach([&](T &Item) { SortedItems.push_back(Item); });

    if (!SortedItems.empty()) {
      std::sort(SortedItems.begin(), SortedItems.end(), Comparator);

      size_t SortedItemIdx = 0;
      forEach([&](T &Item) { Item = SortedItems[SortedItemIdx++]; });
      assert(SortedItemIdx == SortedItems.size());
    }
  }

  /// Return the number of items stored in the list.
  size_t size() {
    size_t Result = 0;

    for (ItemsGroup *CurGroup = GroupsHead; CurGroup != nullptr;
         CurGroup = CurGroup->Next)
      Result += CurGroup->getItemsCount();

    return Result;
  }

protected:
  struct ItemsGroup {
    using ArrayTy = std::array<T, ItemsGroupSize>;

    // Array of items kept by this group.
    ArrayTy Items;

    // Pointer to the next items group.
    std::atomic<ItemsGroup *> Next = nullptr;

    // Number of items in this group.
    // NOTE: ItemsCount could overshoot ItemsGroupSize as it might be
    // incremented by several threads. Use the getItemsCount() method to get
    // the real number of items inside ItemsGroup.
    std::atomic<size_t> ItemsCount = 0;

    size_t getItemsCount() const {
      return std::min(ItemsCount.load(), ItemsGroupSize);
    }

    typename ArrayTy::iterator begin() { return Items.begin(); }
    typename ArrayTy::iterator end() {
      return Items.begin() + getItemsCount();
    }
  };

  // Allocate a new group. Put the allocated group into \p AtomicGroup if it
  // is empty. If \p AtomicGroup was already filled by another thread, append
  // the allocated group to the end of the groups list instead.
  // \returns true if the allocated group was put into \p AtomicGroup.
  bool allocateNewGroup(std::atomic<ItemsGroup *> &AtomicGroup) {
    ItemsGroup *CurGroup = nullptr;

    // Allocate a new group.
    ItemsGroup *NewGroup = Allocator->Allocate<ItemsGroup>();
    NewGroup->ItemsCount = 0;
    NewGroup->Next = nullptr;

    // Try to replace the current group with the allocated one.
    if (AtomicGroup.compare_exchange_weak(CurGroup, NewGroup))
      return true;

    // Otherwise walk to the tail of the groups list and append the allocated
    // group there.
    while (CurGroup) {
      ItemsGroup *NextGroup = CurGroup->Next;

      if (!NextGroup) {
        if (CurGroup->Next.compare_exchange_weak(NextGroup, NewGroup))
          break;
      }

      CurGroup = NextGroup;
    }

    return false;
  }

  std::atomic<ItemsGroup *> GroupsHead = nullptr;
  std::atomic<ItemsGroup *> LastGroup = nullptr;
  llvm::parallel::PerThreadBumpPtrAllocator *Allocator = nullptr;
};
} // end of namespace parallel
} // end of namespace dwarf_linker
} // end of namespace llvm

#endif // LLVM_LIB_DWARFLINKER_PARALLEL_ARRAYLIST_H