/*
Copyright (c) 2005-2024 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#ifndef __TBB_detail__segment_table_H
#define __TBB_detail__segment_table_H
#include "_config.h"
#include "_allocator_traits.h"
#include "_template_helpers.h"
#include "_utils.h"
#include "_assert.h"
#include "_exception.h"
#include <atomic>
#include <type_traits>
#include <memory>
#include <cstring>
#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
#pragma warning(push)
#pragma warning(disable: 4127) // warning C4127: conditional expression is constant
#endif
namespace tbb {
namespace detail {
namespace d1 {
// segment_table: CRTP base class implementing a concurrent table of
// "segments" -- exponentially growing arrays of T. Segment k spans the
// global index range [segment_base(k), segment_base(k) + segment_size(k)),
// i.e. segments hold 2, 2, 4, 8, 16, ... elements. A small embedded table of
// PointersPerEmbeddedTable slots lives inline in the object; when more
// segments are needed, a "long" table (one slot per bit of size_type) is
// allocated and atomically installed.
//
// Segment pointers are stored biased by -segment_base(k) so that
// table[k][index] directly addresses the element with global index `index`.
// A stored value of nullptr means "not yet allocated"; the sentinel
// segment_allocation_failure_tag (address 1) records a failed allocation.
//
// DerivedType supplies the allocation/element policy: create_segment,
// destroy_segment, deallocate_segment, allocate_long_table, nullify_segment,
// copy_segment, move_segment, destroy_elements, plus the compile-time flags
// allow_table_extending, is_noexcept_assignment and is_noexcept_swap.
template <typename T, typename Allocator, typename DerivedType, std::size_t PointersPerEmbeddedTable>
class segment_table {
public:
using value_type = T;
using segment_type = T*;
using atomic_segment = std::atomic<segment_type>;
using segment_table_type = atomic_segment*;
using size_type = std::size_t;
using segment_index_type = std::size_t;
using allocator_type = Allocator;
using allocator_traits_type = tbb::detail::allocator_traits<allocator_type>;
// The segment table itself is an array of atomic_segment; rebind the
// user allocator accordingly.
using segment_table_allocator_type = typename allocator_traits_type::template rebind_alloc<atomic_segment>;
protected:
using segment_table_allocator_traits = tbb::detail::allocator_traits<segment_table_allocator_type>;
using derived_type = DerivedType;
// Number of segment slots stored inline in this object.
static constexpr size_type pointers_per_embedded_table = PointersPerEmbeddedTable;
// Capacity of the heap-allocated "long" table: one slot per bit of
// size_type, which is enough to cover every representable index.
static constexpr size_type pointers_per_long_table = sizeof(size_type) * 8;
public:
// Constructs an empty table; the embedded table is active and all of its
// slots are zeroed (no segments allocated).
segment_table( const allocator_type& alloc = allocator_type() )
: my_segment_table_allocator(alloc), my_segment_table(nullptr)
, my_first_block{}, my_size{}, my_segment_table_allocation_failed{}
{
my_segment_table.store(my_embedded_table, std::memory_order_relaxed);
zero_table(my_embedded_table, pointers_per_embedded_table);
}
// Copy constructor: element-wise copy of other's segments via the derived
// class's copy_segment. On exception, releases anything already copied.
segment_table( const segment_table& other )
: my_segment_table_allocator(segment_table_allocator_traits::
select_on_container_copy_construction(other.my_segment_table_allocator))
, my_segment_table(nullptr), my_first_block{}, my_size{}, my_segment_table_allocation_failed{}
{
my_segment_table.store(my_embedded_table, std::memory_order_relaxed);
zero_table(my_embedded_table, pointers_per_embedded_table);
try_call( [&] {
internal_transfer(other, copy_segment_body_type{*this});
} ).on_exception( [&] {
clear();
});
}
// Allocator-extended copy constructor.
segment_table( const segment_table& other, const allocator_type& alloc )
: my_segment_table_allocator(alloc), my_segment_table(nullptr)
, my_first_block{}, my_size{}, my_segment_table_allocation_failed{}
{
my_segment_table.store(my_embedded_table, std::memory_order_relaxed);
zero_table(my_embedded_table, pointers_per_embedded_table);
try_call( [&] {
internal_transfer(other, copy_segment_body_type{*this});
} ).on_exception( [&] {
clear();
});
}
// Move constructor: steals other's segments/table (allocators are equal
// by construction here, so pointer transfer is always valid).
segment_table( segment_table&& other )
: my_segment_table_allocator(std::move(other.my_segment_table_allocator)), my_segment_table(nullptr)
, my_first_block{}, my_size{}, my_segment_table_allocation_failed{}
{
my_segment_table.store(my_embedded_table, std::memory_order_relaxed);
zero_table(my_embedded_table, pointers_per_embedded_table);
internal_move(std::move(other));
}
// Allocator-extended move constructor: transfers pointers when the
// allocators compare equal, otherwise falls back to per-element move.
segment_table( segment_table&& other, const allocator_type& alloc )
: my_segment_table_allocator(alloc), my_segment_table(nullptr), my_first_block{}
, my_size{}, my_segment_table_allocation_failed{}
{
my_segment_table.store(my_embedded_table, std::memory_order_relaxed);
zero_table(my_embedded_table, pointers_per_embedded_table);
using is_equal_type = typename segment_table_allocator_traits::is_always_equal;
internal_move_construct_with_allocator(std::move(other), alloc, is_equal_type());
}
// Destructor: releases all segments and, if present, the long table.
~segment_table() {
clear();
}
// Copy assignment: propagates the allocator per POCCA, then copies all
// segments element-wise.
segment_table& operator=( const segment_table& other ) {
if (this != &other) {
copy_assign_allocators(my_segment_table_allocator, other.my_segment_table_allocator);
internal_transfer(other, copy_segment_body_type{*this});
}
return *this;
}
// Move assignment: pointer transfer when POCMA or always-equal allocators
// permit it, otherwise per-element move with reallocation.
segment_table& operator=( segment_table&& other )
noexcept(derived_type::is_noexcept_assignment)
{
using pocma_type = typename segment_table_allocator_traits::propagate_on_container_move_assignment;
using is_equal_type = typename segment_table_allocator_traits::is_always_equal;
if (this != &other) {
move_assign_allocators(my_segment_table_allocator, other.my_segment_table_allocator);
internal_move_assign(std::move(other), tbb::detail::disjunction<is_equal_type, pocma_type>());
}
return *this;
}
// Swap: field-wise swap when POCS or always-equal allocators permit it;
// swapping with unequal allocators and POCS == false is UB (asserted).
void swap( segment_table& other )
noexcept(derived_type::is_noexcept_swap)
{
using is_equal_type = typename segment_table_allocator_traits::is_always_equal;
using pocs_type = typename segment_table_allocator_traits::propagate_on_container_swap;
if (this != &other) {
swap_allocators(my_segment_table_allocator, other.my_segment_table_allocator);
internal_swap(other, tbb::detail::disjunction<is_equal_type, pocs_type>());
}
}
// Returns a pointer to the first element of the given segment
// (adds segment_base back to the biased stored pointer).
segment_type get_segment( segment_index_type index ) const {
return get_table()[index] + segment_base(index);
}
// Element access by global index; allocates the containing segment on
// demand (allow_out_of_range_access == true).
value_type& operator[]( size_type index ) {
return internal_subscript<true>(index);
}
const value_type& operator[]( size_type index ) const {
return const_cast<segment_table*>(this)->internal_subscript<true>(index);
}
const segment_table_allocator_type& get_allocator() const {
return my_segment_table_allocator;
}
segment_table_allocator_type& get_allocator() {
return my_segment_table_allocator;
}
// Allocates the segment seg_index (holding global index `index`) and
// publishes it into `table` with a CAS. Exactly one thread wins the CAS;
// losers deallocate their freshly created segment and use the winner's.
// On create_segment failure the derived class is expected to have stored
// the failure tag (or a valid pointer) in the slot, which is re-read here.
void enable_segment( segment_type& segment, segment_table_type table, segment_index_type seg_index, size_type index ) {
// Allocate new segment
segment_type new_segment = self()->create_segment(table, seg_index, index);
if (new_segment != nullptr) {
// Store (new_segment - segment_base) into the segment table to allow access to the table by index via
// my_segment_table[segment_index_of(index)][index]
segment_type disabled_segment = nullptr;
if (!table[seg_index].compare_exchange_strong(disabled_segment, new_segment - segment_base(seg_index))) {
// compare_exchange failed => some other thread has already enabled this segment
// Deallocate the memory
self()->deallocate_segment(new_segment, seg_index);
}
}
segment = table[seg_index].load(std::memory_order_acquire);
__TBB_ASSERT(segment != nullptr, "If create_segment returned nullptr, the element should be stored in the table");
}
// Disables segment seg_index (via the derived class's nullify_segment)
// and destroys it. Segments marked with the allocation-failure tag were
// never allocated, so there is nothing to destroy.
void delete_segment( segment_index_type seg_index ) {
segment_type segment_to_delete = self()->nullify_segment(get_table(), seg_index);
if (segment_to_delete == segment_allocation_failure_tag) {
return;
}
// Un-bias the stored pointer before handing it to destroy_segment.
segment_to_delete += segment_base(seg_index);
// Deallocate the segment
self()->destroy_segment(segment_to_delete, seg_index);
}
size_type number_of_segments( segment_table_type table ) const {
// Check for an active table, if it is embedded table - return the number of embedded segments
// Otherwise - return the maximum number of segments
return table == my_embedded_table ? pointers_per_embedded_table : pointers_per_long_table;
}
// Number of elements covered by the leading run of allocated segments.
size_type capacity() const noexcept {
segment_table_type table = get_table();
size_type num_segments = number_of_segments(table);
for (size_type seg_index = 0; seg_index < num_segments; ++seg_index) {
// Check if the pointer is valid (allocated): nullptr (0) and the
// failure tag (1) both compare <= segment_allocation_failure_tag.
if (table[seg_index].load(std::memory_order_relaxed) <= segment_allocation_failure_tag) {
return segment_base(seg_index);
}
}
return segment_base(num_segments);
}
// Index one past the last successfully allocated segment (0 if none);
// unlike capacity(), skips over holes left by failed allocations.
size_type find_last_allocated_segment( segment_table_type table ) const noexcept {
size_type end = 0;
size_type num_segments = number_of_segments(table);
for (size_type seg_index = 0; seg_index < num_segments; ++seg_index) {
// Check if the pointer is valid (allocated)
if (table[seg_index].load(std::memory_order_relaxed) > segment_allocation_failure_tag) {
end = seg_index + 1;
}
}
return end;
}
// Pre-allocates all segments needed to cover n elements by touching the
// first element of each missing segment. Throws reservation_length_error
// if n exceeds the allocator's maximum size.
void reserve( size_type n ) {
if (n > allocator_traits_type::max_size(my_segment_table_allocator)) {
throw_exception(exception_id::reservation_length_error);
}
size_type size = my_size.load(std::memory_order_relaxed);
// Start with the first segment not covered by the current size.
segment_index_type start_seg_idx = size == 0 ? 0 : segment_index_of(size - 1) + 1;
for (segment_index_type seg_idx = start_seg_idx; segment_base(seg_idx) < n; ++seg_idx) {
size_type first_index = segment_base(seg_idx);
// Subscripting the first element forces segment allocation.
internal_subscript<true>(first_index);
}
}
// Releases all segments and the long table (if any); resets size and
// first-block bookkeeping. Not thread-safe.
void clear() {
clear_segments();
clear_table();
my_size.store(0, std::memory_order_relaxed);
my_first_block.store(0, std::memory_order_relaxed);
}
// Disables and deallocates every enabled segment, highest index first.
void clear_segments() {
segment_table_type current_segment_table = get_table();
for (size_type i = number_of_segments(current_segment_table); i != 0; --i) {
if (current_segment_table[i - 1].load(std::memory_order_relaxed) != nullptr) {
// If the segment was enabled - disable and deallocate it
delete_segment(i - 1);
}
}
}
// Destroys the atomic slots of a (long) table and deallocates its storage.
void destroy_and_deallocate_table(segment_table_type table, size_type num_segments) {
auto& alloc = get_allocator();
for (size_type seg_idx = 0; seg_idx < num_segments; ++seg_idx) {
segment_table_allocator_traits::destroy(alloc, &table[seg_idx]);
}
segment_table_allocator_traits::deallocate(alloc, table, num_segments);
}
// If a long table is active, deallocates it and reactivates the zeroed
// embedded table.
void clear_table() {
segment_table_type current_segment_table = get_table();
if (current_segment_table != my_embedded_table) {
// If the active table is not the embedded one - deallocate the active table
destroy_and_deallocate_table(current_segment_table, pointers_per_long_table);
my_segment_table.store(my_embedded_table, std::memory_order_relaxed);
zero_table(my_embedded_table, pointers_per_embedded_table);
}
}
// Replaces the embedded table with a long table when an access at
// [start_index, end_index) would fall outside the embedded capacity.
// On return, `table` refers to the (possibly newly installed) active table.
void extend_table_if_necessary(segment_table_type& table, size_type start_index, size_type end_index) {
// Extend segment table if an active table is an embedded one and the requested index is
// outside it
if (table == my_embedded_table && end_index > embedded_table_size) {
if (start_index <= embedded_table_size) {
// More than one thread can get here: the one that has assigned the first block and
// is in the process of allocating it now, and the one that saw the first block has
// been assigned already, but not yet allocated. This latter thread decides not to
// wait for the first one and extend the table itself.
try_call([&] {
segment_table_type new_table =
self()->allocate_long_table(my_embedded_table, start_index);
// It is possible that the table was extended by the thread that allocated first
// block. In this case, the below CAS fails and re-reads the new table pointer.
if (my_segment_table.compare_exchange_strong(
table, new_table,
/*memory order in case of a success*/std::memory_order_release,
/*memory order in case of a failure*/std::memory_order_acquire))
{
// CAS was successful, update the local table pointer with now actual
table = new_table;
} else if (new_table) {
// Other thread was the first to replace the segment table. Current thread's
// table is not needed anymore, so destroying it.
destroy_and_deallocate_table(new_table, pointers_per_long_table);
}
}).on_exception([&] {
// Record the failure so threads spinning below can rethrow
// instead of waiting forever.
my_segment_table_allocation_failed.store(true, std::memory_order_relaxed);
});
} else {
// The access lies beyond the embedded table but another thread is
// responsible for the extension; spin until it is published.
atomic_backoff backoff;
do {
if (my_segment_table_allocation_failed.load(std::memory_order_relaxed)) {
throw_exception(exception_id::bad_alloc);
}
backoff.pause();
table = my_segment_table.load(std::memory_order_acquire);
} while (table == my_embedded_table);
}
}
}
// Return the segment where index is stored.
// (index|1 keeps log2 well-defined for index 0; indices 0 and 1 both map
// to segment 0.)
static constexpr segment_index_type segment_index_of( size_type index ) {
return size_type(tbb::detail::log2(uintptr_t(index|1)));
}
// First global element index covered by the segment: 0, 2, 4, 8, 16, ...
// (2^index, except that segment 0 starts at 0 because it is doubled).
static constexpr size_type segment_base( size_type index ) {
return size_type(1) << index & ~size_type(1);
}
// Return size of the segment: 2, 2, 4, 8, 16, ... (segment 0 holds two
// elements so that every segment base is even).
static constexpr size_type segment_size( size_type index ) {
return index == 0 ? 2 : size_type(1) << index;
}
private:
// CRTP downcast to the policy-providing derived class.
derived_type* self() {
return static_cast<derived_type*>(this);
}
// Functor forwarding to derived copy_segment; used by internal_transfer.
struct copy_segment_body_type {
void operator()( segment_index_type index, segment_type from, segment_type to ) const {
my_instance.self()->copy_segment(index, from, to);
}
segment_table& my_instance;
};
// Functor forwarding to derived move_segment; used by internal_transfer.
struct move_segment_body_type {
void operator()( segment_index_type index, segment_type from, segment_type to ) const {
my_instance.self()->move_segment(index, from, to);
}
segment_table& my_instance;
};
// Transfers all segments from the other table, applying transfer_segment
// (copy or move) to each enabled segment. Destroys this table's current
// elements first. Stops at the first failed segment in `other`, truncating
// my_size to what was actually transferred.
template <typename TransferBody>
void internal_transfer( const segment_table& other, TransferBody transfer_segment ) {
static_cast<derived_type*>(this)->destroy_elements();
assign_first_block_if_necessary(other.my_first_block.load(std::memory_order_relaxed));
my_size.store(other.my_size.load(std::memory_order_relaxed), std::memory_order_relaxed);
segment_table_type other_table = other.get_table();
size_type end_segment_size = segment_size(other.find_last_allocated_segment(other_table));
// If an exception occurred in other, then the size may be greater than the size of the end segment.
size_type other_size = end_segment_size < other.my_size.load(std::memory_order_relaxed) ?
other.my_size.load(std::memory_order_relaxed) : end_segment_size;
other_size = my_segment_table_allocation_failed ? embedded_table_size : other_size;
for (segment_index_type i = 0; segment_base(i) < other_size; ++i) {
// If the segment in other table is enabled - transfer it
if (other_table[i].load(std::memory_order_relaxed) == segment_allocation_failure_tag)
{
my_size = segment_base(i);
break;
} else if (other_table[i].load(std::memory_order_relaxed) != nullptr) {
// Force-allocate the corresponding segment here, then transfer
// the elements (pointers are un-biased with segment_base).
internal_subscript<true>(segment_base(i));
transfer_segment(i, other.get_table()[i].load(std::memory_order_relaxed) + segment_base(i),
get_table()[i].load(std::memory_order_relaxed) + segment_base(i));
}
}
}
// Moves the other segment table by stealing its pointers, leaving `other`
// empty with its embedded table active.
// Only equal allocators are allowed
void internal_move( segment_table&& other ) {
// NOTE: allocators should be equal
clear();
my_first_block.store(other.my_first_block.load(std::memory_order_relaxed), std::memory_order_relaxed);
my_size.store(other.my_size.load(std::memory_order_relaxed), std::memory_order_relaxed);
// If an active table in other is embedded - restore all of the embedded segments
if (other.get_table() == other.my_embedded_table) {
for ( size_type i = 0; i != pointers_per_embedded_table; ++i ) {
segment_type other_segment = other.my_embedded_table[i].load(std::memory_order_relaxed);
my_embedded_table[i].store(other_segment, std::memory_order_relaxed);
other.my_embedded_table[i].store(nullptr, std::memory_order_relaxed);
}
my_segment_table.store(my_embedded_table, std::memory_order_relaxed);
} else {
// Steal the long table and reset other to its embedded table.
my_segment_table.store(other.my_segment_table, std::memory_order_relaxed);
other.my_segment_table.store(other.my_embedded_table, std::memory_order_relaxed);
zero_table(other.my_embedded_table, pointers_per_embedded_table);
}
other.my_size.store(0, std::memory_order_relaxed);
}
// Move construct the segment table with the allocator object
// if any instances of allocator_type are always equal
void internal_move_construct_with_allocator( segment_table&& other, const allocator_type&,
/*is_always_equal = */ std::true_type ) {
internal_move(std::move(other));
}
// Move construct the segment table with the allocator object
// if instances of allocator_type are not always equal
void internal_move_construct_with_allocator( segment_table&& other, const allocator_type& alloc,
/*is_always_equal = */ std::false_type ) {
if (other.my_segment_table_allocator == alloc) {
// If allocators are equal - restore pointers
internal_move(std::move(other));
} else {
// If allocators are not equal - perform per element move with reallocation
try_call( [&] {
internal_transfer(other, move_segment_body_type{*this});
} ).on_exception( [&] {
clear();
});
}
}
// Move assigns the segment table from other if any instances of allocator_type are always equal
// or propagate_on_container_move_assignment is true
void internal_move_assign( segment_table&& other, /*is_always_equal || POCMA = */ std::true_type ) {
internal_move(std::move(other));
}
// Move assigns the segment table from other if instances of allocator_type are not always equal
// and propagate_on_container_move_assignment is false
void internal_move_assign( segment_table&& other, /*is_always_equal || POCMA = */ std::false_type ) {
if (my_segment_table_allocator == other.my_segment_table_allocator) {
// If allocators are equal - restore pointers
internal_move(std::move(other));
} else {
// If allocators are not equal - perform per element move with reallocation
internal_transfer(other, move_segment_body_type{*this});
}
}
// Swaps two segment tables if any instances of allocator_type are always equal
// or propagate_on_container_swap is true
void internal_swap( segment_table& other, /*is_always_equal || POCS = */ std::true_type ) {
internal_swap_fields(other);
}
// Swaps two segment tables if any instances of allocator_type are not always equal
// and propagate_on_container_swap is false
// According to the C++ standard, swapping of two containers with unequal allocators
// is an undefined behavior scenario
void internal_swap( segment_table& other, /*is_always_equal || POCS = */ std::false_type ) {
__TBB_ASSERT(my_segment_table_allocator == other.my_segment_table_allocator,
"Swapping with unequal allocators is not allowed");
internal_swap_fields(other);
}
// Field-wise swap of tables, first-block counters and sizes. Embedded
// slots must be exchanged element-wise because their storage cannot move.
void internal_swap_fields( segment_table& other ) {
// If an active table in either *this segment table or other is an embedded one - swaps the embedded tables
if (get_table() == my_embedded_table ||
other.get_table() == other.my_embedded_table) {
for (size_type i = 0; i != pointers_per_embedded_table; ++i) {
segment_type current_segment = my_embedded_table[i].load(std::memory_order_relaxed);
segment_type other_segment = other.my_embedded_table[i].load(std::memory_order_relaxed);
my_embedded_table[i].store(other_segment, std::memory_order_relaxed);
other.my_embedded_table[i].store(current_segment, std::memory_order_relaxed);
}
}
segment_table_type current_segment_table = get_table();
segment_table_type other_segment_table = other.get_table();
// If an active table is an embedded one -
// store an active table in other to the embedded one from other
if (current_segment_table == my_embedded_table) {
other.my_segment_table.store(other.my_embedded_table, std::memory_order_relaxed);
} else {
// Otherwise - store it to the active segment table
other.my_segment_table.store(current_segment_table, std::memory_order_relaxed);
}
// If an active table in other segment table is an embedded one -
// store an active table in other to the embedded one from *this
if (other_segment_table == other.my_embedded_table) {
my_segment_table.store(my_embedded_table, std::memory_order_relaxed);
} else {
// Otherwise - store it to the active segment table in other
my_segment_table.store(other_segment_table, std::memory_order_relaxed);
}
auto first_block = other.my_first_block.load(std::memory_order_relaxed);
other.my_first_block.store(my_first_block.load(std::memory_order_relaxed), std::memory_order_relaxed);
my_first_block.store(first_block, std::memory_order_relaxed);
auto size = other.my_size.load(std::memory_order_relaxed);
other.my_size.store(my_size.load(std::memory_order_relaxed), std::memory_order_relaxed);
my_size.store(size, std::memory_order_relaxed);
}
protected:
// A sentinel stored in a table slot to indicate that an exception was
// thrown during that segment's allocation (address 1 is never a valid
// segment pointer).
const segment_type segment_allocation_failure_tag = reinterpret_cast<segment_type>(1);
// Total number of elements representable by the embedded table alone.
static constexpr size_type embedded_table_size = segment_size(pointers_per_embedded_table);
// Core element access: maps a global index to its element, optionally
// extending the table and enabling the containing segment on demand.
// Throws bad_alloc if the segment's allocation previously failed.
template <bool allow_out_of_range_access>
value_type& internal_subscript( size_type index ) {
segment_index_type seg_index = segment_index_of(index);
segment_table_type table = my_segment_table.load(std::memory_order_acquire);
segment_type segment = nullptr;
if (allow_out_of_range_access) {
if (derived_type::allow_table_extending) {
extend_table_if_necessary(table, index, index + 1);
}
segment = table[seg_index].load(std::memory_order_acquire);
// If the required segment is disabled - enable it
if (segment == nullptr) {
enable_segment(segment, table, seg_index, index);
}
// Check if an exception was thrown during segment allocation
if (segment == segment_allocation_failure_tag) {
throw_exception(exception_id::bad_alloc);
}
} else {
segment = table[seg_index].load(std::memory_order_acquire);
}
__TBB_ASSERT(segment != nullptr, nullptr);
// The stored pointer is biased by -segment_base, so indexing with the
// global index lands on the right element.
return segment[index];
}
// Sets my_first_block once (CAS from 0); later calls are no-ops, so only
// the first assignment sticks even under concurrency.
void assign_first_block_if_necessary(segment_index_type index) {
size_type zero = 0;
if (this->my_first_block.load(std::memory_order_relaxed) == zero) {
this->my_first_block.compare_exchange_strong(zero, index);
}
}
// Marks `count` slots of `table` as disabled (nullptr).
void zero_table( segment_table_type table, size_type count ) {
for (size_type i = 0; i != count; ++i) {
table[i].store(nullptr, std::memory_order_relaxed);
}
}
// Currently active table (embedded or long), with acquire semantics so a
// freshly installed long table's contents are visible.
segment_table_type get_table() const {
return my_segment_table.load(std::memory_order_acquire);
}
segment_table_allocator_type my_segment_table_allocator;
// Pointer to the active table: my_embedded_table or a heap long table.
std::atomic<segment_table_type> my_segment_table;
// Inline storage for the first pointers_per_embedded_table segment slots.
atomic_segment my_embedded_table[pointers_per_embedded_table];
// Number of segments in first block
std::atomic<size_type> my_first_block;
// Number of elements in table
std::atomic<size_type> my_size;
// Flag to indicate failed extend table
std::atomic<bool> my_segment_table_allocation_failed;
}; // class segment_table
} // namespace d1
} // namespace detail
} // namespace tbb
#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
#pragma warning(pop) // warning 4127 is back
#endif
#endif // __TBB_detail__segment_table_H