/*****************************************************************************

Copyright (c) 2020, 2025, Oracle and/or its affiliates.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License, version 2.0, as published by the
Free Software Foundation.

This program is designed to work with certain software (including
but not limited to OpenSSL) that is licensed under separate terms,
as designated in a particular file or component or in included license
documentation. The authors of MySQL hereby grant you an additional
permission to link the program and your derivative works with the
separately licensed software that they have either included with
the program or referenced in the documentation.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License, version 2.0,
for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

*****************************************************************************/
#define LOCK_MODULE_IMPLEMENTATION

#include "lock0latches.h"
#include "lock0lock.h"
#include "lock0priv.h"

namespace locksys {

size_t Latches::Page_shards::get_shard(const page_id_t &page_id) {
  /* We always use lock_sys->rec_hash regardless of the exact type of the
  lock. It may happen that the lock is a predicate lock, in which case it
  would make more sense to use hash_calc_cell_id with the proper hash table
  size. The current implementation works, because the size of all three hash
  tables is always the same, which allows an interface with fewer arguments. */
  ut_ad(lock_sys->rec_hash.get_n_cells() == lock_sys->prdt_hash.get_n_cells());
  ut_ad(lock_sys->rec_hash.get_n_cells() ==
        lock_sys->prdt_page_hash.get_n_cells());

  /* We need the property that if two pages map to the same bucket of the hash
  table, and thus their lock queues are merged, then these two lock queues are
  protected by the same shard. This is why we compute the shard from the
  cell_id and not from the original lock_rec_hash_value's result. */
  return lock_sys->rec_hash.get_cell_id(lock_rec_hash_value(page_id)) %
         SHARDS_COUNT;
}
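
/* Illustrative sketch (comment only, not compiled): the property above can be
stated as "equal cell_id implies equal page shard". A hypothetical debug
helper expressing it, assuming access to the private get_shard() member, could
look like this:

  void assert_same_shard_for_same_cell(const page_id_t &a, const page_id_t &b) {
    const uint64_t cell_a =
        lock_sys->rec_hash.get_cell_id(lock_rec_hash_value(a));
    const uint64_t cell_b =
        lock_sys->rec_hash.get_cell_id(lock_rec_hash_value(b));
    if (cell_a == cell_b) {
      // Pages whose lock queues are merged into one hash cell must be
      // protected by the same page shard mutex.
      ut_ad(Latches::Page_shards::get_shard(a) ==
            Latches::Page_shards::get_shard(b));
    }
  }

The property holds because get_shard() depends only on the cell_id. */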

const Lock_mutex &Latches::Page_shards::get_mutex(
    const page_id_t &page_id) const {
  return mutexes[get_shard(page_id)];
}

Lock_mutex &Latches::Page_shards::get_mutex(const page_id_t &page_id) {
  /* See "Effective C++ item 3: Use const whenever possible" for an
  explanation of this pattern, which avoids code duplication by reusing the
  const version. */
  return const_cast<Lock_mutex &>(
      const_cast<const Latches::Page_shards *>(this)->get_mutex(page_id));
}
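
/* Illustrative sketch (comment only, not compiled): the const_cast delegation
idiom above in a minimal, self-contained form. The Widget class is
hypothetical and serves only to demonstrate the pattern, which is reused below
by Table_shards:

  class Widget {
   public:
    // The real logic lives in the const overload.
    const int &value(size_t i) const { return data[i]; }

    int &value(size_t i) {
      // Add constness to *this, call the const overload, then cast the
      // constness away from the result. This is safe because *this is known
      // to be non-const in this scope.
      return const_cast<int &>(const_cast<const Widget *>(this)->value(i));
    }

   private:
    int data[10]{};
  };
*/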

Lock_mutex &Latches::Page_shards::get_mutex(const uint64_t cell_id) {
  return mutexes[cell_id % SHARDS_COUNT];
}

size_t Latches::Table_shards::get_shard(const table_id_t table_id) {
  return table_id % SHARDS_COUNT;
}

const Lock_mutex &Latches::Table_shards::get_mutex(
    const table_id_t table_id) const {
  return mutexes[get_shard(table_id)];
}

Lock_mutex &Latches::Table_shards::get_mutex(const table_id_t table_id) {
  /* See "Effective C++ item 3: Use const whenever possible" for an
  explanation of this pattern, which avoids code duplication by reusing the
  const version. */
  return const_cast<Lock_mutex &>(
      const_cast<const Latches::Table_shards *>(this)->get_mutex(table_id));
}

const Lock_mutex &Latches::Table_shards::get_mutex(
    const dict_table_t &table) const {
  return get_mutex(table.id);
}
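
/* Illustrative sketch (comment only, not compiled): the table shard is derived
solely from table_id, so all lock-queue operations on a given table serialize
on one mutex, while tables whose ids differ modulo SHARDS_COUNT can proceed in
parallel. A hypothetical helper, assuming access to the private accessors,
would therefore always return the same mutex for the same table:

  const Lock_mutex &shard_for(const Latches::Table_shards &shards,
                              const dict_table_t &table) {
    // Equivalent to shards.get_mutex(table.id): table locks are sharded by
    // table_id % SHARDS_COUNT.
    return shards.get_mutex(table);
  }

Note that two distinct tables may share a mutex when their ids are congruent
modulo SHARDS_COUNT; callers are expected to latch shards through the RAII
guards provided by the lock system rather than through such raw accessors. */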

/* Definition of the thread-local id of the global rw_lock shard which is
currently s-latched by this thread, or NOT_IN_USE if the thread holds no
s-latch on it. */
thread_local size_t Latches::Unique_sharded_rw_lock::m_shard_id{NOT_IN_USE};

Latches::Unique_sharded_rw_lock::Unique_sharded_rw_lock() {
  rw_lock.create(
#ifdef UNIV_PFS_RWLOCK
      lock_sys_global_rw_lock_key,
#endif
      LATCH_ID_LOCK_SYS_GLOBAL, 64);
}

Latches::Unique_sharded_rw_lock::~Unique_sharded_rw_lock() { rw_lock.free(); }

Latches::Page_shards::Page_shards() {
  for (size_t i = 0; i < SHARDS_COUNT; ++i) {
    mutex_create(LATCH_ID_LOCK_SYS_PAGE, mutexes + i);
  }
}

Latches::Page_shards::~Page_shards() {
  for (size_t i = 0; i < SHARDS_COUNT; ++i) {
    mutex_destroy(mutexes + i);
  }
}

Latches::Table_shards::Table_shards() {
  for (size_t i = 0; i < SHARDS_COUNT; ++i) {
    mutex_create(LATCH_ID_LOCK_SYS_TABLE, mutexes + i);
  }
}

Latches::Table_shards::~Table_shards() {
  for (size_t i = 0; i < SHARDS_COUNT; ++i) {
    mutex_destroy(mutexes + i);
  }
}

}  // namespace locksys