--source include/have_debug_sync.inc
--echo ################################################################
--echo #                                                              #
--echo # Bug #27944920: INNODB: ASSERTION FAILURE:                    #
--echo #                LOCK_GET_TYPE_LOW(LOCK) == 32 IN LOCK0PRIV.IC #
--echo #                                                              #
--echo ################################################################
# We want a situation in which:
#   C1 has a rec lock granted
#   C1 waits for a table lock
#   C2 waits for a rec lock
# and this triggers the bug when processing C1.wait_lock, which is of the
# LOCK_TABLE type.
# However, it turns out it is not so simple to generate a situation in which
# C1 waits for a table lock, because most of the time table-level conflicts
# are handled at the mysql layer, not at the InnoDB layer.
# In particular, IS and IX table locks do not conflict with each other; they
# conflict with S or X table locks, but to obtain an S or X lock you need to
# perform a LOCK TABLES ... statement, which informs the mysql layer about what
# you are doing, making it much harder to get through with conflicting queries
# to the InnoDB layer.
# The simplest way to slip through the mysql layer's checks is to play with
# LOCK_AUTO_INC locks, which conflict with each other.
# There is a difficulty though: even in the traditional innodb_autoinc_lock_mode=0
# such locks are held only for the duration of a query, and not for the whole
# transaction.
# So, if you want the LOCK_AUTO_INC lock to be held long enough to cause another
# transaction to conflict with it, you must somehow make the query take longer.
# A simple way to do that is to use an INSERT ... SELECT ... statement, where
# the SELECT has to wait for a lock.
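# As a minimal sketch of the conflict we rely on (session names Ca and Cb are
# purely illustrative), under innodb_autoinc_lock_mode=0:
#   Ca: INSERT INTO t1 (val) SELECT ... ;  -- takes LOCK_AUTO_INC on t1 and
#                                          -- keeps it while its SELECT blocks
#   Cb: INSERT INTO t1 (val) VALUES (7);   -- must wait inside InnoDB for
#                                          -- Ca's LOCK_AUTO_INC
# Neither statement trips a mysql-layer table lock check, so the conflict is
# handled entirely by InnoDB's lock system.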
# So, putting all these ideas together, here is our plan:
# 1. C3 obtains an X-lock on a record t2.id=2
#    (fun fact: it will not work with id=1, because in step 2 we need at least
#    one row to be successfully inserted before blocking on the locked row)
# 2. C4 performs INSERT INTO t1 (val) SELECT id FROM t2;
#    and has to wait for C3 to release the rec lock,
#    while C4 is holding LOCK_AUTO_INC
# 3. C1 obtains an S-lock on a record t1.id=1
# 4. C1 performs INSERT INTO t1 (val) VALUES (7)
#    and has to wait for C4 to release LOCK_AUTO_INC
# 5. C2 tries to SELECT * FROM t1 WHERE id=1 FOR UPDATE
#    and has to wait for C1 to release the S-lock;
#    this is what triggers the bug (see the chain sketched below).
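# Pictorially, the wait-for chain we are aiming for is:
#   C2 --waits-for[rec t1.id=1]--> C1 --waits-for[t1.AUTO_INC]--> C4 --waits-for[rec t2.id=2]--> C3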
# This test requires innodb_autoinc_lock_mode == 0, so we explicitly check it here:
SHOW VARIABLES LIKE 'innodb_autoinc_lock_mode';
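# A stricter, fail-fast guard is sketched below with the generic
# include/assert.inc helper (assuming the usual $assert_text/$assert_cond
# convention), so that a wrong server option aborts the test immediately:
--let $assert_text= innodb_autoinc_lock_mode must be 0 (traditional) for this test
--let $assert_cond= [SELECT @@innodb_autoinc_lock_mode] = 0
--source include/assert.inc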
# Prepare the tables
CREATE TABLE t1 (
  id INT PRIMARY KEY AUTO_INCREMENT,
  val INT
) Engine=InnoDB;
CREATE TABLE t2 (
  id INT PRIMARY KEY
) Engine=InnoDB;
INSERT INTO t1 (id, val) VALUES (1,1);
INSERT INTO t2 (id) VALUES (1),(2),(3);
# Save the original settings, to be restored at the end of the test
SET @innodb_lock_wait_timeout_saved = @@global.innodb_lock_wait_timeout;
# Make sure that transactions will not finish prematurely
SET @@global.innodb_lock_wait_timeout = 100000;
--connect (C1, localhost, root,,)
--connect (C2, localhost, root,,)
--connect (C3, localhost, root,,)
--connect (C4, localhost, root,,)
--connection C3
BEGIN;
SELECT * FROM t2 WHERE id=2 FOR UPDATE;
--connection C4
BEGIN;
SET DEBUG_SYNC = 'lock_wait_will_wait SIGNAL C4_will_wait';
--send INSERT INTO t1 (val) SELECT id FROM t2
--connection C1
BEGIN;
SELECT * FROM t1 WHERE id=1 FOR SHARE;
SET DEBUG_SYNC = 'now WAIT_FOR C4_will_wait';
SET DEBUG_SYNC = 'lock_wait_will_wait SIGNAL C1_will_wait';
--send INSERT INTO t1 (val) VALUES (7)
--connection C2
BEGIN;
SET DEBUG_SYNC = 'now WAIT_FOR C1_will_wait';
SET DEBUG_SYNC = 'lock_wait_will_wait SIGNAL C2_will_wait';
--send SELECT * FROM t1 WHERE id=1 FOR UPDATE
# The bug, if present, will manifest itself at this moment.
--connection default
SET DEBUG_SYNC = 'now WAIT_FOR C2_will_wait';
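# (For manual debugging one could inspect the whole wait graph at this point,
# e.g. via the performance_schema lock instrumentation:
#   SELECT ENGINE_TRANSACTION_ID, OBJECT_NAME, LOCK_TYPE, LOCK_MODE, LOCK_STATUS
#   FROM performance_schema.data_locks;
# which should list C4's GRANTED AUTO_INC table lock on t1 next to the WAITING
# locks of C1, C2 and C4. It is kept commented out so that the recorded test
# output stays deterministic.)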
# Clean up all transactions
--connection C3
ROLLBACK;
--connection C4
--reap
ROLLBACK;
--connection C1
--reap
ROLLBACK;
--connection C2
--reap
ROLLBACK;
# Clean up connections
--connection default
--disconnect C1
--disconnect C2
--disconnect C3
--disconnect C4
# Clean up tables
DROP TABLE t2;
DROP TABLE t1;
# Restore saved state
SET @@global.innodb_lock_wait_timeout = @innodb_lock_wait_timeout_saved;
--echo ########################
--echo #                      #
--echo # End of Bug #27944920 #
--echo #                      #
--echo ########################
# The following scenario is intended to cover the rare case of a trx being
# killed while waiting for a table lock, which exercises the table
# lock case in the lock_cancel_waiting_and_release function.
#
# To generate a situation in which a trx is waiting for a table lock inside
# InnoDB we use the following scenario:
#   C1 locks t2.id = 2
#   C3 locks t2.id = 3
#   C2 obtains t1.AUTO_INC and waits for C1's t2.id=2 row lock
#   C3 tries to insert into t1, and has to wait for C2's autoinc lock
#   C1 rolls back, which unlocks t2.id=2, and C2 proceeds to lock t2.id=3,
#   where it is now blocked by C3; but C3 is already blocked by C2, so we
#   have a deadlock cycle.
# We make C2 heavy by modifying many rows in t3, to make sure that C3 is
# chosen as the deadlock victim.
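# (A hedged aside: the relative transaction weights can be observed manually
# via
#   SELECT trx_mysql_thread_id, trx_rows_modified, trx_rows_locked
#   FROM information_schema.innodb_trx;
# the UPDATE of six rows in t3 makes C2's trx_rows_modified the largest, which
# is why the deadlock checker should prefer to roll back C3 instead. Kept as a
# comment so the recorded test output stays deterministic.)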
CREATE TABLE t1 (
  id INT PRIMARY KEY AUTO_INCREMENT,
  val INT
) Engine=InnoDB;
CREATE TABLE t2 (
  id INT PRIMARY KEY
) Engine=InnoDB;
CREATE TABLE t3 (
  id INT PRIMARY KEY,
  val INT
) Engine=InnoDB;
INSERT INTO t1 (id, val) VALUES (1,1);
INSERT INTO t2 (id) VALUES (1),(2),(3);
INSERT INTO t3 (id, val) VALUES (1,1),(2,2),(3,3),(4,4),(5,5),(6,6);
# Save the original settings, to be restored at the end of the test
SET @innodb_lock_wait_timeout_saved = @@global.innodb_lock_wait_timeout;
# Make sure that transactions will not finish prematurely
SET @@global.innodb_lock_wait_timeout = 100000;
--connect (C1, localhost, root,,)
--connect (C2, localhost, root,,)
--connect (C3, localhost, root,,)
--connection C1
BEGIN;
SELECT * FROM t2 WHERE id=2 FOR UPDATE;
--connection C3
BEGIN;
SELECT * FROM t2 WHERE id=3 FOR UPDATE;
--connection C2
BEGIN;
UPDATE t3 SET val = 13;
SET DEBUG_SYNC = 'lock_wait_will_wait SIGNAL C2_will_wait';
--send INSERT INTO t1 (val) SELECT id FROM t2
# C2 --waits-for[t2.id=2]--> C1
--connection C3
SET DEBUG_SYNC = 'now WAIT_FOR C2_will_wait';
SET DEBUG_SYNC = 'lock_wait_will_wait SIGNAL C3_will_wait';
--send INSERT INTO t1 (val) VALUES (13)
# C3 --waits-for[t1.autoinc]--> C2 --waits-for[t2.id=2]--> C1
--connection C1
SET DEBUG_SYNC = 'now WAIT_FOR C3_will_wait';
ROLLBACK;
# C3 --waits-for[t1.autoinc]--> C2 --waits-for[t2.id=3]--> C3
# this is a deadlock.
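# (For manual verification the victim choice can be confirmed afterwards with
# SHOW ENGINE INNODB STATUS, whose LATEST DETECTED DEADLOCK section names the
# transaction that was rolled back; it is deliberately not run here, as its
# contents are not deterministic.)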
--connection C3
--error ER_LOCK_DEADLOCK
--reap
ROLLBACK;
--connection C2
--reap
ROLLBACK;
--connection default
--disconnect C1
--disconnect C2
--disconnect C3
DROP TABLES t1,t2,t3;
# Restore saved state
SET @@global.innodb_lock_wait_timeout = @innodb_lock_wait_timeout_saved;