#
# Test cases for WL#5597
#
# In this file, we only test for the following cases:
#
# CASE #1: update/delete multiple records from a table that share the
# same hashtable key (in the slave's HASH_SCAN algorithm).
#
# CASE #2: same as CASE #1, but here the master has more columns than
# the slave, hence duplicate keys in the slave's hashtable are a side
# effect, but should not be a problem.
#
# CASE #3: the slave stops gracefully when it is updating a row that
# does not exist in its copy of the table.
#
# CASE #4: update/delete multiple records with blobs. Given that blobs
# are not included in hashing, some record keys will collide.
#
# CASE #5: update/delete tables with only blob columns.
#
-- source include/not_group_replication_plugin.inc
-- source include/have_binlog_format_row.inc
-- source include/master-slave.inc
# sql_replica_skip_counter is not supported with compression
--source include/not_binlog_transaction_compression_on.inc
SET sql_mode = 'NO_ENGINE_SUBSTITUTION';
call mtr.add_suppression("Replica SQL for channel '': .*Could not execute Update_rows event on table test.t1; Can't find record in 't1', Error_code: 1032; handler error HA_ERR_END_OF_FILE; the event's source log master-bin.[0-9]*, end_log_pos [0-9]*, Error_code: MY-001032");
call mtr.add_suppression("Replica: Can't find record in 't1' Error_code: MY-001032");
call mtr.add_suppression("Replica SQL for channel '': .*Could not execute Delete_rows event on table test.t1; Can't find record in 't1', Error_code: 1032; handler error HA_ERR_END_OF_FILE; the event's source log master-bin.[0-9]*, end_log_pos [0-9]*, Error_code: MY-001032");
call mtr.add_suppression("Replica SQL for channel '': ... The replica coordinator and worker threads are stopped, possibly leaving data in inconsistent state. A restart should restore consistency automatically, although using non-transactional storage for data or info tables or DDL queries could lead to problems. In such cases you have to examine your data (see documentation for details). Error_code: 1756");
call mtr.add_suppression("Replica SQL for channel '': .*Could not execute Delete_rows event on table test.t1; Can't find record in 't1', Error_code: 1032; handler error HA_ERR_END_OF_FILE; the event's source log FIRST, end_log_pos [0-9]*, Error_code: MY-001032");
SET sql_mode = default;
--connection slave
SET @saved_slave_rows_search_algorithms= @@global.slave_rows_search_algorithms;
SET GLOBAL slave_rows_search_algorithms= 'INDEX_SCAN,HASH_SCAN';
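# With HASH_SCAN the applier hashes the rows of each row event into an
# in-memory hashtable and matches them against the table rows in a
# single scan, so identical row images end up sharing one hash key.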
#
# CASE #1: entries that generate the same key for the slave's internal
# hash table.
#
# ASSERTS that no updates are lost due to having multiple entries for
# the same hashtable key in the slave's HASH_SCAN.
#
-- connection master
CREATE TABLE t1 (a INT);
INSERT INTO t1 VALUES (1), (1), (2), (3);
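# The two rows with a=1 carry identical row images and thus share one
# hashtable key; the UPDATE/DELETE below must still hit both rows.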
--source include/sync_slave_sql_with_master.inc
DELETE FROM t1;
# try to change the order of the rows in the engine.
INSERT INTO t1 VALUES (2), (1), (3), (1);
-- connection master
UPDATE t1 SET a=1000 WHERE a=1;
--source include/sync_slave_sql_with_master.inc
--let $diff_tables= master:test.t1, slave:test.t1
-- source include/diff_tables.inc
-- connection master
DELETE FROM t1 WHERE a=1000;
DELETE FROM t1 WHERE a=2 OR a=3;
--source include/sync_slave_sql_with_master.inc
--let $diff_tables= master:test.t1, slave:test.t1
-- source include/diff_tables.inc
# cleanup for CASE #1
--connection master
DROP TABLE t1;
--source include/sync_slave_sql_with_master.inc
-- source include/rpl_reset.inc
#
# CASE #2: entries generating the same key for the slave's internal
# hashtable because the master's table has more columns than the
# slave's.
#
# ASSERTS that no updates are lost due to having multiple entries for
# the same hashtable key in the slave's HASH_SCAN when the master
# has more columns than the slave.
-- connection master
SET SQL_LOG_BIN=0;
CREATE TABLE t1 (a INT, b INT);
SET SQL_LOG_BIN=1;
-- connection slave
CREATE TABLE t1 (a INT);
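# The slave's table lacks column b, so master rows that differ only in
# b (e.g. (1,1) and (1,2)) become identical on the slave and share a
# hashtable key.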
-- connection master
INSERT INTO t1 VALUES (1,1), (1,2), (2,1), (2,2);
UPDATE t1 SET a=1000 WHERE a=1;
SELECT * FROM t1;
--source include/sync_slave_sql_with_master.inc
SELECT * FROM t1;
-- connection master
DELETE FROM t1 WHERE a=1000;
DELETE FROM t1 WHERE a=2;
SELECT * FROM t1;
--source include/sync_slave_sql_with_master.inc
SELECT * FROM t1;
# cleanup for CASE #2
--connection master
DROP TABLE t1;
--source include/sync_slave_sql_with_master.inc
-- source include/rpl_reset.inc
#
# CASE #3: The master updates and deletes some row that the slave does
# not have.
#
# ASSERTS that the slave fails gracefully when the row is not found.
#
-- connection master
CREATE TABLE t1 (a INT);
INSERT INTO t1 VALUES (1), (1), (2), (3);
--source include/sync_slave_sql_with_master.inc
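# The sync above leaves us connected to the slave; delete the rows here
# so the master's next UPDATE/DELETE cannot find them on the slave.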
DELETE FROM t1 WHERE a=1;
DELETE FROM t1 WHERE a=2;
-- connection master
UPDATE t1 SET a=1000 WHERE a=1;
-- let $slave_sql_errno= 1032
-- source include/wait_for_slave_sql_error_and_skip.inc
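# The include waits for the SQL thread to stop with "Can't find record"
# (1032) and then skips the failing event before restarting the thread.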
-- connection master
DELETE FROM t1 WHERE a=2;
-- let $slave_sql_errno= 1032
-- source include/wait_for_slave_sql_error_and_skip.inc
DROP TABLE t1;
--source include/sync_slave_sql_with_master.inc
-- source include/rpl_reset.inc
#
# CASE #4: covers the case of tables that have blobs in them.
#
# ASSERTS that there are no lost updates.
-- connection master
CREATE TABLE t1 (a INT, b TINYBLOB);
INSERT INTO t1 VALUES (1,'a'), (1, 'b'), (2,'aa'), (2, 'aa');
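# Blob columns are left out of the hash, so rows agreeing on column a
# (e.g. (1,'a') and (1,'b')) collide in the hashtable; the applier is
# expected to tell them apart by comparing the complete row images.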
UPDATE t1 SET b='c' WHERE a=1;
--source include/sync_slave_sql_with_master.inc
--let $diff_tables= master:test.t1, slave:test.t1
-- source include/diff_tables.inc
-- connection master
UPDATE t1 SET a=10000 WHERE b='aa';
--source include/sync_slave_sql_with_master.inc
--let $diff_tables= master:test.t1, slave:test.t1
-- source include/diff_tables.inc
-- connection master
UPDATE t1 SET b='c' WHERE b='aa';
--source include/sync_slave_sql_with_master.inc
--let $diff_tables= master:test.t1, slave:test.t1
-- source include/diff_tables.inc
-- connection master
DELETE FROM t1 WHERE b='c';
--source include/sync_slave_sql_with_master.inc
--let $diff_tables= master:test.t1, slave:test.t1
-- source include/diff_tables.inc
# cleanup for CASE #4
--connection master
DROP TABLE t1;
--source include/sync_slave_sql_with_master.inc
-- source include/rpl_reset.inc
#
# CASE #5: covers the case in which the table has only blobs in it.
#
# ASSERTS that there are no issues even if blobs are skipped from the
# hashing. Tables on master and slave will not go out of sync.
#
-- connection master
CREATE TABLE t1 (a TINYBLOB, b TINYBLOB);
INSERT INTO t1 VALUES ('a','a'), ('b', 'b'), ('a','aa'), ('a', 'aa');
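# With every column a blob, nothing is left for the hash key, so all
# rows of an event collide on one key; matching then has to fall back
# to comparing the full row images.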
UPDATE t1 SET b='c' WHERE b='aa';
--source include/sync_slave_sql_with_master.inc
--let $diff_tables= master:test.t1, slave:test.t1
-- source include/diff_tables.inc
-- connection master
DELETE FROM t1;
--source include/sync_slave_sql_with_master.inc
--let $diff_tables= master:test.t1, slave:test.t1
-- source include/diff_tables.inc
-- connection master
INSERT INTO t1 VALUES (NULL,NULL), (NULL, NULL);
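# Also cover all-NULL blob rows, which again share a single hash key.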
DELETE FROM t1;
--source include/sync_slave_sql_with_master.inc
--let $diff_tables= master:test.t1, slave:test.t1
-- source include/diff_tables.inc
# cleanup for CASE #5
--connection master
DROP TABLE t1;
--source include/sync_slave_sql_with_master.inc
SET @@global.slave_rows_search_algorithms= @saved_slave_rows_search_algorithms;
--source include/rpl_end.inc