File: rpl_error_on_multiple_row_update.test

# === Purpose ===
#
# When using the hash_scan algorithm to apply update row events,
# and the algorithm has updated a row, the new row needs to be
# looked up again in the hash. This ensures that successive updates
# of the same row within one event are applied correctly. The test
# verifies that this is the case when using binlog_row_value_options=PARTIAL_JSON.
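#
# For illustration: the function used below updates the same row of t1
# three times (a: 1 -> 3 -> 2 -> 4), all from within a single INSERT
# statement, so the corresponding before/after image pairs are expected
# to end up in one update rows event. After the applier processes the
# pair (1 -> 3), it has to look the updated row up in the hash again to
# find and apply (3 -> 2), and then once more for (2 -> 4).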
#
# ==== Implementation ====
#
# 1. Create source-replica topology.
# 2. Set binlog_row_value_options to PARTIAL_JSON on the source.
# 3. Set slave_rows_search_algorithms to 'HASH_SCAN' on the replica.
# 4. Create tables, insert data, and a function that updates the same
#    row multiple times.
# 5. Sync replica with source and verify that the tables match.
# 6. Cleanup.

# === References ===
#
# Bug#32221703 ERROR APPLYING EVENT UPDATING SAME ROW MULTIPLE TIMES WHEN USING PARTIAL JSON
#

--source include/have_binlog_format_row.inc
--source include/have_debug.inc
--let $skip_configuration_privilege_checks_user= 'skip'
--let $rpl_privilege_checks_user_grant_all= 1
# 1. Create source-replica topology.
--source include/master-slave.inc

# 2. Set binlog_row_value_options to PARTIAL_JSON on the source.
SET @old_binlog_row_value_options= @@session.BINLOG_ROW_VALUE_OPTIONS;
SET @@session.binlog_row_value_options = 'PARTIAL_JSON';

# 3. Set slave_rows_search_algorithms to 'HASH_SCAN' on replica.
--source include/rpl_connection_slave.inc
SET @saved_slave_rows_search_algorithms= @@global.slave_rows_search_algorithms;
SET GLOBAL slave_rows_search_algorithms= 'HASH_SCAN';

# 4. Create tables, insert data, and a function that updates the same row multiple times.
--source include/rpl_connection_master.inc
CREATE TABLE t1 (a INT UNIQUE KEY, b JSON);
INSERT INTO t1 VALUES (1, '{ "long string": "long string", "x": 2}');
CREATE TABLE t2 (a INT);

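# f() below updates the same row of t1 three times. It is invoked from
# the INSERT into t2 further down, so all three updates run as side
# effects of that single statement.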
--delimiter |
CREATE FUNCTION f () RETURNS INT BEGIN
  UPDATE t1 SET a = 3, b = JSON_SET(b, '$.x', 3);
  UPDATE t1 SET a = 2;
  UPDATE t1 SET a = 4;
  RETURN 1;
END|
--delimiter ;

BEGIN;
INSERT INTO t2 VALUES (f());
COMMIT;

# 5. Sync replica with source server.
--source include/sync_slave_sql_with_master.inc

--let $diff_tables= master:test.t1, slave:test.t1
--source include/diff_tables.inc

--let $diff_tables= master:test.t2, slave:test.t2
--source include/diff_tables.inc

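# Check which row-lookup algorithm the replica applier last used; it is
# expected to be HASH_SCAN. This status variable is available in debug
# builds (hence the have_debug.inc requirement above).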
SHOW STATUS LIKE 'Slave_rows_last_search_algorithm_used';

# 6. Cleanup.
SET GLOBAL slave_rows_search_algorithms= @saved_slave_rows_search_algorithms;
--source include/rpl_connection_master.inc
SET @@SESSION.BINLOG_ROW_VALUE_OPTIONS= @old_binlog_row_value_options;
DROP TABLE t1;
DROP TABLE t2;
DROP FUNCTION f;
--source include/rpl_end.inc