File: gr_recovery_conflicting_data_failover.test

################################################################################
# This test verifies that whenever a donor has conflicting data that causes
# errors on the joiner, recovery fails over to another donor.
#
# The test steps are:
#   0) The test requires three servers: M1, M2 and M3.
#   1) Bootstrap-start M1 with no replication user for recovery. Insert some
#      data.
#   2) Insert some extra data on M2 before starting it. Create a replication
#      user for recovery.
#   3) Add conflicting data on M3 and start it with the configured replication
#      user, then watch it fail as:
#      1. M1 does not have the correct replication user.
#      2. M2 has conflicting data.
#   4) Create the recovery user on M1. The joiner M3 should now connect to M1
#      when failing over.
#   5) The joiner M3 should become ONLINE. Validate data on M3.
#   6) Clean up.
################################################################################

--source include/big_test.inc
--let $group_replication_group_name=65b51c20-262d-11e5-867f-0800200c9a66
--source include/have_group_replication_plugin.inc
--let $rpl_skip_group_replication_start= 1
--let $rpl_server_count= 3
--source include/group_replication.inc
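# group_replication.inc sets up the three-server topology and the server1,
# server2 and server3 connections; with $rpl_skip_group_replication_start
# set, no member joins the group yet.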

--let $recovery_user= recovery_user
--let $recovery_password= recovery_password
--let $fake_transaction_uuid= aaaaaaaa-bbbb-aaaa-bbbb-aaaaaaaaaaaa

--echo #
--echo # Start a new member with some group data but no replication user
--echo #

--connection server1
--source include/start_and_bootstrap_group_replication.inc
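# Server1 bootstraps the group and is, for now, its only member.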

# Insert some data
CREATE TABLE t1 (c1 INT NOT NULL PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);

# Log fake transactions to allow server2 to join the group with
# extra data.
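# Executing an empty BEGIN/COMMIT under an explicit GTID_NEXT records the
# GTID in server1's executed set without changing any data; server2 will run
# real transactions under these same GTIDs, so its extra data will not count
# as transactions missing from the group, even though the two binary logs
# end up with different contents for those GTIDs.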
--eval SET GTID_NEXT= "$fake_transaction_uuid:1"
BEGIN;
COMMIT;
SET GTID_NEXT= "AUTOMATIC";
--eval SET GTID_NEXT= "$fake_transaction_uuid:2"
BEGIN;
COMMIT;
SET GTID_NEXT= "AUTOMATIC";

--echo #
--echo # Add some extra data on server 2 and then start group replication
--echo # Create a replication user so recovery can connect
--echo #

--connection server2

--eval SET GTID_NEXT= "$fake_transaction_uuid:1"
CREATE TABLE lost_test_table (c1 INT NOT NULL PRIMARY KEY) ENGINE=InnoDB;
SET GTID_NEXT= "AUTOMATIC";
--eval SET GTID_NEXT= "$fake_transaction_uuid:2"
INSERT INTO lost_test_table VALUES (1);
SET GTID_NEXT= "AUTOMATIC";

SET SESSION sql_log_bin= 0;
--eval CREATE USER "$recovery_user" IDENTIFIED BY "$recovery_password"
--eval GRANT REPLICATION SLAVE ON *.* TO "$recovery_user"
--eval GRANT GROUP_REPLICATION_STREAM ON *.* TO "$recovery_user"@'%'
FLUSH PRIVILEGES;
SET SESSION sql_log_bin= 1;
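# Note: with sql_log_bin= 0 the account statements above stay out of the
# binary log (and of the GTID set), so the user exists only on server2 and
# is never replicated to the other members.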

--source include/start_group_replication.inc

--echo #
--echo # Start recovery on member 3 and watch it fail for a bit as:
--echo # 1) Server 1 does not have the correct replication user
--echo # 2) Server 2 has conflicting data
--echo #

--connection server3

SET SESSION sql_log_bin= 0;
--eval CREATE USER "$recovery_user" IDENTIFIED BY "$recovery_password"
--eval GRANT GROUP_REPLICATION_STREAM ON *.* TO "$recovery_user"@'%'
FLUSH PRIVILEGES;
SET SESSION sql_log_bin= 1;
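# Note: server3 gets a local copy of the account too; the
# GROUP_REPLICATION_STREAM grant presumably allows the account to be used
# for group communication connections over the MySQL protocol stack.
# REPLICATION SLAVE is not granted here, as nothing recovers from server3
# in this test.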

SET SESSION sql_log_bin= 0;
call mtr.add_suppression("There was an error when connecting to the donor*");
call mtr.add_suppression("For details please check performance_schema.replication_connection_status table and error log messages of Replica I/O for channel group_replication_recovery.");
call mtr.add_suppression("Replica SQL for channel 'group_replication_recovery': Error 'Table 'lost_test_table'*");
call mtr.add_suppression("Replica: Table 'lost_test_table' already exists Error_code:*");
call mtr.add_suppression("Replica SQL for channel 'group_replication_recovery': Worker .* failed executing transaction .*; Error 'Table 'lost_test_table' already exists' on query.");
call mtr.add_suppression("Replica SQL for channel 'group_replication_recovery': ... The replica coordinator and worker threads are stopped.*");
call mtr.add_suppression("Error while starting the group replication incremental recovery receiver/applier threads");
call mtr.add_suppression("Replica I/O for channel 'group_replication_recovery': Source command COM_REGISTER_REPLICA failed: Access denied for user 'recovery_user'.*");
call mtr.add_suppression("Replica I/O for channel 'group_replication_recovery': Source command COM_REGISTER_REPLICA failed: failed registering on source, reconnecting to try again.*");
call mtr.add_suppression("Replica I/O thread couldn't register on source");
call mtr.add_suppression(".*This member has more executed transactions *.*");
call mtr.add_suppression("The member contains transactions not present in the group. It is only allowed to *.*");
SET SESSION sql_log_bin= 1;
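# All of the suppressed errors above are expected while recovery bounces
# between the two unusable donors; the suppressions keep the test from
# failing on error-log content.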

SET @debug_save_rec_int= @@GLOBAL.group_replication_recovery_reconnect_interval;

--disable_warnings
--eval CHANGE REPLICATION SOURCE TO SOURCE_USER='$recovery_user', SOURCE_PASSWORD='$recovery_password' FOR CHANNEL 'group_replication_recovery'
--enable_warnings

SET GLOBAL group_replication_recovery_reconnect_interval= 1; # 1 second
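# A short reconnect interval makes the joiner retry, and thus fail over to
# the other donor, quickly instead of waiting the default 60 seconds between
# connection attempts.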

SET SESSION sql_log_bin= 0;
CREATE TABLE lost_test_table (c1 INT NOT NULL PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO lost_test_table VALUES (1);
SET SESSION sql_log_bin= 1;
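# This is the conflicting data: the table now exists on server3 but, since
# it was created with sql_log_bin= 0, the matching GTIDs are absent from
# server3's executed set. Recovery from server2 will replay the CREATE TABLE
# for those GTIDs and fail with "table already exists", while server1's
# binary log holds only empty transactions for them.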

--eval SET GLOBAL group_replication_group_name= "$group_replication_group_name"
--source include/start_group_replication_command.inc
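# Unlike start_group_replication.inc, this include only issues
# START GROUP_REPLICATION and does not wait for the member to come ONLINE,
# which suits this test since recovery is expected to keep failing for now.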

# Give it time to fail several times
--sleep 5

--let $group_replication_member_state= RECOVERING
--source include/gr_wait_for_member_state.inc
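# The joiner keeps cycling through donors and stays in RECOVERING. When
# debugging by hand, the joiner's state can be checked with, for example:
#   SELECT MEMBER_HOST, MEMBER_STATE
#     FROM performance_schema.replication_group_members;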

--echo #
--echo # Create the recovery user on server 1
--echo # The joiner should now connect to server 1 when failing over.
--echo #

--connection server1
SET SESSION sql_log_bin= 0;
--eval CREATE USER "$recovery_user" IDENTIFIED BY "$recovery_password"
--eval GRANT REPLICATION SLAVE ON *.* TO "$recovery_user"
--eval GRANT GROUP_REPLICATION_STREAM ON *.* TO "$recovery_user"@'%'
FLUSH PRIVILEGES;
SET SESSION sql_log_bin= 1;
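# Server1 is now a valid donor: the credentials match and its binary log has
# only empty transactions for the fake GTIDs, so the joiner's next failover
# attempt should complete recovery.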

--connection server3

--let $group_replication_member_state= ONLINE
--source include/gr_wait_for_member_state.inc

--let $assert_text= On the recovered member, the table should exist and have 1 row
--let $assert_cond= [select count(*) from t1] = 1;
--source include/assert.inc

--echo #
--echo # Cleaning up
--echo # We use RESET MASTER here because the members' GTID sets have diverged
--echo #

SET @@GLOBAL.group_replication_recovery_reconnect_interval= @debug_save_rec_int;

DROP TABLE t1;

--source include/stop_group_replication.inc

DROP TABLE lost_test_table;

RESET MASTER;

SET SESSION sql_log_bin= 0;
--eval DROP USER "$recovery_user"
SET SESSION sql_log_bin= 1;

--connection server2

--source include/stop_group_replication.inc

SET SESSION sql_log_bin= 0;
--eval DROP USER "$recovery_user"
SET SESSION sql_log_bin= 1;

DROP TABLE lost_test_table;

RESET MASTER;

--connection server1

--source include/stop_group_replication.inc

SET SESSION sql_log_bin= 0;
--eval DROP USER "$recovery_user"
SET SESSION sql_log_bin= 1;

RESET MASTER;

--source include/group_replication_end.inc