File: gr_recovery_endpoints_retry_clone.test

###############################################################################
# This test verifies that when the recovery endpoints are set to DEFAULT,
# recovery shall retry clone from another donor if the current one fails.
#
# Test:
#   0. The test requires three servers
#   1. Install clone plugin on server1.
#   2. Bootstrap server1, add some data, and add server2 to the group
#   3. Restart server3 with a monitoring process (mysqld_safe) if needed
#   4. Set up the server so group replication starts on boot
#      Install the Clone plugin
#   5. Ensure clone is used on recovery
#   6. Activate a debug point that will simulate failure on the first
#      clone donor
#   7. On an empty server3 start group replication
#   8. Clone will fail on the first donor due to the debug point and
#      succeed on the second endpoint
#   9. Wait for the server to restart and come back
#  10. Clone will continue iterating over the recovery endpoints
#      and will do recovery from the second endpoint
#  11. Cleanup
#
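# Background: with group_replication_advertise_recovery_endpoints left at its
# DEFAULT, a donor advertises its standard server connection for distributed
# recovery, and the joiner tries the advertised endpoints one by one. For
# illustration only (hypothetical values, not used by this test), an explicit
# endpoint list would be configured as:
#   SET GLOBAL group_replication_advertise_recovery_endpoints=
#     '127.0.0.1:13000,127.0.0.1:13001';
#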
###############################################################################

--source include/have_debug_sync.inc
--source include/have_mysqld_monitoring_process.inc
--source include/have_clone_plugin.inc
--source include/have_group_replication_plugin.inc
--let $rpl_skip_group_replication_start= 1
--let $rpl_server_count= 3
--source include/group_replication.inc

--echo
--echo # 1. Install clone plugin on server1.

--let $rpl_connection_name= server1
--source include/rpl_connection.inc

--replace_result $CLONE_PLUGIN CLONE_PLUGIN
--eval INSTALL PLUGIN clone SONAME '$CLONE_PLUGIN'

--echo
--echo # 2. Bootstrap server1, add some data, and add server2 to the group

--source include/start_and_bootstrap_group_replication.inc

CREATE TABLE t1 (c1 INT NOT NULL PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
INSERT INTO t1 VALUES (2);

--let $rpl_connection_name= server2
--source include/rpl_connection.inc

--replace_result $CLONE_PLUGIN CLONE_PLUGIN
--eval INSTALL PLUGIN clone SONAME '$CLONE_PLUGIN'

--source include/start_group_replication.inc

--echo
--echo # 3. Restart server3 with a monitoring process (mysqld_safe) if needed

--let $rpl_connection_name= server3
--source include/rpl_connection.inc

--let $_group_replication_local_address= `SELECT @@GLOBAL.group_replication_local_address`
--let $_group_replication_group_seeds= `SELECT @@GLOBAL.group_replication_group_seeds`
--let $_group_replication_start_on_boot= `SELECT @@GLOBAL.group_replication_start_on_boot`
--let $_group_replication_comm_stack= `SELECT @@GLOBAL.group_replication_communication_stack`

--let $plugin_list= $GROUP_REPLICATION
--source include/spawn_monitoring_process.inc
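# The monitoring process (mysqld_safe) is needed because clone provisioning
# restarts server3; it brings the server back up automatically.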

--echo
--echo # 4. Set up the server so group replication starts on boot
--echo #    Install the Clone plugin

--disable_query_log
--eval SET PERSIST group_replication_group_name= "$group_replication_group_name"
--eval SET PERSIST group_replication_local_address= "$_group_replication_local_address"
--eval SET PERSIST group_replication_group_seeds= "$_group_replication_group_seeds"
--eval SET PERSIST group_replication_communication_stack= "$_group_replication_comm_stack"

SET PERSIST group_replication_start_on_boot= ON;
--enable_query_log
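# The settings above are persisted so that they survive the restart performed
# at the end of clone provisioning.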

--replace_result $CLONE_PLUGIN CLONE_PLUGIN
--eval INSTALL PLUGIN clone SONAME '$CLONE_PLUGIN'

--echo
--echo # 5. Ensure clone is used on recovery

--let $_group_replication_threshold_save= `SELECT @@GLOBAL.group_replication_clone_threshold`
SET GLOBAL group_replication_clone_threshold= 1;
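# A threshold of 1 means any transaction gap towards the group makes recovery
# choose clone rather than incremental (binary log based) recovery.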

--echo
--echo # 6. Activate a debug point that will simulate failure on the first
--echo #    clone donor

--let $rpl_connection_name= server3
--source include/rpl_connection.inc

SET @debug_save= @@GLOBAL.DEBUG;
SET @@GLOBAL.DEBUG='+d,gr_run_clone_query_fail_once';
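# The gr_run_clone_query_fail_once debug point makes the first clone attempt
# fail, so recovery has to move on to the next advertised endpoint.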

--echo
--echo # 7. On an empty server3 start group replication

--let $rpl_connection_name= server3
--source include/rpl_connection.inc


--send START GROUP_REPLICATION
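# The statement is sent asynchronously so that the test can switch to a second
# connection (server_3) and drive the debug sync points while it executes.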

--let $rpl_connection_name= server_3
--source include/rpl_connection.inc

--echo
--echo # 8. Clone will fail on the first donor due to the debug point and
--echo #    succeed on the second endpoint

SET DEBUG_SYNC = "now WAIT_FOR signal.run_clone_query_waiting";
SET DEBUG_SYNC = "now SIGNAL signal.run_clone_query_continue";

--echo
--echo # 9. Wait for the server to restart and come back

--let $rpl_connection_name= server3
--source include/rpl_connection.inc

--reap
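# Clone provisioning shuts server3 down: wait for the connection to drop and
# for the monitoring process to restart the server, then reconnect.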

--source include/wait_until_disconnected.inc

--let $rpl_server_number= 3
--source include/rpl_reconnect.inc

--let $group_replication_member_state=ONLINE
--source include/gr_wait_for_member_state.inc

--echo
--echo # 10. Clone will continue iterating over the recovery endpoints
--echo #     and will do recovery from the second endpoint
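# performance_schema.clone_status reports the state of the most recent clone
# operation on the recipient.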

--let $assert_text= Clone must be completed
--let $assert_cond= [SELECT state="Completed" FROM performance_schema.clone_status] = 1;
--source include/assert.inc

# See if the data has been properly cloned in server3
--let $diff_tables= server1:test.t1, server3:test.t1
--source include/diff_tables.inc

--echo
--echo # 11. Cleanup

SET @@GLOBAL.DEBUG= @debug_save;

--eval SET GLOBAL group_replication_clone_threshold= $_group_replication_threshold_save

RESET PERSIST IF EXISTS group_replication_group_name;
RESET PERSIST IF EXISTS group_replication_local_address;
RESET PERSIST IF EXISTS group_replication_group_seeds;
RESET PERSIST IF EXISTS group_replication_start_on_boot;
RESET PERSIST group_replication_communication_stack;

--replace_result $_group_replication_start_on_boot START_ON_BOOT_VALUE
--eval SET GLOBAL group_replication_start_on_boot= $_group_replication_start_on_boot

DROP TABLE t1;

--source include/rpl_sync.inc

SET SESSION sql_log_bin= 0;
call mtr.add_suppression("Due to the number of missing transactions being higher than the configured threshold of*");
call mtr.add_suppression("Clone removing all user data for provisioning: Started");
call mtr.add_suppression("Clone removing all user data for provisioning: Finished");
call mtr.add_suppression("Server was not able to find a rotate event from source server to initialize relay log recovery for channel ''. Skipping relay log recovery for the channel.");
call mtr.add_suppression("Error during --relay-log-recovery: Could not locate rotate event from the source.");
call mtr.add_suppression("Applier metadata information for channel '' was found after a clone operation. Relay log recovery");
SET SESSION sql_log_bin= 1;

--source include/clean_monitoring_process.inc

UNINSTALL PLUGIN clone;

# This should be done in include/rpl_change_topology_helper.inc
CHANGE REPLICATION SOURCE TO SOURCE_AUTO_POSITION= 0 FOR CHANNEL '';

--let $rpl_connection_name= server2
--source include/rpl_connection.inc

UNINSTALL PLUGIN clone;

--let $rpl_connection_name= server1
--source include/rpl_connection.inc

UNINSTALL PLUGIN clone;

--source include/group_replication_end.inc