File: gr_reset_slave_channel.test

package info (click to toggle)
mysql-8.0 8.0.43-3
  • links: PTS, VCS
  • area: main
  • in suites: sid
  • size: 1,273,924 kB
  • sloc: cpp: 4,684,605; ansic: 412,450; pascal: 108,398; java: 83,641; perl: 30,221; cs: 27,067; sql: 26,594; sh: 24,181; python: 21,816; yacc: 17,169; php: 11,522; xml: 7,388; javascript: 7,076; makefile: 2,194; lex: 1,075; awk: 670; asm: 520; objc: 183; ruby: 97; lisp: 86
file content (322 lines) | stat: -rw-r--r-- 11,827 bytes parent folder | download
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
################################################################################
# This test verifies that group replication channels are not affected by global
# RESET SLAVE commands when the group replication is running.
# If a command is used directly in a group channel it should work.
# If a command is used when group replication is stopped it should work.
#
# Test:
# 0. The test requires three servers: M1, M2 and M3.
#
# 1. Phase 1: Setup a 2 server group.
#  a) Check that the applier files are not there before start GR.
#  b) Bootstrap start group on M1. Add some data for recovery.
#  c) Start GR on M2.
#  d) Create a slave channel on M1 with M3 i.e. M3 -> M1.
#
# 2. Phase 2: Check that the group replication applier files are present.
#  a) On M1, group applier files should now be present with the channel.
#  b) On M1, slave files should be present.
#  c) On M2, group applier files should now be present with the channel.
#  d) On M2, recovery files should be present.
#
# 3. Phase 3: Check that the RESET SLAVE command doesn't affect GR specific
#    channels when the GR member is ONLINE.
#  a) Execute RESET SLAVE ALL command on M1.
#  b) Validate that group applier files are still present on M1.
#  c) Check that slave files are removed on M1.
#
# 4. Phase 4: After a direct reset command all files should disappear.
#  a) RESET SLAVE ALL command should fail on an ONLINE member M1.
#  b) Stop GR on M1.
#  c) Execute RESET SLAVE ALL command on M1.
#  d) Validate that group applier files are removed.
#
# 5. Phase 5: Restart server 1 plugin, all should be fine.
#  a) Add some data for recovery on M2.
#  b) Start GR on M1.
#  c) Add some data on M1 to check if all is fine.
#  d) Validate that both members have all the data.
#
# 6. Phase 6: When GR is stopped, after global reset command all files
#    should disappear.
#  a) Stop GR on M2.
#  b) Execute RESET SLAVE command on M2
#  c) Validate that slave_master_info and slave_relay_log_info tables are
#     cleared.
#  d) Start GR on M2. The node should be ONLINE.
#
# 7. Phase 7: When GR is stopped, after global RESET SLAVE ALL command all
#    files should disappear including recovery channel specific credentials.
#  a) Stop GR on M2.
#  b) Execute RESET SLAVE ALL command on M2
#  c) Validate that slave_master_info and slave_relay_log_info tables are
#  cleared.
#  d) Execute CHANGE MASTER command to provide recovery channel credentials.
#  e) Start GR on M2. The node should be ONLINE.
#
# 8. Phase 8: Cleanup.
################################################################################

--source include/not_have_privilege_checks_user.inc
--source include/have_group_replication_plugin.inc
--let $rpl_skip_group_replication_start= 1
--let $rpl_server_count= 3
--source include/group_replication.inc

#
# Phase 1: Set up a 2-server group (M1, M2), keeping M3 as an external source.
# Check that the group applier relay-log files do not exist before GR starts,
# then create an asynchronous slave channel M3 -> M1.
#

--connection server1
--echo server1

--let $datadir_1= `SELECT @@GLOBAL.datadir`

# Before GR is started, neither the applier relay log nor its index file may
# exist ("--error 1" makes the following --file_exists expect absence).
--let $relay_log_file=`SELECT CONCAT('$datadir_1','mgr-group_replication_applier.000001')`
--error 1
--file_exists $relay_log_file

--let $relay_log_index= `SELECT CONCAT('$datadir_1', 'mgr-group_replication_applier.index')`
--error 1
--file_exists $relay_log_index

--source include/start_and_bootstrap_group_replication.inc

# Add some data so that M2 has something to fetch through recovery when it
# joins the group below.
CREATE TABLE t1 (c1 INT NOT NULL PRIMARY KEY) ENGINE=InnoDB;
BEGIN;
INSERT INTO t1 VALUES (1);
INSERT INTO t1 VALUES (2);
COMMIT;
INSERT INTO t1 VALUES (3);

--connection server2
--echo server2
--source include/start_group_replication.inc

--connection server1
--let $group_replication_number_of_members= 2
--source include/gr_wait_for_number_of_members.inc

# Create a regular (non-GR) replication channel on M1 replicating from M3.

--disable_warnings
--replace_result $SERVER_MYPORT_3 SERVER_3_PORT
--eval CHANGE REPLICATION SOURCE TO SOURCE_HOST="127.0.0.1", SOURCE_USER="root", SOURCE_PASSWORD="", SOURCE_PORT=$SERVER_MYPORT_3, SOURCE_AUTO_POSITION=1 FOR CHANNEL "channel_1"
--enable_warnings

#
# Phase 2: Check that the group replication applier files are present on both
# members. Recovery files should be present on the recovered server (M2) as
# they were only reset, not removed, after recovery completed.
#

--connection server1

# Group applier relay-log files must exist now that GR is running, and the
# channel must be registered in the relay-log info repository.

--file_exists $relay_log_file
--file_exists $relay_log_index

--let $assert_text= 'The group replication applier channel is present'
--let $assert_cond= [SELECT COUNT(*) AS count FROM mysql.slave_relay_log_info WHERE channel_name="group_replication_applier", count, 1] = 1
--source include/assert.inc

# The regular slave channel (channel_1) files must exist as well.

--let $slave_relay_log_file=`SELECT CONCAT('$datadir_1','mgr-channel_1.000001')`
--file_exists $slave_relay_log_file

--let $slave_relay_log_index= `SELECT CONCAT('$datadir_1', 'mgr-channel_1.index')`
--file_exists $slave_relay_log_index

--let $assert_text= 'The slave channel is present'
--let $assert_cond= [SELECT COUNT(*) AS count FROM mysql.slave_relay_log_info WHERE channel_name="channel_1", count, 1] = 1
--source include/assert.inc

--connection server2

--let $datadir_2= `SELECT @@GLOBAL.datadir`

# M2 must also have its group applier relay-log files.

--let $relay_log_file=`SELECT CONCAT('$datadir_2','mgr-group_replication_applier.000001')`
--file_exists $relay_log_file

--let $relay_log_index= `SELECT CONCAT('$datadir_2', 'mgr-group_replication_applier.index')`
--file_exists $relay_log_index

# Recovery channel files are also there, since M2 joined via recovery.

--let $recovery_relay_log_file= `SELECT CONCAT('$datadir_2', 'mgr-group_replication_recovery.000001')`
--file_exists $recovery_relay_log_file

--let $recovery_relay_log_index= `SELECT CONCAT('$datadir_2', 'mgr-group_replication_recovery.index')`
--file_exists $recovery_relay_log_index

#
# Phase 3: Check that the RESET SLAVE command doesn't affect group replication
# specific channels when the GR member is ONLINE. Only the regular slave
# channel (channel_1) should be removed.
#

--connection server1

# A global RESET SLAVE ALL command fails for group replication channels when
# the group member is ONLINE, but it still removes the regular channel_1.
--error ER_REPLICA_CHANNEL_OPERATION_NOT_ALLOWED
RESET SLAVE ALL;

# The group applier files and channel metadata must be untouched.

--let $relay_log_file=`SELECT CONCAT('$datadir_1','mgr-group_replication_applier.000001')`
--file_exists $relay_log_file

--let $relay_log_index= `SELECT CONCAT('$datadir_1', 'mgr-group_replication_applier.index')`
--file_exists $relay_log_index

--let $assert_text= 'The group replication applier channel is still present'
--let $assert_cond= [SELECT COUNT(*) AS count FROM mysql.slave_relay_log_info WHERE channel_name="group_replication_applier", count, 1] = 1
--source include/assert.inc

--let $assert_text= 'The group replication applier channel is ON'
--let $assert_cond= [SELECT COUNT(*) AS count FROM performance_schema.replication_connection_status where CHANNEL_NAME="group_replication_applier" and SERVICE_STATE="ON", count, 1] = 1
--source include/assert.inc

# The regular slave channel was removed: its files and metadata are gone.

--error 1
--file_exists $slave_relay_log_file

--error 1
--file_exists $slave_relay_log_index

--let $assert_text= 'The slave channel is not present'
--let $assert_cond= [SELECT COUNT(*) AS count FROM mysql.slave_relay_log_info WHERE channel_name="channel_1", count, 1] = 0
--source include/assert.inc

#
# Phase 4: After a channel-specific reset command all applier files should
# disappear. The command must not be allowed while group replication is
# running, but must succeed once it is stopped.
#

--error ER_REPLICA_CHANNEL_OPERATION_NOT_ALLOWED
RESET SLAVE ALL FOR CHANNEL "group_replication_applier";

--source include/stop_group_replication.inc

# With GR stopped, resetting the applier channel directly is allowed.
RESET SLAVE ALL FOR CHANNEL "group_replication_applier";

# Applier relay-log files and channel metadata are now removed.

--let $relay_log_file_mgr=`SELECT CONCAT('$datadir_1','mgr-group_replication_applier.000001')`
--error 1
--file_exists $relay_log_file_mgr

--let $relay_log_index_mgr= `SELECT CONCAT('$datadir_1', 'mgr-group_replication_applier.index')`
--error 1
--file_exists $relay_log_index_mgr

--let $assert_text= 'The group replication applier channel is not present'
--let $assert_cond= [SELECT COUNT(*) AS count FROM mysql.slave_relay_log_info WHERE channel_name="group_replication_applier", count, 1] = 0
--source include/assert.inc

#
# Phase 5: Restart the GR plugin on server 1; the member must rejoin and
# replicate normally, ending with both members holding all the data.
#

--connection server2
--echo server2

# Data added while M1 is out of the group, to be fetched through recovery.
INSERT INTO t1 VALUES (4);

--connection server1
--echo server1
--source include/start_group_replication.inc

# Data added after rejoining, to verify normal group replication works.
INSERT INTO t1 VALUES (5);

--let $sync_slave_connection= server2
--source include/rpl_sync.inc

--let $assert_text= The table should contain 5 elements
--let $assert_cond= [select count(*) from t1] = 5;
--source include/assert.inc

#
# Phase 6: When GR is stopped, a global RESET SLAVE command clears and
# re-creates (flushes) the GR channel repositories; the member can then
# rejoin the group normally.
#
--connection server2
--echo server2

# NOTE(review): the --echo text below contains a typo ("Vefiry"); it is part
# of the recorded result output, so fixing it requires re-recording the
# .result file.
--echo Vefiry that group replication channels are present
--let $assert_text= 'The group replication applier channel is present'
--let $assert_cond= [SELECT COUNT(*) AS count FROM mysql.slave_relay_log_info WHERE channel_name="group_replication_applier", count, 1] = 1
--source include/assert.inc

--let $assert_text= 'The group replication recovery channel is present'
--let $assert_cond= [SELECT COUNT(*) AS count FROM mysql.slave_relay_log_info WHERE channel_name="group_replication_recovery", count, 1] = 1
--source include/assert.inc
--let $datadir_2= `SELECT @@GLOBAL.datadir`

--source include/stop_group_replication.inc

--echo RESET SLAVE command clears master and slave info repositories and will flush master info
RESET SLAVE;

# RESET SLAVE (without ALL) keeps the channels registered: both GR channel
# rows are still present in the repositories after the flush.
--let $assert_text= 'mysql.slave_relay_log_info contains flushed group replication channel information'
--let $assert_cond= [SELECT COUNT(*) AS count FROM mysql.slave_relay_log_info WHERE channel_name like "group_replication%", count, 1] = 2
--source include/assert.inc

--let $assert_text= 'mysql.slave_master_info contains flushed group replication channel information'
--let $assert_cond= [SELECT COUNT(*) AS count FROM mysql.slave_master_info WHERE channel_name like "group_replication%", count, 1] = 2
--source include/assert.inc

# Applier relay-log files are recreated by the flush.
--let $relay_log_index= `SELECT CONCAT('$datadir_2', 'mgr-group_replication_applier.index')`
--file_exists $relay_log_index

# Recovery relay-log files are recreated as well.
--let $recovery_relay_log_index= `SELECT CONCAT('$datadir_2', 'mgr-group_replication_recovery.index')`
--file_exists $recovery_relay_log_index

--source include/start_group_replication.inc

--let $assert_text= 'The group replication applier and recovery channel are present'
--let $assert_cond= [SELECT COUNT(*) AS count FROM mysql.slave_relay_log_info WHERE channel_name like "group_replication%", count, 1] = 2
--source include/assert.inc

#
# Phase 7: When GR is stopped, a global RESET SLAVE ALL command removes all
# channel files and metadata, including the recovery channel credentials.
# Execute CHANGE REPLICATION SOURCE to restore the recovery channel
# credentials and enable group replication again.
#

--source include/stop_group_replication.inc
RESET SLAVE ALL;

# NOTE(review): the $assert_text strings below contain a typo ("contrain");
# they are recorded in the result output, so fixing them requires
# re-recording the .result file.
--let $assert_text= 'mysql.slave_relay_log_info does not contrain group replication channel information'
--let $assert_cond= [SELECT COUNT(*) AS count FROM mysql.slave_relay_log_info WHERE channel_name like "group_replication%", count, 1] = 0
--source include/assert.inc

--let $assert_text= 'mysql.slave_master_info does not contrain group replication channel information'
--let $assert_cond= [SELECT COUNT(*) AS count FROM mysql.slave_master_info WHERE channel_name like "group_replication%", count, 1] = 0
--source include/assert.inc

# Applier relay-log files are removed.
--let $relay_log_index= `SELECT CONCAT('$datadir_2', 'mgr-group_replication_applier.index')`
--error 1
--file_exists $relay_log_index

# Recovery relay-log files are also removed.

--let $recovery_relay_log_index= `SELECT CONCAT('$datadir_2', 'mgr-group_replication_recovery.index')`
--error 1
--file_exists $recovery_relay_log_index

# Restore the recovery channel credentials wiped by RESET SLAVE ALL so that
# the member can go through recovery when it rejoins.
CHANGE REPLICATION SOURCE TO SOURCE_USER="root" FOR CHANNEL "group_replication_recovery";

--source include/start_group_replication.inc

#
# Phase 8: Cleanup. Drop the test table (replicated to all members) and tear
# down the group replication test environment.
#

DROP TABLE t1;

--source include/group_replication_end.inc