File: gr_start_stop_recovery.test

################################################################################
# This test checks a recovery race condition against member changes. When
# starts and stops were done sequentially, there was a chance that recovery
# would see a one-member group, conclude the server was alone, and declare
# the node ONLINE in the log.
#
# Test:
# 0. The test requires two servers: M1 and M2.
# 1. Bootstrap start a group on M1.
# 2. Set DEBUG point on M2 to wait right before the process decides if the
#    server is alone. Start GR on M2. Check member_state is RECOVERING.
# 3. On another connection of M2 stop GR.
# 4. Signal recovery to continue on M2. Check member_state is OFFLINE.
# 5. To verify that everything works, add data on M1 and start GR on M2.
#    Validate the data on M2.
# 6. Clean up.
################################################################################
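# For illustration only (not executed here): the member-state checks referenced
# in the plan above poll performance_schema with a query along these lines:
#
#   SELECT MEMBER_STATE FROM performance_schema.replication_group_members
#     WHERE MEMBER_ID= @@server_uuid;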

--source include/big_test.inc
--source include/have_debug.inc
--source include/have_debug_sync.inc
--let $group_replication_group_name= 572e4320-cd68-11e4-8830-0800200c9a66
--source include/have_group_replication_plugin.inc
--let $rpl_skip_group_replication_start= 1

--source include/group_replication.inc
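# group_replication.inc provisions the two servers and their connections;
# because $rpl_skip_group_replication_start is set above, neither member has
# Group Replication running yet.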

--echo #
--echo # Start group replication on the first server
--echo #

--connection server1
--echo server1
--source include/start_and_bootstrap_group_replication.inc
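# For reference, start_and_bootstrap_group_replication.inc reduces to roughly
# the following sketch (the real include also waits for the member to reach
# the expected state; not executed here):
#
#   SET GLOBAL group_replication_bootstrap_group= ON;
#   START GROUP_REPLICATION;
#   SET GLOBAL group_replication_bootstrap_group= OFF;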

--echo #
--echo # Start group replication on server 2
--echo # Wait right before the process decides if the server is alone.
--echo #

--connection server2
--echo server2
SET SESSION sql_log_bin= 0;
call mtr.add_suppression("Error while sending message in the group replication incremental recovery process.");
call mtr.add_suppression("Error while sending stats message");
SET SESSION sql_log_bin= 1;
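# The suppressions are added with sql_log_bin= 0 so they stay local to this
# server and are not written to the binary log it will share with the group.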

SET @debug_save= @@GLOBAL.DEBUG;

--echo # Set the debug flag to block recovery
SET @@GLOBAL.DEBUG='d,recovery_thread_start_wait_num_of_members';

--let $group_replication_start_member_state= RECOVERING
--source include/start_group_replication.inc
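# The debug point set above makes the recovery thread pause right before it
# counts the group members; it stays blocked until the test raises the
# DEBUG_SYNC signal further below. As a hedged sketch, the wait/signal pair
# behaves like this SQL-level analogue (not executed here):
#
#   SET DEBUG_SYNC= "now WAIT_FOR signal.recovery_continue";  # paused side
#   SET DEBUG_SYNC= "now SIGNAL signal.recovery_continue";    # releasing side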

# Sleep to give the start time to get stuck in recovery
--sleep 5

--echo #
--echo # On another connection stop group replication on server 2
--echo #

--connection slave
--send
STOP GROUP_REPLICATION;
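# Note: --send dispatches STOP GROUP_REPLICATION asynchronously; the 'slave'
# connection (a second connection to server 2, per step 3 in the header) stays
# busy until the matching --reap below collects the statement's result.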

--echo #
--echo # Continue with recovery.
--echo #

--connection server2

# Sleep to give the stop time to bring the number of members down to 1
--sleep 5

SET DEBUG_SYNC= "now SIGNAL signal.recovery_continue";
SET @@GLOBAL.DEBUG= @debug_save;

--connection slave
--reap

--let $group_replication_member_state= OFFLINE
--source include/gr_wait_for_member_state.inc
--source include/assert_and_disable_read_only.inc
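# A member that leaves the group is left with super_read_only enabled; the
# include above asserts that and switches it off so writes are possible again.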

--echo #
--echo # Test if the servers are working properly
--echo #

--connection server1
--echo server1

CREATE TABLE t1 (c1 INT NOT NULL PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);

--connection server2
--echo server2
--source include/start_group_replication.inc
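# Rejoining triggers distributed recovery: server 2 fetches the transactions
# executed on server 1 while it was offline, so t1 should now exist with one
# row (checked below).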

--let $assert_text= On the recovered member, the table should exist and have 1 element
--let $assert_cond= [select count(*) from t1] = 1;
--source include/assert.inc

--echo #
--echo # Clean up
--echo #

DROP TABLE t1;

--source include/group_replication_end.inc