File: gr_clone_with_consistency_after.test

###############################################################################
# This test verifies that clone can be used during member join when it is
# configured with group_replication_consistency= AFTER.
#
# 0. The test requires two servers: M1 and M2.
# 1. Install the clone plugin on server 1
# 2. Bootstrap server1 and add some data
# 3. Configure and persist group_replication_consistency globally to AFTER.
#    Restart server 2 with a monitoring process (mysqld_safe) if needed
# 4. Set up the server so that group replication starts on boot
#    Install the Clone plugin
# 5. On an empty server2 start group replication
#    Wait for it to restart and come back
#    Check that clone was completed
# 6. Cleanup
#

--source include/have_mysqld_monitoring_process.inc
--source include/have_clone_plugin.inc
--source include/have_group_replication_plugin.inc
--let $rpl_skip_group_replication_start= 1
--source include/group_replication.inc

# Validate plugins
--let plugins = CLONE_PLUGIN,GROUP_REPLICATION
--source include/check_plugin_dir.inc
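
# Clone-based provisioning restarts the joining member, which is why the
# mysqld monitoring process is required above. Setting
# $rpl_skip_group_replication_start keeps group_replication.inc from starting
# the plugin automatically, so both members are brought up explicitly in the
# steps below.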


--echo
--echo ############################################################
--echo # 1. Install clone plugin on server1.

--let $rpl_connection_name= server1
--source include/rpl_connection.inc
--let $server1_port= `SELECT @@GLOBAL.PORT`

--replace_result $CLONE_PLUGIN CLONE_PLUGIN
--eval INSTALL PLUGIN clone SONAME '$CLONE_PLUGIN'
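# Remote cloning needs the clone plugin on both ends of the transfer: server1
# acts as the donor here, and the recipient (server2) gets the plugin in step 4.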

--echo
--echo ############################################################
--echo # 2. Bootstrap server1 and add some data

--source include/start_and_bootstrap_group_replication.inc

CREATE TABLE t1 (c1 INT NOT NULL PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
INSERT INTO t1 VALUES (2);
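# These transactions exist only on server1 and create the gap in executed
# GTIDs that the joiner will have to close through distributed recovery.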

--echo
--echo #######################################################################
--echo # 3. Configure and persist group_replication_consistency globally to AFTER.
--echo #    Restart server 2 with a monitoring process (mysqld_safe) if needed

--let $rpl_connection_name= server2
--source include/rpl_connection.inc

--let $_group_replication_local_address= `SELECT @@GLOBAL.group_replication_local_address`
--let $_group_replication_group_seeds= `SELECT @@GLOBAL.group_replication_group_seeds`
--let $_group_replication_start_on_boot= `SELECT @@GLOBAL.group_replication_start_on_boot`
--let $_group_replication_comm_stack= `SELECT @@GLOBAL.group_replication_communication_stack`

--let $_group_replication_consistency_save= `SELECT @@GLOBAL.group_replication_consistency`
SET PERSIST group_replication_consistency= AFTER;

--let $plugin_list= $GROUP_REPLICATION
--source include/spawn_monitoring_process.inc
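# The monitoring process takes the role mysqld_safe plays outside of MTR: it
# restarts mysqld after clone shuts the recipient down. Persisting
# group_replication_consistency (rather than only setting it globally) lets the
# AFTER value survive that restart, which step 5 asserts.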

--echo
--echo ############################################################
--echo # 4. Set up the server so that group replication starts on boot
--echo #    Install the Clone plugin

--disable_query_log
--eval SET PERSIST group_replication_group_name= "$group_replication_group_name"
--eval SET PERSIST group_replication_local_address= "$_group_replication_local_address"
--eval SET PERSIST group_replication_group_seeds= "$_group_replication_group_seeds"
--eval SET PERSIST group_replication_communication_stack= "$_group_replication_comm_stack"

SET PERSIST group_replication_start_on_boot= ON;
--enable_query_log

--replace_result $CLONE_PLUGIN CLONE_PLUGIN
--eval INSTALL PLUGIN clone SONAME '$CLONE_PLUGIN'
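# Everything server2 needs to rejoin after the clone-triggered restart is
# persisted: with group_replication_start_on_boot= ON and the group settings in
# place, the server rejoins the group on its own once the monitoring process
# brings it back. The clone plugin is installed here as well, since the
# recipient also needs it.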

--echo
--echo ############################################################
--echo # 5. On an empty server2 start group replication
--echo #    Wait for it to restart and come back
--echo #    Check that clone was completed

--let $_group_replication_clone_threshold_save= `SELECT @@GLOBAL.group_replication_clone_threshold`
SET GLOBAL group_replication_clone_threshold= 1;
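# A threshold of 1 makes any joiner that is missing at least one transaction
# pick clone over incremental (binary log based) distributed recovery, so this
# test always exercises the clone path.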

--let $assert_text= group_replication_consistency is AFTER
--let $assert_cond= "[SELECT @@GLOBAL.group_replication_consistency]" = "AFTER"
--source include/assert.inc

START GROUP_REPLICATION;

--source include/wait_until_disconnected.inc
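# Once server2 decides to clone it drops its data, shuts down and is restarted
# by the monitoring process; the client connection is lost in the process, so
# wait for the disconnect and reconnect once the server is back.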

--let $rpl_server_number= 2
--source include/rpl_reconnect.inc

--let $group_replication_member_state=ONLINE
--source include/gr_wait_for_member_state.inc

--let $assert_text= group_replication_consistency is AFTER
--let $assert_cond= "[SELECT @@GLOBAL.group_replication_consistency]" = "AFTER"
--source include/assert.inc

--let $assert_text= Clone must be completed
--let $assert_cond= [SELECT state="Completed" FROM performance_schema.clone_status] = 1
--source include/assert.inc
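# When this assert fails it can help to look at the whole clone status row;
# an illustrative query (not executed or recorded here) would be:
#   SELECT STATE, SOURCE, ERROR_NO, ERROR_MESSAGE
#     FROM performance_schema.clone_status;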

# See if the data has been properly cloned in server2
--let $diff_tables= server1:test.t1, server2:test.t1
--source include/diff_tables.inc

--echo
--echo ############################################################
--echo # 6. Cleanup

--let $rpl_connection_name= server1
--source include/rpl_connection.inc

UNINSTALL PLUGIN clone;

DROP TABLE test.t1;

--source include/rpl_sync.inc

--let $rpl_connection_name= server2
--source include/rpl_connection.inc

UNINSTALL PLUGIN clone;

RESET PERSIST group_replication_group_name;
RESET PERSIST group_replication_local_address;
RESET PERSIST group_replication_group_seeds;
RESET PERSIST group_replication_start_on_boot;
RESET PERSIST group_replication_consistency;
RESET PERSIST group_replication_communication_stack;
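# RESET PERSIST only removes the entries from mysqld-auto.cnf; the in-memory
# global values changed during the test are restored explicitly below.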


--replace_result $_group_replication_start_on_boot START_ON_BOOT_VALUE
--eval SET GLOBAL group_replication_start_on_boot= $_group_replication_start_on_boot

--replace_result $_group_replication_consistency_save GROUP_REPLICATION_CONSISTENCY
--eval SET GLOBAL group_replication_consistency = $_group_replication_consistency_save

--replace_result $_group_replication_clone_threshold_save GROUP_REPLICATION_CLONE_THRESHOLD
--eval SET GLOBAL group_replication_clone_threshold= $_group_replication_clone_threshold_save

set session sql_log_bin=0;
call mtr.add_suppression("This member will start distributed recovery using clone. It is due to the number of missing transactions being higher than the configured threshold of*");
call mtr.add_suppression("Clone removing all user data for provisioning: Started");
call mtr.add_suppression("Clone removing all user data for provisioning: Finished");
set session sql_log_bin=1;
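# The suppressions cover the expected clone/recovery messages; sql_log_bin is
# disabled around them so the inserts into the mtr tables stay local and are
# not replicated to the other member.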

--source include/clean_monitoring_process.inc

--source include/group_replication_end.inc