File: galera_vote_rejoin_mysqldump.test

#
# Test that mysqldump SST is possible after a vote without a cluster restart
#

--source include/galera_cluster.inc
--source suite/galera/include/galera_sst_set_mysqldump.inc
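# galera_sst_set_mysqldump.inc switches the SST method to mysqldump for this
# test; the original settings are restored later via galera_sst_restore.inc.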

--connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3

# Save original auto_increment_offset values.
--let $node_1=node_1
--let $node_2=node_2
--let $node_3=node_3
--source ../galera/include/auto_increment_offset_save.inc

--connection node_1
CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;

# Introduce inconsistency on node #2

--connection node_2
--let $wsrep_cluster_address_node2 = `SELECT @@wsrep_cluster_address`
SET SESSION wsrep_on=OFF;
ALTER TABLE t1 ADD PRIMARY KEY (f1);
SET SESSION wsrep_on=ON;
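# With wsrep_on=OFF the ALTER above was not replicated, so only node #2's
# copy of t1 now has a primary key.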

# Run DDL that will fail on nodes #1 and #3 but succeed on node #2

--connection node_1
--error ER_CANT_DROP_FIELD_OR_KEY
ALTER TABLE t1 LOCK=SHARED, DROP PRIMARY KEY;
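# The DDL is replicated in total order: it applies cleanly on node #2 (which
# has the key) but fails on nodes #1 and #3, so the cluster votes on the
# result and the minority node #2 is declared inconsistent and expelled.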

# Nodes #1 and #3 remain in the cluster

--connection node_1
--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
--source include/wait_condition.inc

SELECT VARIABLE_VALUE AS expect_Primary FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status';

--connection node_3
SELECT VARIABLE_VALUE AS expect_2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
SELECT VARIABLE_VALUE AS expect_Primary FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status';

# Node #2 is kicked out

--connection node_2
SET SESSION wsrep_on=OFF;
SELECT VARIABLE_VALUE AS expect_Disconnected FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status';
SET SESSION wsrep_on=ON;
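# (wsrep_on was turned off above so the SELECT is allowed while the node is
# outside the primary component.)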

# Restore cluster

--disable_query_log
--eval SET GLOBAL wsrep_cluster_address = '$wsrep_cluster_address_node2'
--enable_query_log
--enable_reconnect
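# Rejoining with an inconsistent state forces a full SST, which the earlier
# galera_sst_set_mysqldump.inc include has configured to run via mysqldump.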

--let $wait_condition = SELECT VARIABLE_VALUE = 3 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
--source include/wait_condition.inc

SELECT VARIABLE_VALUE AS expect_3 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
SELECT VARIABLE_VALUE AS expect_Primary FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status';

# Confirm that the table is now identical throughout

--connection node_1
SHOW CREATE TABLE t1;

--connection node_2
SHOW CREATE TABLE t1;

--connection node_3
SHOW CREATE TABLE t1;
DROP TABLE t1;
CALL mtr.add_suppression("Slave SQL: Error 'Can't DROP 'PRIMARY'; check that column/key exists'");

--connection node_1
--source suite/galera/include/galera_sst_restore.inc

--connection node_2
# Restart the node so we don't fail on the WSREP_START_POSITION internal check
--source include/restart_mysqld.inc
--source include/wait_until_connected_again.inc
CALL mtr.add_suppression("WSREP: .+ is inconsistent with group");

--connection node_1
--let $wait_condition = SELECT VARIABLE_VALUE = 3 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
--source include/wait_condition.inc

# Restore original auto_increment_offset values.
--source ../galera/include/auto_increment_offset_restore.inc