File: rpl_parallel_conflicts.test

#
# WL#5569 MTS
#
# The test checks cases of hashing conflicts that force special handling.
# The cases include:
#
# I. Two Worker jobs conflict with each other
#
#   a. two multi-statement transactions, each touching more than one
#      partition with one partition in common, are mapped to different
#      Workers.
#   b. similarly, two autocommit queries or DDL statements.
#
# The cases are handled as follows: when the Coordinator hits a partition
# occupied by a Worker other than the one currently assigned, it marks the
# partition and waits until the owning Worker has released it and signalled.
#
# II. An event requires sequential execution
#
# The Coordinator does not schedule the event and waits until all Workers
# have released their partitions and signalled.
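#
# A minimal sketch of case I.a under database-based scheduling (illustration
# only; the concrete statements exercised by this test follow below):
#
#   T1: BEGIN; INSERT INTO d1.t1 ...; INSERT INTO d2.t1 ...; COMMIT;
#       -- mapped to Worker A, which then owns partitions d1 and d2
#   T2: BEGIN; INSERT INTO d3.t1 ...; INSERT INTO d1.t1 ...; COMMIT;
#       -- mapped to Worker B; on the d1 event the Coordinator must wait
#          until Worker A releases d1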

--source include/not_group_replication_plugin.inc
--source include/have_replica_parallel_type_database.inc
--source include/master-slave.inc

#
#  Testing with the statement format requires
#  @@global.slave_run_query_in_parallel = 1.
#  Notice, parallelization for Query-log-event is limited
#  to the default database. That's why 'use db'.
#  With the default @@global.slave_run_query_in_parallel == 0
#  the tests in stmt format still run to prove the switch to
#  sequential execution.

#  TODO: convert this file into two tests, one for each value of
#        @@global.slave_run_query_in_parallel
--source include/have_binlog_format_row.inc

connection slave;

--disable_query_log
--disable_result_log
call mtr.add_suppression('Error reading replica worker configuration');
--enable_query_log
--enable_result_log

source include/stop_slave.inc;

set @save.replica_parallel_workers= @@global.replica_parallel_workers;
set @@global.replica_parallel_workers= 4;
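# With database-based scheduling (guaranteed by the
# have_replica_parallel_type_database.inc guard above), four workers are
# enough for the three databases used below to be handled by different
# workers.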

source include/start_slave.inc;


connection master;

create database d1;
create database d2;
create database d3;
create table d1.t1 (a int auto_increment primary key) engine=innodb;
create table d2.t1 (a int auto_increment primary key) engine=innodb;
create table d3.t1 (a int auto_increment primary key) engine=innodb;
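# Three databases give the applier three distinct scheduling partitions to
# map onto the workers.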

#
# I. Two parallel jobs conflict
#
# Two conflicting jobs follow.

--source include/sync_slave_sql_with_master.inc
# To make the jobs really conflict, the slave needs to block the commit of the first one.
#connection slave;

begin;
insert into d2.t1 values (1);
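# The uncommitted row (a=1) holds a lock in d2.t1; the worker replaying
# Job_1 below inserts the same key and should block on it until the
# rollback further down.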

connection master;

# Job_1
begin;
use d1;
insert into d1.t1 values (null);
use d2;
insert into d2.t1 values (1);   # will be blocked at this point on the Worker
commit;

# Job_2
begin;
use d3;
insert into d3.t1 values (null);
use d1;
insert into d1.t1 values (null); # will be blocked at this point on the Coordinator if no transaction compression is used
commit;

connection slave;

--echo Either the coordinator is waiting for a worker to unlock d1
--echo (binlog transaction compression OFF) or the coordinator
--echo has scheduled the 2nd transaction to the same worker (binlog
--echo transaction compression ON)

--let $wait_condition=SELECT COUNT(*) = 1 FROM performance_schema.data_locks WHERE object_schema="d2" AND object_name="t1" AND lock_status="WAITING"
--source include/wait_condition.inc
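# The worker applying Job_1 is now presumed stalled inside d2.t1 while still
# holding its claim on the d1 partition, which is what forces the special
# handling checked below.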

if (`SELECT @@global.binlog_transaction_compression = FALSE`)
{
  #
  # Note that if no compression is used, the coordinator has already
  # started to assign the 2nd transaction to a second worker, since it
  # schedules event by event (it blocks when the 2nd event of the 2nd
  # transaction - d1's event - is scheduled).
  #
  # If compression is on, the coordinator knows beforehand which
  # databases a transaction is going to touch, as it inspects the
  # payload event and gathers all the information about the
  # transaction's events. Then it does not wait and assigns the 2nd
  # transaction to the worker already handling d1.
  #
  --let $wait_condition=SELECT COUNT(*) = 1 FROM performance_schema.threads WHERE processlist_state LIKE 'Waiting for Replica Worker to release partition' AND name = 'thread/sql/replica_sql'
  --source include/wait_condition.inc
}

# release the Worker
rollback;

# Bug#21461991 : RPL.RPL_PARALLEL_CONFLICTS FAILS SPORADICALLY
# ON DAILY AND WEEKLY TRUNK
# Waits until the slave SQL thread has been synced with master
connection master;
--source include/sync_slave_sql_with_master.inc

let $count= 2;
let $table= d1.t1;
source include/wait_until_rows_count.inc;


#
# II. The only-sequential conflicts with ongoing parallel applying
#

# a. DDL waits until all Workers have processed their earlier scheduled assignments

connection slave1;

# Record the tables' status. The tables are supposed to exist, possibly with
# data left over from the previous part.

select count(*) from d1.t1 into @d1;
select count(*) from d2.t1 into @d2;
select count(*) from d3.t1 into @d3;
use d1;
create table `exists_only_on_slave` (a int);
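# This slave-only table serves as a marker: the DDL issued on the master
# below drops it, so as long as it can still be selected from here, the DDL
# has not been applied yet.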

connection slave;

# Put this transaction in the Workers' way to block their load.

begin;
insert into d1.t1 values (null);
insert into d2.t1 values (null);
insert into d3.t1 values (null);
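# Since the binlog format is ROW and both servers held identical data after
# the sync, these uncommitted rows presumably take the same key values that
# the row events of Job_1..Job_3 will carry, so each worker should block on
# a lock held by this transaction until the rollback below.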

connection master;

# Job_1
begin;
use d1;
insert into d1.t1 values (null);
commit;

# Job_2
begin;
use d2;
insert into d2.t1 values (null);
commit;


# Job_3
begin;
use d3;
insert into d3.t1 values (null);
commit;

--disable_warnings
use d1;
drop table if exists `exists_only_on_slave`;
--enable_warnings
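# Being DDL, the DROP is a sequential event (case II in the header): the
# coordinator is expected to hold it back until all workers have released
# their partitions, which cannot happen while the slave transaction above
# keeps its locks.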


connection slave1;

select sleep(1);  # give Workers a little time to process (but they won't)

select count(*) - @d1 as 'zero' from d1.t1;
select count(*) - @d2 as 'zero' from d2.t1;
select count(*) - @d3 as 'zero' from d3.t1;

# prove that the master's DDL has not gotten through
use d1;
select count(*) as 'zero' from `exists_only_on_slave`;

connection slave;

rollback; # release workers

connection slave1;

# Finish up by waiting until everything is committed.

let $count= `select @d1 + 1`;
let $table= d1.t1;
source include/wait_until_rows_count.inc;

let $count= `select @d2 + 1`;
let $table= d2.t1;
source include/wait_until_rows_count.inc;

let $count= `select @d3 + 1`;
let $table= d3.t1;
source include/wait_until_rows_count.inc;
connection slave;


#
# cleanup
#

connection master;

drop database d1;
drop database d2;
drop database d3;

--source include/sync_slave_sql_with_master.inc
#connection slave;

set @@global.replica_parallel_workers= @save.replica_parallel_workers;

--source include/rpl_end.inc