File: rpl_mts_pending_max.result

include/master-slave.inc
Warnings:
Note	####	Sending passwords in plain text without SSL/TLS is extremely insecure.
Note	####	Storing MySQL user name or password information in the connection metadata repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START REPLICA; see the 'START REPLICA Syntax' in the MySQL Manual for more information.
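# The note above suggests passing credentials directly to START REPLICA rather
# than storing them in the connection metadata repository. A minimal sketch of
# that form, with placeholder credentials (not executed by this test):
START REPLICA USER='repl_user' PASSWORD='repl_password';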
[connection master]
#
#   0) Set REPLICA_PENDING_JOBS_SIZE_MAX to a small value (1024)
#      and replica_parallel_workers to 2 to keep the test simple.
#
[connection slave]
SET @save.replica_pending_jobs_size_max= @@global.replica_pending_jobs_size_max;
SET @save.replica_parallel_workers= @@global.replica_parallel_workers;
SET @@GLOBAL.replica_pending_jobs_size_max= 1024;
SET @@GLOBAL.replica_parallel_workers= 2;
include/start_slave.inc
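# A quick way to confirm the lowered limits before running the scenarios; an
# illustrative check rather than recorded test output (the values shown are
# simply the ones just set above):
SELECT @@GLOBAL.replica_pending_jobs_size_max, @@GLOBAL.replica_parallel_workers;
@@GLOBAL.replica_pending_jobs_size_max	@@GLOBAL.replica_parallel_workers
1024	2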
#
#
#   1) Create initial data required for the test
#      (two databases (db1, db2) and two tables (db1.t and db2.t)).
#
[connection master]
CREATE DATABASE db1;
CREATE DATABASE db2;
CREATE TABLE db1.t (a TEXT) ENGINE=INNODB;
CREATE TABLE db2.t (a TEXT) ENGINE=INNODB;
include/sync_slave_sql_with_master.inc
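# include/sync_slave_sql_with_master.inc appears repeatedly in this test.
# Conceptually it reads the master's current binary log coordinates and then
# blocks on the slave until the applier has reached them; a rough manual
# equivalent is sketched below (the file name, position and timeout are
# illustrative, not values recorded here):
# on the master:
#   SHOW MASTER STATUS;
# on the slave:
#   SELECT SOURCE_POS_WAIT('master-bin.000001', 4096, 300);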
#
#   2) Prove that the Coordinator makes a big event (larger in size than
#      REPLICA_PENDING_JOBS_SIZE_MAX) wait until all workers have finished
#      their work (emptied their queues) before the big event is processed.
#      (A sketch of how this wait can be observed follows below.)
#
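# The Coordinator stall described above can be watched from a separate slave
# connection; a sketch of such a check (the replica thread names are the
# 8.0.26+ ones, and the exact "waiting" state string is version dependent, so
# treat this as illustrative; output omitted):
SELECT NAME, PROCESSLIST_STATE
FROM performance_schema.threads
WHERE NAME LIKE 'thread/sql/replica%';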
#
#  1) On the Slave, lock one table so that any operation on it
#     will wait for the lock to be released.
#
[connection slave1]
LOCK TABLE db1.t WRITE;
#
#  2) Execute a query that is going to wait for the table lock.
#
[connection master]
INSERT INTO db1.t VALUES ('small event');
#
#  3) Wait on the Slave until a worker picks up this event and starts
#     waiting for the lock (which was acquired in step 1); see the
#     polling sketch below.
#
[connection slave]
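# The wait in this step is driven by the test script and leaves no output here;
# a condition of roughly this shape could be polled until it returns 1 (a
# sketch, not the exact helper used by the test):
SELECT COUNT(*) = 1
FROM performance_schema.threads
WHERE PROCESSLIST_STATE = 'Waiting for table metadata lock';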
#
#  4) Now, on the Master, run another INSERT that reaches the slave.
#
[connection master]
INSERT INTO db2.t VALUES (REPEAT('big event', 1024));
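# Why this row is a "big event": its payload alone is 9 * 1024 = 9216 bytes,
# well above the 1024-byte replica_pending_jobs_size_max set earlier. A quick
# check (illustrative, not recorded output):
SELECT LENGTH(REPEAT('big event', 1024));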
#
#  5) Check that the Coordinator waits for the query (from step 2) to be
#     executed before assigning this new query (from step 4) to one of
#     the workers, because of the event size limit.
#
[connection slave]
include/assert.inc [Check that one of the applier worker threads is waiting for the table metadata lock.]
#
#  6) Release the lock acquired in step 1, so that the first query can
#     finish its work; once it is done, the second (big) event can
#     continue as well.
#
[connection slave1]
UNLOCK TABLES;
#
#  7) Check that the slave is able to catch up with the master after the
#     lock is released in step 6.
#
#     7.1) Sync SQL thread with Master.
#
[connection master]
include/sync_slave_sql_with_master.inc
#
#     7.2) Diff all the tables involved in the test to prove
#          that replication worked fine.
#
include/diff_tables.inc [master:db1.t,slave:db1.t]
include/diff_tables.inc [master:db2.t,slave:db2.t]
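# diff_tables.inc compares table contents row by row. A lighter-weight manual
# spot check in the same spirit would be to run the statement below on both
# servers and compare the results by hand (a sketch, output omitted):
CHECKSUM TABLE db1.t, db2.t;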
#
#   3) When a big event is being processed by a worker, the Coordinator
#      makes smaller events wait until the big event has been executed
#      completely.
#
#
#
#  1) On the Slave, lock one table so that any operation on it
#     will wait for the lock to be released.
#
[connection slave1]
LOCK TABLE db1.t WRITE;
#
#  2) Execute a query that is going to wait for the table lock.
#
[connection master]
INSERT INTO db1.t VALUES (REPEAT('big event', 1024));
#
#  3) Wait on the Slave until a worker picks up this event and starts
#     waiting for the lock (which was acquired in step 1).
#
[connection slave]
#
#  4) Now, on the Master, run another INSERT that reaches the slave.
#
[connection master]
INSERT INTO db2.t VALUES ('small event');
#
#  5) Check that the Coordinator waits for the query (from step 2) to be
#     executed before assigning this new query (from step 4) to one of
#     the workers, because of the event size limit.
#
[connection slave]
include/assert.inc [Check that one of the applier worker threads is waiting for the table metadata lock.]
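# The per-worker picture behind an assertion like the one above can also be
# inspected directly through Performance Schema; a sketch (output omitted):
SELECT WORKER_ID, THREAD_ID, SERVICE_STATE
FROM performance_schema.replication_applier_status_by_worker;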
#
#  6) Release the lock acquired in step 1, so that the first query can
#     finish its work; once it is done, the second (small) event can
#     continue as well.
#
[connection slave1]
UNLOCK TABLES;
#
#  7) Check that the slave is able to catch up with the master after the
#     lock is released in step 6.
#
#     7.1) Sync SQL thread with Master.
#
[connection master]
include/sync_slave_sql_with_master.inc
#
#     7.2) Diff all the tables involved in the test to prove
#          that replication worked fine.
#
include/diff_tables.inc [master:db1.t,slave:db1.t]
include/diff_tables.inc [master:db2.t,slave:db2.t]
#
#   4) When a big event is being processed by a worker, the Coordinator
#      makes another big event wait as well, until the first big event
#      has been executed completely.
#
#
#  1) On the Slave, lock one table so that any operation on it
#     will wait for the lock to be released.
#
[connection slave1]
LOCK TABLE db1.t WRITE;
#
#  2) Execute a query that is going to wait for the table lock.
#
[connection master]
INSERT INTO db1.t VALUES (REPEAT('big event', 1024));
#
#  3) Wait on the Slave until a worker picks up this event and starts
#     waiting for the lock (which was acquired in step 1).
#
[connection slave]
#
#  4) Now, on the Master, run another INSERT that reaches the slave.
#
[connection master]
INSERT INTO db2.t VALUES (REPEAT('big event', 1024));
#
#  5) Check that the Coordinator waits for the query (from step 2) to be
#     executed before assigning this new query (from step 4) to one of
#     the workers, because of the event size limit.
#
[connection slave]
include/assert.inc [Check that one of the applier worker threads is waiting for the table metadata lock.]
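# Another way to see the blocked worker in this scenario is the metadata lock
# instrumentation; a sketch (assumes the metadata lock instrument is enabled,
# as it is by default in 8.0; output omitted):
SELECT OBJECT_SCHEMA, OBJECT_NAME, LOCK_TYPE, LOCK_STATUS, OWNER_THREAD_ID
FROM performance_schema.metadata_locks
WHERE OBJECT_SCHEMA = 'db1' AND OBJECT_NAME = 't';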
#
#  6) Release the lock acquired in step 1, so that the first query can
#     finish its work; once it is done, the second big event can
#     continue as well.
#
[connection slave1]
UNLOCK TABLES;
#
#  7) Check that the slave is able to catch up with the master after the
#     lock is released in step 6.
#
#     7.1) Sync SQL thread with Master.
#
[connection master]
include/sync_slave_sql_with_master.inc
#
#     7.2) Diff all the tables involved in the test to prove
#          that replication worked fine.
#
include/diff_tables.inc [master:db1.t,slave:db1.t]
include/diff_tables.inc [master:db2.t,slave:db2.t]
#
#   5) Cleanup (drop tables/databases and reset the variables)
#
[connection master]
DROP DATABASE db1;
DROP DATABASE db2;
include/sync_slave_sql_with_master.inc
include/stop_slave_sql.inc
SET @@GLOBAL.replica_pending_jobs_size_max= @save.replica_pending_jobs_size_max;
SET @@GLOBAL.replica_parallel_workers= @save.replica_parallel_workers;
include/start_slave_sql.inc
include/rpl_end.inc