File: flush_block_commit_notembedded.test

# Let's see if FLUSH TABLES WITH READ LOCK blocks COMMIT of existing
# transactions.
# We verify that we did not introduce a deadlock.
# This is intended to mimic how mysqldump and innobackup work.
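# (Roughly the sequence being mimicked: the backup client issues
# FLUSH TABLES WITH READ LOCK, records the binlog position, takes its
# snapshot, then runs UNLOCK TABLES; in-flight commits must block on the
# lock without deadlocking against it.)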

--source include/have_log_bin.inc

# And it requires InnoDB
--source include/have_innodb.inc

--echo # Save the initial number of concurrent sessions
--source include/count_sessions.inc

--disable_query_log
# This may be triggered on a slow system or one that lacks native AIO.
call mtr.add_suppression("InnoDB: Trying to delete tablespace.*pending operations");
--enable_query_log

connect (con1,localhost,root,,);
connect (con2,localhost,root,,);

# FLUSH TABLES WITH READ LOCK should block writes to binlog too
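# (Outline of the check below: con2 holds the read lock, con1 sends an
# INSERT that blocks on it, and the binlog is listed while the lock is
# still held, so the pending INSERT must not yet appear in it; only
# after UNLOCK TABLES does the INSERT complete.)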
connection con1;
CREATE TABLE t1 (a INT) ENGINE=innodb;
RESET MASTER;
SET AUTOCOMMIT=0;
SELECT 1;
connection con2;
FLUSH TABLES WITH READ LOCK;
--source include/show_binlog_events.inc
connection con1;
send INSERT INTO t1 VALUES (1);
connection con2;
sleep 1;
--source include/show_binlog_events.inc
UNLOCK TABLES;
connection con1;
reap;
DROP TABLE t1;
SET AUTOCOMMIT=1;

# The global read lock (GRL) blocks new transactions
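# (In MariaDB, FLUSH TABLES WITH READ LOCK takes the backup lock, so the
# INSERT started by con2 below is expected to stall in the
# "Waiting for backup lock" state that the wait_condition checks for.)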
create table t1 (a int) engine=innodb;
connection con1;
flush tables with read lock;
connection con2;
begin;
--send insert into t1 values (1);
connection con1;
let $wait_condition=
  select count(*) = 1 from information_schema.processlist
  where state = "Waiting for backup lock" and
        info = "insert into t1 values (1)";
--source include/wait_condition.inc
unlock tables;
connection con2;
--reap
commit;
drop table t1;

connection default;
disconnect con1;
disconnect con2;

--echo # Wait till all disconnects are completed
--source include/wait_until_count_sessions.inc