File: binlog_wrong_last_committed.test

################################################################################
# BUG#25379659 MASTER MAY BINLOG A WRONG LAST_COMMITED
#
# Transactions could binlog a last_committed smaller than expected. With the
# wrong last_committed values, transactions which should be applied
# sequentially could be applied in parallel. That caused applier errors or
# data inconsistency.
#
# When committing a transaction, its last_committed is set to the value of
# max_committed_transaction. max_committed_transaction is the maximum
# sequence_number of the transactions committed so far. It is maintained just
# before committing each transaction to the engine: if the transaction's
# sequence_number is not SEQ_UNINIT, max_committed_transaction is updated
# accordingly.
#
# However, the code checked the wrong sequence_number (the sequence_number of
# the leader thread's transaction instead of the sequence_number of the
# transaction being committed). As a result, max_committed_transaction was
# only updated in time for the leader thread's transaction. The update for
# the following transactions was delayed to finish_commit(), which runs after
# the transaction has been committed to the engine.
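#
# Roughly, in pseudocode (a sketch based on the description above, not the
# actual server source):
#
#   for trx in commit_queue:                    # walked by the leader thread
#       if trx.sequence_number != SEQ_UNINIT:   # fix: check trx itself
#           # bug: the leader's sequence_number was checked here instead
#           max_committed_transaction = max(max_committed_transaction,
#                                           trx.sequence_number)
#       commit_to_engine(trx)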
#
# The test verifies that last_committed is correct in the bug situation.
#
# Step 1. Use debug sync to guarantee that the commit queue has two
#         transactions.
# Step 2. Use debug sync to pause the second transaction when it enters
#         finish_commit().
# Step 3. Execute a transaction and check that its last_committed is correct.
################################################################################
--source include/have_binlog_format_statement.inc
--source include/have_debug_sync.inc
--let $option_name = binlog_order_commits
--let $option_value = 1
--source include/only_with_option.inc
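# The test relies on debug sync points and on ordered group commit
# (binlog_order_commits = 1), so that the commit queue is processed in binlog
# order by the group commit leader.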


# Reset sequence_number and last_committed, so we can check the exact numbers.
# Make sure the binlog starts from binlog.000001.
RESET MASTER;

CREATE TABLE t1(c1 INT PRIMARY KEY, c2 INT) ENGINE = InnoDB;
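
# Expected logical timestamps in binlog.000001 (a sketch; it assumes each of
# the statements in this test is logged as its own transaction):
#   CREATE TABLE t1   sequence_number = 1
#   INSERT (1, 1)     sequence_number = 2
#   INSERT (2, 1)     sequence_number = 3
#   UPDATE            sequence_number = 4, last_committed = 3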

# Make the INSERT wait for another INSERT to get into the flush queue
SET DEBUG_SYNC = "bgc_after_enrolling_for_commit_stage
                  SIGNAL insert1_ready WAIT_FOR continue_commit_stage";
--send INSERT INTO t1 VALUES(1, 1)
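# The statement above is sent asynchronously; it stops at the sync point and
# holds up the commit stage until continue_commit_stage is signalled from
# conn2 later on.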

--connect(conn1, localhost, root)
--connect(conn2, localhost, root)
--connection conn1

# Make sure the above INSERT is the leader
SET DEBUG_SYNC = "now WAIT_FOR insert1_ready";
# Record the INSERT's binlog position
--let $binlog_pos= query_get_value(SHOW MASTER STATUS, Position, 1)
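# conn2 later waits for the binlog to grow past this position, which tells us
# that INSERT(2, 1) has been written to the binlog.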

# Pause the INSERT when it enters finish_commit()
SET DEBUG_SYNC = "reached_finish_commit WAIT_FOR insert2_finish";
--send INSERT INTO t1 VALUES(2, 1)
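# This INSERT is expected to join the commit queue behind INSERT(1, 1); once
# it has been committed to the engine, it stalls in finish_commit() until
# insert2_finish is signalled.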

# Wait until the above INSERT is binlogged
--connection conn2
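# wait_show_condition.inc polls $show_statement until $field satisfies
# $condition, i.e. until the binlog position has moved past the position
# recorded above.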
--let $show_statement= SHOW MASTER STATUS
--let $field= Position
--let $condition= != '$binlog_pos'
--source include/wait_show_condition.inc

# Signal insert1 to finish the commit group
SET DEBUG_SYNC = "now SIGNAL continue_commit_stage";
--connection default
--reap
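
# At this point both INSERTs are committed to the engine, but INSERT(2, 1) is
# still paused in finish_commit(). With the bug, max_committed_transaction had
# not yet been updated for INSERT(2, 1), so the following UPDATE would log a
# last_committed smaller than 3.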

UPDATE t1 SET c2 = 2 WHERE c1 = 2;

SET DEBUG_SYNC = "now SIGNAL insert2_finish";
--connection conn1
--reap

--connection default
# INSERT(2,1): sequence_number = 3
# UPDATE: sequence_number = 4, last_committed = 3

--let $binlog_file= binlog.000001
--let $logical_timestamps= 3 4
--source include/assert_logical_timestamps.inc
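
# (The same values can be inspected manually in mysqlbinlog output, which
#  prints something like "last_committed=3 sequence_number=4" for each GTID
#  event; the exact format may vary between server versions.)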

DROP TABLE t1;