File: rr_i_40-44.test

################################################################################
#
# INSERT
#
# INSERT and COMMIT new rows, using the constant 40 for most values.
# For each new transaction, the constant is increased by 1.
#
# This test runs a number of consecutive transactions to generate high 
# concurrency:
#
# Tx 1:
#  - multi-statement insert, inserting first a positive, then a negative number (zero sum).
#
# Tx 2:
#  - insert multiple rows using a single statement.
#
# Tx 3:
#  - INSERT IGNORE using both known duplicate values and non-duplicates.
#
# Net effect: 6 more rows
#
# In this test we need some kind of valid unique integer value for the columns
# with unique indexes.
#
# Alternatively: 
#  - Set the unique value to 0 and rollback on ER_DUP_ENTRY (see
#    check_for_error_rollback.inc), then make sure to UPDATE where the unique value is 0
#    in other tests.
#  - OR: insert NULL (requires special handling when calculating row sums in 
#    other tests).
#  - OR: skip unique indexes entirely (except `pk`) (remove from t1 in init).
#
# Using CONNECTION_ID() (switching sign and adding offsets of up to 3) as the
# unique value, meaning that some of the INSERTs will fail with a duplicate key
# error until this value is high enough (should not take long with a relatively
# high number of threads and some duration, given that the number of initial
# rows is relatively low, ~1000). Let's just say this is a warm-up period.
#
# Alternatively, add some random integer to the value or use UNIX_TIMESTAMP()
# (the latter requires that some care is taken in subsequent updates etc. For
# example, simply doubling the value will cause overflow/truncation).
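# (E.g. doubling a current UNIX_TIMESTAMP() value, roughly 1.7e9, already
# exceeds the signed INT maximum of 2147483647, assuming the int* columns are
# 32-bit INTs.)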
# 
# No need to ROLLBACK if all statements in a transaction by themselves are
# consistent.
#
#
################################################################################
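# For readability: a rough sketch of the t1 definition these statements assume
# (primary key `pk`, unique indexes on `int1_unique`/`int2_unique`, plain keys
# on `int1_key`/`int2_key`). This is an illustration only; the authoritative
# definition lives in the suite's init script and may differ.
#
#   CREATE TABLE t1 (
#     pk INT NOT NULL AUTO_INCREMENT PRIMARY KEY,
#     id INT,
#     int1 INT, int1_key INT, int1_unique INT,
#     int2 INT, int2_key INT, int2_unique INT,
#     for_update INT, connection_id INT, thread_id INT,
#     is_uncommitted INT, is_consistent INT,
#     KEY (int1_key), KEY (int2_key),
#     UNIQUE KEY (int1_unique), UNIQUE KEY (int2_unique)
#   );
#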
SET autocommit = 0;
START TRANSACTION;
--echo
--echo *** multi-statement insert, inserting first positive then negative number:
--echo

--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_DUP_ENTRY
INSERT INTO t1 (`id`, `int1`, `int1_key`, `int1_unique`,
                `int2`, `int2_key`, `int2_unique`,
                `for_update`, `connection_id`, `thread_id`, `is_uncommitted`, `is_consistent`)
        VALUES (40, 40, 40, CONNECTION_ID(),
                -40, -40, -CONNECTION_ID(),
                0, CONNECTION_ID(), 0, 0, 1);


--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_DUP_ENTRY
INSERT INTO t1 (`id`, `int1`, `int1_key`, `int1_unique`,
                `int2`, `int2_key`, `int2_unique`,
                `for_update`, `connection_id`, `thread_id`, `is_uncommitted`, `is_consistent`)
        VALUES (-40, -40, -40, -CONNECTION_ID(),
                40, 40, CONNECTION_ID(),
                0, CONNECTION_ID(), 0, 0, 1);

COMMIT;

START TRANSACTION;
--echo
--echo *** insert multiple rows using a single statement:
--echo

# First row is by itself consistent (sum = 0). Row 3 zero-sums row 2, so the 
# statement itself is consistent.
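# Concretely (taking the int1/int2 values): row 1 sums to 41 + (-41) = 0;
# row 2 sums to 41 + 41 = 82 and is flagged is_consistent = 0; row 3 sums to
# -41 + (-41) = -82, cancelling row 2, so the three rows together sum to zero.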
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_DUP_ENTRY
INSERT INTO t1 (`id`, `int1`, `int1_key`, `int1_unique`,
                `int2`, `int2_key`, `int2_unique`,
                `for_update`, `connection_id`, `thread_id`, `is_uncommitted`, `is_consistent`)
        VALUES (41, 41, 41, CONNECTION_ID()+1,
                -41, -41, -(CONNECTION_ID()+1),
                0, CONNECTION_ID(), 0, 0, 1),
               (41, 41, 41, CONNECTION_ID()+2,
                41, 41, CONNECTION_ID()+2,
                0, CONNECTION_ID(), 0, 0, 0),
               (41, -41, -41, -(CONNECTION_ID()+2),
                -41, -41, -(CONNECTION_ID()+2),
                0, CONNECTION_ID(), 0, 0, 0);

COMMIT;

START TRANSACTION;
--echo
--echo *** INSERT IGNORE using both known duplicate values and non-duplicates:
--echo

# This MAY be discarded (duplicate entry in UNIQUE index) - should succeed if CONNECTION_ID is high enough (int*_unique).
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_DUP_ENTRY
INSERT IGNORE INTO t1 (`id`, `int1`, `int1_key`, `int1_unique`,
                       `int2`, `int2_key`, `int2_unique`,
                       `for_update`, `connection_id`, `thread_id`, `is_uncommitted`, `is_consistent`)
               VALUES (42, 42, 42, CONNECTION_ID()+3,
                       -42, -42, -(CONNECTION_ID()+3),
                       0, CONNECTION_ID(), 0, 0, 1);

--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT
# This WILL (SHOULD) be discarded (duplicate entry as primary key).
# pk's that are 1000 or less AND divisible by 5 should all be present (i.e. never deleted), so we pick pk 5.
# Note that we insert an inconsistent row, so it will show up as a sum anomaly if it succeeds.
INSERT IGNORE INTO t1 (`pk`, `id`, `int1`, `int1_key`, `int1_unique`,
                       `int2`, `int2_key`, `int2_unique`,
                       `for_update`, `connection_id`, `thread_id`, `is_uncommitted`, `is_consistent`)
               VALUES (5, 43, 42, 42, CONNECTION_ID(),
                       -42, -42, CONNECTION_ID(),
                       0, CONNECTION_ID(), 0, 0, 0);

--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT
# This MAY be discarded (duplicate entry in UNIQUE index).
INSERT IGNORE INTO t1 (`id`, `int1`, `int1_key`, `int1_unique`,
                       `int2`, `int2_key`, `int2_unique`,
                       `for_update`, `connection_id`, `thread_id`, `is_uncommitted`, `is_consistent`)
               VALUES (44, 42, 42, (CONNECTION_ID() + 1000) MOD 5000,
                       -42, -42, -((CONNECTION_ID() + 1000) MOD 5000),
                       0, CONNECTION_ID(), 0, 0, 1);

--source suite/engines/rr_trx/include/check_for_error_rollback.inc
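
# Roughly, an include like the one sourced above inspects the result of the
# previous statement and rolls back the transaction on failure; a minimal
# sketch of that idea is shown below (the actual contents of
# suite/engines/rr_trx/include/check_for_error_rollback.inc may differ):
#
#   if ($mysql_errno)
#   {
#     --echo * Statement failed, rolling back transaction
#     ROLLBACK;
#   }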

COMMIT;