File: attach_checkpoint_deadlock.test_slow

package info (click to toggle)
duckdb 1.5.1-3
  • links: PTS, VCS
  • area: main
  • in suites:
  • size: 299,196 kB
  • sloc: cpp: 865,414; ansic: 57,292; python: 18,871; sql: 12,663; lisp: 11,751; yacc: 7,412; lex: 1,682; sh: 747; makefile: 564
file content (36 lines) | stat: -rw-r--r-- 1,124 bytes parent folder | download | duplicates (4)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
# name: test/sql/attach/attach_checkpoint_deadlock.test_slow
# description: Deadlock when checkpointing multiple databases
# group: [attach]

# Run the loop body once per database name; 'concurrentforeach' executes the
# iterations concurrently, so multiple databases are written to and
# checkpointed at the same time — the scenario that used to deadlock.
concurrentforeach dbname foo bar i1 i2 i3 i4 i5 i6 i7 i8 i9

# Attach a separate on-disk database file for this iteration's name.
statement ok
attach '__TEST_DIR__/checkpoint_${dbname}.duckdb' as ${dbname}

# Create a table named after the database, inside that database.
statement ok
create table ${dbname}.${dbname}(foo bigint)

# Write one aggregated row; range(1000000) makes the transaction heavy
# enough that writes/checkpoints from concurrent iterations overlap.
statement ok
insert into ${dbname}.${dbname} select sum(i) from range(1000000) t(i)

# Checkpointing may legitimately be rejected while other threads hold open
# write transactions; 'statement maybe' accepts either success or the
# error text below — anything else (e.g. a hang or deadlock) fails the test.
statement maybe
checkpoint ${dbname}
----
there are other write transactions

# Scan the catalog (information_schema) while concurrent iterations are still
# attaching/writing/checkpointing; only successful execution is asserted
# ('statement ok'), not a specific result set.
statement ok
select
    coalesce(t.table_catalog, current_database()) as "database",
    t.table_schema as "schema",
    t.table_name as "name",
    t.table_type as "type",
    array_agg(c.column_name order by c.ordinal_position) as "column_names",
    array_agg(c.data_type order by c.ordinal_position) as "column_types",
    array_agg(c.is_nullable = 'YES' order by c.ordinal_position) as "column_nullable"
from information_schema.tables t
join information_schema.columns c on t.table_schema = c.table_schema and t.table_name = c.table_name
where t.table_schema = 'main'
-- group/order by the explicit key expressions instead of positional 1,2,3,4;
-- position 1 was the coalesce(...) expression, so it is repeated verbatim to
-- keep the grouping semantics identical
group by coalesce(t.table_catalog, current_database()), t.table_schema, t.table_name, t.table_type
order by coalesce(t.table_catalog, current_database()), t.table_schema, t.table_name, t.table_type

endloop