File: analyze_stmt_prefetch_count.test

package info (click to toggle)
mariadb 1%3A11.8.3-1
  • links: PTS, VCS
  • area: main
  • in suites: forky, sid
  • size: 772,520 kB
  • sloc: ansic: 2,414,714; cpp: 1,791,394; asm: 381,336; perl: 62,905; sh: 49,647; pascal: 40,897; java: 39,363; python: 20,791; yacc: 20,432; sql: 17,907; xml: 12,344; ruby: 8,544; cs: 6,542; makefile: 6,145; ada: 1,879; lex: 1,193; javascript: 996; objc: 80; tcl: 73; awk: 46; php: 22
file content (77 lines) | stat: -rw-r--r-- 2,579 bytes parent folder | download | duplicates (2)
# Test setup: verify that ANALYZE FORMAT=JSON's r_engine_stats page counters
# (pages_accessed, pages_read_count, pages_prefetch_read_count) are consistent
# with InnoDB's server-wide Innodb_pages_read status counter.
--source include/have_innodb.inc
--source include/have_sequence.inc


# Each row is 1K.
# (uuid() is 36 chars; repeat(uuid(),7) = 252 bytes, x4 columns ~ 1KB/row.
#  All four columns form the PK, so each row lives fully in the clustered
#  index - the whole dataset is one B-tree.)
create table t1 (
  a varchar(255),
  b varchar(255),
  c varchar(255),
  d varchar(255),
  primary key(a,b,c,d)
) engine=innodb character set latin1;

# The data size is 16K * 1K = 16M 
#  16M / (page_size=16K) = 1K pages.
# Disable uniqueness/FK checks so the bulk insert is fast, and run it as a
# single transaction so it is committed (and flushed) as one unit.
SET unique_checks=0, foreign_key_checks= 0;
begin;
insert into t1 select
  repeat(uuid(), 7), 
  repeat(uuid(), 7), 
  repeat(uuid(), 7), 
  repeat(uuid(), 7)
from seq_1_to_16384;
insert into t1 values ('z','z','z','z');
commit;

--echo # Restart the server to make sure we have an empty InnoDB Buffer Pool 
--echo # (in the test's .opt file we've disabled buffer pool saving/loading 
--echo #  and also tried to disable any background activity)
# Request a slow (full) shutdown so the next startup does no recovery or
# change-buffer merge work, which would otherwise inflate the read counters.
SET GLOBAL innodb_fast_shutdown=0;
--source include/restart_mysqld.inc

# Snapshot Innodb_pages_read as a baseline so we can later measure only the
# page reads caused by the ANALYZE statement below.
set @innodb_pages_read0= 
  (select variable_value 
   from information_schema.session_status 
   where variable_name like 'innodb_pages_read');

# Run a full PK scan under ANALYZE FORMAT=JSON on the cold buffer pool:
# every page must come from disk, via regular reads and/or prefetch.
let $analyze_output= `analyze format=json 
select * from t1 force index (PRIMARY) order by a,b,c,d`;
evalp set @js='$analyze_output';

# Extract the r_engine_stats object for t1 (the only table in the plan).
set @js=json_extract(@js, '$.query_block.nested_loop[0].table.r_engine_stats');
#select @js;
set @pages_accessed= cast(json_value(@js,'$.pages_accessed') as INT);
set @pages_read_count= cast(json_value(@js,'$.pages_read_count') as INT);
set @pages_prefetch_read_count= cast(json_value(@js,'$.pages_prefetch_read_count') as INT);

# The table is ~1K data pages (see computation above); the 1000..1500 window
# leaves slack for non-leaf/system pages while still catching gross errors.
select @pages_accessed > 1000 and @pages_accessed < 1500;

set @total_read = (@pages_read_count + @pages_prefetch_read_count);

# On a cold buffer pool, sync reads + prefetch reads together should roughly
# equal the pages the query accessed; allow 25% tolerance either way.
select @pages_accessed*0.75 < @total_read, @total_read < @pages_accessed*1.25;

set @innodb_pages_read1= 
  (select variable_value 
   from information_schema.session_status 
   where variable_name like 'innodb_pages_read');

# Cross-check against the server-wide counter: it must have grown by about
# the same ~1K pages that ANALYZE reported for this statement.
set @innodb_pages_read_incr= (@innodb_pages_read1 - @innodb_pages_read0);

select @innodb_pages_read_incr > 1000, @innodb_pages_read_incr < 1500;

# Repeat the identical ANALYZE, now with a warm buffer pool.
let $analyze_output= `analyze format=json 
select * from t1 force index (PRIMARY) order by a,b,c,d`;
evalp set @js='$analyze_output';
set @js=json_extract(@js, '$.query_block.nested_loop[0].table.r_engine_stats');

--echo # This must just print pages_accessed. No page reads or prefetch reads,
--echo # because the previous query has read all the needed pages into the
--echo # buffer pool, which is set to be large enough to accomodate the whole
--echo # table.
# NOTE(review): "accomodate" above is a typo for "accommodate"; it is echoed
# output recorded in the .result file, so fixing it requires re-recording.
# Mask the numeric value - only the set of keys present in the JSON matters:
# read/prefetch counters must be absent entirely on a fully warm pool.
--replace_regex /[0-9]+/NUMBER/
select @js;
set @pages_accessed2= cast(json_value(@js,'$.pages_accessed') as INT);

# The same plan must access the same number of pages whether warm or cold.
select @pages_accessed2 = @pages_accessed;
drop table t1;