# 2007 April 12
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for SQLite library.
# The focus of the tests in this file is to verify that the
# pager optimizations implemented in version 3.3.14 work.
#
# $Id: pageropt.test,v 1.5 2008/08/20 14:49:25 danielk1977 Exp $
set testdir [file dirname $argv0]
source $testdir/tester.tcl
do_not_use_codec
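# The tests below count ordinary pager reads and writes, so they are
# skipped for builds that would perturb those counts: secure_delete
# presumably adds extra page writes when content is freed, and direct
# overflow reads presumably let overflow pages bypass the pager's normal
# read path (and therefore its read counter).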
ifcapable {!pager_pragmas||secure_delete||direct_read} {
finish_test
return
}
# A non-zero reserved_bytes value changes the number of pages in the
# database file, which messes up the results in this test.
if {[nonzero_reserved_bytes]} {finish_test; return;}
# Run the SQL statement supplied by the argument and return
# the results. Prepend three integers to the beginning of the
# result, which are
#
# (1) The number of page reads from the database
# (2) The number of page writes to the database
# (3) The number of page writes to the journal
#
proc pagercount_sql {sql {db db}} {
global sqlite3_pager_readdb_count
global sqlite3_pager_writedb_count
global sqlite3_pager_writej_count
global sqlite3_pager_pgfree_count
set sqlite3_pager_readdb_count 0
set sqlite3_pager_writedb_count 0
set sqlite3_pager_writej_count 0
set r [$db eval $sql]
set cnt [list $sqlite3_pager_readdb_count \
$sqlite3_pager_writedb_count \
$sqlite3_pager_writej_count ]
return [concat $cnt $r]
}
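# Example usage (illustrative only -- the actual counter values depend on
# the page size, the schema, and the state of the page cache at the time
# of the call):
#
#   pagercount_sql {SELECT * FROM sqlite_master}
#
# returns a list of the form {READS WRITES JOURNAL-WRITES ...query results...},
# for instance "1 0 0" followed by whatever rows the query produces.
#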
# Set up the test database
#
do_test pageropt-1.1 {
sqlite3_soft_heap_limit 0
execsql {
PRAGMA auto_vacuum = OFF;
PRAGMA page_size = 1024;
}
pagercount_sql {
CREATE TABLE t1(x);
}
} {0 2 0}
do_test pageropt-1.2 {
pagercount_sql {
INSERT INTO t1 VALUES(randomblob(5000));
}
} {0 6 2}
# Verify that values remain in the cache for subsequent reads.
# We should not have to go back to disk.
#
do_test pageropt-1.3 {
pagercount_sql {
SELECT length(x) FROM t1
}
} {0 0 0 5000}
# If another connection reads the database, the original cache
# remains valid.
#
sqlite3 db2 test.db
set blobcontent [db2 one {SELECT hex(x) FROM t1}]
do_test pageropt-1.4 {
pagercount_sql {
SELECT hex(x) FROM t1
}
} [list 0 0 0 $blobcontent]
# But if the other connection modifies the database, then the cache
# must refill.
#
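# Under the "mmap" permutation most pages are presumably read through the
# memory map rather than the pager's normal read path, so only one counted
# page read is expected instead of six.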
ifcapable mmap {
set x [expr {[permutation]=="mmap" ? 1 : 6}]
} else {
set x 6
}
do_test pageropt-1.5 {
db2 eval {CREATE TABLE t2(y)}
pagercount_sql {
SELECT hex(x) FROM t1
}
} [list $x 0 0 $blobcontent]
do_test pageropt-1.6 {
pagercount_sql {
SELECT hex(x) FROM t1
}
} [list 0 0 0 $blobcontent]
# Verify that the last page of an overflow chain is not read from
# disk when deleting a row. The one row of t1(x) has four pages
# of overflow. So deleting that row from t1 should involve reading
# the sqlite_master table (1 page), the root page of t1 (1 page), and
# three of the four overflow pages, for a total of 5 pages.
#
# Pages written are page 1 (for the freelist pointer), the root page
# of the table, and one of the former overflow pages, which becomes
# the trunk of the freelist. Total: 3.
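# Presumably those same three pages are the ones journalled, so the
# expected counters below are 5 reads, 3 writes, and 3 journal writes.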
#
do_test pageropt-2.1 {
db close
sqlite3 db test.db
pagercount_sql {
DELETE FROM t1 WHERE rowid=1
}
} {5 3 3}
# When pulling pages off of the freelist, there is no reason
# to actually bring in the old content.
#
do_test pageropt-2.2 {
db close
sqlite3 db test.db
pagercount_sql {
INSERT INTO t1 VALUES(randomblob(1500));
}
} {3 4 3}
do_test pageropt-2.3 {
pagercount_sql {
INSERT INTO t1 VALUES(randomblob(1500));
}
} {0 4 3}
# Note the new optimization: when pulling the very last page off of the
# freelist, we do not read the content of that page.
#
do_test pageropt-2.4 {
pagercount_sql {
INSERT INTO t1 VALUES(randomblob(1500));
}
} {0 5 3}
# Appending a large quantity of data does not involve writing much
# to the journal file.
#
do_test pageropt-3.1 {
pagercount_sql {
INSERT INTO t2 SELECT * FROM t1;
}
} {1 7 2}
# Once again, we do not need to read the last page of an overflow chain
# while deleting.
#
do_test pageropt-3.2 {
pagercount_sql {
DROP TABLE t2;
}
} {0 2 3}
do_test pageropt-3.3 {
pagercount_sql {
DELETE FROM t1;
}
} {0 3 3}
# There are now 11 pages on the freelist. Move them all into an
# overflow chain by inserting a single large record. Starting from
# a cold cache, only page 1, the root page of table t1, and the trunk
# of the freelist need to be read (3 pages). And only those three
# pages need to be journalled. But 13 pages need to be written:
# page 1, the root page of table t1, and an 11-page overflow chain.
#
do_test pageropt-4.1 {
db close
sqlite3 db test.db
pagercount_sql {
INSERT INTO t1 VALUES(randomblob(11300))
}
} {3 13 3}
# Now we delete that big entry, starting from a cold cache and an
# empty freelist. The first 10 pages of the 11-page overflow chain
# have to be read, together with page 1 and the root page of t1:
# 12 reads total. But only page 1, the t1 root, and the trunk of the
# freelist need to be journalled and written back.
#
do_test pageropt-4.2 {
db close
sqlite3 db test.db
pagercount_sql {
DELETE FROM t1
}
} {12 3 3}
sqlite3_soft_heap_limit $cmdlinearg(soft-heap-limit)
catch {db2 close}
finish_test