File: selectG.test

Package: sqlcipher 3.4.1-2
# 2015-01-05
#
# The author disclaims copyright to this source code.  In place of
# a legal notice, here is a blessing:
#
#    May you do good and not evil.
#    May you find forgiveness for yourself and forgive others.
#    May you share freely, never taking more than you give.
#
#***********************************************************************
#
# This file verifies that an INSERT with a very large number of VALUES
# terms works and does not hit the SQLITE_LIMIT_COMPOUND_SELECT limit.
#
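# Background: a multi-row VALUES clause is compiled much like a compound
# SELECT, so a long VALUES list could formerly run into the
# SQLITE_LIMIT_COMPOUND_SELECT limit (500 in a default build).  For
# reference, under the testfixture harness the current limit should be
# readable, without changing it, by passing a negative value:
#
#   sqlite3_limit db SQLITE_LIMIT_COMPOUND_SELECT -1
#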

set testdir [file dirname $argv0]
source $testdir/tester.tcl
set testprefix selectG

# Do an INSERT with a VALUES clause that contains 100,000 entries.  Verify
# that this insert happens quickly (in less than 10 seconds).  Actually, the
# insert will normally happen in less than 0.5 seconds on a workstation, but
# we allow plenty of overhead for slower machines.  The speed test checks
# for an O(N*N) inefficiency that was once in the code and that would make
# the insert run for over a minute.
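# The expected result follows from the inserted values 1..100000: the sum
# is 100000*100001/2 = 5000050000 and the average is 50000.5.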
#
do_test 100 {
  set sql "CREATE TABLE t1(x);\nINSERT INTO t1(x) VALUES"
  for {set i 1} {$i<100000} {incr i} {
    append sql "($i),"
  }
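  # The loop exits with $i equal to 100000, so this appends the final
  # (100,000th) row and terminates the statement.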
  append sql "($i);"
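  # [time] reports "N microseconds per iteration"; element 0 is the
  # elapsed time for the single evaluation of the script.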
  set microsec [lindex [time {db eval $sql}] 0]
  db eval {
    SELECT count(x), sum(x), avg(x), $microsec<10000000 FROM t1;
  }
} {100000 5000050000 50000.5 1}
  
finish_test