# 2007 April 12
#
# The author disclaims copyright to this source code.  In place of
# a legal notice, here is a blessing:
#
#    May you do good and not evil.
#    May you find forgiveness for yourself and forgive others.
#    May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for the SQLite library.  The
# focus of the tests in this file is to verify that the pager
# optimizations implemented in version 3.3.14 work.
#
# $Id: pageropt.test,v 1.5 2008/08/20 14:49:25 danielk1977 Exp $

set testdir [file dirname $argv0]
source $testdir/tester.tcl

ifcapable {!pager_pragmas||secure_delete||direct_read} {
  finish_test
  return
}

# Run the SQL statement supplied by the argument and return
# the results.  Prepend three integers to the beginning of the
# result which are:
#
#   (1)  The number of page reads from the database
#   (2)  The number of page writes to the database
#   (3)  The number of page writes to the journal
#
proc pagercount_sql {sql {db db}} {
  global sqlite3_pager_readdb_count
  global sqlite3_pager_writedb_count
  global sqlite3_pager_writej_count
  global sqlite3_pager_pgfree_count
  set sqlite3_pager_readdb_count 0
  set sqlite3_pager_writedb_count 0
  set sqlite3_pager_writej_count 0
  set r [$db eval $sql]
  set cnt [list $sqlite3_pager_readdb_count \
               $sqlite3_pager_writedb_count \
               $sqlite3_pager_writej_count ]
  return [concat $cnt $r]
}

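# A minimal usage sketch (an illustration only, not one of the tests
# below): a call such as
#
#   pagercount_sql {CREATE TABLE t1(x)}
#
# returns the three counters followed by any rows the SQL produces, so a
# result of "0 2 0" would mean zero page reads, two page writes to the
# database file, and zero journal writes.
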
# Set up the test database.
#
do_test pageropt-1.1 {
  sqlite3_soft_heap_limit 0
  execsql {
    PRAGMA auto_vacuum = OFF;
    PRAGMA page_size = 1024;
  }
  pagercount_sql {
    CREATE TABLE t1(x);
  }
} {0 2 0}
do_test pageropt-1.2 {
  pagercount_sql {
    INSERT INTO t1 VALUES(randomblob(5000));
  }
} {0 6 2}

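# A rough accounting of the {0 6 2} result above, offered as an assumption
# based on the 1024-byte page size and the four-page overflow chain
# described later in this file: the 5000-byte blob needs the t1 root page
# plus four overflow pages, and page 1 is also updated as part of the same
# transaction, for 6 page writes.  Only page 1 and the t1 root existed
# before the INSERT, so only those 2 pages are journalled.
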
# Verify that values remain in the cache for subsequent reads.
# We should not have to go back to disk.
#
do_test pageropt-1.3 {
  pagercount_sql {
    SELECT length(x) FROM t1
  }
} {0 0 0 5000}

# If another connection reads the database, the original cache
# remains valid.
#
sqlite3 db2 test.db
set blobcontent [db2 one {SELECT hex(x) FROM t1}]
do_test pageropt-1.4 {
  pagercount_sql {
    SELECT hex(x) FROM t1
  }
} [list 0 0 0 $blobcontent]

# But if the other connection modifies the database, then the cache
# must refill.
#
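# The expected number of page reads below depends on the I/O method.  As
# an assumption about how sqlite3_pager_readdb_count is maintained, under
# the "mmap" permutation most pages arrive through the memory-mapped file
# and are not counted as ordinary reads, so only 1 read is expected there
# instead of 6.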
ifcapable mmap {
  set x [expr {[permutation]=="mmap" ? 1 : 6}]
} else {
  set x 6
}
do_test pageropt-1.5 {
  db2 eval {CREATE TABLE t2(y)}
  pagercount_sql {
    SELECT hex(x) FROM t1
  }
} [list $x 0 0 $blobcontent]
do_test pageropt-1.6 {
  pagercount_sql {
    SELECT hex(x) FROM t1
  }
} [list 0 0 0 $blobcontent]

# Verify that the last page of an overflow chain is not read from
# disk when deleting a row.  The one row of t1(x) has four pages
# of overflow.  So deleting that row from t1 should involve reading
# the sqlite_master table (1 page), the root page of t1 (1 page), and
# the first three overflow pages of t1, for a total of 5 pages.
#
# Pages written are page 1 (for the freelist pointer), the root page
# of the table, and one of the overflow chain pointers because it
# becomes the trunk of the freelist.  Total 3.
#
do_test pageropt-2.1 {
  db close
  sqlite3 db test.db
  pagercount_sql {
    DELETE FROM t1 WHERE rowid=1
  }
} {5 3 3}

# When pulling pages off of the freelist, there is no reason
# to actually bring in the old content.
#
do_test pageropt-2.2 {
  db close
  sqlite3 db test.db
  pagercount_sql {
    INSERT INTO t1 VALUES(randomblob(1500));
  }
} {3 4 3}
do_test pageropt-2.3 {
  pagercount_sql {
    INSERT INTO t1 VALUES(randomblob(1500));
  }
} {0 4 3}

# Note the new optimization: when pulling the very last page off of the
# freelist, we do not read the content of that page.
#
do_test pageropt-2.4 {
  pagercount_sql {
    INSERT INTO t1 VALUES(randomblob(1500));
  }
} {0 5 3}

# Appending a large quantity of data does not involve writing much
# to the journal file.
#
do_test pageropt-3.1 {
  pagercount_sql {
    INSERT INTO t2 SELECT * FROM t1;
  }
} {1 7 2}

# Once again, we do not need to read the last page of an overflow chain
# while deleting.
#
do_test pageropt-3.2 {
  pagercount_sql {
    DROP TABLE t2;
  }
} {0 2 3}
do_test pageropt-3.3 {
  pagercount_sql {
    DELETE FROM t1;
  }
} {0 3 3}

# There are now 11 pages on the freelist.  Move them all into an
# overflow chain by inserting a single large record.  Starting from
# a cold cache, only page 1, the root page of table t1, and the trunk
# of the freelist need to be read (3 pages).  And only those three
# pages need to be journalled.  But 13 pages need to be written:
# page 1, the root page of table t1, and an 11-page overflow chain.
#
do_test pageropt-4.1 {
  db close
  sqlite3 db test.db
  pagercount_sql {
    INSERT INTO t1 VALUES(randomblob(11300))
  }
} {3 13 3}

# Now we delete that big entry, starting from a cold cache and an
# empty freelist.  The first 10 pages of the 11-page overflow chain have
# to be read, together with page 1 and the root of the t1 table: 12
# reads total.  But only page 1, the t1 root, and the trunk of the
# freelist need to be journalled and written back.
#
do_test pageropt-4.2 {
  db close
  sqlite3 db test.db
  pagercount_sql {
    DELETE FROM t1
  }
} {12 3 3}

sqlite3_soft_heap_limit $cmdlinearg(soft-heap-limit)
catch {db2 close}
finish_test