Merge branch '2.3' into 2.4

Johan Wikman
2019-10-31 10:06:27 +02:00
935 changed files with 339179 additions and 1 deletion

View File

@ -1316,6 +1316,12 @@ GRANT SELECT ON mysql.* TO 'maxscale'@'maxscalehost';
`mysql.*` in addition to the normal grants. This is to work around MDEV-13453
which was fixed in MariaDB 10.2.11.
If you are using MariaDB ColumnStore, the following grant is required.
```
GRANT ALL ON infinidb_vtable.* TO 'maxscale'@'maxscalehost';
```
See [MaxScale Troubleshooting](https://mariadb.com/kb/en/mariadb-enterprise/maxscale-troubleshooting/)
for more information on how to troubleshoot authentication related problems.

View File

@ -1,4 +1,4 @@
# MariaDB MaxScale 2.3.13 Release Notes
# MariaDB MaxScale 2.3.13 Release Notes -- 2019-10-30
Release 2.3.13 is a GA release.

View File

@ -0,0 +1,197 @@
# 2011 May 17
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
#
# Test cases for the SQLITE_ENABLE_8_3_NAMES feature that forces all
# filename extensions to be limited to 3 characters. Some embedded
# systems need this to work around Microsoft FAT patents, but this
# feature should be disabled on most deployments.
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
ifcapable !8_3_names {
finish_test
return
}
db close
sqlite3_shutdown
sqlite3_config_uri 1
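# URI filename handling must be enabled globally here; otherwise the
# 8_3_names=1 query parameter used by the tests below would be ignored.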
do_test 8_3_names-1.0 {
forcedelete test.db test.nal test.db-journal
sqlite3 db test.db
db eval {
PRAGMA cache_size=10;
CREATE TABLE t1(x);
INSERT INTO t1 VALUES(randomblob(20000));
BEGIN;
DELETE FROM t1;
INSERT INTO t1 VALUES(randomblob(15000));
}
file exists test.db-journal
} 1
do_test 8_3_names-1.1 {
file exists test.nal
} 0
do_test 8_3_names-1.2 {
db eval {
ROLLBACK;
SELECT length(x) FROM t1
}
} 20000
db close
do_test 8_3_names-2.0 {
forcedelete test.db test.nal test.db-journal
sqlite3 db file:./test.db?8_3_names=1
db eval {
PRAGMA cache_size=10;
CREATE TABLE t1(x);
INSERT INTO t1 VALUES(randomblob(20000));
BEGIN;
DELETE FROM t1;
INSERT INTO t1 VALUES(randomblob(15000));
}
file exists test.db-journal
} 0
do_test 8_3_names-2.1 {
file exists test.nal
} 1
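# Copy the database and its ".nal" rollback journal while the write
# transaction is still open; test 2.3 below checks that the copy is
# recovered (rolled back) correctly from the 8.3-named journal.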
forcedelete test2.db test2.nal test2.db-journal
copy_file test.db test2.db
copy_file test.nal test2.nal
do_test 8_3_names-2.2 {
db eval {
COMMIT;
SELECT length(x) FROM t1
}
} 15000
do_test 8_3_names-2.3 {
sqlite3 db2 file:./test2.db?8_3_names=1
db2 eval {
PRAGMA integrity_check;
SELECT length(x) FROM t1;
}
} {ok 20000}
db close
do_test 8_3_names-3.0 {
forcedelete test.db test.nal test.db-journal
sqlite3 db file:./test.db?8_3_names=0
db eval {
PRAGMA cache_size=10;
CREATE TABLE t1(x);
INSERT INTO t1 VALUES(randomblob(20000));
BEGIN;
DELETE FROM t1;
INSERT INTO t1 VALUES(randomblob(15000));
}
file exists test.db-journal
} 1
do_test 8_3_names-3.1 {
file exists test.nal
} 0
forcedelete test2.db test2.nal test2.db-journal
copy_file test.db test2.db
copy_file test.db-journal test2.db-journal
do_test 8_3_names-3.2 {
db eval {
COMMIT;
SELECT length(x) FROM t1
}
} 15000
do_test 8_3_names-3.3 {
sqlite3 db2 file:./test2.db?8_3_names=0
db2 eval {
PRAGMA integrity_check;
SELECT length(x) FROM t1;
}
} {ok 20000}
##########################################################################
# Master journals.
#
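# A COMMIT that spans two attached databases needs a master journal file;
# this checks that such multi-database transactions still work when
# 8_3_names=1 is in effect.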
db close
forcedelete test.db test2.db
do_test 8_3_names-4.0 {
sqlite3 db file:./test.db?8_3_names=1
db eval {
CREATE TABLE t1(x);
INSERT INTO t1 VALUES(1);
ATTACH 'file:./test2.db?8_3_names=1' AS db2;
CREATE TABLE db2.t2(y);
INSERT INTO t2 VALUES(2);
BEGIN;
INSERT INTO t1 VALUES(3);
INSERT INTO t2 VALUES(4);
COMMIT;
SELECT * FROM t1, t2 ORDER BY x, y
}
} {1 2 1 4 3 2 3 4}
##########################################################################
# WAL mode.
#
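# With 8_3_names=1 the write-ahead log and shared-memory files are named
# test.wal and test.shm rather than test.db-wal and test.db-shm, as the
# file-existence checks in tests 5.1 through 5.4 verify.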
ifcapable !wal {
finish_test
return
}
db close
forcedelete test.db
do_test 8_3_names-5.0 {
sqlite3 db file:./test.db?8_3_names=1
load_static_extension db wholenumber
db eval {
PRAGMA journal_mode=WAL;
CREATE TABLE t1(x);
CREATE VIRTUAL TABLE nums USING wholenumber;
INSERT INTO t1 SELECT value FROM nums WHERE value BETWEEN 1 AND 1000;
BEGIN;
UPDATE t1 SET x=x*2;
}
sqlite3 db2 file:./test.db?8_3_names=1
load_static_extension db2 wholenumber
db2 eval {
BEGIN;
SELECT sum(x) FROM t1;
}
} {500500}
do_test 8_3_names-5.1 {
file exists test.db-wal
} 0
do_test 8_3_names-5.2 {
file exists test.wal
} 1
do_test 8_3_names-5.3 {
file exists test.db-shm
} 0
do_test 8_3_names-5.4 {
file exists test.shm
} 1
do_test 8_3_names-5.5 {
db eval {
COMMIT;
SELECT sum(x) FROM t1;
}
} {1001000}
do_test 8_3_names-5.6 {
db2 eval {
SELECT sum(x) FROM t1;
}
} {500500}
finish_test

View File

@ -0,0 +1,61 @@
# 2015-06-02
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for SQLite library. The
# focus of this file is type affinity in comparison operations.
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
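# Columns xi, xr and xn carry numeric affinity, so the text values '2' and
# '03' inserted below are converted to numbers when stored; xb (BLOB) and
# xt (TEXT) keep them as text. The typeof() tests that follow confirm this.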
do_execsql_test affinity2-100 {
CREATE TABLE t1(
xi INTEGER,
xr REAL,
xb BLOB,
xn NUMERIC,
xt TEXT
);
INSERT INTO t1(rowid,xi,xr,xb,xn,xt) VALUES(1,1,1,1,1,1);
INSERT INTO t1(rowid,xi,xr,xb,xn,xt) VALUES(2,'2','2','2','2','2');
INSERT INTO t1(rowid,xi,xr,xb,xn,xt) VALUES(3,'03','03','03','03','03');
} {}
do_execsql_test affinity2-110 {
SELECT xi, typeof(xi) FROM t1 ORDER BY rowid;
} {1 integer 2 integer 3 integer}
do_execsql_test affinity2-120 {
SELECT xr, typeof(xr) FROM t1 ORDER BY rowid;
} {1.0 real 2.0 real 3.0 real}
do_execsql_test affinity2-130 {
SELECT xb, typeof(xb) FROM t1 ORDER BY rowid;
} {1 integer 2 text 03 text}
do_execsql_test affinity2-140 {
SELECT xn, typeof(xn) FROM t1 ORDER BY rowid;
} {1 integer 2 integer 3 integer}
do_execsql_test affinity2-150 {
SELECT xt, typeof(xt) FROM t1 ORDER BY rowid;
} {1 text 2 text 03 text}
do_execsql_test affinity2-200 {
SELECT rowid, xi==xt, xi==xb, xi==+xt FROM t1 ORDER BY rowid;
} {1 1 1 1 2 1 1 1 3 1 1 1}
do_execsql_test affinity2-210 {
SELECT rowid, xr==xt, xr==xb, xr==+xt FROM t1 ORDER BY rowid;
} {1 1 1 1 2 1 1 1 3 1 1 1}
do_execsql_test affinity2-220 {
SELECT rowid, xn==xt, xn==xb, xn==+xt FROM t1 ORDER BY rowid;
} {1 1 1 1 2 1 1 1 3 1 1 1}
do_execsql_test affinity2-300 {
SELECT rowid, xt==+xi, xt==xi, xt==xb FROM t1 ORDER BY rowid;
} {1 1 1 0 2 1 1 1 3 0 1 1}
finish_test

View File

@ -0,0 +1,78 @@
# 2006 January 20
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for SQLite library.
#
# This file implements tests for calling sqlite3_result_error()
# from within an aggregate function implementation.
#
# $Id: aggerror.test,v 1.3 2006/05/03 23:34:06 drh Exp $
set testdir [file dirname $argv0]
source $testdir/tester.tcl
# Add the x_count aggregate function to the database handle.
# x_count will error out if its input is 40 or 41 or if its
# final result is 42. Make sure that such errors are handled
# appropriately.
#
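# (x_count itself is registered on the connection by the test-harness
# command sqlite3_create_aggregate used in the first test below.)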
do_test aggerror-1.1 {
set DB [sqlite3_connection_pointer db]
sqlite3_create_aggregate $DB
execsql {
CREATE TABLE t1(a);
INSERT INTO t1 VALUES(1);
INSERT INTO t1 VALUES(2);
INSERT INTO t1 SELECT a+2 FROM t1;
INSERT INTO t1 SELECT a+4 FROM t1;
INSERT INTO t1 SELECT a+8 FROM t1;
INSERT INTO t1 SELECT a+16 FROM t1;
INSERT INTO t1 SELECT a+32 FROM t1 ORDER BY a LIMIT 7;
SELECT x_count(*) FROM t1;
}
} {39}
do_test aggerror-1.2 {
execsql {
INSERT INTO t1 VALUES(40);
SELECT x_count(*) FROM t1;
}
} {40}
do_test aggerror-1.3 {
catchsql {
SELECT x_count(a) FROM t1;
}
} {1 {value of 40 handed to x_count}}
ifcapable utf16 {
do_test aggerror-1.4 {
execsql {
UPDATE t1 SET a=41 WHERE a=40
}
catchsql {
SELECT x_count(a) FROM t1;
}
} {1 abc}
}
do_test aggerror-1.5 {
execsql {
SELECT x_count(*) FROM t1
}
} 40
do_test aggerror-1.6 {
execsql {
INSERT INTO t1 VALUES(40);
INSERT INTO t1 VALUES(42);
}
catchsql {
SELECT x_count(*) FROM t1;
}
} {1 {x_count totals to 42}}
finish_test

View File

@ -0,0 +1,237 @@
# 2012 August 23
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for SQLite library.
#
# This file implements tests for processing aggregate queries with
# subqueries in which the subqueries hold the aggregate functions
# or in which the subqueries are themselves aggregate queries
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
do_test aggnested-1.1 {
db eval {
CREATE TABLE t1(a1 INTEGER);
INSERT INTO t1 VALUES(1), (2), (3);
CREATE TABLE t2(b1 INTEGER);
INSERT INTO t2 VALUES(4), (5);
SELECT (SELECT group_concat(a1,'x') FROM t2) FROM t1;
}
} {1x2x3}
do_test aggnested-1.2 {
db eval {
SELECT
(SELECT group_concat(a1,'x') || '-' || group_concat(b1,'y') FROM t2)
FROM t1;
}
} {1x2x3-4y5}
do_test aggnested-1.3 {
db eval {
SELECT (SELECT group_concat(b1,a1) FROM t2) FROM t1;
}
} {415 425 435}
do_test aggnested-1.4 {
db eval {
SELECT (SELECT group_concat(a1,b1) FROM t2) FROM t1;
}
} {151 252 353}
# This test case is a copy of the one in
# http://www.mail-archive.com/sqlite-users@sqlite.org/msg70787.html
#
do_test aggnested-2.0 {
sqlite3 db2 :memory:
db2 eval {
CREATE TABLE t1 (A1 INTEGER NOT NULL,A2 INTEGER NOT NULL,A3 INTEGER NOT
NULL,A4 INTEGER NOT NULL,PRIMARY KEY(A1));
REPLACE INTO t1 VALUES(1,11,111,1111);
REPLACE INTO t1 VALUES(2,22,222,2222);
REPLACE INTO t1 VALUES(3,33,333,3333);
CREATE TABLE t2 (B1 INTEGER NOT NULL,B2 INTEGER NOT NULL,B3 INTEGER NOT
NULL,B4 INTEGER NOT NULL,PRIMARY KEY(B1));
REPLACE INTO t2 VALUES(1,88,888,8888);
REPLACE INTO t2 VALUES(2,99,999,9999);
SELECT (SELECT GROUP_CONCAT(CASE WHEN a1=1 THEN'A' ELSE 'B' END) FROM t2),
t1.*
FROM t1;
}
} {A,B,B 3 33 333 3333}
db2 close
##################### Test cases for ticket [bfbf38e5e9956ac69f] ############
#
# This first test case is the original problem report:
do_test aggnested-3.0 {
db eval {
CREATE TABLE AAA (
aaa_id INTEGER PRIMARY KEY AUTOINCREMENT
);
CREATE TABLE RRR (
rrr_id INTEGER PRIMARY KEY AUTOINCREMENT,
rrr_date INTEGER NOT NULL,
rrr_aaa INTEGER
);
CREATE TABLE TTT (
ttt_id INTEGER PRIMARY KEY AUTOINCREMENT,
target_aaa INTEGER NOT NULL,
source_aaa INTEGER NOT NULL
);
insert into AAA (aaa_id) values (2);
insert into TTT (ttt_id, target_aaa, source_aaa)
values (4469, 2, 2);
insert into TTT (ttt_id, target_aaa, source_aaa)
values (4476, 2, 1);
insert into RRR (rrr_id, rrr_date, rrr_aaa)
values (0, 0, NULL);
insert into RRR (rrr_id, rrr_date, rrr_aaa)
values (2, 4312, 2);
SELECT i.aaa_id,
(SELECT sum(CASE WHEN (t.source_aaa == i.aaa_id) THEN 1 ELSE 0 END)
FROM TTT t
) AS segfault
FROM
(SELECT curr.rrr_aaa as aaa_id
FROM RRR curr
-- you also can comment out the next line
-- it causes segfault to happen after one row is outputted
INNER JOIN AAA a ON (curr.rrr_aaa = aaa_id)
LEFT JOIN RRR r ON (r.rrr_id <> 0 AND r.rrr_date < curr.rrr_date)
GROUP BY curr.rrr_id
HAVING r.rrr_date IS NULL
) i;
}
} {2 1}
# Further variants of the test case, as found in the ticket
#
do_test aggnested-3.1 {
db eval {
DROP TABLE IF EXISTS t1;
DROP TABLE IF EXISTS t2;
CREATE TABLE t1 (
id1 INTEGER PRIMARY KEY AUTOINCREMENT,
value1 INTEGER
);
INSERT INTO t1 VALUES(4469,2),(4476,1);
CREATE TABLE t2 (
id2 INTEGER PRIMARY KEY AUTOINCREMENT,
value2 INTEGER
);
INSERT INTO t2 VALUES(0,1),(2,2);
SELECT
(SELECT sum(value2==xyz) FROM t2)
FROM
(SELECT curr.value1 as xyz
FROM t1 AS curr LEFT JOIN t1 AS other
GROUP BY curr.id1);
}
} {1 1}
do_test aggnested-3.2 {
db eval {
DROP TABLE IF EXISTS t1;
DROP TABLE IF EXISTS t2;
CREATE TABLE t1 (
id1 INTEGER,
value1 INTEGER,
x1 INTEGER
);
INSERT INTO t1 VALUES(4469,2,98),(4469,1,99),(4469,3,97);
CREATE TABLE t2 (
value2 INTEGER
);
INSERT INTO t2 VALUES(1);
SELECT
(SELECT sum(value2==xyz) FROM t2)
FROM
(SELECT value1 as xyz, max(x1) AS pqr
FROM t1
GROUP BY id1);
SELECT
(SELECT sum(value2<>xyz) FROM t2)
FROM
(SELECT value1 as xyz, max(x1) AS pqr
FROM t1
GROUP BY id1);
}
} {1 0}
do_test aggnested-3.3 {
db eval {
DROP TABLE IF EXISTS t1;
DROP TABLE IF EXISTS t2;
CREATE TABLE t1(id1, value1);
INSERT INTO t1 VALUES(4469,2),(4469,1);
CREATE TABLE t2 (value2);
INSERT INTO t2 VALUES(1);
SELECT (SELECT sum(value2=value1) FROM t2), max(value1)
FROM t1
GROUP BY id1;
}
} {0 2}
# A batch of queries all doing approximately the same operation involving
# two nested aggregate queries.
#
do_test aggnested-3.11 {
db eval {
DROP TABLE IF EXISTS t1;
DROP TABLE IF EXISTS t2;
CREATE TABLE t1(id1, value1);
INSERT INTO t1 VALUES(4469,12),(4469,11),(4470,34);
CREATE INDEX t1id1 ON t1(id1);
CREATE TABLE t2 (value2);
INSERT INTO t2 VALUES(12),(34),(34);
INSERT INTO t2 SELECT value2 FROM t2;
SELECT max(value1), (SELECT count(*) FROM t2 WHERE value2=max(value1))
FROM t1
GROUP BY id1;
}
} {12 2 34 4}
do_test aggnested-3.12 {
db eval {
SELECT max(value1), (SELECT count(*) FROM t2 WHERE value2=value1)
FROM t1
GROUP BY id1;
}
} {12 2 34 4}
do_test aggnested-3.13 {
db eval {
SELECT value1, (SELECT sum(value2=value1) FROM t2)
FROM t1;
}
} {12 2 11 0 34 4}
do_test aggnested-3.14 {
db eval {
SELECT value1, (SELECT sum(value2=value1) FROM t2)
FROM t1
WHERE value1 IN (SELECT max(value1) FROM t1 GROUP BY id1);
}
} {12 2 34 4}
do_test aggnested-3.15 {
# FIXME: If case 3.16 works, then this case really ought to work too...
catchsql {
SELECT max(value1), (SELECT sum(value2=max(value1)) FROM t2)
FROM t1
GROUP BY id1;
}
} {1 {misuse of aggregate function max()}}
do_test aggnested-3.16 {
db eval {
SELECT max(value1), (SELECT sum(value2=value1) FROM t2)
FROM t1
GROUP BY id1;
}
} {12 2 34 4}
finish_test

View File

@ -0,0 +1,140 @@
# 2008 August 28
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
#
# This file implements regression tests for SQLite library. The
# focus of this script is correct code generation of aliased result-set
# values. See ticket #3343.
#
# $Id: alias.test,v 1.3 2009/04/23 13:22:44 drh Exp $
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
# Aliases are currently evaluated twice. We might try to change this
# in the future. But not now.
return
# A procedure to return a sequence of increasing integers.
#
namespace eval ::seq {
variable counter 0
proc value {args} {
variable counter
incr counter
return $counter
}
proc reset {} {
variable counter
set counter 0
}
}
do_test alias-1.1 {
db function sequence ::seq::value
db eval {
CREATE TABLE t1(x);
INSERT INTO t1 VALUES(9);
INSERT INTO t1 VALUES(8);
INSERT INTO t1 VALUES(7);
SELECT x, sequence() FROM t1;
}
} {9 1 8 2 7 3}
do_test alias-1.2 {
::seq::reset
db eval {
SELECT x, sequence() AS y FROM t1 WHERE y>0
}
} {9 1 8 2 7 3}
do_test alias-1.3 {
::seq::reset
db eval {
SELECT x, sequence() AS y FROM t1 WHERE y>0 AND y<99
}
} {9 1 8 2 7 3}
do_test alias-1.4 {
::seq::reset
db eval {
SELECT x, sequence() AS y FROM t1 WHERE y>0 AND y<99 AND y!=55
}
} {9 1 8 2 7 3}
do_test alias-1.5 {
::seq::reset
db eval {
SELECT x, sequence() AS y FROM t1
WHERE y>0 AND y<99 AND y!=55 AND y NOT IN (56,57,58)
AND y NOT LIKE 'abc%' AND y%10==2
}
} {8 2}
do_test alias-1.6 {
::seq::reset
db eval {
SELECT x, sequence() AS y FROM t1 WHERE y BETWEEN 0 AND 99
}
} {9 1 8 2 7 3}
#do_test alias-1.7 {
# ::seq::reset
# db eval {
# SELECT x, sequence() AS y FROM t1 WHERE y IN (55,66,3)
# }
#} {7 3}
do_test alias-1.8 {
::seq::reset
db eval {
SELECT x, 1-sequence() AS y FROM t1 ORDER BY y
}
} {7 -2 8 -1 9 0}
do_test alias-1.9 {
::seq::reset
db eval {
SELECT x, sequence() AS y FROM t1 ORDER BY -y
}
} {7 3 8 2 9 1}
do_test alias-1.10 {
::seq::reset
db eval {
SELECT x, sequence() AS y FROM t1 ORDER BY x%2, y
}
} {8 2 9 1 7 3}
unset -nocomplain random_int_list
set random_int_list [db eval {
SELECT random()&2147483647 AS r FROM t1, t1, t1, t1 ORDER BY r
}]
do_test alias-1.11 {
lsort -integer $::random_int_list
} $random_int_list
do_test alias-2.1 {
db eval {
SELECT 4 UNION SELECT 1 ORDER BY 1
}
} {1 4}
do_test alias-2.2 {
db eval {
SELECT 4 UNION SELECT 1 UNION SELECT 9 ORDER BY 1
}
} {1 4 9}
if 0 {
# Aliases in the GROUP BY clause cause the expression to be evaluated
# twice in the current implementation. This might change in the future.
#
do_test alias-3.1 {
::seq::reset
db eval {
SELECT sequence(*) AS y, count(*) AS z FROM t1 GROUP BY y ORDER BY z, y
}
} {1 1 2 1 3 1}
}
finish_test

View File

@ -0,0 +1,50 @@
# 2001 September 15
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file runs all tests.
#
set testdir [file dirname $argv0]
source $testdir/permutations.test
run_test_suite full
run_test_suite no_optimization
run_test_suite memsubsys1
run_test_suite memsubsys2
run_test_suite singlethread
run_test_suite multithread
run_test_suite onefile
run_test_suite utf16
run_test_suite exclusive
run_test_suite persistent_journal
run_test_suite persistent_journal_error
run_test_suite no_journal
run_test_suite no_journal_error
run_test_suite autovacuum_ioerr
run_test_suite no_mutex_try
run_test_suite fullmutex
run_test_suite journaltest
run_test_suite inmemory_journal
run_test_suite pcache0
run_test_suite pcache10
run_test_suite pcache50
run_test_suite pcache90
run_test_suite pcache100
run_test_suite prepare
run_test_suite mmap
if {$::tcl_platform(platform)=="unix"} {
ifcapable !default_autovacuum {
run_test_suite autovacuum_crash
}
}
finish_test

View File

@ -0,0 +1,925 @@
# 2004 November 10
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#*************************************************************************
# This file implements regression tests for SQLite library. The
# focus of this script is testing the ALTER TABLE statement.
#
# $Id: alter.test,v 1.32 2009/03/24 15:08:10 drh Exp $
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
# If SQLITE_OMIT_ALTERTABLE is defined, omit this file.
ifcapable !altertable {
finish_test
return
}
#----------------------------------------------------------------------
# Test organization:
#
# alter-1.1.* - alter-1.7.*: Basic tests of ALTER TABLE, including tables
# with implicit and explicit indices. These tests came from an earlier
# fork of SQLite that also supported ALTER TABLE.
# alter-1.8.*: Tests for ALTER TABLE when the table resides in an
# attached database.
# alter-1.9.*: Tests for ALTER TABLE when there is whitespace between the
# table name and left parenthesis token, i.e.:
# "CREATE TABLE abc (a, b, c);"
# alter-2.*: Test error conditions and messages.
# alter-3.*: Test ALTER TABLE on tables that have TRIGGERs attached to them.
# alter-4.*: Test ALTER TABLE on tables that have AUTOINCREMENT fields.
# ...
# alter-12.*: Test ALTER TABLE on views.
#
# Create some tables to rename. Be sure to include some TEMP tables
# and some tables with odd names.
#
do_test alter-1.1 {
ifcapable tempdb {
set ::temp TEMP
} else {
set ::temp {}
}
execsql [subst -nocommands {
CREATE TABLE t1(a,b);
INSERT INTO t1 VALUES(1,2);
CREATE TABLE [t1'x1](c UNIQUE, b PRIMARY KEY);
INSERT INTO [t1'x1] VALUES(3,4);
CREATE INDEX t1i1 ON T1(B);
CREATE INDEX t1i2 ON t1(a,b);
CREATE INDEX i3 ON [t1'x1](b,c);
CREATE $::temp TABLE "temp table"(e,f,g UNIQUE);
CREATE INDEX i2 ON [temp table](f);
INSERT INTO [temp table] VALUES(5,6,7);
}]
execsql {
SELECT 't1', * FROM t1;
SELECT 't1''x1', * FROM "t1'x1";
SELECT * FROM [temp table];
}
} {t1 1 2 t1'x1 3 4 5 6 7}
do_test alter-1.2 {
execsql [subst {
CREATE $::temp TABLE objlist(type, name, tbl_name);
INSERT INTO objlist SELECT type, name, tbl_name
FROM sqlite_master WHERE NAME!='objlist';
}]
ifcapable tempdb {
execsql {
INSERT INTO objlist SELECT type, name, tbl_name
FROM sqlite_temp_master WHERE NAME!='objlist';
}
}
execsql {
SELECT type, name, tbl_name FROM objlist ORDER BY tbl_name, type desc, name;
}
} [list \
table t1 t1 \
index t1i1 t1 \
index t1i2 t1 \
table t1'x1 t1'x1 \
index i3 t1'x1 \
index {sqlite_autoindex_t1'x1_1} t1'x1 \
index {sqlite_autoindex_t1'x1_2} t1'x1 \
table {temp table} {temp table} \
index i2 {temp table} \
index {sqlite_autoindex_temp table_1} {temp table} \
]
# Make some changes
#
integrity_check alter-1.3.0
do_test alter-1.3 {
execsql {
ALTER TABLE [T1] RENAME to [-t1-];
ALTER TABLE "t1'x1" RENAME TO T2;
ALTER TABLE [temp table] RENAME to TempTab;
}
} {}
integrity_check alter-1.3.1
do_test alter-1.4 {
execsql {
SELECT 't1', * FROM [-t1-];
SELECT 't2', * FROM t2;
SELECT * FROM temptab;
}
} {t1 1 2 t2 3 4 5 6 7}
do_test alter-1.5 {
execsql {
DELETE FROM objlist;
INSERT INTO objlist SELECT type, name, tbl_name
FROM sqlite_master WHERE NAME!='objlist';
}
catchsql {
INSERT INTO objlist SELECT type, name, tbl_name
FROM sqlite_temp_master WHERE NAME!='objlist';
}
execsql {
SELECT type, name, tbl_name FROM objlist ORDER BY tbl_name, type desc, name;
}
} [list \
table -t1- -t1- \
index t1i1 -t1- \
index t1i2 -t1- \
table T2 T2 \
index i3 T2 \
index {sqlite_autoindex_T2_1} T2 \
index {sqlite_autoindex_T2_2} T2 \
table {TempTab} {TempTab} \
index i2 {TempTab} \
index {sqlite_autoindex_TempTab_1} {TempTab} \
]
# Make sure the changes persist after restarting the database.
# (The TEMP table will not persist, of course.)
#
ifcapable tempdb {
do_test alter-1.6 {
db close
sqlite3 db test.db
set DB [sqlite3_connection_pointer db]
execsql {
CREATE TEMP TABLE objlist(type, name, tbl_name);
INSERT INTO objlist SELECT type, name, tbl_name FROM sqlite_master;
INSERT INTO objlist
SELECT type, name, tbl_name FROM sqlite_temp_master
WHERE NAME!='objlist';
SELECT type, name, tbl_name FROM objlist
ORDER BY tbl_name, type desc, name;
}
} [list \
table -t1- -t1- \
index t1i1 -t1- \
index t1i2 -t1- \
table T2 T2 \
index i3 T2 \
index {sqlite_autoindex_T2_1} T2 \
index {sqlite_autoindex_T2_2} T2 \
]
} else {
execsql {
DROP TABLE TempTab;
}
}
# Create bogus application-defined functions for functions used
# internally by ALTER TABLE, to ensure that ALTER TABLE falls back
# to the built-in functions.
#
proc failing_app_func {args} {error "bad function"}
do_test alter-1.7-prep {
db func substr failing_app_func
db func like failing_app_func
db func sqlite_rename_table failing_app_func
db func sqlite_rename_trigger failing_app_func
db func sqlite_rename_parent failing_app_func
catchsql {SELECT substr(name,1,3) FROM sqlite_master}
} {1 {bad function}}
# Make sure the ALTER TABLE statements work with the
# non-callback API
#
do_test alter-1.7 {
stepsql $DB {
ALTER TABLE [-t1-] RENAME to [*t1*];
ALTER TABLE T2 RENAME TO [<t2>];
}
execsql {
DELETE FROM objlist;
INSERT INTO objlist SELECT type, name, tbl_name
FROM sqlite_master WHERE NAME!='objlist';
}
catchsql {
INSERT INTO objlist SELECT type, name, tbl_name
FROM sqlite_temp_master WHERE NAME!='objlist';
}
execsql {
SELECT type, name, tbl_name FROM objlist ORDER BY tbl_name, type desc, name;
}
} [list \
table *t1* *t1* \
index t1i1 *t1* \
index t1i2 *t1* \
table <t2> <t2> \
index i3 <t2> \
index {sqlite_autoindex_<t2>_1} <t2> \
index {sqlite_autoindex_<t2>_2} <t2> \
]
# Check that ALTER TABLE works on attached databases.
#
ifcapable attach {
do_test alter-1.8.1 {
forcedelete test2.db
forcedelete test2.db-journal
execsql {
ATTACH 'test2.db' AS aux;
}
} {}
do_test alter-1.8.2 {
execsql {
CREATE TABLE t4(a PRIMARY KEY, b, c);
CREATE TABLE aux.t4(a PRIMARY KEY, b, c);
CREATE INDEX i4 ON t4(b);
CREATE INDEX aux.i4 ON t4(b);
}
} {}
do_test alter-1.8.3 {
execsql {
INSERT INTO t4 VALUES('main', 'main', 'main');
INSERT INTO aux.t4 VALUES('aux', 'aux', 'aux');
SELECT * FROM t4 WHERE a = 'main';
}
} {main main main}
do_test alter-1.8.4 {
execsql {
ALTER TABLE t4 RENAME TO t5;
SELECT * FROM t4 WHERE a = 'aux';
}
} {aux aux aux}
do_test alter-1.8.5 {
execsql {
SELECT * FROM t5;
}
} {main main main}
do_test alter-1.8.6 {
execsql {
SELECT * FROM t5 WHERE b = 'main';
}
} {main main main}
do_test alter-1.8.7 {
execsql {
ALTER TABLE aux.t4 RENAME TO t5;
SELECT * FROM aux.t5 WHERE b = 'aux';
}
} {aux aux aux}
}
do_test alter-1.9.1 {
execsql {
CREATE TABLE tbl1 (a, b, c);
INSERT INTO tbl1 VALUES(1, 2, 3);
}
} {}
do_test alter-1.9.2 {
execsql {
SELECT * FROM tbl1;
}
} {1 2 3}
do_test alter-1.9.3 {
execsql {
ALTER TABLE tbl1 RENAME TO tbl2;
SELECT * FROM tbl2;
}
} {1 2 3}
do_test alter-1.9.4 {
execsql {
DROP TABLE tbl2;
}
} {}
# Test error messages
#
do_test alter-2.1 {
catchsql {
ALTER TABLE none RENAME TO hi;
}
} {1 {no such table: none}}
do_test alter-2.2 {
execsql {
CREATE TABLE t3(p,q,r);
}
catchsql {
ALTER TABLE [<t2>] RENAME TO t3;
}
} {1 {there is already another table or index with this name: t3}}
do_test alter-2.3 {
catchsql {
ALTER TABLE [<t2>] RENAME TO i3;
}
} {1 {there is already another table or index with this name: i3}}
do_test alter-2.4 {
catchsql {
ALTER TABLE SqLiTe_master RENAME TO master;
}
} {1 {table sqlite_master may not be altered}}
do_test alter-2.5 {
catchsql {
ALTER TABLE t3 RENAME TO sqlite_t3;
}
} {1 {object name reserved for internal use: sqlite_t3}}
do_test alter-2.6 {
catchsql {
ALTER TABLE t3 ADD COLUMN (ALTER TABLE t3 ADD COLUMN);
}
} {1 {near "(": syntax error}}
# If this compilation does not include triggers, omit the alter-3.* tests.
ifcapable trigger {
#-----------------------------------------------------------------------
# Tests alter-3.* test ALTER TABLE on tables that have triggers.
#
# alter-3.1.*: ALTER TABLE with triggers.
# alter-3.2.*: Test that the ON keyword cannot be used as a database,
# table or column name unquoted. This is done because part of the
# ALTER TABLE code (specifically the implementation of SQL function
# "sqlite_alter_trigger") will break in this case.
# alter-3.3.*: ALTER TABLE with TEMP triggers (todo).
#
# An SQL user-function for triggers to fire, so that we know they
# are working.
proc trigfunc {args} {
set ::TRIGGER $args
}
db func trigfunc trigfunc
do_test alter-3.1.0 {
execsql {
CREATE TABLE t6(a, b, c);
-- Different case for the table name in the trigger.
CREATE TRIGGER trig1 AFTER INSERT ON T6 BEGIN
SELECT trigfunc('trig1', new.a, new.b, new.c);
END;
}
} {}
do_test alter-3.1.1 {
execsql {
INSERT INTO t6 VALUES(1, 2, 3);
}
set ::TRIGGER
} {trig1 1 2 3}
do_test alter-3.1.2 {
execsql {
ALTER TABLE t6 RENAME TO t7;
INSERT INTO t7 VALUES(4, 5, 6);
}
set ::TRIGGER
} {trig1 4 5 6}
do_test alter-3.1.3 {
execsql {
DROP TRIGGER trig1;
}
} {}
do_test alter-3.1.4 {
execsql {
CREATE TRIGGER trig2 AFTER INSERT ON main.t7 BEGIN
SELECT trigfunc('trig2', new.a, new.b, new.c);
END;
INSERT INTO t7 VALUES(1, 2, 3);
}
set ::TRIGGER
} {trig2 1 2 3}
do_test alter-3.1.5 {
execsql {
ALTER TABLE t7 RENAME TO t8;
INSERT INTO t8 VALUES(4, 5, 6);
}
set ::TRIGGER
} {trig2 4 5 6}
do_test alter-3.1.6 {
execsql {
DROP TRIGGER trig2;
}
} {}
do_test alter-3.1.7 {
execsql {
CREATE TRIGGER trig3 AFTER INSERT ON main.'t8'BEGIN
SELECT trigfunc('trig3', new.a, new.b, new.c);
END;
INSERT INTO t8 VALUES(1, 2, 3);
}
set ::TRIGGER
} {trig3 1 2 3}
do_test alter-3.1.8 {
execsql {
ALTER TABLE t8 RENAME TO t9;
INSERT INTO t9 VALUES(4, 5, 6);
}
set ::TRIGGER
} {trig3 4 5 6}
# Make sure "ON" cannot be used as a database, table or column name without
# quoting. Otherwise the sqlite_alter_trigger() function might not work.
forcedelete test3.db
forcedelete test3.db-journal
ifcapable attach {
do_test alter-3.2.1 {
catchsql {
ATTACH 'test3.db' AS ON;
}
} {1 {near "ON": syntax error}}
do_test alter-3.2.2 {
catchsql {
ATTACH 'test3.db' AS 'ON';
}
} {0 {}}
do_test alter-3.2.3 {
catchsql {
CREATE TABLE ON.t1(a, b, c);
}
} {1 {near "ON": syntax error}}
do_test alter-3.2.4 {
catchsql {
CREATE TABLE 'ON'.t1(a, b, c);
}
} {0 {}}
do_test alter-3.2.4 {
catchsql {
CREATE TABLE 'ON'.ON(a, b, c);
}
} {1 {near "ON": syntax error}}
do_test alter-3.2.5 {
catchsql {
CREATE TABLE 'ON'.'ON'(a, b, c);
}
} {0 {}}
}
do_test alter-3.2.6 {
catchsql {
CREATE TABLE t10(a, ON, c);
}
} {1 {near "ON": syntax error}}
do_test alter-3.2.7 {
catchsql {
CREATE TABLE t10(a, 'ON', c);
}
} {0 {}}
do_test alter-3.2.8 {
catchsql {
CREATE TRIGGER trig4 AFTER INSERT ON ON BEGIN SELECT 1; END;
}
} {1 {near "ON": syntax error}}
ifcapable attach {
do_test alter-3.2.9 {
catchsql {
CREATE TRIGGER 'on'.trig4 AFTER INSERT ON 'ON' BEGIN SELECT 1; END;
}
} {0 {}}
}
do_test alter-3.2.10 {
execsql {
DROP TABLE t10;
}
} {}
do_test alter-3.3.1 {
execsql [subst {
CREATE TABLE tbl1(a, b, c);
CREATE $::temp TRIGGER trig1 AFTER INSERT ON tbl1 BEGIN
SELECT trigfunc('trig1', new.a, new.b, new.c);
END;
}]
} {}
do_test alter-3.3.2 {
execsql {
INSERT INTO tbl1 VALUES('a', 'b', 'c');
}
set ::TRIGGER
} {trig1 a b c}
do_test alter-3.3.3 {
execsql {
ALTER TABLE tbl1 RENAME TO tbl2;
INSERT INTO tbl2 VALUES('d', 'e', 'f');
}
set ::TRIGGER
} {trig1 d e f}
do_test alter-3.3.4 {
execsql [subst {
CREATE $::temp TRIGGER trig2 AFTER UPDATE ON tbl2 BEGIN
SELECT trigfunc('trig2', new.a, new.b, new.c);
END;
}]
} {}
do_test alter-3.3.5 {
execsql {
ALTER TABLE tbl2 RENAME TO tbl3;
INSERT INTO tbl3 VALUES('g', 'h', 'i');
}
set ::TRIGGER
} {trig1 g h i}
do_test alter-3.3.6 {
execsql {
UPDATE tbl3 SET a = 'G' where a = 'g';
}
set ::TRIGGER
} {trig2 G h i}
do_test alter-3.3.7 {
execsql {
DROP TABLE tbl3;
}
} {}
ifcapable tempdb {
do_test alter-3.3.8 {
execsql {
SELECT * FROM sqlite_temp_master WHERE type = 'trigger';
}
} {}
}
} ;# ifcapable trigger
# If the build does not include AUTOINCREMENT fields, omit alter-4.*.
ifcapable autoinc {
do_test alter-4.1 {
execsql {
CREATE TABLE tbl1(a INTEGER PRIMARY KEY AUTOINCREMENT);
INSERT INTO tbl1 VALUES(10);
}
} {}
do_test alter-4.2 {
execsql {
INSERT INTO tbl1 VALUES(NULL);
SELECT a FROM tbl1;
}
} {10 11}
do_test alter-4.3 {
execsql {
ALTER TABLE tbl1 RENAME TO tbl2;
DELETE FROM tbl2;
INSERT INTO tbl2 VALUES(NULL);
SELECT a FROM tbl2;
}
} {12}
do_test alter-4.4 {
execsql {
DROP TABLE tbl2;
}
} {}
} ;# ifcapable autoinc
# Test that it is Ok to execute an ALTER TABLE immediately after
# opening a database.
do_test alter-5.1 {
execsql {
CREATE TABLE tbl1(a, b, c);
INSERT INTO tbl1 VALUES('x', 'y', 'z');
}
} {}
do_test alter-5.2 {
sqlite3 db2 test.db
execsql {
ALTER TABLE tbl1 RENAME TO tbl2;
SELECT * FROM tbl2;
} db2
} {x y z}
do_test alter-5.3 {
db2 close
} {}
foreach tblname [execsql {
SELECT name FROM sqlite_master
WHERE type='table' AND name NOT GLOB 'sqlite*'
}] {
execsql "DROP TABLE \"$tblname\""
}
set ::tbl_name "abc\uABCDdef"
do_test alter-6.1 {
string length $::tbl_name
} {7}
do_test alter-6.2 {
execsql "
CREATE TABLE ${tbl_name}(a, b, c);
"
set ::oid [execsql {SELECT max(oid) FROM sqlite_master}]
execsql "
SELECT sql FROM sqlite_master WHERE oid = $::oid;
"
} "{CREATE TABLE ${::tbl_name}(a, b, c)}"
execsql "
SELECT * FROM ${::tbl_name}
"
set ::tbl_name2 "abcXdef"
do_test alter-6.3 {
execsql "
ALTER TABLE $::tbl_name RENAME TO $::tbl_name2
"
execsql "
SELECT sql FROM sqlite_master WHERE oid = $::oid
"
} "{CREATE TABLE \"${::tbl_name2}\"(a, b, c)}"
do_test alter-6.4 {
execsql "
ALTER TABLE $::tbl_name2 RENAME TO $::tbl_name
"
execsql "
SELECT sql FROM sqlite_master WHERE oid = $::oid
"
} "{CREATE TABLE \"${::tbl_name}\"(a, b, c)}"
set ::col_name ghi\1234\jkl
do_test alter-6.5 {
execsql "
ALTER TABLE $::tbl_name ADD COLUMN $::col_name VARCHAR
"
execsql "
SELECT sql FROM sqlite_master WHERE oid = $::oid
"
} "{CREATE TABLE \"${::tbl_name}\"(a, b, c, $::col_name VARCHAR)}"
set ::col_name2 B\3421\A
do_test alter-6.6 {
db close
sqlite3 db test.db
execsql "
ALTER TABLE $::tbl_name ADD COLUMN $::col_name2
"
execsql "
SELECT sql FROM sqlite_master WHERE oid = $::oid
"
} "{CREATE TABLE \"${::tbl_name}\"(a, b, c, $::col_name VARCHAR, $::col_name2)}"
do_test alter-6.7 {
execsql "
INSERT INTO ${::tbl_name} VALUES(1, 2, 3, 4, 5);
SELECT $::col_name, $::col_name2 FROM $::tbl_name;
"
} {4 5}
# Ticket #1665: Make sure ALTER TABLE ADD COLUMN works on a table
# that includes a COLLATE clause.
#
do_realnum_test alter-7.1 {
execsql {
CREATE TABLE t1(a TEXT COLLATE BINARY);
ALTER TABLE t1 ADD COLUMN b INTEGER COLLATE NOCASE;
INSERT INTO t1 VALUES(1,'-2');
INSERT INTO t1 VALUES(5.4e-08,'5.4e-08');
SELECT typeof(a), a, typeof(b), b FROM t1;
}
} {text 1 integer -2 text 5.4e-08 real 5.4e-08}
# Make sure that when a column is added by ALTER TABLE ADD COLUMN and has
# a default value that the default value is used by aggregate functions.
#
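# (The three existing rows were written before column b existed, so they
# carry no stored value for it; the DEFAULT 9 is supplied when the rows are
# read, which is why sum(b) comes out as 3*9 = 27.)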
do_test alter-8.1 {
execsql {
CREATE TABLE t2(a INTEGER);
INSERT INTO t2 VALUES(1);
INSERT INTO t2 VALUES(1);
INSERT INTO t2 VALUES(2);
ALTER TABLE t2 ADD COLUMN b INTEGER DEFAULT 9;
SELECT sum(b) FROM t2;
}
} {27}
do_test alter-8.2 {
execsql {
SELECT a, sum(b) FROM t2 GROUP BY a;
}
} {1 18 2 9}
#--------------------------------------------------------------------------
# alter-9.X - Special test: Make sure the sqlite_rename_trigger() and
# rename_table() functions do not crash when handed bad input.
#
ifcapable trigger {
do_test alter-9.1 {
execsql {SELECT SQLITE_RENAME_TRIGGER(0,0)}
} {{}}
}
do_test alter-9.2 {
execsql {
SELECT SQLITE_RENAME_TABLE(0,0);
SELECT SQLITE_RENAME_TABLE(10,20);
SELECT SQLITE_RENAME_TABLE('foo', 'foo');
}
} {{} {} {}}
#------------------------------------------------------------------------
# alter-10.X - Make sure ALTER TABLE works with multi-byte UTF-8 characters
# in the names.
#
do_test alter-10.1 {
execsql "CREATE TABLE xyz(x UNIQUE)"
execsql "ALTER TABLE xyz RENAME TO xyz\u1234abc"
execsql {SELECT name FROM sqlite_master WHERE name GLOB 'xyz*'}
} [list xyz\u1234abc]
do_test alter-10.2 {
execsql {SELECT name FROM sqlite_master WHERE name GLOB 'sqlite_autoindex*'}
} [list sqlite_autoindex_xyz\u1234abc_1]
do_test alter-10.3 {
execsql "ALTER TABLE xyz\u1234abc RENAME TO xyzabc"
execsql {SELECT name FROM sqlite_master WHERE name GLOB 'xyz*'}
} [list xyzabc]
do_test alter-10.4 {
execsql {SELECT name FROM sqlite_master WHERE name GLOB 'sqlite_autoindex*'}
} [list sqlite_autoindex_xyzabc_1]
do_test alter-11.1 {
sqlite3_exec db {CREATE TABLE t11(%c6%c6)}
execsql {
ALTER TABLE t11 ADD COLUMN abc;
}
catchsql {
ALTER TABLE t11 ADD COLUMN abc;
}
} {1 {duplicate column name: abc}}
set isutf16 [regexp 16 [db one {PRAGMA encoding}]]
if {!$isutf16} {
do_test alter-11.2 {
execsql {INSERT INTO t11 VALUES(1,2)}
sqlite3_exec db {SELECT %c6%c6 AS xyz, abc FROM t11}
} {0 {xyz abc 1 2}}
}
do_test alter-11.3 {
sqlite3_exec db {CREATE TABLE t11b("%81%82%83" text)}
execsql {
ALTER TABLE t11b ADD COLUMN abc;
}
catchsql {
ALTER TABLE t11b ADD COLUMN abc;
}
} {1 {duplicate column name: abc}}
if {!$isutf16} {
do_test alter-11.4 {
execsql {INSERT INTO t11b VALUES(3,4)}
sqlite3_exec db {SELECT %81%82%83 AS xyz, abc FROM t11b}
} {0 {xyz abc 3 4}}
do_test alter-11.5 {
sqlite3_exec db {SELECT [%81%82%83] AS xyz, abc FROM t11b}
} {0 {xyz abc 3 4}}
do_test alter-11.6 {
sqlite3_exec db {SELECT "%81%82%83" AS xyz, abc FROM t11b}
} {0 {xyz abc 3 4}}
}
do_test alter-11.7 {
sqlite3_exec db {CREATE TABLE t11c(%81%82%83 text)}
execsql {
ALTER TABLE t11c ADD COLUMN abc;
}
catchsql {
ALTER TABLE t11c ADD COLUMN abc;
}
} {1 {duplicate column name: abc}}
if {!$isutf16} {
do_test alter-11.8 {
execsql {INSERT INTO t11c VALUES(5,6)}
sqlite3_exec db {SELECT %81%82%83 AS xyz, abc FROM t11c}
} {0 {xyz abc 5 6}}
do_test alter-11.9 {
sqlite3_exec db {SELECT [%81%82%83] AS xyz, abc FROM t11c}
} {0 {xyz abc 5 6}}
do_test alter-11.10 {
sqlite3_exec db {SELECT "%81%82%83" AS xyz, abc FROM t11c}
} {0 {xyz abc 5 6}}
}
do_test alter-12.1 {
execsql {
CREATE TABLE t12(a, b, c);
CREATE VIEW v1 AS SELECT * FROM t12;
}
} {}
do_test alter-12.2 {
catchsql {
ALTER TABLE v1 RENAME TO v2;
}
} {1 {view v1 may not be altered}}
do_test alter-12.3 {
execsql { SELECT * FROM v1; }
} {}
do_test alter-12.4 {
db close
sqlite3 db test.db
execsql { SELECT * FROM v1; }
} {}
do_test alter-12.5 {
catchsql {
ALTER TABLE v1 ADD COLUMN new_column;
}
} {1 {Cannot add a column to a view}}
# Ticket #3102:
# Verify that comments do not interfere with the table rename
# algorithm.
#
do_test alter-13.1 {
execsql {
CREATE TABLE /* hi */ t3102a(x);
CREATE TABLE t3102b -- comment
(y);
CREATE INDEX t3102c ON t3102a(x);
SELECT name FROM sqlite_master WHERE name GLOB 't3102*' ORDER BY 1;
}
} {t3102a t3102b t3102c}
do_test alter-13.2 {
execsql {
ALTER TABLE t3102a RENAME TO t3102a_rename;
SELECT name FROM sqlite_master WHERE name GLOB 't3102*' ORDER BY 1;
}
} {t3102a_rename t3102b t3102c}
do_test alter-13.3 {
execsql {
ALTER TABLE t3102b RENAME TO t3102b_rename;
SELECT name FROM sqlite_master WHERE name GLOB 't3102*' ORDER BY 1;
}
} {t3102a_rename t3102b_rename t3102c}
# Ticket #3651
do_test alter-14.1 {
catchsql {
CREATE TABLE t3651(a UNIQUE);
ALTER TABLE t3651 ADD COLUMN b UNIQUE;
}
} {1 {Cannot add a UNIQUE column}}
do_test alter-14.2 {
catchsql {
ALTER TABLE t3651 ADD COLUMN b PRIMARY KEY;
}
} {1 {Cannot add a PRIMARY KEY column}}
#-------------------------------------------------------------------------
# Test that it is not possible to use ALTER TABLE on any system table.
#
set system_table_list {1 sqlite_master}
catchsql ANALYZE
ifcapable analyze { lappend system_table_list 2 sqlite_stat1 }
ifcapable stat3 { lappend system_table_list 3 sqlite_stat3 }
ifcapable stat4 { lappend system_table_list 4 sqlite_stat4 }
foreach {tn tbl} $system_table_list {
do_test alter-15.$tn.1 {
catchsql "ALTER TABLE $tbl RENAME TO xyz"
} [list 1 "table $tbl may not be altered"]
do_test alter-15.$tn.2 {
catchsql "ALTER TABLE $tbl ADD COLUMN xyz"
} [list 1 "table $tbl may not be altered"]
}
#------------------------------------------------------------------------
# Verify that ALTER TABLE works on tables with the WITHOUT rowid option.
#
do_execsql_test alter-16.1 {
CREATE TABLE t16a(a TEXT, b REAL, c INT, PRIMARY KEY(a,b)) WITHOUT rowid;
INSERT INTO t16a VALUES('abc',1.25,99);
ALTER TABLE t16a ADD COLUMN d TEXT DEFAULT 'xyzzy';
INSERT INTO t16a VALUES('cba',5.5,98,'fizzle');
SELECT * FROM t16a ORDER BY a;
} {abc 1.25 99 xyzzy cba 5.5 98 fizzle}
do_execsql_test alter-16.2 {
ALTER TABLE t16a RENAME TO t16a_rn;
SELECT * FROM t16a_rn ORDER BY a;
} {abc 1.25 99 xyzzy cba 5.5 98 fizzle}
#-------------------------------------------------------------------------
# Verify that NULL values into the internal-use-only sqlite_rename_*()
# functions do not cause problems.
#
do_execsql_test alter-17.1 {
SELECT sqlite_rename_table('CREATE TABLE xyz(a,b,c)','abc');
} {{CREATE TABLE "abc"(a,b,c)}}
do_execsql_test alter-17.2 {
SELECT sqlite_rename_table('CREATE TABLE xyz(a,b,c)',NULL);
} {{CREATE TABLE "(NULL)"(a,b,c)}}
do_execsql_test alter-17.3 {
SELECT sqlite_rename_table(NULL,'abc');
} {{}}
do_execsql_test alter-17.4 {
SELECT sqlite_rename_trigger('CREATE TRIGGER r1 ON xyz WHEN','abc');
} {{CREATE TRIGGER r1 ON "abc" WHEN}}
do_execsql_test alter-17.5 {
SELECT sqlite_rename_trigger('CREATE TRIGGER r1 ON xyz WHEN',NULL);
} {{CREATE TRIGGER r1 ON "(NULL)" WHEN}}
do_execsql_test alter-17.6 {
SELECT sqlite_rename_trigger(NULL,'abc');
} {{}}
do_execsql_test alter-17.7 {
SELECT sqlite_rename_parent('CREATE TABLE t1(a REFERENCES "xyzzy")',
'xyzzy','lmnop');
} {{CREATE TABLE t1(a REFERENCES "lmnop")}}
do_execsql_test alter-17.8 {
SELECT sqlite_rename_parent('CREATE TABLE t1(a REFERENCES "xyzzy")',
'xyzzy',NULL);
} {{CREATE TABLE t1(a REFERENCES "(NULL)")}}
do_execsql_test alter-17.9 {
SELECT sqlite_rename_parent('CREATE TABLE t1(a REFERENCES "xyzzy")',
NULL, 'lmnop');
} {{}}
do_execsql_test alter-17.10 {
SELECT sqlite_rename_parent(NULL,'abc','xyz');
} {{}}
do_execsql_test alter-17.11 {
SELECT sqlite_rename_parent('create references ''','abc','xyz');
} {{create references '}}
do_execsql_test alter-17.12 {
SELECT sqlite_rename_parent('create references "abc"123" ','abc','xyz');
} {{create references "xyz"123" }}
do_execsql_test alter-17.13 {
SELECT sqlite_rename_parent("references '''",'abc','xyz');
} {{references '''}}
finish_test

View File

@ -0,0 +1,468 @@
# 2005 February 18
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#*************************************************************************
# This file implements regression tests for SQLite library. The
# focus of this script is testing that SQLite can handle a subtle
# file format change that may be used in the future to implement
# "ALTER TABLE ... ADD COLUMN".
#
# $Id: alter2.test,v 1.14 2009/04/07 14:14:22 danielk1977 Exp $
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
# We have to have pragmas in order to do this test
ifcapable {!pragma} return
# Do not use a codec for tests in this file, as the database file is
# manipulated directly using tcl scripts. See proc [set_file_format].
#
do_not_use_codec
# The file format change affects the way row-records stored in tables (but
# not indices) are interpreted. Before version 3.1.3, a row-record for a
# table with N columns was guaranteed to contain exactly N fields. As
# of version 3.1.3, the record may contain up to N fields. In this case
# the M fields that are present are the values for the left-most M
# columns. The (N-M) rightmost columns contain NULL.
#
# If any records in the database contain fewer fields than their table
# has columns, then the file-format meta value should be set to (at least) 2.
#
# This procedure sets the value of the file-format in file 'test.db'
# to $newval. Also, the schema cookie is incremented.
#
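# (Byte offset 44 of the database file header holds the 4-byte schema
# format number; offset 40 holds the schema cookie.)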
proc set_file_format {newval} {
hexio_write test.db 44 [hexio_render_int32 $newval]
set schemacookie [hexio_get_int [hexio_read test.db 40 4]]
incr schemacookie
hexio_write test.db 40 [hexio_render_int32 $schemacookie]
return {}
}
# This procedure returns the value of the file-format in file 'test.db'.
#
proc get_file_format {{fname test.db}} {
return [hexio_get_int [hexio_read $fname 44 4]]
}
# This procedure sets the SQL statement stored for table $tbl in the
# sqlite_master table of file 'test.db' to $sql. Also set the file format
# to the supplied value. This is 2 if the added column has a default that is
# NULL, or 3 otherwise.
#
proc alter_table {tbl sql {file_format 2}} {
sqlite3 dbat test.db
set s [string map {' ''} $sql]
set t [string map {' ''} $tbl]
dbat eval [subst {
PRAGMA writable_schema = 1;
UPDATE sqlite_master SET sql = '$s' WHERE name = '$t' AND type = 'table';
PRAGMA writable_schema = 0;
}]
dbat close
set_file_format 2
}
# Create bogus application-defined functions for functions used
# internally by ALTER TABLE, to ensure that ALTER TABLE falls back
# to the built-in functions.
#
proc failing_app_func {args} {error "bad function"}
do_test alter2-1.0 {
db func substr failing_app_func
db func like failing_app_func
db func sqlite_rename_table failing_app_func
db func sqlite_rename_trigger failing_app_func
db func sqlite_rename_parent failing_app_func
catchsql {SELECT substr('abcdefg',1,3)}
} {1 {bad function}}
#-----------------------------------------------------------------------
# Some basic tests to make sure short rows are handled.
#
do_test alter2-1.1 {
execsql {
CREATE TABLE abc(a, b);
INSERT INTO abc VALUES(1, 2);
INSERT INTO abc VALUES(3, 4);
INSERT INTO abc VALUES(5, 6);
}
} {}
do_test alter2-1.2 {
# ALTER TABLE abc ADD COLUMN c;
alter_table abc {CREATE TABLE abc(a, b, c);}
} {}
do_test alter2-1.3 {
execsql {
SELECT * FROM abc;
}
} {1 2 {} 3 4 {} 5 6 {}}
do_test alter2-1.4 {
execsql {
UPDATE abc SET c = 10 WHERE a = 1;
SELECT * FROM abc;
}
} {1 2 10 3 4 {} 5 6 {}}
do_test alter2-1.5 {
execsql {
CREATE INDEX abc_i ON abc(c);
}
} {}
do_test alter2-1.6 {
execsql {
SELECT c FROM abc ORDER BY c;
}
} {{} {} 10}
do_test alter2-1.7 {
execsql {
SELECT * FROM abc WHERE c = 10;
}
} {1 2 10}
do_test alter2-1.8 {
execsql {
SELECT sum(a), c FROM abc GROUP BY c;
}
} {8 {} 1 10}
do_test alter2-1.9 {
# ALTER TABLE abc ADD COLUMN d;
alter_table abc {CREATE TABLE abc(a, b, c, d);}
if {[permutation] == "prepare"} { db cache flush }
execsql { SELECT * FROM abc; }
execsql {
UPDATE abc SET d = 11 WHERE c IS NULL AND a<4;
SELECT * FROM abc;
}
} {1 2 10 {} 3 4 {} 11 5 6 {} {}}
do_test alter2-1.10 {
execsql {
SELECT typeof(d) FROM abc;
}
} {null integer null}
do_test alter2-1.99 {
execsql {
DROP TABLE abc;
}
} {}
#-----------------------------------------------------------------------
# Test that views work when the underlying table structure is changed.
#
ifcapable view {
do_test alter2-2.1 {
execsql {
CREATE TABLE abc2(a, b, c);
INSERT INTO abc2 VALUES(1, 2, 10);
INSERT INTO abc2 VALUES(3, 4, NULL);
INSERT INTO abc2 VALUES(5, 6, NULL);
CREATE VIEW abc2_v AS SELECT * FROM abc2;
SELECT * FROM abc2_v;
}
} {1 2 10 3 4 {} 5 6 {}}
do_test alter2-2.2 {
# ALTER TABLE abc ADD COLUMN d;
alter_table abc2 {CREATE TABLE abc2(a, b, c, d);}
execsql {
SELECT * FROM abc2_v;
}
} {1 2 10 {} 3 4 {} {} 5 6 {} {}}
do_test alter2-2.3 {
execsql {
DROP TABLE abc2;
DROP VIEW abc2_v;
}
} {}
}
#-----------------------------------------------------------------------
# Test that triggers work when a short row is copied to the old.*
# trigger pseudo-table.
#
ifcapable trigger {
do_test alter2-3.1 {
execsql {
CREATE TABLE abc3(a, b);
CREATE TABLE blog(o, n);
CREATE TRIGGER abc3_t AFTER UPDATE OF b ON abc3 BEGIN
INSERT INTO blog VALUES(old.b, new.b);
END;
}
} {}
do_test alter2-3.2 {
execsql {
INSERT INTO abc3 VALUES(1, 4);
UPDATE abc3 SET b = 2 WHERE b = 4;
SELECT * FROM blog;
}
} {4 2}
do_test alter2-3.3 {
execsql {
INSERT INTO abc3 VALUES(3, 4);
INSERT INTO abc3 VALUES(5, 6);
}
alter_table abc3 {CREATE TABLE abc3(a, b, c);}
execsql {
SELECT * FROM abc3;
}
} {1 2 {} 3 4 {} 5 6 {}}
do_test alter2-3.4 {
execsql {
UPDATE abc3 SET b = b*2 WHERE a<4;
SELECT * FROM abc3;
}
} {1 4 {} 3 8 {} 5 6 {}}
do_test alter2-3.5 {
execsql {
SELECT * FROM blog;
}
} {4 2 2 4 4 8}
do_test alter2-3.6 {
execsql {
CREATE TABLE clog(o, n);
CREATE TRIGGER abc3_t2 AFTER UPDATE OF c ON abc3 BEGIN
INSERT INTO clog VALUES(old.c, new.c);
END;
UPDATE abc3 SET c = a*2;
SELECT * FROM clog;
}
} {{} 2 {} 6 {} 10}
} else {
execsql { CREATE TABLE abc3(a, b); }
}
#---------------------------------------------------------------------
# Check that an error occurs if the database is upgraded to a file
# format that SQLite does not support (in this case 5). Note: The
# file format is checked each time the schema is read, so changing the
# file format requires incrementing the schema cookie.
#
do_test alter2-4.1 {
db close
set_file_format 5
catch { sqlite3 db test.db }
set {} {}
} {}
do_test alter2-4.2 {
# We have to run two queries here because the Tcl interface uses
# sqlite3_prepare_v2(). In this case, the first query encounters an
# SQLITE_SCHEMA error. Then, when trying to recompile the statement, the
# "unsupported file format" error is encountered. So the error code
# returned is SQLITE_SCHEMA, not SQLITE_ERROR as required by the following
# test case.
#
# When the query is attempted a second time, the same error message is
# returned but the error code is SQLITE_ERROR, because the unsupported
# file format was detected during a call to sqlite3_prepare(), not
# sqlite3_step().
#
catchsql { SELECT * FROM sqlite_master; }
catchsql { SELECT * FROM sqlite_master; }
} {1 {unsupported file format}}
do_test alter2-4.3 {
sqlite3_errcode db
} {SQLITE_ERROR}
do_test alter2-4.4 {
set ::DB [sqlite3_connection_pointer db]
catchsql {
SELECT * FROM sqlite_master;
}
} {1 {unsupported file format}}
do_test alter2-4.5 {
sqlite3_errcode db
} {SQLITE_ERROR}
#---------------------------------------------------------------------
# Check that executing VACUUM on a file with file-format version 2
# resets the file format to 1.
#
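# (VACUUM rewrites the database, so afterwards the file carries the build's
# default schema format: 4 when compiled with SQLITE_DEFAULT_FILE_FORMAT=4,
# otherwise 1.)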
set default_file_format [expr $SQLITE_DEFAULT_FILE_FORMAT==4 ? 4 : 1]
ifcapable vacuum {
do_test alter2-5.1 {
set_file_format 2
db close
sqlite3 db test.db
execsql {SELECT 1 FROM sqlite_master LIMIT 1;}
get_file_format
} {2}
do_test alter2-5.2 {
execsql { VACUUM }
} {}
do_test alter2-5.3 {
get_file_format
} $default_file_format
}
#---------------------------------------------------------------------
# Test that when a database with file-format 2 is opened, new
# databases are still created with file-format 1.
#
do_test alter2-6.1 {
db close
set_file_format 2
sqlite3 db test.db
get_file_format
} {2}
ifcapable attach {
do_test alter2-6.2 {
forcedelete test2.db-journal
forcedelete test2.db
execsql {
ATTACH 'test2.db' AS aux;
CREATE TABLE aux.t1(a, b);
}
get_file_format test2.db
} $default_file_format
}
do_test alter2-6.3 {
execsql {
CREATE TABLE t1(a, b);
}
get_file_format
} {2}
#---------------------------------------------------------------------
# Test that types and values for columns added with default values
# other than NULL work with SELECT statements.
#
do_test alter2-7.1 {
execsql {
DROP TABLE t1;
CREATE TABLE t1(a);
INSERT INTO t1 VALUES(1);
INSERT INTO t1 VALUES(2);
INSERT INTO t1 VALUES(3);
INSERT INTO t1 VALUES(4);
SELECT * FROM t1;
}
} {1 2 3 4}
do_test alter2-7.2 {
set sql {CREATE TABLE t1(a, b DEFAULT '123', c INTEGER DEFAULT '123')}
alter_table t1 $sql 3
execsql {
SELECT * FROM t1 LIMIT 1;
}
} {1 123 123}
do_test alter2-7.3 {
execsql {
SELECT a, typeof(a), b, typeof(b), c, typeof(c) FROM t1 LIMIT 1;
}
} {1 integer 123 text 123 integer}
do_test alter2-7.4 {
execsql {
SELECT a, typeof(a), b, typeof(b), c, typeof(c) FROM t1 LIMIT 1;
}
} {1 integer 123 text 123 integer}
do_test alter2-7.5 {
set sql {CREATE TABLE t1(a, b DEFAULT -123.0, c VARCHAR(10) default 5)}
alter_table t1 $sql 3
execsql {
SELECT a, typeof(a), b, typeof(b), c, typeof(c) FROM t1 LIMIT 1;
}
} {1 integer -123 integer 5 text}
#-----------------------------------------------------------------------
# Test that UPDATE trigger tables work with default values, and that when
# a row is updated the default values are correctly transferred to the
# new row.
#
ifcapable trigger {
db function set_val {set ::val}
do_test alter2-8.1 {
execsql {
CREATE TRIGGER trig1 BEFORE UPDATE ON t1 BEGIN
SELECT set_val(
old.b||' '||typeof(old.b)||' '||old.c||' '||typeof(old.c)||' '||
new.b||' '||typeof(new.b)||' '||new.c||' '||typeof(new.c)
);
END;
}
list
} {}
}
do_test alter2-8.2 {
execsql {
UPDATE t1 SET c = 10 WHERE a = 1;
SELECT a, typeof(a), b, typeof(b), c, typeof(c) FROM t1 LIMIT 1;
}
} {1 integer -123 integer 10 text}
ifcapable trigger {
do_test alter2-8.3 {
set ::val
} {-123 integer 5 text -123 integer 10 text}
}
#-----------------------------------------------------------------------
# Test that DELETE trigger tables work with default values, and that when
# a row is deleted the default values are correctly transferred to the
# old.* values seen by the trigger.
#
ifcapable trigger {
do_test alter2-9.1 {
execsql {
CREATE TRIGGER trig2 BEFORE DELETE ON t1 BEGIN
SELECT set_val(
old.b||' '||typeof(old.b)||' '||old.c||' '||typeof(old.c)
);
END;
}
list
} {}
do_test alter2-9.2 {
execsql {
DELETE FROM t1 WHERE a = 2;
}
set ::val
} {-123 integer 5 text}
}
#-----------------------------------------------------------------------
# Test creating an index on a column added with a default value.
#
ifcapable bloblit {
do_test alter2-10.1 {
execsql {
CREATE TABLE t2(a);
INSERT INTO t2 VALUES('a');
INSERT INTO t2 VALUES('b');
INSERT INTO t2 VALUES('c');
INSERT INTO t2 VALUES('d');
}
alter_table t2 {CREATE TABLE t2(a, b DEFAULT X'ABCD', c DEFAULT NULL);} 3
catchsql {
SELECT * FROM sqlite_master;
}
execsql {
SELECT quote(a), quote(b), quote(c) FROM t2 LIMIT 1;
}
} {'a' X'ABCD' NULL}
do_test alter2-10.2 {
execsql {
CREATE INDEX i1 ON t2(b);
SELECT a FROM t2 WHERE b = X'ABCD';
}
} {a b c d}
do_test alter2-10.3 {
execsql {
DELETE FROM t2 WHERE a = 'c';
SELECT a FROM t2 WHERE b = X'ABCD';
}
} {a b d}
do_test alter2-10.4 {
execsql {
SELECT count(b) FROM t2 WHERE b = X'ABCD';
}
} {3}
}
finish_test

View File

@ -0,0 +1,397 @@
# 2005 February 19
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#*************************************************************************
# This file implements regression tests for SQLite library. The
# focus of this script is testing that SQLite can handle a subtle
# file format change that may be used in the future to implement
# "ALTER TABLE ... ADD COLUMN".
#
# $Id: alter3.test,v 1.11 2008/03/19 00:21:31 drh Exp $
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
# If SQLITE_OMIT_ALTERTABLE is defined, omit this file.
ifcapable !altertable {
finish_test
return
}
# Determine if there is a codec available on this test.
#
if {[catch {sqlite3 -has-codec} r] || $r} {
set has_codec 1
} else {
set has_codec 0
}
# Test Organisation:
# ------------------
#
# alter3-1.*: Test that ALTER TABLE correctly modifies the CREATE TABLE sql.
# alter3-2.*: Test error messages.
# alter3-3.*: Test adding columns with default value NULL.
# alter3-4.*: Test adding columns with default values other than NULL.
# alter3-5.*: Test adding columns to tables in ATTACHed databases.
# alter3-6.*: Test that temp triggers are not accidentally dropped.
# alter3-7.*: Test that VACUUM resets the file-format.
#
# This procedure returns the value of the file-format in file 'test.db'.
#
proc get_file_format {{fname test.db}} {
return [hexio_get_int [hexio_read $fname 44 4]]
}
do_test alter3-1.1 {
execsql {
PRAGMA legacy_file_format=ON;
CREATE TABLE abc(a, b, c);
SELECT sql FROM sqlite_master;
}
} {{CREATE TABLE abc(a, b, c)}}
do_test alter3-1.2 {
execsql {ALTER TABLE abc ADD d INTEGER;}
execsql {
SELECT sql FROM sqlite_master;
}
} {{CREATE TABLE abc(a, b, c, d INTEGER)}}
do_test alter3-1.3 {
execsql {ALTER TABLE abc ADD e}
execsql {
SELECT sql FROM sqlite_master;
}
} {{CREATE TABLE abc(a, b, c, d INTEGER, e)}}
do_test alter3-1.4 {
execsql {
CREATE TABLE main.t1(a, b);
ALTER TABLE t1 ADD c;
SELECT sql FROM sqlite_master WHERE tbl_name = 't1';
}
} {{CREATE TABLE t1(a, b, c)}}
do_test alter3-1.5 {
execsql {
ALTER TABLE t1 ADD d CHECK (a>d);
SELECT sql FROM sqlite_master WHERE tbl_name = 't1';
}
} {{CREATE TABLE t1(a, b, c, d CHECK (a>d))}}
ifcapable foreignkey {
do_test alter3-1.6 {
execsql {
CREATE TABLE t2(a, b, UNIQUE(a, b));
ALTER TABLE t2 ADD c REFERENCES t1(c) ;
SELECT sql FROM sqlite_master WHERE tbl_name = 't2' AND type = 'table';
}
} {{CREATE TABLE t2(a, b, c REFERENCES t1(c), UNIQUE(a, b))}}
}
do_test alter3-1.7 {
execsql {
CREATE TABLE t3(a, b, UNIQUE(a, b));
ALTER TABLE t3 ADD COLUMN c VARCHAR(10, 20);
SELECT sql FROM sqlite_master WHERE tbl_name = 't3' AND type = 'table';
}
} {{CREATE TABLE t3(a, b, c VARCHAR(10, 20), UNIQUE(a, b))}}
do_test alter3-1.99 {
catchsql {
# May not exist if foreign keys are omitted at compile time.
DROP TABLE t2;
}
execsql {
DROP TABLE abc;
DROP TABLE t1;
DROP TABLE t3;
}
} {}
do_test alter3-2.1 {
execsql {
CREATE TABLE t1(a, b);
}
catchsql {
ALTER TABLE t1 ADD c PRIMARY KEY;
}
} {1 {Cannot add a PRIMARY KEY column}}
do_test alter3-2.2 {
catchsql {
ALTER TABLE t1 ADD c UNIQUE
}
} {1 {Cannot add a UNIQUE column}}
do_test alter3-2.3 {
catchsql {
ALTER TABLE t1 ADD b VARCHAR(10)
}
} {1 {duplicate column name: b}}
do_test alter3-2.3 {
catchsql {
ALTER TABLE t1 ADD c NOT NULL;
}
} {1 {Cannot add a NOT NULL column with default value NULL}}
do_test alter3-2.4 {
catchsql {
ALTER TABLE t1 ADD c NOT NULL DEFAULT 10;
}
} {0 {}}
ifcapable view {
do_test alter3-2.5 {
execsql {
CREATE VIEW v1 AS SELECT * FROM t1;
}
catchsql {
alter table v1 add column d;
}
} {1 {Cannot add a column to a view}}
}
do_test alter3-2.6 {
catchsql {
alter table t1 add column d DEFAULT CURRENT_TIME;
}
} {1 {Cannot add a column with non-constant default}}
do_test alter3-2.99 {
execsql {
DROP TABLE t1;
}
} {}
do_test alter3-3.1 {
execsql {
CREATE TABLE t1(a, b);
INSERT INTO t1 VALUES(1, 100);
INSERT INTO t1 VALUES(2, 300);
SELECT * FROM t1;
}
} {1 100 2 300}
do_test alter3-3.1 {
execsql {
PRAGMA schema_version = 10;
}
} {}
do_test alter3-3.2 {
execsql {
ALTER TABLE t1 ADD c;
SELECT * FROM t1;
}
} {1 100 {} 2 300 {}}
if {!$has_codec} {
do_test alter3-3.3 {
get_file_format
} {4}
}
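# Any schema change, including ADD COLUMN, increments the schema version,
# so the value set to 10 above should now read back as 11.
#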
ifcapable schema_version {
do_test alter3-3.4 {
execsql {
PRAGMA schema_version;
}
} {11}
}
do_test alter3-4.1 {
db close
forcedelete test.db
set ::DB [sqlite3 db test.db]
execsql {
PRAGMA legacy_file_format=ON;
CREATE TABLE t1(a, b);
INSERT INTO t1 VALUES(1, 100);
INSERT INTO t1 VALUES(2, 300);
SELECT * FROM t1;
}
} {1 100 2 300}
do_test alter3-4.1 {
execsql {
PRAGMA schema_version = 20;
}
} {}
do_test alter3-4.2 {
execsql {
ALTER TABLE t1 ADD c DEFAULT 'hello world';
SELECT * FROM t1;
}
} {1 100 {hello world} 2 300 {hello world}}
if {!$has_codec} {
do_test alter3-4.3 {
get_file_format
} {4}
}
ifcapable schema_version {
do_test alter3-4.4 {
execsql {
PRAGMA schema_version;
}
} {21}
}
do_test alter3-4.99 {
execsql {
DROP TABLE t1;
}
} {}
ifcapable attach {
do_test alter3-5.1 {
forcedelete test2.db
forcedelete test2.db-journal
execsql {
CREATE TABLE t1(a, b);
INSERT INTO t1 VALUES(1, 'one');
INSERT INTO t1 VALUES(2, 'two');
ATTACH 'test2.db' AS aux;
CREATE TABLE aux.t1 AS SELECT * FROM t1;
PRAGMA aux.schema_version = 30;
SELECT sql FROM aux.sqlite_master;
}
} {{CREATE TABLE t1(a,b)}}
do_test alter3-5.2 {
execsql {
ALTER TABLE aux.t1 ADD COLUMN c VARCHAR(128);
SELECT sql FROM aux.sqlite_master;
}
} {{CREATE TABLE t1(a,b, c VARCHAR(128))}}
do_test alter3-5.3 {
execsql {
SELECT * FROM aux.t1;
}
} {1 one {} 2 two {}}
ifcapable schema_version {
do_test alter3-5.4 {
execsql {
PRAGMA aux.schema_version;
}
} {31}
}
if {!$has_codec} {
do_test alter3-5.5 {
list [get_file_format test2.db] [get_file_format]
} {4 4}
}
do_test alter3-5.6 {
execsql {
ALTER TABLE aux.t1 ADD COLUMN d DEFAULT 1000;
SELECT sql FROM aux.sqlite_master;
}
} {{CREATE TABLE t1(a,b, c VARCHAR(128), d DEFAULT 1000)}}
do_test alter3-5.7 {
execsql {
SELECT * FROM aux.t1;
}
} {1 one {} 1000 2 two {} 1000}
ifcapable schema_version {
do_test alter3-5.8 {
execsql {
PRAGMA aux.schema_version;
}
} {32}
}
do_test alter3-5.9 {
execsql {
SELECT * FROM t1;
}
} {1 one 2 two}
do_test alter3-5.99 {
execsql {
DROP TABLE aux.t1;
DROP TABLE t1;
}
} {}
}
#----------------------------------------------------------------
# Test that the table schema is correctly reloaded when a column
# is added to a table.
#
ifcapable trigger&&tempdb {
do_test alter3-6.1 {
execsql {
CREATE TABLE t1(a, b);
CREATE TABLE log(trig, a, b);
CREATE TRIGGER t1_a AFTER INSERT ON t1 BEGIN
INSERT INTO log VALUES('a', new.a, new.b);
END;
CREATE TEMP TRIGGER t1_b AFTER INSERT ON t1 BEGIN
INSERT INTO log VALUES('b', new.a, new.b);
END;
INSERT INTO t1 VALUES(1, 2);
SELECT * FROM log;
}
} {b 1 2 a 1 2}
do_test alter3-6.2 {
execsql {
ALTER TABLE t1 ADD COLUMN c DEFAULT 'c';
INSERT INTO t1(a, b) VALUES(3, 4);
SELECT * FROM log;
}
} {b 1 2 a 1 2 b 3 4 a 3 4}
}
if {!$has_codec} {
ifcapable vacuum {
do_test alter3-7.1 {
execsql {
VACUUM;
}
get_file_format
} {1}
do_test alter3-7.2 {
execsql {
CREATE TABLE abc(a, b, c);
ALTER TABLE abc ADD d DEFAULT NULL;
}
get_file_format
} {4}
do_test alter3-7.3 {
execsql {
ALTER TABLE abc ADD e DEFAULT 10;
}
get_file_format
} {4}
do_test alter3-7.4 {
execsql {
ALTER TABLE abc ADD f DEFAULT NULL;
}
get_file_format
} {4}
do_test alter3-7.5 {
execsql {
VACUUM;
}
get_file_format
} {1}
}
}
# Ticket #1183 - Make sure adding columns to large tables does not cause
# memory corruption (as was the case before this bug was fixed).
do_test alter3-8.1 {
execsql {
CREATE TABLE t4(c1);
}
} {}
set ::sql ""
do_test alter3-8.2 {
set cols c1
for {set i 2} {$i < 100} {incr i} {
execsql "
ALTER TABLE t4 ADD c$i
"
lappend cols c$i
}
set ::sql "CREATE TABLE t4([join $cols {, }])"
list
} {}
do_test alter3-8.2 {
execsql {
SELECT sql FROM sqlite_master WHERE name = 't4';
}
} [list $::sql]
finish_test

View File

@ -0,0 +1,358 @@
# 2009 February 2
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#*************************************************************************
# This file implements regression tests for SQLite library. The
# focus of this script is testing that SQLite can handle a subtle
# file format change that may be used in the future to implement
# "ALTER TABLE ... ADD COLUMN".
#
# $Id: alter4.test,v 1.1 2009/02/02 18:03:22 drh Exp $
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
# If SQLITE_OMIT_ALTERTABLE is defined, omit this file.
ifcapable !altertable {
finish_test
return
}
# Test Organisation:
# ------------------
#
# alter4-1.*: Test that ALTER TABLE correctly modifies the CREATE TABLE sql.
# alter4-2.*: Test error messages.
# alter4-3.*: Test adding columns with default value NULL.
# alter4-4.*: Test adding columns with default values other than NULL.
# alter4-5.*: Test adding columns to tables in ATTACHed databases.
# alter4-6.*: Test that temp triggers are not accidentally dropped.
# alter4-7.*: Test that VACUUM resets the file-format.
#
do_test alter4-1.1 {
execsql {
CREATE TEMP TABLE abc(a, b, c);
SELECT sql FROM sqlite_temp_master;
}
} {{CREATE TABLE abc(a, b, c)}}
do_test alter4-1.2 {
execsql {ALTER TABLE abc ADD d INTEGER;}
execsql {
SELECT sql FROM sqlite_temp_master;
}
} {{CREATE TABLE abc(a, b, c, d INTEGER)}}
do_test alter4-1.3 {
execsql {ALTER TABLE abc ADD e}
execsql {
SELECT sql FROM sqlite_temp_master;
}
} {{CREATE TABLE abc(a, b, c, d INTEGER, e)}}
do_test alter4-1.4 {
execsql {
CREATE TABLE temp.t1(a, b);
ALTER TABLE t1 ADD c;
SELECT sql FROM sqlite_temp_master WHERE tbl_name = 't1';
}
} {{CREATE TABLE t1(a, b, c)}}
do_test alter4-1.5 {
execsql {
ALTER TABLE t1 ADD d CHECK (a>d);
SELECT sql FROM sqlite_temp_master WHERE tbl_name = 't1';
}
} {{CREATE TABLE t1(a, b, c, d CHECK (a>d))}}
ifcapable foreignkey {
do_test alter4-1.6 {
execsql {
CREATE TEMP TABLE t2(a, b, UNIQUE(a, b));
ALTER TABLE t2 ADD c REFERENCES t1(c) ;
SELECT sql FROM sqlite_temp_master
WHERE tbl_name = 't2' AND type = 'table';
}
} {{CREATE TABLE t2(a, b, c REFERENCES t1(c), UNIQUE(a, b))}}
}
do_test alter4-1.7 {
execsql {
CREATE TEMPORARY TABLE t3(a, b, UNIQUE(a, b));
ALTER TABLE t3 ADD COLUMN c VARCHAR(10, 20);
SELECT sql FROM sqlite_temp_master
WHERE tbl_name = 't3' AND type = 'table';
}
} {{CREATE TABLE t3(a, b, c VARCHAR(10, 20), UNIQUE(a, b))}}
do_test alter4-1.99 {
catchsql {
# May not exist if foreign keys are omitted at compile time.
DROP TABLE t2;
}
execsql {
DROP TABLE abc;
DROP TABLE t1;
DROP TABLE t3;
}
} {}
do_test alter4-2.1 {
execsql {
CREATE TABLE temp.t1(a, b);
}
catchsql {
ALTER TABLE t1 ADD c PRIMARY KEY;
}
} {1 {Cannot add a PRIMARY KEY column}}
do_test alter4-2.2 {
catchsql {
ALTER TABLE t1 ADD c UNIQUE
}
} {1 {Cannot add a UNIQUE column}}
do_test alter4-2.3 {
catchsql {
ALTER TABLE t1 ADD b VARCHAR(10)
}
} {1 {duplicate column name: b}}
do_test alter4-2.3 {
catchsql {
ALTER TABLE t1 ADD c NOT NULL;
}
} {1 {Cannot add a NOT NULL column with default value NULL}}
do_test alter4-2.4 {
catchsql {
ALTER TABLE t1 ADD c NOT NULL DEFAULT 10;
}
} {0 {}}
ifcapable view {
do_test alter4-2.5 {
execsql {
CREATE TEMPORARY VIEW v1 AS SELECT * FROM t1;
}
catchsql {
alter table v1 add column d;
}
} {1 {Cannot add a column to a view}}
}
do_test alter4-2.6 {
catchsql {
alter table t1 add column d DEFAULT CURRENT_TIME;
}
} {1 {Cannot add a column with non-constant default}}
do_test alter4-2.7 {
catchsql {
alter table t1 add column d default (-5+1);
}
} {1 {Cannot add a column with non-constant default}}
do_test alter4-2.99 {
execsql {
DROP TABLE t1;
}
} {}
do_test alter4-3.1 {
execsql {
CREATE TEMP TABLE t1(a, b);
INSERT INTO t1 VALUES(1, 100);
INSERT INTO t1 VALUES(2, 300);
SELECT * FROM t1;
}
} {1 100 2 300}
do_test alter4-3.1 {
execsql {
PRAGMA schema_version = 10;
}
} {}
do_test alter4-3.2 {
execsql {
ALTER TABLE t1 ADD c;
SELECT * FROM t1;
}
} {1 100 {} 2 300 {}}
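# Here t1 is a TEMP table, so the ALTER TABLE above modifies only the temp
# schema; the schema_version of the main database should still be the
# value 10 assigned earlier.
#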
ifcapable schema_version {
do_test alter4-3.4 {
execsql {
PRAGMA schema_version;
}
} {10}
}
do_test alter4-4.1 {
db close
forcedelete test.db
set ::DB [sqlite3 db test.db]
execsql {
CREATE TEMP TABLE t1(a, b);
INSERT INTO t1 VALUES(1, 100);
INSERT INTO t1 VALUES(2, 300);
SELECT * FROM t1;
}
} {1 100 2 300}
do_test alter4-4.1 {
execsql {
PRAGMA schema_version = 20;
}
} {}
do_test alter4-4.2 {
execsql {
ALTER TABLE t1 ADD c DEFAULT 'hello world';
SELECT * FROM t1;
}
} {1 100 {hello world} 2 300 {hello world}}
ifcapable schema_version {
do_test alter4-4.4 {
execsql {
PRAGMA schema_version;
}
} {20}
}
do_test alter4-4.99 {
execsql {
DROP TABLE t1;
}
} {}
ifcapable attach {
do_test alter4-5.1 {
forcedelete test2.db
forcedelete test2.db-journal
execsql {
CREATE TEMP TABLE t1(a, b);
INSERT INTO t1 VALUES(1, 'one');
INSERT INTO t1 VALUES(2, 'two');
ATTACH 'test2.db' AS aux;
CREATE TABLE aux.t1 AS SELECT * FROM t1;
PRAGMA aux.schema_version = 30;
SELECT sql FROM aux.sqlite_master;
}
} {{CREATE TABLE t1(a,b)}}
do_test alter4-5.2 {
execsql {
ALTER TABLE aux.t1 ADD COLUMN c VARCHAR(128);
SELECT sql FROM aux.sqlite_master;
}
} {{CREATE TABLE t1(a,b, c VARCHAR(128))}}
do_test alter4-5.3 {
execsql {
SELECT * FROM aux.t1;
}
} {1 one {} 2 two {}}
ifcapable schema_version {
do_test alter4-5.4 {
execsql {
PRAGMA aux.schema_version;
}
} {31}
}
do_test alter4-5.6 {
execsql {
ALTER TABLE aux.t1 ADD COLUMN d DEFAULT 1000;
SELECT sql FROM aux.sqlite_master;
}
} {{CREATE TABLE t1(a,b, c VARCHAR(128), d DEFAULT 1000)}}
do_test alter4-5.7 {
execsql {
SELECT * FROM aux.t1;
}
} {1 one {} 1000 2 two {} 1000}
ifcapable schema_version {
do_test alter4-5.8 {
execsql {
PRAGMA aux.schema_version;
}
} {32}
}
do_test alter4-5.9 {
execsql {
SELECT * FROM t1;
}
} {1 one 2 two}
do_test alter4-5.99 {
execsql {
DROP TABLE aux.t1;
DROP TABLE t1;
}
} {}
}
#----------------------------------------------------------------
# Test that the table schema is correctly reloaded when a column
# is added to a table.
#
ifcapable trigger&&tempdb {
do_test alter4-6.1 {
execsql {
CREATE TEMP TABLE t1(a, b);
CREATE TEMP TABLE log(trig, a, b);
CREATE TRIGGER t1_a AFTER INSERT ON t1 BEGIN
INSERT INTO log VALUES('a', new.a, new.b);
END;
CREATE TEMP TRIGGER t1_b AFTER INSERT ON t1 BEGIN
INSERT INTO log VALUES('b', new.a, new.b);
END;
INSERT INTO t1 VALUES(1, 2);
SELECT * FROM log;
}
} {b 1 2 a 1 2}
do_test alter4-6.2 {
execsql {
ALTER TABLE t1 ADD COLUMN c DEFAULT 'c';
INSERT INTO t1(a, b) VALUES(3, 4);
SELECT * FROM log;
}
} {b 1 2 a 1 2 b 3 4 a 3 4}
}
# Ticket #1183 - Make sure adding columns to large tables does not cause
# memory corruption (as was the case before this bug was fixed).
do_test alter4-8.1 {
execsql {
CREATE TEMP TABLE t4(c1);
}
} {}
set ::sql ""
do_test alter4-8.2 {
set cols c1
for {set i 2} {$i < 100} {incr i} {
execsql "
ALTER TABLE t4 ADD c$i
"
lappend cols c$i
}
set ::sql "CREATE TABLE t4([join $cols {, }])"
list
} {}
do_test alter4-8.2 {
execsql {
SELECT sql FROM sqlite_temp_master WHERE name = 't4';
}
} [list $::sql]
# Test that a default value equal to -1 multiplied by the smallest possible
# 64-bit integer is correctly converted to a real.
do_execsql_test alter4-9.1 {
CREATE TABLE t5(
a INTEGER DEFAULT -9223372036854775808,
b INTEGER DEFAULT (-(-9223372036854775808))
);
INSERT INTO t5 DEFAULT VALUES;
}
do_execsql_test alter4-9.2 { SELECT typeof(a), a, typeof(b), b FROM t5; } {
integer -9223372036854775808
real 9.22337203685478e+18
}
do_execsql_test alter4-9.3 {
ALTER TABLE t5 ADD COLUMN c INTEGER DEFAULT (-(-9223372036854775808));
SELECT typeof(c), c FROM t5;
} {real 9.22337203685478e+18}
finish_test

View File

@ -0,0 +1,71 @@
# 2005 September 19
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#*************************************************************************
# This file implements regression tests for SQLite library. The
# focus of this script is testing the ALTER TABLE statement and
# specifically out-of-memory conditions within that command.
#
# $Id: altermalloc.test,v 1.10 2008/10/30 17:21:13 danielk1977 Exp $
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
# If SQLITE_OMIT_ALTERTABLE is defined, omit this file.
ifcapable !altertable||!memdebug {
finish_test
return
}
source $testdir/malloc_common.tcl
do_malloc_test altermalloc-1 -tclprep {
db close
} -tclbody {
if {[catch {sqlite3 db test.db}]} {
error "out of memory"
}
sqlite3_db_config_lookaside db 0 0 0
sqlite3_extended_result_codes db 1
} -sqlbody {
CREATE TABLE t1(a int);
ALTER TABLE t1 ADD COLUMN b INTEGER DEFAULT NULL;
ALTER TABLE t1 ADD COLUMN c TEXT DEFAULT 'default-text';
ALTER TABLE t1 RENAME TO t2;
ALTER TABLE t2 ADD COLUMN d BLOB DEFAULT X'ABCD';
}
# Test malloc() failure on an ALTER TABLE on a virtual table.
#
ifcapable vtab {
do_malloc_test altermalloc-vtab -tclprep {
sqlite3 db2 test.db
sqlite3_db_config_lookaside db2 0 0 0
sqlite3_extended_result_codes db2 1
register_echo_module [sqlite3_connection_pointer db2]
db2 eval {
CREATE TABLE t1(a, b VARCHAR, c INTEGER);
CREATE VIRTUAL TABLE t1echo USING echo(t1);
}
db2 close
register_echo_module [sqlite3_connection_pointer db]
} -tclbody {
set rc [catch {db eval { ALTER TABLE t1echo RENAME TO t1_echo }} msg]
if {$msg eq "vtable constructor failed: t1echo"} {
set msg "out of memory"
}
if {$rc} {
error $msg
}
}
}
finish_test

View File

@ -0,0 +1,117 @@
# 2013-09-30
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#*************************************************************************
# This file implements regression tests for "approximate_match" virtual
# table that is in the "amatch.c" extension.
#
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
# If FTS3 is not available in this build, omit this file.
ifcapable !fts3 {
finish_test
return
}
# Create the fts_kjv_genesis procedure which fills an FTS3/4 table with
# the complete text of the Book of Genesis.
#
source $testdir/genesis.tcl
do_test amatch1-1.0 {
db eval {
CREATE VIRTUAL TABLE t1 USING fts4(words); --, tokenize porter);
}
fts_kjv_genesis
db eval {
INSERT INTO t1(t1) VALUES('optimize');
CREATE VIRTUAL TABLE temp.t1aux USING fts4aux(main, t1);
SELECT term FROM t1aux WHERE col=0 ORDER BY 1 LIMIT 5
}
} {a abated abel abelmizraim abidah}
do_test amatch1-1.1 {
db eval {
SELECT term FROM t1aux WHERE term>'b' AND col=0 ORDER BY 1 LIMIT 5
}
} {baalhanan babel back backward bad}
do_test amatch1-1.2 {
db eval {
SELECT term FROM t1aux WHERE term>'b' AND col=0 LIMIT 5
}
} {baalhanan babel back backward bad}
# Load the amatch extension
load_static_extension db amatch
do_execsql_test amatch1-2.0 {
CREATE TABLE costs(iLang, cFrom, cTo, Cost);
INSERT INTO costs VALUES(0, '', '?', 100);
INSERT INTO costs VALUES(0, '?', '', 100);
INSERT INTO costs VALUES(0, '?', '?', 150);
CREATE TABLE vocab(w TEXT UNIQUE);
INSERT OR IGNORE INTO vocab SELECT term FROM t1aux;
CREATE VIRTUAL TABLE t2 USING approximate_match(
vocabulary_table=t1aux,
vocabulary_word=term,
edit_distances=costs
);
CREATE VIRTUAL TABLE t3 USING approximate_match(
vocabulary_table=vocab,
vocabulary_word=w,
edit_distances=costs
);
CREATE VIRTUAL TABLE t4 USING approximate_match(
vocabulary_table=vtemp,
vocabulary_word=w,
edit_distances=costs
);
} {}
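# In the costs table above, each row is (language, from-string, to-string,
# cost). With the amatch extension, '?' appears to act as a single-character
# wildcard, so the three rows say roughly: inserting any one character
# costs 100, deleting any one character costs 100, and substituting one
# character for another costs 150. That is why 'josxph' below matches
# 'joseph' at distance 150 (one substitution) while 'joxxph' matches only
# at distance 300 (two substitutions).
#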
puts "Query against fts4aux: [time {
do_execsql_test amatch1-2.1 {
SELECT word, distance FROM t2
WHERE word MATCH 'josxph' AND distance<300;
} {joseph 150}} 1]"
puts "Query against ordinary table: [time {
do_execsql_test amatch1-2.2 {
SELECT word, distance FROM t3
WHERE word MATCH 'josxph' AND distance<300;
} {joseph 150}} 1]"
puts "Temp table initialized from fts4aux: [time {
do_execsql_test amatch1-2.3a {
CREATE TEMP TABLE vtemp(w TEXT UNIQUE);
INSERT OR IGNORE INTO vtemp SELECT term FROM t1aux;
} {}} 1]"
puts "Query against temp table: [time {
do_execsql_test amatch1-2.3b {
SELECT word, distance FROM t4
WHERE word MATCH 'josxph' AND distance<300;
} {joseph 150}} 1]"
do_execsql_test amatch1-2.11 {
SELECT word, distance FROM t2
WHERE word MATCH 'joxxph' AND distance<=300;
} {joseph 300}
do_execsql_test amatch1-2.12 {
SELECT word, distance FROM t3
WHERE word MATCH 'joxxph' AND distance<=300;
} {joseph 300}
do_execsql_test amatch1-2.21 {
SELECT word, distance FROM t2
WHERE word MATCH 'joxxph' AND distance<300;
} {}
do_execsql_test amatch1-2.22 {
SELECT word, distance FROM t3
WHERE word MATCH 'joxxph' AND distance<300;
} {}
finish_test

View File

@ -0,0 +1,364 @@
# 2005 July 22
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for SQLite library.
# This file implements tests for the ANALYZE command.
#
# $Id: analyze.test,v 1.9 2008/08/11 18:44:58 drh Exp $
set testdir [file dirname $argv0]
source $testdir/tester.tcl
# There is nothing to test if ANALYZE is disabled for this build.
#
ifcapable {!analyze} {
finish_test
return
}
# Basic sanity checks.
#
do_test analyze-1.1 {
catchsql {
ANALYZE no_such_table
}
} {1 {no such table: no_such_table}}
do_test analyze-1.2 {
execsql {
SELECT count(*) FROM sqlite_master WHERE name='sqlite_stat1'
}
} {0}
do_test analyze-1.3 {
catchsql {
ANALYZE no_such_db.no_such_table
}
} {1 {unknown database no_such_db}}
do_test analyze-1.4 {
execsql {
SELECT count(*) FROM sqlite_master WHERE name='sqlite_stat1'
}
} {0}
do_test analyze-1.5.1 {
catchsql {
ANALYZE
}
} {0 {}}
do_test analyze-1.5.2 {
catchsql {
PRAGMA empty_result_callbacks=1;
ANALYZE
}
} {0 {}}
do_test analyze-1.6 {
execsql {
SELECT count(*) FROM sqlite_master WHERE name='sqlite_stat1'
}
} {1}
do_test analyze-1.6.2 {
catchsql {
CREATE INDEX stat1idx ON sqlite_stat1(idx);
}
} {1 {table sqlite_stat1 may not be indexed}}
do_test analyze-1.6.3 {
catchsql {
CREATE INDEX main.stat1idx ON SQLite_stat1(idx);
}
} {1 {table sqlite_stat1 may not be indexed}}
do_test analyze-1.7 {
execsql {
SELECT * FROM sqlite_stat1 WHERE idx NOT NULL
}
} {}
do_test analyze-1.8 {
catchsql {
ANALYZE main
}
} {0 {}}
do_test analyze-1.9 {
execsql {
SELECT * FROM sqlite_stat1 WHERE idx NOT NULL
}
} {}
do_test analyze-1.10 {
catchsql {
CREATE TABLE t1(a,b);
ANALYZE main.t1;
}
} {0 {}}
do_test analyze-1.11 {
execsql {
SELECT * FROM sqlite_stat1
}
} {}
do_test analyze-1.12 {
catchsql {
ANALYZE t1;
}
} {0 {}}
do_test analyze-1.13 {
execsql {
SELECT * FROM sqlite_stat1
}
} {}
# Create some indices that can be analyzed. But do not yet add
# data. Without data in the tables, no analysis is done.
#
do_test analyze-2.1 {
execsql {
CREATE INDEX t1i1 ON t1(a);
ANALYZE main.t1;
SELECT * FROM sqlite_stat1 ORDER BY idx;
}
} {}
do_test analyze-2.2 {
execsql {
CREATE INDEX t1i2 ON t1(b);
ANALYZE t1;
SELECT * FROM sqlite_stat1 ORDER BY idx;
}
} {}
do_test analyze-2.3 {
execsql {
CREATE INDEX t1i3 ON t1(a,b);
ANALYZE main;
SELECT * FROM sqlite_stat1 ORDER BY idx;
}
} {}
# Start adding data to the table. Verify that the analysis
# is done correctly.
#
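# In each sqlite_stat1 row below, the "stat" column is a list of integers:
# the first is the number of rows in the index, and each subsequent integer
# is the average number of rows that share the same values in a prefix of
# the indexed columns. So "t1i1 {2 2}" means 2 rows, with an average of 2
# rows per distinct value of column a.
#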
do_test analyze-3.1 {
execsql {
INSERT INTO t1 VALUES(1,2);
INSERT INTO t1 VALUES(1,3);
ANALYZE main.t1;
SELECT idx, stat FROM sqlite_stat1 ORDER BY idx;
}
} {t1i1 {2 2} t1i2 {2 1} t1i3 {2 2 1}}
do_test analyze-3.2 {
execsql {
INSERT INTO t1 VALUES(1,4);
INSERT INTO t1 VALUES(1,5);
ANALYZE t1;
SELECT idx, stat FROM sqlite_stat1 ORDER BY idx;
}
} {t1i1 {4 4} t1i2 {4 1} t1i3 {4 4 1}}
do_test analyze-3.3 {
execsql {
INSERT INTO t1 VALUES(2,5);
ANALYZE main;
SELECT idx, stat FROM sqlite_stat1 ORDER BY idx;
}
} {t1i1 {5 3} t1i2 {5 2} t1i3 {5 3 1}}
do_test analyze-3.4 {
execsql {
CREATE TABLE t2 AS SELECT * FROM t1;
CREATE INDEX t2i1 ON t2(a);
CREATE INDEX t2i2 ON t2(b);
CREATE INDEX t2i3 ON t2(a,b);
ANALYZE;
SELECT idx, stat FROM sqlite_stat1 ORDER BY idx;
}
} {t1i1 {5 3} t1i2 {5 2} t1i3 {5 3 1} t2i1 {5 3} t2i2 {5 2} t2i3 {5 3 1}}
do_test analyze-3.5 {
execsql {
DROP INDEX t2i3;
ANALYZE t1;
SELECT idx, stat FROM sqlite_stat1 ORDER BY idx;
}
} {t1i1 {5 3} t1i2 {5 2} t1i3 {5 3 1} t2i1 {5 3} t2i2 {5 2}}
do_test analyze-3.6 {
execsql {
ANALYZE t2;
SELECT idx, stat FROM sqlite_stat1 ORDER BY idx;
}
} {t1i1 {5 3} t1i2 {5 2} t1i3 {5 3 1} t2i1 {5 3} t2i2 {5 2}}
do_test analyze-3.7 {
execsql {
DROP INDEX t2i2;
ANALYZE t2;
SELECT idx, stat FROM sqlite_stat1 ORDER BY idx;
}
} {t1i1 {5 3} t1i2 {5 2} t1i3 {5 3 1} t2i1 {5 3}}
do_test analyze-3.8 {
execsql {
CREATE TABLE t3 AS SELECT a, b, rowid AS c, 'hi' AS d FROM t1;
CREATE INDEX t3i1 ON t3(a);
CREATE INDEX t3i2 ON t3(a,b,c,d);
CREATE INDEX t3i3 ON t3(d,b,c,a);
DROP TABLE t1;
DROP TABLE t2;
SELECT idx, stat FROM sqlite_stat1 ORDER BY idx;
}
} {}
do_test analyze-3.9 {
execsql {
ANALYZE;
SELECT idx, stat FROM sqlite_stat1 ORDER BY idx;
}
} {t3i1 {5 3} t3i2 {5 3 1 1 1} t3i3 {5 5 2 1 1}}
do_test analyze-3.10 {
execsql {
CREATE TABLE [silly " name](a, b, c);
CREATE INDEX 'foolish '' name' ON [silly " name](a, b);
CREATE INDEX 'another foolish '' name' ON [silly " name](c);
INSERT INTO [silly " name] VALUES(1, 2, 3);
INSERT INTO [silly " name] VALUES(4, 5, 6);
ANALYZE;
SELECT idx, stat FROM sqlite_stat1 ORDER BY idx;
}
} {{another foolish ' name} {2 1} {foolish ' name} {2 1 1} t3i1 {5 3} t3i2 {5 3 1 1 1} t3i3 {5 5 2 1 1}}
do_test analyze-3.11 {
execsql {
DROP INDEX "foolish ' name";
SELECT idx, stat FROM sqlite_stat1 ORDER BY idx;
}
} {{another foolish ' name} {2 1} t3i1 {5 3} t3i2 {5 3 1 1 1} t3i3 {5 5 2 1 1}}
do_test analyze-3.11 {
execsql {
DROP TABLE "silly "" name";
SELECT idx, stat FROM sqlite_stat1 ORDER BY idx;
}
} {t3i1 {5 3} t3i2 {5 3 1 1 1} t3i3 {5 5 2 1 1}}
# Try corrupting the sqlite_stat1 table and make sure the
# database is still able to function.
#
do_test analyze-4.0 {
sqlite3 db2 test.db
db2 eval {
CREATE TABLE t4(x,y,z);
CREATE INDEX t4i1 ON t4(x);
CREATE INDEX t4i2 ON t4(y);
INSERT INTO t4 SELECT a,b,c FROM t3;
}
db2 close
db close
sqlite3 db test.db
execsql {
ANALYZE;
SELECT idx, stat FROM sqlite_stat1 ORDER BY idx;
}
} {t3i1 {5 3} t3i2 {5 3 1 1 1} t3i3 {5 5 2 1 1} t4i1 {5 3} t4i2 {5 2}}
do_test analyze-4.1 {
execsql {
PRAGMA writable_schema=on;
INSERT INTO sqlite_stat1 VALUES(null,null,null);
PRAGMA writable_schema=off;
}
db close
sqlite3 db test.db
execsql {
SELECT * FROM t4 WHERE x=1234;
}
} {}
do_test analyze-4.2 {
execsql {
PRAGMA writable_schema=on;
DELETE FROM sqlite_stat1;
INSERT INTO sqlite_stat1 VALUES('t4','t4i1','nonsense');
INSERT INTO sqlite_stat1 VALUES('t4','t4i2','120897349817238741092873198273409187234918720394817209384710928374109827172901827349871928741910');
PRAGMA writable_schema=off;
}
db close
sqlite3 db test.db
execsql {
SELECT * FROM t4 WHERE x=1234;
}
} {}
do_test analyze-4.3 {
execsql {
INSERT INTO sqlite_stat1 VALUES('t4','xyzzy','0 1 2 3');
}
db close
sqlite3 db test.db
execsql {
SELECT * FROM t4 WHERE x=1234;
}
} {}
# Verify that DROP TABLE and DROP INDEX remove entries from the
# sqlite_stat1, sqlite_stat3 and sqlite_stat4 tables.
#
do_test analyze-5.0 {
execsql {
DELETE FROM t3;
DELETE FROM t4;
INSERT INTO t3 VALUES(1,2,3,4);
INSERT INTO t3 VALUES(5,6,7,8);
INSERT INTO t3 SELECT a+8, b+8, c+8, d+8 FROM t3;
INSERT INTO t3 SELECT a+16, b+16, c+16, d+16 FROM t3;
INSERT INTO t3 SELECT a+32, b+32, c+32, d+32 FROM t3;
INSERT INTO t3 SELECT a+64, b+64, c+64, d+64 FROM t3;
INSERT INTO t4 SELECT a, b, c FROM t3;
ANALYZE;
SELECT DISTINCT idx FROM sqlite_stat1 ORDER BY 1;
SELECT DISTINCT tbl FROM sqlite_stat1 ORDER BY 1;
}
} {t3i1 t3i2 t3i3 t4i1 t4i2 t3 t4}
ifcapable stat4||stat3 {
ifcapable stat4 {set stat sqlite_stat4} else {set stat sqlite_stat3}
do_test analyze-5.1 {
execsql "
SELECT DISTINCT idx FROM $stat ORDER BY 1;
SELECT DISTINCT tbl FROM $stat ORDER BY 1;
"
} {t3i1 t3i2 t3i3 t4i1 t4i2 t3 t4}
}
do_test analyze-5.2 {
execsql {
DROP INDEX t3i2;
SELECT DISTINCT idx FROM sqlite_stat1 ORDER BY 1;
SELECT DISTINCT tbl FROM sqlite_stat1 ORDER BY 1;
}
} {t3i1 t3i3 t4i1 t4i2 t3 t4}
ifcapable stat4||stat3 {
do_test analyze-5.3 {
execsql "
SELECT DISTINCT idx FROM $stat ORDER BY 1;
SELECT DISTINCT tbl FROM $stat ORDER BY 1;
"
} {t3i1 t3i3 t4i1 t4i2 t3 t4}
}
do_test analyze-5.4 {
execsql {
DROP TABLE t3;
SELECT DISTINCT idx FROM sqlite_stat1 ORDER BY 1;
SELECT DISTINCT tbl FROM sqlite_stat1 ORDER BY 1;
}
} {t4i1 t4i2 t4}
ifcapable stat4||stat3 {
do_test analyze-5.5 {
execsql "
SELECT DISTINCT idx FROM $stat ORDER BY 1;
SELECT DISTINCT tbl FROM $stat ORDER BY 1;
"
} {t4i1 t4i2 t4}
}
# This test corrupts the database file so it must be the last test
# in the series.
#
do_test analyze-99.1 {
execsql {
PRAGMA writable_schema=on;
UPDATE sqlite_master SET sql='nonsense' WHERE name='sqlite_stat1';
}
db close
catch { sqlite3 db test.db }
catchsql {
ANALYZE
}
} {1 {malformed database schema (sqlite_stat1)}}
finish_test

View File

@ -0,0 +1,703 @@
# 2009 August 06
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
#
# This file implements regression tests for SQLite library. This file
# implements tests for range and LIKE constraints that use bound variables
# instead of literal constant arguments.
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
set testprefix analyze3
ifcapable !stat4&&!stat3 {
finish_test
return
}
#----------------------------------------------------------------------
# Test Organization:
#
# analyze3-1.*: Test that the values of bound parameters are considered
# in the same way as constants when planning queries that
# use range constraints.
#
# analyze3-2.*: Test that the values of bound parameters are considered
# in the same way as constants when planning queries that
# use LIKE expressions in the WHERE clause.
#
# analyze3-3.*: Test that binding to a variable does not invalidate the
# query plan when there is no way in which replanning the
# query may produce a superior outcome.
#
# analyze3-4.*: Test that SQL or authorization callback errors occurring
# within sqlite3Reprepare() are handled correctly.
#
# analyze3-5.*: Check that the query plans of applicable statements are
# invalidated if the values of SQL parameters are modified
# using the clear_bindings() or transfer_bindings() APIs.
#
# analyze3-6.*: Test that the problem fixed by commit [127a5b776d] is fixed.
#
# analyze3-7.*: Test that some memory leaks discovered by fuzz testing
# have been fixed.
#
proc getvar {varname} { uplevel #0 set $varname }
db function var getvar
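# The two lines above register a SQL function var(name) that returns the
# value of the Tcl global variable with that name, e.g. SELECT var('l').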
proc eqp {sql {db db}} {
uplevel execsql [list "EXPLAIN QUERY PLAN $sql"] $db
}
proc sf_execsql {sql {db db}} {
set ::sqlite_search_count 0
set r [uplevel [list execsql $sql $db]]
concat $::sqlite_search_count [$db status step] $r
}
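# sf_execsql runs the SQL and returns a list of the form
#   {search-count fullscan-steps result...}
# where search-count is the test build's ::sqlite_search_count counter and
# fullscan-steps is [$db status step], the full-table-scan step count for
# the statement. So "199 0 14850" below means 199 search operations, no
# full-scan steps, and a query result of 14850.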
#-------------------------------------------------------------------------
#
# analyze3-1.1.1:
# Create a table with two columns. Populate the first column (affinity
# INTEGER) with integer values from 100 to 1100. Create an index on this
# column. ANALYZE the table.
#
# analyze3-1.1.2 - 1.1.3
# Show that there are two possible plans for querying the table with
# a range constraint on the indexed column - "full table scan" or "use
# the index". When the range is specified using literal values, SQLite
# is able to pick the best plan based on the samples in sqlite_stat3.
#
# analyze3-1.1.4 - 1.1.9
# Show that using SQL variables produces the same results as using
# literal values to constrain the range scan.
#
# These tests also check that the compiler code considers column
# affinities when estimating the number of rows scanned by the "use
# index strategy".
#
do_test analyze3-1.1.1 {
execsql {
BEGIN;
CREATE TABLE t1(x INTEGER, y);
CREATE INDEX i1 ON t1(x);
}
for {set i 0} {$i < 1000} {incr i} {
execsql { INSERT INTO t1 VALUES($i+100, $i) }
}
execsql {
COMMIT;
ANALYZE;
}
ifcapable stat4 {
execsql { SELECT count(*)>0 FROM sqlite_stat4; }
} else {
execsql { SELECT count(*)>0 FROM sqlite_stat3; }
}
} {1}
do_execsql_test analyze3-1.1.x {
SELECT count(*) FROM t1 WHERE x>200 AND x<300;
SELECT count(*) FROM t1 WHERE x>0 AND x<1100;
} {99 1000}
# The first of the following two SELECT statements visits 99 rows. So
# it is better to use the index. But the second visits every row in
# the table (1000 in total) so it is better to do a full-table scan.
#
do_eqp_test analyze3-1.1.2 {
SELECT sum(y) FROM t1 WHERE x>200 AND x<300
} {0 0 0 {SEARCH TABLE t1 USING INDEX i1 (x>? AND x<?)}}
do_eqp_test analyze3-1.1.3 {
SELECT sum(y) FROM t1 WHERE x>0 AND x<1100
} {0 0 0 {SCAN TABLE t1}}
do_test analyze3-1.1.4 {
sf_execsql { SELECT sum(y) FROM t1 WHERE x>200 AND x<300 }
} {199 0 14850}
do_test analyze3-1.1.5 {
set l [string range "200" 0 end]
set u [string range "300" 0 end]
sf_execsql { SELECT sum(y) FROM t1 WHERE x>$l AND x<$u }
} {199 0 14850}
do_test analyze3-1.1.6 {
set l [expr int(200)]
set u [expr int(300)]
sf_execsql { SELECT sum(y) FROM t1 WHERE x>$l AND x<$u }
} {199 0 14850}
do_test analyze3-1.1.7 {
sf_execsql { SELECT sum(y) FROM t1 WHERE x>0 AND x<1100 }
} {999 999 499500}
do_test analyze3-1.1.8 {
set l [string range "0" 0 end]
set u [string range "1100" 0 end]
sf_execsql { SELECT sum(y) FROM t1 WHERE x>$l AND x<$u }
} {999 999 499500}
do_test analyze3-1.1.9 {
set l [expr int(0)]
set u [expr int(1100)]
sf_execsql { SELECT sum(y) FROM t1 WHERE x>$l AND x<$u }
} {999 999 499500}
# The following tests are similar to the block above. The difference is
# that the indexed column has TEXT affinity in this case. In the tests
# above the affinity is INTEGER.
#
do_test analyze3-1.2.1 {
execsql {
BEGIN;
CREATE TABLE t2(x TEXT, y);
INSERT INTO t2 SELECT * FROM t1;
CREATE INDEX i2 ON t2(x);
COMMIT;
ANALYZE;
}
} {}
do_execsql_test analyze3-2.1.x {
SELECT count(*) FROM t2 WHERE x>1 AND x<2;
SELECT count(*) FROM t2 WHERE x>0 AND x<99;
} {200 990}
do_eqp_test analyze3-1.2.2 {
SELECT sum(y) FROM t2 WHERE x>1 AND x<2
} {0 0 0 {SEARCH TABLE t2 USING INDEX i2 (x>? AND x<?)}}
do_eqp_test analyze3-1.2.3 {
SELECT sum(y) FROM t2 WHERE x>0 AND x<99
} {0 0 0 {SCAN TABLE t2}}
do_test analyze3-1.2.4 {
sf_execsql { SELECT sum(y) FROM t2 WHERE x>12 AND x<20 }
} {161 0 4760}
do_test analyze3-1.2.5 {
set l [string range "12" 0 end]
set u [string range "20" 0 end]
sf_execsql {SELECT typeof($l), typeof($u), sum(y) FROM t2 WHERE x>$l AND x<$u}
} {161 0 text text 4760}
do_test analyze3-1.2.6 {
set l [expr int(12)]
set u [expr int(20)]
sf_execsql {SELECT typeof($l), typeof($u), sum(y) FROM t2 WHERE x>$l AND x<$u}
} {161 0 integer integer 4760}
do_test analyze3-1.2.7 {
sf_execsql { SELECT sum(y) FROM t2 WHERE x>0 AND x<99 }
} {999 999 490555}
do_test analyze3-1.2.8 {
set l [string range "0" 0 end]
set u [string range "99" 0 end]
sf_execsql {SELECT typeof($l), typeof($u), sum(y) FROM t2 WHERE x>$l AND x<$u}
} {999 999 text text 490555}
do_test analyze3-1.2.9 {
set l [expr int(0)]
set u [expr int(99)]
sf_execsql {SELECT typeof($l), typeof($u), sum(y) FROM t2 WHERE x>$l AND x<$u}
} {999 999 integer integer 490555}
# Same tests a third time. This time, column x has INTEGER affinity and
# is not the leftmost column of the table. This triggered a bug causing
# SQLite to use sub-optimal query plans in 3.6.18 and earlier.
#
do_test analyze3-1.3.1 {
execsql {
BEGIN;
CREATE TABLE t3(y TEXT, x INTEGER);
INSERT INTO t3 SELECT y, x FROM t1;
CREATE INDEX i3 ON t3(x);
COMMIT;
ANALYZE;
}
} {}
do_execsql_test analyze3-1.3.x {
SELECT count(*) FROM t3 WHERE x>200 AND x<300;
SELECT count(*) FROM t3 WHERE x>0 AND x<1100
} {99 1000}
do_eqp_test analyze3-1.3.2 {
SELECT sum(y) FROM t3 WHERE x>200 AND x<300
} {0 0 0 {SEARCH TABLE t3 USING INDEX i3 (x>? AND x<?)}}
do_eqp_test analyze3-1.3.3 {
SELECT sum(y) FROM t3 WHERE x>0 AND x<1100
} {0 0 0 {SCAN TABLE t3}}
do_test analyze3-1.3.4 {
sf_execsql { SELECT sum(y) FROM t3 WHERE x>200 AND x<300 }
} {199 0 14850}
do_test analyze3-1.3.5 {
set l [string range "200" 0 end]
set u [string range "300" 0 end]
sf_execsql { SELECT sum(y) FROM t3 WHERE x>$l AND x<$u }
} {199 0 14850}
do_test analyze3-1.3.6 {
set l [expr int(200)]
set u [expr int(300)]
sf_execsql { SELECT sum(y) FROM t3 WHERE x>$l AND x<$u }
} {199 0 14850}
do_test analyze3-1.3.7 {
sf_execsql { SELECT sum(y) FROM t3 WHERE x>0 AND x<1100 }
} {999 999 499500}
do_test analyze3-1.3.8 {
set l [string range "0" 0 end]
set u [string range "1100" 0 end]
sf_execsql { SELECT sum(y) FROM t3 WHERE x>$l AND x<$u }
} {999 999 499500}
do_test analyze3-1.3.9 {
set l [expr int(0)]
set u [expr int(1100)]
sf_execsql { SELECT sum(y) FROM t3 WHERE x>$l AND x<$u }
} {999 999 499500}
#-------------------------------------------------------------------------
# Test that the values of bound SQL variables may be used for the LIKE
# optimization.
#
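# The LIKE optimization rewrites a pattern with a fixed prefix and a
# trailing wildcard into a range scan on an index. Conceptually,
#
#   SELECT count(a) FROM t1 WHERE b LIKE 'a%'
#
# can be satisfied as
#
#   SELECT count(a) FROM t1 WHERE b >= 'a' AND b < 'b'
#
# which is why the 'a%' queries below can use index i1, while '%a'
# patterns, having no fixed prefix, fall back to a full table scan.
#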
drop_all_tables
do_test analyze3-2.1 {
execsql {
PRAGMA case_sensitive_like=off;
BEGIN;
CREATE TABLE t1(a, b TEXT COLLATE nocase);
CREATE INDEX i1 ON t1(b);
}
for {set i 0} {$i < 1000} {incr i} {
set t ""
append t [lindex {a b c d e f g h i j} [expr $i/100]]
append t [lindex {a b c d e f g h i j} [expr ($i/10)%10]]
append t [lindex {a b c d e f g h i j} [expr ($i%10)]]
execsql { INSERT INTO t1 VALUES($i, $t) }
}
execsql COMMIT
} {}
do_eqp_test analyze3-2.2 {
SELECT count(a) FROM t1 WHERE b LIKE 'a%'
} {0 0 0 {SEARCH TABLE t1 USING INDEX i1 (b>? AND b<?)}}
do_eqp_test analyze3-2.3 {
SELECT count(a) FROM t1 WHERE b LIKE '%a'
} {0 0 0 {SCAN TABLE t1}}
# Return the first argument if like_match_blobs is true (the default)
# or the second argument if not
#
proc ilmb {a b} {
ifcapable like_match_blobs {return $a}
return $b
}
do_test analyze3-2.4 {
sf_execsql { SELECT count(*) FROM t1 WHERE b LIKE 'a%' }
} [list [ilmb 102 101] 0 100]
do_test analyze3-2.5 {
sf_execsql { SELECT count(*) FROM t1 WHERE b LIKE '%a' }
} {999 999 100}
do_test analyze3-2.6 {
set like "a%"
sf_execsql { SELECT count(*) FROM t1 WHERE b LIKE $like }
} [list [ilmb 102 101] 0 100]
do_test analyze3-2.7 {
set like "%a"
sf_execsql { SELECT count(*) FROM t1 WHERE b LIKE $like }
} {999 999 100}
do_test analyze3-2.8 {
set like "a"
sf_execsql { SELECT count(*) FROM t1 WHERE b LIKE $like }
} [list [ilmb 102 101] 0 0]
do_test analyze3-2.9 {
set like "ab"
sf_execsql { SELECT count(*) FROM t1 WHERE b LIKE $like }
} [list [ilmb 12 11] 0 0]
do_test analyze3-2.10 {
set like "abc"
sf_execsql { SELECT count(*) FROM t1 WHERE b LIKE $like }
} [list [ilmb 3 2] 0 1]
do_test analyze3-2.11 {
set like "a_c"
sf_execsql { SELECT count(*) FROM t1 WHERE b LIKE $like }
} [list [ilmb 102 101] 0 10]
#-------------------------------------------------------------------------
# This block of tests checks that statements are correctly marked as
# expired when the values bound to any parameters that may affect the
# query plan are modified.
#
drop_all_tables
db auth auth
proc auth {args} {
set ::auth 1
return SQLITE_OK
}
do_test analyze3-3.1 {
execsql {
BEGIN;
CREATE TABLE t1(a, b, c);
CREATE INDEX i1 ON t1(b);
}
for {set i 0} {$i < 100} {incr i} {
execsql { INSERT INTO t1 VALUES($i, $i, $i) }
}
execsql COMMIT
execsql ANALYZE
} {}
do_test analyze3-3.2.1 {
set S [sqlite3_prepare_v2 db "SELECT * FROM t1 WHERE b>?" -1 dummy]
sqlite3_expired $S
} {0}
do_test analyze3-3.2.2 {
sqlite3_bind_text $S 1 "abc" 3
sqlite3_expired $S
} {1}
do_test analyze3-3.2.4 {
sqlite3_finalize $S
} {SQLITE_OK}
do_test analyze3-3.2.5 {
set S [sqlite3_prepare_v2 db "SELECT * FROM t1 WHERE b=?" -1 dummy]
sqlite3_expired $S
} {0}
do_test analyze3-3.2.6 {
sqlite3_bind_text $S 1 "abc" 3
sqlite3_expired $S
} {1}
do_test analyze3-3.2.7 {
sqlite3_finalize $S
} {SQLITE_OK}
do_test analyze3-3.4.1 {
set S [sqlite3_prepare_v2 db "SELECT * FROM t1 WHERE a=? AND b>?" -1 dummy]
sqlite3_expired $S
} {0}
do_test analyze3-3.4.2 {
sqlite3_bind_text $S 1 "abc" 3
sqlite3_expired $S
} {0}
do_test analyze3-3.4.3 {
sqlite3_bind_text $S 2 "def" 3
sqlite3_expired $S
} {1}
do_test analyze3-3.4.4 {
sqlite3_bind_text $S 2 "ghi" 3
sqlite3_expired $S
} {1}
do_test analyze3-3.4.5 {
sqlite3_expired $S
} {1}
do_test analyze3-3.4.6 {
sqlite3_finalize $S
} {SQLITE_OK}
do_test analyze3-3.5.1 {
set S [sqlite3_prepare_v2 db {
SELECT * FROM t1 WHERE a IN (
?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10,
?11, ?12, ?13, ?14, ?15, ?16, ?17, ?18, ?19, ?20,
?21, ?22, ?23, ?24, ?25, ?26, ?27, ?28, ?29, ?30, ?31
) AND b>?32;
} -1 dummy]
sqlite3_expired $S
} {0}
do_test analyze3-3.5.2 {
sqlite3_bind_text $S 31 "abc" 3
sqlite3_expired $S
} {0}
do_test analyze3-3.5.3 {
sqlite3_bind_text $S 32 "def" 3
sqlite3_expired $S
} {1}
do_test analyze3-3.5.5 {
sqlite3_finalize $S
} {SQLITE_OK}
do_test analyze3-3.6.1 {
set S [sqlite3_prepare_v2 db {
SELECT * FROM t1 WHERE a IN (
?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10,
?11, ?12, ?13, ?14, ?15, ?16, ?17, ?18, ?19, ?20,
?21, ?22, ?23, ?24, ?25, ?26, ?27, ?28, ?29, ?30, ?31, ?32
) AND b>?33;
} -1 dummy]
sqlite3_expired $S
} {0}
do_test analyze3-3.6.2 {
sqlite3_bind_text $S 32 "abc" 3
sqlite3_expired $S
} {1}
do_test analyze3-3.6.3 {
sqlite3_bind_text $S 33 "def" 3
sqlite3_expired $S
} {1}
do_test analyze3-3.6.5 {
sqlite3_finalize $S
} {SQLITE_OK}
do_test analyze3-3.7.1 {
set S [sqlite3_prepare_v2 db {
SELECT * FROM t1 WHERE a IN (
?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?33,
?11, ?12, ?13, ?14, ?15, ?16, ?17, ?18, ?19, ?20,
?21, ?22, ?23, ?24, ?25, ?26, ?27, ?28, ?29, ?30, ?31, ?32
) AND b>?10;
} -1 dummy]
sqlite3_expired $S
} {0}
do_test analyze3-3.7.2 {
sqlite3_bind_text $S 32 "abc" 3
sqlite3_expired $S
} {0}
do_test analyze3-3.7.3 {
sqlite3_bind_text $S 33 "def" 3
sqlite3_expired $S
} {0}
do_test analyze3-3.7.4 {
sqlite3_bind_text $S 10 "def" 3
sqlite3_expired $S
} {1}
do_test analyze3-3.7.6 {
sqlite3_finalize $S
} {SQLITE_OK}
do_test analyze3-3.8.1 {
execsql {
CREATE TABLE t4(x, y TEXT COLLATE NOCASE);
CREATE INDEX i4 ON t4(y);
}
} {}
do_test analyze3-3.8.2 {
set S [sqlite3_prepare_v2 db {
SELECT * FROM t4 WHERE x != ? AND y LIKE ?
} -1 dummy]
sqlite3_expired $S
} {0}
do_test analyze3-3.8.3 {
sqlite3_bind_text $S 1 "abc" 3
sqlite3_expired $S
} {0}
do_test analyze3-3.8.4 {
sqlite3_bind_text $S 2 "def" 3
sqlite3_expired $S
} {1}
do_test analyze3-3.8.7 {
sqlite3_bind_text $S 2 "ghi%" 4
sqlite3_expired $S
} {1}
do_test analyze3-3.8.8 {
sqlite3_expired $S
} {1}
do_test analyze3-3.8.9 {
sqlite3_bind_text $S 2 "ghi%def" 7
sqlite3_expired $S
} {1}
do_test analyze3-3.8.10 {
sqlite3_expired $S
} {1}
do_test analyze3-3.8.11 {
sqlite3_bind_text $S 2 "%ab" 3
sqlite3_expired $S
} {1}
do_test analyze3-3.8.12 {
sqlite3_expired $S
} {1}
do_test analyze3-3.8.12 {
sqlite3_bind_text $S 2 "%de" 3
sqlite3_expired $S
} {1}
do_test analyze3-3.8.13 {
sqlite3_expired $S
} {1}
do_test analyze3-3.8.14 {
sqlite3_finalize $S
} {SQLITE_OK}
#-------------------------------------------------------------------------
# These tests check that errors encountered while repreparing an SQL
# statement within sqlite3Reprepare() are handled correctly.
#
# Check a schema error.
#
do_test analyze3-4.1.1 {
set S [sqlite3_prepare_v2 db "SELECT * FROM t1 WHERE a=? AND b>?" -1 dummy]
sqlite3_step $S
} {SQLITE_DONE}
do_test analyze3-4.1.2 {
sqlite3_reset $S
sqlite3_bind_text $S 2 "abc" 3
execsql { DROP TABLE t1 }
sqlite3_step $S
} {SQLITE_ERROR}
do_test analyze3-4.1.3 {
sqlite3_finalize $S
} {SQLITE_ERROR}
# Check an authorization error.
#
do_test analyze3-4.2.1 {
execsql {
BEGIN;
CREATE TABLE t1(a, b, c);
CREATE INDEX i1 ON t1(b);
}
for {set i 0} {$i < 100} {incr i} {
execsql { INSERT INTO t1 VALUES($i, $i, $i) }
}
execsql COMMIT
execsql ANALYZE
set S [sqlite3_prepare_v2 db "SELECT * FROM t1 WHERE a=? AND b>?" -1 dummy]
sqlite3_step $S
} {SQLITE_DONE}
db auth auth
proc auth {args} {
if {[lindex $args 0] == "SQLITE_READ"} {return SQLITE_DENY}
return SQLITE_OK
}
do_test analyze3-4.2.2 {
sqlite3_reset $S
sqlite3_bind_text $S 2 "abc" 3
sqlite3_step $S
} {SQLITE_AUTH}
do_test analyze3-4.2.4 {
sqlite3_finalize $S
} {SQLITE_AUTH}
# Check that the effect of an authorization error that occurs in a re-prepare
# performed by sqlite3_step() is the same as one that occurs within
# sqlite3Reprepare().
#
do_test analyze3-4.3.1 {
db auth {}
set S [sqlite3_prepare_v2 db "SELECT * FROM t1 WHERE a=? AND b>?" -1 dummy]
execsql { CREATE TABLE t2(d, e, f) }
db auth auth
sqlite3_step $S
} {SQLITE_AUTH}
do_test analyze3-4.3.2 {
sqlite3_finalize $S
} {SQLITE_AUTH}
db auth {}
#-------------------------------------------------------------------------
# Test that modifying bound variables using the clear_bindings() or
# transfer_bindings() APIs works.
#
# analyze3-5.1.*: sqlite3_clear_bindings()
# analyze3-5.2.*: sqlite3_transfer_bindings()
#
do_test analyze3-5.1.1 {
drop_all_tables
execsql {
CREATE TABLE t1(x TEXT COLLATE NOCASE);
CREATE INDEX i1 ON t1(x);
INSERT INTO t1 VALUES('aaa');
INSERT INTO t1 VALUES('abb');
INSERT INTO t1 VALUES('acc');
INSERT INTO t1 VALUES('baa');
INSERT INTO t1 VALUES('bbb');
INSERT INTO t1 VALUES('bcc');
}
set S [sqlite3_prepare_v2 db "SELECT * FROM t1 WHERE x LIKE ?" -1 dummy]
sqlite3_bind_text $S 1 "a%" 2
set R [list]
while { "SQLITE_ROW" == [sqlite3_step $S] } {
lappend R [sqlite3_column_text $S 0]
}
concat [sqlite3_reset $S] $R
} {SQLITE_OK aaa abb acc}
do_test analyze3-5.1.2 {
sqlite3_clear_bindings $S
set R [list]
while { "SQLITE_ROW" == [sqlite3_step $S] } {
lappend R [sqlite3_column_text $S 0]
}
concat [sqlite3_reset $S] $R
} {SQLITE_OK}
do_test analyze3-5.1.3 {
sqlite3_finalize $S
} {SQLITE_OK}
do_test analyze3-5.1.1 {
set S1 [sqlite3_prepare_v2 db "SELECT * FROM t1 WHERE x LIKE ?" -1 dummy]
sqlite3_bind_text $S1 1 "b%" 2
set R [list]
while { "SQLITE_ROW" == [sqlite3_step $S1] } {
lappend R [sqlite3_column_text $S1 0]
}
concat [sqlite3_reset $S1] $R
} {SQLITE_OK baa bbb bcc}
do_test analyze3-5.1.2 {
set S2 [sqlite3_prepare_v2 db "SELECT * FROM t1 WHERE x = ?" -1 dummy]
sqlite3_bind_text $S2 1 "a%" 2
sqlite3_transfer_bindings $S2 $S1
set R [list]
while { "SQLITE_ROW" == [sqlite3_step $S1] } {
lappend R [sqlite3_column_text $S1 0]
}
concat [sqlite3_reset $S1] $R
} {SQLITE_OK aaa abb acc}
do_test analyze3-5.1.3 {
sqlite3_finalize $S2
sqlite3_finalize $S1
} {SQLITE_OK}
#-------------------------------------------------------------------------
do_test analyze3-6.1 {
execsql { DROP TABLE IF EXISTS t1 }
execsql BEGIN
execsql { CREATE TABLE t1(a, b, c) }
for {set i 0} {$i < 1000} {incr i} {
execsql "INSERT INTO t1 VALUES([expr $i/100], 'x', [expr $i/10])"
}
execsql {
CREATE INDEX i1 ON t1(a, b);
CREATE INDEX i2 ON t1(c);
}
execsql COMMIT
execsql ANALYZE
} {}
do_eqp_test analyze3-6-3 {
SELECT * FROM t1 WHERE a = 5 AND c = 13;
} {0 0 0 {SEARCH TABLE t1 USING INDEX i2 (c=?)}}
do_eqp_test analyze3-6-2 {
SELECT * FROM t1 WHERE a = 5 AND b > 'w' AND c = 13;
} {0 0 0 {SEARCH TABLE t1 USING INDEX i2 (c=?)}}
#-----------------------------------------------------------------------------
# 2015-04-20.
# Memory leak in sqlite3Stat4ProbeFree(). (Discovered while fuzzing.)
#
do_execsql_test analyze-7.1 {
DROP TABLE IF EXISTS t1;
CREATE TABLE t1(a INTEGER PRIMARY KEY, b, c);
INSERT INTO t1 VALUES(1,1,'0000');
CREATE INDEX t0b ON t1(b);
ANALYZE;
SELECT c FROM t1 WHERE b=3 AND a BETWEEN 30 AND hex(1);
} {}
# At one point duplicate stat1 entries were causing a memory leak.
#
reset_db
do_execsql_test 7.2 {
CREATE TABLE t1(a,b,c);
CREATE INDEX t1a ON t1(a);
ANALYZE;
SELECT * FROM sqlite_stat1;
INSERT INTO sqlite_stat1(tbl,idx,stat) VALUES('t1','t1a','12000');
INSERT INTO sqlite_stat1(tbl,idx,stat) VALUES('t1','t1a','12000');
ANALYZE sqlite_master;
}
finish_test

View File

@ -0,0 +1,111 @@
# 2011 January 04
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
#
# This file implements regression tests for SQLite library. This file
# implements tests for ANALYZE to verify that multiple rows containing
# a NULL value count as distinct rows for the purposes of analyze
# statistics.
#
# Also include test cases for collating sequences on indices.
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
do_test analyze4-1.0 {
db eval {
CREATE TABLE t1(a,b);
CREATE INDEX t1a ON t1(a);
CREATE INDEX t1b ON t1(b);
INSERT INTO t1 VALUES(1,NULL);
INSERT INTO t1 SELECT a+1, b FROM t1;
INSERT INTO t1 SELECT a+2, b FROM t1;
INSERT INTO t1 SELECT a+4, b FROM t1;
INSERT INTO t1 SELECT a+8, b FROM t1;
INSERT INTO t1 SELECT a+16, b FROM t1;
INSERT INTO t1 SELECT a+32, b FROM t1;
INSERT INTO t1 SELECT a+64, b FROM t1;
ANALYZE;
}
# Should choose the t1a index since it is more specific than t1b.
db eval {EXPLAIN QUERY PLAN SELECT * FROM t1 WHERE a=5 AND b IS NULL}
} {0 0 0 {SEARCH TABLE t1 USING INDEX t1a (a=?)}}
# Verify that the statistics for the t1b index show that it does not
# narrow down the search at all.
#
do_test analyze4-1.1 {
db eval {
SELECT idx, stat FROM sqlite_stat1 WHERE tbl='t1' ORDER BY idx;
}
} {t1a {128 1} t1b {128 128}}
# Change half of the b values from NULL to a constant. Verify
# that the number of rows selected in stat1 is half the total
# number of rows.
#
do_test analyze4-1.2 {
db eval {
UPDATE t1 SET b='x' WHERE a%2;
ANALYZE;
SELECT idx, stat FROM sqlite_stat1 WHERE tbl='t1' ORDER BY idx;
}
} {t1a {128 1} t1b {128 64}}
# Change the t1.b values all back to NULL. Add columns t1.c and t1.d.
# Create multi-column indices using t1.b and verify that ANALYZE
# processes them correctly.
#
do_test analyze4-1.3 {
db eval {
UPDATE t1 SET b=NULL;
ALTER TABLE t1 ADD COLUMN c;
ALTER TABLE t1 ADD COLUMN d;
UPDATE t1 SET c=a/4, d=a/2;
CREATE INDEX t1bcd ON t1(b,c,d);
CREATE INDEX t1cdb ON t1(c,d,b);
CREATE INDEX t1cbd ON t1(c,b,d);
ANALYZE;
SELECT idx, stat FROM sqlite_stat1 WHERE tbl='t1' ORDER BY idx;
}
} {t1a {128 1} t1b {128 128} t1bcd {128 128 4 2} t1cbd {128 4 4 2} t1cdb {128 4 2 2}}
# Verify that collating sequences are taken into account when computing
# ANALYZE statistics.
#
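# With eight rows holding 'abc', 'abC', 'abc ', 'abC ', 'aBc', 'aBC',
# 'aBc ' and 'aBC ', the expected stat values follow from how many values
# each collation treats as distinct: nocase collapses them to 2 distinct
# values (8/2 = 4 rows per value), rtrim ignores trailing spaces leaving 4
# distinct values (8/4 = 2), and binary keeps all 8 distinct (8/8 = 1).
#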
do_test analyze4-2.0 {
db eval {
CREATE TABLE t2(
x INTEGER PRIMARY KEY,
a TEXT COLLATE nocase,
b TEXT COLLATE rtrim,
c TEXT COLLATE binary
);
CREATE INDEX t2a ON t2(a);
CREATE INDEX t2b ON t2(b);
CREATE INDEX t2c ON t2(c);
CREATE INDEX t2c2 ON t2(c COLLATE nocase);
CREATE INDEX t2c3 ON t2(c COLLATE rtrim);
INSERT INTO t2 VALUES(1, 'abc', 'abc', 'abc');
INSERT INTO t2 VALUES(2, 'abC', 'abC', 'abC');
INSERT INTO t2 VALUES(3, 'abc ', 'abc ', 'abc ');
INSERT INTO t2 VALUES(4, 'abC ', 'abC ', 'abC ');
INSERT INTO t2 VALUES(5, 'aBc', 'aBc', 'aBc');
INSERT INTO t2 VALUES(6, 'aBC', 'aBC', 'aBC');
INSERT INTO t2 VALUES(7, 'aBc ', 'aBc ', 'aBc ');
INSERT INTO t2 VALUES(8, 'aBC ', 'aBC ', 'aBC ');
ANALYZE;
SELECT idx, stat FROM sqlite_stat1 WHERE tbl='t2' ORDER BY idx;
}
} {t2a {8 4} t2b {8 2} t2c {8 1} t2c2 {8 4} t2c3 {8 2}}
finish_test

View File

@ -0,0 +1,265 @@
# 2011 January 19
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
#
# This file implements tests for SQLite library. The focus of the tests
# in this file is the use of the sqlite_stat4 histogram data on tables
# with many repeated values and only a few distinct values.
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
ifcapable !stat4&&!stat3 {
finish_test
return
}
set testprefix analyze5
proc eqp {sql {db db}} {
uplevel execsql [list "EXPLAIN QUERY PLAN $sql"] $db
}
proc alpha {blob} {
set ret ""
foreach c [split $blob {}] {
if {[string is alpha $c]} {append ret $c}
}
return $ret
}
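# alpha strips every non-alphabetic character from its argument, so for
# example [alpha "ab1c-2"] returns "abc". It is registered as a SQL
# function just below, alongside lindex.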
db func alpha alpha
db func lindex lindex
unset -nocomplain i t u v w x y z
do_test analyze5-1.0 {
db eval {CREATE TABLE t1(t,u,v TEXT COLLATE nocase,w,x,y,z)}
for {set i 0} {$i < 1000} {incr i} {
set y [expr {$i>=25 && $i<=50}]
set z [expr {($i>=400) + ($i>=700) + ($i>=875)}]
set x $z
set w $z
set t [expr {$z+0.5}]
switch $z {
0 {set u "alpha"; unset x}
1 {set u "bravo"}
2 {set u "charlie"}
3 {set u "delta"; unset w}
}
if {$i%2} {set v $u} {set v [string toupper $u]}
db eval {INSERT INTO t1 VALUES($t,$u,$v,$w,$x,$y,$z)}
}
db eval {
CREATE INDEX t1t ON t1(t); -- 0.5, 1.5, 2.5, and 3.5
CREATE INDEX t1u ON t1(u); -- text
CREATE INDEX t1v ON t1(v); -- mixed case text
CREATE INDEX t1w ON t1(w); -- integers 0, 1, 2 and a few NULLs
CREATE INDEX t1x ON t1(x); -- integers 1, 2, 3 and many NULLs
CREATE INDEX t1y ON t1(y); -- integers 0 and very few 1s
CREATE INDEX t1z ON t1(z); -- integers 0, 1, 2, and 3
ANALYZE;
}
ifcapable stat4 {
db eval {
SELECT DISTINCT lindex(test_decode(sample),0)
FROM sqlite_stat4 WHERE idx='t1u' ORDER BY nlt;
}
} else {
db eval {
SELECT sample FROM sqlite_stat3 WHERE idx='t1u' ORDER BY nlt;
}
}
} {alpha bravo charlie delta}
do_test analyze5-1.1 {
ifcapable stat4 {
db eval {
SELECT DISTINCT lower(lindex(test_decode(sample), 0))
FROM sqlite_stat4 WHERE idx='t1v' ORDER BY 1
}
} else {
db eval {
SELECT lower(sample) FROM sqlite_stat3 WHERE idx='t1v' ORDER BY 1
}
}
} {alpha bravo charlie delta}
ifcapable stat4 {
do_test analyze5-1.2 {
db eval {SELECT idx, count(*) FROM sqlite_stat4 GROUP BY 1 ORDER BY 1}
} {t1t 8 t1u 8 t1v 8 t1w 8 t1x 8 t1y 9 t1z 8}
} else {
do_test analyze5-1.2 {
db eval {SELECT idx, count(*) FROM sqlite_stat3 GROUP BY 1 ORDER BY 1}
} {t1t 4 t1u 4 t1v 4 t1w 4 t1x 4 t1y 2 t1z 4}
}
# Verify that range queries generate the correct row count estimates
#
foreach {testid where index rows} {
1 {z>=0 AND z<=0} t1z 400
2 {z>=1 AND z<=1} t1z 300
3 {z>=2 AND z<=2} t1z 175
4 {z>=3 AND z<=3} t1z 125
5 {z>=4 AND z<=4} t1z 1
6 {z>=-1 AND z<=-1} t1z 1
7 {z>1 AND z<3} t1z 175
8 {z>0 AND z<100} t1z 600
9 {z>=1 AND z<100} t1z 600
10 {z>1 AND z<100} t1z 300
11 {z>=2 AND z<100} t1z 300
12 {z>2 AND z<100} t1z 125
13 {z>=3 AND z<100} t1z 125
14 {z>3 AND z<100} t1z 1
15 {z>=4 AND z<100} t1z 1
16 {z>=-100 AND z<=-1} t1z 1
17 {z>=-100 AND z<=0} t1z 400
18 {z>=-100 AND z<0} t1z 1
19 {z>=-100 AND z<=1} t1z 700
20 {z>=-100 AND z<2} t1z 700
21 {z>=-100 AND z<=2} t1z 875
22 {z>=-100 AND z<3} t1z 875
31 {z>=0.0 AND z<=0.0} t1z 400
32 {z>=1.0 AND z<=1.0} t1z 300
33 {z>=2.0 AND z<=2.0} t1z 175
34 {z>=3.0 AND z<=3.0} t1z 125
35 {z>=4.0 AND z<=4.0} t1z 1
36 {z>=-1.0 AND z<=-1.0} t1z 1
37 {z>1.5 AND z<3.0} t1z 174
38 {z>0.5 AND z<100} t1z 599
39 {z>=1.0 AND z<100} t1z 600
40 {z>1.5 AND z<100} t1z 299
41 {z>=2.0 AND z<100} t1z 300
42 {z>2.1 AND z<100} t1z 124
43 {z>=3.0 AND z<100} t1z 125
44 {z>3.2 AND z<100} t1z 1
45 {z>=4.0 AND z<100} t1z 1
46 {z>=-100 AND z<=-1.0} t1z 1
47 {z>=-100 AND z<=0.0} t1z 400
48 {z>=-100 AND z<0.0} t1z 1
49 {z>=-100 AND z<=1.0} t1z 700
50 {z>=-100 AND z<2.0} t1z 700
51 {z>=-100 AND z<=2.0} t1z 875
52 {z>=-100 AND z<3.0} t1z 875
101 {z=-1} t1z 1
102 {z=0} t1z 400
103 {z=1} t1z 300
104 {z=2} t1z 175
105 {z=3} t1z 125
106 {z=4} t1z 1
107 {z=-10.0} t1z 1
108 {z=0.0} t1z 400
109 {z=1.0} t1z 300
110 {z=2.0} t1z 175
111 {z=3.0} t1z 125
112 {z=4.0} t1z 1
113 {z=1.5} t1z 1
114 {z=2.5} t1z 1
201 {z IN (-1)} t1z 1
202 {z IN (0)} t1z 400
203 {z IN (1)} t1z 300
204 {z IN (2)} t1z 175
205 {z IN (3)} t1z 125
206 {z IN (4)} t1z 1
207 {z IN (0.5)} t1z 1
208 {z IN (0,1)} t1z 700
209 {z IN (0,1,2)} t1z 875
210 {z IN (0,1,2,3)} {} 100
211 {z IN (0,1,2,3,4,5)} {} 100
212 {z IN (1,2)} t1z 475
213 {z IN (2,3)} t1z 300
214 {z=3 OR z=2} t1z 300
215 {z IN (-1,3)} t1z 126
216 {z=-1 OR z=3} t1z 126
300 {y=0} t1y 974
301 {y=1} t1y 26
302 {y=0.1} t1y 1
400 {x IS NULL} t1x 400
} {
# Verify that the expected index is used with the expected row count
# No longer valid due to an EXPLAIN QUERY PLAN output format change
# do_test analyze5-1.${testid}a {
# set x [lindex [eqp "SELECT * FROM t1 WHERE $where"] 3]
# set idx {}
# regexp {INDEX (t1.) } $x all idx
# regexp {~([0-9]+) rows} $x all nrow
# list $idx $nrow
# } [list $index $rows]
# Verify that the same result is achieved regardless of whether or not
# the index is used
do_test analyze5-1.${testid}b {
set w2 [string map {y +y z +z} $where]
set a1 [db eval "SELECT rowid FROM t1 NOT INDEXED WHERE $w2\
ORDER BY +rowid"]
set a2 [db eval "SELECT rowid FROM t1 WHERE $where ORDER BY +rowid"]
if {$a1==$a2} {
set res ok
} else {
set res "a1=\[$a1\] a2=\[$a2\]"
}
set res
} {ok}
}
# Increase the number of NULLs in column x
#
db eval {
UPDATE t1 SET x=NULL;
UPDATE t1 SET x=rowid
WHERE rowid IN (SELECT rowid FROM t1 ORDER BY random() LIMIT 5);
ANALYZE;
}
# Verify that range queries generate the correct row count estimates
#
foreach {testid where index rows} {
500 {x IS NULL AND u='charlie'} t1u 17
501 {x=1 AND u='charlie'} t1x 1
502 {x IS NULL} t1x 995
503 {x=1} t1x 1
504 {x IS NOT NULL} t1x 2
505 {+x IS NOT NULL} {} 500
506 {upper(x) IS NOT NULL} {} 500
} {
# Verify that the expected index is used with the expected row count
# No longer valid due to an EXPLAIN QUERY PLAN format change
# do_test analyze5-1.${testid}a {
# set x [lindex [eqp "SELECT * FROM t1 WHERE $where"] 3]
# set idx {}
# regexp {INDEX (t1.) } $x all idx
# regexp {~([0-9]+) rows} $x all nrow
# list $idx $nrow
# } [list $index $rows]
# Verify that the same result is achieved regardless of whether or not
# the index is used
do_test analyze5-1.${testid}b {
set w2 [string map {y +y z +z} $where]
set a1 [db eval "SELECT rowid FROM t1 NOT INDEXED WHERE $w2\
ORDER BY +rowid"]
set a2 [db eval "SELECT rowid FROM t1 WHERE $where ORDER BY +rowid"]
if {$a1==$a2} {
set res ok
} else {
set res "a1=\[$a1\] a2=\[$a2\]"
}
set res
} {ok}
}
finish_test

View File

@ -0,0 +1,122 @@
# 2011 March 3
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
#
# This file implements tests for SQLite library. The focus of the tests
# in this file is a corner-case query planner optimization involving the
# join order of two tables of different sizes.
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
ifcapable !stat4&&!stat3 {
finish_test
return
}
set testprefix analyze6
proc eqp {sql {db db}} {
uplevel execsql [list "EXPLAIN QUERY PLAN $sql"] $db
}
do_test analyze6-1.0 {
db eval {
CREATE TABLE cat(x INT, yz TEXT);
CREATE UNIQUE INDEX catx ON cat(x);
/* Give cat 16 unique integers */
INSERT INTO cat(x) VALUES(1);
INSERT INTO cat(x) VALUES(2);
INSERT INTO cat(x) SELECT x+2 FROM cat;
INSERT INTO cat(x) SELECT x+4 FROM cat;
INSERT INTO cat(x) SELECT x+8 FROM cat;
CREATE TABLE ev(y INT);
CREATE INDEX evy ON ev(y);
/* ev will hold 32 copies of 16 integers found in cat */
INSERT INTO ev SELECT x FROM cat;
INSERT INTO ev SELECT x FROM cat;
INSERT INTO ev SELECT y FROM ev;
INSERT INTO ev SELECT y FROM ev;
INSERT INTO ev SELECT y FROM ev;
INSERT INTO ev SELECT y FROM ev;
ANALYZE;
SELECT count(*) FROM cat;
SELECT count(*) FROM ev;
}
} {16 512}
# The lowest cost plan is to scan CAT and for each integer there, do a single
# lookup of the first corresponding entry in EV then read off the equal values
# in EV. (Prior to the 2011-03-04 enhancement to where.c, this query would
# have used EV for the outer loop instead of CAT - which was about 3x slower.)
#
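# In other words, the preferred plan is roughly:
#   for each of the 16 rows of cat (outer loop)
#     seek once into index evy and read the ~32 ev rows with y = x
# rather than scanning all 512 ev rows in the outer loop.
#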
do_test analyze6-1.1 {
eqp {SELECT count(*) FROM ev, cat WHERE x=y}
} {0 0 1 {SCAN TABLE cat USING COVERING INDEX catx} 0 1 0 {SEARCH TABLE ev USING COVERING INDEX evy (y=?)}}
# The same plan is chosen regardless of the order of the tables in the
# FROM clause.
#
do_test analyze6-1.2 {
eqp {SELECT count(*) FROM cat, ev WHERE x=y}
} {0 0 0 {SCAN TABLE cat USING COVERING INDEX catx} 0 1 1 {SEARCH TABLE ev USING COVERING INDEX evy (y=?)}}
# Ticket [83ea97620bd3101645138b7b0e71c12c5498fe3d] 2011-03-30
# If ANALYZE is run on an empty table, make sure indices are used
# on the table.
#
do_test analyze6-2.1 {
execsql {
CREATE TABLE t201(x INTEGER PRIMARY KEY, y UNIQUE, z);
CREATE INDEX t201z ON t201(z);
ANALYZE;
}
eqp {SELECT * FROM t201 WHERE z=5}
} {0 0 0 {SEARCH TABLE t201 USING INDEX t201z (z=?)}}
do_test analyze6-2.2 {
eqp {SELECT * FROM t201 WHERE y=5}
} {0 0 0 {SEARCH TABLE t201 USING INDEX sqlite_autoindex_t201_1 (y=?)}}
do_test analyze6-2.3 {
eqp {SELECT * FROM t201 WHERE x=5}
} {0 0 0 {SEARCH TABLE t201 USING INTEGER PRIMARY KEY (rowid=?)}}
do_test analyze6-2.4 {
execsql {
INSERT INTO t201 VALUES(1,2,3),(2,3,4),(3,4,5);
ANALYZE t201;
}
eqp {SELECT * FROM t201 WHERE z=5}
} {0 0 0 {SEARCH TABLE t201 USING INDEX t201z (z=?)}}
do_test analyze6-2.5 {
eqp {SELECT * FROM t201 WHERE y=5}
} {0 0 0 {SEARCH TABLE t201 USING INDEX sqlite_autoindex_t201_1 (y=?)}}
do_test analyze6-2.6 {
eqp {SELECT * FROM t201 WHERE x=5}
} {0 0 0 {SEARCH TABLE t201 USING INTEGER PRIMARY KEY (rowid=?)}}
do_test analyze6-2.7 {
execsql {
INSERT INTO t201 VALUES(4,5,7);
INSERT INTO t201 SELECT x+100, y+100, z+100 FROM t201;
INSERT INTO t201 SELECT x+200, y+200, z+200 FROM t201;
INSERT INTO t201 SELECT x+400, y+400, z+400 FROM t201;
ANALYZE t201;
}
eqp {SELECT * FROM t201 WHERE z=5}
} {0 0 0 {SEARCH TABLE t201 USING INDEX t201z (z=?)}}
do_test analyze6-2.8 {
eqp {SELECT * FROM t201 WHERE y=5}
} {0 0 0 {SEARCH TABLE t201 USING INDEX sqlite_autoindex_t201_1 (y=?)}}
do_test analyze6-2.9 {
eqp {SELECT * FROM t201 WHERE x=5}
} {0 0 0 {SEARCH TABLE t201 USING INTEGER PRIMARY KEY (rowid=?)}}
finish_test


@ -0,0 +1,114 @@
# 2011 April 1
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for SQLite library.
# This file implements tests for the ANALYZE command when an index
# name is given as the argument.
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
# There is nothing to test if ANALYZE is disabled for this build.
#
ifcapable {!analyze||!vtab} {
finish_test
return
}
# Generate some test data
#
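# The "wholenumber" virtual table loaded below enumerates positive integers;
# it is used here to fill t1 with the values 1 through 256.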
do_test analyze7-1.0 {
load_static_extension db wholenumber
execsql {
CREATE TABLE t1(a,b,c,d);
CREATE INDEX t1a ON t1(a);
CREATE INDEX t1b ON t1(b);
CREATE INDEX t1cd ON t1(c,d);
CREATE VIRTUAL TABLE nums USING wholenumber;
INSERT INTO t1 SELECT value, value, value/100, value FROM nums
WHERE value BETWEEN 1 AND 256;
EXPLAIN QUERY PLAN SELECT * FROM t1 WHERE a=123;
}
} {0 0 0 {SEARCH TABLE t1 USING INDEX t1a (a=?)}}
do_test analyze7-1.1 {
execsql {EXPLAIN QUERY PLAN SELECT * FROM t1 WHERE b=123;}
} {0 0 0 {SEARCH TABLE t1 USING INDEX t1b (b=?)}}
do_test analyze7-1.2 {
execsql {EXPLAIN QUERY PLAN SELECT * FROM t1 WHERE c=2;}
} {0 0 0 {SEARCH TABLE t1 USING INDEX t1cd (c=?)}}
# Run an analyze on one of the three indices. Verify that this
# affects the row-count estimate on the one query that uses that
# one index.
#
do_test analyze7-2.0 {
execsql {ANALYZE t1a;}
db cache flush
execsql {EXPLAIN QUERY PLAN SELECT * FROM t1 WHERE a=123;}
} {0 0 0 {SEARCH TABLE t1 USING INDEX t1a (a=?)}}
do_test analyze7-2.1 {
execsql {EXPLAIN QUERY PLAN SELECT * FROM t1 WHERE b=123;}
} {0 0 0 {SEARCH TABLE t1 USING INDEX t1b (b=?)}}
do_test analyze7-2.2 {
execsql {EXPLAIN QUERY PLAN SELECT * FROM t1 WHERE c=2;}
} {0 0 0 {SEARCH TABLE t1 USING INDEX t1cd (c=?)}}
# Verify that since the query planner now thinks that t1a is more
# selective than t1b, it prefers to use t1a.
#
do_test analyze7-2.3 {
execsql {EXPLAIN QUERY PLAN SELECT * FROM t1 WHERE a=123 AND b=123}
} {0 0 0 {SEARCH TABLE t1 USING INDEX t1a (a=?)}}
# Run an analysis on another of the three indices. Verify that this
# new analysis works and does not disrupt the previous analysis.
#
do_test analyze7-3.0 {
execsql {ANALYZE t1cd;}
db cache flush;
execsql {EXPLAIN QUERY PLAN SELECT * FROM t1 WHERE a=123;}
} {0 0 0 {SEARCH TABLE t1 USING INDEX t1a (a=?)}}
do_test analyze7-3.1 {
execsql {EXPLAIN QUERY PLAN SELECT * FROM t1 WHERE b=123;}
} {0 0 0 {SEARCH TABLE t1 USING INDEX t1b (b=?)}}
do_test analyze7-3.2.1 {
execsql {EXPLAIN QUERY PLAN SELECT * FROM t1 WHERE c=?;}
} {0 0 0 {SEARCH TABLE t1 USING INDEX t1cd (c=?)}}
ifcapable stat4||stat3 {
# If ENABLE_STAT4 is defined, SQLite comes up with a different estimated
# row count for (c=2) than it does for (c=?).
do_test analyze7-3.2.2 {
execsql {EXPLAIN QUERY PLAN SELECT * FROM t1 WHERE c=2;}
} {0 0 0 {SEARCH TABLE t1 USING INDEX t1cd (c=?)}}
} else {
# If ENABLE_STAT4 is not defined, the expected row count for (c=2) is the
# same as that for (c=?).
do_test analyze7-3.2.3 {
execsql {EXPLAIN QUERY PLAN SELECT * FROM t1 WHERE c=2;}
} {0 0 0 {SEARCH TABLE t1 USING INDEX t1cd (c=?)}}
}
do_test analyze7-3.3 {
execsql {EXPLAIN QUERY PLAN SELECT * FROM t1 WHERE a=123 AND b=123}
} {0 0 0 {SEARCH TABLE t1 USING INDEX t1a (a=?)}}
ifcapable {!stat4 && !stat3} {
do_test analyze7-3.4 {
execsql {EXPLAIN QUERY PLAN SELECT * FROM t1 WHERE c=123 AND b=123}
} {0 0 0 {SEARCH TABLE t1 USING INDEX t1b (b=?)}}
do_test analyze7-3.5 {
execsql {EXPLAIN QUERY PLAN SELECT * FROM t1 WHERE a=123 AND c=123}
} {0 0 0 {SEARCH TABLE t1 USING INDEX t1a (a=?)}}
}
do_test analyze7-3.6 {
execsql {EXPLAIN QUERY PLAN SELECT * FROM t1 WHERE c=123 AND d=123 AND b=123}
} {0 0 0 {SEARCH TABLE t1 USING INDEX t1cd (c=? AND d=?)}}
finish_test


@ -0,0 +1,115 @@
# 2011 August 13
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
#
# This file implements tests for SQLite library. The focus of the tests
# in this file is testing the capabilities of sqlite_stat3.
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
ifcapable !stat4&&!stat3 {
finish_test
return
}
set testprefix analyze8
proc eqp {sql {db db}} {
uplevel execsql [list "EXPLAIN QUERY PLAN $sql"] $db
}
# Scenario:
#
# Two indices. One has mostly singleton entries, but for a few
# values there are hundreds of entries. The other has 10-20
# entries per value.
#
# Verify that the query planner chooses the first index for the singleton
# entries and the second index for the others.
#
do_test 1.0 {
db eval {
CREATE TABLE t1(a,b,c,d);
CREATE INDEX t1a ON t1(a);
CREATE INDEX t1b ON t1(b);
CREATE INDEX t1c ON t1(c);
}
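# The loop below gives even values of $i a unique value of "a", while odd
# values of $i map "a" to one of only four values (100, 300, 500, 700), each
# occurring roughly 125 times.  "b" takes 100 distinct values with 10 rows
# each, and "c" is the cube of $i/8, so its distinct values are densely
# packed near zero and spread far apart at the high end.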
for {set i 0} {$i<1000} {incr i} {
if {$i%2==0} {set a $i} {set a [expr {($i%8)*100}]}
set b [expr {$i/10}]
set c [expr {$i/8}]
set c [expr {$c*$c*$c}]
db eval {INSERT INTO t1 VALUES($a,$b,$c,$i)}
}
db eval {ANALYZE}
} {}
# The a==100 comparison is expensive because there are many rows
# with a==100. And so for those cases, choose the t1b index.
#
# But for a==99 and a==101, there are far fewer rows, so choose
# the t1a index.
#
do_test 1.1 {
eqp {SELECT * FROM t1 WHERE a=100 AND b=55}
} {0 0 0 {SEARCH TABLE t1 USING INDEX t1b (b=?)}}
do_test 1.2 {
eqp {SELECT * FROM t1 WHERE a=99 AND b=55}
} {0 0 0 {SEARCH TABLE t1 USING INDEX t1a (a=?)}}
do_test 1.3 {
eqp {SELECT * FROM t1 WHERE a=101 AND b=55}
} {0 0 0 {SEARCH TABLE t1 USING INDEX t1a (a=?)}}
do_test 1.4 {
eqp {SELECT * FROM t1 WHERE a=100 AND b=56}
} {0 0 0 {SEARCH TABLE t1 USING INDEX t1b (b=?)}}
do_test 1.5 {
eqp {SELECT * FROM t1 WHERE a=99 AND b=56}
} {0 0 0 {SEARCH TABLE t1 USING INDEX t1a (a=?)}}
do_test 1.6 {
eqp {SELECT * FROM t1 WHERE a=101 AND b=56}
} {0 0 0 {SEARCH TABLE t1 USING INDEX t1a (a=?)}}
do_test 2.1 {
eqp {SELECT * FROM t1 WHERE a=100 AND b BETWEEN 50 AND 54}
} {0 0 0 {SEARCH TABLE t1 USING INDEX t1b (b>? AND b<?)}}
# There are many more values of c between 0 and 100000 than there are
# between 800000 and 900000. So t1c is more selective for the latter
# range.
#
# Test 3.2 is a little unstable. It depends on the planner estimating
# that (b BETWEEN 30 AND 34) will match more rows than (c BETWEEN
# 800000 AND 900000). Which is a pretty close call (50 vs. 32), so
# the planner could get it wrong with an unlucky set of samples. This
# case happens to work, but others ("b BETWEEN 40 AND 44" for example)
# will fail.
#
do_execsql_test 3.0 {
SELECT count(*) FROM t1 WHERE b BETWEEN 30 AND 34;
SELECT count(*) FROM t1 WHERE c BETWEEN 0 AND 100000;
SELECT count(*) FROM t1 WHERE c BETWEEN 800000 AND 900000;
} {50 376 32}
do_test 3.1 {
eqp {SELECT * FROM t1 WHERE b BETWEEN 30 AND 34 AND c BETWEEN 0 AND 100000}
} {0 0 0 {SEARCH TABLE t1 USING INDEX t1b (b>? AND b<?)}}
do_test 3.2 {
eqp {SELECT * FROM t1
WHERE b BETWEEN 30 AND 34 AND c BETWEEN 800000 AND 900000}
} {0 0 0 {SEARCH TABLE t1 USING INDEX t1c (c>? AND c<?)}}
do_test 3.3 {
eqp {SELECT * FROM t1 WHERE a=100 AND c BETWEEN 0 AND 100000}
} {0 0 0 {SEARCH TABLE t1 USING INDEX t1a (a=?)}}
do_test 3.4 {
eqp {SELECT * FROM t1
WHERE a=100 AND c BETWEEN 800000 AND 900000}
} {0 0 0 {SEARCH TABLE t1 USING INDEX t1c (c>? AND c<?)}}
finish_test

File diff suppressed because it is too large


@ -0,0 +1,186 @@
# 2013 August 3
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
#
# This file contains automated tests used to verify that the current build
# (which must be either ENABLE_STAT3 or ENABLE_STAT4) works with both stat3
# and stat4 data.
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
set testprefix analyzeA
ifcapable !stat4&&!stat3 {
finish_test
return
}
# Populate the stat3 table according to the current contents of the db
#
proc populate_stat3 {{bDropTable 1}} {
# Open a second connection on database "test.db" and run ANALYZE. If this
# is an ENABLE_STAT3 build, this is all that is required to create and
# populate the sqlite_stat3 table.
#
sqlite3 db2 test.db
execsql { ANALYZE }
# Now, if this is an ENABLE_STAT4 build, create and populate the
# sqlite_stat3 table based on the stat4 data gathered by the ANALYZE
# above. Then drop the sqlite_stat4 table.
#
ifcapable stat4 {
db2 func lindex lindex
execsql {
PRAGMA writable_schema = on;
CREATE TABLE sqlite_stat3(tbl,idx,neq,nlt,ndlt,sample);
INSERT INTO sqlite_stat3
SELECT DISTINCT tbl, idx,
lindex(neq,0), lindex(nlt,0), lindex(ndlt,0), test_extract(sample, 0)
FROM sqlite_stat4;
} db2
if {$bDropTable} { execsql {DROP TABLE sqlite_stat4} db2 }
execsql { PRAGMA writable_schema = off }
}
# Modify the database schema cookie to ensure that the other connection
# reloads the schema.
#
execsql {
CREATE TABLE obscure_tbl_nm(x);
DROP TABLE obscure_tbl_nm;
} db2
db2 close
}
# Populate the stat4 table according to the current contents of the db
#
proc populate_stat4 {{bDropTable 1}} {
sqlite3 db2 test.db
execsql { ANALYZE }
ifcapable stat3 {
execsql {
PRAGMA writable_schema = on;
CREATE TABLE sqlite_stat4(tbl,idx,neq,nlt,ndlt,sample);
INSERT INTO sqlite_stat4
SELECT tbl, idx, neq, nlt, ndlt, sqlite_record(sample)
FROM sqlite_stat3;
} db2
if {$bDropTable} { execsql {DROP TABLE sqlite_stat3} db2 }
execsql { PRAGMA writable_schema = off }
}
# Modify the database schema cookie to ensure that the other connection
# reloads the schema.
#
execsql {
CREATE TABLE obscure_tbl_nm(x);
DROP TABLE obscure_tbl_nm;
} db2
db2 close
}
# Populate the stat4 table according to the current contents of the db.
# Leave deceptive data in the stat3 table. This data should be ignored
# in favour of that from the stat4 table.
#
proc populate_both {} {
ifcapable stat4 { populate_stat3 0 }
ifcapable stat3 { populate_stat4 0 }
sqlite3 db2 test.db
execsql {
PRAGMA writable_schema = on;
UPDATE sqlite_stat3 SET idx =
CASE idx WHEN 't1b' THEN 't1c' ELSE 't1b'
END;
PRAGMA writable_schema = off;
CREATE TABLE obscure_tbl_nm(x);
DROP TABLE obscure_tbl_nm;
} db2
db2 close
}
foreach {tn analyze_cmd} {
1 populate_stat4
2 populate_stat3
3 populate_both
} {
reset_db
do_test 1.$tn.1 {
execsql { CREATE TABLE t1(a INTEGER PRIMARY KEY, b INT, c INT) }
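# Column c grows exponentially with the row number, so about half of the
# rows have c==0 and large values of c are rare.  Column b is the mirror
# image: about half of the rows have b==125 and small values of b are rare.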
for {set i 0} {$i < 100} {incr i} {
set c [expr int(pow(1.1,$i)/100)]
set b [expr 125 - int(pow(1.1,99-$i))/100]
execsql {INSERT INTO t1 VALUES($i, $b, $c)}
}
} {}
execsql { CREATE INDEX t1b ON t1(b) }
execsql { CREATE INDEX t1c ON t1(c) }
$analyze_cmd
do_execsql_test 1.$tn.2.1 { SELECT count(*) FROM t1 WHERE b=31 } 1
do_execsql_test 1.$tn.2.2 { SELECT count(*) FROM t1 WHERE c=0 } 49
do_execsql_test 1.$tn.2.3 { SELECT count(*) FROM t1 WHERE b=125 } 49
do_execsql_test 1.$tn.2.4 { SELECT count(*) FROM t1 WHERE c=16 } 1
do_eqp_test 1.$tn.2.5 {
SELECT * FROM t1 WHERE b = 31 AND c = 0;
} {0 0 0 {SEARCH TABLE t1 USING INDEX t1b (b=?)}}
do_eqp_test 1.$tn.2.6 {
SELECT * FROM t1 WHERE b = 125 AND c = 16;
} {0 0 0 {SEARCH TABLE t1 USING INDEX t1c (c=?)}}
do_execsql_test 1.$tn.3.1 {
SELECT count(*) FROM t1 WHERE b BETWEEN 0 AND 50
} {6}
do_execsql_test 1.$tn.3.2 {
SELECT count(*) FROM t1 WHERE c BETWEEN 0 AND 50
} {90}
do_execsql_test 1.$tn.3.3 {
SELECT count(*) FROM t1 WHERE b BETWEEN 75 AND 125
} {90}
do_execsql_test 1.$tn.3.4 {
SELECT count(*) FROM t1 WHERE c BETWEEN 75 AND 125
} {6}
do_eqp_test 1.$tn.3.5 {
SELECT * FROM t1 WHERE b BETWEEN 0 AND 50 AND c BETWEEN 0 AND 50
} {0 0 0 {SEARCH TABLE t1 USING INDEX t1b (b>? AND b<?)}}
do_eqp_test 1.$tn.3.6 {
SELECT * FROM t1 WHERE b BETWEEN 75 AND 125 AND c BETWEEN 75 AND 125
} {0 0 0 {SEARCH TABLE t1 USING INDEX t1c (c>? AND c<?)}}
do_eqp_test 1.$tn.3.7 {
SELECT * FROM t1 WHERE b BETWEEN +0 AND +50 AND c BETWEEN +0 AND +50
} {0 0 0 {SEARCH TABLE t1 USING INDEX t1b (b>? AND b<?)}}
do_eqp_test 1.$tn.3.8 {
SELECT * FROM t1
WHERE b BETWEEN cast('0' AS int) AND cast('50.0' AS real)
AND c BETWEEN cast('0' AS numeric) AND cast('50.0' AS real)
} {0 0 0 {SEARCH TABLE t1 USING INDEX t1b (b>? AND b<?)}}
do_eqp_test 1.$tn.3.9 {
SELECT * FROM t1 WHERE b BETWEEN +75 AND +125 AND c BETWEEN +75 AND +125
} {0 0 0 {SEARCH TABLE t1 USING INDEX t1c (c>? AND c<?)}}
do_eqp_test 1.$tn.3.10 {
SELECT * FROM t1
WHERE b BETWEEN cast('75' AS int) AND cast('125.0' AS real)
AND c BETWEEN cast('75' AS numeric) AND cast('125.0' AS real)
} {0 0 0 {SEARCH TABLE t1 USING INDEX t1c (c>? AND c<?)}}
}
finish_test


@ -0,0 +1,682 @@
# 2013 August 3
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
#
# This file contains automated tests used to verify that the sqlite_stat3
# functionality is working. The tests in this file are based on a subset
# of the sqlite_stat4 tests in analyze9.test.
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
set testprefix analyzeB
ifcapable !stat3 {
finish_test
return
}
do_execsql_test 1.0 {
CREATE TABLE t1(a TEXT, b TEXT);
INSERT INTO t1 VALUES('(0)', '(0)');
INSERT INTO t1 VALUES('(1)', '(1)');
INSERT INTO t1 VALUES('(2)', '(2)');
INSERT INTO t1 VALUES('(3)', '(3)');
INSERT INTO t1 VALUES('(4)', '(4)');
CREATE INDEX i1 ON t1(a, b);
} {}
do_execsql_test 1.1 {
ANALYZE;
} {}
do_execsql_test 1.2 {
SELECT tbl,idx,nEq,nLt,nDLt,quote(sample) FROM sqlite_stat3;
} {
t1 i1 1 0 0 '(0)'
t1 i1 1 1 1 '(1)'
t1 i1 1 2 2 '(2)'
t1 i1 1 3 3 '(3)'
t1 i1 1 4 4 '(4)'
}
if {[permutation] != "utf16"} {
do_execsql_test 1.3 {
SELECT tbl,idx,nEq,nLt,nDLt,quote(sample) FROM sqlite_stat3;
} {
t1 i1 1 0 0 '(0)'
t1 i1 1 1 1 '(1)'
t1 i1 1 2 2 '(2)'
t1 i1 1 3 3 '(3)'
t1 i1 1 4 4 '(4)'
}
}
#-------------------------------------------------------------------------
# This is really just to test SQL user function "test_decode".
#
reset_db
do_execsql_test 2.1 {
CREATE TABLE t1(a, b, c);
INSERT INTO t1(a) VALUES('some text');
INSERT INTO t1(a) VALUES(14);
INSERT INTO t1(a) VALUES(NULL);
INSERT INTO t1(a) VALUES(22.0);
INSERT INTO t1(a) VALUES(x'656667');
CREATE INDEX i1 ON t1(a, b, c);
ANALYZE;
SELECT quote(sample) FROM sqlite_stat3;
} {
NULL 14 22.0 {'some text'} X'656667'
}
#-------------------------------------------------------------------------
#
reset_db
do_execsql_test 3.1 {
CREATE TABLE t2(a, b);
CREATE INDEX i2 ON t2(a, b);
BEGIN;
}
do_test 3.2 {
for {set i 0} {$i < 1000} {incr i} {
set a [expr $i / 10]
set b [expr int(rand() * 15.0)]
execsql { INSERT INTO t2 VALUES($a, $b) }
}
execsql COMMIT
} {}
db func lindex lindex
# Each value of "a" occurs exactly 10 times in the table.
#
do_execsql_test 3.3.1 {
SELECT count(*) FROM t2 GROUP BY a;
} [lrange [string repeat "10 " 100] 0 99]
# The first element in the "nEq" list of all samples should therefore be 10.
#
do_execsql_test 3.3.2 {
ANALYZE;
SELECT nEq FROM sqlite_stat3;
} [lrange [string repeat "10 " 100] 0 23]
#-------------------------------------------------------------------------
#
do_execsql_test 3.4 {
DROP TABLE IF EXISTS t1;
CREATE TABLE t1(a INTEGER PRIMARY KEY, b, c);
INSERT INTO t1 VALUES(1, 1, 'one-a');
INSERT INTO t1 VALUES(11, 1, 'one-b');
INSERT INTO t1 VALUES(21, 1, 'one-c');
INSERT INTO t1 VALUES(31, 1, 'one-d');
INSERT INTO t1 VALUES(41, 1, 'one-e');
INSERT INTO t1 VALUES(51, 1, 'one-f');
INSERT INTO t1 VALUES(61, 1, 'one-g');
INSERT INTO t1 VALUES(71, 1, 'one-h');
INSERT INTO t1 VALUES(81, 1, 'one-i');
INSERT INTO t1 VALUES(91, 1, 'one-j');
INSERT INTO t1 SELECT a+1,2,'two' || substr(c,4) FROM t1;
INSERT INTO t1 SELECT a+2,3,'three'||substr(c,4) FROM t1 WHERE c GLOB 'one-*';
INSERT INTO t1 SELECT a+3,4,'four'||substr(c,4) FROM t1 WHERE c GLOB 'one-*';
INSERT INTO t1 SELECT a+4,5,'five'||substr(c,4) FROM t1 WHERE c GLOB 'one-*';
INSERT INTO t1 SELECT a+5,6,'six'||substr(c,4) FROM t1 WHERE c GLOB 'one-*';
CREATE INDEX t1b ON t1(b);
ANALYZE;
SELECT c FROM t1 WHERE b=3 AND a BETWEEN 30 AND 60;
} {three-d three-e three-f}
#-------------------------------------------------------------------------
# These tests verify that the sample selection for stat3 appears to be
# working as designed.
#
reset_db
db func lindex lindex
db func lrange lrange
do_execsql_test 4.0 {
DROP TABLE IF EXISTS t1;
CREATE TABLE t1(a, b, c);
CREATE INDEX i1 ON t1(c, b, a);
}
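# insert_filler_rows_n START ?-ncopy N? ?-nval M?
#
# Insert M consecutive integer values beginning with START into t1, using
# the same value for columns a, b and c, and repeating each value N times.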
proc insert_filler_rows_n {iStart args} {
set A(-ncopy) 1
set A(-nval) 1
foreach {k v} $args {
if {[info exists A($k)]==0} { error "no such option: $k" }
set A($k) $v
}
if {[llength $args] % 2} {
error "option requires an argument: [lindex $args end]"
}
for {set i 0} {$i < $A(-nval)} {incr i} {
set iVal [expr $iStart+$i]
for {set j 0} {$j < $A(-ncopy)} {incr j} {
execsql { INSERT INTO t1 VALUES($iVal, $iVal, $iVal) }
}
}
}
do_test 4.1 {
execsql { BEGIN }
insert_filler_rows_n 0 -ncopy 10 -nval 19
insert_filler_rows_n 20 -ncopy 1 -nval 100
execsql {
INSERT INTO t1(c, b, a) VALUES(200, 1, 'a');
INSERT INTO t1(c, b, a) VALUES(200, 1, 'b');
INSERT INTO t1(c, b, a) VALUES(200, 1, 'c');
INSERT INTO t1(c, b, a) VALUES(200, 2, 'e');
INSERT INTO t1(c, b, a) VALUES(200, 2, 'f');
INSERT INTO t1(c, b, a) VALUES(201, 3, 'g');
INSERT INTO t1(c, b, a) VALUES(201, 4, 'h');
ANALYZE;
SELECT count(*) FROM sqlite_stat3;
SELECT count(*) FROM t1;
}
} {24 297}
do_execsql_test 4.2 {
SELECT neq, nlt, ndlt, sample FROM sqlite_stat3 ORDER BY rowid LIMIT 16;
} {
10 0 0 0
10 10 1 1
10 20 2 2
10 30 3 3
10 40 4 4
10 50 5 5
10 60 6 6
10 70 7 7
10 80 8 8
10 90 9 9
10 100 10 10
10 110 11 11
10 120 12 12
10 130 13 13
10 140 14 14
10 150 15 15
}
do_execsql_test 4.3 {
SELECT neq, nlt, ndlt, sample FROM sqlite_stat3
ORDER BY rowid DESC LIMIT 2;
} {
2 295 120 201
5 290 119 200
}
do_execsql_test 4.4 { SELECT count(DISTINCT c) FROM t1 WHERE c<201 } 120
do_execsql_test 4.5 { SELECT count(DISTINCT c) FROM t1 WHERE c<200 } 119
reset_db
do_test 4.7 {
execsql {
BEGIN;
CREATE TABLE t1(o,t INTEGER PRIMARY KEY);
CREATE INDEX i1 ON t1(o);
}
for {set i 0} {$i<10000} {incr i [expr (($i<1000)?1:10)]} {
execsql { INSERT INTO t1 VALUES('x', $i) }
}
execsql {
COMMIT;
ANALYZE;
SELECT count(*) FROM sqlite_stat3;
}
} {1}
do_execsql_test 4.8 {
SELECT sample FROM sqlite_stat3;
} {x}
#-------------------------------------------------------------------------
# The following would cause a crash at one point.
#
reset_db
do_execsql_test 5.1 {
PRAGMA encoding = 'utf-16';
CREATE TABLE t0(v);
ANALYZE;
}
#-------------------------------------------------------------------------
# This was also crashing (corrupt sqlite_stat3 table).
#
reset_db
do_execsql_test 6.1 {
CREATE TABLE t1(a, b);
CREATE INDEX i1 ON t1(a);
CREATE INDEX i2 ON t1(b);
INSERT INTO t1 VALUES(1, 1);
INSERT INTO t1 VALUES(2, 2);
INSERT INTO t1 VALUES(3, 3);
INSERT INTO t1 VALUES(4, 4);
INSERT INTO t1 VALUES(5, 5);
ANALYZE;
PRAGMA writable_schema = 1;
CREATE TEMP TABLE x1 AS
SELECT tbl,idx,neq,nlt,ndlt,sample FROM sqlite_stat3
ORDER BY (rowid%5), rowid;
DELETE FROM sqlite_stat3;
INSERT INTO sqlite_stat3 SELECT * FROM x1;
PRAGMA writable_schema = 0;
ANALYZE sqlite_master;
}
do_execsql_test 6.2 {
SELECT * FROM t1 WHERE a = 'abc';
}
#-------------------------------------------------------------------------
# The following tests experiment with adding corrupted records to the
# 'sample' column of the sqlite_stat3 table.
#
reset_db
sqlite3_db_config_lookaside db 0 0 0
do_execsql_test 7.1 {
CREATE TABLE t1(a, b);
CREATE INDEX i1 ON t1(a, b);
INSERT INTO t1 VALUES(1, 1);
INSERT INTO t1 VALUES(2, 2);
INSERT INTO t1 VALUES(3, 3);
INSERT INTO t1 VALUES(4, 4);
INSERT INTO t1 VALUES(5, 5);
ANALYZE;
UPDATE sqlite_stat3 SET sample = X'' WHERE rowid = 1;
ANALYZE sqlite_master;
}
do_execsql_test 7.2 {
UPDATE sqlite_stat3 SET sample = X'FFFF';
ANALYZE sqlite_master;
SELECT * FROM t1 WHERE a = 1;
} {1 1}
do_execsql_test 7.3 {
ANALYZE;
UPDATE sqlite_stat3 SET neq = '0 0 0';
ANALYZE sqlite_master;
SELECT * FROM t1 WHERE a = 1;
} {1 1}
do_execsql_test 7.4 {
ANALYZE;
UPDATE sqlite_stat3 SET ndlt = '0 0 0';
ANALYZE sqlite_master;
SELECT * FROM t1 WHERE a = 3;
} {3 3}
do_execsql_test 7.5 {
ANALYZE;
UPDATE sqlite_stat3 SET nlt = '0 0 0';
ANALYZE sqlite_master;
SELECT * FROM t1 WHERE a = 5;
} {5 5}
#-------------------------------------------------------------------------
#
reset_db
do_execsql_test 8.1 {
CREATE TABLE t1(x TEXT);
CREATE INDEX i1 ON t1(x);
INSERT INTO t1 VALUES('1');
INSERT INTO t1 VALUES('2');
INSERT INTO t1 VALUES('3');
INSERT INTO t1 VALUES('4');
ANALYZE;
}
do_execsql_test 8.2 {
SELECT * FROM t1 WHERE x = 3;
} {3}
#-------------------------------------------------------------------------
#
reset_db
do_execsql_test 9.1 {
CREATE TABLE t1(a, b, c, d, e);
CREATE INDEX i1 ON t1(a, b, c, d);
CREATE INDEX i2 ON t1(e);
}
do_test 9.2 {
execsql BEGIN;
for {set i 0} {$i < 100} {incr i} {
execsql "INSERT INTO t1 VALUES('x', 'y', 'z', $i, [expr $i/2])"
}
for {set i 0} {$i < 20} {incr i} {
execsql "INSERT INTO t1 VALUES('x', 'y', 'z', 101, $i)"
}
for {set i 102} {$i < 200} {incr i} {
execsql "INSERT INTO t1 VALUES('x', 'y', 'z', $i, [expr $i/2])"
}
execsql COMMIT
execsql ANALYZE
} {}
do_eqp_test 9.3.1 {
SELECT * FROM t1 WHERE a='x' AND b='y' AND c='z' AND d=101 AND e=5;
} {/t1 USING INDEX i1/}
do_eqp_test 9.3.2 {
SELECT * FROM t1 WHERE a='x' AND b='y' AND c='z' AND d=99 AND e=5;
} {/t1 USING INDEX i1/}
set value_d [expr 101]
do_eqp_test 9.4.1 {
SELECT * FROM t1 WHERE a='x' AND b='y' AND c='z' AND d=$value_d AND e=5
} {/t1 USING INDEX i1/}
set value_d [expr 99]
do_eqp_test 9.4.2 {
SELECT * FROM t1 WHERE a='x' AND b='y' AND c='z' AND d=$value_d AND e=5
} {/t1 USING INDEX i1/}
#-------------------------------------------------------------------------
# Check that the planner takes stat3 data into account when considering
# "IS NULL" and "IS NOT NULL" constraints.
#
do_execsql_test 10.1.1 {
DROP TABLE IF EXISTS t3;
CREATE TABLE t3(a, b);
CREATE INDEX t3a ON t3(a);
CREATE INDEX t3b ON t3(b);
}
do_test 10.1.2 {
for {set i 1} {$i < 100} {incr i} {
if {$i>90} { set a $i } else { set a NULL }
set b [expr $i % 5]
execsql "INSERT INTO t3 VALUES($a, $b)"
}
execsql ANALYZE
} {}
do_eqp_test 10.1.3 {
SELECT * FROM t3 WHERE a IS NULL AND b = 2
} {/t3 USING INDEX t3b/}
do_eqp_test 10.1.4 {
SELECT * FROM t3 WHERE a IS NOT NULL AND b = 2
} {/t3 USING INDEX t3a/}
#-------------------------------------------------------------------------
# Check that stat3 data is used correctly with non-default collation
# sequences.
#
foreach {tn schema} {
1 {
CREATE TABLE t4(a COLLATE nocase, b);
CREATE INDEX t4a ON t4(a);
CREATE INDEX t4b ON t4(b);
}
2 {
CREATE TABLE t4(a, b);
CREATE INDEX t4a ON t4(a COLLATE nocase);
CREATE INDEX t4b ON t4(b);
}
} {
drop_all_tables
do_test 11.$tn.1 { execsql $schema } {}
do_test 11.$tn.2 {
for {set i 0} {$i < 100} {incr i} {
if { ($i % 10)==0 } { set a ABC } else { set a DEF }
set b [expr $i % 5]
execsql { INSERT INTO t4 VALUES($a, $b) }
}
execsql ANALYZE
} {}
do_eqp_test 11.$tn.3 {
SELECT * FROM t4 WHERE a = 'def' AND b = 3;
} {/t4 USING INDEX t4b/}
if {$tn==1} {
set sql "SELECT * FROM t4 WHERE a = 'abc' AND b = 3;"
do_eqp_test 11.$tn.4 $sql {/t4 USING INDEX t4a/}
} else {
set sql "SELECT * FROM t4 WHERE a = 'abc' COLLATE nocase AND b = 3;"
do_eqp_test 11.$tn.5 $sql {/t4 USING INDEX t4a/}
set sql "SELECT * FROM t4 WHERE a COLLATE nocase = 'abc' AND b = 3;"
do_eqp_test 11.$tn.6 $sql {/t4 USING INDEX t4a/}
}
}
#-------------------------------------------------------------------------
# Test that nothing untoward happens if the stat3 table contains entries
# for indexes that do not exist. Or NULL values in the idx column.
# Or NULL values in any of the other columns.
#
drop_all_tables
do_execsql_test 15.1 {
CREATE TABLE x1(a, b, UNIQUE(a, b));
INSERT INTO x1 VALUES(1, 2);
INSERT INTO x1 VALUES(3, 4);
INSERT INTO x1 VALUES(5, 6);
ANALYZE;
INSERT INTO sqlite_stat3 VALUES(NULL, NULL, NULL, NULL, NULL, NULL);
}
db close
sqlite3 db test.db
do_execsql_test 15.2 { SELECT * FROM x1 } {1 2 3 4 5 6}
do_execsql_test 15.3 {
INSERT INTO sqlite_stat3 VALUES(42, 42, 42, 42, 42, 42);
}
db close
sqlite3 db test.db
do_execsql_test 15.4 { SELECT * FROM x1 } {1 2 3 4 5 6}
do_execsql_test 15.5 {
UPDATE sqlite_stat1 SET stat = NULL;
}
db close
sqlite3 db test.db
do_execsql_test 15.6 { SELECT * FROM x1 } {1 2 3 4 5 6}
do_execsql_test 15.7 {
ANALYZE;
UPDATE sqlite_stat1 SET tbl = 'no such tbl';
}
db close
sqlite3 db test.db
do_execsql_test 15.8 { SELECT * FROM x1 } {1 2 3 4 5 6}
do_execsql_test 15.9 {
ANALYZE;
UPDATE sqlite_stat3 SET neq = NULL, nlt=NULL, ndlt=NULL;
}
db close
sqlite3 db test.db
do_execsql_test 15.10 { SELECT * FROM x1 } {1 2 3 4 5 6}
# This is just for coverage....
do_execsql_test 15.11 {
ANALYZE;
UPDATE sqlite_stat1 SET stat = stat || ' unordered';
}
db close
sqlite3 db test.db
do_execsql_test 15.12 { SELECT * FROM x1 } {1 2 3 4 5 6}
#-------------------------------------------------------------------------
# Test that allocations used for sqlite_stat3 samples are included in
# the quantity returned by SQLITE_DBSTATUS_SCHEMA_USED.
#
set one [string repeat x 1000]
set two [string repeat x 2000]
do_test 16.1 {
reset_db
execsql {
CREATE TABLE t1(a, UNIQUE(a));
INSERT INTO t1 VALUES($one);
ANALYZE;
}
set nByte [lindex [sqlite3_db_status db SCHEMA_USED 0] 1]
reset_db
execsql {
CREATE TABLE t1(a, UNIQUE(a));
INSERT INTO t1 VALUES($two);
ANALYZE;
}
set nByte2 [lindex [sqlite3_db_status db SCHEMA_USED 0] 1]
expr {$nByte2 > $nByte+950 && $nByte2 < $nByte+1050}
} {1}
#-------------------------------------------------------------------------
# Test that stat3 data may be used with partial indexes.
#
do_test 17.1 {
reset_db
execsql {
CREATE TABLE t1(a, b, c, d);
CREATE INDEX i1 ON t1(a, b) WHERE d IS NOT NULL;
INSERT INTO t1 VALUES(-1, -1, -1, NULL);
INSERT INTO t1 SELECT 2*a,2*b,2*c,d FROM t1;
INSERT INTO t1 SELECT 2*a,2*b,2*c,d FROM t1;
INSERT INTO t1 SELECT 2*a,2*b,2*c,d FROM t1;
INSERT INTO t1 SELECT 2*a,2*b,2*c,d FROM t1;
INSERT INTO t1 SELECT 2*a,2*b,2*c,d FROM t1;
INSERT INTO t1 SELECT 2*a,2*b,2*c,d FROM t1;
}
for {set i 0} {$i < 32} {incr i} {
execsql { INSERT INTO t1 VALUES($i%2, $b, $i/2, 'abc') }
}
execsql {ANALYZE main.t1}
} {}
do_catchsql_test 17.1.2 {
ANALYZE temp.t1;
} {1 {no such table: temp.t1}}
do_eqp_test 17.2 {
SELECT * FROM t1 WHERE d IS NOT NULL AND a=0;
} {/USING INDEX i1/}
do_eqp_test 17.3 {
SELECT * FROM t1 WHERE d IS NOT NULL AND a=0;
} {/USING INDEX i1/}
do_execsql_test 17.4 {
CREATE INDEX i2 ON t1(c) WHERE d IS NOT NULL;
ANALYZE main.i2;
}
do_eqp_test 17.5 {
SELECT * FROM t1 WHERE d IS NOT NULL AND a=0;
} {/USING INDEX i1/}
do_eqp_test 17.6 {
SELECT * FROM t1 WHERE d IS NOT NULL AND a=0 AND b=0 AND c=10;
} {/USING INDEX i2/}
#-------------------------------------------------------------------------
#
do_test 18.1 {
reset_db
execsql {
CREATE TABLE t1(a, b);
CREATE INDEX i1 ON t1(a, b);
}
for {set i 0} {$i < 9} {incr i} {
execsql {
INSERT INTO t1 VALUES($i, 0);
INSERT INTO t1 VALUES($i, 0);
INSERT INTO t1 VALUES($i, 0);
INSERT INTO t1 VALUES($i, 0);
INSERT INTO t1 VALUES($i, 0);
INSERT INTO t1 VALUES($i, 0);
INSERT INTO t1 VALUES($i, 0);
INSERT INTO t1 VALUES($i, 0);
INSERT INTO t1 VALUES($i, 0);
INSERT INTO t1 VALUES($i, 0);
INSERT INTO t1 VALUES($i, 0);
INSERT INTO t1 VALUES($i, 0);
INSERT INTO t1 VALUES($i, 0);
INSERT INTO t1 VALUES($i, 0);
INSERT INTO t1 VALUES($i, 0);
}
}
execsql ANALYZE
execsql { SELECT count(*) FROM sqlite_stat3 }
} {9}
#-------------------------------------------------------------------------
# For coverage.
#
ifcapable view {
do_test 19.1 {
reset_db
execsql {
CREATE TABLE t1(x, y);
CREATE INDEX i1 ON t1(x, y);
CREATE VIEW v1 AS SELECT * FROM t1;
ANALYZE;
}
} {}
}
ifcapable auth {
proc authproc {op args} {
if {$op == "SQLITE_ANALYZE"} { return "SQLITE_DENY" }
return "SQLITE_OK"
}
do_test 19.2 {
reset_db
db auth authproc
execsql {
CREATE TABLE t1(x, y);
CREATE VIEW v1 AS SELECT * FROM t1;
}
catchsql ANALYZE
} {1 {not authorized}}
}
#-------------------------------------------------------------------------
#
reset_db
proc r {args} { expr rand() }
db func r r
db func lrange lrange
do_test 20.1 {
execsql {
CREATE TABLE t1(a,b,c,d);
CREATE INDEX i1 ON t1(a,b,c,d);
}
for {set i 0} {$i < 16} {incr i} {
execsql {
INSERT INTO t1 VALUES($i, r(), r(), r());
INSERT INTO t1 VALUES($i, $i, r(), r());
INSERT INTO t1 VALUES($i, $i, $i, r());
INSERT INTO t1 VALUES($i, $i, $i, $i);
INSERT INTO t1 VALUES($i, $i, $i, $i);
INSERT INTO t1 VALUES($i, $i, $i, r());
INSERT INTO t1 VALUES($i, $i, r(), r());
INSERT INTO t1 VALUES($i, r(), r(), r());
}
}
} {}
do_execsql_test 20.2 { ANALYZE }
for {set i 0} {$i<16} {incr i} {
set val $i
do_execsql_test 20.3.$i {
SELECT count(*) FROM sqlite_stat3 WHERE sample=$val
} {1}
}
finish_test


@ -0,0 +1,167 @@
# 2014-07-22
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
#
# This file contains automated tests used to verify that the text terms
# at the end of sqlite_stat1.stat are processed correctly.
#
# (1) "unordered" means that the index cannot be used for ORDER BY
# or for range queries
#
# (2) "sz=NNN" sets the relative size of the index entries
#
# (3) All other fields are silently ignored
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
set testprefix analyzeC
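# As a purely illustrative example (not used by the tests below), a stat
# entry that combines both recognized text terms might be created like this:
#
#   INSERT INTO sqlite_stat1(tbl,idx,stat)
#     VALUES('t1','t1a','12345 2 sz=30 unordered');
#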
# Baseline case. Range queries work OK. Indexes can be used for
# ORDER BY.
#
do_execsql_test 1.0 {
CREATE TABLE t1(a,b,c);
INSERT INTO t1(a,b,c)
VALUES(1,2,3),(7,8,9),(4,5,6),(10,11,12),(4,8,12),(1,11,111);
CREATE INDEX t1a ON t1(a);
CREATE INDEX t1b ON t1(b);
ANALYZE;
DELETE FROM sqlite_stat1;
INSERT INTO sqlite_stat1(tbl,idx,stat)
VALUES('t1','t1a','12345 2'),('t1','t1b','12345 4');
ANALYZE sqlite_master;
SELECT *, '#' FROM t1 WHERE a BETWEEN 3 AND 8 ORDER BY c;
} {4 5 6 # 7 8 9 # 4 8 12 #}
do_execsql_test 1.1 {
EXPLAIN QUERY PLAN
SELECT *, '#' FROM t1 WHERE a BETWEEN 3 AND 8 ORDER BY c;
} {/.* USING INDEX t1a .a>. AND a<...*/}
do_execsql_test 1.2 {
SELECT c FROM t1 ORDER BY a;
} {3 111 6 12 9 12}
do_execsql_test 1.3 {
EXPLAIN QUERY PLAN
SELECT c FROM t1 ORDER BY a;
} {/.*SCAN TABLE t1 USING INDEX t1a.*/}
do_execsql_test 1.3x {
EXPLAIN QUERY PLAN
SELECT c FROM t1 ORDER BY a;
} {~/.*B-TREE FOR ORDER BY.*/}
# Now mark the t1a index as "unordered". Range queries and ORDER BY no
# longer use the index, but equality queries do.
#
do_execsql_test 2.0 {
UPDATE sqlite_stat1 SET stat='12345 2 unordered' WHERE idx='t1a';
ANALYZE sqlite_master;
SELECT *, '#' FROM t1 WHERE a BETWEEN 3 AND 8 ORDER BY c;
} {4 5 6 # 7 8 9 # 4 8 12 #}
do_execsql_test 2.1 {
EXPLAIN QUERY PLAN
SELECT *, '#' FROM t1 WHERE a BETWEEN 3 AND 8 ORDER BY c;
} {~/.*USING INDEX.*/}
do_execsql_test 2.2 {
SELECT c FROM t1 ORDER BY a;
} {3 111 6 12 9 12}
do_execsql_test 2.3 {
EXPLAIN QUERY PLAN
SELECT c FROM t1 ORDER BY a;
} {~/.*USING INDEX.*/}
do_execsql_test 2.3x {
EXPLAIN QUERY PLAN
SELECT c FROM t1 ORDER BY a;
} {/.*B-TREE FOR ORDER BY.*/}
# Ignore extraneous text parameters in the sqlite_stat1.stat field.
#
do_execsql_test 3.0 {
UPDATE sqlite_stat1 SET stat='12345 2 whatever=5 unordered xyzzy=11'
WHERE idx='t1a';
ANALYZE sqlite_master;
SELECT *, '#' FROM t1 WHERE a BETWEEN 3 AND 8 ORDER BY c;
} {4 5 6 # 7 8 9 # 4 8 12 #}
do_execsql_test 3.1 {
EXPLAIN QUERY PLAN
SELECT *, '#' FROM t1 WHERE a BETWEEN 3 AND 8 ORDER BY c;
} {~/.*USING INDEX.*/}
do_execsql_test 3.2 {
SELECT c FROM t1 ORDER BY a;
} {3 111 6 12 9 12}
do_execsql_test 3.3 {
EXPLAIN QUERY PLAN
SELECT c FROM t1 ORDER BY a;
} {~/.*USING INDEX.*/}
do_execsql_test 3.3x {
EXPLAIN QUERY PLAN
SELECT c FROM t1 ORDER BY a;
} {/.*B-TREE FOR ORDER BY.*/}
# The sz=NNN parameter determines which index to scan
#
do_execsql_test 4.0 {
DROP INDEX t1a;
CREATE INDEX t1ab ON t1(a,b);
CREATE INDEX t1ca ON t1(c,a);
DELETE FROM sqlite_stat1;
INSERT INTO sqlite_stat1(tbl,idx,stat)
VALUES('t1','t1ab','12345 3 2 sz=10'),('t1','t1ca','12345 3 2 sz=20');
ANALYZE sqlite_master;
SELECT count(a) FROM t1;
} {6}
do_execsql_test 4.1 {
EXPLAIN QUERY PLAN
SELECT count(a) FROM t1;
} {/.*INDEX t1ab.*/}
do_execsql_test 4.2 {
DELETE FROM sqlite_stat1;
INSERT INTO sqlite_stat1(tbl,idx,stat)
VALUES('t1','t1ab','12345 3 2 sz=20'),('t1','t1ca','12345 3 2 sz=10');
ANALYZE sqlite_master;
SELECT count(a) FROM t1;
} {6}
do_execsql_test 4.3 {
EXPLAIN QUERY PLAN
SELECT count(a) FROM t1;
} {/.*INDEX t1ca.*/}
# The sz=NNN parameter works even if there is other extraneous text
# in the sqlite_stat1.stat column.
#
do_execsql_test 5.0 {
DELETE FROM sqlite_stat1;
INSERT INTO sqlite_stat1(tbl,idx,stat)
VALUES('t1','t1ab','12345 3 2 x=5 sz=10 y=10'),
('t1','t1ca','12345 3 2 whatever sz=20 junk');
ANALYZE sqlite_master;
SELECT count(a) FROM t1;
} {6}
do_execsql_test 5.1 {
EXPLAIN QUERY PLAN
SELECT count(a) FROM t1;
} {/.*INDEX t1ab.*/}
do_execsql_test 5.2 {
DELETE FROM sqlite_stat1;
INSERT INTO sqlite_stat1(tbl,idx,stat)
VALUES('t1','t1ca','12345 3 2 x=5 sz=10 y=10'),
('t1','t1ab','12345 3 2 whatever sz=20 junk');
ANALYZE sqlite_master;
SELECT count(a) FROM t1;
} {6}
do_execsql_test 5.3 {
EXPLAIN QUERY PLAN
SELECT count(a) FROM t1;
} {/.*INDEX t1ca.*/}
finish_test


@ -0,0 +1,116 @@
# 2005 July 22
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for SQLite library.
# This file implements tests for the ANALYZE command.
#
# $Id: analyze.test,v 1.9 2008/08/11 18:44:58 drh Exp $
set testdir [file dirname $argv0]
source $testdir/tester.tcl
set ::testprefix analyzeD
ifcapable {!stat4} {
finish_test
return
}
# Set up a table with the following properties:
#
# * Contains 1000 rows.
# * Column a contains even integers between 0 and 18, inclusive (so that
# a=? for any such integer matches 100 rows).
# * Column b contains integers between 0 and 9, inclusive.
# * Column c contains integers between 0 and 199, inclusive (so that
# for any such integer, c=? matches 5 rows).
# * Then add 7 rows with a new value for "a" - 3001. The stat4 table will
# not contain any samples with a=3001.
#
do_execsql_test 1.0 {
CREATE TABLE t1(a, b, c);
}
do_test 1.1 {
for {set i 1} {$i < 1000} {incr i} {
set c [expr $i % 200]
execsql { INSERT INTO t1(a, b, c) VALUES( 2*($i/100), $i%10, $c ) }
}
execsql {
INSERT INTO t1 VALUES(3001, 3001, 3001);
INSERT INTO t1 VALUES(3001, 3001, 3002);
INSERT INTO t1 VALUES(3001, 3001, 3003);
INSERT INTO t1 VALUES(3001, 3001, 3004);
INSERT INTO t1 VALUES(3001, 3001, 3005);
INSERT INTO t1 VALUES(3001, 3001, 3006);
INSERT INTO t1 VALUES(3001, 3001, 3007);
CREATE INDEX t1_ab ON t1(a, b);
CREATE INDEX t1_c ON t1(c);
ANALYZE;
}
} {}
# With full ANALYZE data, SQLite sees that c=150 (5 rows) is better than
# a=3001 (7 rows).
#
do_eqp_test 1.2 {
SELECT * FROM t1 WHERE a=3001 AND c=150;
} {
0 0 0 {SEARCH TABLE t1 USING INDEX t1_c (c=?)}
}
do_test 1.3 {
execsql { DELETE FROM sqlite_stat1 }
db close
sqlite3 db test.db
} {}
# Without stat1, because 3001 is larger than all samples in the stat4
# table, SQLite thinks that a=3001 matches just 1 row. So it (incorrectly)
# chooses it over the c=150 index (5 rows). Even with stat1 data, things
# worked this way before commit [e6f7f97dbc].
#
do_eqp_test 1.4 {
SELECT * FROM t1 WHERE a=3001 AND c=150;
} {
0 0 0 {SEARCH TABLE t1 USING INDEX t1_ab (a=?)}
}
do_test 1.5 {
execsql {
UPDATE t1 SET a=13 WHERE a = 3001;
ANALYZE;
}
} {}
do_eqp_test 1.6 {
SELECT * FROM t1 WHERE a=13 AND c=150;
} {
0 0 0 {SEARCH TABLE t1 USING INDEX t1_c (c=?)}
}
do_test 1.7 {
execsql { DELETE FROM sqlite_stat1 }
db close
sqlite3 db test.db
} {}
# Same test as 1.4, except this time the 7 rows that match the a=? condition
# do not have values larger than all of the samples in the stat4 table. So SQLite
# gets this right, even without stat1 data.
do_eqp_test 1.8 {
SELECT * FROM t1 WHERE a=13 AND c=150;
} {
0 0 0 {SEARCH TABLE t1 USING INDEX t1_c (c=?)}
}
finish_test


@ -0,0 +1,242 @@
# 2014-10-08
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements tests for using STAT4 information
# on a descending index in a range query.
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
set ::testprefix analyzeE
ifcapable {!stat4} {
finish_test
return
}
# Verify that range queries on an ASCENDING index will use the
# index only if the range covers only a small fraction of the
# entries.
#
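# (The table created below holds the 1001 integers 1000 through 2000, so a
# range that covers more than a few hundred of them is expected to fall
# back to a full table scan.)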
do_execsql_test analyzeE-1.0 {
CREATE TABLE t1(a,b);
WITH RECURSIVE
cnt(x) AS (VALUES(1000) UNION ALL SELECT x+1 FROM cnt WHERE x<2000)
INSERT INTO t1(a,b) SELECT x, x FROM cnt;
CREATE INDEX t1a ON t1(a);
ANALYZE;
} {}
do_execsql_test analyzeE-1.1 {
EXPLAIN QUERY PLAN
SELECT * FROM t1 WHERE a BETWEEN 500 AND 2500;
} {/SCAN TABLE t1/}
do_execsql_test analyzeE-1.2 {
EXPLAIN QUERY PLAN
SELECT * FROM t1 WHERE a BETWEEN 2900 AND 3000;
} {/SEARCH TABLE t1 USING INDEX t1a/}
do_execsql_test analyzeE-1.3 {
EXPLAIN QUERY PLAN
SELECT * FROM t1 WHERE a BETWEEN 1700 AND 1750;
} {/SEARCH TABLE t1 USING INDEX t1a/}
do_execsql_test analyzeE-1.4 {
EXPLAIN QUERY PLAN
SELECT * FROM t1 WHERE a BETWEEN 1 AND 500
} {/SEARCH TABLE t1 USING INDEX t1a/}
do_execsql_test analyzeE-1.5 {
EXPLAIN QUERY PLAN
SELECT * FROM t1 WHERE a BETWEEN 3000 AND 3000000
} {/SEARCH TABLE t1 USING INDEX t1a/}
do_execsql_test analyzeE-1.6 {
EXPLAIN QUERY PLAN
SELECT * FROM t1 WHERE a<500
} {/SEARCH TABLE t1 USING INDEX t1a/}
do_execsql_test analyzeE-1.7 {
EXPLAIN QUERY PLAN
SELECT * FROM t1 WHERE a>2500
} {/SEARCH TABLE t1 USING INDEX t1a/}
do_execsql_test analyzeE-1.8 {
EXPLAIN QUERY PLAN
SELECT * FROM t1 WHERE a>1900
} {/SEARCH TABLE t1 USING INDEX t1a/}
do_execsql_test analyzeE-1.9 {
EXPLAIN QUERY PLAN
SELECT * FROM t1 WHERE a>1100
} {/SCAN TABLE t1/}
do_execsql_test analyzeE-1.10 {
EXPLAIN QUERY PLAN
SELECT * FROM t1 WHERE a<1100
} {/SEARCH TABLE t1 USING INDEX t1a/}
do_execsql_test analyzeE-1.11 {
EXPLAIN QUERY PLAN
SELECT * FROM t1 WHERE a<1900
} {/SCAN TABLE t1/}
# Verify that everything works the same on a DESCENDING index.
#
do_execsql_test analyzeE-2.0 {
DROP INDEX t1a;
CREATE INDEX t1a ON t1(a DESC);
ANALYZE;
} {}
do_execsql_test analyzeE-2.1 {
EXPLAIN QUERY PLAN
SELECT * FROM t1 WHERE a BETWEEN 500 AND 2500;
} {/SCAN TABLE t1/}
do_execsql_test analyzeE-2.2 {
EXPLAIN QUERY PLAN
SELECT * FROM t1 WHERE a BETWEEN 2900 AND 3000;
} {/SEARCH TABLE t1 USING INDEX t1a/}
do_execsql_test analyzeE-2.3 {
EXPLAIN QUERY PLAN
SELECT * FROM t1 WHERE a BETWEEN 1700 AND 1750;
} {/SEARCH TABLE t1 USING INDEX t1a/}
do_execsql_test analyzeE-2.4 {
EXPLAIN QUERY PLAN
SELECT * FROM t1 WHERE a BETWEEN 1 AND 500
} {/SEARCH TABLE t1 USING INDEX t1a/}
do_execsql_test analyzeE-2.5 {
EXPLAIN QUERY PLAN
SELECT * FROM t1 WHERE a BETWEEN 3000 AND 3000000
} {/SEARCH TABLE t1 USING INDEX t1a/}
do_execsql_test analyzeE-2.6 {
EXPLAIN QUERY PLAN
SELECT * FROM t1 WHERE a<500
} {/SEARCH TABLE t1 USING INDEX t1a/}
do_execsql_test analyzeE-2.7 {
EXPLAIN QUERY PLAN
SELECT * FROM t1 WHERE a>2500
} {/SEARCH TABLE t1 USING INDEX t1a/}
do_execsql_test analyzeE-2.8 {
EXPLAIN QUERY PLAN
SELECT * FROM t1 WHERE a>1900
} {/SEARCH TABLE t1 USING INDEX t1a/}
do_execsql_test analyzeE-2.9 {
EXPLAIN QUERY PLAN
SELECT * FROM t1 WHERE a>1100
} {/SCAN TABLE t1/}
do_execsql_test analyzeE-2.10 {
EXPLAIN QUERY PLAN
SELECT * FROM t1 WHERE a<1100
} {/SEARCH TABLE t1 USING INDEX t1a/}
do_execsql_test analyzeE-2.11 {
EXPLAIN QUERY PLAN
SELECT * FROM t1 WHERE a<1900
} {/SCAN TABLE t1/}
# Now do a range query on the second term of an ASCENDING index
# where the first term is constrained by equality.
#
do_execsql_test analyzeE-3.0 {
DROP TABLE t1;
CREATE TABLE t1(a,b,c);
WITH RECURSIVE
cnt(x) AS (VALUES(1000) UNION ALL SELECT x+1 FROM cnt WHERE x<2000)
INSERT INTO t1(a,b,c) SELECT x, x, 123 FROM cnt;
CREATE INDEX t1ca ON t1(c,a);
ANALYZE;
} {}
do_execsql_test analyzeE-3.1 {
EXPLAIN QUERY PLAN
SELECT * FROM t1 WHERE a BETWEEN 500 AND 2500 AND c=123;
} {/SCAN TABLE t1/}
do_execsql_test analyzeE-3.2 {
EXPLAIN QUERY PLAN
SELECT * FROM t1 WHERE a BETWEEN 2900 AND 3000 AND c=123;
} {/SEARCH TABLE t1 USING INDEX t1ca/}
do_execsql_test analyzeE-3.3 {
EXPLAIN QUERY PLAN
SELECT * FROM t1 WHERE a BETWEEN 1700 AND 1750 AND c=123;
} {/SEARCH TABLE t1 USING INDEX t1ca/}
do_execsql_test analyzeE-3.4 {
EXPLAIN QUERY PLAN
SELECT * FROM t1 WHERE a BETWEEN 1 AND 500 AND c=123
} {/SEARCH TABLE t1 USING INDEX t1ca/}
do_execsql_test analyzeE-3.5 {
EXPLAIN QUERY PLAN
SELECT * FROM t1 WHERE a BETWEEN 3000 AND 3000000 AND c=123
} {/SEARCH TABLE t1 USING INDEX t1ca/}
do_execsql_test analyzeE-3.6 {
EXPLAIN QUERY PLAN
SELECT * FROM t1 WHERE a<500 AND c=123
} {/SEARCH TABLE t1 USING INDEX t1ca/}
do_execsql_test analyzeE-3.7 {
EXPLAIN QUERY PLAN
SELECT * FROM t1 WHERE a>2500 AND c=123
} {/SEARCH TABLE t1 USING INDEX t1ca/}
do_execsql_test analyzeE-3.8 {
EXPLAIN QUERY PLAN
SELECT * FROM t1 WHERE a>1900 AND c=123
} {/SEARCH TABLE t1 USING INDEX t1ca/}
do_execsql_test analyzeE-3.9 {
EXPLAIN QUERY PLAN
SELECT * FROM t1 WHERE a>1100 AND c=123
} {/SCAN TABLE t1/}
do_execsql_test analyzeE-3.10 {
EXPLAIN QUERY PLAN
SELECT * FROM t1 WHERE a<1100 AND c=123
} {/SEARCH TABLE t1 USING INDEX t1ca/}
do_execsql_test analyzeE-3.11 {
EXPLAIN QUERY PLAN
SELECT * FROM t1 WHERE a<1900 AND c=123
} {/SCAN TABLE t1/}
# Repeat the 3.x tests using a DESCENDING index
#
do_execsql_test analyzeE-4.0 {
DROP INDEX t1ca;
CREATE INDEX t1ca ON t1(c ASC,a DESC);
ANALYZE;
} {}
do_execsql_test analyzeE-4.1 {
EXPLAIN QUERY PLAN
SELECT * FROM t1 WHERE a BETWEEN 500 AND 2500 AND c=123;
} {/SCAN TABLE t1/}
do_execsql_test analyzeE-4.2 {
EXPLAIN QUERY PLAN
SELECT * FROM t1 WHERE a BETWEEN 2900 AND 3000 AND c=123;
} {/SEARCH TABLE t1 USING INDEX t1ca/}
do_execsql_test analyzeE-4.3 {
EXPLAIN QUERY PLAN
SELECT * FROM t1 WHERE a BETWEEN 1700 AND 1750 AND c=123;
} {/SEARCH TABLE t1 USING INDEX t1ca/}
do_execsql_test analyzeE-4.4 {
EXPLAIN QUERY PLAN
SELECT * FROM t1 WHERE a BETWEEN 1 AND 500 AND c=123
} {/SEARCH TABLE t1 USING INDEX t1ca/}
do_execsql_test analyzeE-4.5 {
EXPLAIN QUERY PLAN
SELECT * FROM t1 WHERE a BETWEEN 3000 AND 3000000 AND c=123
} {/SEARCH TABLE t1 USING INDEX t1ca/}
do_execsql_test analyzeE-4.6 {
EXPLAIN QUERY PLAN
SELECT * FROM t1 WHERE a<500 AND c=123
} {/SEARCH TABLE t1 USING INDEX t1ca/}
do_execsql_test analyzeE-4.7 {
EXPLAIN QUERY PLAN
SELECT * FROM t1 WHERE a>2500 AND c=123
} {/SEARCH TABLE t1 USING INDEX t1ca/}
do_execsql_test analyzeE-4.8 {
EXPLAIN QUERY PLAN
SELECT * FROM t1 WHERE a>1900 AND c=123
} {/SEARCH TABLE t1 USING INDEX t1ca/}
do_execsql_test analyzeE-4.9 {
EXPLAIN QUERY PLAN
SELECT * FROM t1 WHERE a>1100 AND c=123
} {/SCAN TABLE t1/}
do_execsql_test analyzeE-4.10 {
EXPLAIN QUERY PLAN
SELECT * FROM t1 WHERE a<1100 AND c=123
} {/SEARCH TABLE t1 USING INDEX t1ca/}
do_execsql_test analyzeE-4.11 {
EXPLAIN QUERY PLAN
SELECT * FROM t1 WHERE a<1900 AND c=123
} {/SCAN TABLE t1/}
finish_test


@ -0,0 +1,124 @@
# 2015-03-12
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# Test that deterministic scalar functions passed constant arguments
# are used with stat4 data.
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
set ::testprefix analyzeF
ifcapable {!stat4} {
finish_test
return
}
proc isqrt {i} { expr { int(sqrt($i)) } }
db func isqrt isqrt
do_execsql_test 1.0 {
CREATE TABLE t1(x INTEGER, y INTEGER);
WITH data(i) AS (
SELECT 1 UNION ALL SELECT i+1 FROM data
)
INSERT INTO t1 SELECT isqrt(i), isqrt(i) FROM data LIMIT 400;
CREATE INDEX t1x ON t1(x);
CREATE INDEX t1y ON t1(y);
ANALYZE;
}
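# With the data above, isqrt(i) for i in 1..400 produces the values 1..20,
# with a value v occurring roughly 2*v+1 times.  So "=4" matches about 9
# rows while "=19" matches about 39, and the planner is expected to prefer
# the index on whichever column is compared with the smaller value.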
proc str {a} { return $a }
db func str str
# Note: tests 7 to 12 might be unstable, as they assume SQLite will
# prefer the expression to the right of the AND clause, which could
# of course change.
#
# Note 2: tests 9 and 10 depend on the tcl interface creating functions
# without the SQLITE_DETERMINISTIC flag set.
#
foreach {tn where idx} {
1 "x = 4 AND y = 19" {t1x (x=?)}
2 "x = 19 AND y = 4" {t1y (y=?)}
3 "x = '4' AND y = '19'" {t1x (x=?)}
4 "x = '19' AND y = '4'" {t1y (y=?)}
5 "x = substr('5195', 2, 2) AND y = substr('145', 2, 1)" {t1y (y=?)}
6 "x = substr('145', 2, 1) AND y = substr('5195', 2, 2)" {t1x (x=?)}
7 "x = substr('5195', 2, 2+0) AND y = substr('145', 2, 1+0)" {t1y (y=?)}
8 "x = substr('145', 2, 1+0) AND y = substr('5195', 2, 2+0)" {t1y (y=?)}
9 "x = str('19') AND y = str('4')" {t1y (y=?)}
10 "x = str('4') AND y = str('19')" {t1y (y=?)}
11 "x = nullif('19', 0) AND y = nullif('4', 0)" {t1y (y=?)}
12 "x = nullif('4', 0) AND y = nullif('19', 0)" {t1y (y=?)}
} {
set res "0 0 0 {SEARCH TABLE t1 USING INDEX $idx}"
do_eqp_test 1.$tn "SELECT * FROM t1 WHERE $where" $res
}
# Test that functions that do not exist - "func()" - are handled gracefully;
# the usual "no such function" error is returned.
#
do_catchsql_test 2.1 {
SELECT * FROM t1 WHERE x = substr('145', 2, 1) AND y = func(1, 2, 3)
} {1 {no such function: func}}
do_catchsql_test 2.2 {
UPDATE t1 SET y=y+1 WHERE x = substr('145', 2, 1) AND y = func(1, 2, 3)
} {1 {no such function: func}}
# Check that functions that accept zero arguments do not cause problems.
#
proc ret {x} { return $x }
db func det4 -deterministic [list ret 4]
db func nondet4 [list ret 4]
db func det19 -deterministic [list ret 19]
db func nondet19 [list ret 19]
foreach {tn where idx} {
1 "x = det4() AND y = det19()" {t1x (x=?)}
2 "x = det19() AND y = det4()" {t1y (y=?)}
3 "x = nondet4() AND y = nondet19()" {t1y (y=?)}
4 "x = nondet19() AND y = nondet4()" {t1y (y=?)}
} {
set res "0 0 0 {SEARCH TABLE t1 USING INDEX $idx}"
do_eqp_test 3.$tn "SELECT * FROM t1 WHERE $where" $res
}
execsql { DELETE FROM t1 }
proc throw_error {err} { error $err }
db func error -deterministic throw_error
do_catchsql_test 4.1 {
SELECT * FROM t1 WHERE x = error('error one') AND y = 4;
} {1 {error one}}
do_catchsql_test 4.2 {
SELECT * FROM t1 WHERE x = zeroblob(2200000000) AND y = 4;
} {1 {string or blob too big}}
sqlite3_limit db SQLITE_LIMIT_LENGTH 1000000
proc dstr {} { return [string repeat x 1100000] }
db func dstr -deterministic dstr
do_catchsql_test 4.3 {
SELECT * FROM t1 WHERE x = dstr() AND y = 11;
} {1 {string or blob too big}}
do_catchsql_test 4.4 {
SELECT * FROM t1 WHERE x = test_zeroblob(1100000) AND y = 4;
} {1 {string or blob too big}}
finish_test


@ -0,0 +1,52 @@
# 2015-05-11
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
#
# Quick tests for the sqlite3_analyzer tool
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
ifcapable !vtab {
finish_test
return
}
if {$tcl_platform(platform)=="windows"} {
set PROG "sqlite3_analyzer.exe"
} else {
set PROG "./sqlite3_analyzer"
}
if {![file exe $PROG]} {
puts "analyzer1 cannot run because $PROG is not available"
finish_test
return
}
db close
forcedelete test.db test.db-journal test.db-wal
sqlite3 db test.db
do_test analyzer1-1.0 {
db eval {
CREATE TABLE t1(a INTEGER PRIMARY KEY, b);
CREATE TABLE t2(a INT PRIMARY KEY, b) WITHOUT ROWID;
WITH RECURSIVE c(x) AS (VALUES(1) UNION ALL SELECT x+1 FROM c WHERE x<250)
INSERT INTO t1(a,b) SELECT x, randomblob(200) FROM c;
INSERT INTO t2(a,b) SELECT a, b FROM t1;
}
set line "exec $PROG test.db"
unset -nocomplain ::MSG
catch {eval $line} ::MSG
} {0}
do_test analyzer1-1.1 {
regexp {^/\*\* Disk-Space Utilization.*COMMIT;\W*$} $::MSG
} {1}
finish_test


@ -0,0 +1,90 @@
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file runs all tests.
#
# $Id: async.test,v 1.21 2009/06/05 17:09:12 drh Exp $
set testdir [file dirname $argv0]
source $testdir/tester.tcl
if {[info commands sqlite3async_initialize] eq ""} {
# The async logic is not built into this system
finish_test
return
}
rename finish_test async_really_finish_test
proc finish_test {} {
catch {db close}
catch {db2 close}
catch {db3 close}
}
if {[info exists G(isquick)]} { set ASYNC_SAVE_ISQUICK $G(isquick) }
set G(isquick) 1
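# Only the test scripts named in ASYNC_INCLUDE are re-run below using the
# asynchronous I/O backend.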
set ASYNC_INCLUDE {
insert.test
insert2.test
insert3.test
lock.test
lock2.test
lock3.test
select1.test
select2.test
select3.test
select4.test
trans.test
}
# Enable asynchronous IO.
sqlite3async_initialize "" 1
# This proc flushes the contents of the async-IO queue through to the
# underlying VFS. A couple of the test scripts identified in $ASYNC_INCLUDE
# above contain lines like "catch flush_async_queue" in places where
# this is required for the tests to work in async mode.
#
proc flush_async_queue {} {
sqlite3async_control halt idle
sqlite3async_start
sqlite3async_wait
sqlite3async_control halt never
}
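# Wrap [do_test] so that each test name is given an "async_io-" prefix and
# the write-queue is flushed after every test.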
rename do_test async_really_do_test
proc do_test {name args} {
uplevel async_really_do_test async_io-$name $args
flush_async_queue
}
foreach testfile [lsort -dictionary [glob $testdir/*.test]] {
set tail [file tail $testfile]
if {[lsearch -exact $ASYNC_INCLUDE $tail]<0} continue
source $testfile
# Make sure everything is flushed through. This is because [source]ing
# the next test file will delete the database file on disk (using
# [delete_file]). If the asynchronous backend still has the file
# open, it will become confused.
#
flush_async_queue
}
# Flush the write-queue and disable asynchronous IO. This should ensure
# all allocated memory is cleaned up.
set sqlite3async_trace 1
flush_async_queue
sqlite3async_shutdown
set sqlite3async_trace 0
rename do_test {}
rename async_really_do_test do_test
rename finish_test {}
rename async_really_finish_test finish_test
if {[info exists ASYNC_SAVE_ISQUICK]} { set G(isquick) $ASYNC_SAVE_ISQUICK }
finish_test


@ -0,0 +1,126 @@
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
#
# $Id: async2.test,v 1.12 2009/04/25 08:39:15 danielk1977 Exp $
set testdir [file dirname $argv0]
source $testdir/tester.tcl
if {
[info commands sqlite3async_initialize]=="" ||
[info command sqlite3_memdebug_fail]==""
} {
# The async logic is not built into this system
puts "Skipping async2 tests: not compiled with required features"
finish_test
return
}
# Enable asynchronous IO.
set setup_script {
CREATE TABLE counter(c);
INSERT INTO counter(c) VALUES (1);
}
set sql_script {
BEGIN;
UPDATE counter SET c = 2;
CREATE TABLE t1(a PRIMARY KEY, b, c);
CREATE TABLE t2(a PRIMARY KEY, b, c);
COMMIT;
BEGIN;
UPDATE counter SET c = 3;
INSERT INTO t1 VALUES('abcdefghij', 'four', 'score');
INSERT INTO t2 VALUES('klmnopqrst', 'and', 'seven');
COMMIT;
UPDATE counter SET c = 'FIN';
}
db close
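# For each failure mode (simulated I/O errors, transient malloc failures and
# persistent malloc failures), run the script repeatedly, injecting the
# failure one step later each time.  The "counter" table records how far the
# script got: 1 = setup only, 2 = first transaction committed, 3 = second
# transaction committed, FIN = the whole script ran.  Stop once the script
# has completed without error ten times.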
foreach err [list ioerr malloc-transient malloc-persistent] {
set ::go 10
for {set n 1} {$::go} {incr n} {
set ::sqlite_io_error_pending 0
sqlite3_memdebug_fail -1
forcedelete test.db test.db-journal
sqlite3 db test.db
execsql $::setup_script
db close
sqlite3async_initialize "" 1
sqlite3 db test.db
sqlite3_db_config_lookaside db 0 0 0
switch -- $err {
ioerr { set ::sqlite_io_error_pending $n }
malloc-persistent { sqlite3_memdebug_fail $n -repeat 1 }
malloc-transient { sqlite3_memdebug_fail $n -repeat 0 }
}
catchsql $::sql_script
db close
sqlite3async_control halt idle
sqlite3async_start
sqlite3async_wait
sqlite3async_control halt never
sqlite3async_shutdown
set ::sqlite_io_error_pending 0
sqlite3_memdebug_fail -1
sqlite3 db test.db
set c [db one {SELECT c FROM counter LIMIT 1}]
switch -- $c {
1 {
do_test async-$err-1.1.$n {
execsql {
SELECT name FROM sqlite_master;
}
} {counter}
}
2 {
do_test async-$err-1.2.$n.1 {
execsql {
SELECT * FROM t1;
}
} {}
do_test async-$err-1.2.$n.2 {
execsql {
SELECT * FROM t2;
}
} {}
}
3 {
do_test async-$err-1.3.$n.1 {
execsql {
SELECT * FROM t1;
}
} {abcdefghij four score}
do_test async-$err-1.3.$n.2 {
execsql {
SELECT * FROM t2;
}
} {klmnopqrst and seven}
}
FIN {
incr ::go -1
}
}
db close
}
}
catch {db close}
finish_test


@ -0,0 +1,76 @@
# 2007 September 5
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
#
# The focus of this file is testing the code in test_async.c.
# Specifically, it tests that the xFullPathname() method of
# the asynchronous vfs works correctly.
#
# $Id: async3.test,v 1.5 2009/04/25 08:39:15 danielk1977 Exp $
set testdir [file dirname $argv0]
source $testdir/tester.tcl
if { [info commands sqlite3async_initialize]=="" } {
# The async logic is not built into this system
puts "Skipping async3 tests: not compiled with required features"
finish_test
return
}
db close
sqlite3async_initialize "" 1
#set sqlite3async_trace 1
sqlite3async_start
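# Each of the following paths refers to the same file.  The async VFS is
# expected to resolve them all to a single canonical name via xFullPathname.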
set paths {
chocolate/banana/vanilla/file.db
chocolate//banana/vanilla/file.db
chocolate/./banana//vanilla/file.db
chocolate/banana/./vanilla/file.db
chocolate/banana/../banana/vanilla/file.db
chocolate/banana/./vanilla/extra_bit/../file.db
}
do_test async3-1.0 {
file mkdir [file join chocolate banana vanilla]
forcedelete chocolate/banana/vanilla/file.db
forcedelete chocolate/banana/vanilla/file.db-journal
} {}
do_test async3-1.1 {
sqlite3 db chocolate/banana/vanilla/file.db
execsql {
CREATE TABLE abc(a, b, c);
BEGIN;
INSERT INTO abc VALUES(1, 2, 3);
}
} {}
set N 2
foreach p $paths {
sqlite3 db2 $p
do_test async3-1.$N.1 {
execsql {SELECT * FROM abc} db2
} {}
do_test async3-1.$N.2 {
catchsql {INSERT INTO abc VALUES(4, 5, 6)} db2
} {1 {database is locked}}
db2 close
incr N
}
db close
sqlite3async_control halt idle
sqlite3async_wait
sqlite3async_control halt never
sqlite3async_shutdown
finish_test


@ -0,0 +1,168 @@
# 2009 April 25
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
#
# $Id: async4.test,v 1.4 2009/06/05 17:09:12 drh Exp $
set testdir [file dirname $argv0]
source $testdir/tester.tcl
# Do not use a codec for tests in this file, as the database file is
# manipulated directly using tcl scripts (using the [hexio_write] command).
#
do_not_use_codec
# These tests only work for Tcl version 8.5 and later on Windows (for now)
#
if {$tcl_platform(platform)=="windows"} {
scan $::tcl_version %f vx
if {$vx<8.5} {
finish_test
return
}
}
if {[info commands sqlite3async_initialize] eq ""} {
# The async logic is not built into this system
finish_test
return
}
db close
# Test layout:
#
# async4.1.*: Test the lockfiles parameter.
# async4.2.*: Test the delay parameter.
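# (The "lockfiles" flag controls whether the asynchronous VFS actually
# takes the file locks that SQLite requests. With it turned off, other
# connections no longer see those locks, so that mode is only safe when
# nothing else is accessing the database files at the same time.)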
do_test async4.1.1 {
sqlite3async_initialize {} 0
sqlite3async_control lockfiles
} {1}
do_test async4.1.2 {
sqlite3async_control lockfiles false
} {0}
do_test async4.1.3 {
sqlite3async_control lockfiles
} {0}
do_test async4.1.4 {
sqlite3async_control lockfiles true
} {1}
do_test async4.1.5 {
sqlite3 db test.db -vfs sqlite3async
execsql { CREATE TABLE t1(a, b, c) }
} {}
do_test async4.1.6 {
list [file exists test.db] [file size test.db]
} {1 0}
do_test async4.1.7 {
sqlite3 db2 test.db
catchsql { CREATE TABLE t2(a, b, c) } db2
} {1 {database is locked}}
do_test async4.1.8 {
sqlite3async_control halt idle
sqlite3async_start
sqlite3async_wait
} {}
do_test async4.1.9 {
catchsql { CREATE TABLE t2(a, b, c) } db2
} {0 {}}
do_test async4.1.10 {
list [catch {sqlite3async_control lockfiles false} msg] $msg
} {1 SQLITE_MISUSE}
do_test async4.1.11 {
db close
list [catch {sqlite3async_control lockfiles false} msg] $msg
} {1 SQLITE_MISUSE}
do_test async4.1.12 {
sqlite3async_start
sqlite3async_wait
sqlite3async_control lockfiles false
} {0}
do_test async4.1.13 {
sqlite3 db test.db -vfs sqlite3async
execsql { CREATE TABLE t3(a, b, c) } db
} {}
do_test async4.1.14 {
execsql {
CREATE INDEX i1 ON t2(a);
CREATE INDEX i2 ON t1(a);
} db2
} {}
do_test async4.1.15 {
sqlite3async_start
sqlite3async_wait
hexio_write test.db 28 00000000
execsql { pragma integrity_check } db2
} {{*** in database main ***
Page 5 is never used}}
do_test async4.1.16 {
db close
db2 close
sqlite3async_start
sqlite3async_wait
} {}
do_test async4.1.17 {
sqlite3async_control lockfiles true
} {1}
do_test async4.2.1 {
sqlite3async_control delay
} {0}
do_test async4.2.2 {
sqlite3async_control delay 23
} {23}
do_test async4.2.3 {
sqlite3async_control delay
} {23}
do_test async4.2.4 {
sqlite3async_control delay 0
} {0}
do_test async4.2.5 {
sqlite3 db test.db -vfs sqlite3async
execsql { CREATE TABLE t4(a, b) }
set T1 [lindex [time {
sqlite3async_start
sqlite3async_wait
}] 0]
sqlite3async_control delay 100
execsql { CREATE TABLE t5(a, b) }
set T2 [lindex [time {
sqlite3async_start
sqlite3async_wait
}] 0]
expr {($T1+1000000) < $T2}
} {1}
do_test async4.2.6 {
sqlite3async_control delay 0
execsql { CREATE TABLE t6(a, b) }
set T1 [lindex [time {
sqlite3async_start
sqlite3async_wait
}] 0]
expr {($T1+1000000) < $T2}
} {1}
do_test async4.2.7 {
list [catch { sqlite3async_control delay -1 } msg] $msg
} {1 SQLITE_MISUSE}
do_test async4.2.8 {
db close
sqlite3async_start
sqlite3async_wait
} {}
finish_test

View File

@ -0,0 +1,68 @@
# 2009 July 19
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file tests that asynchronous IO is compatible with multi-file
# transactions.
#
# $Id: async5.test,v 1.1 2009/07/18 11:52:04 danielk1977 Exp $
set testdir [file dirname $argv0]
source $testdir/tester.tcl
if {[info commands sqlite3async_initialize] eq ""} {
# The async logic is not built into this system
finish_test
return
}
db close
forcedelete test2.db
sqlite3async_initialize "" 1
sqlite3async_control halt never
sqlite3 db test.db
do_test async5-1.1 {
execsql {
ATTACH 'test2.db' AS next;
CREATE TABLE main.t1(a, b);
CREATE TABLE next.t2(a, b);
BEGIN;
INSERT INTO t1 VALUES(1, 2);
INSERT INTO t2 VALUES(3, 4);
COMMIT;
}
} {}
do_test async5-1.2 {
execsql { SELECT * FROM t1 }
} {1 2}
do_test async5-1.3 {
execsql { SELECT * FROM t2 }
} {3 4}
do_test async5-1.4 {
execsql {
BEGIN;
INSERT INTO t1 VALUES('a', 'b');
INSERT INTO t2 VALUES('c', 'd');
COMMIT;
}
} {}
do_test async5-1.5 {
execsql { SELECT * FROM t1 }
} {1 2 a b}
do_test async5-1.6 {
execsql { SELECT * FROM t2 }
} {3 4 c d}
db close
sqlite3async_control halt idle
sqlite3async_start
sqlite3async_wait
sqlite3async_control halt never
sqlite3async_shutdown
set sqlite3async_trace 0
finish_test

View File

@ -0,0 +1,60 @@
# 2012 June 18
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
#
# Tests of the sqlite3AtoF() function.
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
if {$::longdouble_size<=8} {
finish_test
return
}
expr srand(1)
for {set i 1} {$i<20000} {incr i} {
set pow [expr {int((rand()-0.5)*100)}]
set x [expr {pow((rand()-0.5)*2*rand(),$pow)}]
set xf [format %.32e $x]
# Verify that text->real conversions get exactly the same ieee754 floating-
# point value in SQLite as they do in TCL.
#
do_test atof1-1.$i.1 {
set y [db eval "SELECT $xf=\$x"]
if {!$y} {
puts -nonewline \173[db eval "SELECT real2hex($xf), real2hex(\$x)"]\175
db eval "SELECT $xf+0.0 AS a, \$x AS b" {
puts [format "\n%.60e\n%.60e\n%.60e" $x $a $b]
}
}
set y
} {1}
# Verify that round-trip real->text->real conversions using the quote()
# function preserve the bits of the numeric value exactly.
#
do_test atof1-1.$i.2 {
set y [db eval {SELECT $x=CAST(quote($x) AS real)}]
if {!$y} {
db eval {SELECT real2hex($x) a, real2hex(CAST(quote($x) AS real)) b} {}
puts "\nIN: $a $xf"
puts [format {QUOTE: %16s %s} {} [db eval {SELECT quote($x)}]]
db eval {SELECT CAST(quote($x) AS real) c} {}
puts "OUT: $b [format %.32e $c]"
}
set y
} {1}
}
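# A single-value spot check of the same quote() round trip, written as a
# standalone sketch (throwaway in-memory handle and a hypothetical test
# name, separate from the randomized loop above):
sqlite3 dbrt :memory:
do_test atof1-2.0 {
  dbrt eval {SELECT 0.1 = CAST(quote(0.1) AS real)}
} {1}
dbrt close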
finish_test

View File

@ -0,0 +1,874 @@
# 2003 April 4
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for SQLite library. The
# focus of this script is testing the ATTACH and DETACH commands
# and related functionality.
#
# $Id: attach.test,v 1.52 2009/05/29 14:39:08 drh Exp $
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
ifcapable !attach {
finish_test
return
}
for {set i 2} {$i<=15} {incr i} {
forcedelete test$i.db
forcedelete test$i.db-journal
}
do_test attach-1.1 {
execsql {
CREATE TABLE t1(a,b);
INSERT INTO t1 VALUES(1,2);
INSERT INTO t1 VALUES(3,4);
SELECT * FROM t1;
}
} {1 2 3 4}
do_test attach-1.2 {
sqlite3 db2 test2.db
execsql {
CREATE TABLE t2(x,y);
INSERT INTO t2 VALUES(1,'x');
INSERT INTO t2 VALUES(2,'y');
SELECT * FROM t2;
} db2
} {1 x 2 y}
do_test attach-1.3 {
execsql {
ATTACH DATABASE 'test2.db' AS two;
SELECT * FROM two.t2;
}
} {1 x 2 y}
# Tests for the sqlite3_db_filename interface
#
do_test attach-1.3.1 {
file tail [sqlite3_db_filename db main]
} {test.db}
do_test attach-1.3.2 {
file tail [sqlite3_db_filename db MAIN]
} {test.db}
do_test attach-1.3.3 {
file tail [sqlite3_db_filename db temp]
} {}
do_test attach-1.3.4 {
file tail [sqlite3_db_filename db two]
} {test2.db}
do_test attach-1.3.5 {
file tail [sqlite3_db_filename db three]
} {}
do_test attach-1.4 {
execsql {
SELECT * FROM t2;
}
} {1 x 2 y}
do_test attach-1.5 {
execsql {
DETACH DATABASE two;
SELECT * FROM t1;
}
} {1 2 3 4}
do_test attach-1.6 {
catchsql {
SELECT * FROM t2;
}
} {1 {no such table: t2}}
do_test attach-1.7 {
catchsql {
SELECT * FROM two.t2;
}
} {1 {no such table: two.t2}}
do_test attach-1.8 {
catchsql {
ATTACH DATABASE 'test3.db' AS three;
}
} {0 {}}
do_test attach-1.9 {
catchsql {
SELECT * FROM three.sqlite_master;
}
} {0 {}}
do_test attach-1.10 {
catchsql {
DETACH DATABASE [three];
}
} {0 {}}
do_test attach-1.11 {
execsql {
ATTACH 'test.db' AS db2;
ATTACH 'test.db' AS db3;
ATTACH 'test.db' AS db4;
ATTACH 'test.db' AS db5;
ATTACH 'test.db' AS db6;
ATTACH 'test.db' AS db7;
ATTACH 'test.db' AS db8;
ATTACH 'test.db' AS db9;
}
} {}
proc db_list {db} {
set list {}
foreach {idx name file} [execsql {PRAGMA database_list} $db] {
lappend list $idx $name
}
return $list
}
ifcapable schema_pragmas {
do_test attach-1.11b {
db_list db
} {0 main 2 db2 3 db3 4 db4 5 db5 6 db6 7 db7 8 db8 9 db9}
} ;# ifcapable schema_pragmas
do_test attach-1.12 {
catchsql {
ATTACH 'test.db' as db2;
}
} {1 {database db2 is already in use}}
do_test attach-1.12.2 {
db errorcode
} {1}
do_test attach-1.13 {
catchsql {
ATTACH 'test.db' as db5;
}
} {1 {database db5 is already in use}}
do_test attach-1.14 {
catchsql {
ATTACH 'test.db' as db9;
}
} {1 {database db9 is already in use}}
do_test attach-1.15 {
catchsql {
ATTACH 'test.db' as main;
}
} {1 {database main is already in use}}
ifcapable tempdb {
do_test attach-1.16 {
catchsql {
ATTACH 'test.db' as temp;
}
} {1 {database temp is already in use}}
}
do_test attach-1.17 {
catchsql {
ATTACH 'test.db' as MAIN;
}
} {1 {database MAIN is already in use}}
do_test attach-1.18 {
catchsql {
ATTACH 'test.db' as db10;
ATTACH 'test.db' as db11;
}
} {0 {}}
if {$SQLITE_MAX_ATTACHED==10} {
do_test attach-1.19 {
catchsql {
ATTACH 'test.db' as db12;
}
} {1 {too many attached databases - max 10}}
do_test attach-1.19.1 {
db errorcode
} {1}
}
do_test attach-1.20.1 {
execsql {
DETACH db5;
}
} {}
ifcapable schema_pragmas {
do_test attach-1.20.2 {
db_list db
} {0 main 2 db2 3 db3 4 db4 5 db6 6 db7 7 db8 8 db9 9 db10 10 db11}
} ;# ifcapable schema_pragmas
integrity_check attach-1.20.3
ifcapable tempdb {
execsql {select * from sqlite_temp_master}
}
do_test attach-1.21 {
catchsql {
ATTACH 'test.db' as db12;
}
} {0 {}}
if {$SQLITE_MAX_ATTACHED==10} {
do_test attach-1.22 {
catchsql {
ATTACH 'test.db' as db13;
}
} {1 {too many attached databases - max 10}}
do_test attach-1.22.1 {
db errorcode
} {1}
}
do_test attach-1.23 {
catchsql {
DETACH "db14";
}
} {1 {no such database: db14}}
do_test attach-1.24 {
catchsql {
DETACH db12;
}
} {0 {}}
do_test attach-1.25 {
catchsql {
DETACH db12;
}
} {1 {no such database: db12}}
do_test attach-1.26 {
catchsql {
DETACH main;
}
} {1 {cannot detach database main}}
ifcapable tempdb {
do_test attach-1.27 {
catchsql {
DETACH Temp;
}
} {1 {cannot detach database Temp}}
} else {
do_test attach-1.27 {
catchsql {
DETACH Temp;
}
} {1 {no such database: Temp}}
}
do_test attach-1.28 {
catchsql {
DETACH db11;
DETACH db10;
DETACH db9;
DETACH db8;
DETACH db7;
DETACH db6;
DETACH db4;
DETACH db3;
DETACH db2;
}
} {0 {}}
ifcapable schema_pragmas {
ifcapable tempdb {
do_test attach-1.29 {
db_list db
} {0 main 1 temp}
} else {
do_test attach-1.29 {
db_list db
} {0 main}
}
} ;# ifcapable schema_pragmas
ifcapable {trigger} { # Only do the following tests if triggers are enabled
do_test attach-2.1 {
execsql {
CREATE TABLE tx(x1,x2,y1,y2);
CREATE TRIGGER r1 AFTER UPDATE ON t2 FOR EACH ROW BEGIN
INSERT INTO tx(x1,x2,y1,y2) VALUES(OLD.x,NEW.x,OLD.y,NEW.y);
END;
SELECT * FROM tx;
} db2;
} {}
do_test attach-2.2 {
execsql {
UPDATE t2 SET x=x+10;
SELECT * FROM tx;
} db2;
} {1 11 x x 2 12 y y}
do_test attach-2.3 {
execsql {
CREATE TABLE tx(x1,x2,y1,y2);
SELECT * FROM tx;
}
} {}
do_test attach-2.4 {
execsql {
ATTACH 'test2.db' AS db2;
}
} {}
do_test attach-2.5 {
execsql {
UPDATE db2.t2 SET x=x+10;
SELECT * FROM db2.tx;
}
} {1 11 x x 2 12 y y 11 21 x x 12 22 y y}
do_test attach-2.6 {
execsql {
SELECT * FROM main.tx;
}
} {}
do_test attach-2.7 {
execsql {
SELECT type, name, tbl_name FROM db2.sqlite_master;
}
} {table t2 t2 table tx tx trigger r1 t2}
ifcapable schema_pragmas&&tempdb {
do_test attach-2.8 {
db_list db
} {0 main 1 temp 2 db2}
} ;# ifcapable schema_pragmas&&tempdb
ifcapable schema_pragmas&&!tempdb {
do_test attach-2.8 {
db_list db
} {0 main 2 db2}
} ;# ifcapable schema_pragmas&&!tempdb
do_test attach-2.9 {
execsql {
CREATE INDEX i2 ON t2(x);
SELECT * FROM t2 WHERE x>5;
} db2
} {21 x 22 y}
do_test attach-2.10 {
execsql {
SELECT type, name, tbl_name FROM sqlite_master;
} db2
} {table t2 t2 table tx tx trigger r1 t2 index i2 t2}
#do_test attach-2.11 {
# catchsql {
# SELECT * FROM t2 WHERE x>5;
# }
#} {1 {database schema has changed}}
ifcapable schema_pragmas {
ifcapable tempdb {
do_test attach-2.12 {
db_list db
} {0 main 1 temp 2 db2}
} else {
do_test attach-2.12 {
db_list db
} {0 main 2 db2}
}
} ;# ifcapable schema_pragmas
do_test attach-2.13 {
catchsql {
SELECT * FROM t2 WHERE x>5;
}
} {0 {21 x 22 y}}
do_test attach-2.14 {
execsql {
SELECT type, name, tbl_name FROM sqlite_master;
}
} {table t1 t1 table tx tx}
do_test attach-2.15 {
execsql {
SELECT type, name, tbl_name FROM db2.sqlite_master;
}
} {table t2 t2 table tx tx trigger r1 t2 index i2 t2}
do_test attach-2.16 {
db close
sqlite3 db test.db
execsql {
ATTACH 'test2.db' AS db2;
SELECT type, name, tbl_name FROM db2.sqlite_master;
}
} {table t2 t2 table tx tx trigger r1 t2 index i2 t2}
} ;# End of ifcapable {trigger}
do_test attach-3.1 {
db close
db2 close
sqlite3 db test.db
sqlite3 db2 test2.db
execsql {
SELECT * FROM t1
}
} {1 2 3 4}
# If we are testing a version of the code that lacks trigger support,
# adjust the database contents so that they are the same as if triggers
# had been enabled.
ifcapable {!trigger} {
db2 eval {
DELETE FROM t2;
INSERT INTO t2 VALUES(21, 'x');
INSERT INTO t2 VALUES(22, 'y');
CREATE TABLE tx(x1,x2,y1,y2);
INSERT INTO tx VALUES(1, 11, 'x', 'x');
INSERT INTO tx VALUES(2, 12, 'y', 'y');
INSERT INTO tx VALUES(11, 21, 'x', 'x');
INSERT INTO tx VALUES(12, 22, 'y', 'y');
CREATE INDEX i2 ON t2(x);
}
}
do_test attach-3.2 {
catchsql {
SELECT * FROM t2
}
} {1 {no such table: t2}}
do_test attach-3.3 {
catchsql {
ATTACH DATABASE 'test2.db' AS db2;
SELECT * FROM t2
}
} {0 {21 x 22 y}}
# Even though 'db' has started a transaction, it should not yet have
# a lock on test2.db so 'db2' should be readable.
do_test attach-3.4 {
execsql BEGIN
catchsql {
SELECT * FROM t2;
} db2;
} {0 {21 x 22 y}}
# Reading from test2.db from db within a transaction should not
# prevent test2.db from being read by db2.
do_test attach-3.5 {
execsql {SELECT * FROM t2}
catchsql {
SELECT * FROM t2;
} db2;
} {0 {21 x 22 y}}
# Making a change to test2.db through db causes test2.db to get
# a reserved lock. It should still be accessible through db2.
do_test attach-3.6 {
execsql {
UPDATE t2 SET x=x+1 WHERE x=50;
}
catchsql {
SELECT * FROM t2;
} db2;
} {0 {21 x 22 y}}
do_test attach-3.7 {
execsql ROLLBACK
execsql {SELECT * FROM t2} db2
} {21 x 22 y}
# Start transactions on both db and db2. Once again, just because
# we make a change to test2.db using db2, only a RESERVED lock is
# obtained, so test2.db should still be readable using db.
#
do_test attach-3.8 {
execsql BEGIN
execsql BEGIN db2
execsql {UPDATE t2 SET x=0 WHERE 0} db2
catchsql {SELECT * FROM t2}
} {0 {21 x 22 y}}
# It is also still accessible from db2.
do_test attach-3.9 {
catchsql {SELECT * FROM t2} db2
} {0 {21 x 22 y}}
do_test attach-3.10 {
execsql {SELECT * FROM t1}
} {1 2 3 4}
do_test attach-3.11 {
catchsql {UPDATE t1 SET a=a+1}
} {0 {}}
do_test attach-3.12 {
execsql {SELECT * FROM t1}
} {2 2 4 4}
# db2 has a RESERVED lock on test2.db, so db cannot write to any tables
# in test2.db.
do_test attach-3.13 {
catchsql {UPDATE t2 SET x=x+1 WHERE x=50}
} {1 {database is locked}}
# Change for version 3. Transaction is no longer rolled back
# for a locked database.
execsql {ROLLBACK}
# db is able to reread its schema because db2 still only holds a
# reserved lock.
do_test attach-3.14 {
catchsql {SELECT * FROM t1}
} {0 {1 2 3 4}}
do_test attach-3.15 {
execsql COMMIT db2
execsql {SELECT * FROM t1}
} {1 2 3 4}
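# The SHARED/RESERVED interaction exercised above can also be reproduced
# in isolation. A minimal standalone sketch using two throwaway handles
# on a scratch file (names are illustrative and unused by the tests that
# follow):
forcedelete scratch.db
sqlite3 wconn scratch.db
sqlite3 rconn scratch.db
wconn eval {CREATE TABLE s(x)}
wconn eval {BEGIN; INSERT INTO s VALUES(1)}   ;# wconn now holds RESERVED
rconn eval {SELECT count(*) FROM s}           ;# reads still succeed (returns 0)
wconn eval {COMMIT}
wconn close
rconn close
forcedelete scratch.db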
# Ticket #323
do_test attach-4.1 {
execsql {DETACH db2}
db2 close
sqlite3 db2 test2.db
execsql {
CREATE TABLE t3(x,y);
CREATE UNIQUE INDEX t3i1 ON t3(x);
INSERT INTO t3 VALUES(1,2);
SELECT * FROM t3;
} db2;
} {1 2}
do_test attach-4.2 {
execsql {
CREATE TABLE t3(a,b);
CREATE UNIQUE INDEX t3i1b ON t3(a);
INSERT INTO t3 VALUES(9,10);
SELECT * FROM t3;
}
} {9 10}
do_test attach-4.3 {
execsql {
ATTACH DATABASE 'test2.db' AS db2;
SELECT * FROM db2.t3;
}
} {1 2}
do_test attach-4.4 {
execsql {
SELECT * FROM main.t3;
}
} {9 10}
do_test attach-4.5 {
execsql {
INSERT INTO db2.t3 VALUES(9,10);
SELECT * FROM db2.t3;
}
} {1 2 9 10}
execsql {
DETACH db2;
}
ifcapable {trigger} {
do_test attach-4.6 {
execsql {
CREATE TABLE t4(x);
CREATE TRIGGER t3r3 AFTER INSERT ON t3 BEGIN
INSERT INTO t4 VALUES('db2.' || NEW.x);
END;
INSERT INTO t3 VALUES(6,7);
SELECT * FROM t4;
} db2
} {db2.6}
do_test attach-4.7 {
execsql {
CREATE TABLE t4(y);
CREATE TRIGGER t3r3 AFTER INSERT ON t3 BEGIN
INSERT INTO t4 VALUES('main.' || NEW.a);
END;
INSERT INTO main.t3 VALUES(11,12);
SELECT * FROM main.t4;
}
} {main.11}
}
ifcapable {!trigger} {
# When we do not have trigger support, set up the tables as they
# would have been had triggers been there. The tests that follow need
# this setup.
execsql {
CREATE TABLE t4(x);
INSERT INTO t3 VALUES(6,7);
INSERT INTO t4 VALUES('db2.6');
INSERT INTO t4 VALUES('db2.13');
} db2
execsql {
CREATE TABLE t4(y);
INSERT INTO main.t3 VALUES(11,12);
INSERT INTO t4 VALUES('main.11');
}
}
# This one is tricky. On the UNION ALL select, we have to make sure
# the schema for both main and db2 is valid before starting to execute
# the first query of the UNION ALL. If we wait to test the validity of
# the schema for main until after the first query has run, that test will
# fail and the query will abort but we will have already output some
# results. When the query is retried, the results will be repeated.
#
ifcapable compound {
do_test attach-4.8 {
execsql {
ATTACH DATABASE 'test2.db' AS db2;
INSERT INTO db2.t3 VALUES(13,14);
SELECT * FROM db2.t4 UNION ALL SELECT * FROM main.t4;
}
} {db2.6 db2.13 main.11}
do_test attach-4.9 {
ifcapable {!trigger} {execsql {INSERT INTO main.t4 VALUES('main.15')}}
execsql {
INSERT INTO main.t3 VALUES(15,16);
SELECT * FROM db2.t4 UNION ALL SELECT * FROM main.t4;
}
} {db2.6 db2.13 main.11 main.15}
} ;# ifcapable compound
ifcapable !compound {
ifcapable {!trigger} {execsql {INSERT INTO main.t4 VALUES('main.15')}}
execsql {
ATTACH DATABASE 'test2.db' AS db2;
INSERT INTO db2.t3 VALUES(13,14);
INSERT INTO main.t3 VALUES(15,16);
}
} ;# ifcapable !compound
ifcapable view {
do_test attach-4.10 {
execsql {
DETACH DATABASE db2;
}
execsql {
CREATE VIEW v3 AS SELECT x*100+y FROM t3;
SELECT * FROM v3;
} db2
} {102 910 607 1314}
do_test attach-4.11 {
execsql {
CREATE VIEW v3 AS SELECT a*100+b FROM t3;
SELECT * FROM v3;
}
} {910 1112 1516}
do_test attach-4.12 {
execsql {
ATTACH DATABASE 'test2.db' AS db2;
SELECT * FROM db2.v3;
}
} {102 910 607 1314}
do_test attach-4.13 {
execsql {
SELECT * FROM main.v3;
}
} {910 1112 1516}
} ;# ifcapable view
# Tests for the sqliteFix...() routines in attach.c
#
ifcapable {trigger} {
do_test attach-5.1 {
db close
sqlite3 db test.db
db2 close
forcedelete test2.db
sqlite3 db2 test2.db
catchsql {
ATTACH DATABASE 'test.db' AS orig;
CREATE TRIGGER r1 AFTER INSERT ON orig.t1 BEGIN
SELECT 'no-op';
END;
} db2
} {1 {trigger r1 cannot reference objects in database orig}}
do_test attach-5.2 {
catchsql {
CREATE TABLE t5(x,y);
CREATE TRIGGER r5 AFTER INSERT ON t5 BEGIN
SELECT 'no-op';
END;
} db2
} {0 {}}
do_test attach-5.3 {
catchsql {
DROP TRIGGER r5;
CREATE TRIGGER r5 AFTER INSERT ON t5 BEGIN
SELECT 'no-op' FROM orig.t1;
END;
} db2
} {1 {trigger r5 cannot reference objects in database orig}}
ifcapable tempdb {
do_test attach-5.4 {
catchsql {
CREATE TEMP TABLE t6(p,q,r);
CREATE TRIGGER r5 AFTER INSERT ON t5 BEGIN
SELECT 'no-op' FROM temp.t6;
END;
} db2
} {1 {trigger r5 cannot reference objects in database temp}}
}
ifcapable subquery {
do_test attach-5.5 {
catchsql {
CREATE TRIGGER r5 AFTER INSERT ON t5 BEGIN
SELECT 'no-op' || (SELECT * FROM temp.t6);
END;
} db2
} {1 {trigger r5 cannot reference objects in database temp}}
do_test attach-5.6 {
catchsql {
CREATE TRIGGER r5 AFTER INSERT ON t5 BEGIN
SELECT 'no-op' FROM t1 WHERE x<(SELECT min(x) FROM temp.t6);
END;
} db2
} {1 {trigger r5 cannot reference objects in database temp}}
do_test attach-5.7 {
catchsql {
CREATE TRIGGER r5 AFTER INSERT ON t5 BEGIN
SELECT 'no-op' FROM t1 GROUP BY 1 HAVING x<(SELECT min(x) FROM temp.t6);
END;
} db2
} {1 {trigger r5 cannot reference objects in database temp}}
do_test attach-5.7 {
catchsql {
CREATE TRIGGER r5 AFTER INSERT ON t5 BEGIN
SELECT max(1,x,(SELECT min(x) FROM temp.t6)) FROM t1;
END;
} db2
} {1 {trigger r5 cannot reference objects in database temp}}
do_test attach-5.8 {
catchsql {
CREATE TRIGGER r5 AFTER INSERT ON t5 BEGIN
INSERT INTO t1 VALUES((SELECT min(x) FROM temp.t6),5);
END;
} db2
} {1 {trigger r5 cannot reference objects in database temp}}
do_test attach-5.9 {
catchsql {
CREATE TRIGGER r5 AFTER INSERT ON t5 BEGIN
DELETE FROM t1 WHERE x<(SELECT min(x) FROM temp.t6);
END;
} db2
} {1 {trigger r5 cannot reference objects in database temp}}
} ;# endif subquery
} ;# endif trigger
# Check to make sure we get a sensible error if unable to open
# the file that we are trying to attach.
#
do_test attach-6.1 {
catchsql {
ATTACH DATABASE 'no-such-file' AS nosuch;
}
} {0 {}}
if {$tcl_platform(platform)=="unix"} {
do_test attach-6.2 {
sqlite3 dbx cannot-read
dbx eval {CREATE TABLE t1(a,b,c)}
dbx close
file attributes cannot-read -permission 0000
if {[file writable cannot-read]} {
puts "\n**** Tests do not work when run as root ****"
forcedelete cannot-read
exit 1
}
catchsql {
ATTACH DATABASE 'cannot-read' AS noread;
}
} {1 {unable to open database: cannot-read}}
do_test attach-6.2.2 {
db errorcode
} {14}
forcedelete cannot-read
}
# Check the error message if we try to access a database that has
# not been attached.
do_test attach-6.3 {
catchsql {
CREATE TABLE no_such_db.t1(a, b, c);
}
} {1 {unknown database no_such_db}}
for {set i 2} {$i<=15} {incr i} {
catch {db$i close}
}
db close
forcedelete test2.db
forcedelete no-such-file
ifcapable subquery {
do_test attach-7.1 {
forcedelete test.db test.db-journal
sqlite3 db test.db
catchsql {
DETACH RAISE ( IGNORE ) IN ( SELECT "AAAAAA" . * ORDER BY
REGISTER LIMIT "AAAAAA" . "AAAAAA" OFFSET RAISE ( IGNORE ) NOT NULL )
}
} {1 {no such table: AAAAAA}}
}
# Create a malformed file (a file that is not a valid database)
# and try to attach it
#
do_test attach-8.1 {
set fd [open test2.db w]
puts $fd "This file is not a valid SQLite database"
close $fd
catchsql {
ATTACH 'test2.db' AS t2;
}
} {1 {file is encrypted or is not a database}}
do_test attach-8.2 {
db errorcode
} {26}
forcedelete test2.db
do_test attach-8.3 {
sqlite3 db2 test2.db
db2 eval {CREATE TABLE t1(x); BEGIN EXCLUSIVE}
catchsql {
ATTACH 'test2.db' AS t2;
}
} {1 {database is locked}}
do_test attach-8.4 {
db errorcode
} {5}
db2 close
forcedelete test2.db
# Test that it is possible to attach the same database more than
# once when not in shared-cache mode. That this is not possible in
# shared-cache mode is tested in shared7.test.
do_test attach-9.1 {
forcedelete test4.db
execsql {
ATTACH 'test4.db' AS aux1;
CREATE TABLE aux1.t1(a, b);
INSERT INTO aux1.t1 VALUES(1, 2);
ATTACH 'test4.db' AS aux2;
SELECT * FROM aux2.t1;
}
} {1 2}
do_test attach-9.2 {
catchsql {
BEGIN;
INSERT INTO aux1.t1 VALUES(3, 4);
INSERT INTO aux2.t1 VALUES(5, 6);
}
} {1 {database is locked}}
do_test attach-9.3 {
execsql {
COMMIT;
SELECT * FROM aux2.t1;
}
} {1 2 3 4}
# Ticket [abe728bbc311d81334dae9762f0db87c07a98f79].
# Multi-database commit on an attached TEMP database.
#
do_test attach-10.1 {
execsql {
ATTACH '' AS noname;
ATTACH ':memory:' AS inmem;
BEGIN;
CREATE TABLE noname.noname(x);
CREATE TABLE inmem.inmem(y);
CREATE TABLE main.main(z);
COMMIT;
SELECT name FROM noname.sqlite_master;
SELECT name FROM inmem.sqlite_master;
}
} {noname inmem}
do_test attach-10.2 {
lrange [execsql {
PRAGMA database_list;
}] 9 end
} {4 noname {} 5 inmem {}}
# Attach with a very long URI filename.
#
db close
sqlite3 db test.db -uri 1
do_execsql_test attach-11.1 {
ATTACH printf('file:%09000x/x.db?mode=memory&cache=shared',1) AS aux1;
CREATE TABLE aux1.t1(x,y);
INSERT INTO aux1.t1(x,y) VALUES(1,2),(3,4);
SELECT * FROM aux1.t1;
} {1 2 3 4}
finish_test

View File

@ -0,0 +1,397 @@
# 2003 July 1
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for SQLite library. The
# focus of this script is testing the ATTACH and DETACH commands
# and related functionality.
#
# $Id: attach2.test,v 1.38 2007/12/13 21:54:11 drh Exp $
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
ifcapable !attach {
finish_test
return
}
# Ticket #354
#
# Databases test.db and test2.db contain identical schemas. Make
# sure we can attach test2.db from test.db.
#
do_test attach2-1.1 {
db eval {
CREATE TABLE t1(a,b);
CREATE INDEX x1 ON t1(a);
}
forcedelete test2.db
forcedelete test2.db-journal
sqlite3 db2 test2.db
db2 eval {
CREATE TABLE t1(a,b);
CREATE INDEX x1 ON t1(a);
}
catchsql {
ATTACH 'test2.db' AS t2;
}
} {0 {}}
# Ticket #514
#
proc db_list {db} {
set list {}
foreach {idx name file} [execsql {PRAGMA database_list} $db] {
lappend list $idx $name
}
return $list
}
db eval {DETACH t2}
do_test attach2-2.1 {
# lock test2.db then try to attach it. This is no longer an error because
# db2 just RESERVES the database. It does not obtain a write-lock until
# we COMMIT.
db2 eval {BEGIN}
db2 eval {UPDATE t1 SET a = 0 WHERE 0}
catchsql {
ATTACH 'test2.db' AS t2;
}
} {0 {}}
ifcapable schema_pragmas {
do_test attach2-2.2 {
# make sure test2.db did get attached.
db_list db
} {0 main 2 t2}
} ;# ifcapable schema_pragmas
db2 eval {COMMIT}
do_test attach2-2.5 {
# Make sure we can read test2.db from db
catchsql {
SELECT name FROM t2.sqlite_master;
}
} {0 {t1 x1}}
do_test attach2-2.6 {
# lock test2.db and try to read from it. This should still work because
# the lock is only a RESERVED lock which does not prevent reading.
#
db2 eval BEGIN
db2 eval {UPDATE t1 SET a = 0 WHERE 0}
catchsql {
SELECT name FROM t2.sqlite_master;
}
} {0 {t1 x1}}
do_test attach2-2.7 {
# but we can still read from test1.db even though test2.db is locked.
catchsql {
SELECT name FROM main.sqlite_master;
}
} {0 {t1 x1}}
do_test attach2-2.8 {
# start a transaction on test.db even though test2.db is locked.
catchsql {
BEGIN;
INSERT INTO t1 VALUES(8,9);
}
} {0 {}}
do_test attach2-2.9 {
execsql {
SELECT * FROM t1
}
} {8 9}
do_test attach2-2.10 {
# now try to write to test2.db. the write should fail
catchsql {
INSERT INTO t2.t1 VALUES(1,2);
}
} {1 {database is locked}}
do_test attach2-2.11 {
# when the write failed in the previous test, the transaction should
# have rolled back.
#
# Update for version 3: A transaction is no longer rolled back if a
# database is found to be busy.
execsql {rollback}
db2 eval ROLLBACK
execsql {
SELECT * FROM t1
}
} {}
do_test attach2-2.12 {
catchsql {
COMMIT
}
} {1 {cannot commit - no transaction is active}}
# Ticket #574: Make sure it works using the non-callback API
#
do_test attach2-3.1 {
set DB [sqlite3_connection_pointer db]
set rc [catch {sqlite3_prepare $DB "ATTACH 'test2.db' AS t2" -1 TAIL} VM]
if {$rc} {lappend rc $VM}
sqlite3_step $VM
sqlite3_finalize $VM
set rc
} {0}
do_test attach2-3.2 {
set rc [catch {sqlite3_prepare $DB "DETACH t2" -1 TAIL} VM]
if {$rc} {lappend rc $VM}
sqlite3_step $VM
sqlite3_finalize $VM
set rc
} {0}
db close
for {set i 2} {$i<=15} {incr i} {
catch {db$i close}
}
# A procedure to verify the status of locks on a database.
#
proc lock_status {testnum db expected_result} {
# If the database was compiled with OMIT_TEMPDB set, then
# the lock_status list will not contain an entry for the temp
# db. But the test code doesn't know this, so it's easiest
# to filter it out of the $expected_result list here.
ifcapable !tempdb {
set expected_result [concat \
[lrange $expected_result 0 1] \
[lrange $expected_result 4 end] \
]
}
do_test attach2-$testnum [subst {
$db cache flush ;# The lock_status pragma should not be cached
execsql {PRAGMA lock_status} $db
}] $expected_result
}
set sqlite_os_trace 0
# Tests attach2-4.* test that read-locks work correctly with attached
# databases.
do_test attach2-4.1 {
sqlite3 db test.db
sqlite3 db2 test.db
execsql {ATTACH 'test2.db' as file2}
execsql {ATTACH 'test2.db' as file2} db2
} {}
lock_status 4.1.1 db {main unlocked temp closed file2 unlocked}
lock_status 4.1.2 db2 {main unlocked temp closed file2 unlocked}
do_test attach2-4.2 {
# Handle 'db' read-locks test.db
execsql {BEGIN}
execsql {SELECT * FROM t1}
# Lock status:
# db - shared(main)
# db2 -
} {}
lock_status 4.2.1 db {main shared temp closed file2 unlocked}
lock_status 4.2.2 db2 {main unlocked temp closed file2 unlocked}
do_test attach2-4.3 {
# The read lock held by db does not prevent db2 from reading test.db
execsql {SELECT * FROM t1} db2
} {}
lock_status 4.3.1 db {main shared temp closed file2 unlocked}
lock_status 4.3.2 db2 {main unlocked temp closed file2 unlocked}
do_test attach2-4.4 {
# db is holding a read lock on test.db, so we should not be able
# to commit a write to test.db from db2
catchsql {
INSERT INTO t1 VALUES(1, 2)
} db2
} {1 {database is locked}}
lock_status 4.4.1 db {main shared temp closed file2 unlocked}
lock_status 4.4.2 db2 {main unlocked temp closed file2 unlocked}
# We have to make sure that the cache_size and the soft_heap_limit
# are large enough to hold the entire change in memory. If either
# is set too small, then changes will spill to the database, forcing
# a reserved lock to promote to exclusive. That will mess up our
# test results.
set soft_limit [sqlite3_soft_heap_limit 0]
do_test attach2-4.5 {
# Handle 'db2' reserves file2.
execsql {BEGIN} db2
execsql {INSERT INTO file2.t1 VALUES(1, 2)} db2
# Lock status:
# db - shared(main)
# db2 - reserved(file2)
} {}
lock_status 4.5.1 db {main shared temp closed file2 unlocked}
lock_status 4.5.2 db2 {main unlocked temp closed file2 reserved}
do_test attach2-4.6.1 {
# Reads are allowed against a reserved database.
catchsql {
SELECT * FROM file2.t1;
}
# Lock status:
# db - shared(main), shared(file2)
# db2 - reserved(file2)
} {0 {}}
lock_status 4.6.1.1 db {main shared temp closed file2 shared}
lock_status 4.6.1.2 db2 {main unlocked temp closed file2 reserved}
do_test attach2-4.6.2 {
# Writes against a reserved database are not allowed.
catchsql {
UPDATE file2.t1 SET a=0;
}
} {1 {database is locked}}
lock_status 4.6.2.1 db {main shared temp closed file2 shared}
lock_status 4.6.2.2 db2 {main unlocked temp closed file2 reserved}
do_test attach2-4.7 {
# Ensure handle 'db' retains the lock on the main file after
# failing to obtain a write-lock on file2.
catchsql {
INSERT INTO t1 VALUES(1, 2)
} db2
} {0 {}}
lock_status 4.7.1 db {main shared temp closed file2 shared}
lock_status 4.7.2 db2 {main reserved temp closed file2 reserved}
do_test attach2-4.8 {
# We should still be able to read test.db from db2
execsql {SELECT * FROM t1} db2
} {1 2}
lock_status 4.8.1 db {main shared temp closed file2 shared}
lock_status 4.8.2 db2 {main reserved temp closed file2 reserved}
do_test attach2-4.9 {
# Try to upgrade the handle 'db' lock.
catchsql {
INSERT INTO t1 VALUES(1, 2)
}
} {1 {database is locked}}
lock_status 4.9.1 db {main shared temp closed file2 shared}
lock_status 4.9.2 db2 {main reserved temp closed file2 reserved}
do_test attach2-4.10 {
# We cannot commit db2 while db is holding a read-lock
catchsql {COMMIT} db2
} {1 {database is locked}}
lock_status 4.10.1 db {main shared temp closed file2 shared}
lock_status 4.10.2 db2 {main pending temp closed file2 reserved}
set sqlite_os_trace 0
do_test attach2-4.11 {
# db is able to commit.
catchsql {COMMIT}
} {0 {}}
lock_status 4.11.1 db {main unlocked temp closed file2 unlocked}
lock_status 4.11.2 db2 {main pending temp closed file2 reserved}
do_test attach2-4.12 {
# Now we can commit db2
catchsql {COMMIT} db2
} {0 {}}
lock_status 4.12.1 db {main unlocked temp closed file2 unlocked}
lock_status 4.12.2 db2 {main unlocked temp closed file2 unlocked}
do_test attach2-4.13 {
execsql {SELECT * FROM file2.t1}
} {1 2}
do_test attach2-4.14 {
execsql {INSERT INTO t1 VALUES(1, 2)}
} {}
do_test attach2-4.15 {
execsql {SELECT * FROM t1} db2
} {1 2 1 2}
db close
db2 close
forcedelete test2.db
sqlite3_soft_heap_limit $soft_limit
# These tests - attach2-5.* - check that the master journal file is deleted
# correctly when a multi-file transaction is committed or rolled back.
#
# Update: It's not actually created if a rollback occurs, so that test
# doesn't really prove too much.
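# Because the master journal is removed as part of a successful COMMIT,
# it only exists for the instant the multi-file commit is in progress;
# afterwards the most that can be verified is that it is gone again.
# For reference, a minimal standalone sketch of the kind of multi-file
# commit involved (scratch file names, separate from the attach2-5.*
# tests below):
sqlite3 dbmj mj_main.db
dbmj eval {
  ATTACH 'mj_aux.db' AS aux;
  BEGIN;
  CREATE TABLE main.m(x);
  CREATE TABLE aux.a(y);
  COMMIT;
}
dbmj close
forcedelete mj_main.db mj_aux.db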
foreach f [glob test.db*] {forcedelete $f}
do_test attach2-5.1 {
sqlite3 db test.db
execsql {
ATTACH 'test.db2' AS aux;
}
} {}
do_test attach2-5.2 {
execsql {
BEGIN;
CREATE TABLE tbl(a, b, c);
CREATE TABLE aux.tbl(a, b, c);
COMMIT;
}
} {}
do_test attach2-5.3 {
lsort [glob test.db*]
} {test.db test.db2}
do_test attach2-5.4 {
execsql {
BEGIN;
DROP TABLE aux.tbl;
DROP TABLE tbl;
ROLLBACK;
}
} {}
do_test attach2-5.5 {
lsort [glob test.db*]
} {test.db test.db2}
# Check that a database cannot be ATTACHed or DETACHed during a transaction.
do_test attach2-6.1 {
execsql {
BEGIN;
}
} {}
do_test attach2-6.2 {
catchsql {
ATTACH 'test3.db' as aux2;
}
} {1 {cannot ATTACH database within transaction}}
# EVIDENCE-OF: R-59740-55581 This statement will fail if SQLite is in
# the middle of a transaction.
#
do_test attach2-6.3 {
catchsql {
DETACH aux;
}
} {1 {cannot DETACH database within transaction}}
do_test attach2-6.4 {
execsql {
COMMIT;
DETACH aux;
}
} {}
db close
finish_test

View File

@ -0,0 +1,353 @@
# 2003 July 1
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for SQLite library. The
# focus of this script is testing the ATTACH and DETACH commands
# and schema changes to attached databases.
#
# $Id: attach3.test,v 1.18 2007/10/09 08:29:32 danielk1977 Exp $
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
ifcapable !attach {
finish_test
return
}
# The tests in this file were written before SQLite supported recursive
# trigger invocation, and some tests depend on that to pass. So disable
# recursive triggers for this file.
catchsql { pragma recursive_triggers = off }
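# (In particular, the AFTER INSERT trigger created in attach3-7.1 below
# inserts back into the same table it fires on; with recursive triggers
# enabled it would fire again for its own insert and change the expected
# results.)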
# Create tables t1 and t2 in the main database
execsql {
CREATE TABLE t1(a, b);
CREATE TABLE t2(c, d);
}
# Create tables t1 and t2 in database file test2.db
forcedelete test2.db
forcedelete test2.db-journal
sqlite3 db2 test2.db
execsql {
CREATE TABLE t1(a, b);
CREATE TABLE t2(c, d);
} db2
db2 close
# Create a table in the auxiliary database.
do_test attach3-1.1 {
execsql {
ATTACH 'test2.db' AS aux;
}
} {}
do_test attach3-1.2 {
execsql {
CREATE TABLE aux.t3(e, f);
}
} {}
do_test attach3-1.3 {
execsql {
SELECT * FROM sqlite_master WHERE name = 't3';
}
} {}
do_test attach3-1.4 {
execsql {
SELECT * FROM aux.sqlite_master WHERE name = 't3';
}
} "table t3 t3 [expr $AUTOVACUUM?5:4] {CREATE TABLE t3(e, f)}"
do_test attach3-1.5 {
execsql {
INSERT INTO t3 VALUES(1, 2);
SELECT * FROM t3;
}
} {1 2}
# Create an index on the auxiliary database table.
do_test attach3-2.1 {
execsql {
CREATE INDEX aux.i1 on t3(e);
}
} {}
do_test attach3-2.2 {
execsql {
SELECT * FROM sqlite_master WHERE name = 'i1';
}
} {}
do_test attach3-2.3 {
execsql {
SELECT * FROM aux.sqlite_master WHERE name = 'i1';
}
} "index i1 t3 [expr $AUTOVACUUM?6:5] {CREATE INDEX i1 on t3(e)}"
# Drop the index on the aux database table.
do_test attach3-3.1 {
execsql {
DROP INDEX aux.i1;
SELECT * FROM aux.sqlite_master WHERE name = 'i1';
}
} {}
do_test attach3-3.2 {
execsql {
CREATE INDEX aux.i1 on t3(e);
SELECT * FROM aux.sqlite_master WHERE name = 'i1';
}
} "index i1 t3 [expr $AUTOVACUUM?6:5] {CREATE INDEX i1 on t3(e)}"
do_test attach3-3.3 {
execsql {
DROP INDEX i1;
SELECT * FROM aux.sqlite_master WHERE name = 'i1';
}
} {}
# Drop tables t1 and t2 in the auxiliary database.
do_test attach3-4.1 {
execsql {
DROP TABLE aux.t1;
SELECT name FROM aux.sqlite_master;
}
} {t2 t3}
do_test attach3-4.2 {
# This will drop main.t2
execsql {
DROP TABLE t2;
SELECT name FROM aux.sqlite_master;
}
} {t2 t3}
do_test attach3-4.3 {
execsql {
DROP TABLE t2;
SELECT name FROM aux.sqlite_master;
}
} {t3}
# Create a view in the auxiliary database.
ifcapable view {
do_test attach3-5.1 {
execsql {
CREATE VIEW aux.v1 AS SELECT * FROM t3;
}
} {}
do_test attach3-5.2 {
execsql {
SELECT * FROM aux.sqlite_master WHERE name = 'v1';
}
} {view v1 v1 0 {CREATE VIEW v1 AS SELECT * FROM t3}}
do_test attach3-5.3 {
execsql {
INSERT INTO aux.t3 VALUES('hello', 'world');
SELECT * FROM v1;
}
} {1 2 hello world}
# Drop the view
do_test attach3-6.1 {
execsql {
DROP VIEW aux.v1;
}
} {}
do_test attach3-6.2 {
execsql {
SELECT * FROM aux.sqlite_master WHERE name = 'v1';
}
} {}
} ;# ifcapable view
ifcapable {trigger} {
# Create a trigger in the auxiliary database.
do_test attach3-7.1 {
execsql {
CREATE TRIGGER aux.tr1 AFTER INSERT ON t3 BEGIN
INSERT INTO t3 VALUES(new.e*2, new.f*2);
END;
}
} {}
do_test attach3-7.2 {
execsql {
DELETE FROM t3;
INSERT INTO t3 VALUES(10, 20);
SELECT * FROM t3;
}
} {10 20 20 40}
do_test attach3-5.3 {
execsql {
SELECT * FROM aux.sqlite_master WHERE name = 'tr1';
}
} {trigger tr1 t3 0 {CREATE TRIGGER tr1 AFTER INSERT ON t3 BEGIN
INSERT INTO t3 VALUES(new.e*2, new.f*2);
END}}
# Drop the trigger
do_test attach3-8.1 {
execsql {
DROP TRIGGER aux.tr1;
}
} {}
do_test attach3-8.2 {
execsql {
SELECT * FROM aux.sqlite_master WHERE name = 'tr1';
}
} {}
ifcapable tempdb {
# Try to trick SQLite into dropping the wrong temp trigger.
do_test attach3-9.0 {
execsql {
CREATE TABLE main.t4(a, b, c);
CREATE TABLE aux.t4(a, b, c);
CREATE TEMP TRIGGER tst_trigger BEFORE INSERT ON aux.t4 BEGIN
SELECT 'hello world';
END;
SELECT count(*) FROM sqlite_temp_master;
}
} {1}
do_test attach3-9.1 {
execsql {
DROP TABLE main.t4;
SELECT count(*) FROM sqlite_temp_master;
}
} {1}
do_test attach3-9.2 {
execsql {
DROP TABLE aux.t4;
SELECT count(*) FROM sqlite_temp_master;
}
} {0}
}
} ;# endif trigger
# Make sure the aux.sqlite_master table is read-only
do_test attach3-10.0 {
catchsql {
INSERT INTO aux.sqlite_master VALUES(1, 2, 3, 4, 5);
}
} {1 {table sqlite_master may not be modified}}
# Failure to attach leaves us in a workable state.
# Ticket #811
#
do_test attach3-11.0 {
catchsql {
ATTACH DATABASE '/nodir/nofile.x' AS notadb;
}
} {1 {unable to open database: /nodir/nofile.x}}
do_test attach3-11.1 {
catchsql {
ATTACH DATABASE ':memory:' AS notadb;
}
} {0 {}}
do_test attach3-11.2 {
catchsql {
DETACH DATABASE notadb;
}
} {0 {}}
# Return a list of attached databases
#
proc db_list {} {
set x [execsql {
PRAGMA database_list;
}]
set y {}
foreach {n id file} $x {lappend y $id}
return $y
}
ifcapable schema_pragmas&&tempdb {
ifcapable !trigger {
execsql {create temp table dummy(dummy)}
}
# Ticket #1825
#
do_test attach3-12.1 {
db_list
} {main temp aux}
do_test attach3-12.2 {
execsql {
ATTACH DATABASE ? AS ?
}
db_list
} {main temp aux {}}
do_test attach3-12.3 {
execsql {
DETACH aux
}
db_list
} {main temp {}}
do_test attach3-12.4 {
execsql {
DETACH ?
}
db_list
} {main temp}
do_test attach3-12.5 {
execsql {
ATTACH DATABASE '' AS ''
}
db_list
} {main temp {}}
do_test attach3-12.6 {
execsql {
DETACH ''
}
db_list
} {main temp}
do_test attach3-12.7 {
execsql {
ATTACH DATABASE '' AS ?
}
db_list
} {main temp {}}
do_test attach3-12.8 {
execsql {
DETACH ''
}
db_list
} {main temp}
do_test attach3-12.9 {
execsql {
ATTACH DATABASE '' AS NULL
}
db_list
} {main temp {}}
do_test attach3-12.10 {
execsql {
DETACH ?
}
db_list
} {main temp}
do_test attach3-12.11 {
catchsql {
DETACH NULL
}
} {1 {no such database: }}
do_test attach3-12.12 {
catchsql {
ATTACH null AS null;
ATTACH '' AS '';
}
} {1 {database is already in use}}
do_test attach3-12.13 {
db_list
} {main temp {}}
do_test attach3-12.14 {
execsql {
DETACH '';
}
db_list
} {main temp}
} ;# ifcapable pragma
finish_test

View File

@ -0,0 +1,118 @@
# 200 July 1
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for SQLite library. The
# focus of this script is attaching many database files to a single
# connection.
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
set testprefix attach4
ifcapable !attach {
finish_test
return
}
puts "Testing with SQLITE_MAX_ATTACHED=$SQLITE_MAX_ATTACHED"
set files {main test.db}
for {set ii 0} {$ii < $SQLITE_MAX_ATTACHED} {incr ii} {
lappend files aux$ii "test.db$ii"
}
do_test 1.1 {
sqlite3_limit db SQLITE_LIMIT_ATTACHED -1
} $SQLITE_MAX_ATTACHED
do_test 1.2.1 {
db close
foreach {name f} $files { forcedelete $f }
sqlite3 db test.db
foreach {name f} $files {
if {$name == "main"} continue
execsql "ATTACH '$f' AS $name"
}
db eval {PRAGMA database_list} {
lappend L $name [file tail $file]
}
set L
} $files
do_catchsql_test 1.2.2 {
ATTACH 'x.db' AS next;
} [list 1 "too many attached databases - max $SQLITE_MAX_ATTACHED"]
do_test 1.3 {
execsql BEGIN;
foreach {name f} $files {
execsql "CREATE TABLE $name.tbl(x)"
execsql "INSERT INTO $name.tbl VALUES('$f')"
}
execsql COMMIT;
} {}
do_test 1.4 {
set L [list]
foreach {name f} $files {
lappend L $name [execsql "SELECT x FROM $name.tbl"]
}
set L
} $files
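# Test 1.5 below switches every attached database to WAL mode where
# possible. Under the "journaltest" permutation, or in a build without
# WAL support, the journal_mode pragma is expected to report "delete"
# instead, which is what the expected-result list assembled just below
# accounts for.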
set L [list]
set S ""
foreach {name f} $files {
if {[permutation] == "journaltest"} {
set mode delete
} else {
set mode wal
}
ifcapable !wal { set mode delete }
lappend L $mode
append S "
PRAGMA $name.journal_mode = WAL;
UPDATE $name.tbl SET x = '$name';
"
}
do_execsql_test 1.5 $S $L
do_test 1.6 {
set L [list]
foreach {name f} $files {
lappend L [execsql "SELECT x FROM $name.tbl"] $f
}
set L
} $files
do_test 1.7 {
execsql BEGIN;
foreach {name f} $files {
execsql "UPDATE $name.tbl SET x = '$f'"
}
execsql COMMIT;
} {}
do_test 1.8 {
set L [list]
foreach {name f} $files {
lappend L $name [execsql "SELECT x FROM $name.tbl"]
}
set L
} $files
db close
foreach {name f} $files { forcedelete $f }
finish_test

View File

@ -0,0 +1,77 @@
# 2005 September 19
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#*************************************************************************
# This file implements regression tests for SQLite library. The
# focus of this script is testing the ATTACH statement and
# specifically out-of-memory conditions within that command.
#
# $Id: attachmalloc.test,v 1.10 2008/10/22 10:45:38 danielk1977 Exp $
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
ifcapable !memdebug||!attach {
finish_test
return
}
source $testdir/malloc_common.tcl
do_malloc_test attachmalloc-1 -tclprep {
catch { db close }
for {set i 2} {$i<=4} {incr i} {
catch { db$i close }
forcedelete test$i.db
forcedelete test$i.db-journal
}
} -tclbody {
if {[catch {sqlite3 db test.db}]} {
error "out of memory"
}
sqlite3_db_config_lookaside db 0 0 0
sqlite3_extended_result_codes db 1
} -sqlbody {
ATTACH 'test2.db' AS two;
CREATE TABLE two.t1(x);
ATTACH 'test3.db' AS three;
CREATE TABLE three.t1(x);
ATTACH 'test4.db' AS four;
CREATE TABLE four.t1(x);
}
do_malloc_test attachmalloc-2 -tclprep {
forcedelete test2.db
forcedelete test2.db-journal
sqlite3 db2 test2.db
db2 eval {
CREATE TABLE t1(a, b, c);
CREATE INDEX i1 ON t1(a, b);
}
db2 close
} -sqlbody {
CREATE TABLE t1(d, e, f);
ATTACH 'test2.db' AS db1;
}
set enable_shared_cache [sqlite3_enable_shared_cache 1]
sqlite3 dbaux test3.db
dbaux eval {SELECT * FROM sqlite_master}
do_malloc_test attachmalloc-3 -sqlbody {
SELECT * FROM sqlite_master;
ATTACH 'test3.db' AS three;
} -cleanup {
db eval { DETACH three }
}
dbaux close
sqlite3_enable_shared_cache $enable_shared_cache
finish_test

File diff suppressed because it is too large

View File

@ -0,0 +1,172 @@
# 2006 Aug 24
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for SQLite library. The
# focus of this script is testing the sqlite3_set_authorizer() API
# and related functionality.
#
# $Id: auth2.test,v 1.3 2008/07/02 13:13:53 danielk1977 Exp $
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
# disable this test if the SQLITE_OMIT_AUTHORIZATION macro is
# defined during compilation.
if {[catch {db auth {}} msg]} {
finish_test
return
}
do_test auth2-1.1 {
execsql {
CREATE TABLE t1(a,b,c);
INSERT INTO t1 VALUES(1,2,3);
}
set ::flist {}
proc auth {code arg1 arg2 arg3 arg4 args} {
if {$code=="SQLITE_FUNCTION"} {
lappend ::flist $arg2
if {$arg2=="max"} {
return SQLITE_DENY
} elseif {$arg2=="min"} {
return SQLITE_IGNORE
} else {
return SQLITE_OK
}
}
return SQLITE_OK
}
db authorizer ::auth
catchsql {SELECT max(a,b,c) FROM t1}
} {1 {not authorized to use function: max}}
do_test auth2-1.2 {
set ::flist
} max
do_test auth2-1.3 {
set ::flist {}
catchsql {SELECT min(a,b,c) FROM t1}
} {0 {{}}}
do_test auth2-1.4 {
set ::flist
} min
do_test auth2-1.5 {
set ::flist {}
catchsql {SELECT coalesce(min(a,b,c),999) FROM t1}
} {0 999}
do_test auth2-1.6 {
set ::flist
} {coalesce min}
do_test auth2-1.7 {
set ::flist {}
catchsql {SELECT coalesce(a,b,c) FROM t1}
} {0 1}
do_test auth2-1.8 {
set ::flist
} coalesce
# Make sure the authorizer is not called when parsing the schema
# and when computing the result set of a view.
#
db close
sqlite3 db test.db
sqlite3 db2 test.db
proc auth {args} {
global authargs
append authargs [lrange $args 0 4]\n
return SQLITE_OK
}
db auth auth
do_test auth2-2.1 {
set ::authargs {}
db eval {
CREATE TABLE t2(x,y,z);
}
set ::authargs
} {SQLITE_INSERT sqlite_master {} main {}
SQLITE_CREATE_TABLE t2 {} main {}
SQLITE_UPDATE sqlite_master type main {}
SQLITE_UPDATE sqlite_master name main {}
SQLITE_UPDATE sqlite_master tbl_name main {}
SQLITE_UPDATE sqlite_master rootpage main {}
SQLITE_UPDATE sqlite_master sql main {}
SQLITE_READ sqlite_master ROWID main {}
SQLITE_READ sqlite_master name main {}
SQLITE_READ sqlite_master rootpage main {}
SQLITE_READ sqlite_master sql main {}
SQLITE_READ sqlite_master tbl_name main {}
SQLITE_READ sqlite_master type main {}
SQLITE_READ sqlite_master ROWID main {}
}
do_test auth2-2.2 {
set ::authargs {}
db eval {
CREATE VIEW v2 AS SELECT x+y AS a, y+z AS b from t2;
}
set ::authargs
} {SQLITE_INSERT sqlite_master {} main {}
SQLITE_CREATE_VIEW v2 {} main {}
SQLITE_UPDATE sqlite_master type main {}
SQLITE_UPDATE sqlite_master name main {}
SQLITE_UPDATE sqlite_master tbl_name main {}
SQLITE_UPDATE sqlite_master rootpage main {}
SQLITE_UPDATE sqlite_master sql main {}
SQLITE_READ sqlite_master ROWID main {}
SQLITE_READ sqlite_master name main {}
SQLITE_READ sqlite_master rootpage main {}
SQLITE_READ sqlite_master sql main {}
SQLITE_READ sqlite_master tbl_name main {}
SQLITE_READ sqlite_master type main {}
SQLITE_READ sqlite_master ROWID main {}
}
do_test auth2-2.3 {
set ::authargs {}
db eval {
SELECT a, b FROM v2;
}
set ::authargs
} {SQLITE_SELECT {} {} {} {}
SQLITE_READ t2 x main v2
SQLITE_READ t2 y main v2
SQLITE_READ t2 y main v2
SQLITE_READ t2 z main v2
SQLITE_READ v2 a main {}
SQLITE_READ v2 b main {}
SQLITE_SELECT {} {} {} v2
}
do_test auth2-2.4 {
db2 eval {
CREATE TABLE t3(p,q,r);
}
set ::authargs {}
db eval {
SELECT b, a FROM v2;
}
set ::authargs
} {SQLITE_SELECT {} {} {} {}
SQLITE_READ t2 x main v2
SQLITE_READ t2 y main v2
SQLITE_READ t2 y main v2
SQLITE_READ t2 z main v2
SQLITE_READ v2 b main {}
SQLITE_READ v2 a main {}
SQLITE_SELECT {} {} {} v2
SQLITE_SELECT {} {} {} {}
SQLITE_READ t2 x main v2
SQLITE_READ t2 y main v2
SQLITE_READ t2 y main v2
SQLITE_READ t2 z main v2
SQLITE_READ v2 b main {}
SQLITE_READ v2 a main {}
SQLITE_SELECT {} {} {} v2
}
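# (The callback trace above appears twice because the CREATE TABLE issued
# through db2 invalidates db's cached schema: the SELECT is prepared, hits
# SQLITE_SCHEMA when stepped, and is transparently re-prepared and re-run.
# No sqlite_master reads appear for the schema reload itself, confirming
# that the authorizer is not consulted while the schema is being reparsed.)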
db2 close
finish_test

View File

@ -0,0 +1,111 @@
# 2008 October 27
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
#
# Test that the truncate optimization is disabled if the SQLITE_DELETE
# authorization callback returns SQLITE_IGNORE.
#
# $Id: auth3.test,v 1.2 2009/05/04 01:58:31 drh Exp $
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
# disable this test if the SQLITE_OMIT_AUTHORIZATION macro is
# defined during compilation.
if {[catch {db auth {}} msg]} {
finish_test
return
}
# Disable the statement cache for these tests.
#
db cache size 0
db authorizer ::auth
proc auth {code arg1 arg2 arg3 arg4 args} {
if {$code=="SQLITE_DELETE"} {
return $::authcode
}
return SQLITE_OK
}
#--------------------------------------------------------------------------
# The following tests - auth3-1.* - test that return values of SQLITE_DENY,
# SQLITE_IGNORE, SQLITE_OK and <invalid> are correctly handled when returned
# by an SQLITE_DELETE authorization callback triggered by a
# "DELETE FROM <table-name>" statement.
#
do_test auth3-1.1 {
execsql {
CREATE TABLE t1(a,b,c);
INSERT INTO t1 VALUES(1, 2, 3);
INSERT INTO t1 VALUES(4, 5, 6);
}
} {}
do_test auth3.1.2 {
set ::authcode SQLITE_DENY
catchsql { DELETE FROM t1 }
} {1 {not authorized}}
do_test auth3.1.3 {
set ::authcode SQLITE_INVALID
catchsql { DELETE FROM t1 }
} {1 {authorizer malfunction}}
do_test auth3.1.4 {
execsql { SELECT * FROM t1 }
} {1 2 3 4 5 6}
do_test auth3-1.5 {
set ::authcode SQLITE_IGNORE
execsql {
DELETE FROM t1;
SELECT * FROM t1;
}
} {}
do_test auth3-1.6 {
set ::authcode SQLITE_OK
execsql {
INSERT INTO t1 VALUES(1, 2, 3);
INSERT INTO t1 VALUES(4, 5, 6);
DELETE FROM t1;
SELECT * FROM t1;
}
} {}
#--------------------------------------------------------------------------
# These tests - auth3-2.* - test that returning SQLITE_IGNORE really does
# disable the truncate optimization.
#
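# (sqlite_search_count is an instrumentation counter exposed to the test
# harness: it stays at 0 when the DELETE clears the table in a single
# truncate step, and becomes non-zero when the rows have to be visited
# individually.)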
do_test auth3-2.1 {
set ::authcode SQLITE_OK
execsql {
INSERT INTO t1 VALUES(1, 2, 3);
INSERT INTO t1 VALUES(4, 5, 6);
}
set sqlite_search_count 0
execsql {
DELETE FROM t1;
}
set sqlite_search_count
} {0}
do_test auth3-2.2 {
set ::authcode SQLITE_IGNORE
execsql {
INSERT INTO t1 VALUES(1, 2, 3);
INSERT INTO t1 VALUES(4, 5, 6);
}
set sqlite_search_count 0
execsql {
DELETE FROM t1;
}
set sqlite_search_count
} {1}
finish_test

View File

@ -0,0 +1,668 @@
# 2004 November 12
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#*************************************************************************
# This file implements regression tests for SQLite library. The
# focus of this script is testing the AUTOINCREMENT features.
#
# $Id: autoinc.test,v 1.14 2009/06/23 20:28:54 drh Exp $
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
# If the library is not compiled with autoincrement support then
# skip all tests in this file.
#
ifcapable {!autoinc} {
finish_test
return
}
sqlite3_db_config_lookaside db 0 0 0
# The database is initially empty.
#
do_test autoinc-1.1 {
execsql {
SELECT name FROM sqlite_master WHERE type='table';
}
} {}
# Add a table with the AUTOINCREMENT feature. Verify that the
# SQLITE_SEQUENCE table gets created.
#
do_test autoinc-1.2 {
execsql {
CREATE TABLE t1(x INTEGER PRIMARY KEY AUTOINCREMENT, y);
SELECT name FROM sqlite_master WHERE type='table';
}
} {t1 sqlite_sequence}
# The SQLITE_SEQUENCE table is initially empty
#
do_test autoinc-1.3 {
execsql {
SELECT * FROM sqlite_sequence;
}
} {}
do_test autoinc-1.3.1 {
catchsql {
CREATE INDEX seqidx ON sqlite_sequence(name)
}
} {1 {table sqlite_sequence may not be indexed}}
# Close and reopen the database. Verify that everything is still there.
#
do_test autoinc-1.4 {
db close
sqlite3 db test.db
execsql {
SELECT * FROM sqlite_sequence;
}
} {}
# We are not allowed to drop the sqlite_sequence table.
#
do_test autoinc-1.5 {
catchsql {DROP TABLE sqlite_sequence}
} {1 {table sqlite_sequence may not be dropped}}
do_test autoinc-1.6 {
execsql {SELECT name FROM sqlite_master WHERE type='table'}
} {t1 sqlite_sequence}
# Insert entries into the t1 table and make sure the largest key
# is always recorded in the sqlite_sequence table.
#
do_test autoinc-2.1 {
execsql {
SELECT * FROM sqlite_sequence
}
} {}
do_test autoinc-2.2 {
execsql {
INSERT INTO t1 VALUES(12,34);
SELECT * FROM sqlite_sequence;
}
} {t1 12}
do_test autoinc-2.3 {
execsql {
INSERT INTO t1 VALUES(1,23);
SELECT * FROM sqlite_sequence;
}
} {t1 12}
do_test autoinc-2.4 {
execsql {
INSERT INTO t1 VALUES(123,456);
SELECT * FROM sqlite_sequence;
}
} {t1 123}
do_test autoinc-2.5 {
execsql {
INSERT INTO t1 VALUES(NULL,567);
SELECT * FROM sqlite_sequence;
}
} {t1 124}
do_test autoinc-2.6 {
execsql {
DELETE FROM t1 WHERE y=567;
SELECT * FROM sqlite_sequence;
}
} {t1 124}
do_test autoinc-2.7 {
execsql {
INSERT INTO t1 VALUES(NULL,567);
SELECT * FROM sqlite_sequence;
}
} {t1 125}
do_test autoinc-2.8 {
execsql {
DELETE FROM t1;
SELECT * FROM sqlite_sequence;
}
} {t1 125}
do_test autoinc-2.9 {
execsql {
INSERT INTO t1 VALUES(12,34);
SELECT * FROM sqlite_sequence;
}
} {t1 125}
do_test autoinc-2.10 {
execsql {
INSERT INTO t1 VALUES(125,456);
SELECT * FROM sqlite_sequence;
}
} {t1 125}
do_test autoinc-2.11 {
execsql {
INSERT INTO t1 VALUES(-1234567,-1);
SELECT * FROM sqlite_sequence;
}
} {t1 125}
do_test autoinc-2.12 {
execsql {
INSERT INTO t1 VALUES(234,5678);
SELECT * FROM sqlite_sequence;
}
} {t1 234}
do_test autoinc-2.13 {
execsql {
DELETE FROM t1;
INSERT INTO t1 VALUES(NULL,1);
SELECT * FROM sqlite_sequence;
}
} {t1 235}
do_test autoinc-2.14 {
execsql {
SELECT * FROM t1;
}
} {235 1}
# Manually change the autoincrement values in sqlite_sequence.
#
do_test autoinc-2.20 {
execsql {
UPDATE sqlite_sequence SET seq=1234 WHERE name='t1';
INSERT INTO t1 VALUES(NULL,2);
SELECT * FROM t1;
}
} {235 1 1235 2}
do_test autoinc-2.21 {
execsql {
SELECT * FROM sqlite_sequence;
}
} {t1 1235}
do_test autoinc-2.22 {
execsql {
UPDATE sqlite_sequence SET seq=NULL WHERE name='t1';
INSERT INTO t1 VALUES(NULL,3);
SELECT * FROM t1;
}
} {235 1 1235 2 1236 3}
do_test autoinc-2.23 {
execsql {
SELECT * FROM sqlite_sequence;
}
} {t1 1236}
do_test autoinc-2.24 {
execsql {
UPDATE sqlite_sequence SET seq='a-string' WHERE name='t1';
INSERT INTO t1 VALUES(NULL,4);
SELECT * FROM t1;
}
} {235 1 1235 2 1236 3 1237 4}
do_test autoinc-2.25 {
execsql {
SELECT * FROM sqlite_sequence;
}
} {t1 1237}
do_test autoinc-2.26 {
execsql {
DELETE FROM sqlite_sequence WHERE name='t1';
INSERT INTO t1 VALUES(NULL,5);
SELECT * FROM t1;
}
} {235 1 1235 2 1236 3 1237 4 1238 5}
do_test autoinc-2.27 {
execsql {
SELECT * FROM sqlite_sequence;
}
} {t1 1238}
do_test autoinc-2.28 {
execsql {
UPDATE sqlite_sequence SET seq='-12345678901234567890'
WHERE name='t1';
INSERT INTO t1 VALUES(NULL,6);
SELECT * FROM t1;
}
} {235 1 1235 2 1236 3 1237 4 1238 5 1239 6}
do_test autoinc-2.29 {
execsql {
SELECT * FROM sqlite_sequence;
}
} {t1 1239}
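# (In each of the cases above where the stored sequence value is unusable
# -- NULL, a text string, a missing row, or an out-of-range negative
# number -- the next AUTOINCREMENT insert falls back to one more than the
# largest rowid actually present in t1, and sqlite_sequence is rewritten
# to match. A manually increased value, as in autoinc-2.20, is simply
# honoured.)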
# Test multi-row inserts
#
do_test autoinc-2.50 {
execsql {
DELETE FROM t1 WHERE y>=3;
INSERT INTO t1 SELECT NULL, y+2 FROM t1;
SELECT * FROM t1;
}
} {235 1 1235 2 1240 3 1241 4}
do_test autoinc-2.51 {
execsql {
SELECT * FROM sqlite_sequence
}
} {t1 1241}
ifcapable tempdb {
do_test autoinc-2.52 {
execsql {
CREATE TEMP TABLE t2 AS SELECT y FROM t1;
}
execsql {
INSERT INTO t1 SELECT NULL, y+4 FROM t2;
SELECT * FROM t1;
}
} {235 1 1235 2 1240 3 1241 4 1242 5 1243 6 1244 7 1245 8}
do_test autoinc-2.53 {
execsql {
SELECT * FROM sqlite_sequence
}
} {t1 1245}
do_test autoinc-2.54 {
execsql {
DELETE FROM t1;
INSERT INTO t1 SELECT NULL, y FROM t2;
SELECT * FROM t1;
}
} {1246 1 1247 2 1248 3 1249 4}
do_test autoinc-2.55 {
execsql {
SELECT * FROM sqlite_sequence
}
} {t1 1249}
}
# Create multiple AUTOINCREMENT tables. Make sure all sequences are
# tracked separately and do not interfere with one another.
#
do_test autoinc-2.70 {
catchsql {
DROP TABLE t2;
}
execsql {
CREATE TABLE t2(d, e INTEGER PRIMARY KEY AUTOINCREMENT, f);
INSERT INTO t2(d) VALUES(1);
SELECT * FROM sqlite_sequence;
}
} [ifcapable tempdb {list t1 1249 t2 1} else {list t1 1241 t2 1}]
do_test autoinc-2.71 {
execsql {
INSERT INTO t2(d) VALUES(2);
SELECT * FROM sqlite_sequence;
}
} [ifcapable tempdb {list t1 1249 t2 2} else {list t1 1241 t2 2}]
do_test autoinc-2.72 {
execsql {
INSERT INTO t1(x) VALUES(10000);
SELECT * FROM sqlite_sequence;
}
} {t1 10000 t2 2}
do_test autoinc-2.73 {
execsql {
CREATE TABLE t3(g INTEGER PRIMARY KEY AUTOINCREMENT, h);
INSERT INTO t3(h) VALUES(1);
SELECT * FROM sqlite_sequence;
}
} {t1 10000 t2 2 t3 1}
do_test autoinc-2.74 {
execsql {
INSERT INTO t2(d,e) VALUES(3,100);
SELECT * FROM sqlite_sequence;
}
} {t1 10000 t2 100 t3 1}
# When a table with an AUTOINCREMENT column is dropped, the corresponding entry
# in the SQLITE_SEQUENCE table should also be deleted. But the SQLITE_SEQUENCE
# table itself should remain behind.
#
do_test autoinc-3.1 {
execsql {SELECT name FROM sqlite_sequence}
} {t1 t2 t3}
do_test autoinc-3.2 {
execsql {
DROP TABLE t1;
SELECT name FROM sqlite_sequence;
}
} {t2 t3}
do_test autoinc-3.3 {
execsql {
DROP TABLE t3;
SELECT name FROM sqlite_sequence;
}
} {t2}
do_test autoinc-3.4 {
execsql {
DROP TABLE t2;
SELECT name FROM sqlite_sequence;
}
} {}
# AUTOINCREMENT on TEMP tables.
#
ifcapable tempdb {
do_test autoinc-4.1 {
execsql {
SELECT 1, name FROM sqlite_master WHERE type='table';
SELECT 2, name FROM sqlite_temp_master WHERE type='table';
}
} {1 sqlite_sequence}
do_test autoinc-4.2 {
execsql {
CREATE TABLE t1(x INTEGER PRIMARY KEY AUTOINCREMENT, y);
CREATE TEMP TABLE t3(a INTEGER PRIMARY KEY AUTOINCREMENT, b);
SELECT 1, name FROM sqlite_master WHERE type='table';
SELECT 2, name FROM sqlite_temp_master WHERE type='table';
}
} {1 sqlite_sequence 1 t1 2 t3 2 sqlite_sequence}
do_test autoinc-4.3 {
execsql {
SELECT 1, * FROM main.sqlite_sequence;
SELECT 2, * FROM temp.sqlite_sequence;
}
} {}
do_test autoinc-4.4 {
execsql {
INSERT INTO t1 VALUES(10,1);
INSERT INTO t3 VALUES(20,2);
INSERT INTO t1 VALUES(NULL,3);
INSERT INTO t3 VALUES(NULL,4);
}
} {}
ifcapable compound {
do_test autoinc-4.4.1 {
execsql {
SELECT * FROM t1 UNION ALL SELECT * FROM t3;
}
} {10 1 11 3 20 2 21 4}
} ;# ifcapable compound
do_test autoinc-4.5 {
execsql {
SELECT 1, * FROM main.sqlite_sequence;
SELECT 2, * FROM temp.sqlite_sequence;
}
} {1 t1 11 2 t3 21}
do_test autoinc-4.6 {
execsql {
INSERT INTO t1 SELECT * FROM t3;
SELECT 1, * FROM main.sqlite_sequence;
SELECT 2, * FROM temp.sqlite_sequence;
}
} {1 t1 21 2 t3 21}
do_test autoinc-4.7 {
execsql {
INSERT INTO t3 SELECT x+100, y FROM t1;
SELECT 1, * FROM main.sqlite_sequence;
SELECT 2, * FROM temp.sqlite_sequence;
}
} {1 t1 21 2 t3 121}
do_test autoinc-4.8 {
execsql {
DROP TABLE t3;
SELECT 1, * FROM main.sqlite_sequence;
SELECT 2, * FROM temp.sqlite_sequence;
}
} {1 t1 21}
do_test autoinc-4.9 {
execsql {
CREATE TEMP TABLE t2(p INTEGER PRIMARY KEY AUTOINCREMENT, q);
INSERT INTO t2 SELECT * FROM t1;
DROP TABLE t1;
SELECT 1, * FROM main.sqlite_sequence;
SELECT 2, * FROM temp.sqlite_sequence;
}
} {2 t2 21}
do_test autoinc-4.10 {
execsql {
DROP TABLE t2;
SELECT 1, * FROM main.sqlite_sequence;
SELECT 2, * FROM temp.sqlite_sequence;
}
} {}
}
# Make sure AUTOINCREMENT works on ATTACH-ed tables.
#
ifcapable tempdb&&attach {
do_test autoinc-5.1 {
forcedelete test2.db
forcedelete test2.db-journal
sqlite3 db2 test2.db
execsql {
CREATE TABLE t4(m INTEGER PRIMARY KEY AUTOINCREMENT, n);
CREATE TABLE t5(o, p INTEGER PRIMARY KEY AUTOINCREMENT);
} db2;
execsql {
ATTACH 'test2.db' as aux;
SELECT 1, * FROM main.sqlite_sequence;
SELECT 2, * FROM temp.sqlite_sequence;
SELECT 3, * FROM aux.sqlite_sequence;
}
} {}
do_test autoinc-5.2 {
execsql {
INSERT INTO t4 VALUES(NULL,1);
SELECT 1, * FROM main.sqlite_sequence;
SELECT 2, * FROM temp.sqlite_sequence;
SELECT 3, * FROM aux.sqlite_sequence;
}
} {3 t4 1}
do_test autoinc-5.3 {
execsql {
INSERT INTO t5 VALUES(100,200);
SELECT * FROM sqlite_sequence
} db2
} {t4 1 t5 200}
do_test autoinc-5.4 {
execsql {
SELECT 1, * FROM main.sqlite_sequence;
SELECT 2, * FROM temp.sqlite_sequence;
SELECT 3, * FROM aux.sqlite_sequence;
}
} {3 t4 1 3 t5 200}
}
# Requirement REQ00310: Make sure an insert fails if the sequence is
# already at its maximum value.
#
ifcapable {rowid32} {
do_test autoinc-6.1 {
execsql {
CREATE TABLE t6(v INTEGER PRIMARY KEY AUTOINCREMENT, w);
INSERT INTO t6 VALUES(2147483647,1);
SELECT seq FROM main.sqlite_sequence WHERE name='t6';
}
} 2147483647
}
ifcapable {!rowid32} {
do_test autoinc-6.1 {
execsql {
CREATE TABLE t6(v INTEGER PRIMARY KEY AUTOINCREMENT, w);
INSERT INTO t6 VALUES(9223372036854775807,1);
SELECT seq FROM main.sqlite_sequence WHERE name='t6';
}
} 9223372036854775807
}
do_test autoinc-6.2 {
catchsql {
INSERT INTO t6 VALUES(NULL,1);
}
} {1 {database or disk is full}}
# Allow the AUTOINCREMENT keyword inside the parentheses
# on a separate PRIMARY KEY designation.
#
do_test autoinc-7.1 {
execsql {
CREATE TABLE t7(x INTEGER, y REAL, PRIMARY KEY(x AUTOINCREMENT));
INSERT INTO t7(y) VALUES(123);
INSERT INTO t7(y) VALUES(234);
DELETE FROM t7;
INSERT INTO t7(y) VALUES(345);
SELECT * FROM t7;
}
} {3 345.0}
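# (Note that the result above is 3, not 1: because of AUTOINCREMENT the
# rowids 1 and 2 are never reused even though the table was emptied first.
# A plain INTEGER PRIMARY KEY would have handed out rowid 1 again.)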
# Test that if the AUTOINCREMENT is applied to a non-integer primary key
# the error message is sensible.
do_test autoinc-7.2 {
catchsql {
CREATE TABLE t8(x TEXT PRIMARY KEY AUTOINCREMENT);
}
} {1 {AUTOINCREMENT is only allowed on an INTEGER PRIMARY KEY}}
# Ticket #1283. Make sure that preparing but never running a statement
# that creates the sqlite_sequence table does not mess up the database.
#
do_test autoinc-8.1 {
catch {db2 close}
catch {db close}
forcedelete test.db
sqlite3 db test.db
set DB [sqlite3_connection_pointer db]
set STMT [sqlite3_prepare $DB {
CREATE TABLE t1(
x INTEGER PRIMARY KEY AUTOINCREMENT
)
} -1 TAIL]
sqlite3_finalize $STMT
set STMT [sqlite3_prepare $DB {
CREATE TABLE t1(
x INTEGER PRIMARY KEY AUTOINCREMENT
)
} -1 TAIL]
sqlite3_step $STMT
sqlite3_finalize $STMT
execsql {
INSERT INTO t1 VALUES(NULL);
SELECT * FROM t1;
}
} {1}
# Ticket #3148
# Make sure the sqlite_sequence table is not damaged when doing
# an empty insert - an INSERT INTO ... SELECT ... where the SELECT
# clause returns an empty set.
#
do_test autoinc-9.1 {
db eval {
CREATE TABLE t2(x INTEGER PRIMARY KEY AUTOINCREMENT, y);
INSERT INTO t2 VALUES(NULL, 1);
CREATE TABLE t3(a INTEGER PRIMARY KEY AUTOINCREMENT, b);
INSERT INTO t3 SELECT * FROM t2 WHERE y>1;
SELECT * FROM sqlite_sequence WHERE name='t3';
}
} {t3 0}
ifcapable trigger {
catchsql { pragma recursive_triggers = off }
# Ticket #3928. Make sure that triggers do not make extra slots in
# the SQLITE_SEQUENCE table.
#
do_test autoinc-3928.1 {
db eval {
CREATE TABLE t3928(a INTEGER PRIMARY KEY AUTOINCREMENT, b);
CREATE TRIGGER t3928r1 BEFORE INSERT ON t3928 BEGIN
INSERT INTO t3928(b) VALUES('before1');
INSERT INTO t3928(b) VALUES('before2');
END;
CREATE TRIGGER t3928r2 AFTER INSERT ON t3928 BEGIN
INSERT INTO t3928(b) VALUES('after1');
INSERT INTO t3928(b) VALUES('after2');
END;
INSERT INTO t3928(b) VALUES('test');
SELECT * FROM t3928 ORDER BY a;
}
} {1 before1 2 after1 3 after2 4 before2 5 after1 6 after2 7 test 8 before1 9 before2 10 after1 11 before1 12 before2 13 after2}
do_test autoinc-3928.2 {
db eval {
SELECT * FROM sqlite_sequence WHERE name='t3928'
}
} {t3928 13}
do_test autoinc-3928.3 {
db eval {
DROP TRIGGER t3928r1;
DROP TRIGGER t3928r2;
CREATE TRIGGER t3928r3 BEFORE UPDATE ON t3928
WHEN typeof(new.b)=='integer' BEGIN
INSERT INTO t3928(b) VALUES('before-int-' || new.b);
END;
CREATE TRIGGER t3928r4 AFTER UPDATE ON t3928
WHEN typeof(new.b)=='integer' BEGIN
INSERT INTO t3928(b) VALUES('after-int-' || new.b);
END;
DELETE FROM t3928 WHERE a!=1;
UPDATE t3928 SET b=456 WHERE a=1;
SELECT * FROM t3928 ORDER BY a;
}
} {1 456 14 before-int-456 15 after-int-456}
do_test autoinc-3928.4 {
db eval {
SELECT * FROM sqlite_sequence WHERE name='t3928'
}
} {t3928 15}
do_test autoinc-3928.5 {
db eval {
CREATE TABLE t3928b(x);
INSERT INTO t3928b VALUES(100);
INSERT INTO t3928b VALUES(200);
INSERT INTO t3928b VALUES(300);
DELETE FROM t3928;
CREATE TABLE t3928c(y INTEGER PRIMARY KEY AUTOINCREMENT, z);
CREATE TRIGGER t3928br1 BEFORE DELETE ON t3928b BEGIN
INSERT INTO t3928(b) VALUES('before-del-'||old.x);
INSERT INTO t3928c(z) VALUES('before-del-'||old.x);
END;
CREATE TRIGGER t3928br2 AFTER DELETE ON t3928b BEGIN
INSERT INTO t3928(b) VALUES('after-del-'||old.x);
INSERT INTO t3928c(z) VALUES('after-del-'||old.x);
END;
DELETE FROM t3928b;
SELECT * FROM t3928 ORDER BY a;
}
} {16 before-del-100 17 after-del-100 18 before-del-200 19 after-del-200 20 before-del-300 21 after-del-300}
do_test autoinc-3928.6 {
db eval {
SELECT * FROM t3928c ORDER BY y;
}
} {1 before-del-100 2 after-del-100 3 before-del-200 4 after-del-200 5 before-del-300 6 after-del-300}
do_test autoinc-3928.7 {
db eval {
SELECT * FROM sqlite_sequence WHERE name LIKE 't3928%' ORDER BY name;
}
} {t3928 21 t3928c 6}
# Ticket [a696379c1f0886615541a48b35bd8181a80e88f8]
do_test autoinc-a69637.1 {
db eval {
CREATE TABLE ta69637_1(x INTEGER PRIMARY KEY AUTOINCREMENT, y);
CREATE TABLE ta69637_2(z);
CREATE TRIGGER ra69637_1 AFTER INSERT ON ta69637_2 BEGIN
INSERT INTO ta69637_1(y) VALUES(new.z+1);
END;
INSERT INTO ta69637_2 VALUES(123);
SELECT * FROM ta69637_1;
}
} {1 124}
do_test autoinc-a69637.2 {
db eval {
CREATE VIEW va69637_2 AS SELECT * FROM ta69637_2;
CREATE TRIGGER ra69637_2 INSTEAD OF INSERT ON va69637_2 BEGIN
INSERT INTO ta69637_1(y) VALUES(new.z+10000);
END;
INSERT INTO va69637_2 VALUES(123);
SELECT * FROM ta69637_1;
}
} {1 124 2 10123}
}
finish_test

View File

@ -0,0 +1,523 @@
# 2010 April 07
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#*************************************************************************
# This file implements regression tests for SQLite library. The
# focus of this script is testing automatic index creation logic.
#
# EVIDENCE-OF: R-34271-33106 PRAGMA automatic_index; PRAGMA
# automatic_index = boolean; Query, set, or clear the automatic indexing
# capability.
set testdir [file dirname $argv0]
source $testdir/tester.tcl
# If the library is not compiled with automatic index support then
# skip all tests in this file.
#
ifcapable {!autoindex} {
finish_test
return
}
# Setup for logging
db close
sqlite3_shutdown
test_sqlite3_log [list lappend ::log]
set ::log [list]
sqlite3 db test.db
# With automatic index turned off, we do a full scan of the T2 table
do_test autoindex1-100 {
db eval {
CREATE TABLE t1(a,b);
INSERT INTO t1 VALUES(1,11);
INSERT INTO t1 VALUES(2,22);
INSERT INTO t1 SELECT a+2, b+22 FROM t1;
INSERT INTO t1 SELECT a+4, b+44 FROM t1;
CREATE TABLE t2(c,d);
INSERT INTO t2 SELECT a, 900+b FROM t1;
}
db eval {
PRAGMA automatic_index=OFF;
SELECT b, d FROM t1 JOIN t2 ON a=c ORDER BY b;
}
} {11 911 22 922 33 933 44 944 55 955 66 966 77 977 88 988}
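# ("db status step" and "db status autoindex" below report, through the Tcl
# interface, the per-statement counters SQLITE_STMTSTATUS_FULLSCAN_STEP and
# SQLITE_STMTSTATUS_AUTOINDEX for the most recently executed statement, so a
# large "step" count means full-table-scan work and a non-zero "autoindex"
# count means rows were inserted into a transient automatic index.)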
do_test autoindex1-101 {
db status step
} {63}
do_test autoindex1-102 {
db status autoindex
} {0}
# With autoindex turned on, we build an index once and then use that index
# to find T2 values.
do_test autoindex1-110 {
db eval {
PRAGMA automatic_index=ON;
SELECT b, d FROM t1 JOIN t2 ON a=c ORDER BY b;
}
} {11 911 22 922 33 933 44 944 55 955 66 966 77 977 88 988}
do_test autoindex1-111 {
db status step
} {7}
do_test autoindex1-112 {
db status autoindex
} {7}
do_test autoindex1-113 {
set ::log
} {SQLITE_WARNING_AUTOINDEX {automatic index on t2(c)}}
db close
sqlite3_shutdown
test_sqlite3_log
sqlite3_initialize
sqlite3 db test.db
# The same test as above, but this time the T2 query is a subquery rather
# than a join.
do_test autoindex1-200 {
db eval {
PRAGMA automatic_index=OFF;
SELECT b, (SELECT d FROM t2 WHERE c=a) FROM t1;
}
} {11 911 22 922 33 933 44 944 55 955 66 966 77 977 88 988}
do_test autoindex1-201 {
db status step
} {35}
do_test autoindex1-202 {
db status autoindex
} {0}
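# (The UPDATE statements below plant fabricated row counts in sqlite_stat1;
# the "ANALYZE sqlite_master" that follows makes SQLite reload those numbers
# into the query planner without rescanning the tables.)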
do_test autoindex1-210 {
db eval {
PRAGMA automatic_index=ON;
ANALYZE;
UPDATE sqlite_stat1 SET stat='10000' WHERE tbl='t1';
-- Table t2 actually contains 8 rows.
UPDATE sqlite_stat1 SET stat='16' WHERE tbl='t2';
ANALYZE sqlite_master;
SELECT b, (SELECT d FROM t2 WHERE c=a) FROM t1;
}
} {11 911 22 922 33 933 44 944 55 955 66 966 77 977 88 988}
do_test autoindex1-211 {
db status step
} {7}
do_test autoindex1-212 {
db status autoindex
} {7}
# Modify the second table of the join while the join is in progress
#
do_execsql_test autoindex1-299 {
UPDATE sqlite_stat1 SET stat='10000' WHERE tbl='t2';
ANALYZE sqlite_master;
EXPLAIN QUERY PLAN
SELECT b, d FROM t1 CROSS JOIN t2 ON (c=a);
} {/AUTOMATIC COVERING INDEX/}
do_test autoindex1-300 {
set r {}
db eval {SELECT b, d FROM t1 CROSS JOIN t2 ON (c=a)} {
lappend r $b $d
db eval {UPDATE t2 SET d=d+1}
}
set r
} {11 911 22 922 33 933 44 944 55 955 66 966 77 977 88 988}
do_test autoindex1-310 {
db eval {SELECT d FROM t2 ORDER BY d}
} {919 930 941 952 963 974 985 996}
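# (The join above returned the pre-UPDATE d values because, once the
# automatic covering index on t2 has been built, d is read from that
# transient index rather than from t2 itself.  t2 was then updated once per
# returned row - eight times in total - which is why every d value here is
# larger by exactly 8.)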
# The next test does a 10-way join on unindexed tables. Without
# automatic indices, the join will take a long time to complete.
# With automatic indices, it should only take about a second.
#
do_test autoindex1-400 {
db eval {
CREATE TABLE t4(a, b);
INSERT INTO t4 VALUES(1,2);
INSERT INTO t4 VALUES(2,3);
}
for {set n 2} {$n<4096} {set n [expr {$n+$n}]} {
db eval {INSERT INTO t4 SELECT a+$n, b+$n FROM t4}
}
db eval {
SELECT count(*) FROM t4;
}
} {4096}
do_test autoindex1-401 {
db eval {
SELECT count(*)
FROM t4 AS x1
JOIN t4 AS x2 ON x2.a=x1.b
JOIN t4 AS x3 ON x3.a=x2.b
JOIN t4 AS x4 ON x4.a=x3.b
JOIN t4 AS x5 ON x5.a=x4.b
JOIN t4 AS x6 ON x6.a=x5.b
JOIN t4 AS x7 ON x7.a=x6.b
JOIN t4 AS x8 ON x8.a=x7.b
JOIN t4 AS x9 ON x9.a=x8.b
JOIN t4 AS x10 ON x10.a=x9.b;
}
} {4087}
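# (As a sketch only - not part of the original test numbering - the presence
# of the transient indexes can be confirmed by inspecting the query plan of
# one join level, using the same pattern-matching style as the other tests
# in this file.  The exact plan text may differ between SQLite versions:
#
#   do_execsql_test autoindex1-401-sketch {
#     EXPLAIN QUERY PLAN
#     SELECT count(*) FROM t4 AS x1 JOIN t4 AS x2 ON x2.a=x1.b;
#   } {/AUTOMATIC COVERING INDEX/}
# )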
# Ticket [8011086c85c6c404014c947fcf3eb9f42b184a0d] from 2010-07-08
# Make sure automatic indices are not created for the RHS of an IN expression
# that is not a correlated subquery.
#
do_execsql_test autoindex1-500 {
CREATE TABLE t501(a INTEGER PRIMARY KEY, b);
CREATE TABLE t502(x INTEGER PRIMARY KEY, y);
INSERT INTO sqlite_stat1(tbl,idx,stat) VALUES('t501',null,'1000000');
INSERT INTO sqlite_stat1(tbl,idx,stat) VALUES('t502',null,'1000');
ANALYZE sqlite_master;
EXPLAIN QUERY PLAN
SELECT b FROM t501
WHERE t501.a IN (SELECT x FROM t502 WHERE y=?);
} {
0 0 0 {SEARCH TABLE t501 USING INTEGER PRIMARY KEY (rowid=?)}
0 0 0 {EXECUTE LIST SUBQUERY 1}
1 0 0 {SCAN TABLE t502}
}
do_execsql_test autoindex1-501 {
EXPLAIN QUERY PLAN
SELECT b FROM t501
WHERE t501.a IN (SELECT x FROM t502 WHERE y=t501.b);
} {
0 0 0 {SCAN TABLE t501}
0 0 0 {EXECUTE CORRELATED LIST SUBQUERY 1}
1 0 0 {SEARCH TABLE t502 USING AUTOMATIC COVERING INDEX (y=?)}
}
do_execsql_test autoindex1-502 {
EXPLAIN QUERY PLAN
SELECT b FROM t501
WHERE t501.a=123
AND t501.a IN (SELECT x FROM t502 WHERE y=t501.b);
} {
0 0 0 {SEARCH TABLE t501 USING INTEGER PRIMARY KEY (rowid=?)}
0 0 0 {EXECUTE CORRELATED LIST SUBQUERY 1}
1 0 0 {SCAN TABLE t502}
}
# The following code checks a performance regression reported on the
# mailing list on 2010-10-19. The problem is that the nRowEst field
# of ephemeral tables was not being initialized correctly and so no
# automatic index was being created for the ephemeral table when it was
# used as part of a join.
#
do_execsql_test autoindex1-600 {
CREATE TABLE flock_owner(
owner_rec_id INTEGER CONSTRAINT flock_owner_key PRIMARY KEY,
flock_no VARCHAR(6) NOT NULL REFERENCES flock (flock_no),
owner_person_id INTEGER NOT NULL REFERENCES person (person_id),
owner_change_date TEXT, last_changed TEXT NOT NULL,
CONSTRAINT fo_owner_date UNIQUE (flock_no, owner_change_date)
);
CREATE TABLE sheep (
Sheep_No char(7) NOT NULL,
Date_of_Birth char(8),
Sort_DoB text,
Flock_Book_Vol char(2),
Breeder_No char(6),
Breeder_Person integer,
Originating_Flock char(6),
Registering_Flock char(6),
Tag_Prefix char(9),
Tag_No char(15),
Sort_Tag_No integer,
Breeders_Temp_Tag char(15),
Sex char(1),
Sheep_Name char(32),
Sire_No char(7),
Dam_No char(7),
Register_Code char(1),
Colour char(48),
Colour_Code char(2),
Pattern_Code char(8),
Horns char(1),
Litter_Size char(1),
Coeff_of_Inbreeding real,
Date_of_Registration text,
Date_Last_Changed text,
UNIQUE(Sheep_No));
CREATE INDEX fo_flock_no_index
ON flock_owner (flock_no);
CREATE INDEX fo_owner_change_date_index
ON flock_owner (owner_change_date);
CREATE INDEX fo_owner_person_id_index
ON flock_owner (owner_person_id);
CREATE INDEX sheep_org_flock_index
ON sheep (originating_flock);
CREATE INDEX sheep_reg_flock_index
ON sheep (registering_flock);
EXPLAIN QUERY PLAN
SELECT x.sheep_no, x.registering_flock, x.date_of_registration
FROM sheep x LEFT JOIN
(SELECT s.sheep_no, prev.flock_no, prev.owner_person_id,
s.date_of_registration, prev.owner_change_date
FROM sheep s JOIN flock_owner prev ON s.registering_flock =
prev.flock_no
AND (prev.owner_change_date <= s.date_of_registration || ' 00:00:00')
WHERE NOT EXISTS
(SELECT 'x' FROM flock_owner later
WHERE prev.flock_no = later.flock_no
AND later.owner_change_date > prev.owner_change_date
AND later.owner_change_date <= s.date_of_registration||' 00:00:00')
) y ON x.sheep_no = y.sheep_no
WHERE y.sheep_no IS NULL
ORDER BY x.registering_flock;
} {
1 0 0 {SCAN TABLE sheep AS s}
1 1 1 {SEARCH TABLE flock_owner AS prev USING INDEX sqlite_autoindex_flock_owner_1 (flock_no=? AND owner_change_date<?)}
1 0 0 {EXECUTE CORRELATED SCALAR SUBQUERY 2}
2 0 0 {SEARCH TABLE flock_owner AS later USING COVERING INDEX sqlite_autoindex_flock_owner_1 (flock_no=? AND owner_change_date>? AND owner_change_date<?)}
0 0 0 {SCAN TABLE sheep AS x USING INDEX sheep_reg_flock_index}
0 1 1 {SEARCH SUBQUERY 1 AS y USING AUTOMATIC COVERING INDEX (sheep_no=?)}
}
do_execsql_test autoindex1-700 {
CREATE TABLE t5(a, b, c);
EXPLAIN QUERY PLAN SELECT a FROM t5 WHERE b=10 ORDER BY c;
} {
0 0 0 {SCAN TABLE t5}
0 0 0 {USE TEMP B-TREE FOR ORDER BY}
}
# The following checks a performance issue reported on the sqlite-dev
# mailing list on 2013-01-10
#
do_execsql_test autoindex1-800 {
CREATE TABLE accounts(
_id INTEGER PRIMARY KEY AUTOINCREMENT,
account_name TEXT,
account_type TEXT,
data_set TEXT
);
CREATE TABLE data(
_id INTEGER PRIMARY KEY AUTOINCREMENT,
package_id INTEGER REFERENCES package(_id),
mimetype_id INTEGER REFERENCES mimetype(_id) NOT NULL,
raw_contact_id INTEGER REFERENCES raw_contacts(_id) NOT NULL,
is_read_only INTEGER NOT NULL DEFAULT 0,
is_primary INTEGER NOT NULL DEFAULT 0,
is_super_primary INTEGER NOT NULL DEFAULT 0,
data_version INTEGER NOT NULL DEFAULT 0,
data1 TEXT,
data2 TEXT,
data3 TEXT,
data4 TEXT,
data5 TEXT,
data6 TEXT,
data7 TEXT,
data8 TEXT,
data9 TEXT,
data10 TEXT,
data11 TEXT,
data12 TEXT,
data13 TEXT,
data14 TEXT,
data15 TEXT,
data_sync1 TEXT,
data_sync2 TEXT,
data_sync3 TEXT,
data_sync4 TEXT
);
CREATE TABLE mimetypes(
_id INTEGER PRIMARY KEY AUTOINCREMENT,
mimetype TEXT NOT NULL
);
CREATE TABLE raw_contacts(
_id INTEGER PRIMARY KEY AUTOINCREMENT,
account_id INTEGER REFERENCES accounts(_id),
sourceid TEXT,
raw_contact_is_read_only INTEGER NOT NULL DEFAULT 0,
version INTEGER NOT NULL DEFAULT 1,
dirty INTEGER NOT NULL DEFAULT 0,
deleted INTEGER NOT NULL DEFAULT 0,
contact_id INTEGER REFERENCES contacts(_id),
aggregation_mode INTEGER NOT NULL DEFAULT 0,
aggregation_needed INTEGER NOT NULL DEFAULT 1,
custom_ringtone TEXT,
send_to_voicemail INTEGER NOT NULL DEFAULT 0,
times_contacted INTEGER NOT NULL DEFAULT 0,
last_time_contacted INTEGER,
starred INTEGER NOT NULL DEFAULT 0,
display_name TEXT,
display_name_alt TEXT,
display_name_source INTEGER NOT NULL DEFAULT 0,
phonetic_name TEXT,
phonetic_name_style TEXT,
sort_key TEXT,
sort_key_alt TEXT,
name_verified INTEGER NOT NULL DEFAULT 0,
sync1 TEXT,
sync2 TEXT,
sync3 TEXT,
sync4 TEXT,
sync_uid TEXT,
sync_version INTEGER NOT NULL DEFAULT 1,
has_calendar_event INTEGER NOT NULL DEFAULT 0,
modified_time INTEGER,
is_restricted INTEGER DEFAULT 0,
yp_source TEXT,
method_selected INTEGER DEFAULT 0,
custom_vibration_type INTEGER DEFAULT 0,
custom_ringtone_path TEXT,
message_notification TEXT,
message_notification_path TEXT
);
CREATE INDEX data_mimetype_data1_index ON data (mimetype_id,data1);
CREATE INDEX data_raw_contact_id ON data (raw_contact_id);
CREATE UNIQUE INDEX mime_type ON mimetypes (mimetype);
CREATE INDEX raw_contact_sort_key1_index ON raw_contacts (sort_key);
CREATE INDEX raw_contact_sort_key2_index ON raw_contacts (sort_key_alt);
CREATE INDEX raw_contacts_contact_id_index ON raw_contacts (contact_id);
CREATE INDEX raw_contacts_source_id_account_id_index
ON raw_contacts (sourceid, account_id);
ANALYZE sqlite_master;
INSERT INTO sqlite_stat1
VALUES('raw_contacts','raw_contact_sort_key2_index','1600 4');
INSERT INTO sqlite_stat1
VALUES('raw_contacts','raw_contact_sort_key1_index','1600 4');
INSERT INTO sqlite_stat1
VALUES('raw_contacts','raw_contacts_source_id_account_id_index',
'1600 1600 1600');
INSERT INTO sqlite_stat1
VALUES('raw_contacts','raw_contacts_contact_id_index','1600 1');
INSERT INTO sqlite_stat1 VALUES('mimetypes','mime_type','12 1');
INSERT INTO sqlite_stat1
VALUES('data','data_mimetype_data1_index','9819 2455 3');
INSERT INTO sqlite_stat1 VALUES('data','data_raw_contact_id','9819 7');
INSERT INTO sqlite_stat1 VALUES('accounts',NULL,'1');
DROP TABLE IF EXISTS sqlite_stat3;
ANALYZE sqlite_master;
EXPLAIN QUERY PLAN
SELECT * FROM
data JOIN mimetypes ON (data.mimetype_id=mimetypes._id)
JOIN raw_contacts ON (data.raw_contact_id=raw_contacts._id)
JOIN accounts ON (raw_contacts.account_id=accounts._id)
WHERE mimetype_id=10 AND data14 IS NOT NULL;
} {/SEARCH TABLE data .*SEARCH TABLE raw_contacts/}
do_execsql_test autoindex1-801 {
EXPLAIN QUERY PLAN
SELECT * FROM
data JOIN mimetypes ON (data.mimetype_id=mimetypes._id)
JOIN raw_contacts ON (data.raw_contact_id=raw_contacts._id)
JOIN accounts ON (raw_contacts.account_id=accounts._id)
WHERE mimetypes._id=10 AND data14 IS NOT NULL;
} {/SEARCH TABLE data .*SEARCH TABLE raw_contacts/}
# Another test case from an important user of SQLite. The key feature of
# this test is that the "agglabels" subquery should make use of an
# automatic index. If it does, the query is fast. If it does not, the
# query is deathly slow. It worked OK in 3.7.17 but started going slow
# with version 3.8.0. The problem was fixed for 3.8.7 by reducing the
# cost estimate for automatic indexes on views and subqueries.
#
db close
forcedelete test.db
sqlite3 db test.db
do_execsql_test autoindex1-900 {
CREATE TABLE messages (ROWID INTEGER PRIMARY KEY AUTOINCREMENT, message_id, document_id BLOB, in_reply_to, remote_id INTEGER, sender INTEGER, subject_prefix, subject INTEGER, date_sent INTEGER, date_received INTEGER, date_created INTEGER, date_last_viewed INTEGER, mailbox INTEGER, remote_mailbox INTEGER, original_mailbox INTEGER, flags INTEGER, read, flagged, size INTEGER, color, encoding, type INTEGER, pad, conversation_id INTEGER DEFAULT -1, snippet TEXT DEFAULT NULL, fuzzy_ancestor INTEGER DEFAULT NULL, automated_conversation INTEGER DEFAULT 0, root_status INTEGER DEFAULT -1, conversation_position INTEGER DEFAULT -1);
CREATE INDEX date_index ON messages(date_received);
CREATE INDEX date_last_viewed_index ON messages(date_last_viewed);
CREATE INDEX date_created_index ON messages(date_created);
CREATE INDEX message_message_id_mailbox_index ON messages(message_id, mailbox);
CREATE INDEX message_document_id_index ON messages(document_id);
CREATE INDEX message_read_index ON messages(read);
CREATE INDEX message_flagged_index ON messages(flagged);
CREATE INDEX message_mailbox_index ON messages(mailbox, date_received);
CREATE INDEX message_remote_mailbox_index ON messages(remote_mailbox, remote_id);
CREATE INDEX message_type_index ON messages(type);
CREATE INDEX message_conversation_id_conversation_position_index ON messages(conversation_id, conversation_position);
CREATE INDEX message_fuzzy_ancestor_index ON messages(fuzzy_ancestor);
CREATE INDEX message_subject_fuzzy_ancestor_index ON messages(subject, fuzzy_ancestor);
CREATE INDEX message_sender_subject_automated_conversation_index ON messages(sender, subject, automated_conversation);
CREATE INDEX message_sender_index ON messages(sender);
CREATE INDEX message_root_status ON messages(root_status);
CREATE TABLE subjects (ROWID INTEGER PRIMARY KEY, subject COLLATE RTRIM, normalized_subject COLLATE RTRIM);
CREATE INDEX subject_subject_index ON subjects(subject);
CREATE INDEX subject_normalized_subject_index ON subjects(normalized_subject);
CREATE TABLE addresses (ROWID INTEGER PRIMARY KEY, address COLLATE NOCASE, comment, UNIQUE(address, comment));
CREATE INDEX addresses_address_index ON addresses(address);
CREATE TABLE mailboxes (ROWID INTEGER PRIMARY KEY, url UNIQUE, total_count INTEGER DEFAULT 0, unread_count INTEGER DEFAULT 0, unseen_count INTEGER DEFAULT 0, deleted_count INTEGER DEFAULT 0, unread_count_adjusted_for_duplicates INTEGER DEFAULT 0, change_identifier, source INTEGER, alleged_change_identifier);
CREATE INDEX mailboxes_source_index ON mailboxes(source);
CREATE TABLE labels (ROWID INTEGER PRIMARY KEY, message_id INTEGER NOT NULL, mailbox_id INTEGER NOT NULL, UNIQUE(message_id, mailbox_id));
CREATE INDEX labels_message_id_mailbox_id_index ON labels(message_id, mailbox_id);
CREATE INDEX labels_mailbox_id_index ON labels(mailbox_id);
explain query plan
SELECT messages.ROWID,
messages.message_id,
messages.remote_id,
messages.date_received,
messages.date_sent,
messages.flags,
messages.size,
messages.color,
messages.date_last_viewed,
messages.subject_prefix,
subjects.subject,
sender.comment,
sender.address,
NULL,
messages.mailbox,
messages.original_mailbox,
NULL,
NULL,
messages.type,
messages.document_id,
sender,
NULL,
messages.conversation_id,
messages.conversation_position,
agglabels.labels
FROM mailboxes AS mailbox
JOIN messages ON mailbox.ROWID = messages.mailbox
LEFT OUTER JOIN subjects ON messages.subject = subjects.ROWID
LEFT OUTER JOIN addresses AS sender ON messages.sender = sender.ROWID
LEFT OUTER JOIN (
SELECT message_id, group_concat(mailbox_id) as labels
FROM labels GROUP BY message_id
) AS agglabels ON messages.ROWID = agglabels.message_id
WHERE (mailbox.url = 'imap://email.app@imap.gmail.com/%5BGmail%5D/All%20Mail')
AND (messages.ROWID IN (
SELECT labels.message_id
FROM labels JOIN mailboxes ON labels.mailbox_id = mailboxes.ROWID
WHERE mailboxes.url = 'imap://email.app@imap.gmail.com/INBOX'))
AND messages.mailbox in (6,12,18,24,30,36,42,1,7,13,19,25,31,37,43,2,8,
14,20,26,32,38,3,9,15,21,27,33,39,4,10,16,22,28,
34,40,5,11,17,23,35,41)
ORDER BY date_received DESC;
} {/agglabels USING AUTOMATIC COVERING INDEX/}
# A test case for VIEWs
#
do_execsql_test autoindex1-901 {
CREATE TABLE t1(x INTEGER PRIMARY KEY, y, z);
CREATE TABLE t2(a, b);
CREATE VIEW agg2 AS SELECT a, sum(b) AS m FROM t2 GROUP BY a;
EXPLAIN QUERY PLAN
SELECT t1.z, agg2.m
FROM t1 JOIN agg2 ON t1.y=agg2.m
WHERE t1.x IN (1,2,3);
} {/USING AUTOMATIC COVERING INDEX/}
# 2015-04-15: A NULL CollSeq pointer in automatic index creation.
#
do_execsql_test autoindex1-920 {
CREATE TABLE t920(x);
INSERT INTO t920 VALUES(3),(4),(5);
SELECT * FROM t920,(SELECT 0 FROM t920),(VALUES(9)) WHERE 5 IN (x);
} {5 0 9 5 0 9 5 0 9}
finish_test

View File

@ -0,0 +1,227 @@
# 2014-06-17
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#*************************************************************************
#
# This file implements regression tests for SQLite library. The
# focus of this script is testing automatic index creation logic.
#
# This file contains a single real-world test case that was giving
# suboptimal performance because of over-use of automatic indexes.
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
do_execsql_test autoindex2-100 {
CREATE TABLE t1(
t1_id largeint,
did char(9),
ptime largeint,
exbyte char(4),
pe_id int,
field_id int,
mass float,
param10 float,
param11 float,
exmass float,
deviation float,
trange float,
vstatus int,
commit_status int,
formula char(329),
tier int DEFAULT 2,
ssid int DEFAULT 0,
last_operation largeint DEFAULT 0,
admin_uuid int DEFAULT 0,
previous_value float,
job_id largeint,
last_t1 largeint DEFAULT 0,
data_t1 int,
previous_date largeint DEFAULT 0,
flg8 int DEFAULT 1,
failed_fields char(100)
);
CREATE INDEX t1x0 on t1 (t1_id);
CREATE INDEX t1x1 on t1 (ptime, vstatus);
CREATE INDEX t1x2 on t1 (did, ssid, ptime, vstatus, exbyte, t1_id);
CREATE INDEX t1x3 on t1 (job_id);
CREATE TABLE t2(
did char(9),
client_did char(30),
description char(49),
uid int,
tzid int,
privilege int,
param2 int,
type char(30),
subtype char(32),
dparam1 char(7) DEFAULT '',
param5 char(3) DEFAULT '',
notional float DEFAULT 0.000000,
create_time largeint,
sample_time largeint DEFAULT 0,
param6 largeint,
frequency int,
expiration largeint,
uw_status int,
next_sample largeint,
last_sample largeint,
reserve1 char(29) DEFAULT '',
reserve2 char(29) DEFAULT '',
reserve3 char(29) DEFAULT '',
bxcdr char(19) DEFAULT 'XY',
ssid int DEFAULT 1,
last_t1_id largeint,
reserve4 char(29) DEFAULT '',
reserve5 char(29) DEFAULT '',
param12 int DEFAULT 0,
long_did char(100) DEFAULT '',
gr_code int DEFAULT 0,
drx char(100) DEFAULT '',
parent_id char(9) DEFAULT '',
param13 int DEFAULT 0,
position float DEFAULT 1.000000,
client_did3 char(100) DEFAULT '',
client_did4 char(100) DEFAULT '',
dlib_id char(9) DEFAULT ''
);
CREATE INDEX t2x0 on t2 (did);
CREATE INDEX t2x1 on t2 (client_did);
CREATE INDEX t2x2 on t2 (long_did);
CREATE INDEX t2x3 on t2 (uid);
CREATE INDEX t2x4 on t2 (param2);
CREATE INDEX t2x5 on t2 (type);
CREATE INDEX t2x6 on t2 (subtype);
CREATE INDEX t2x7 on t2 (last_sample);
CREATE INDEX t2x8 on t2 (param6);
CREATE INDEX t2x9 on t2 (frequency);
CREATE INDEX t2x10 on t2 (privilege);
CREATE INDEX t2x11 on t2 (sample_time);
CREATE INDEX t2x12 on t2 (notional);
CREATE INDEX t2x13 on t2 (tzid);
CREATE INDEX t2x14 on t2 (gr_code);
CREATE INDEX t2x15 on t2 (parent_id);
CREATE TABLE t3(
uid int,
param3 int,
uuid int,
acc_id int,
cust_num int,
numerix_id int,
pfy char(29),
param4 char(29),
param15 int DEFAULT 0,
flg7 int DEFAULT 0,
param21 int DEFAULT 0,
bxcdr char(2) DEFAULT 'PC',
c31 int DEFAULT 0,
c33 int DEFAULT 0,
c35 int DEFAULT 0,
c37 int,
mgr_uuid int,
back_up_uuid int,
priv_mars int DEFAULT 0,
is_qc int DEFAULT 0,
c41 int DEFAULT 0,
deleted int DEFAULT 0,
c47 int DEFAULT 1
);
CREATE INDEX t3x0 on t3 (uid);
CREATE INDEX t3x1 on t3 (param3);
CREATE INDEX t3x2 on t3 (uuid);
CREATE INDEX t3x3 on t3 (acc_id);
CREATE INDEX t3x4 on t3 (param4);
CREATE INDEX t3x5 on t3 (pfy);
CREATE INDEX t3x6 on t3 (is_qc);
SELECT count(*) FROM sqlite_master;
} {30}
do_execsql_test autoindex2-110 {
ANALYZE sqlite_master;
INSERT INTO sqlite_stat1 VALUES('t1','t1x3','10747267 260');
INSERT INTO sqlite_stat1 VALUES('t1','t1x2','10747267 121 113 2 2 2 1');
INSERT INTO sqlite_stat1 VALUES('t1','t1x1','10747267 50 40');
INSERT INTO sqlite_stat1 VALUES('t1','t1x0','10747267 1');
INSERT INTO sqlite_stat1 VALUES('t2','t2x15','39667 253');
INSERT INTO sqlite_stat1 VALUES('t2','t2x14','39667 19834');
INSERT INTO sqlite_stat1 VALUES('t2','t2x13','39667 13223');
INSERT INTO sqlite_stat1 VALUES('t2','t2x12','39667 7');
INSERT INTO sqlite_stat1 VALUES('t2','t2x11','39667 17');
INSERT INTO sqlite_stat1 VALUES('t2','t2x10','39667 19834');
INSERT INTO sqlite_stat1 VALUES('t2','t2x9','39667 7934');
INSERT INTO sqlite_stat1 VALUES('t2','t2x8','39667 11');
INSERT INTO sqlite_stat1 VALUES('t2','t2x7','39667 5');
INSERT INTO sqlite_stat1 VALUES('t2','t2x6','39667 242');
INSERT INTO sqlite_stat1 VALUES('t2','t2x5','39667 1984');
INSERT INTO sqlite_stat1 VALUES('t2','t2x4','39667 4408');
INSERT INTO sqlite_stat1 VALUES('t2','t2x3','39667 81');
INSERT INTO sqlite_stat1 VALUES('t2','t2x2','39667 551');
INSERT INTO sqlite_stat1 VALUES('t2','t2x1','39667 2');
INSERT INTO sqlite_stat1 VALUES('t2','t2x0','39667 1');
INSERT INTO sqlite_stat1 VALUES('t3','t3x6','569 285');
INSERT INTO sqlite_stat1 VALUES('t3','t3x5','569 2');
INSERT INTO sqlite_stat1 VALUES('t3','t3x4','569 2');
INSERT INTO sqlite_stat1 VALUES('t3','t3x3','569 5');
INSERT INTO sqlite_stat1 VALUES('t3','t3x2','569 3');
INSERT INTO sqlite_stat1 VALUES('t3','t3x1','569 6');
INSERT INTO sqlite_stat1 VALUES('t3','t3x0','569 1');
ANALYZE sqlite_master;
} {}
do_execsql_test autoindex2-120 {
EXPLAIN QUERY PLAN
SELECT
t1_id,
t1.did,
param2,
param3,
t1.ptime,
t1.trange,
t1.exmass,
t1.mass,
t1.vstatus,
type,
subtype,
t1.deviation,
t1.formula,
dparam1,
reserve1,
reserve2,
param4,
t1.last_operation,
t1.admin_uuid,
t1.previous_value,
t1.job_id,
client_did,
t1.last_t1,
t1.data_t1,
t1.previous_date,
param5,
param6,
mgr_uuid
FROM
t1,
t2,
t3
WHERE
t1.ptime > 1393520400
AND param3<>9001
AND t3.flg7 = 1
AND t1.did = t2.did
AND t2.uid = t3.uid
ORDER BY t1.ptime desc LIMIT 500;
} {0 0 0 {SEARCH TABLE t1 USING INDEX t1x1 (ptime>?)} 0 1 1 {SEARCH TABLE t2 USING INDEX t2x0 (did=?)} 0 2 2 {SEARCH TABLE t3 USING INDEX t3x0 (uid=?)}}
#
# ^^^--- Before being fixed, the above was using an automatic covering
# index on t3, reordering the tables so that t3 was in the outer loop, and
# implementing the ORDER BY clause using a B-Tree.
finish_test

View File

@ -0,0 +1,92 @@
# 2014-06-17
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#*************************************************************************
#
# This file implements regression tests for SQLite library. The
# focus of this script is testing automatic index creation logic,
# and specifically that an automatic index will not be created that
# shadows a declared index.
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
set testprefix autoindex3
# The t1b and t2d indexes are not very selective. It used to be that
# the autoindex mechanism would create automatic indexes on t1(b) or
# t2(d), make assumptions that they were reasonably selective, and use
# them instead of t1b or t2d. But that would be cheating, because the
# automatic index cannot be any more selective than the real index.
#
# This test verifies that the cheat is no longer allowed.
#
do_execsql_test autoindex3-100 {
CREATE TABLE t1(a,b,x);
CREATE TABLE t2(c,d,y);
CREATE INDEX t1b ON t1(b);
CREATE INDEX t2d ON t2(d);
ANALYZE sqlite_master;
INSERT INTO sqlite_stat1 VALUES('t1','t1b','10000 500');
INSERT INTO sqlite_stat1 VALUES('t2','t2d','10000 500');
ANALYZE sqlite_master;
EXPLAIN QUERY PLAN SELECT * FROM t1, t2 WHERE d=b;
} {~/AUTO/}
# Automatic indexes can still be used if existing indexes do not
# participate in == constraints.
#
do_execsql_test autoindex3-110 {
EXPLAIN QUERY PLAN SELECT * FROM t1, t2 WHERE d>b AND x=y;
} {/AUTO/}
do_execsql_test autoindex3-120 {
EXPLAIN QUERY PLAN SELECT * FROM t1, t2 WHERE d<b AND x=y;
} {/AUTO/}
do_execsql_test autoindex3-130 {
EXPLAIN QUERY PLAN SELECT * FROM t1, t2 WHERE d IS NULL AND x=y;
} {/AUTO/}
do_execsql_test autoindex3-140 {
EXPLAIN QUERY PLAN SELECT * FROM t1, t2 WHERE d IN (5,b) AND x=y;
} {/AUTO/}
reset_db
do_execsql_test 210 {
CREATE TABLE v(b, d, e);
CREATE TABLE u(a, b, c);
ANALYZE sqlite_master;
INSERT INTO "sqlite_stat1" VALUES('u','uab','40000 400 1');
INSERT INTO "sqlite_stat1" VALUES('v','vbde','40000 400 1 1');
INSERT INTO "sqlite_stat1" VALUES('v','ve','40000 21');
CREATE INDEX uab on u(a, b);
CREATE INDEX ve on v(e);
CREATE INDEX vbde on v(b,d,e);
DROP TABLE IF EXISTS sqlite_stat4;
ANALYZE sqlite_master;
}
# At one point, SQLite was using the inferior plan:
#
# 0|0|1|SEARCH TABLE v USING INDEX ve (e>?)
# 0|1|0|SEARCH TABLE u USING COVERING INDEX uab (ANY(a) AND b=?)
#
# on the basis that the real index "uab" must be better than the automatic
# index. This is not right - a skip-scan is not necessarily better than an
# automatic index scan.
#
do_eqp_test 220 {
select count(*) from u, v where u.b = v.b and v.e > 34;
} {
0 0 1 {SEARCH TABLE v USING INDEX ve (e>?)}
0 1 0 {SEARCH TABLE u USING AUTOMATIC COVERING INDEX (b=?)}
}
finish_test

View File

@ -0,0 +1,83 @@
# 2014-10-24
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#*************************************************************************
#
# This file implements regression tests for SQLite library. The
# focus of this script is testing automatic index creation logic,
# and specifically creation of automatic partial indexes.
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
do_execsql_test autoindex4-1.0 {
CREATE TABLE t1(a,b);
INSERT INTO t1 VALUES(123,'abc'),(234,'def'),(234,'ghi'),(345,'jkl');
CREATE TABLE t2(x,y);
INSERT INTO t2 VALUES(987,'zyx'),(654,'wvu'),(987,'rqp');
SELECT *, '|' FROM t1, t2 WHERE a=234 AND x=987 ORDER BY +b;
} {234 def 987 rqp | 234 def 987 zyx | 234 ghi 987 rqp | 234 ghi 987 zyx |}
do_execsql_test autoindex4-1.1 {
SELECT *, '|' FROM t1, t2 WHERE a=234 AND x=555;
} {}
do_execsql_test autoindex4-1.2 {
SELECT *, '|' FROM t1 LEFT JOIN t2 ON a=234 AND x=555;
} {123 abc {} {} | 234 def {} {} | 234 ghi {} {} | 345 jkl {} {} |}
do_execsql_test autoindex4-1.3 {
SELECT *, '|' FROM t1 LEFT JOIN t2 ON x=555 WHERE a=234;
} {234 def {} {} | 234 ghi {} {} |}
do_execsql_test autoindex4-1.4 {
SELECT *, '|' FROM t1 LEFT JOIN t2 WHERE a=234 AND x=555;
} {}
do_execsql_test autoindex4-2.0 {
CREATE TABLE t3(e,f);
INSERT INTO t3 VALUES(123,654),(555,444),(234,987);
SELECT (SELECT count(*) FROM t1, t2 WHERE a=e AND x=f), e, f, '|'
FROM t3
ORDER BY rowid;
} {1 123 654 | 0 555 444 | 4 234 987 |}
# Ticket [2326c258d02ead33d]
# Two joins, one with and the other without an ORDER BY clause.
# The one without ORDER BY correctly returns two rows of result.
# The one with ORDER BY returns no rows.
#
do_execsql_test autoindex4-3.0 {
CREATE TABLE A(Name text);
CREATE TABLE Items(ItemName text , Name text);
INSERT INTO Items VALUES('Item1','Parent');
INSERT INTO Items VALUES('Item2','Parent');
CREATE TABLE B(Name text);
SELECT Items.ItemName
FROM Items
LEFT JOIN A ON (A.Name = Items.ItemName and Items.ItemName = 'dummy')
LEFT JOIN B ON (B.Name = Items.ItemName)
WHERE Items.Name = 'Parent'
ORDER BY Items.ItemName;
} {Item1 Item2}
do_execsql_test autoindex4-3.1 {
CREATE INDEX Items_x1 ON Items(ItemName,Name) WHERE ItemName = 'dummy';
SELECT Items.ItemName
FROM Items
LEFT JOIN A ON (A.Name = Items.ItemName and Items.ItemName = 'dummy')
LEFT JOIN B ON (B.Name = Items.ItemName)
WHERE Items.Name = 'Parent'
ORDER BY Items.ItemName;
} {Item1 Item2}
finish_test

View File

@ -0,0 +1,129 @@
# 2014-10-24
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#*************************************************************************
#
# This file implements regression tests for SQLite library. The
# focus of this script is testing automatic index creation logic,
# and specifically ensuring that automatic indexes can be used with
# co-routine subqueries.
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
set testprefix autoindex5
# Schema is from the Debian security database
#
do_execsql_test autoindex5-1.0 {
CREATE TABLE source_package_status
(bug_name TEXT NOT NULL,
package INTEGER NOT NULL,
vulnerable INTEGER NOT NULL,
urgency TEXT NOT NULL,
PRIMARY KEY (bug_name, package));
CREATE INDEX source_package_status_package
ON source_package_status(package);
CREATE TABLE source_packages
(name TEXT NOT NULL,
release TEXT NOT NULL,
subrelease TEXT NOT NULL,
archive TEXT NOT NULL,
version TEXT NOT NULL,
version_id INTEGER NOT NULL DEFAULT 0,
PRIMARY KEY (name, release, subrelease, archive));
CREATE TABLE bugs
(name TEXT NOT NULL PRIMARY KEY,
cve_status TEXT NOT NULL
CHECK (cve_status IN
('', 'CANDIDATE', 'ASSIGNED', 'RESERVED', 'REJECTED')),
not_for_us INTEGER NOT NULL CHECK (not_for_us IN (0, 1)),
description TEXT NOT NULL,
release_date TEXT NOT NULL,
source_file TEXT NOT NULL,
source_line INTEGER NOT NULL);
CREATE TABLE package_notes
(id INTEGER NOT NULL PRIMARY KEY,
bug_name TEXT NOT NULL,
package TEXT NOT NULL,
fixed_version TEXT
CHECK (fixed_version IS NULL OR fixed_version <> ''),
fixed_version_id INTEGER NOT NULL DEFAULT 0,
release TEXT NOT NULL,
package_kind TEXT NOT NULL DEFAULT 'unknown',
urgency TEXT NOT NULL,
bug_origin TEXT NOT NULL DEFAULT '');
CREATE INDEX package_notes_package
ON package_notes(package);
CREATE UNIQUE INDEX package_notes_bug
ON package_notes(bug_name, package, release);
CREATE TABLE debian_bugs
(bug INTEGER NOT NULL,
note INTEGER NOT NULL,
PRIMARY KEY (bug, note));
CREATE VIEW debian_cve AS
SELECT DISTINCT debian_bugs.bug, st.bug_name
FROM package_notes, debian_bugs, source_package_status AS st
WHERE package_notes.bug_name = st.bug_name
AND debian_bugs.note = package_notes.id
ORDER BY debian_bugs.bug;
} {}
# The following query should use an automatic index for the view
# in the FROM clause of the subquery that computes the second result column.
#
do_execsql_test autoindex5-1.1 {
EXPLAIN QUERY PLAN
SELECT
st.bug_name,
(SELECT ALL debian_cve.bug FROM debian_cve
WHERE debian_cve.bug_name = st.bug_name
ORDER BY debian_cve.bug),
sp.release
FROM
source_package_status AS st,
source_packages AS sp,
bugs
WHERE
sp.rowid = st.package
AND st.bug_name = bugs.name
AND ( st.bug_name LIKE 'CVE-%' OR st.bug_name LIKE 'TEMP-%' )
AND ( sp.release = 'sid' OR sp.release = 'stretch' OR sp.release = 'jessie'
OR sp.release = 'wheezy' OR sp.release = 'squeeze' )
ORDER BY sp.name, st.bug_name, sp.release, sp.subrelease;
} {/SEARCH SUBQUERY 2 USING AUTOMATIC COVERING INDEX .bug_name=/}
#-------------------------------------------------------------------------
# Test that ticket [8a2adec1] has been fixed.
#
do_execsql_test 2.1 {
CREATE TABLE one(o);
INSERT INTO one DEFAULT VALUES;
CREATE TABLE t1(x, z);
INSERT INTO t1 VALUES('aaa', 4.0);
INSERT INTO t1 VALUES('aaa', 4.0);
CREATE VIEW vvv AS
SELECT * FROM t1
UNION ALL
SELECT 0, 0 WHERE 0;
SELECT (
SELECT sum(z) FROM vvv WHERE x='aaa'
) FROM one;
} {8.0}
finish_test

View File

@ -0,0 +1,698 @@
# 2001 September 15
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for SQLite library. The
# focus of this file is testing the SELECT statement.
#
# $Id: autovacuum.test,v 1.29 2009/04/06 17:50:03 danielk1977 Exp $
set testdir [file dirname $argv0]
source $testdir/tester.tcl
# If this build of the library does not support auto-vacuum, omit this
# whole file.
ifcapable {!autovacuum || !pragma} {
finish_test
return
}
# Return a string $len characters long. The returned string is $char repeated
# over and over. For example, [make_str abc 8] returns "abcabcab".
proc make_str {char len} {
set str [string repeat $char $len]
return [string range $str 0 [expr $len-1]]
}
# Return the number of pages in the file test.db by looking at the file system.
proc file_pages {} {
return [expr [file size test.db] / 1024]
}
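# (file_pages assumes a 1024-byte database page size: it simply divides the
# size of test.db by 1024, so its result is only meaningful while the tests
# keep 1K pages.)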
#-------------------------------------------------------------------------
# Test cases autovacuum-1.* work as follows:
#
# 1. A table with a single indexed field is created.
# 2. Approximately 20 rows are inserted into the table. Each row is long
# enough such that it uses at least 2 overflow pages for both the table
# and index entry.
# 3. The rows are deleted in a pseudo-random order. Sometimes only one row
# is deleted per transaction, sometimes more than one.
# 4. After each transaction the table data is checked to ensure it is correct
# and a "PRAGMA integrity_check" is executed.
# 5. Once all the rows are deleted the file is checked to make sure it
# consists of exactly 4 pages.
#
# Steps 2-5 are repeated for a few different pseudo-random delete patterns
# (defined by the $delete_orders list).
set delete_orders [list]
lappend delete_orders {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20}
lappend delete_orders {20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1}
lappend delete_orders {8 18 2 4 14 11 13 3 10 7 9 5 12 17 19 15 20 6 16 1}
lappend delete_orders {10 3 11 17 19 20 7 4 13 6 1 14 16 12 9 18 8 15 5 2}
lappend delete_orders {{1 2 3 4 5 6 7 8 9 10} {11 12 13 14 15 16 17 18 19 20}}
lappend delete_orders {{19 8 17 15} {16 11 9 14} {18 5 3 1} {13 20 7 2} {6 12}}
# The length of each table entry.
# set ENTRY_LEN 3500
set ENTRY_LEN 3500
do_test autovacuum-1.1 {
execsql {
PRAGMA auto_vacuum = 1;
CREATE TABLE av1(a);
CREATE INDEX av1_idx ON av1(a);
}
} {}
set tn 0
foreach delete_order $delete_orders {
incr tn
# Set up the table.
set ::tbl_data [list]
foreach i [lsort -integer [eval concat $delete_order]] {
execsql "INSERT INTO av1 (oid, a) VALUES($i, '[make_str $i $ENTRY_LEN]')"
lappend ::tbl_data [make_str $i $ENTRY_LEN]
}
# Make sure the integrity check passes with the initial data.
ifcapable {integrityck} {
do_test autovacuum-1.$tn.1 {
execsql {
pragma integrity_check
}
} {ok}
}
foreach delete $delete_order {
# Delete one set of rows from the table.
do_test autovacuum-1.$tn.($delete).1 {
execsql "
DELETE FROM av1 WHERE oid = [join $delete " OR oid = "]
"
} {}
# Do the integrity check.
ifcapable {integrityck} {
do_test autovacuum-1.$tn.($delete).2 {
execsql {
pragma integrity_check
}
} {ok}
}
# Ensure the data remaining in the table is what was expected.
foreach d $delete {
set idx [lsearch $::tbl_data [make_str $d $ENTRY_LEN]]
set ::tbl_data [lreplace $::tbl_data $idx $idx]
}
do_test autovacuum-1.$tn.($delete).3 {
execsql {
select a from av1 order by rowid
}
} $::tbl_data
}
# All rows have been deleted. Ensure the file has shrunk to 4 pages.
do_test autovacuum-1.$tn.3 {
file_pages
} {4}
}
#---------------------------------------------------------------------------
# Test cases autovacuum-2.* test that root pages are allocated
# and deallocated correctly at the start of the file. Operation is roughly as
# follows:
#
# autovacuum-2.1.*: Drop the tables that currently exist in the database.
# autovacuum-2.2.*: Create some tables. Ensure that data pages can be
# moved correctly to make space for new root-pages.
# autovacuum-2.3.*: Drop one of the tables just created (not the last one),
# and check that one of the other tables is moved to
# the free root-page location.
# autovacuum-2.4.*: Check that a table can be created correctly when the
# root-page it requires is on the free-list.
# autovacuum-2.5.*: Check that a table with indices can be dropped. This
# is slightly tricky because dropping one of the
# indices/table btrees could move the root-page of another.
# The code-generation layer of SQLite overcomes this problem
# by dropping the btrees in descending order of root-pages.
# This test ensures that this actually happens.
#
do_test autovacuum-2.1.1 {
execsql {
DROP TABLE av1;
}
} {}
do_test autovacuum-2.1.2 {
file_pages
} {1}
# Create a table and put some data in it.
do_test autovacuum-2.2.1 {
execsql {
CREATE TABLE av1(x);
SELECT rootpage FROM sqlite_master ORDER BY rootpage;
}
} {3}
do_test autovacuum-2.2.2 {
execsql "
INSERT INTO av1 VALUES('[make_str abc 3000]');
INSERT INTO av1 VALUES('[make_str def 3000]');
INSERT INTO av1 VALUES('[make_str ghi 3000]');
INSERT INTO av1 VALUES('[make_str jkl 3000]');
"
set ::av1_data [db eval {select * from av1}]
file_pages
} {15}
# Create another table. Check it is located immediately after the first.
# This test case moves the second page in an over-flow chain.
do_test autovacuum-2.2.3 {
execsql {
CREATE TABLE av2(x);
SELECT rootpage FROM sqlite_master ORDER BY rootpage;
}
} {3 4}
do_test autovacuum-2.2.4 {
file_pages
} {16}
# Create another table. Check it is located immediately after the second.
# This test case moves the first page in an over-flow chain.
do_test autovacuum-2.2.5 {
execsql {
CREATE TABLE av3(x);
SELECT rootpage FROM sqlite_master ORDER BY rootpage;
}
} {3 4 5}
do_test autovacuum-2.2.6 {
file_pages
} {17}
# Create another table. Check it is located immediately after the third.
# This test case moves a btree leaf page.
do_test autovacuum-2.2.7 {
execsql {
CREATE TABLE av4(x);
SELECT rootpage FROM sqlite_master ORDER BY rootpage;
}
} {3 4 5 6}
do_test autovacuum-2.2.8 {
file_pages
} {18}
do_test autovacuum-2.2.9 {
execsql {
select * from av1
}
} $av1_data
do_test autovacuum-2.3.1 {
execsql {
INSERT INTO av2 SELECT 'av1' || x FROM av1;
INSERT INTO av3 SELECT 'av2' || x FROM av1;
INSERT INTO av4 SELECT 'av3' || x FROM av1;
}
set ::av2_data [execsql {select x from av2}]
set ::av3_data [execsql {select x from av3}]
set ::av4_data [execsql {select x from av4}]
file_pages
} {54}
do_test autovacuum-2.3.2 {
execsql {
DROP TABLE av2;
SELECT rootpage FROM sqlite_master ORDER BY rootpage;
}
} {3 4 5}
do_test autovacuum-2.3.3 {
file_pages
} {41}
do_test autovacuum-2.3.4 {
execsql {
SELECT x FROM av3;
}
} $::av3_data
do_test autovacuum-2.3.5 {
execsql {
SELECT x FROM av4;
}
} $::av4_data
# Drop all the tables in the file. This puts all pages except the first 2
# (the sqlite_master root-page and the first pointer map page) on the
# free-list.
do_test autovacuum-2.4.1 {
execsql {
DROP TABLE av1;
DROP TABLE av3;
BEGIN;
DROP TABLE av4;
}
file_pages
} {15}
do_test autovacuum-2.4.2 {
for {set i 3} {$i<=10} {incr i} {
execsql "CREATE TABLE av$i (x)"
}
file_pages
} {15}
do_test autovacuum-2.4.3 {
execsql {
SELECT rootpage FROM sqlite_master ORDER by rootpage
}
} {3 4 5 6 7 8 9 10}
# Right now there are 5 free pages in the database. Consume and then free
# 520 pages. Then create 520 tables. This ensures that at least some of the
# desired root-pages reside on the second free-list trunk page, and that the
# trunk itself is required at some point.
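# (The figure of 1020*520 + 500 bytes is not arbitrary: with 1024-byte pages
# each overflow page carries the page size minus a 4-byte next-page pointer,
# i.e. roughly 1020 bytes of payload, so a single value of that size occupies
# on the order of 520 overflow pages, all of which are freed by the DELETE.)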
do_test autovacuum-2.4.4 {
execsql "
INSERT INTO av3 VALUES ('[make_str abcde [expr 1020*520 + 500]]');
DELETE FROM av3;
"
} {}
set root_page_list [list]
set pending_byte_page [expr ($::sqlite_pending_byte / 1024) + 1]
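# (The page holding PENDING_BYTE is a lock-management page that SQLite never
# uses to store content, so it can never be handed out as a root page and is
# skipped below, just like the two pointer-map pages.)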
for {set i 3} {$i<=532} {incr i} {
# 207 and 412 are pointer-map pages.
if { $i!=207 && $i!=412 && $i != $pending_byte_page} {
lappend root_page_list $i
}
}
if {$i >= $pending_byte_page} {
lappend root_page_list $i
}
do_test autovacuum-2.4.5 {
for {set i 11} {$i<=530} {incr i} {
execsql "CREATE TABLE av$i (x)"
}
execsql {
SELECT rootpage FROM sqlite_master ORDER by rootpage
}
} $root_page_list
# Just for fun, delete all those tables and see if the database is 1 page.
do_test autovacuum-2.4.6 {
execsql COMMIT;
file_pages
} [expr 561 + (($i >= $pending_byte_page)?1:0)]
integrity_check autovacuum-2.4.6
do_test autovacuum-2.4.7 {
execsql BEGIN
for {set i 3} {$i<=530} {incr i} {
execsql "DROP TABLE av$i"
}
execsql COMMIT
file_pages
} 1
# Create some tables with indices to drop.
do_test autovacuum-2.5.1 {
execsql {
CREATE TABLE av1(a PRIMARY KEY, b, c);
INSERT INTO av1 VALUES('av1 a', 'av1 b', 'av1 c');
CREATE TABLE av2(a PRIMARY KEY, b, c);
CREATE INDEX av2_i1 ON av2(b);
CREATE INDEX av2_i2 ON av2(c);
INSERT INTO av2 VALUES('av2 a', 'av2 b', 'av2 c');
CREATE TABLE av3(a PRIMARY KEY, b, c);
CREATE INDEX av3_i1 ON av3(b);
INSERT INTO av3 VALUES('av3 a', 'av3 b', 'av3 c');
CREATE TABLE av4(a, b, c);
CREATE INDEX av4_i1 ON av4(a);
CREATE INDEX av4_i2 ON av4(b);
CREATE INDEX av4_i3 ON av4(c);
CREATE INDEX av4_i4 ON av4(a, b, c);
INSERT INTO av4 VALUES('av4 a', 'av4 b', 'av4 c');
}
} {}
do_test autovacuum-2.5.2 {
execsql {
SELECT name, rootpage FROM sqlite_master;
}
} [list av1 3 sqlite_autoindex_av1_1 4 \
av2 5 sqlite_autoindex_av2_1 6 av2_i1 7 av2_i2 8 \
av3 9 sqlite_autoindex_av3_1 10 av3_i1 11 \
av4 12 av4_i1 13 av4_i2 14 av4_i3 15 av4_i4 16 \
]
# The following 4 tests are SELECT queries that use the indices created.
# If the root-pages in the internal schema are not updated correctly when
# a table or index is moved, these queries will fail. They are repeated
# after each table is dropped (i.e. as test cases 2.5.*.[1..4]).
do_test autovacuum-2.5.2.1 {
execsql {
SELECT * FROM av1 WHERE a = 'av1 a';
}
} {{av1 a} {av1 b} {av1 c}}
do_test autovacuum-2.5.2.2 {
execsql {
SELECT * FROM av2 WHERE a = 'av2 a' AND b = 'av2 b' AND c = 'av2 c'
}
} {{av2 a} {av2 b} {av2 c}}
do_test autovacuum-2.5.2.3 {
execsql {
SELECT * FROM av3 WHERE a = 'av3 a' AND b = 'av3 b';
}
} {{av3 a} {av3 b} {av3 c}}
do_test autovacuum-2.5.2.4 {
execsql {
SELECT * FROM av4 WHERE a = 'av4 a' AND b = 'av4 b' AND c = 'av4 c';
}
} {{av4 a} {av4 b} {av4 c}}
# Drop table av3. Indices av4_i2, av4_i3 and av4_i4 are moved to fill the three
# root pages vacated. The operation proceeds as:
# Step 1: Delete av3_i1 (root-page 11). Move root-page of av4_i4 to page 11.
# Step 2: Delete sqlite_autoindex_av3_1 (root-page 10). Move root-page of av4_i3 to page 10.
# Step 3: Delete av3 (root-page 9). Move av4_i2 to page 9.
do_test autovacuum-2.5.3 {
execsql {
DROP TABLE av3;
SELECT name, rootpage FROM sqlite_master;
}
} [list av1 3 sqlite_autoindex_av1_1 4 \
av2 5 sqlite_autoindex_av2_1 6 av2_i1 7 av2_i2 8 \
av4 12 av4_i1 13 av4_i2 9 av4_i3 10 av4_i4 11 \
]
do_test autovacuum-2.5.3.1 {
execsql {
SELECT * FROM av1 WHERE a = 'av1 a';
}
} {{av1 a} {av1 b} {av1 c}}
do_test autovacuum-2.5.3.2 {
execsql {
SELECT * FROM av2 WHERE a = 'av2 a' AND b = 'av2 b' AND c = 'av2 c'
}
} {{av2 a} {av2 b} {av2 c}}
do_test autovacuum-2.5.3.3 {
execsql {
SELECT * FROM av4 WHERE a = 'av4 a' AND b = 'av4 b' AND c = 'av4 c';
}
} {{av4 a} {av4 b} {av4 c}}
# Drop table av1:
# Step 1: Delete sqlite_autoindex_av1_1 (root page 4). Root-page of av4_i1 fills the gap.
# Step 2: Delete av1 (root page 3). Move av4 to the gap.
do_test autovacuum-2.5.4 {
execsql {
DROP TABLE av1;
SELECT name, rootpage FROM sqlite_master;
}
} [list av2 5 sqlite_autoindex_av2_1 6 av2_i1 7 av2_i2 8 \
av4 3 av4_i1 4 av4_i2 9 av4_i3 10 av4_i4 11 \
]
do_test autovacuum-2.5.4.2 {
execsql {
SELECT * FROM av2 WHERE a = 'av2 a' AND b = 'av2 b' AND c = 'av2 c'
}
} {{av2 a} {av2 b} {av2 c}}
do_test autovacuum-2.5.4.4 {
execsql {
SELECT * FROM av4 WHERE a = 'av4 a' AND b = 'av4 b' AND c = 'av4 c';
}
} {{av4 a} {av4 b} {av4 c}}
# Drop table av4:
# Step 1: Delete av4_i4.
# Step 2: Delete av4_i3.
# Step 3: Delete av4_i2.
# Step 4: Delete av4_i1. av2_i2 replaces it.
# Step 5: Delete av4. av2_i1 replaces it.
do_test autovacuum-2.5.5 {
execsql {
DROP TABLE av4;
SELECT name, rootpage FROM sqlite_master;
}
} [list av2 5 sqlite_autoindex_av2_1 6 av2_i1 3 av2_i2 4]
do_test autovacuum-2.5.5.2 {
execsql {
SELECT * FROM av2 WHERE a = 'av2 a' AND b = 'av2 b' AND c = 'av2 c'
}
} {{av2 a} {av2 b} {av2 c}}
#--------------------------------------------------------------------------
# Test cases autovacuum-3.* test the operation of the "PRAGMA auto_vacuum"
# command.
#
do_test autovacuum-3.1 {
execsql {
PRAGMA auto_vacuum;
}
} {1}
do_test autovacuum-3.2 {
db close
sqlite3 db test.db
execsql {
PRAGMA auto_vacuum;
}
} {1}
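# (autovacuum-3.3 relies on the fact that once the database file has been
# created, PRAGMA auto_vacuum cannot switch between "none" and "full"
# without running VACUUM, so the assignment below is a no-op and the pragma
# still reports 1.)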
do_test autovacuum-3.3 {
execsql {
PRAGMA auto_vacuum = 0;
PRAGMA auto_vacuum;
}
} {1}
do_test autovacuum-3.4 {
db close
forcedelete test.db
sqlite3 db test.db
execsql {
PRAGMA auto_vacuum;
}
} $AUTOVACUUM
do_test autovacuum-3.5 {
execsql {
CREATE TABLE av1(x);
PRAGMA auto_vacuum;
}
} $AUTOVACUUM
do_test autovacuum-3.6 {
execsql {
PRAGMA auto_vacuum = 1;
PRAGMA auto_vacuum;
}
} [expr $AUTOVACUUM ? 1 : 0]
do_test autovacuum-3.7 {
execsql {
DROP TABLE av1;
}
file_pages
} [expr $AUTOVACUUM?1:2]
#-----------------------------------------------------------------------
# Test that if a statement transaction around a CREATE INDEX statement is
# rolled back no corruption occurs.
#
do_test autovacuum-4.0 {
# The last round of tests may have left the db in non-autovacuum mode.
# Reset everything just in case.
#
db close
forcedelete test.db test.db-journal
sqlite3 db test.db
execsql {
PRAGMA auto_vacuum = 1;
PRAGMA auto_vacuum;
}
} {1}
do_test autovacuum-4.1 {
execsql {
CREATE TABLE av1(a, b);
BEGIN;
}
for {set i 0} {$i<100} {incr i} {
execsql "INSERT INTO av1 VALUES($i, '[string repeat X 200]');"
}
execsql "INSERT INTO av1 VALUES(99, '[string repeat X 200]');"
execsql {
SELECT sum(a) FROM av1;
}
} {5049}
do_test autovacuum-4.2 {
catchsql {
CREATE UNIQUE INDEX av1_i ON av1(a);
}
} {1 {UNIQUE constraint failed: av1.a}}
do_test autovacuum-4.3 {
execsql {
SELECT sum(a) FROM av1;
}
} {5049}
do_test autovacuum-4.4 {
execsql {
COMMIT;
}
} {}
ifcapable integrityck {
# Ticket #1727
do_test autovacuum-5.1 {
db close
sqlite3 db :memory:
db eval {
PRAGMA auto_vacuum=1;
CREATE TABLE t1(a);
CREATE TABLE t2(a);
DROP TABLE t1;
PRAGMA integrity_check;
}
} ok
}
# Ticket #1728.
#
# In autovacuum mode, when tables or indices are deleted, the rootpage
# values in the symbol table have to be updated. There was a bug in this
# logic so that if an index/table was moved twice, the second move might
# not occur. This would leave the internal symbol table in an inconsistent
# state causing subsequent statements to fail.
#
# The problem is difficult to reproduce. The sequence of statements in
# the following test is carefully designed to make it occur and thus to
# verify that this very obscure bug has been resolved.
#
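# As a minimal sketch, the root-page reassignment described above can be
# observed by dumping the schema table before and after each DROP:
#
#   db eval { SELECT name, rootpage FROM sqlite_master ORDER BY rootpage }
#
# Any object whose rootpage differs between the two dumps was relocated into
# one of the vacated pages by the auto-vacuum logic.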
ifcapable integrityck&&memorydb {
do_test autovacuum-6.1 {
db close
sqlite3 db :memory:
db eval {
PRAGMA auto_vacuum=1;
CREATE TABLE t1(a, b);
CREATE INDEX i1 ON t1(a);
CREATE TABLE t2(a);
CREATE INDEX i2 ON t2(a);
CREATE TABLE t3(a);
CREATE INDEX i3 ON t2(a);
CREATE INDEX x ON t1(b);
DROP TABLE t3;
PRAGMA integrity_check;
DROP TABLE t2;
PRAGMA integrity_check;
DROP TABLE t1;
PRAGMA integrity_check;
}
} {ok ok ok}
}
#---------------------------------------------------------------------
# Test cases autovacuum-7.X test the case where a page must be moved
# and the destination location collides with at least one other
# entry in the page hash-table (internal to the pager.c module).
#
do_test autovacuum-7.1 {
db close
forcedelete test.db
forcedelete test.db-journal
sqlite3 db test.db
execsql {
PRAGMA auto_vacuum=1;
CREATE TABLE t1(a, b, PRIMARY KEY(a, b));
INSERT INTO t1 VALUES(randstr(400,400),randstr(400,400));
INSERT INTO t1 SELECT randstr(400,400), randstr(400,400) FROM t1; -- 2
INSERT INTO t1 SELECT randstr(400,400), randstr(400,400) FROM t1; -- 4
INSERT INTO t1 SELECT randstr(400,400), randstr(400,400) FROM t1; -- 8
INSERT INTO t1 SELECT randstr(400,400), randstr(400,400) FROM t1; -- 16
INSERT INTO t1 SELECT randstr(400,400), randstr(400,400) FROM t1; -- 32
}
expr {[file size test.db] / 1024}
} {73}
do_test autovacuum-7.2 {
execsql {
CREATE TABLE t2(a, b, PRIMARY KEY(a, b));
INSERT INTO t2 SELECT randstr(400,400), randstr(400,400) FROM t1; -- 2
CREATE TABLE t3(a, b, PRIMARY KEY(a, b));
INSERT INTO t3 SELECT randstr(400,400), randstr(400,400) FROM t1; -- 2
CREATE TABLE t4(a, b, PRIMARY KEY(a, b));
INSERT INTO t4 SELECT randstr(400,400), randstr(400,400) FROM t1; -- 2
CREATE TABLE t5(a, b, PRIMARY KEY(a, b));
INSERT INTO t5 SELECT randstr(400,400), randstr(400,400) FROM t1; -- 2
}
expr {[file size test.db] / 1024}
} {354}
do_test autovacuum-7.3 {
db close
sqlite3 db test.db
execsql {
BEGIN;
DELETE FROM t4;
COMMIT;
SELECT count(*) FROM t1;
}
expr {[file size test.db] / 1024}
} {286}
#------------------------------------------------------------------------
# Additional tests.
#
# Try to determine the autovacuum setting for a database that is locked.
#
do_test autovacuum-8.1 {
db close
sqlite3 db test.db
sqlite3 db2 test.db
db eval {PRAGMA auto_vacuum}
} {1}
if {[permutation] == ""} {
do_test autovacuum-8.2 {
db eval {BEGIN EXCLUSIVE}
catchsql {PRAGMA auto_vacuum} db2
} {1 {database is locked}}
catch {db2 close}
catch {db eval {COMMIT}}
}
do_test autovacuum-9.1 {
execsql {
DROP TABLE t1;
DROP TABLE t2;
DROP TABLE t3;
DROP TABLE t4;
DROP TABLE t5;
PRAGMA page_count;
}
} {1}
do_test autovacuum-9.2 {
file size test.db
} 1024
do_test autovacuum-9.3 {
execsql {
CREATE TABLE t1(a INTEGER PRIMARY KEY, b);
INSERT INTO t1 VALUES(NULL, randstr(50,50));
}
for {set ii 0} {$ii < 10} {incr ii} {
db eval { INSERT INTO t1 SELECT NULL, randstr(50,50) FROM t1 }
}
file size test.db
} $::sqlite_pending_byte
do_test autovacuum-9.4 {
execsql { INSERT INTO t1 SELECT NULL, randstr(50,50) FROM t1 }
} {}
do_test autovacuum-9.5 {
execsql { DELETE FROM t1 WHERE rowid > (SELECT max(a)/2 FROM t1) }
file size test.db
} $::sqlite_pending_byte
finish_test

View File

@@ -0,0 +1,132 @@
# 2001 October 12
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for SQLite library. The
# focus of this file is testing for correct handling of I/O errors
# such as writes failing because the disk is full.
#
# The tests in this file use special facilities that are only
# available in the SQLite test fixture.
#
# $Id: autovacuum_ioerr2.test,v 1.7 2008/07/12 14:52:20 drh Exp $
set testdir [file dirname $argv0]
source $testdir/tester.tcl
# If this build of the library does not support auto-vacuum, omit this
# whole file.
ifcapable {!autovacuum} {
finish_test
return
}
do_ioerr_test autovacuum-ioerr2-1 -sqlprep {
PRAGMA auto_vacuum = 1;
CREATE TABLE abc(a);
INSERT INTO abc VALUES(randstr(1500,1500));
} -sqlbody {
CREATE TABLE abc2(a);
BEGIN;
DELETE FROM abc;
INSERT INTO abc VALUES(randstr(1500,1500));
CREATE TABLE abc3(a);
COMMIT;
}
do_ioerr_test autovacuum-ioerr2-2 -tclprep {
execsql {
PRAGMA auto_vacuum = 1;
PRAGMA cache_size = 10;
BEGIN;
CREATE TABLE abc(a);
INSERT INTO abc VALUES(randstr(1100,1100)); -- Page 4 is overflow
INSERT INTO abc VALUES(randstr(1100,1100)); -- Page 5 is overflow
}
for {set i 0} {$i<150} {incr i} {
execsql {
INSERT INTO abc VALUES(randstr(100,100));
}
}
execsql COMMIT
} -sqlbody {
BEGIN;
DELETE FROM abc WHERE length(a)>100;
UPDATE abc SET a = randstr(90,90);
CREATE TABLE abc3(a);
COMMIT;
}
do_ioerr_test autovacuum-ioerr2-3 -sqlprep {
PRAGMA auto_vacuum = 1;
CREATE TABLE abc(a);
CREATE TABLE abc2(b);
} -sqlbody {
BEGIN;
INSERT INTO abc2 VALUES(10);
DROP TABLE abc;
COMMIT;
DROP TABLE abc2;
}
forcedelete backup.db
ifcapable subquery {
do_ioerr_test autovacuum-ioerr2-4 -tclprep {
if {![file exists backup.db]} {
sqlite3 dbb backup.db
execsql {
PRAGMA auto_vacuum = 1;
BEGIN;
CREATE TABLE abc(a);
INSERT INTO abc VALUES(randstr(1100,1100)); -- Page 4 is overflow
INSERT INTO abc VALUES(randstr(1100,1100)); -- Page 5 is overflow
} dbb
for {set i 0} {$i<2500} {incr i} {
execsql {
INSERT INTO abc VALUES(randstr(100,100));
} dbb
}
execsql {
COMMIT;
PRAGMA cache_size = 10;
} dbb
dbb close
}
db close
forcedelete test.db
forcedelete test.db-journal
forcecopy backup.db test.db
set ::DB [sqlite3 db test.db]
execsql {
PRAGMA cache_size = 10;
}
} -sqlbody {
BEGIN;
DELETE FROM abc WHERE oid < 3;
UPDATE abc SET a = randstr(100,100) WHERE oid > 2300;
UPDATE abc SET a = randstr(1100,1100) WHERE oid =
(select max(oid) from abc);
COMMIT;
}
}
do_ioerr_test autovacuum-ioerr2-1 -sqlprep {
PRAGMA auto_vacuum = 1;
CREATE TABLE abc(a);
INSERT INTO abc VALUES(randstr(1500,1500));
} -sqlbody {
CREATE TABLE abc2(a);
BEGIN;
DELETE FROM abc;
INSERT INTO abc VALUES(randstr(1500,1500));
CREATE TABLE abc3(a);
COMMIT;
}
finish_test

View File

@@ -0,0 +1,926 @@
# 2001 September 15
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for SQLite library. This
# file is a copy of "trans.test" modified to run under autovacuum mode.
# The point is to stress the autovacuum logic and try to get it to fail.
#
# $Id: avtrans.test,v 1.6 2007/09/12 17:01:45 danielk1977 Exp $
set testdir [file dirname $argv0]
source $testdir/tester.tcl
# Create several tables to work with.
#
do_test avtrans-1.0 {
execsql { PRAGMA auto_vacuum=ON }
wal_set_journal_mode
execsql {
CREATE TABLE one(a int PRIMARY KEY, b text);
INSERT INTO one VALUES(1,'one');
INSERT INTO one VALUES(2,'two');
INSERT INTO one VALUES(3,'three');
SELECT b FROM one ORDER BY a;
}
} {one two three}
do_test avtrans-1.1 {
execsql {
CREATE TABLE two(a int PRIMARY KEY, b text);
INSERT INTO two VALUES(1,'I');
INSERT INTO two VALUES(5,'V');
INSERT INTO two VALUES(10,'X');
SELECT b FROM two ORDER BY a;
}
} {I V X}
do_test avtrans-1.9 {
sqlite3 altdb test.db
execsql {SELECT b FROM one ORDER BY a} altdb
} {one two three}
do_test avtrans-1.10 {
execsql {SELECT b FROM two ORDER BY a} altdb
} {I V X}
integrity_check avtrans-1.11
wal_check_journal_mode avtrans-1.12
# Basic transactions
#
do_test avtrans-2.1 {
set v [catch {execsql {BEGIN}} msg]
lappend v $msg
} {0 {}}
do_test avtrans-2.2 {
set v [catch {execsql {END}} msg]
lappend v $msg
} {0 {}}
do_test avtrans-2.3 {
set v [catch {execsql {BEGIN TRANSACTION}} msg]
lappend v $msg
} {0 {}}
do_test avtrans-2.4 {
set v [catch {execsql {COMMIT TRANSACTION}} msg]
lappend v $msg
} {0 {}}
do_test avtrans-2.5 {
set v [catch {execsql {BEGIN TRANSACTION 'foo'}} msg]
lappend v $msg
} {0 {}}
do_test avtrans-2.6 {
set v [catch {execsql {ROLLBACK TRANSACTION 'foo'}} msg]
lappend v $msg
} {0 {}}
do_test avtrans-2.10 {
execsql {
BEGIN;
SELECT a FROM one ORDER BY a;
SELECT a FROM two ORDER BY a;
END;
}
} {1 2 3 1 5 10}
integrity_check avtrans-2.11
wal_check_journal_mode avtrans-2.12
# Check the locking behavior
#
sqlite3_soft_heap_limit 0
do_test avtrans-3.1 {
execsql {
BEGIN;
UPDATE one SET a = 0 WHERE 0;
SELECT a FROM one ORDER BY a;
}
} {1 2 3}
do_test avtrans-3.2 {
catchsql {
SELECT a FROM two ORDER BY a;
} altdb
} {0 {1 5 10}}
do_test avtrans-3.3 {
catchsql {
SELECT a FROM one ORDER BY a;
} altdb
} {0 {1 2 3}}
do_test avtrans-3.4 {
catchsql {
INSERT INTO one VALUES(4,'four');
}
} {0 {}}
do_test avtrans-3.5 {
catchsql {
SELECT a FROM two ORDER BY a;
} altdb
} {0 {1 5 10}}
do_test avtrans-3.6 {
catchsql {
SELECT a FROM one ORDER BY a;
} altdb
} {0 {1 2 3}}
do_test avtrans-3.7 {
catchsql {
INSERT INTO two VALUES(4,'IV');
}
} {0 {}}
do_test avtrans-3.8 {
catchsql {
SELECT a FROM two ORDER BY a;
} altdb
} {0 {1 5 10}}
do_test avtrans-3.9 {
catchsql {
SELECT a FROM one ORDER BY a;
} altdb
} {0 {1 2 3}}
do_test avtrans-3.10 {
execsql {END TRANSACTION}
} {}
do_test avtrans-3.11 {
set v [catch {execsql {
SELECT a FROM two ORDER BY a;
} altdb} msg]
lappend v $msg
} {0 {1 4 5 10}}
do_test avtrans-3.12 {
set v [catch {execsql {
SELECT a FROM one ORDER BY a;
} altdb} msg]
lappend v $msg
} {0 {1 2 3 4}}
do_test avtrans-3.13 {
set v [catch {execsql {
SELECT a FROM two ORDER BY a;
} db} msg]
lappend v $msg
} {0 {1 4 5 10}}
do_test avtrans-3.14 {
set v [catch {execsql {
SELECT a FROM one ORDER BY a;
} db} msg]
lappend v $msg
} {0 {1 2 3 4}}
sqlite3_soft_heap_limit $cmdlinearg(soft-heap-limit)
integrity_check avtrans-3.15
do_test avtrans-4.1 {
set v [catch {execsql {
COMMIT;
} db} msg]
lappend v $msg
} {1 {cannot commit - no transaction is active}}
do_test avtrans-4.2 {
set v [catch {execsql {
ROLLBACK;
} db} msg]
lappend v $msg
} {1 {cannot rollback - no transaction is active}}
do_test avtrans-4.3 {
catchsql {
BEGIN TRANSACTION;
UPDATE two SET a = 0 WHERE 0;
SELECT a FROM two ORDER BY a;
} db
} {0 {1 4 5 10}}
do_test avtrans-4.4 {
catchsql {
SELECT a FROM two ORDER BY a;
} altdb
} {0 {1 4 5 10}}
do_test avtrans-4.5 {
catchsql {
SELECT a FROM one ORDER BY a;
} altdb
} {0 {1 2 3 4}}
do_test avtrans-4.6 {
catchsql {
BEGIN TRANSACTION;
SELECT a FROM one ORDER BY a;
} db
} {1 {cannot start a transaction within a transaction}}
do_test avtrans-4.7 {
catchsql {
SELECT a FROM two ORDER BY a;
} altdb
} {0 {1 4 5 10}}
do_test avtrans-4.8 {
catchsql {
SELECT a FROM one ORDER BY a;
} altdb
} {0 {1 2 3 4}}
do_test avtrans-4.9 {
set v [catch {execsql {
END TRANSACTION;
SELECT a FROM two ORDER BY a;
} db} msg]
lappend v $msg
} {0 {1 4 5 10}}
do_test avtrans-4.10 {
set v [catch {execsql {
SELECT a FROM two ORDER BY a;
} altdb} msg]
lappend v $msg
} {0 {1 4 5 10}}
do_test avtrans-4.11 {
set v [catch {execsql {
SELECT a FROM one ORDER BY a;
} altdb} msg]
lappend v $msg
} {0 {1 2 3 4}}
integrity_check avtrans-4.12
do_test avtrans-4.98 {
altdb close
execsql {
DROP TABLE one;
DROP TABLE two;
}
} {}
integrity_check avtrans-4.99
# Check out the commit/rollback behavior of the database
#
do_test avtrans-5.1 {
execsql {SELECT name FROM sqlite_master WHERE type='table' ORDER BY name}
} {}
do_test avtrans-5.2 {
execsql {BEGIN TRANSACTION}
execsql {SELECT name FROM sqlite_master WHERE type='table' ORDER BY name}
} {}
do_test avtrans-5.3 {
execsql {CREATE TABLE one(a text, b int)}
execsql {SELECT name FROM sqlite_master WHERE type='table' ORDER BY name}
} {one}
do_test avtrans-5.4 {
execsql {SELECT a,b FROM one ORDER BY b}
} {}
do_test avtrans-5.5 {
execsql {INSERT INTO one(a,b) VALUES('hello', 1)}
execsql {SELECT a,b FROM one ORDER BY b}
} {hello 1}
do_test avtrans-5.6 {
execsql {ROLLBACK}
execsql {SELECT name FROM sqlite_master WHERE type='table' ORDER BY name}
} {}
do_test avtrans-5.7 {
set v [catch {
execsql {SELECT a,b FROM one ORDER BY b}
} msg]
lappend v $msg
} {1 {no such table: one}}
# Test commits and rollbacks of table CREATE TABLEs, CREATE INDEXs
# DROP TABLEs and DROP INDEXs
#
do_test avtrans-5.8 {
execsql {
SELECT name fROM sqlite_master
WHERE type='table' OR type='index'
ORDER BY name
}
} {}
do_test avtrans-5.9 {
execsql {
BEGIN TRANSACTION;
CREATE TABLE t1(a int, b int, c int);
SELECT name fROM sqlite_master
WHERE type='table' OR type='index'
ORDER BY name;
}
} {t1}
do_test avtrans-5.10 {
execsql {
CREATE INDEX i1 ON t1(a);
SELECT name fROM sqlite_master
WHERE type='table' OR type='index'
ORDER BY name;
}
} {i1 t1}
do_test avtrans-5.11 {
execsql {
COMMIT;
SELECT name fROM sqlite_master
WHERE type='table' OR type='index'
ORDER BY name;
}
} {i1 t1}
do_test avtrans-5.12 {
execsql {
BEGIN TRANSACTION;
CREATE TABLE t2(a int, b int, c int);
CREATE INDEX i2a ON t2(a);
CREATE INDEX i2b ON t2(b);
DROP TABLE t1;
SELECT name fROM sqlite_master
WHERE type='table' OR type='index'
ORDER BY name;
}
} {i2a i2b t2}
do_test avtrans-5.13 {
execsql {
ROLLBACK;
SELECT name fROM sqlite_master
WHERE type='table' OR type='index'
ORDER BY name;
}
} {i1 t1}
do_test avtrans-5.14 {
execsql {
BEGIN TRANSACTION;
DROP INDEX i1;
SELECT name fROM sqlite_master
WHERE type='table' OR type='index'
ORDER BY name;
}
} {t1}
do_test avtrans-5.15 {
execsql {
ROLLBACK;
SELECT name fROM sqlite_master
WHERE type='table' OR type='index'
ORDER BY name;
}
} {i1 t1}
do_test avtrans-5.16 {
execsql {
BEGIN TRANSACTION;
DROP INDEX i1;
CREATE TABLE t2(x int, y int, z int);
CREATE INDEX i2x ON t2(x);
CREATE INDEX i2y ON t2(y);
INSERT INTO t2 VALUES(1,2,3);
SELECT name fROM sqlite_master
WHERE type='table' OR type='index'
ORDER BY name;
}
} {i2x i2y t1 t2}
do_test avtrans-5.17 {
execsql {
COMMIT;
SELECT name fROM sqlite_master
WHERE type='table' OR type='index'
ORDER BY name;
}
} {i2x i2y t1 t2}
do_test avtrans-5.18 {
execsql {
SELECT * FROM t2;
}
} {1 2 3}
do_test avtrans-5.19 {
execsql {
SELECT x FROM t2 WHERE y=2;
}
} {1}
do_test avtrans-5.20 {
execsql {
BEGIN TRANSACTION;
DROP TABLE t1;
DROP TABLE t2;
SELECT name fROM sqlite_master
WHERE type='table' OR type='index'
ORDER BY name;
}
} {}
do_test avtrans-5.21 {
set r [catch {execsql {
SELECT * FROM t2
}} msg]
lappend r $msg
} {1 {no such table: t2}}
do_test avtrans-5.22 {
execsql {
ROLLBACK;
SELECT name fROM sqlite_master
WHERE type='table' OR type='index'
ORDER BY name;
}
} {i2x i2y t1 t2}
do_test avtrans-5.23 {
execsql {
SELECT * FROM t2;
}
} {1 2 3}
integrity_check avtrans-5.23
# Try to DROP and CREATE tables and indices with the same name
# within a transaction. Make sure ROLLBACK works.
#
do_test avtrans-6.1 {
execsql2 {
INSERT INTO t1 VALUES(1,2,3);
BEGIN TRANSACTION;
DROP TABLE t1;
CREATE TABLE t1(p,q,r);
ROLLBACK;
SELECT * FROM t1;
}
} {a 1 b 2 c 3}
do_test avtrans-6.2 {
execsql2 {
INSERT INTO t1 VALUES(1,2,3);
BEGIN TRANSACTION;
DROP TABLE t1;
CREATE TABLE t1(p,q,r);
COMMIT;
SELECT * FROM t1;
}
} {}
do_test avtrans-6.3 {
execsql2 {
INSERT INTO t1 VALUES(1,2,3);
SELECT * FROM t1;
}
} {p 1 q 2 r 3}
do_test avtrans-6.4 {
execsql2 {
BEGIN TRANSACTION;
DROP TABLE t1;
CREATE TABLE t1(a,b,c);
INSERT INTO t1 VALUES(4,5,6);
SELECT * FROM t1;
DROP TABLE t1;
}
} {a 4 b 5 c 6}
do_test avtrans-6.5 {
execsql2 {
ROLLBACK;
SELECT * FROM t1;
}
} {p 1 q 2 r 3}
do_test avtrans-6.6 {
execsql2 {
BEGIN TRANSACTION;
DROP TABLE t1;
CREATE TABLE t1(a,b,c);
INSERT INTO t1 VALUES(4,5,6);
SELECT * FROM t1;
DROP TABLE t1;
}
} {a 4 b 5 c 6}
do_test avtrans-6.7 {
catchsql {
COMMIT;
SELECT * FROM t1;
}
} {1 {no such table: t1}}
# Repeat on a table with an automatically generated index.
#
do_test avtrans-6.10 {
execsql2 {
CREATE TABLE t1(a unique,b,c);
INSERT INTO t1 VALUES(1,2,3);
BEGIN TRANSACTION;
DROP TABLE t1;
CREATE TABLE t1(p unique,q,r);
ROLLBACK;
SELECT * FROM t1;
}
} {a 1 b 2 c 3}
do_test avtrans-6.11 {
execsql2 {
BEGIN TRANSACTION;
DROP TABLE t1;
CREATE TABLE t1(p unique,q,r);
COMMIT;
SELECT * FROM t1;
}
} {}
do_test avtrans-6.12 {
execsql2 {
INSERT INTO t1 VALUES(1,2,3);
SELECT * FROM t1;
}
} {p 1 q 2 r 3}
do_test avtrans-6.13 {
execsql2 {
BEGIN TRANSACTION;
DROP TABLE t1;
CREATE TABLE t1(a unique,b,c);
INSERT INTO t1 VALUES(4,5,6);
SELECT * FROM t1;
DROP TABLE t1;
}
} {a 4 b 5 c 6}
do_test avtrans-6.14 {
execsql2 {
ROLLBACK;
SELECT * FROM t1;
}
} {p 1 q 2 r 3}
do_test avtrans-6.15 {
execsql2 {
BEGIN TRANSACTION;
DROP TABLE t1;
CREATE TABLE t1(a unique,b,c);
INSERT INTO t1 VALUES(4,5,6);
SELECT * FROM t1;
DROP TABLE t1;
}
} {a 4 b 5 c 6}
do_test avtrans-6.16 {
catchsql {
COMMIT;
SELECT * FROM t1;
}
} {1 {no such table: t1}}
do_test avtrans-6.20 {
execsql {
CREATE TABLE t1(a integer primary key,b,c);
INSERT INTO t1 VALUES(1,-2,-3);
INSERT INTO t1 VALUES(4,-5,-6);
SELECT * FROM t1;
}
} {1 -2 -3 4 -5 -6}
do_test avtrans-6.21 {
execsql {
CREATE INDEX i1 ON t1(b);
SELECT * FROM t1 WHERE b<1;
}
} {4 -5 -6 1 -2 -3}
do_test avtrans-6.22 {
execsql {
BEGIN TRANSACTION;
DROP INDEX i1;
SELECT * FROM t1 WHERE b<1;
ROLLBACK;
}
} {1 -2 -3 4 -5 -6}
do_test avtrans-6.23 {
execsql {
SELECT * FROM t1 WHERE b<1;
}
} {4 -5 -6 1 -2 -3}
do_test avtrans-6.24 {
execsql {
BEGIN TRANSACTION;
DROP TABLE t1;
ROLLBACK;
SELECT * FROM t1 WHERE b<1;
}
} {4 -5 -6 1 -2 -3}
do_test avtrans-6.25 {
execsql {
BEGIN TRANSACTION;
DROP INDEX i1;
CREATE INDEX i1 ON t1(c);
SELECT * FROM t1 WHERE b<1;
}
} {1 -2 -3 4 -5 -6}
do_test avtrans-6.26 {
execsql {
SELECT * FROM t1 WHERE c<1;
}
} {4 -5 -6 1 -2 -3}
do_test avtrans-6.27 {
execsql {
ROLLBACK;
SELECT * FROM t1 WHERE b<1;
}
} {4 -5 -6 1 -2 -3}
do_test avtrans-6.28 {
execsql {
SELECT * FROM t1 WHERE c<1;
}
} {1 -2 -3 4 -5 -6}
# The following repeats steps 6.20 through 6.28, but puts a "unique"
# constraint on the first field of the table in order to generate an
# automatic index.
#
do_test avtrans-6.30 {
execsql {
BEGIN TRANSACTION;
DROP TABLE t1;
CREATE TABLE t1(a int unique,b,c);
COMMIT;
INSERT INTO t1 VALUES(1,-2,-3);
INSERT INTO t1 VALUES(4,-5,-6);
SELECT * FROM t1 ORDER BY a;
}
} {1 -2 -3 4 -5 -6}
do_test avtrans-6.31 {
execsql {
CREATE INDEX i1 ON t1(b);
SELECT * FROM t1 WHERE b<1;
}
} {4 -5 -6 1 -2 -3}
do_test avtrans-6.32 {
execsql {
BEGIN TRANSACTION;
DROP INDEX i1;
SELECT * FROM t1 WHERE b<1;
ROLLBACK;
}
} {1 -2 -3 4 -5 -6}
do_test avtrans-6.33 {
execsql {
SELECT * FROM t1 WHERE b<1;
}
} {4 -5 -6 1 -2 -3}
do_test avtrans-6.34 {
execsql {
BEGIN TRANSACTION;
DROP TABLE t1;
ROLLBACK;
SELECT * FROM t1 WHERE b<1;
}
} {4 -5 -6 1 -2 -3}
do_test avtrans-6.35 {
execsql {
BEGIN TRANSACTION;
DROP INDEX i1;
CREATE INDEX i1 ON t1(c);
SELECT * FROM t1 WHERE b<1;
}
} {1 -2 -3 4 -5 -6}
do_test avtrans-6.36 {
execsql {
SELECT * FROM t1 WHERE c<1;
}
} {4 -5 -6 1 -2 -3}
do_test avtrans-6.37 {
execsql {
DROP INDEX i1;
SELECT * FROM t1 WHERE c<1;
}
} {1 -2 -3 4 -5 -6}
do_test avtrans-6.38 {
execsql {
ROLLBACK;
SELECT * FROM t1 WHERE b<1;
}
} {4 -5 -6 1 -2 -3}
do_test avtrans-6.39 {
execsql {
SELECT * FROM t1 WHERE c<1;
}
} {1 -2 -3 4 -5 -6}
integrity_check avtrans-6.40
ifcapable !floatingpoint {
finish_test
return
}
# Test to make sure rollback restores the database back to its original
# state.
#
do_test avtrans-7.1 {
execsql {BEGIN}
for {set i 0} {$i<1000} {incr i} {
set r1 [expr {rand()}]
set r2 [expr {rand()}]
set r3 [expr {rand()}]
execsql "INSERT INTO t2 VALUES($r1,$r2,$r3)"
}
execsql {COMMIT}
set ::checksum [execsql {SELECT md5sum(x,y,z) FROM t2}]
set ::checksum2 [
execsql {SELECT md5sum(type,name,tbl_name,rootpage,sql) FROM sqlite_master}
]
execsql {SELECT count(*) FROM t2}
} {1001}
do_test avtrans-7.2 {
execsql {SELECT md5sum(x,y,z) FROM t2}
} $checksum
do_test avtrans-7.2.1 {
execsql {SELECT md5sum(type,name,tbl_name,rootpage,sql) FROM sqlite_master}
} $checksum2
do_test avtrans-7.3 {
execsql {
BEGIN;
DELETE FROM t2;
ROLLBACK;
SELECT md5sum(x,y,z) FROM t2;
}
} $checksum
do_test avtrans-7.4 {
execsql {
BEGIN;
INSERT INTO t2 SELECT * FROM t2;
ROLLBACK;
SELECT md5sum(x,y,z) FROM t2;
}
} $checksum
do_test avtrans-7.5 {
execsql {
BEGIN;
DELETE FROM t2;
ROLLBACK;
SELECT md5sum(x,y,z) FROM t2;
}
} $checksum
do_test avtrans-7.6 {
execsql {
BEGIN;
INSERT INTO t2 SELECT * FROM t2;
ROLLBACK;
SELECT md5sum(x,y,z) FROM t2;
}
} $checksum
do_test avtrans-7.7 {
execsql {
BEGIN;
CREATE TABLE t3 AS SELECT * FROM t2;
INSERT INTO t2 SELECT * FROM t3;
ROLLBACK;
SELECT md5sum(x,y,z) FROM t2;
}
} $checksum
do_test avtrans-7.8 {
execsql {SELECT md5sum(type,name,tbl_name,rootpage,sql) FROM sqlite_master}
} $checksum2
ifcapable tempdb {
do_test avtrans-7.9 {
execsql {
BEGIN;
CREATE TEMP TABLE t3 AS SELECT * FROM t2;
INSERT INTO t2 SELECT * FROM t3;
ROLLBACK;
SELECT md5sum(x,y,z) FROM t2;
}
} $checksum
}
do_test avtrans-7.10 {
execsql {SELECT md5sum(type,name,tbl_name,rootpage,sql) FROM sqlite_master}
} $checksum2
ifcapable tempdb {
do_test avtrans-7.11 {
execsql {
BEGIN;
CREATE TEMP TABLE t3 AS SELECT * FROM t2;
INSERT INTO t2 SELECT * FROM t3;
DROP INDEX i2x;
DROP INDEX i2y;
CREATE INDEX i3a ON t3(x);
ROLLBACK;
SELECT md5sum(x,y,z) FROM t2;
}
} $checksum
}
do_test avtrans-7.12 {
execsql {SELECT md5sum(type,name,tbl_name,rootpage,sql) FROM sqlite_master}
} $checksum2
ifcapable tempdb {
do_test avtrans-7.13 {
execsql {
BEGIN;
DROP TABLE t2;
ROLLBACK;
SELECT md5sum(x,y,z) FROM t2;
}
} $checksum
}
do_test avtrans-7.14 {
execsql {SELECT md5sum(type,name,tbl_name,rootpage,sql) FROM sqlite_master}
} $checksum2
integrity_check avtrans-7.15
# Arrange for another process to begin modifying the database but abort
# and die in the middle of the modification. Then have this process read
# the database. This process should detect the journal file and roll it
# back. Verify that this happens correctly.
#
set fd [open test.tcl w]
puts $fd {
sqlite3 db test.db
db eval {
PRAGMA default_cache_size=20;
BEGIN;
CREATE TABLE t3 AS SELECT * FROM t2;
DELETE FROM t2;
}
sqlite_abort
}
close $fd
do_test avtrans-8.1 {
catch {exec [info nameofexec] test.tcl}
execsql {SELECT md5sum(x,y,z) FROM t2}
} $checksum
do_test avtrans-8.2 {
execsql {SELECT md5sum(type,name,tbl_name,rootpage,sql) FROM sqlite_master}
} $checksum2
integrity_check avtrans-8.3
# In the following sequence of tests, compute the MD5 sum of the content
# of a table, make lots of modifications to that table, then do a rollback.
# Verify that after the rollback, the MD5 checksum is unchanged.
#
do_test avtrans-9.1 {
execsql {
PRAGMA default_cache_size=10;
}
db close
sqlite3 db test.db
execsql {
BEGIN;
CREATE TABLE t3(x TEXT);
INSERT INTO t3 VALUES(randstr(10,400));
INSERT INTO t3 VALUES(randstr(10,400));
INSERT INTO t3 SELECT randstr(10,400) FROM t3;
INSERT INTO t3 SELECT randstr(10,400) FROM t3;
INSERT INTO t3 SELECT randstr(10,400) FROM t3;
INSERT INTO t3 SELECT randstr(10,400) FROM t3;
INSERT INTO t3 SELECT randstr(10,400) FROM t3;
INSERT INTO t3 SELECT randstr(10,400) FROM t3;
INSERT INTO t3 SELECT randstr(10,400) FROM t3;
INSERT INTO t3 SELECT randstr(10,400) FROM t3;
INSERT INTO t3 SELECT randstr(10,400) FROM t3;
COMMIT;
SELECT count(*) FROM t3;
}
} {1024}
# The following procedure computes a "signature" for table "t3". If
# T3 changes in any way, the signature should change.
#
# This is used to test ROLLBACK. We gather a signature for t3, then
# make lots of changes to t3, then rollback and take another signature.
# The two signatures should be the same.
#
proc signature {} {
return [db eval {SELECT count(*), md5sum(x) FROM t3}]
}
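# A minimal usage sketch for [signature]: capture a signature, modify t3
# inside a transaction, roll back, and check that the signature is unchanged:
#
#   set before [signature]
#   db eval { BEGIN; DELETE FROM t3; ROLLBACK }
#   expr {$before eq [signature]}   ;# expected to be 1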
# Repeat the following group of tests 20 times for quick testing and
# 40 times for full testing. Each iteration of the test makes table
# t3 a little larger, and thus takes a little longer, so doing 40 tests
# is more than 2.0 times slower than doing 20 tests. Considerably more.
#
if {[info exists G(isquick)]} {
set limit 20
} else {
set limit 40
}
# Do rollbacks. Make sure the signature does not change.
#
for {set i 2} {$i<=$limit} {incr i} {
set ::sig [signature]
set cnt [lindex $::sig 0]
if {$i%2==0} {
execsql {PRAGMA fullfsync=ON}
} else {
execsql {PRAGMA fullfsync=OFF}
}
set sqlite_sync_count 0
set sqlite_fullsync_count 0
do_test avtrans-9.$i.1-$cnt {
execsql {
BEGIN;
DELETE FROM t3 WHERE random()%10!=0;
INSERT INTO t3 SELECT randstr(10,10)||x FROM t3;
INSERT INTO t3 SELECT randstr(10,10)||x FROM t3;
ROLLBACK;
}
signature
} $sig
do_test avtrans-9.$i.2-$cnt {
execsql {
BEGIN;
DELETE FROM t3 WHERE random()%10!=0;
INSERT INTO t3 SELECT randstr(10,10)||x FROM t3;
DELETE FROM t3 WHERE random()%10!=0;
INSERT INTO t3 SELECT randstr(10,10)||x FROM t3;
ROLLBACK;
}
signature
} $sig
if {$i<$limit} {
do_test avtrans-9.$i.3-$cnt {
execsql {
INSERT INTO t3 SELECT randstr(10,400) FROM t3 WHERE random()%10==0;
}
} {}
if {$tcl_platform(platform)=="unix"} {
do_test avtrans-9.$i.4-$cnt {
expr {$sqlite_sync_count>0}
} 1
ifcapable pager_pragmas {
do_test avtrans-9.$i.5-$cnt {
expr {$sqlite_fullsync_count>0}
} [expr {$i%2==0}]
} else {
do_test avtrans-9.$i.5-$cnt {
expr {$sqlite_fullsync_count==0}
} {1}
}
}
wal_check_journal_mode avtrans-9.$i-6.$cnt
}
set ::pager_old_format 0
}
integrity_check avtrans-10.1
wal_check_journal_mode avtrans-10.2
finish_test

View File

@@ -0,0 +1,517 @@
# 2010 August 19
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for SQLite library. The
# focus of this file is testing that the current version of SQLite
# is capable of reading and writing databases created by previous
# versions, and vice-versa.
#
# To use this test, old versions of the testfixture process should be
# copied into the working directory alongside the new version. The old
# versions should be named "testfixtureXXX" (or testfixtureXXX.exe on
# windows), where XXX can be any string.
#
# This test file uses the tcl code for controlling a second testfixture
# process located in lock_common.tcl. See the comments in lock_common.tcl
# for documentation of the available commands.
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
source $testdir/lock_common.tcl
source $testdir/malloc_common.tcl
source $testdir/bc_common.tcl
db close
if {"" == [bc_find_binaries backcompat.test]} {
finish_test
return
}
proc do_backcompat_test {rv bin1 bin2 script} {
forcedelete test.db
if {$bin1 != ""} { set ::bc_chan1 [launch_testfixture $bin1] }
set ::bc_chan2 [launch_testfixture $bin2]
if { $rv } {
proc code2 {tcl} { uplevel #0 $tcl }
if {$bin1 != ""} { proc code2 {tcl} { testfixture $::bc_chan1 $tcl } }
proc code1 {tcl} { testfixture $::bc_chan2 $tcl }
} else {
proc code1 {tcl} { uplevel #0 $tcl }
if {$bin1 != ""} { proc code1 {tcl} { testfixture $::bc_chan1 $tcl } }
proc code2 {tcl} { testfixture $::bc_chan2 $tcl }
}
proc sql1 sql { code1 [list db eval $sql] }
proc sql2 sql { code2 [list db eval $sql] }
code1 { sqlite3 db test.db }
code2 { sqlite3 db test.db }
foreach c {code1 code2} {
$c {
set v [split [db version] .]
if {[llength $v]==3} {lappend v 0}
set ::sqlite_libversion [format \
"%d%.2d%.2d%2d" [lindex $v 0] [lindex $v 1] [lindex $v 2] [lindex $v 3]
]
}
}
uplevel $script
catch { code1 { db close } }
catch { code2 { db close } }
catch { close $::bc_chan2 }
catch { close $::bc_chan1 }
}
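# Note on the $rv argument: with rv==0 the [code1]/[sql1] commands run in the
# invoking process (or in $bin1, when one is supplied) and [code2]/[sql2] run
# in the $bin2 testfixture; with rv==1 the roles are swapped, so every test
# body is exercised in both directions.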
array set ::incompatible [list]
proc do_allbackcompat_test {script} {
foreach bin $::BC(binaries) {
set nErr [set_test_counter errors]
foreach dir {0 1} {
set bintag [string map {testfixture {}} $bin]
set bintag [string map {\.exe {}} $bintag]
if {$bintag == ""} {set bintag self}
set ::bcname ".$bintag.$dir."
rename do_test _do_test
proc do_test {nm sql res} {
set nm [regsub {\.} $nm $::bcname]
uplevel [list _do_test $nm $sql $res]
}
do_backcompat_test $dir {} $bin $script
rename do_test {}
rename _do_test do_test
}
if { $nErr < [set_test_counter errors] } {
set ::incompatible([get_version $bin]) 1
}
}
}
proc read_file {zFile} {
set zData {}
if {[file exists $zFile]} {
set fd [open $zFile]
fconfigure $fd -translation binary -encoding binary
if {[file size $zFile]<=$::sqlite_pending_byte || $zFile != "test.db"} {
set zData [read $fd]
} else {
set zData [read $fd $::sqlite_pending_byte]
append zData [string repeat x 512]
seek $fd [expr $::sqlite_pending_byte+512] start
append zData [read $fd]
}
close $fd
}
return $zData
}
proc write_file {zFile zData} {
set fd [open $zFile w]
fconfigure $fd -translation binary -encoding binary
puts -nonewline $fd $zData
close $fd
}
proc read_file_system {} {
set ret [list]
foreach f {test.db test.db-journal test.db-wal} { lappend ret [read_file $f] }
set ret
}
proc write_file_system {data} {
foreach f {test.db test.db-journal test.db-wal} d $data {
if {[string length $d] == 0} {
forcedelete $f
} else {
write_file $f $d
}
}
}
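# Together, read_file_system and write_file_system snapshot and restore the
# database, journal and WAL files so that a half-committed write can later be
# handed to a different SQLite version for recovery. A minimal sketch of the
# pattern used below:
#
#   set snapshot [read_file_system]   ;# capture test.db, -journal and -wal
#   # ... finish or abandon the pending write ...
#   write_file_system $snapshot       ;# put the captured state back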
#-------------------------------------------------------------------------
# Actual tests begin here.
#
# This first block of tests checks to see that the same database and
# journal files can be used by old and new versions. WAL and wal-index
# files are tested separately below.
#
do_allbackcompat_test {
# Test that database files are backwards compatible.
#
do_test backcompat-1.1.1 { sql1 {
CREATE TABLE t1(a PRIMARY KEY, b UNIQUE);
INSERT INTO t1 VALUES('abc', 'def');
} } {}
do_test backcompat-1.1.2 { sql2 { SELECT * FROM t1; } } {abc def}
do_test backcompat-1.1.3 { sql2 { INSERT INTO t1 VALUES('ghi', 'jkl'); } } {}
do_test backcompat-1.1.4 { sql1 { SELECT * FROM t1; } } {abc def ghi jkl}
do_test backcompat-1.1.5 { sql1 { PRAGMA integrity_check } } {ok}
do_test backcompat-1.1.6 { sql2 { PRAGMA integrity_check } } {ok}
# Test that one version can roll back a hot-journal file left in the
# file-system by the other version.
#
# Each test case is named "backcompat-1.X...", where X is either 0 or
# 1. If it is 0, then the current version creates a journal file that
# the old versions try to read. Otherwise, if X is 1, then the old version
# creates the journal file and we try to read it with the current version.
#
do_test backcompat-1.2.1 { sql1 {
PRAGMA cache_size = 10;
BEGIN;
INSERT INTO t1 VALUES(randomblob(400), randomblob(400));
INSERT INTO t1 SELECT randomblob(400), randomblob(400) FROM t1;
INSERT INTO t1 SELECT randomblob(400), randomblob(400) FROM t1;
INSERT INTO t1 SELECT randomblob(400), randomblob(400) FROM t1;
INSERT INTO t1 SELECT randomblob(400), randomblob(400) FROM t1;
COMMIT;
} } {}
set cksum1 [sql1 {SELECT md5sum(a), md5sum(b) FROM t1}]
set cksum2 [sql2 {SELECT md5sum(a), md5sum(b) FROM t1}]
do_test backcompat-1.2.2 [list string compare $cksum1 $cksum2] 0
do_test backcompat-1.2.3 { sql1 {
BEGIN;
UPDATE t1 SET a = randomblob(500);
} } {}
set data [read_file_system]
do_test backcompat-1.2.4 { sql1 { COMMIT } } {}
set same [expr {[sql2 {SELECT md5sum(a), md5sum(b) FROM t1}] == $cksum2}]
do_test backcompat-1.2.5 [list set {} $same] 0
code1 { db close }
code2 { db close }
write_file_system $data
code1 { sqlite3 db test.db }
code2 { sqlite3 db test.db }
set same [expr {[sql2 {SELECT md5sum(a), md5sum(b) FROM t1}] == $cksum2}]
do_test backcompat-1.2.6 [list set {} $same] 1
do_test backcompat-1.2.7 { sql1 { PRAGMA integrity_check } } {ok}
do_test backcompat-1.2.8 { sql2 { PRAGMA integrity_check } } {ok}
do_test backcompat-2.1 {
sql1 {
CREATE TABLE t2(a UNIQUE, b PRIMARY KEY, c UNIQUE);
INSERT INTO t2 VALUES(1,9,5);
INSERT INTO t2 VALUES(5,5,1);
INSERT INTO t2 VALUES(9,1,9);
SELECT * FROM t2 ORDER BY a;
}
} {1 9 5 5 5 1 9 1 9}
do_test backcompat-2.2 {
sql2 {
SELECT * FROM sqlite_master WHERE rootpage=-1;
SELECT * FROM t2 ORDER BY a;
}
} {1 9 5 5 5 1 9 1 9}
do_test backcompat-2.3 {
sql1 {
SELECT * FROM t2 ORDER BY b;
}
} {9 1 9 5 5 1 1 9 5}
do_test backcompat-2.4 {
sql2 {
SELECT * FROM t2 ORDER BY b;
}
} {9 1 9 5 5 1 1 9 5}
do_test backcompat-2.5 {
sql1 {
SELECT * FROM t2 ORDER BY c;
}
} {5 5 1 1 9 5 9 1 9}
do_test backcompat-2.6 {
sql2 {
SELECT * FROM t2 ORDER BY c;
}
} {5 5 1 1 9 5 9 1 9}
}
foreach k [lsort [array names ::incompatible]] {
puts "ERROR: Detected journal incompatibility with version $k"
}
unset ::incompatible
#-------------------------------------------------------------------------
# Test that WAL and wal-index files may be shared between different
# SQLite versions.
#
do_allbackcompat_test {
if {[code1 {sqlite3 -version}] >= "3.7.0"
&& [code1 {set ::sqlite_options(wal)}]
&& [code2 {sqlite3 -version}] >= "3.7.0"
&& [code2 {set ::sqlite_options(wal)}]
} {
do_test backcompat-2.1.1 { sql1 {
PRAGMA journal_mode = WAL;
CREATE TABLE t1(a PRIMARY KEY, b UNIQUE);
INSERT INTO t1 VALUES('I', 1);
INSERT INTO t1 VALUES('II', 2);
INSERT INTO t1 VALUES('III', 3);
SELECT * FROM t1;
} } {wal I 1 II 2 III 3}
do_test backcompat-2.1.2 { sql2 {
SELECT * FROM t1;
} } {I 1 II 2 III 3}
set data [read_file_system]
code1 {db close}
code2 {db close}
write_file_system $data
code1 {sqlite3 db test.db}
code2 {sqlite3 db test.db}
# The WAL file now in the file-system was created by the [code1]
# process. Check that the [code2] process can recover the log.
#
do_test backcompat-2.1.3 { sql2 {
SELECT * FROM t1;
} } {I 1 II 2 III 3}
do_test backcompat-2.1.4 { sql1 {
SELECT * FROM t1;
} } {I 1 II 2 III 3}
}
}
#-------------------------------------------------------------------------
# Test that FTS3 tables may be read/written by different versions of
# SQLite.
#
ifcapable fts3 {
set contents {
CREATE VIRTUAL TABLE t1 USING fts3(a, b);
}
foreach {num doc} {
one "jk zm jk eczkjblu urvysbnykk sk gnl jk ttvgf hmjf"
two "jk bnhc jjrxpjkb mjpavjuhw fibokdry igju jk zm zm xh"
three "wxe ogttbykvt uhzq xr iaf zf urvysbnykk aayxpmve oacaxgjoo mjpavjuhw"
four "gazrt jk ephknonq myjp uenvbm wuvajhwqz jk zm xnxhf nvfasfh"
five "zm aayxpmve csjqxhgj xnxhf xr jk aayxpmve xnxhf zm zm"
six "sokcyf zm ogyavjvv jk zm fibokdry zm jk igju igju"
seven "vgsld bvgimjik xuprtlyle jk akmikrqyt jk aayxpmve hkfoudzftq ddjj"
eight "zm uhzq ovkyevlgv zk uenvbm csjqxhgj jk vgsld pgybs jk"
nine "zm agmckuiu zexh fibokdry jk uhzq bu tugflixoex xnxhf sk"
} {
append contents "INSERT INTO t1 VALUES('$num', '$doc');"
}
do_allbackcompat_test {
if {[code1 {set ::sqlite_options(fts3)}]
&& [code2 {set ::sqlite_options(fts3)}]
} {
do_test backcompat-3.1 { sql1 $contents } {}
foreach {n q} {
1 "SELECT * FROM t1 ORDER BY a, b"
2 "SELECT rowid FROM t1 WHERE a MATCH 'five'"
3 "SELECT * FROM t1 WHERE a MATCH 'five'"
4 "SELECT offsets(t1) FROM t1 WHERE t1 MATCH 'jk'"
5 "SELECT offsets(t1) FROM t1 WHERE t1 MATCH 'tug* OR eight'"
} {
do_test backcompat-3.2 [list sql1 $q] [sql2 $q]
}
do_test backcompat-3.3 { sql1 {
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
} } {}
foreach {n q} {
1 "SELECT * FROM t1 ORDER BY a, b"
2 "SELECT rowid FROM t1 WHERE a MATCH 'five'"
3 "SELECT * FROM t1 WHERE a MATCH 'five'"
4 "SELECT offsets(t1) FROM t1 WHERE t1 MATCH 'jk'"
5 "SELECT offsets(t1) FROM t1 WHERE t1 MATCH 'tug* OR eight'"
} {
do_test backcompat-3.4 [list sql1 $q] [sql2 $q]
}
set alphabet "a b c d e f g h i j k l m n o p q r s t u v w x y z 1 2 3 4"
for {set i 0} {$i < 900} {incr i} {
set term "[lindex $alphabet [expr $i/30]][lindex $alphabet [expr $i%30]] "
sql1 "INSERT INTO t1 VALUES($i, '[string repeat $term 14]')"
}
foreach {n q} {
1 "SELECT * FROM t1 ORDER BY a, b"
2 "SELECT rowid FROM t1 WHERE a MATCH 'five'"
3 "SELECT * FROM t1 WHERE a MATCH 'five'"
4 "SELECT offsets(t1) FROM t1 WHERE t1 MATCH 'jk'"
5 "SELECT offsets(t1) FROM t1 WHERE t1 MATCH 'tug* OR eight'"
6 "SELECT offsets(t1) FROM t1 WHERE t1 MATCH 'aa'"
7 "SELECT offsets(t1) FROM t1 WHERE t1 MATCH '44'"
8 "SELECT offsets(t1) FROM t1 WHERE t1 MATCH 'a*'"
} {
do_test backcompat-3.5 [list sql1 $q] [sql2 $q]
}
do_test backcompat-3.6 {
sql1 "SELECT optimize(t1) FROM t1 LIMIT 1"
} {{Index optimized}}
foreach {n q} {
1 "SELECT * FROM t1 ORDER BY a, b"
2 "SELECT rowid FROM t1 WHERE a MATCH 'five'"
3 "SELECT * FROM t1 WHERE a MATCH 'five'"
4 "SELECT offsets(t1) FROM t1 WHERE t1 MATCH 'jk'"
5 "SELECT offsets(t1) FROM t1 WHERE t1 MATCH 'tug* OR eight'"
6 "SELECT offsets(t1) FROM t1 WHERE t1 MATCH 'aa'"
7 "SELECT offsets(t1) FROM t1 WHERE t1 MATCH '44'"
8 "SELECT offsets(t1) FROM t1 WHERE t1 MATCH 'a*'"
} {
do_test backcompat-3.7 [list sql1 $q] [sql2 $q]
}
# Now test that an incremental merge can be started by one version
# and finished by another. And that the integrity-check still
# passes.
do_test backcompat-3.8 {
sql1 {
DROP TABLE IF EXISTS t1;
DROP TABLE IF EXISTS t2;
CREATE TABLE t1(docid, words);
CREATE VIRTUAL TABLE t2 USING fts3(words);
}
code1 [list source $testdir/genesis.tcl]
code1 { fts_kjv_genesis }
sql1 {
INSERT INTO t2 SELECT words FROM t1;
INSERT INTO t2 SELECT words FROM t1;
INSERT INTO t2 SELECT words FROM t1;
INSERT INTO t2 SELECT words FROM t1;
INSERT INTO t2 SELECT words FROM t1;
INSERT INTO t2 SELECT words FROM t1;
SELECT level, group_concat(idx, ' ') FROM t2_segdir GROUP BY level;
}
} {0 {0 1 2 3 4 5}}
if {[code1 { set ::sqlite_libversion }] >=3071200
&& [code2 { set ::sqlite_libversion }] >=3071200
} {
do_test backcompat-3.9 {
sql1 { INSERT INTO t2(t2) VALUES('merge=100,4'); }
sql2 { INSERT INTO t2(t2) VALUES('merge=100,4'); }
sql1 { INSERT INTO t2(t2) VALUES('merge=100,4'); }
sql2 { INSERT INTO t2(t2) VALUES('merge=2500,4'); }
sql2 {
SELECT level, group_concat(idx, ' ') FROM t2_segdir GROUP BY level;
}
} {0 {0 1} 1 0}
do_test backcompat-3.10 {
sql1 { INSERT INTO t2(t2) VALUES('integrity-check') }
sql2 { INSERT INTO t2(t2) VALUES('integrity-check') }
} {}
}
}
}
}
#-------------------------------------------------------------------------
# Test that Rtree tables may be read/written by different versions of
# SQLite.
#
ifcapable rtree {
set contents {
CREATE VIRTUAL TABLE t1 USING rtree(id, x1, x2, y1, y2);
}
foreach {id x1 x2 y1 y2} {
1 -47.64 43.87 33.86 34.42 2 -21.51 17.32 2.05 31.04
3 -43.67 -38.33 -19.79 3.43 4 32.41 35.16 9.12 19.82
5 33.28 34.87 14.78 28.26 6 49.31 116.59 -9.87 75.09
7 -14.93 34.51 -17.64 64.09 8 -43.05 23.43 -1.19 69.44
9 44.79 133.56 28.09 80.30 10 -2.66 81.47 -41.38 -10.46
11 -42.89 -3.54 15.76 71.63 12 -3.50 84.96 -11.64 64.95
13 -45.69 26.25 11.14 55.06 14 -44.09 11.23 17.52 44.45
15 36.23 133.49 -19.38 53.67 16 -17.89 81.54 14.64 50.61
17 -41.97 -24.04 -39.43 28.95 18 -5.85 7.76 -6.38 47.02
19 18.82 27.10 42.82 100.09 20 39.17 113.45 26.14 73.47
21 22.31 103.17 49.92 106.05 22 -43.06 40.38 -1.75 76.08
23 2.43 57.27 -14.19 -3.83 24 -47.57 -4.35 8.93 100.06
25 -37.47 49.14 -29.11 8.81 26 -7.86 75.72 49.34 107.42
27 1.53 45.49 20.36 49.74 28 -48.48 32.54 28.81 54.45
29 2.67 39.77 -4.05 13.67 30 4.11 62.88 -47.44 -5.72
31 -21.47 51.75 37.25 116.09 32 45.59 111.37 -6.43 43.64
33 35.23 48.29 23.54 113.33 34 16.61 68.35 -14.69 65.97
35 13.98 16.60 48.66 102.87 36 19.74 23.84 31.15 77.27
37 -27.61 24.43 7.96 94.91 38 -34.77 12.05 -22.60 -6.29
39 -25.83 8.71 -13.48 -12.53 40 -17.11 -1.01 18.06 67.89
41 14.13 71.72 -3.78 39.25 42 23.75 76.00 -16.30 8.23
43 -39.15 28.63 38.12 125.88 44 48.62 86.09 36.49 102.95
45 -31.39 -21.98 2.52 89.78 46 5.65 56.04 15.94 89.10
47 18.28 95.81 46.46 143.08 48 30.93 102.82 -20.08 37.36
49 -20.78 -3.48 -5.58 35.46 50 49.85 90.58 -24.48 46.29
} {
if {$x1 >= $x2 || $y1 >= $y2} { error "$x1 $x2 $y1 $y2" }
append contents "INSERT INTO t1 VALUES($id, $x1, $x2, $y1, $y2);"
}
set queries {
1 "SELECT id FROM t1 WHERE x1>10 AND x2<44"
2 "SELECT id FROM t1 WHERE y1<100"
3 "SELECT id FROM t1 WHERE y1<100 AND x1>0"
4 "SELECT id FROM t1 WHERE y1>10 AND x1>0 AND x2<50 AND y2<550"
}
do_allbackcompat_test {
if {[code1 {set ::sqlite_options(fts3)}]
&& [code2 {set ::sqlite_options(fts3)}]
} {
do_test backcompat-4.1 { sql1 $contents } {}
foreach {n q} $::queries {
do_test backcompat-4.2.$n [list sql1 $q] [sql2 $q]
}
do_test backcompat-4.3 { sql1 {
INSERT INTO t1 SELECT id+100, x1+10.0, x2+10.0, y1-10.0, y2-10.0 FROM t1;
} } {}
foreach {n q} $::queries {
do_test backcompat-4.4.$n [list sql1 $q] [sql2 $q]
}
do_test backcompat-4.5 { sql2 {
INSERT INTO t1 SELECT id+200, x1+20.0, x2+20.0, y1-20.0, y2-20.0 FROM t1;
} } {}
foreach {n q} $::queries {
do_test backcompat-4.6.$n [list sql1 $q] [sql2 $q]
}
}
}
}
finish_test

View File

@@ -0,0 +1,973 @@
# 2009 January 30
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for SQLite library. The
# focus of this file is testing the sqlite3_backup_XXX API.
#
# $Id: backup.test,v 1.11 2009/06/05 17:09:12 drh Exp $
set testdir [file dirname $argv0]
source $testdir/tester.tcl
do_not_use_codec
#---------------------------------------------------------------------
# Test organization:
#
# backup-1.*: Warm-body tests.
#
# backup-2.*: Test backup under various conditions. To and from in-memory
# databases. To and from empty/populated databases. etc.
#
# backup-3.*: Verify that the locking-page (pending byte page) is handled.
#
# backup-4.*: Test various error conditions.
#
# backup-5.*: Test the source database being modified during a backup.
#
# backup-6.*: Test the backup_remaining() and backup_pagecount() APIs.
#
# backup-7.*: Test SQLITE_BUSY and SQLITE_LOCKED errors.
#
# backup-8.*: Test multiple simultaneous backup operations.
#
# backup-9.*: Test that passing a negative argument to backup_step() is
# interpreted as "copy the whole file".
#
# backup-10.*: Test writing the source database mid backup.
#
proc data_checksum {db file} { $db one "SELECT md5sum(a, b) FROM ${file}.t1" }
proc test_contents {name db1 file1 db2 file2} {
$db2 eval {select * from sqlite_master}
$db1 eval {select * from sqlite_master}
set checksum [data_checksum $db2 $file2]
uplevel [list do_test $name [list data_checksum $db1 $file1] $checksum]
}
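# Note: test_contents first reads sqlite_master through both connections
# (which forces each handle to load the current schema) and then asserts that
# data_checksum returns the same md5 for $db1.$file1 as for $db2.$file2.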
do_test backup-1.1 {
execsql {
BEGIN;
CREATE TABLE t1(a, b);
CREATE INDEX i1 ON t1(a, b);
INSERT INTO t1 VALUES(1, randstr(1000,1000));
INSERT INTO t1 VALUES(2, randstr(1000,1000));
INSERT INTO t1 VALUES(3, randstr(1000,1000));
INSERT INTO t1 VALUES(4, randstr(1000,1000));
INSERT INTO t1 VALUES(5, randstr(1000,1000));
COMMIT;
}
} {}
# Sanity check to verify that the [test_contents] proc works.
#
test_contents backup-1.2 db main db main
# Check that it is possible to create and finish backup operations.
#
do_test backup-1.3.1 {
delete_file test2.db
sqlite3 db2 test2.db
sqlite3_backup B db2 main db main
} {B}
do_test backup-1.3.2 {
B finish
} {SQLITE_OK}
do_test backup-1.3.3 {
info commands B
} {}
# Simplest backup operation. Backup test.db to test2.db. test2.db is
# initially empty. test.db uses the default page size.
#
do_test backup-1.4.1 {
sqlite3_backup B db2 main db main
} {B}
do_test backup-1.4.2 {
B step 200
} {SQLITE_DONE}
do_test backup-1.4.3 {
B finish
} {SQLITE_OK}
do_test backup-1.4.4 {
info commands B
} {}
test_contents backup-1.4.5 db2 main db main
db close
db2 close
#
# End of backup-1.* tests.
#---------------------------------------------------------------------
#---------------------------------------------------------------------
# The following tests, backup-2.*, are based on the following procedure:
#
# 1) Populate the source database.
# 2) Populate the destination database.
# 3) Run the backup to completion. (backup-2.*.1)
# 4) Integrity check the destination db. (backup-2.*.2)
# 5) Check that the contents of the destination db are the same as those
# of the source db. (backup-2.*.3)
#
# The test is run with all possible combinations of the following
# input parameters, except that if the destination is an in-memory
# database, the only page size tested is 1024 bytes (the same as the
# source page-size).
#
# * Source database is an in-memory database, OR
# * Source database is a file-backed database.
#
# * Target database is an in-memory database, OR
# * Target database is a file-backed database.
#
# * Destination database is a main file, OR
# * Destination database is an attached file, OR
# * Destination database is a temp database.
#
# * Target database is empty (zero bytes), OR
# * Target database is larger than the source, OR
# * Target database is smaller than the source.
#
# * Target database page-size is the same as the source, OR
# * Target database page-size is larger than the source, OR
# * Target database page-size is smaller than the source.
#
# * Each call to step copies a single page, OR
# * A single call to step copies the entire source database.
#
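# The loops below map onto the combinations above as follows: zSrcFile and
# zDestFile choose file-backed vs in-memory databases, zOpenScript chooses an
# attached, main or temp destination, rows_dest {0 3 10} gives an empty target
# and targets smaller and larger than the source, pgsz_dest {512 1024 2048}
# varies the target page size, and nPagePerStep {1 200} selects page-at-a-time
# vs whole-file backup steps.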
set iTest 1
foreach zSrcFile {test.db :memory:} {
foreach zDestFile {test2.db :memory:} {
foreach zOpenScript [list {
sqlite3 db $zSrcFile
sqlite3 db2 $zSrcFile
db2 eval "ATTACH '$zDestFile' AS bak"
set db_dest db2
set file_dest bak
} {
sqlite3 db $zSrcFile
sqlite3 db2 $zDestFile
set db_dest db2
set file_dest main
} {
sqlite3 db $zSrcFile
sqlite3 db2 $zDestFile
set db_dest db2
set file_dest temp
}] {
foreach rows_dest {0 3 10} {
foreach pgsz_dest {512 1024 2048} {
foreach nPagePerStep {1 200} {
# Open the databases.
catch { delete_file test.db }
catch { delete_file test2.db }
eval $zOpenScript
# Set to true if copying to an in-memory destination. Copying to an
# in-memory destination is only possible if the initial destination
# page size is the same as the source page size (in this case 1024 bytes).
#
set isMemDest [expr {
$zDestFile eq ":memory:" || $file_dest eq "temp" && $TEMP_STORE>=2
}]
if { $isMemDest==0 || $pgsz_dest == 1024 } {
if 0 {
puts -nonewline "Test $iTest: src=$zSrcFile dest=$zDestFile"
puts -nonewline " (as $db_dest.$file_dest)"
puts -nonewline " rows_dest=$rows_dest pgsz_dest=$pgsz_dest"
puts ""
}
# Set up the content of the source database.
execsql {
PRAGMA page_size = 1024;
BEGIN;
CREATE TABLE t1(a, b);
CREATE INDEX i1 ON t1(a, b);
INSERT INTO t1 VALUES(1, randstr(1000,1000));
INSERT INTO t1 VALUES(2, randstr(1000,1000));
INSERT INTO t1 VALUES(3, randstr(1000,1000));
INSERT INTO t1 VALUES(4, randstr(1000,1000));
INSERT INTO t1 VALUES(5, randstr(1000,1000));
COMMIT;
}
# Set up the content of the target database.
execsql "PRAGMA ${file_dest}.page_size = ${pgsz_dest}" $db_dest
if {$rows_dest != 0} {
execsql "
BEGIN;
CREATE TABLE ${file_dest}.t1(a, b);
CREATE INDEX ${file_dest}.i1 ON t1(a, b);
" $db_dest
for {set ii 0} {$ii < $rows_dest} {incr ii} {
execsql "
INSERT INTO ${file_dest}.t1 VALUES(1, randstr(1000,1000))
" $db_dest
}
execsql COMMIT $db_dest
}
# Backup the source database.
do_test backup-2.$iTest.1 {
sqlite3_backup B $db_dest $file_dest db main
while {[B step $nPagePerStep]=="SQLITE_OK"} {}
B finish
} {SQLITE_OK}
# Run integrity check on the backup.
do_test backup-2.$iTest.2 {
execsql "PRAGMA ${file_dest}.integrity_check" $db_dest
} {ok}
test_contents backup-2.$iTest.3 db main $db_dest $file_dest
}
db close
catch {db2 close}
incr iTest
} } } } } }
#
# End of backup-2.* tests.
#---------------------------------------------------------------------
#---------------------------------------------------------------------
# These tests, backup-3.*, ensure that nothing goes wrong if either
# the source or destination database is large enough to include the
# locking-page (the page that contains the range of bytes that
# the locks are applied to). These tests assume that the pending
# byte is at offset 0x00010000 (64KB offset), as set by tester.tcl,
# not at the 1GB offset as it usually is.
#
# The test procedure is as follows (same procedure as used for
# the backup-2.* tests):
#
# 1) Populate the source database.
# 2) Populate the destination database.
# 3) Run the backup to completion. (backup-3.*.1)
# 4) Integrity check the destination db. (backup-3.*.2)
# 5) Check that the contents of the destination db are the same as those
# of the source db. (backup-3.*.3)
#
# The test procedure is run with the following parameters varied:
#
# * Source database includes pending-byte page.
# * Source database does not include pending-byte page.
#
# * Target database includes pending-byte page.
# * Target database does not include pending-byte page.
#
# * Target database page-size is the same as the source, OR
# * Target database page-size is larger than the source, OR
# * Target database page-size is smaller than the source.
#
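# The 64KB pending-byte offset mentioned above is not the library default
# (1GB). A minimal sketch of how a harness can relocate it, assuming the
# testfixture command used by tester.tcl is available:
#
#   sqlite3_test_control_pending_byte 0x0010000
#
# With 1024-byte pages this puts the locking page at page 65, which is why
# the nSrcPg values below straddle 64/65/66.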
set iTest 1
foreach nSrcPg {10 64 65 66 100} {
foreach nDestRow {10 100} {
foreach nDestPgsz {512 1024 2048 4096} {
catch { delete_file test.db }
catch { delete_file test2.db }
sqlite3 db test.db
sqlite3 db2 test2.db
# Set up the content of the two databases.
#
execsql { PRAGMA page_size = 1024 }
execsql "PRAGMA page_size = $nDestPgsz" db2
foreach db {db db2} {
execsql {
BEGIN;
CREATE TABLE t1(a, b);
CREATE INDEX i1 ON t1(a, b);
COMMIT;
} $db
}
while {[file size test.db]/1024 < $nSrcPg} {
execsql { INSERT INTO t1 VALUES($ii, randstr(200,200)) }
}
for {set ii 0} {$ii < $nDestRow} {incr ii} {
execsql { INSERT INTO t1 VALUES($ii, randstr(1000,1000)) } db2
}
# Backup the source database.
do_test backup-3.$iTest.1 {
sqlite3_backup B db main db2 main
while {[B step 10]=="SQLITE_OK"} {}
B finish
} {SQLITE_OK}
# Run integrity check on the backup.
do_test backup-3.$iTest.2 {
execsql "PRAGMA integrity_check" db2
} {ok}
test_contents backup-3.$iTest.3 db main db2 main
db close
db2 close
incr iTest
}
}
}
#--------------------------------------------------------------------
do_test backup-3.$iTest.1 {
catch { forcedelete test.db }
catch { forcedelete test2.db }
sqlite3 db test.db
set iTab 1
db eval { PRAGMA page_size = 512 }
while {[file size test.db] <= $::sqlite_pending_byte} {
db eval "CREATE TABLE t${iTab}(a, b, c)"
incr iTab
}
sqlite3 db2 test2.db
db2 eval { PRAGMA page_size = 4096 }
while {[file size test2.db] < $::sqlite_pending_byte} {
db2 eval "CREATE TABLE t${iTab}(a, b, c)"
incr iTab
}
sqlite3_backup B db2 main db main
B step -1
} {SQLITE_DONE}
do_test backup-3.$iTest.2 {
B finish
} {SQLITE_OK}
#
# End of backup-3.* tests.
#---------------------------------------------------------------------
#---------------------------------------------------------------------
# The following tests, backup-4.*, test various error conditions:
#
# backup-4.1.*: Test invalid database names.
#
# backup-4.2.*: Test that the source database cannot be detached while
# a backup is in progress.
#
# backup-4.3.*: Test that the source database handle cannot be closed
# while a backup is in progress.
#
# backup-4.4.*: Test an attempt to specify the same handle for the
# source and destination databases.
#
# backup-4.5.*: Test that an in-memory destination with a different
# page-size to the source database is an error.
#
sqlite3 db test.db
sqlite3 db2 test2.db
do_test backup-4.1.1 {
catch { sqlite3_backup B db aux db2 main }
} {1}
do_test backup-4.1.2 {
sqlite3_errmsg db
} {unknown database aux}
do_test backup-4.1.3 {
catch { sqlite3_backup B db main db2 aux }
} {1}
do_test backup-4.1.4 {
sqlite3_errmsg db
} {unknown database aux}
do_test backup-4.2.1 {
catch { forcedelete test3.db }
catch { forcedelete test4.db }
execsql {
ATTACH 'test3.db' AS aux1;
CREATE TABLE aux1.t1(a, b);
}
execsql {
ATTACH 'test4.db' AS aux2;
CREATE TABLE aux2.t2(a, b);
} db2
sqlite3_backup B db aux1 db2 aux2
} {B}
do_test backup-4.2.2 {
catchsql { DETACH aux2 } db2
} {1 {database aux2 is locked}}
do_test backup-4.2.3 {
B step 50
} {SQLITE_DONE}
do_test backup-4.2.4 {
B finish
} {SQLITE_OK}
do_test backup-4.3.1 {
sqlite3_backup B db aux1 db2 aux2
} {B}
do_test backup-4.3.2 {
db2 cache flush
sqlite3_close db2
} {SQLITE_BUSY}
do_test backup-4.3.3 {
sqlite3_errmsg db2
} {unable to close due to unfinalized statements or unfinished backups}
do_test backup-4.3.4 {
B step 50
} {SQLITE_DONE}
do_test backup-4.3.5 {
B finish
} {SQLITE_OK}
do_test backup-4.4.1 {
set rc [catch {sqlite3_backup B db main db aux1}]
list $rc [sqlite3_errcode db] [sqlite3_errmsg db]
} {1 SQLITE_ERROR {source and destination must be distinct}}
db close
db2 close
do_test backup-4.5.1 {
catch { forcedelete test.db }
sqlite3 db test.db
sqlite3 db2 :memory:
execsql {
CREATE TABLE t1(a, b);
INSERT INTO t1 VALUES(1, 2);
}
execsql {
PRAGMA page_size = 4096;
CREATE TABLE t2(a, b);
INSERT INTO t2 VALUES(3, 4);
} db2
sqlite3_backup B db2 main db main
} {B}
do_test backup-4.5.2 {
B step 5000
} {SQLITE_READONLY}
do_test backup-4.5.3 {
B finish
} {SQLITE_READONLY}
db close
db2 close
#
# End of backup-4.* tests.
#---------------------------------------------------------------------
#---------------------------------------------------------------------
# The following tests, backup-5.*, test that the backup works properly
# when the source database is modified during the backup. Test cases
# are organized as follows:
#
# backup-5.x.1.*: Nothing special. Modify the database mid-backup.
#
# backup-5.x.2.*: Modify the database mid-backup so that one or more
# pages are written out due to cache stress. Then
# rollback the transaction.
#
# backup-5.x.3.*: Database is vacuumed.
#
# backup-5.x.4.*: Database is vacuumed and the page-size modified.
#
# backup-5.x.5.*: Database is shrunk via incr-vacuum.
#
# Each test is run three times, in the following configurations:
#
# 1) Backing up file-to-file. The writer writes via an external pager.
# 2) Backing up file-to-file. The writer writes via the same pager as
# is used by the backup operation.
# 3) Backing up memory-to-file.
#
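# The three configurations correspond to the {writer file} pairs in the
# [foreach] below: writer "db3" is a second connection on test.db (an
# external pager), writer "db" writes through the same connection the backup
# reads from, and file ":memory:" gives the memory-to-file case.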
set iTest 0
forcedelete bak.db-wal
foreach {writer file} {db test.db db3 test.db db :memory:} {
incr iTest
catch { delete_file bak.db }
sqlite3 db2 bak.db
catch { delete_file $file }
sqlite3 db $file
sqlite3 db3 $file
do_test backup-5.$iTest.1.1 {
execsql {
BEGIN;
CREATE TABLE t1(a, b);
CREATE INDEX i1 ON t1(a, b);
INSERT INTO t1 VALUES(1, randstr(1000,1000));
INSERT INTO t1 VALUES(2, randstr(1000,1000));
INSERT INTO t1 VALUES(3, randstr(1000,1000));
INSERT INTO t1 VALUES(4, randstr(1000,1000));
INSERT INTO t1 VALUES(5, randstr(1000,1000));
COMMIT;
}
expr {[execsql {PRAGMA page_count}] > 10}
} {1}
do_test backup-5.$iTest.1.2 {
sqlite3_backup B db2 main db main
B step 5
} {SQLITE_OK}
do_test backup-5.$iTest.1.3 {
execsql { UPDATE t1 SET a = a + 1 } $writer
B step 50
} {SQLITE_DONE}
do_test backup-5.$iTest.1.4 {
B finish
} {SQLITE_OK}
integrity_check backup-5.$iTest.1.5 db2
test_contents backup-5.$iTest.1.6 db main db2 main
do_test backup-5.$iTest.2.1 {
execsql {
PRAGMA cache_size = 10;
BEGIN;
INSERT INTO t1 SELECT '', randstr(1000,1000) FROM t1;
INSERT INTO t1 SELECT '', randstr(1000,1000) FROM t1;
INSERT INTO t1 SELECT '', randstr(1000,1000) FROM t1;
INSERT INTO t1 SELECT '', randstr(1000,1000) FROM t1;
COMMIT;
}
} {}
do_test backup-5.$iTest.2.2 {
sqlite3_backup B db2 main db main
B step 50
} {SQLITE_OK}
do_test backup-5.$iTest.2.3 {
execsql {
BEGIN;
UPDATE t1 SET a = a + 1;
ROLLBACK;
} $writer
B step 5000
} {SQLITE_DONE}
do_test backup-5.$iTest.2.4 {
B finish
} {SQLITE_OK}
integrity_check backup-5.$iTest.2.5 db2
test_contents backup-5.$iTest.2.6 db main db2 main
do_test backup-5.$iTest.3.1 {
execsql { UPDATE t1 SET b = randstr(1000,1000) }
} {}
do_test backup-5.$iTest.3.2 {
sqlite3_backup B db2 main db main
B step 50
} {SQLITE_OK}
do_test backup-5.$iTest.3.3 {
execsql { VACUUM } $writer
B step 5000
} {SQLITE_DONE}
do_test backup-5.$iTest.3.4 {
B finish
} {SQLITE_OK}
integrity_check backup-5.$iTest.3.5 db2
test_contents backup-5.$iTest.3.6 db main db2 main
do_test backup-5.$iTest.4.1 {
execsql { UPDATE t1 SET b = randstr(1000,1000) }
} {}
do_test backup-5.$iTest.4.2 {
sqlite3_backup B db2 main db main
B step 50
} {SQLITE_OK}
do_test backup-5.$iTest.4.3 {
execsql {
PRAGMA page_size = 2048;
VACUUM;
} $writer
B step 5000
} {SQLITE_DONE}
do_test backup-5.$iTest.4.4 {
B finish
} {SQLITE_OK}
integrity_check backup-5.$iTest.4.5 db2
test_contents backup-5.$iTest.4.6 db main db2 main
catch {db close}
catch {db2 close}
catch {db3 close}
catch { delete_file bak.db }
sqlite3 db2 bak.db
catch { delete_file $file }
sqlite3 db $file
sqlite3 db3 $file
do_test backup-5.$iTest.5.1 {
execsql {
PRAGMA auto_vacuum = incremental;
BEGIN;
CREATE TABLE t1(a, b);
CREATE INDEX i1 ON t1(a, b);
INSERT INTO t1 VALUES(1, randstr(1000,1000));
INSERT INTO t1 VALUES(2, randstr(1000,1000));
INSERT INTO t1 VALUES(3, randstr(1000,1000));
INSERT INTO t1 VALUES(4, randstr(1000,1000));
INSERT INTO t1 VALUES(5, randstr(1000,1000));
COMMIT;
}
} {}
do_test backup-5.$iTest.5.2 {
sqlite3_backup B db2 main db main
B step 8
} {SQLITE_OK}
do_test backup-5.$iTest.5.3 {
execsql {
DELETE FROM t1;
PRAGMA incremental_vacuum;
} $writer
B step 50
} {SQLITE_DONE}
do_test backup-5.$iTest.5.4 {
B finish
} {SQLITE_OK}
integrity_check backup-5.$iTest.5.5 db2
test_contents backup-5.$iTest.5.6 db main db2 main
catch {db close}
catch {db2 close}
catch {db3 close}
}
#
# End of backup-5.* tests.
#---------------------------------------------------------------------
#---------------------------------------------------------------------
# Test the sqlite3_backup_remaining() and backup_pagecount() APIs.
#
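# For illustration only (not part of the original test suite): after each
# call to backup_step(), [B remaining] reports the number of source pages
# still to be copied and [B pagecount] the total page count of the source,
# so a sketch of the relationship checked below might look like:
#
#   sqlite3_backup B db2 main db main
#   B step 1                                    ;# copy one page
#   expr {[B remaining] + 1 == [B pagecount]}   ;# true while the source is unchanged
#   B finish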
do_test backup-6.1 {
catch { forcedelete test.db }
catch { forcedelete test2.db }
sqlite3 db test.db
sqlite3 db2 test2.db
execsql {
BEGIN;
CREATE TABLE t1(a, b);
CREATE INDEX i1 ON t1(a, b);
INSERT INTO t1 VALUES(1, randstr(1000,1000));
INSERT INTO t1 VALUES(2, randstr(1000,1000));
INSERT INTO t1 VALUES(3, randstr(1000,1000));
INSERT INTO t1 VALUES(4, randstr(1000,1000));
INSERT INTO t1 VALUES(5, randstr(1000,1000));
COMMIT;
}
} {}
do_test backup-6.2 {
set nTotal [expr {[file size test.db]/1024}]
sqlite3_backup B db2 main db main
B step 1
} {SQLITE_OK}
do_test backup-6.3 {
B pagecount
} $nTotal
do_test backup-6.4 {
B remaining
} [expr $nTotal-1]
do_test backup-6.5 {
B step 5
list [B remaining] [B pagecount]
} [list [expr $nTotal-6] $nTotal]
do_test backup-6.6 {
execsql { CREATE TABLE t2(a PRIMARY KEY, b) }
B step 1
list [B remaining] [B pagecount]
} [list [expr $nTotal-5] [expr $nTotal+2]]
do_test backup-6.X {
B finish
} {SQLITE_OK}
catch {db close}
catch {db2 close}
#---------------------------------------------------------------------
# Test cases backup-7.* test that SQLITE_BUSY and SQLITE_LOCKED errors
# are returned correctly:
#
# backup-7.1.*: Source database is externally locked (return SQLITE_BUSY).
#
# backup-7.2.*: Attempt to step the backup process while a
# write-transaction is underway on the source pager (return
# SQLITE_LOCKED).
#
# backup-7.3.*: Destination database is externally locked (return SQLITE_BUSY).
#
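# For illustration only, a minimal sketch of the external-lock case exercised
# by backup-7.1.* (hypothetical handle and file names, same testfixture
# commands as used below):
#
#   sqlite3 dbA src.db ; sqlite3 dbB dst.db ; sqlite3 dbC src.db
#   sqlite3_backup B dbB main dbA main
#   dbC eval { BEGIN EXCLUSIVE }    ;# external lock on the source file
#   B step 5                        ;# => SQLITE_BUSY
#   dbC eval { ROLLBACK }
#   B step 5                        ;# => SQLITE_OK again
#   B finish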
do_test backup-7.0 {
catch { forcedelete test.db }
catch { forcedelete test2.db }
sqlite3 db2 test2.db
sqlite3 db test.db
execsql {
CREATE TABLE t1(a, b);
CREATE INDEX i1 ON t1(a, b);
INSERT INTO t1 VALUES(1, randstr(1000,1000));
INSERT INTO t1 SELECT a+ 1, randstr(1000,1000) FROM t1;
INSERT INTO t1 SELECT a+ 2, randstr(1000,1000) FROM t1;
INSERT INTO t1 SELECT a+ 4, randstr(1000,1000) FROM t1;
INSERT INTO t1 SELECT a+ 8, randstr(1000,1000) FROM t1;
INSERT INTO t1 SELECT a+16, randstr(1000,1000) FROM t1;
INSERT INTO t1 SELECT a+32, randstr(1000,1000) FROM t1;
INSERT INTO t1 SELECT a+64, randstr(1000,1000) FROM t1;
}
} {}
do_test backup-7.1.1 {
sqlite3_backup B db2 main db main
B step 5
} {SQLITE_OK}
do_test backup-7.1.2 {
sqlite3 db3 test.db
execsql { BEGIN EXCLUSIVE } db3
B step 5
} {SQLITE_BUSY}
do_test backup-7.1.3 {
execsql { ROLLBACK } db3
B step 5
} {SQLITE_OK}
do_test backup-7.2.1 {
execsql {
BEGIN;
INSERT INTO t1 VALUES(1, 4);
}
} {}
do_test backup-7.2.2 {
B step 5000
} {SQLITE_BUSY}
do_test backup-7.2.3 {
execsql { ROLLBACK }
B step 5000
} {SQLITE_DONE}
do_test backup-7.2.4 {
B finish
} {SQLITE_OK}
test_contents backup-7.2.5 db main db2 main
integrity_check backup-7.3.6 db2
do_test backup-7.3.1 {
db2 close
db3 close
forcedelete test2.db
sqlite3 db2 test2.db
sqlite3 db3 test2.db
sqlite3_backup B db2 main db main
execsql { BEGIN ; CREATE TABLE t2(a, b); } db3
B step 5
} {SQLITE_BUSY}
do_test backup-7.3.2 {
execsql { COMMIT } db3
B step 5000
} {SQLITE_DONE}
do_test backup-7.3.3 {
B finish
} {SQLITE_OK}
test_contents backup-7.3.4 db main db2 main
integrity_check backup-7.3.5 db2
catch { db2 close }
catch { db3 close }
#-----------------------------------------------------------------------
# The following tests, backup-8.*, test attaching multiple backup
# processes to the same source database, and reading from the source
# database while a read transaction is active on it (see the illustrative
# sketch below).
#
# These tests reuse the database "test.db" left over from backup-7.*.
#
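# For illustration only, a rough sketch of what backup-8.* exercises: two
# backup objects may be attached to the same source database at once, and a
# read transaction held on the source does not prevent either of them from
# stepping to completion:
#
#   sqlite3_backup B2 db2 main db main
#   sqlite3_backup B3 db3 main db main
#   db eval { BEGIN; SELECT * FROM sqlite_master; }
#   B2 step -1 ; B3 step -1         ;# both => SQLITE_DONE
#   B2 finish  ; B3 finish
#   db eval COMMIT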
do_test backup-8.1 {
catch { forcedelete test2.db }
catch { forcedelete test3.db }
sqlite3 db2 test2.db
sqlite3 db3 test3.db
sqlite3_backup B2 db2 main db main
sqlite3_backup B3 db3 main db main
list [B2 finish] [B3 finish]
} {SQLITE_OK SQLITE_OK}
do_test backup-8.2 {
sqlite3_backup B3 db3 main db main
sqlite3_backup B2 db2 main db main
list [B2 finish] [B3 finish]
} {SQLITE_OK SQLITE_OK}
do_test backup-8.3 {
sqlite3_backup B2 db2 main db main
sqlite3_backup B3 db3 main db main
B2 step 5
} {SQLITE_OK}
do_test backup-8.4 {
execsql {
BEGIN;
SELECT * FROM sqlite_master;
}
B3 step 5
} {SQLITE_OK}
do_test backup-8.5 {
list [B3 step 5000] [B3 finish]
} {SQLITE_DONE SQLITE_OK}
do_test backup-8.6 {
list [B2 step 5000] [B2 finish]
} {SQLITE_DONE SQLITE_OK}
test_contents backup-8.7 db main db2 main
test_contents backup-8.8 db main db3 main
do_test backup-8.9 {
execsql { PRAGMA lock_status }
} {main shared temp closed}
do_test backup-8.10 {
execsql COMMIT
} {}
catch { db2 close }
catch { db3 close }
#-----------------------------------------------------------------------
# The following tests, backup-9.*, test that:
#
# * Passing 0 as an argument to sqlite3_backup_step() means no pages
# are backed up (backup-9.1.*), and
# * Passing a negative value as an argument to sqlite3_backup_step() means
# all pages are backed up (backup-9.2.*).
#
# These tests reuse the database "test.db" left over from backup-7.*.
#
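# For illustration only, the two step() conventions checked below, sketched
# with the same testfixture commands used elsewhere in this file:
#
#   sqlite3_backup B db2 main db main
#   B step 0       ;# no pages are copied; [B remaining] is unchanged
#   B step -1      ;# all remaining pages are copied; returns SQLITE_DONE
#   B finish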
do_test backup-9.1.1 {
sqlite3 db2 test2.db
sqlite3_backup B db2 main db main
B step 1
} {SQLITE_OK}
do_test backup-9.1.2 {
set nRemaining [B remaining]
expr {$nRemaining>100}
} {1}
do_test backup-9.1.3 {
B step 0
} {SQLITE_OK}
do_test backup-9.1.4 {
B remaining
} $nRemaining
do_test backup-9.2.1 {
B step -1
} {SQLITE_DONE}
do_test backup-9.2.2 {
B remaining
} {0}
do_test backup-9.2.3 {
B finish
} {SQLITE_OK}
catch {db2 close}
ifcapable memorymanage {
db close
forcedelete test.db
forcedelete bak.db
sqlite3 db test.db
sqlite3 db2 test.db
sqlite3 db3 bak.db
do_test backup-10.1.1 {
execsql {
BEGIN;
CREATE TABLE t1(a, b);
INSERT INTO t1 VALUES(1, randstr(1000,1000));
INSERT INTO t1 VALUES(2, randstr(1000,1000));
INSERT INTO t1 VALUES(3, randstr(1000,1000));
INSERT INTO t1 VALUES(4, randstr(1000,1000));
INSERT INTO t1 VALUES(5, randstr(1000,1000));
CREATE INDEX i1 ON t1(a, b);
COMMIT;
}
} {}
do_test backup-10.1.2 {
sqlite3_backup B db3 main db2 main
B step 5
} {SQLITE_OK}
do_test backup-10.1.3 {
execsql {
UPDATE t1 SET b = randstr(500,500);
}
} {}
sqlite3_release_memory [expr 1024*1024]
do_test backup-10.1.3 {
B step 50
} {SQLITE_DONE}
do_test backup-10.1.4 {
B finish
} {SQLITE_OK}
do_test backup-10.1.5 {
execsql { PRAGMA integrity_check } db3
} {ok}
db2 close
db3 close
}
#-----------------------------------------------------------------------
# Test that if the database is written to via the same database handle being
# used as the source by a backup operation:
#
# 10.1.*: If the db is a file, the backup is not restarted.
# 10.2.*: If the db is in-memory, the backup is restarted.
#
db close
forcedelete test.db test.db-journal
foreach {tn file rc} {
1 test.db SQLITE_DONE
2 :memory: SQLITE_OK
} {
do_test backup-10.$tn.1 {
sqlite3 db $file
execsql {
CREATE TABLE t1(a INTEGER PRIMARY KEY, b BLOB);
BEGIN;
INSERT INTO t1 VALUES(NULL, randomblob(200));
INSERT INTO t1 SELECT NULL, randomblob(200) FROM t1;
INSERT INTO t1 SELECT NULL, randomblob(200) FROM t1;
INSERT INTO t1 SELECT NULL, randomblob(200) FROM t1;
INSERT INTO t1 SELECT NULL, randomblob(200) FROM t1;
INSERT INTO t1 SELECT NULL, randomblob(200) FROM t1;
INSERT INTO t1 SELECT NULL, randomblob(200) FROM t1;
INSERT INTO t1 SELECT NULL, randomblob(200) FROM t1;
INSERT INTO t1 SELECT NULL, randomblob(200) FROM t1;
COMMIT;
SELECT count(*) FROM t1;
}
} {256}
do_test backup-10.$tn.2 {
set pgs [execsql {pragma page_count}]
expr {$pgs > 50 && $pgs < 75}
} {1}
do_test backup-10.$tn.3 {
forcedelete bak.db bak.db-journal
sqlite3 db2 bak.db
sqlite3_backup B db2 main db main
B step 50
} {SQLITE_OK}
do_test backup-10.$tn.4 {
execsql { UPDATE t1 SET b = randomblob(200) WHERE a IN (1, 250) }
} {}
do_test backup-10.$tn.5 {
B step 50
} $rc
do_test backup-10.$tn.6 {
B finish
} {SQLITE_OK}
db2 close
}
finish_test

View File

@ -0,0 +1,186 @@
# 2009 February 4
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for SQLite library. The
# focus of this file is testing the "backup" and "restore" methods
# of the TCL interface - methods which are based on the
# sqlite3_backup_XXX API.
#
# $Id: backup2.test,v 1.4 2009/04/07 14:14:23 danielk1977 Exp $
set testdir [file dirname $argv0]
source $testdir/tester.tcl
do_not_use_codec
ifcapable !trigger||!view { finish_test ; return }
# Fill a database with test data.
#
do_test backup2-1 {
db eval {
CREATE TABLE t1(x);
INSERT INTO t1 VALUES(randstr(8000,8000));
INSERT INTO t1 VALUES(randstr(8000,8000));
INSERT INTO t1 VALUES(randstr(8000,8000));
INSERT INTO t1 VALUES(randstr(8000,8000));
INSERT INTO t1 VALUES(randstr(8000,8000));
CREATE VIEW v1 AS SELECT substr(x,10,10) FROM t1;
CREATE TABLE t2(a,b);
INSERT INTO t2 VALUES(1,2);
INSERT INTO t2 VALUES(2,4);
INSERT INTO t2 SELECT a+2, (a+2)*2 FROM t2;
INSERT INTO t2 SELECT a+4, (a+4)*2 FROM t2;
INSERT INTO t2 SELECT a+8, (a+8)*2 FROM t2;
INSERT INTO t2 SELECT a+16, (a+16)*2 FROM t2;
INSERT INTO t2 SELECT a+32, (a+32)*2 FROM t2;
INSERT INTO t2 SELECT a+64, (a+64)*2 FROM t2;
INSERT INTO t2 SELECT a+128, (a+128)*2 FROM t2;
CREATE INDEX t2i1 ON t2(a,b);
CREATE TRIGGER r1 AFTER INSERT ON t2 BEGIN
SELECT 'hello';
END;
ANALYZE;
PRAGMA integrity_check;
}
} {ok}
# Remember a check-sum on the database file.
#
unset -nocomplain cksum
set cksum [dbcksum db main]
# Make a backup of the test data. Verify that the backup copy
# is identical to the original.
#
do_test backup2-2 {
forcedelete bu1.db
db backup bu1.db
sqlite3 db2 bu1.db
dbcksum db2 main
} $cksum
# Delete the original. Restore from backup. Verify the content is
# unchanged.
#
do_test backup2-3.1 {
db close
forcedelete test.db test.db-journal
sqlite3 db test.db
db2 eval {BEGIN EXCLUSIVE}
set rc [catch {db restore bu1.db} res]
lappend rc $res
db2 eval {ROLLBACK}
set rc
} {1 {restore failed: source database busy}}
do_test backup2-3.2 {
db close
forcedelete test.db test.db-journal
sqlite3 db test.db
db restore bu1.db
dbcksum db main
} $cksum
# Use alternative databases - other than "main".
#
do_test backup2-4 {
db restore temp bu1.db
dbcksum db temp
} $cksum
do_test backup2-5 {
db2 close
forcedelete bu1.db bu2.db
db backup temp bu2.db
sqlite3 db2 bu2.db
dbcksum db2 main
} $cksum
# Try to backup to a readonly file.
#
do_test backup2-6 {
db2 close
catch {file attributes bu2.db -permissions r--------}
catch {file attributes bu2.db -readonly 1}
set rc [catch {db backup temp bu2.db} res]
lappend rc $res
} {1 {backup failed: attempt to write a readonly database}}
# Try to backup to something that is not a database file.
#
do_test backup2-7 {
catch {file attributes bu2.db -readonly 0}
catch {file attributes bu2.db -permissions rw-------}
set out [open bu2.db w]
puts $out "This is not a valid database file"
close $out
set rc [catch {db backup temp bu2.db} res]
lappend rc $res
} {1 {backup failed: file is encrypted or is not a database}}
# Try to backup database that does not exist
#
do_test backup2-8 {
forcedelete bu1.db
set rc [catch {db backup aux1 bu1.db} res]
lappend rc $res
} {1 {backup failed: unknown database aux1}}
# Invalid syntax on the backup method
#
do_test backup2-9 {
set rc [catch {db backup} res]
lappend rc $res
} {1 {wrong # args: should be "db backup ?DATABASE? FILENAME"}}
# Try to restore from an unreadable file.
#
if {$tcl_platform(platform)=="windows"} {
set msg {cannot open source database: unable to open database file}
} elseif {$tcl_platform(os)=="OpenBSD"} {
set msg {restore failed: file is encrypted or is not a database}
} else {
set msg {cannot open source database: disk I/O error}
}
do_test backup2-10 {
forcedelete bu3.db
file mkdir bu3.db
set rc [catch {db restore temp bu3.db} res]
lappend rc $res
} [list 1 $msg]
# Try to restore from something that is not a database file.
#
do_test backup2-11 {
set rc [catch {db restore temp bu2.db} res]
lappend rc $res
} {1 {restore failed: file is encrypted or is not a database}}
# Try to restore a database that does not exist
#
do_test backup2-12 {
set rc [catch {db restore aux1 bu2.db} res]
lappend rc $res
} {1 {restore failed: unknown database aux1}}
do_test backup2-13 {
forcedelete bu4.db
set rc [catch {db restore bu4.db} res]
lappend rc $res
} {1 {cannot open source database: unable to open database file}}
# Invalid syntax on the restore method
#
do_test backup2-14 {
set rc [catch {db restore} res]
lappend rc $res
} {1 {wrong # args: should be "db restore ?DATABASE? FILENAME"}}
forcedelete bu1.db bu2.db bu3.db bu4.db
finish_test

View File

@ -0,0 +1,103 @@
# 2012 October 13
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
#
# The tests in this file verify that if an empty database (zero bytes in
# size) is used as the source of a backup operation, the final destination
# database is one page in size.
#
# The destination must consist of at least one page as truncating a
# database file to zero bytes is equivalent to resetting the database
# schema cookie and change counter. Doing that could cause other clients
# to become confused and continue using out-of-date cache data.
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
set testprefix backup4
#-------------------------------------------------------------------------
# At one point this test was failing because [db] was using an out of
# date schema in test case 1.2.
#
do_execsql_test 1.0 {
CREATE TABLE t1(x, y, UNIQUE(x, y));
INSERT INTO t1 VALUES('one', 'two');
SELECT * FROM t1 WHERE x='one';
PRAGMA integrity_check;
} {one two ok}
do_test 1.1 {
sqlite3 db1 :memory:
db1 backup test.db
sqlite3 db1 test.db
db1 eval {
CREATE TABLE t1(x, y);
INSERT INTO t1 VALUES('one', 'two');
}
db1 close
} {}
do_execsql_test 1.2 {
SELECT * FROM t1 WHERE x='one';
PRAGMA integrity_check;
} {one two ok}
db close
forcedelete test.db
forcedelete test.db2
sqlite3 db test.db
#-------------------------------------------------------------------------
# Test that if the source is zero bytes, the destination database
# consists of a single page only.
#
do_execsql_test 2.1 {
CREATE TABLE t1(a, b);
CREATE INDEX i1 ON t1(a, b);
}
do_test 2.2 { file size test.db } [expr $AUTOVACUUM ? 4096 : 3072]
do_test 2.3 {
sqlite3 db1 test.db2
db1 backup test.db
db1 close
file size test.db
} {1024}
do_test 2.4 { file size test.db2 } 0
db close
forcedelete test.db
forcedelete test.db2
sqlite3 db test.db
#-------------------------------------------------------------------------
# Test that if the destination has a page-size larger than the implicit
# page-size of the source, the final destination database still consists
# of a single page.
#
do_execsql_test 3.1 {
PRAGMA page_size = 4096;
CREATE TABLE t1(a, b);
CREATE INDEX i1 ON t1(a, b);
}
do_test 3.2 { file size test.db } [expr $AUTOVACUUM ? 16384 : 12288]
do_test 3.3 {
sqlite3 db1 test.db2
db1 backup test.db
db1 close
file size test.db
} {1024}
do_test 3.4 { file size test.db2 } 0
finish_test

View File

@ -0,0 +1,65 @@
# 2014 November 13
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
set testprefix backup5
forcedelete test2.db
do_execsql_test 1.0 {
CREATE TABLE t1(a, b);
CREATE TABLE t2(a, b);
INSERT INTO t2 VALUES(1, 1);
INSERT INTO t2 VALUES(2, 2);
INSERT INTO t2 VALUES(3, 3);
}
do_test 1.1 {
forcecopy test.db test.db2
db eval {
DROP TABLE t2;
INSERT INTO t1 VALUES(zeroblob(1000), zeroblob(1000));
INSERT INTO t1 VALUES(randomblob(1000), randomblob(1000));
}
} {}
do_test 1.2 {
sqlite3 db2 test.db2
set stmt [sqlite3_prepare_v2 db2 "SELECT * FROM t2" -1 dummy]
sqlite3_step $stmt
} {SQLITE_ROW}
do_test 1.3 {
list [catch { sqlite3_backup B db2 main db main } msg] $msg
} {1 {sqlite3_backup_init() failed}}
do_test 1.4 {
sqlite3_errmsg db2
} {destination database is in use}
do_test 1.5 {
sqlite3_reset $stmt
sqlite3_backup B db2 main db main
B step 200
B finish
} {SQLITE_OK}
do_test 1.6 {
list [sqlite3_step $stmt] [sqlite3_finalize $stmt]
} {SQLITE_ERROR SQLITE_ERROR}
do_test 1.7 {
sqlite3_errmsg db2
} {no such table: t2}
finish_test

View File

@ -0,0 +1,286 @@
# 2009 January 30
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for SQLite library. The
# focus of this file is testing the handling of IO errors by the
# sqlite3_backup_XXX APIs.
#
# $Id: backup_ioerr.test,v 1.3 2009/04/10 18:41:01 danielk1977 Exp $
set testdir [file dirname $argv0]
source $testdir/tester.tcl
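# Helper procs (comments added for clarity): [data_checksum] returns an
# md5 checksum over the contents of table t1 in the named attached database,
# and [test_contents] asserts that two databases hold identical t1 content.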
proc data_checksum {db file} {
$db one "SELECT md5sum(a, b) FROM ${file}.t1"
}
proc test_contents {name db1 file1 db2 file2} {
$db2 eval {select * from sqlite_master}
$db1 eval {select * from sqlite_master}
set checksum [data_checksum $db2 $file2]
uplevel [list do_test $name [list data_checksum $db1 $file1] $checksum]
}
#--------------------------------------------------------------------
# This proc creates a database of approximately 290 pages, depending
# on whether or not auto-vacuum is configured. Test cases backup_ioerr-1.*
# verify nothing more than this assumption.
#
proc populate_database {db {xtra_large 0}} {
execsql {
BEGIN;
CREATE TABLE t1(a, b);
INSERT INTO t1 VALUES(1, randstr(1000,1000));
INSERT INTO t1 SELECT a+ 1, randstr(1000,1000) FROM t1;
INSERT INTO t1 SELECT a+ 2, randstr(1000,1000) FROM t1;
INSERT INTO t1 SELECT a+ 4, randstr(1000,1000) FROM t1;
INSERT INTO t1 SELECT a+ 8, randstr(1000,1000) FROM t1;
INSERT INTO t1 SELECT a+16, randstr(1000,1000) FROM t1;
INSERT INTO t1 SELECT a+32, randstr(1000,1000) FROM t1;
CREATE INDEX i1 ON t1(b);
COMMIT;
} $db
if {$xtra_large} {
execsql { INSERT INTO t1 SELECT a+64, randstr(1000,1000) FROM t1 } $db
}
}
do_test backup_ioerr-1.1 {
populate_database db
set nPage [expr {[file size test.db] / 1024}]
expr {$nPage>130 && $nPage<160}
} {1}
do_test backup_ioerr-1.2 {
expr {[file size test.db] > $sqlite_pending_byte}
} {1}
do_test backup_ioerr-1.3 {
db close
forcedelete test.db
} {}
# Turn off IO error simulation.
#
proc clear_ioerr_simulation {} {
set ::sqlite_io_error_hit 0
set ::sqlite_io_error_hardhit 0
set ::sqlite_io_error_pending 0
set ::sqlite_io_error_persist 0
}
#--------------------------------------------------------------------
# The following procedure runs with SQLite's IO error simulation
# enabled.
#
# 1) Start with a reasonably sized database. One that includes the
# pending-byte (locking) page.
#
# 2) Open a backup process. Set the cache-size for the destination
# database to 10 pages only.
#
# 3) Step the backup process N times to partially backup the database
# file. If an IO error is reported, then the backup process is
# concluded with a call to backup_finish().
#
# If an IO error occurs, verify that:
#
# * the call to backup_step() returns an SQLITE_IOERR_XXX error code.
#
# * after the failed call to backup_step() but before the call to
# backup_finish() the destination database handle error code and
# error message remain unchanged.
#
# * the call to backup_finish() returns an SQLITE_IOERR_XXX error code.
#
# * following the call to backup_finish(), the destination database
# handle has been populated with an error code and error message.
#
# 4) Write to the database via the source database connection. Check
# that:
#
# * If an IO error occurs while writing the source database, the
# write operation should report an IO error. The backup should
# proceed as normal.
#
# * If an IO error occurs while updating the backup, the write
# operation should proceed normally. The error should be reported
# from the next call to backup_step() (in step 5 of this test
# procedure).
#
# 5) Step the backup process to finish the backup. If an IO error is
# reported, then the backup process is concluded with a call to
# backup_finish().
#
# Test that if an IO error occurs, or if one occurred while updating
# the backup database during step 4, then the conditions listed
# under step 3 are all true.
#
# 6) Finish the backup process.
#
# * If the backup succeeds (backup_finish() returns SQLITE_OK), then
# the contents of the backup database should match that of the
# source database.
#
# * If the backup fails (backup_finish() returns other than SQLITE_OK),
# then the contents of the backup database should be as they were
# before the operation was started.
#
# The following factors are varied:
#
# * Destination database is initially larger than the source database, OR
# * Destination database is initially smaller than the source database.
#
# * IO errors are transient, OR
# * IO errors are persistent.
#
# * Destination page-size is smaller than the source.
# * Destination page-size is the same as the source.
# * Destination page-size is larger than the source.
#
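# For illustration only, the core error-injection pattern used by the loop
# below (the ::sqlite_io_error_* variables are the testfixture's built-in
# IO-error simulation hooks, cleared again by [clear_ioerr_simulation]):
#
#   set ::sqlite_io_error_pending $iError   ;# fail the $iError'th IO operation
#   set ::sqlite_io_error_persist $bPersist ;# 1 => keep failing afterwards
#   set rc [B step 100]                     ;# may now return SQLITE_IOERR_XXX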
set iTest 1
foreach bPersist {0 1} {
foreach iDestPagesize {512 1024 4096} {
foreach zSetupBak [list "" {populate_database ddb 1}] {
incr iTest
set bStop 0
for {set iError 1} {$bStop == 0} {incr iError} {
# Disable IO error simulation.
clear_ioerr_simulation
catch { ddb close }
catch { sdb close }
catch { forcedelete test.db }
catch { forcedelete bak.db }
# Open the source and destination databases.
sqlite3 sdb test.db
sqlite3 ddb bak.db
# Step 1: Populate the source and destination databases.
populate_database sdb
ddb eval "PRAGMA page_size = $iDestPagesize"
ddb eval "PRAGMA cache_size = 10"
eval $zSetupBak
# Step 2: Open the backup process.
sqlite3_backup B ddb main sdb main
# Enable IO error simulation.
set ::sqlite_io_error_pending $iError
set ::sqlite_io_error_persist $bPersist
# Step 3: Partially backup the database. If an IO error occurs, check
# a few things then skip to the next iteration of the loop.
#
set rc [B step 100]
if {$::sqlite_io_error_hardhit} {
do_test backup_ioerr-$iTest.$iError.1 {
string match SQLITE_IOERR* $rc
} {1}
do_test backup_ioerr-$iTest.$iError.2 {
list [sqlite3_errcode ddb] [sqlite3_errmsg ddb]
} {SQLITE_OK {not an error}}
set rc [B finish]
do_test backup_ioerr-$iTest.$iError.3 {
string match SQLITE_IOERR* $rc
} {1}
do_test backup_ioerr-$iTest.$iError.4 {
sqlite3_errmsg ddb
} {disk I/O error}
clear_ioerr_simulation
sqlite3 ddb bak.db
integrity_check backup_ioerr-$iTest.$iError.5 ddb
continue
}
# No IO error was encountered during step 3. Check that backup_step()
# returned SQLITE_OK before proceeding.
do_test backup_ioerr-$iTest.$iError.6 {
expr {$rc eq "SQLITE_OK"}
} {1}
# Step 4: Write to the source database.
set rc [catchsql { UPDATE t1 SET b = randstr(1000,1000) WHERE a < 50 } sdb]
if {[lindex $rc 0] && $::sqlite_io_error_persist==0} {
# The IO error occurred while updating the source database. In this
# case the backup should be able to continue.
set rc [B step 5000]
if { $rc != "SQLITE_IOERR_UNLOCK" } {
do_test backup_ioerr-$iTest.$iError.7 {
list [B step 5000] [B finish]
} {SQLITE_DONE SQLITE_OK}
clear_ioerr_simulation
test_contents backup_ioerr-$iTest.$iError.8 ddb main sdb main
integrity_check backup_ioerr-$iTest.$iError.9 ddb
} else {
do_test backup_ioerr-$iTest.$iError.10 {
B finish
} {SQLITE_IOERR_UNLOCK}
}
clear_ioerr_simulation
sqlite3 ddb bak.db
integrity_check backup_ioerr-$iTest.$iError.11 ddb
continue
}
# Step 5: Finish the backup operation. If an IO error occurs, check that
# it is reported correctly and skip to the next iteration of the loop.
#
set rc [B step 5000]
if {$rc != "SQLITE_DONE"} {
do_test backup_ioerr-$iTest.$iError.12 {
string match SQLITE_IOERR* $rc
} {1}
do_test backup_ioerr-$iTest.$iError.13 {
list [sqlite3_errcode ddb] [sqlite3_errmsg ddb]
} {SQLITE_OK {not an error}}
set rc [B finish]
do_test backup_ioerr-$iTest.$iError.14 {
string match SQLITE_IOERR* $rc
} {1}
do_test backup_ioerr-$iTest.$iError.15 {
sqlite3_errmsg ddb
} {disk I/O error}
clear_ioerr_simulation
sqlite3 ddb bak.db
integrity_check backup_ioerr-$iTest.$iError.16 ddb
continue
}
# The backup was successfully completed.
#
do_test backup_ioerr-$iTest.$iError.17 {
list [set rc] [B finish]
} {SQLITE_DONE SQLITE_OK}
clear_ioerr_simulation
sqlite3 sdb test.db
sqlite3 ddb bak.db
test_contents backup_ioerr-$iTest.$iError.18 ddb main sdb main
integrity_check backup_ioerr-$iTest.$iError.19 ddb
set bStop [expr $::sqlite_io_error_pending<=0]
}}}}
catch { sdb close }
catch { ddb close }
finish_test

View File

@ -0,0 +1,87 @@
# 2009 January 30
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for SQLite library. The
# focus of this file is testing the handling of OOM errors by the
# sqlite3_backup_XXX APIs.
#
# $Id: backup_malloc.test,v 1.2 2009/02/04 22:46:47 drh Exp $
set testdir [file dirname $argv0]
source $testdir/tester.tcl
source $testdir/malloc_common.tcl
do_malloc_test backup_malloc-1 -tclprep {
execsql {
PRAGMA cache_size = 10;
BEGIN;
CREATE TABLE t1(a, b);
INSERT INTO t1 VALUES(1, randstr(1000,1000));
INSERT INTO t1 SELECT a+ 1, randstr(1000,1000) FROM t1;
INSERT INTO t1 SELECT a+ 2, randstr(1000,1000) FROM t1;
INSERT INTO t1 SELECT a+ 4, randstr(1000,1000) FROM t1;
INSERT INTO t1 SELECT a+ 8, randstr(1000,1000) FROM t1;
INSERT INTO t1 SELECT a+16, randstr(1000,1000) FROM t1;
INSERT INTO t1 SELECT a+32, randstr(1000,1000) FROM t1;
INSERT INTO t1 SELECT a+64, randstr(1000,1000) FROM t1;
CREATE INDEX i1 ON t1(b);
COMMIT;
}
sqlite3 db2 test2.db
execsql { PRAGMA cache_size = 10 } db2
} -tclbody {
# Create a backup object.
#
set rc [catch {sqlite3_backup B db2 main db main}]
if {$rc && [sqlite3_errcode db2] == "SQLITE_NOMEM"} {
error "out of memory"
}
# Run the backup process some.
#
set rc [B step 50]
if {$rc == "SQLITE_NOMEM" || $rc == "SQLITE_IOERR_NOMEM"} {
error "out of memory"
}
# Update the database.
#
execsql { UPDATE t1 SET a = a + 1 }
# Finish doing the backup.
#
set rc [B step 5000]
if {$rc == "SQLITE_NOMEM" || $rc == "SQLITE_IOERR_NOMEM"} {
error "out of memory"
}
# Finalize the backup.
B finish
} -cleanup {
catch { B finish }
catch { db2 close }
}
do_malloc_test backup_malloc-2 -tclprep {
sqlite3 db2 test2.db
} -tclbody {
set rc [catch {sqlite3_backup B db2 temp db main}]
set errcode [sqlite3_errcode db2]
if {$rc && ($errcode == "SQLITE_NOMEM" || $errcode == "SQLITE_IOERR_NOMEM")} {
error "out of memory"
}
} -cleanup {
catch { B finish }
db2 close
}
finish_test

View File

@ -0,0 +1,143 @@
# 2007 May 15
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for SQLite library.
#
# This file checks to make sure SQLite is able to gracefully
# handle malformed UTF-8.
#
# $Id: badutf.test,v 1.2 2007/09/12 17:01:45 danielk1977 Exp $
set testdir [file dirname $argv0]
source $testdir/tester.tcl
do_test badutf-1.1 {
db eval {PRAGMA encoding=UTF8}
sqlite3_exec db {SELECT hex('%80') AS x}
} {0 {x 80}}
do_test badutf-1.2 {
sqlite3_exec db {SELECT hex('%81') AS x}
} {0 {x 81}}
do_test badutf-1.3 {
sqlite3_exec db {SELECT hex('%bf') AS x}
} {0 {x BF}}
do_test badutf-1.4 {
sqlite3_exec db {SELECT hex('%c0') AS x}
} {0 {x C0}}
do_test badutf-1.5 {
sqlite3_exec db {SELECT hex('%e0') AS x}
} {0 {x E0}}
do_test badutf-1.6 {
sqlite3_exec db {SELECT hex('%f0') AS x}
} {0 {x F0}}
do_test badutf-1.7 {
sqlite3_exec db {SELECT hex('%ff') AS x}
} {0 {x FF}}
sqlite3 db2 {}
ifcapable utf16 {
do_test badutf-1.10 {
db2 eval {PRAGMA encoding=UTF16be}
sqlite3_exec db2 {SELECT hex('%80') AS x}
} {0 {x 0080}}
do_test badutf-1.11 {
sqlite3_exec db2 {SELECT hex('%81') AS x}
} {0 {x 0081}}
do_test badutf-1.12 {
sqlite3_exec db2 {SELECT hex('%bf') AS x}
} {0 {x 00BF}}
do_test badutf-1.13 {
sqlite3_exec db2 {SELECT hex('%c0') AS x}
} {0 {x FFFD}}
do_test badutf-1.14 {
sqlite3_exec db2 {SELECT hex('%c1') AS x}
} {0 {x FFFD}}
do_test badutf-1.15 {
sqlite3_exec db2 {SELECT hex('%c0%bf') AS x}
} {0 {x FFFD}}
do_test badutf-1.16 {
sqlite3_exec db2 {SELECT hex('%c1%bf') AS x}
} {0 {x FFFD}}
do_test badutf-1.17 {
sqlite3_exec db2 {SELECT hex('%c3%bf') AS x}
} {0 {x 00FF}}
do_test badutf-1.18 {
sqlite3_exec db2 {SELECT hex('%e0') AS x}
} {0 {x FFFD}}
do_test badutf-1.19 {
sqlite3_exec db2 {SELECT hex('%f0') AS x}
} {0 {x FFFD}}
do_test badutf-1.20 {
sqlite3_exec db2 {SELECT hex('%ff') AS x}
} {0 {x FFFD}}
}
ifcapable bloblit {
do_test badutf-2.1 {
sqlite3_exec db {SELECT '%80'=CAST(x'80' AS text) AS x}
} {0 {x 1}}
do_test badutf-2.2 {
sqlite3_exec db {SELECT CAST('%80' AS blob)=x'80' AS x}
} {0 {x 1}}
}
do_test badutf-3.1 {
sqlite3_exec db {SELECT length('%80') AS x}
} {0 {x 1}}
do_test badutf-3.2 {
sqlite3_exec db {SELECT length('%61%62%63') AS x}
} {0 {x 3}}
do_test badutf-3.3 {
sqlite3_exec db {SELECT length('%7f%80%81') AS x}
} {0 {x 3}}
do_test badutf-3.4 {
sqlite3_exec db {SELECT length('%61%c0') AS x}
} {0 {x 2}}
do_test badutf-3.5 {
sqlite3_exec db {SELECT length('%61%c0%80%80%80%80%80%80%80%80%80%80') AS x}
} {0 {x 2}}
do_test badutf-3.6 {
sqlite3_exec db {SELECT length('%c0%80%80%80%80%80%80%80%80%80%80') AS x}
} {0 {x 1}}
do_test badutf-3.7 {
sqlite3_exec db {SELECT length('%80%80%80%80%80%80%80%80%80%80') AS x}
} {0 {x 10}}
do_test badutf-3.8 {
sqlite3_exec db {SELECT length('%80%80%80%80%80%f0%80%80%80%80') AS x}
} {0 {x 6}}
do_test badutf-3.9 {
sqlite3_exec db {SELECT length('%80%80%80%80%80%f0%80%80%80%ff') AS x}
} {0 {x 7}}
do_test badutf-4.1 {
sqlite3_exec db {SELECT hex(trim('%80%80%80%f0%80%80%80%ff','%80%ff')) AS x}
} {0 {x F0}}
do_test badutf-4.2 {
sqlite3_exec db {SELECT hex(ltrim('%80%80%80%f0%80%80%80%ff','%80%ff')) AS x}
} {0 {x F0808080FF}}
do_test badutf-4.3 {
sqlite3_exec db {SELECT hex(rtrim('%80%80%80%f0%80%80%80%ff','%80%ff')) AS x}
} {0 {x 808080F0}}
do_test badutf-4.4 {
sqlite3_exec db {SELECT hex(trim('%80%80%80%f0%80%80%80%ff','%ff%80')) AS x}
} {0 {x 808080F0808080FF}}
do_test badutf-4.5 {
sqlite3_exec db {SELECT hex(trim('%ff%80%80%f0%80%80%80%ff','%ff%80')) AS x}
} {0 {x 80F0808080FF}}
do_test badutf-4.6 {
sqlite3_exec db {SELECT hex(trim('%ff%80%f0%80%80%80%ff','%ff%80')) AS x}
} {0 {x F0808080FF}}
do_test badutf-4.7 {
sqlite3_exec db {SELECT hex(trim('%ff%80%f0%80%80%80%ff','%ff%80%80')) AS x}
} {0 {x FF80F0808080FF}}
db2 close
finish_test

View File

@ -0,0 +1,121 @@
# 2011 March 15
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for SQLite library.
#
# This file checks to make sure SQLite is able to gracefully
# handle malformed UTF-8.
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
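# Helper procs (comments added for clarity): [utf8_to_ustr2] renders a Tcl
# string as \uXXXX escapes, one per character.  The remaining three helpers
# take a string of hex digits (e.g. "C3BF") and rewrite each byte as a
# %XX, \xXX or \uXXXX escape sequence respectively.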
proc utf8_to_ustr2 {s} {
set r ""
foreach i [split $s ""] {
scan $i %c c
append r [format \\u%04.4X $c]
}
set r
}
proc utf8_to_hstr {in} {
regsub -all -- {(..)} $in {%[format "%s" \1]} out
subst $out
}
proc utf8_to_xstr {in} {
regsub -all -- {(..)} $in {\\\\x[format "%s" \1]} out
subst $out
}
proc utf8_to_ustr {in} {
regsub -all -- {(..)} $in {\\\\u[format "%04.4X" 0x\1]} out
subst $out
}
do_test badutf2-1.0 {
db close
forcedelete test.db
sqlite3 db test.db
db eval "PRAGMA encoding = 'UTF-8'"
} {}
do_test badutf2-4.0 {
set S [sqlite3_prepare_v2 db "SELECT ?" -1 dummy]
sqlite3_expired $S
} {0}
foreach { i len uval xstr ustr u2u } {
1 1 00 \x00 {} {}
2 1 01 \x01 "\\u0001" 01
3 1 3F \x3F "\\u003F" 3F
4 1 7F \x7F "\\u007F" 7F
5 1 80 \x80 "\\u0080" C280
6 1 C3BF \xFF "\\u00FF" C3BF
7 3 EFBFBD \xEF\xBF\xBD "\\uFFFD" {}
} {
set hstr [ utf8_to_hstr $uval ]
ifcapable bloblit {
if {$hstr != "%00"} {
do_test badutf2-2.1.$i {
set sql "SELECT '$hstr'=CAST(x'$uval' AS text) AS x;"
set res [ sqlite3_exec db $sql ]
lindex [ lindex $res 1] 1
} {1}
do_test badutf2-2.2.$i {
set sql "SELECT CAST('$hstr' AS blob)=x'$uval' AS x;"
set res [ sqlite3_exec db $sql ]
lindex [ lindex $res 1] 1
} {1}
}
do_test badutf2-2.3.$i {
set sql "SELECT hex(CAST(x'$uval' AS text)) AS x;"
set res [ sqlite3_exec db $sql ]
lindex [ lindex $res 1] 1
} $uval
do_test badutf2-2.4.$i {
set sql "SELECT hex(CAST(x'$uval' AS text)) AS x;"
set res [ sqlite3_exec db $sql ]
lindex [ lindex $res 1] 1
} $uval
}
if {$hstr != "%00"} {
do_test badutf2-3.1.$i {
set sql "SELECT hex('$hstr') AS x;"
set res [ sqlite3_exec db $sql ]
lindex [ lindex $res 1] 1
} $uval
}
do_test badutf2-4.1.$i {
sqlite3_reset $S
sqlite3_bind_text $S 1 $xstr $len
sqlite3_step $S
utf8_to_ustr2 [ sqlite3_column_text $S 0 ]
} $ustr
ifcapable debug {
do_test badutf2-5.1.$i {
utf8_to_utf8 $uval
} $u2u
}
}
do_test badutf2-4.2 {
sqlite3_finalize $S
} {SQLITE_OK}
finish_test

View File

@ -0,0 +1,74 @@
proc bc_find_binaries {zCaption} {
# Search for binaries to test against. Any executable files that match
# our naming convention are assumed to be testfixture binaries to test
# against.
#
set binaries [list]
set self [file tail [info nameofexec]]
set pattern "$self?*"
if {$::tcl_platform(platform)=="windows"} {
set pattern [string map {\.exe {}} $pattern]
}
foreach file [glob -nocomplain $pattern] {
if {$file==$self} continue
if {[file executable $file] && [file isfile $file]} {lappend binaries $file}
}
if {[llength $binaries]==0} {
puts "WARNING: No historical binaries to test against."
puts "WARNING: Omitting backwards-compatibility tests"
}
foreach bin $binaries {
puts -nonewline "Testing against $bin - "
flush stdout
puts "version [get_version $bin]"
}
set ::BC(binaries) $binaries
return $binaries
}
proc get_version {binary} {
set chan [launch_testfixture $binary]
set v [testfixture $chan { sqlite3 -version }]
close $chan
set v
}
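# Comment added for clarity: [do_bc_test] prepares a fresh test.db, opens it
# both in the current build and in the historical testfixture binary $bin,
# defines the [code1]/[sql1] (current build) and [code2]/[sql2] (historical
# binary) helpers, appends a tag for $bin to $::testprefix, and then
# evaluates the supplied script.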
proc do_bc_test {bin script} {
forcedelete test.db
set ::bc_chan [launch_testfixture $bin]
proc code1 {tcl} { uplevel #0 $tcl }
proc code2 {tcl} { testfixture $::bc_chan $tcl }
proc sql1 sql { code1 [list db eval $sql] }
proc sql2 sql { code2 [list db eval $sql] }
code1 { sqlite3 db test.db }
code2 { sqlite3 db test.db }
set bintag [string map {testfixture {}} $bin]
set bintag [string map {\.exe {}} $bintag]
if {$bintag == ""} {set bintag self}
set saved_prefix $::testprefix
append ::testprefix ".$bintag"
uplevel $script
set ::testprefix $saved_prefix
catch { code1 { db close } }
catch { code2 { db close } }
catch { close $::bc_chan }
}
proc do_all_bc_test {script} {
foreach bin $::BC(binaries) {
uplevel [list do_bc_test $bin $script]
}
}

View File

@ -0,0 +1,123 @@
# 2005 July 28
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for SQLite library. The
# focus of this file is testing the use of indices in WHERE clauses
# when the WHERE clause contains the BETWEEN operator.
#
# $Id: between.test,v 1.2 2006/01/17 09:35:02 danielk1977 Exp $
set testdir [file dirname $argv0]
source $testdir/tester.tcl
# Build some test data
#
do_test between-1.0 {
execsql {
BEGIN;
CREATE TABLE t1(w int, x int, y int, z int);
}
for {set i 1} {$i<=100} {incr i} {
set w $i
set x [expr {int(log($i)/log(2))}]
set y [expr {$i*$i + 2*$i + 1}]
set z [expr {$x+$y}]
ifcapable tclvar {
# Random unplanned test of the $varname variable syntax.
execsql {INSERT INTO t1 VALUES($::w,$::x,$::y,$::z)}
} else {
# If the $varname syntax is not available, use the regular variable
# declaration syntax.
execsql {INSERT INTO t1 VALUES(:w,:x,:y,:z)}
}
}
execsql {
CREATE UNIQUE INDEX i1w ON t1(w);
CREATE INDEX i1xy ON t1(x,y);
CREATE INDEX i1zyx ON t1(z,y,x);
COMMIT;
}
} {}
# This procedure executes the SQL. Then it appends to the result the
# "sort" or "nosort" keyword depending on whether or not any sorting
# is done. Then it appends the names of the table and index used.
#
proc queryplan {sql} {
set ::sqlite_sort_count 0
set data [execsql $sql]
if {$::sqlite_sort_count} {set x sort} {set x nosort}
lappend data $x
set eqp [execsql "EXPLAIN QUERY PLAN $sql"]
# puts eqp=$eqp
foreach {a b c x} $eqp {
if {[regexp { TABLE (\w+ AS )?(\w+) USING.* INDEX (\w+)\y} \
$x all as tab idx]} {
lappend data $tab $idx
} elseif {[regexp { TABLE (\w+ AS )?(\w+)\y} $x all as tab]} {
lappend data $tab *
}
}
return $data
}
do_test between-1.1.1 {
queryplan {
SELECT * FROM t1 WHERE w BETWEEN 5 AND 6 ORDER BY +w
}
} {5 2 36 38 6 2 49 51 sort t1 i1w}
do_test between-1.1.2 {
queryplan {
SELECT * FROM t1 WHERE +w BETWEEN 5 AND 6 ORDER BY +w
}
} {5 2 36 38 6 2 49 51 sort t1 *}
do_test between-1.2.1 {
queryplan {
SELECT * FROM t1 WHERE w BETWEEN 5 AND 65-y ORDER BY +w
}
} {5 2 36 38 6 2 49 51 sort t1 i1w}
do_test between-1.2.2 {
queryplan {
SELECT * FROM t1 WHERE +w BETWEEN 5 AND 65-y ORDER BY +w
}
} {5 2 36 38 6 2 49 51 sort t1 *}
do_test between-1.3.1 {
queryplan {
SELECT * FROM t1 WHERE w BETWEEN 41-y AND 6 ORDER BY +w
}
} {5 2 36 38 6 2 49 51 sort t1 i1w}
do_test between-1.3.2 {
queryplan {
SELECT * FROM t1 WHERE +w BETWEEN 41-y AND 6 ORDER BY +w
}
} {5 2 36 38 6 2 49 51 sort t1 *}
do_test between-1.4 {
queryplan {
SELECT * FROM t1 WHERE w BETWEEN 41-y AND 65-y ORDER BY +w
}
} {5 2 36 38 6 2 49 51 sort t1 *}
do_test between-1.5.1 {
queryplan {
SELECT * FROM t1 WHERE 26 BETWEEN y AND z ORDER BY +w
}
} {4 2 25 27 sort t1 i1zyx}
do_test between-1.5.2 {
queryplan {
SELECT * FROM t1 WHERE 26 BETWEEN +y AND z ORDER BY +w
}
} {4 2 25 27 sort t1 i1zyx}
do_test between-1.5.3 {
queryplan {
SELECT * FROM t1 WHERE 26 BETWEEN y AND +z ORDER BY +w
}
} {4 2 25 27 sort t1 *}
finish_test

View File

@ -0,0 +1,203 @@
# 2002 November 30
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for SQLite library. The
# focus of this script is testing the ability of SQLite to handle database
# files larger than 4GB.
#
# $Id: bigfile.test,v 1.12 2009/03/05 04:27:08 shane Exp $
#
if {[file exists skip-big-file]} return
if {$tcl_platform(os)=="Darwin"} return
set testdir [file dirname $argv0]
source $testdir/tester.tcl
# Do not use a codec for this file, as the database is manipulated using
# external methods (the [fake_big_file] and [hexio_write] commands).
#
do_not_use_codec
# If SQLITE_DISABLE_LFS is defined, omit this file.
ifcapable !lfs {
finish_test
return
}
# These tests only work for Tcl version 8.4 and later. Prior to 8.4,
# Tcl was unable to handle large files.
#
scan $::tcl_version %f vx
if {$vx<8.4} return
# Mac OS X does not handle large files efficiently. So skip this test
# on that platform.
if {$tcl_platform(os)=="Darwin"} return
# This is the md5 checksum of all the data in table t1 as created
# by the first test. We will use this number to make sure that data
# never changes.
#
set MAGIC_SUM {593f1efcfdbe698c28b4b1b693f7e4cf}
do_test bigfile-1.1 {
execsql {
BEGIN;
CREATE TABLE t1(x);
INSERT INTO t1 VALUES('abcdefghijklmnopqrstuvwxyz');
INSERT INTO t1 SELECT rowid || ' ' || x FROM t1;
INSERT INTO t1 SELECT rowid || ' ' || x FROM t1;
INSERT INTO t1 SELECT rowid || ' ' || x FROM t1;
INSERT INTO t1 SELECT rowid || ' ' || x FROM t1;
INSERT INTO t1 SELECT rowid || ' ' || x FROM t1;
INSERT INTO t1 SELECT rowid || ' ' || x FROM t1;
INSERT INTO t1 SELECT rowid || ' ' || x FROM t1;
COMMIT;
}
execsql {
SELECT md5sum(x) FROM t1;
}
} $::MAGIC_SUM
# Try to create a large file - a file that is larger than 2^32 bytes.
# If this fails, it means that the system being tested does not support
# large files. So skip all of the remaining tests in this file.
#
db close
if {[catch {fake_big_file 4096 [get_pwd]/test.db} msg]} {
puts "**** Unable to create a file larger than 4096 MB. *****"
finish_test
return
}
hexio_write test.db 28 00000000
do_test bigfile-1.2 {
sqlite3 db test.db
execsql {
SELECT md5sum(x) FROM t1;
}
} $::MAGIC_SUM
# The previous test may fail on some systems because they are unable
# to handle large files. If that is so, then skip all of the following
# tests. We will know the above test failed because the "db" command
# does not exist.
#
if {[llength [info command db]]<=0} {
puts "**** Large file support appears to be broken. *****"
finish_test
return
}
do_test bigfile-1.3 {
execsql {
CREATE TABLE t2 AS SELECT * FROM t1;
SELECT md5sum(x) FROM t2;
}
} $::MAGIC_SUM
do_test bigfile-1.4 {
db close
sqlite3 db test.db
execsql {
SELECT md5sum(x) FROM t1;
}
} $::MAGIC_SUM
db close
if {[catch {fake_big_file 8192 [get_pwd]/test.db}]} {
puts "**** Unable to create a file larger than 8192 MB. *****"
finish_test
return
}
hexio_write test.db 28 00000000
do_test bigfile-1.5 {
sqlite3 db test.db
execsql {
SELECT md5sum(x) FROM t1;
}
} $::MAGIC_SUM
do_test bigfile-1.6 {
sqlite3 db test.db
execsql {
SELECT md5sum(x) FROM t2;
}
} $::MAGIC_SUM
do_test bigfile-1.7 {
execsql {
CREATE TABLE t3 AS SELECT * FROM t1;
SELECT md5sum(x) FROM t3;
}
} $::MAGIC_SUM
do_test bigfile-1.8 {
db close
sqlite3 db test.db
execsql {
SELECT md5sum(x) FROM t1;
}
} $::MAGIC_SUM
do_test bigfile-1.9 {
execsql {
SELECT md5sum(x) FROM t2;
}
} $::MAGIC_SUM
db close
if {[catch {fake_big_file 16384 [get_pwd]/test.db}]} {
puts "**** Unable to create a file larger than 16384 MB. *****"
finish_test
return
}
hexio_write test.db 28 00000000
do_test bigfile-1.10 {
sqlite3 db test.db
execsql {
SELECT md5sum(x) FROM t1;
}
} $::MAGIC_SUM
do_test bigfile-1.11 {
sqlite3 db test.db
execsql {
SELECT md5sum(x) FROM t2;
}
} $::MAGIC_SUM
do_test bigfile-1.12 {
sqlite3 db test.db
execsql {
SELECT md5sum(x) FROM t3;
}
} $::MAGIC_SUM
do_test bigfile-1.13 {
execsql {
CREATE TABLE t4 AS SELECT * FROM t1;
SELECT md5sum(x) FROM t4;
}
} $::MAGIC_SUM
do_test bigfile-1.14 {
db close
sqlite3 db test.db
execsql {
SELECT md5sum(x) FROM t1;
}
} $::MAGIC_SUM
do_test bigfile-1.15 {
execsql {
SELECT md5sum(x) FROM t2;
}
} $::MAGIC_SUM
do_test bigfile-1.16 {
execsql {
SELECT md5sum(x) FROM t3;
}
} $::MAGIC_SUM
finish_test

View File

@ -0,0 +1,62 @@
# 2011 December 20
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for SQLite library. The
# focus of this script is testing the ability of SQLite to handle database
# files larger than 4GB.
#
if {[file exists skip-big-file]} return
if {$tcl_platform(os)=="Darwin"} return
set testdir [file dirname $argv0]
source $testdir/tester.tcl
set testprefix bigfile2
# Create a small database.
#
do_execsql_test 1.1 {
CREATE TABLE t1(a, b);
INSERT INTO t1 VALUES(1, 2);
}
# Pad the file out to 4GB in size. Then clear the file-size field in the
# db header. This will cause SQLite to assume that the first 4GB of pages
# are actually in use and new pages will be appended to the file.
#
db close
if {[catch {fake_big_file 4096 [get_pwd]/test.db} msg]} {
puts "**** Unable to create a file larger than 4096 MB. *****"
finish_test
return
}
hexio_write test.db 28 00000000
do_test 1.2 {
file size test.db
} [expr 14 + 4096 * (1<<20)]
# Now insert a large row. The overflow pages will be located past the 4GB
# boundary. Then, after opening and closing the database, test that the row
# can be read back in.
#
set str [string repeat k 30000]
do_test 1.3 {
sqlite3 db test.db
execsql { INSERT INTO t1 VALUES(3, $str) }
db close
sqlite3 db test.db
db one { SELECT b FROM t1 WHERE a = 3 }
} $str
db close
delete_file test.db
finish_test

View File

@ -0,0 +1,223 @@
# 2001 September 23
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for SQLite library. The
# focus of this file is stressing the library by putting large amounts
# of data in a single row of a table.
#
# $Id: bigrow.test,v 1.5 2004/08/07 23:54:48 drh Exp $
set testdir [file dirname $argv0]
source $testdir/tester.tcl
# Make a big string that we can use for test data
#
do_test bigrow-1.0 {
set ::bigstr {}
for {set i 1} {$i<=9999} {incr i} {
set sep [string index "abcdefghijklmnopqrstuvwxyz" [expr {$i%26}]]
append ::bigstr "$sep [format %04d $i] "
}
string length $::bigstr
} {69993}
# Make a table into which we can insert some big records.
#
do_test bigrow-1.1 {
execsql {
CREATE TABLE t1(a text, b text, c text);
SELECT name FROM sqlite_master
WHERE type='table' OR type='index'
ORDER BY name
}
} {t1}
do_test bigrow-1.2 {
set ::big1 [string range $::bigstr 0 65519]
set sql "INSERT INTO t1 VALUES('abc',"
append sql "'$::big1', 'xyz');"
execsql $sql
execsql {SELECT a, c FROM t1}
} {abc xyz}
do_test bigrow-1.3 {
execsql {SELECT b FROM t1}
} [list $::big1]
do_test bigrow-1.4 {
set ::big2 [string range $::bigstr 0 65520]
set sql "INSERT INTO t1 VALUES('abc2',"
append sql "'$::big2', 'xyz2');"
set r [catch {execsql $sql} msg]
lappend r $msg
} {0 {}}
do_test bigrow-1.4.1 {
execsql {SELECT b FROM t1 ORDER BY c}
} [list $::big1 $::big2]
do_test bigrow-1.4.2 {
execsql {SELECT c FROM t1 ORDER BY c}
} {xyz xyz2}
do_test bigrow-1.4.3 {
execsql {DELETE FROM t1 WHERE a='abc2'}
execsql {SELECT c FROM t1}
} {xyz}
do_test bigrow-1.5 {
execsql {
UPDATE t1 SET a=b, b=a;
SELECT b,c FROM t1
}
} {abc xyz}
do_test bigrow-1.6 {
execsql {
SELECT * FROM t1
}
} [list $::big1 abc xyz]
do_test bigrow-1.7 {
execsql {
INSERT INTO t1 VALUES('1','2','3');
INSERT INTO t1 VALUES('A','B','C');
SELECT b FROM t1 WHERE a=='1';
}
} {2}
do_test bigrow-1.8 {
execsql "SELECT b FROM t1 WHERE a=='$::big1'"
} {abc}
do_test bigrow-1.9 {
execsql "SELECT b FROM t1 WHERE a!='$::big1' ORDER BY a"
} {2 B}
# Try doing some indexing on big columns
#
do_test bigrow-2.1 {
execsql {
CREATE INDEX i1 ON t1(a)
}
execsql "SELECT b FROM t1 WHERE a=='$::big1'"
} {abc}
do_test bigrow-2.2 {
execsql {
UPDATE t1 SET a=b, b=a
}
execsql "SELECT b FROM t1 WHERE a=='abc'"
} [list $::big1]
do_test bigrow-2.3 {
execsql {
UPDATE t1 SET a=b, b=a
}
execsql "SELECT b FROM t1 WHERE a=='$::big1'"
} {abc}
catch {unset ::bigstr}
catch {unset ::big1}
catch {unset ::big2}
# Most of the tests above were created back when rows were limited in
# size to 64K. Now rows can be much bigger. Test that logic. Also
# make sure things work correctly at the transition boundaries between
# row sizes of 256 to 257 bytes and from 65536 to 65537 bytes.
#
# We begin by testing the 256..257 transition.
#
do_test bigrow-3.1 {
execsql {
DELETE FROM t1;
INSERT INTO t1(a,b,c) VALUES('one','abcdefghijklmnopqrstuvwxyz0123','hi');
}
execsql {SELECT a,length(b),c FROM t1}
} {one 30 hi}
do_test bigrow-3.2 {
execsql {
UPDATE t1 SET b=b||b;
UPDATE t1 SET b=b||b;
UPDATE t1 SET b=b||b;
}
execsql {SELECT a,length(b),c FROM t1}
} {one 240 hi}
for {set i 1} {$i<10} {incr i} {
do_test bigrow-3.3.$i {
execsql "UPDATE t1 SET b=b||'$i'"
execsql {SELECT a,length(b),c FROM t1}
} "one [expr {240+$i}] hi"
}
# Now test the 65536..65537 row-size transition.
#
do_test bigrow-4.1 {
execsql {
DELETE FROM t1;
INSERT INTO t1(a,b,c) VALUES('one','abcdefghijklmnopqrstuvwxyz0123','hi');
}
execsql {SELECT a,length(b),c FROM t1}
} {one 30 hi}
do_test bigrow-4.2 {
execsql {
UPDATE t1 SET b=b||b;
UPDATE t1 SET b=b||b;
UPDATE t1 SET b=b||b;
UPDATE t1 SET b=b||b;
UPDATE t1 SET b=b||b;
UPDATE t1 SET b=b||b;
UPDATE t1 SET b=b||b;
UPDATE t1 SET b=b||b;
UPDATE t1 SET b=b||b;
UPDATE t1 SET b=b||b;
UPDATE t1 SET b=b||b;
UPDATE t1 SET b=b||b;
}
execsql {SELECT a,length(b),c FROM t1}
} {one 122880 hi}
do_test bigrow-4.3 {
execsql {
UPDATE t1 SET b=substr(b,1,65515)
}
execsql {SELECT a,length(b),c FROM t1}
} {one 65515 hi}
for {set i 1} {$i<10} {incr i} {
do_test bigrow-4.4.$i {
execsql "UPDATE t1 SET b=b||'$i'"
execsql {SELECT a,length(b),c FROM t1}
} "one [expr {65515+$i}] hi"
}
# Check to make sure the library recovers safely if a row contains
# too much data.
#
do_test bigrow-5.1 {
execsql {
DELETE FROM t1;
INSERT INTO t1(a,b,c) VALUES('one','abcdefghijklmnopqrstuvwxyz0123','hi');
}
execsql {SELECT a,length(b),c FROM t1}
} {one 30 hi}
set i 1
for {set sz 60} {$sz<1048560} {incr sz $sz} {
do_test bigrow-5.2.$i {
execsql {
UPDATE t1 SET b=b||b;
SELECT a,length(b),c FROM t1;
}
} "one $sz hi"
incr i
}
do_test bigrow-5.3 {
catchsql {UPDATE t1 SET b=b||b}
} {0 {}}
do_test bigrow-5.4 {
execsql {SELECT length(b) FROM t1}
} 1966080
do_test bigrow-5.5 {
catchsql {UPDATE t1 SET b=b||b}
} {0 {}}
do_test bigrow-5.6 {
execsql {SELECT length(b) FROM t1}
} 3932160
do_test bigrow-5.99 {
execsql {DROP TABLE t1}
} {}
finish_test

View File

@ -0,0 +1,50 @@
# 2014 November 26
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
set testprefix bigsort
#--------------------------------------------------------------------
# At one point there was an overflow problem if the product of the
# cache-size and page-size was larger than 2^31, causing an infinite
# loop if the product was also an integer multiple of 2^32, or
# inefficiency otherwise.
#
# This test causes thrashing on machines with smaller amounts of
# memory. Make sure the host has at least 8GB available before running
# this test.
#
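# For illustration, the settings used below sit exactly on that boundary:
# with a 1024-byte page size and a cache size of 4194304 pages the product
# is 1024 * 4194304 = 4294967296 = 2^32, i.e. an integer multiple of 2^32.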
if {[catch {exec free | grep Mem:} out] || [lindex $out 1]<8000000} {
finish_test
return
}
do_execsql_test 1.0 {
PRAGMA page_size = 1024;
CREATE TABLE t1(a, b);
BEGIN;
WITH data(x,y) AS (
SELECT 1, zeroblob(10000)
UNION ALL
SELECT x+1, y FROM data WHERE x < 300000
)
INSERT INTO t1 SELECT * FROM data;
COMMIT;
}
do_execsql_test 1.1 {
PRAGMA cache_size = 4194304;
CREATE INDEX i1 ON t1(a, b);
}
finish_test

View File

@ -0,0 +1,758 @@
# 2003 September 6
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for SQLite library. The
# focus of this script is testing the sqlite_bind API.
#
# $Id: bind.test,v 1.48 2009/07/22 07:27:57 danielk1977 Exp $
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
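# Comment added for clarity: [sqlite_step] wraps sqlite3_step, copying the
# column names and the current row's values into the caller's COLS and VALS
# variables before returning the step result code.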
proc sqlite_step {stmt N VALS COLS} {
upvar VALS vals
upvar COLS cols
set vals [list]
set cols [list]
set rc [sqlite3_step $stmt]
for {set i 0} {$i < [sqlite3_column_count $stmt]} {incr i} {
lappend cols [sqlite3_column_name $stmt $i]
}
for {set i 0} {$i < [sqlite3_data_count $stmt]} {incr i} {
lappend vals [sqlite3_column_text $stmt $i]
}
return $rc
}
do_test bind-1.1 {
set DB [sqlite3_connection_pointer db]
execsql {CREATE TABLE t1(a,b,c);}
set VM [sqlite3_prepare $DB {INSERT INTO t1 VALUES(:1,?,:abc)} -1 TAIL]
set TAIL
} {}
do_test bind-1.1.1 {
sqlite3_bind_parameter_count $VM
} 3
do_test bind-1.1.2 {
sqlite3_bind_parameter_name $VM 1
} {:1}
do_test bind-1.1.3 {
sqlite3_bind_parameter_name $VM 2
} {}
do_test bind-1.1.4 {
sqlite3_bind_parameter_name $VM 3
} {:abc}
do_test bind-1.2 {
sqlite_step $VM N VALUES COLNAMES
} {SQLITE_DONE}
do_test bind-1.3 {
execsql {SELECT rowid, * FROM t1}
} {1 {} {} {}}
do_test bind-1.4 {
sqlite3_reset $VM
sqlite_bind $VM 1 {test value 1} normal
sqlite_step $VM N VALUES COLNAMES
} SQLITE_DONE
do_test bind-1.5 {
execsql {SELECT rowid, * FROM t1}
} {1 {} {} {} 2 {test value 1} {} {}}
do_test bind-1.6 {
sqlite3_reset $VM
sqlite_bind $VM 3 {'test value 2'} normal
sqlite_step $VM N VALUES COLNAMES
} SQLITE_DONE
do_test bind-1.7 {
execsql {SELECT rowid, * FROM t1}
} {1 {} {} {} 2 {test value 1} {} {} 3 {test value 1} {} {'test value 2'}}
do_test bind-1.8 {
sqlite3_reset $VM
set sqlite_static_bind_value 123
sqlite_bind $VM 1 {} static
sqlite_bind $VM 2 {abcdefg} normal
sqlite_bind $VM 3 {} null
execsql {DELETE FROM t1}
sqlite_step $VM N VALUES COLNAMES
execsql {SELECT rowid, * FROM t1}
} {1 123 abcdefg {}}
do_test bind-1.9 {
sqlite3_reset $VM
sqlite_bind $VM 1 {456} normal
sqlite_step $VM N VALUES COLNAMES
execsql {SELECT rowid, * FROM t1}
} {1 123 abcdefg {} 2 456 abcdefg {}}
do_test bind-1.10 {
set rc [catch {
sqlite3_prepare db {INSERT INTO t1 VALUES($abc:123,?,:abc)} -1 TAIL
} msg]
lappend rc $msg
} {1 {(1) near ":123": syntax error}}
do_test bind-1.11 {
set rc [catch {
sqlite3_prepare db {INSERT INTO t1 VALUES(@abc:xyz,?,:abc)} -1 TAIL
} msg]
lappend rc $msg
} {1 {(1) near ":xyz": syntax error}}
do_test bind-1.99 {
sqlite3_finalize $VM
} SQLITE_OK
# Prepare the statement in different ways depending on whether or not
# the $var processing is compiled into the library.
#
ifcapable {tclvar} {
do_test bind-2.1 {
execsql {
DELETE FROM t1;
}
set VM [sqlite3_prepare $DB {INSERT INTO t1 VALUES($one,$::two,$x(-z-))}\
-1 TX]
set TX
} {}
set v1 {$one}
set v2 {$::two}
set v3 {$x(-z-)}
}
ifcapable {!tclvar} {
do_test bind-2.1 {
execsql {
DELETE FROM t1;
}
set VM [sqlite3_prepare $DB {INSERT INTO t1 VALUES(:one,:two,:_)} -1 TX]
set TX
} {}
set v1 {:one}
set v2 {:two}
set v3 {:_}
}
do_test bind-2.1.1 {
sqlite3_bind_parameter_count $VM
} 3
do_test bind-2.1.2 {
sqlite3_bind_parameter_name $VM 1
} $v1
do_test bind-2.1.3 {
sqlite3_bind_parameter_name $VM 2
} $v2
do_test bind-2.1.4 {
sqlite3_bind_parameter_name $VM 3
} $v3
do_test bind-2.1.5 {
sqlite3_bind_parameter_index $VM $v1
} 1
do_test bind-2.1.6 {
sqlite3_bind_parameter_index $VM $v2
} 2
do_test bind-2.1.7 {
sqlite3_bind_parameter_index $VM $v3
} 3
do_test bind-2.1.8 {
sqlite3_bind_parameter_index $VM {:hi}
} 0
# 32 bit Integers
do_test bind-2.2 {
sqlite3_bind_int $VM 1 123
sqlite3_bind_int $VM 2 456
sqlite3_bind_int $VM 3 789
sqlite_step $VM N VALUES COLNAMES
sqlite3_reset $VM
execsql {SELECT rowid, * FROM t1}
} {1 123 456 789}
do_test bind-2.3 {
sqlite3_bind_int $VM 2 -2000000000
sqlite3_bind_int $VM 3 2000000000
sqlite_step $VM N VALUES COLNAMES
sqlite3_reset $VM
execsql {SELECT rowid, * FROM t1}
} {1 123 456 789 2 123 -2000000000 2000000000}
do_test bind-2.4 {
execsql {SELECT typeof(a), typeof(b), typeof(c) FROM t1}
} {integer integer integer integer integer integer}
do_test bind-2.5 {
execsql {
DELETE FROM t1;
}
} {}
# 64 bit Integers
do_test bind-3.1 {
sqlite3_bind_int64 $VM 1 32
sqlite3_bind_int64 $VM 2 -2000000000000
sqlite3_bind_int64 $VM 3 2000000000000
sqlite_step $VM N VALUES COLNAMES
sqlite3_reset $VM
execsql {SELECT rowid, * FROM t1}
} {1 32 -2000000000000 2000000000000}
do_test bind-3.2 {
execsql {SELECT typeof(a), typeof(b), typeof(c) FROM t1}
} {integer integer integer}
do_test bind-3.3 {
execsql {
DELETE FROM t1;
}
} {}
# Doubles
do_test bind-4.1 {
sqlite3_bind_double $VM 1 1234.1234
sqlite3_bind_double $VM 2 0.00001
sqlite3_bind_double $VM 3 123456789
sqlite_step $VM N VALUES COLNAMES
sqlite3_reset $VM
set x [execsql {SELECT rowid, * FROM t1}]
regsub {1e-005} $x {1e-05} y
set y
} {1 1234.1234 1e-05 123456789.0}
do_test bind-4.2 {
execsql {SELECT typeof(a), typeof(b), typeof(c) FROM t1}
} {real real real}
do_test bind-4.3 {
execsql {
DELETE FROM t1;
}
} {}
do_test bind-4.4 {
sqlite3_bind_double $VM 1 NaN
sqlite3_bind_double $VM 2 1e300
sqlite3_bind_double $VM 3 -1e-300
sqlite_step $VM N VALUES COLNAMES
sqlite3_reset $VM
set x [execsql {SELECT rowid, * FROM t1}]
regsub {1e-005} $x {1e-05} y
set y
} {1 {} 1e+300 -1e-300}
do_test bind-4.5 {
execsql {SELECT typeof(a), typeof(b), typeof(c) FROM t1}
} {null real real}
do_test bind-4.6 {
execsql {
DELETE FROM t1;
}
} {}
# NULL
do_test bind-5.1 {
sqlite3_bind_null $VM 1
sqlite3_bind_null $VM 2
sqlite3_bind_null $VM 3
sqlite_step $VM N VALUES COLNAMES
sqlite3_reset $VM
execsql {SELECT rowid, * FROM t1}
} {1 {} {} {}}
do_test bind-5.2 {
execsql {SELECT typeof(a), typeof(b), typeof(c) FROM t1}
} {null null null}
do_test bind-5.3 {
execsql {
DELETE FROM t1;
}
} {}
# UTF-8 text
do_test bind-6.1 {
sqlite3_bind_text $VM 1 hellothere 5
sqlite3_bind_text $VM 2 ".." 1
sqlite3_bind_text $VM 3 world\000 -1
sqlite_step $VM N VALUES COLNAMES
sqlite3_reset $VM
execsql {SELECT rowid, * FROM t1}
} {1 hello . world}
do_test bind-6.2 {
execsql {SELECT typeof(a), typeof(b), typeof(c) FROM t1}
} {text text text}
do_test bind-6.3 {
execsql {
DELETE FROM t1;
}
} {}
# Make sure zeros in a string work.
#
do_test bind-6.4 {
db eval {DELETE FROM t1}
sqlite3_bind_text $VM 1 hello\000there\000 12
sqlite3_bind_text $VM 2 hello\000there\000 11
sqlite3_bind_text $VM 3 hello\000there\000 -1
sqlite_step $VM N VALUES COLNAMES
sqlite3_reset $VM
execsql {SELECT * FROM t1}
} {hello hello hello}
set enc [db eval {PRAGMA encoding}]
if {$enc=="UTF-8" || $enc==""} {
do_test bind-6.5 {
execsql {SELECT hex(a), hex(b), hex(c) FROM t1}
} {68656C6C6F00746865726500 68656C6C6F007468657265 68656C6C6F}
} elseif {$enc=="UTF-16le"} {
do_test bind-6.5 {
execsql {SELECT hex(a), hex(b), hex(c) FROM t1}
} {680065006C006C006F000000740068006500720065000000 680065006C006C006F00000074006800650072006500 680065006C006C006F00}
} elseif {$enc=="UTF-16be"} {
do_test bind-6.5 {
execsql {SELECT hex(a), hex(b), hex(c) FROM t1}
} {00680065006C006C006F0000007400680065007200650000 00680065006C006C006F000000740068006500720065 00680065006C006C006F}
} else {
do_test bind-6.5 {
set "Unknown database encoding: $::enc"
} {}
}
do_test bind-6.6 {
execsql {SELECT typeof(a), typeof(b), typeof(c) FROM t1}
} {text text text}
do_test bind-6.7 {
execsql {
DELETE FROM t1;
}
} {}
# UTF-16 text
ifcapable {utf16} {
do_test bind-7.1 {
sqlite3_bind_text16 $VM 1 [encoding convertto unicode hellothere] 10
sqlite3_bind_text16 $VM 2 [encoding convertto unicode ""] 0
sqlite3_bind_text16 $VM 3 [encoding convertto unicode world] 10
sqlite_step $VM N VALUES COLNAMES
sqlite3_reset $VM
execsql {SELECT rowid, * FROM t1}
} {1 hello {} world}
do_test bind-7.2 {
execsql {SELECT typeof(a), typeof(b), typeof(c) FROM t1}
} {text text text}
do_test bind-7.3 {
db eval {DELETE FROM t1}
sqlite3_bind_text16 $VM 1 [encoding convertto unicode hi\000yall\000] 16
sqlite3_bind_text16 $VM 2 [encoding convertto unicode hi\000yall\000] 14
sqlite3_bind_text16 $VM 3 [encoding convertto unicode hi\000yall\000] -1
sqlite_step $VM N VALUES COLNAMES
sqlite3_reset $VM
execsql {SELECT * FROM t1}
} {hi hi hi}
if {$enc=="UTF-8"} {
do_test bind-7.4 {
execsql {SELECT hex(a), hex(b), hex(c) FROM t1}
} {68690079616C6C00 68690079616C6C 6869}
} elseif {$enc=="UTF-16le"} {
do_test bind-7.4 {
execsql {SELECT hex(a), hex(b), hex(c) FROM t1}
} {680069000000790061006C006C000000 680069000000790061006C006C00 68006900}
} elseif {$enc=="UTF-16be"} {
do_test bind-7.4 {
execsql {SELECT hex(a), hex(b), hex(c) FROM t1}
} {00680069000000790061006C006C0000 00680069000000790061006C006C 00680069}
}
do_test bind-7.5 {
execsql {SELECT typeof(a), typeof(b), typeof(c) FROM t1}
} {text text text}
}
do_test bind-7.99 {
execsql {DELETE FROM t1;}
} {}
# Test that the 'out of range' error works.
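# The statement still active here is the three-parameter INSERT prepared at
# bind-2.1, so index 0 and index 4 are both rejected: the catch returns 1 and
# sqlite3_errmsg reports "bind or column index out of range".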
do_test bind-8.1 {
catch { sqlite3_bind_null $VM 0 }
} {1}
do_test bind-8.2 {
sqlite3_errmsg $DB
} {bind or column index out of range}
ifcapable {utf16} {
do_test bind-8.3 {
encoding convertfrom unicode [sqlite3_errmsg16 $DB]
} {bind or column index out of range}
}
do_test bind-8.4 {
sqlite3_bind_null $VM 1
sqlite3_errmsg $DB
} {not an error}
do_test bind-8.5 {
catch { sqlite3_bind_null $VM 4 }
} {1}
do_test bind-8.6 {
sqlite3_errmsg $DB
} {bind or column index out of range}
ifcapable {utf16} {
do_test bind-8.7 {
encoding convertfrom unicode [sqlite3_errmsg16 $DB]
} {bind or column index out of range}
}
do_test bind-8.8 {
catch { sqlite3_bind_blob $VM 0 "abc" 3 }
} {1}
do_test bind-8.9 {
catch { sqlite3_bind_blob $VM 4 "abc" 3 }
} {1}
do_test bind-8.10 {
catch { sqlite3_bind_text $VM 0 "abc" 3 }
} {1}
ifcapable {utf16} {
do_test bind-8.11 {
catch { sqlite3_bind_text16 $VM 4 "abc" 2 }
} {1}
}
do_test bind-8.12 {
catch { sqlite3_bind_int $VM 0 5 }
} {1}
do_test bind-8.13 {
catch { sqlite3_bind_int $VM 4 5 }
} {1}
do_test bind-8.14 {
catch { sqlite3_bind_double $VM 0 5.0 }
} {1}
do_test bind-8.15 {
catch { sqlite3_bind_double $VM 4 6.0 }
} {1}
do_test bind-8.99 {
sqlite3_finalize $VM
} SQLITE_OK
set iMaxVar $SQLITE_MAX_VARIABLE_NUMBER
set zError "(1) variable number must be between ?1 and ?$iMaxVar"
do_test bind-9.1 {
execsql {
CREATE TABLE t2(a,b,c,d,e,f);
}
set rc [catch {
sqlite3_prepare $DB {
INSERT INTO t2(a) VALUES(?0)
} -1 TAIL
} msg]
lappend rc $msg
} [list 1 $zError]
do_test bind-9.2 {
set rc [catch {
sqlite3_prepare $DB "INSERT INTO t2(a) VALUES(?[expr $iMaxVar+1])" -1 TAIL
} msg]
lappend rc $msg
} [list 1 $zError]
do_test bind-9.3.1 {
set VM [
sqlite3_prepare $DB "
INSERT INTO t2(a,b) VALUES(?1,?$iMaxVar)
" -1 TAIL
]
sqlite3_bind_parameter_count $VM
} $iMaxVar
catch {sqlite3_finalize $VM}
do_test bind-9.3.2 {
set VM [
sqlite3_prepare $DB "
INSERT INTO t2(a,b) VALUES(?2,?[expr $iMaxVar - 1])
" -1 TAIL
]
sqlite3_bind_parameter_count $VM
} [expr {$iMaxVar - 1}]
catch {sqlite3_finalize $VM}
do_test bind-9.4 {
set VM [
sqlite3_prepare $DB "
INSERT INTO t2(a,b,c,d) VALUES(?1,?[expr $iMaxVar - 2],?,?)
" -1 TAIL
]
sqlite3_bind_parameter_count $VM
} $iMaxVar
do_test bind-9.5 {
sqlite3_bind_int $VM 1 1
sqlite3_bind_int $VM [expr $iMaxVar - 2] 999
sqlite3_bind_int $VM [expr $iMaxVar - 1] 1000
sqlite3_bind_int $VM $iMaxVar 1001
sqlite3_step $VM
} SQLITE_DONE
do_test bind-9.6 {
sqlite3_finalize $VM
} SQLITE_OK
do_test bind-9.7 {
execsql {SELECT * FROM t2}
} {1 999 1000 1001 {} {}}
ifcapable {tclvar} {
do_test bind-10.1 {
set VM [
sqlite3_prepare $DB {
INSERT INTO t2(a,b,c,d,e,f) VALUES(:abc,$abc,:abc,$ab,$abc,:abc)
} -1 TAIL
]
sqlite3_bind_parameter_count $VM
} 3
set v1 {$abc}
set v2 {$ab}
}
ifcapable {!tclvar} {
do_test bind-10.1 {
set VM [
sqlite3_prepare $DB {
INSERT INTO t2(a,b,c,d,e,f) VALUES(:abc,:xyz,:abc,:xy,:xyz,:abc)
} -1 TAIL
]
sqlite3_bind_parameter_count $VM
} 3
set v1 {:xyz}
set v2 {:xy}
}
do_test bind-10.2 {
sqlite3_bind_parameter_index $VM :abc
} 1
do_test bind-10.3 {
sqlite3_bind_parameter_index $VM $v1
} 2
do_test bind-10.4 {
sqlite3_bind_parameter_index $VM $v2
} 3
do_test bind-10.5 {
sqlite3_bind_parameter_name $VM 1
} :abc
do_test bind-10.6 {
sqlite3_bind_parameter_name $VM 2
} $v1
do_test bind-10.7 {
sqlite3_bind_parameter_name $VM 3
} $v2
do_test bind-10.7.1 {
sqlite3_bind_parameter_name 0 1 ;# Ignore if VM is NULL
} {}
do_test bind-10.7.2 {
sqlite3_bind_parameter_name $VM 0 ;# Ignore if index too small
} {}
do_test bind-10.7.3 {
sqlite3_bind_parameter_name $VM 4 ;# Ignore if index is too big
} {}
do_test bind-10.8 {
sqlite3_bind_int $VM 1 1
sqlite3_bind_int $VM 2 2
sqlite3_bind_int $VM 3 3
sqlite3_step $VM
} SQLITE_DONE
do_test bind-10.8.1 {
# Binding attempts after program start should fail
set rc [catch {
sqlite3_bind_int $VM 1 1
} msg]
lappend rc $msg
} {1 {}}
do_test bind-10.9 {
sqlite3_finalize $VM
} SQLITE_OK
do_test bind-10.10 {
execsql {SELECT * FROM t2}
} {1 999 1000 1001 {} {} 1 2 1 3 2 1}
# Ticket #918
#
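# The statement below mixes named (:abc, :pqr), anonymous (?) and numbered
# (?4) parameters.  The anonymous '?' takes the next sequential index (2),
# '?4' pins index 4, :pqr then gets index 5, and the repeated :abc and ?4
# reuse their earlier indexes.  Index 3 is never used, so the reported
# parameter count is 5 (the largest index in use) and only indexes 1, 4
# and 5 have names.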
do_test bind-10.11 {
# catch {sqlite3_finalize $VM}
set VM [
sqlite3_prepare $DB {
INSERT INTO t2(a,b,c,d,e,f) VALUES(:abc,?,?4,:pqr,:abc,?4)
} -1 TAIL
]
sqlite3_bind_parameter_count $VM
} 5
do_test bind-10.11.1 {
sqlite3_bind_parameter_index 0 :xyz ;# ignore NULL VM arguments
} 0
do_test bind-10.12 {
sqlite3_bind_parameter_index $VM :xyz
} 0
do_test bind-10.13 {
sqlite3_bind_parameter_index $VM {}
} 0
do_test bind-10.14 {
sqlite3_bind_parameter_index $VM :pqr
} 5
do_test bind-10.15 {
sqlite3_bind_parameter_index $VM ?4
} 4
do_test bind-10.16 {
sqlite3_bind_parameter_name $VM 1
} :abc
do_test bind-10.17 {
sqlite3_bind_parameter_name $VM 2
} {}
do_test bind-10.18 {
sqlite3_bind_parameter_name $VM 3
} {}
do_test bind-10.19 {
sqlite3_bind_parameter_name $VM 4
} {?4}
do_test bind-10.20 {
sqlite3_bind_parameter_name $VM 5
} :pqr
catch {sqlite3_finalize $VM}
# Make sure we catch an unterminated "(" in a Tcl-style variable name
#
ifcapable tclvar {
do_test bind-11.1 {
catchsql {SELECT * FROM sqlite_master WHERE name=$abc(123 and sql NOT NULL;}
} {1 {unrecognized token: "$abc(123"}}
}
if {[execsql {pragma encoding}]=="UTF-8"} {
# Test the ability to bind text that contains embedded '\000' characters.
# Make sure we can recover the entire input string.
#
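# (The test-only "blob10" bind type supplies a fixed ten-byte value; the hex
# result 6162630078797A007071 below is "abc", NUL, "xyz", NUL, "pq", so
# length(x) sees only the first three characters while the BLOB cast
# recovers all ten bytes.)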
do_test bind-12.1 {
execsql {
CREATE TABLE t3(x BLOB);
}
set VM [sqlite3_prepare $DB {INSERT INTO t3 VALUES(?)} -1 TAIL]
sqlite_bind $VM 1 not-used blob10
sqlite3_step $VM
sqlite3_finalize $VM
execsql {
SELECT typeof(x), length(x), quote(x),
length(cast(x AS BLOB)), quote(cast(x AS BLOB)) FROM t3
}
} {text 3 'abc' 10 X'6162630078797A007071'}
do_test bind-12.2 {
sqlite3_create_function $DB
execsql {
SELECT quote(cast(x_coalesce(x) AS blob)) FROM t3
}
} {X'6162630078797A007071'}
}
# Test the operation of sqlite3_clear_bindings
#
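# Unbound parameters evaluate to NULL (bind-13.1).  sqlite3_reset() by itself
# leaves existing bindings in place (bind-13.3), while sqlite3_clear_bindings()
# sets every parameter back to NULL (bind-13.4).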
do_test bind-13.1 {
set VM [sqlite3_prepare $DB {SELECT ?,?,?} -1 TAIL]
sqlite3_step $VM
list [sqlite3_column_type $VM 0] [sqlite3_column_type $VM 1] \
[sqlite3_column_type $VM 2]
} {NULL NULL NULL}
do_test bind-13.2 {
sqlite3_reset $VM
sqlite3_bind_int $VM 1 1
sqlite3_bind_int $VM 2 2
sqlite3_bind_int $VM 3 3
sqlite3_step $VM
list [sqlite3_column_type $VM 0] [sqlite3_column_type $VM 1] \
[sqlite3_column_type $VM 2]
} {INTEGER INTEGER INTEGER}
do_test bind-13.3 {
sqlite3_reset $VM
sqlite3_step $VM
list [sqlite3_column_type $VM 0] [sqlite3_column_type $VM 1] \
[sqlite3_column_type $VM 2]
} {INTEGER INTEGER INTEGER}
do_test bind-13.4 {
sqlite3_reset $VM
sqlite3_clear_bindings $VM
sqlite3_step $VM
list [sqlite3_column_type $VM 0] [sqlite3_column_type $VM 1] \
[sqlite3_column_type $VM 2]
} {NULL NULL NULL}
sqlite3_finalize $VM
#--------------------------------------------------------------------
# These tests attempt to reproduce bug #3463.
#
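# param_names: prepare $zSql on connection $db, collect the name of every
# parameter via sqlite3_bind_parameter_name, finalize the statement, and
# return the list of names.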
proc param_names {db zSql} {
set ret [list]
set VM [sqlite3_prepare db $zSql -1 TAIL]
for {set ii 1} {$ii <= [sqlite3_bind_parameter_count $VM]} {incr ii} {
lappend ret [sqlite3_bind_parameter_name $VM $ii]
}
sqlite3_finalize $VM
set ret
}
do_test bind-14.1 {
param_names db { SELECT @a, @b }
} {@a @b}
do_test bind-14.2 {
param_names db { SELECT NULL FROM (SELECT NULL) WHERE @a = @b }
} {@a @b}
do_test bind-14.3 {
param_names db { SELECT @a FROM (SELECT NULL) WHERE 1 = @b }
} {@a @b}
do_test bind-14.4 {
param_names db { SELECT @a, @b FROM (SELECT NULL) }
} {@a @b}
#--------------------------------------------------------------------------
# Tests of the OP_Variable opcode where P3>1
#
do_test bind-15.1 {
db eval {CREATE TABLE t4(a,b,c,d,e,f,g,h);}
set VM [sqlite3_prepare db {
INSERT INTO t4(a,b,c,d,f,g,h,e) VALUES(?,?,?,?,?,?,?,?)
} -1 TAIL]
sqlite3_bind_int $VM 1 1
sqlite3_bind_int $VM 2 2
sqlite3_bind_int $VM 3 3
sqlite3_bind_int $VM 4 4
sqlite3_bind_int $VM 5 5
sqlite3_bind_int $VM 6 6
sqlite3_bind_int $VM 7 7
sqlite3_bind_int $VM 8 8
sqlite3_step $VM
sqlite3_finalize $VM
db eval {SELECT * FROM t4}
} {1 2 3 4 8 5 6 7}
do_test bind-15.2 {
db eval {DELETE FROM t4}
set VM [sqlite3_prepare db {
INSERT INTO t4(a,b,c,d,e,f,g,h) VALUES(?,?,?,?,?,?,?,?)
} -1 TAIL]
sqlite3_bind_int $VM 1 1
sqlite3_bind_int $VM 2 2
sqlite3_bind_int $VM 3 3
sqlite3_bind_int $VM 4 4
sqlite3_bind_int $VM 5 5
sqlite3_bind_int $VM 6 6
sqlite3_bind_int $VM 7 7
sqlite3_bind_int $VM 8 8
sqlite3_step $VM
sqlite3_finalize $VM
db eval {SELECT * FROM t4}
} {1 2 3 4 5 6 7 8}
do_test bind-15.3 {
db eval {DELETE FROM t4}
set VM [sqlite3_prepare db {
INSERT INTO t4(h,g,f,e,d,c,b,a) VALUES(?,?,?,?,?,?,?,?)
} -1 TAIL]
sqlite3_bind_int $VM 1 1
sqlite3_bind_int $VM 2 2
sqlite3_bind_int $VM 3 3
sqlite3_bind_int $VM 4 4
sqlite3_bind_int $VM 5 5
sqlite3_bind_int $VM 6 6
sqlite3_bind_int $VM 7 7
sqlite3_bind_int $VM 8 8
sqlite3_step $VM
sqlite3_finalize $VM
db eval {SELECT * FROM t4}
} {8 7 6 5 4 3 2 1}
do_test bind-15.4 {
db eval {DELETE FROM t4}
set VM [sqlite3_prepare db {
INSERT INTO t4(a,b,c,d,e,f,g,h) VALUES(?,?,?,?4,?,?6,?,?)
} -1 TAIL]
sqlite3_bind_int $VM 1 1
sqlite3_bind_int $VM 2 2
sqlite3_bind_int $VM 3 3
sqlite3_bind_int $VM 4 4
sqlite3_bind_int $VM 5 5
sqlite3_bind_int $VM 6 6
sqlite3_bind_int $VM 7 7
sqlite3_bind_int $VM 8 8
sqlite3_step $VM
sqlite3_finalize $VM
db eval {SELECT * FROM t4}
} {1 2 3 4 5 6 7 8}
finish_test

View File

@ -0,0 +1,76 @@
# 2005 April 21
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for SQLite library. The
# focus of this script is testing the sqlite_transfer_bindings() API.
#
# $Id: bindxfer.test,v 1.9 2009/04/17 11:56:28 drh Exp $
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
proc sqlite_step {stmt VALS COLS} {
upvar #0 $VALS vals
upvar #0 $COLS cols
set vals [list]
set cols [list]
set rc [sqlite3_step $stmt]
for {set i 0} {$i < [sqlite3_column_count $stmt]} {incr i} {
lappend cols [sqlite3_column_name $stmt $i]
}
for {set i 0} {$i < [sqlite3_data_count $stmt]} {incr i} {
lappend vals [sqlite3_column_text $stmt $i]
}
return $rc
}
do_test bindxfer-1.1 {
set DB [sqlite3_connection_pointer db]
execsql {CREATE TABLE t1(a,b,c);}
set VM1 [sqlite3_prepare $DB {SELECT ?, ?, ?} -1 TAIL]
set TAIL
} {}
do_test bindxfer-1.2 {
sqlite3_bind_parameter_count $VM1
} 3
do_test bindxfer-1.3 {
set VM2 [sqlite3_prepare $DB {SELECT ?, ?, ?} -1 TAIL]
set TAIL
} {}
do_test bindxfer-1.4 {
sqlite3_bind_parameter_count $VM2
} 3
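# sqlite3_transfer_bindings() (a deprecated interface, hence the ifcapable
# guard below) moves the bindings from one prepared statement to another:
# after the transfer the source statement sees all-NULL parameters
# (bindxfer-1.6) while the destination sees the values originally bound to
# the source (bindxfer-1.8).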
ifcapable deprecated {
do_test bindxfer-1.5 {
sqlite_bind $VM1 1 one normal
set sqlite_static_bind_value two
sqlite_bind $VM1 2 {} static
sqlite_bind $VM1 3 {} null
sqlite3_transfer_bindings $VM1 $VM2
sqlite_step $VM1 VALUES COLNAMES
} SQLITE_ROW
do_test bindxfer-1.6 {
set VALUES
} {{} {} {}}
do_test bindxfer-1.7 {
sqlite_step $VM2 VALUES COLNAMES
} SQLITE_ROW
do_test bindxfer-1.8 {
set VALUES
} {one two {}}
}
catch {sqlite3_finalize $VM1}
catch {sqlite3_finalize $VM2}
finish_test

View File

@ -0,0 +1,195 @@
# 2008 February 18
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
#
# Unit testing of the Bitvec object.
#
# $Id: bitvec.test,v 1.4 2009/04/01 23:49:04 drh Exp $
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
# The built-in test logic must be operational in order for
# this test to work.
ifcapable !builtin_test {
finish_test
return
}
# Test that sqlite3BitvecBuiltinTest correctly reports errors
# that are deliberately introduced.
#
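# In both checks below the value reported back (1, then 234) matches the
# third element of the test program, which appears to be the position at
# which the error is deliberately injected.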
do_test bitvec-1.0.1 {
sqlite3BitvecBuiltinTest 400 {5 1 1 1 0}
} 1
do_test bitvec-1.0.2 {
sqlite3BitvecBuiltinTest 400 {5 1 234 1 0}
} 234
# Run test cases that set every bit in vectors of various sizes.
# For larger cases, this should cycle the bit vector representation
# from hashing into subbitmaps. The subbitmaps should start as
# hashes then change to either subbitmaps or linear maps, depending
# on their size.
#
do_test bitvec-1.1 {
sqlite3BitvecBuiltinTest 400 {1 400 1 1 0}
} 0
do_test bitvec-1.2 {
sqlite3BitvecBuiltinTest 4000 {1 4000 1 1 0}
} 0
do_test bitvec-1.3 {
sqlite3BitvecBuiltinTest 40000 {1 40000 1 1 0}
} 0
do_test bitvec-1.4 {
sqlite3BitvecBuiltinTest 400000 {1 400000 1 1 0}
} 0
# By specifying larger increments, we spread the load around.
#
do_test bitvec-1.5 {
sqlite3BitvecBuiltinTest 400 {1 400 1 7 0}
} 0
do_test bitvec-1.6 {
sqlite3BitvecBuiltinTest 4000 {1 4000 1 7 0}
} 0
do_test bitvec-1.7 {
sqlite3BitvecBuiltinTest 40000 {1 40000 1 7 0}
} 0
do_test bitvec-1.8 {
sqlite3BitvecBuiltinTest 400000 {1 400000 1 7 0}
} 0
# First fill up the bitmap with ones, then go through and
# clear all the bits. This will stress the clearing mechanism.
#
do_test bitvec-1.9 {
sqlite3BitvecBuiltinTest 400 {1 400 1 1 2 400 1 1 0}
} 0
do_test bitvec-1.10 {
sqlite3BitvecBuiltinTest 4000 {1 4000 1 1 2 4000 1 1 0}
} 0
do_test bitvec-1.11 {
sqlite3BitvecBuiltinTest 40000 {1 40000 1 1 2 40000 1 1 0}
} 0
do_test bitvec-1.12 {
sqlite3BitvecBuiltinTest 400000 {1 400000 1 1 2 400000 1 1 0}
} 0
do_test bitvec-1.13 {
sqlite3BitvecBuiltinTest 400 {1 400 1 1 2 400 1 7 0}
} 0
do_test bitvec-1.15 {
sqlite3BitvecBuiltinTest 4000 {1 4000 1 1 2 4000 1 7 0}
} 0
do_test bitvec-1.16 {
sqlite3BitvecBuiltinTest 40000 {1 40000 1 1 2 40000 1 77 0}
} 0
do_test bitvec-1.17 {
sqlite3BitvecBuiltinTest 400000 {1 400000 1 1 2 400000 1 777 0}
} 0
do_test bitvec-1.18 {
sqlite3BitvecBuiltinTest 400000 {1 5000 100000 1 2 400000 1 37 0}
} 0
# Attempt to induce hash collisions.
#
unset -nocomplain start
unset -nocomplain incr
foreach start {1 2 3 4 5 6 7 8} {
foreach incr {124 125} {
do_test bitvec-1.20.$start.$incr {
set prog [list 1 60 $::start $::incr 2 5000 1 1 0]
sqlite3BitvecBuiltinTest 5000 $prog
} 0
}
}
do_test bitvec-1.30.big_and_slow {
sqlite3BitvecBuiltinTest 17000000 {1 17000000 1 1 2 17000000 1 1 0}
} 0
# Test setting and clearing a random subset of bits.
#
do_test bitvec-2.1 {
sqlite3BitvecBuiltinTest 4000 {3 2000 4 2000 0}
} 0
do_test bitvec-2.2 {
sqlite3BitvecBuiltinTest 4000 {3 1000 4 1000 3 1000 4 1000 3 1000 4 1000
3 1000 4 1000 3 1000 4 1000 3 1000 4 1000 0}
} 0
do_test bitvec-2.3 {
sqlite3BitvecBuiltinTest 400000 {3 10 0}
} 0
do_test bitvec-2.4 {
sqlite3BitvecBuiltinTest 4000 {3 10 2 4000 1 1 0}
} 0
do_test bitvec-2.5 {
sqlite3BitvecBuiltinTest 5000 {3 20 2 5000 1 1 0}
} 0
do_test bitvec-2.6 {
sqlite3BitvecBuiltinTest 50000 {3 60 2 50000 1 1 0}
} 0
do_test bitvec-2.7 {
sqlite3BitvecBuiltinTest 5000 {
1 25 121 125
1 50 121 125
2 25 121 125
0
}
} 0
# This procedure runs sqlite3BitvecBuiltinTest with arguments "n" and
# "program". But it also causes a malloc error to occur after the
# "failcnt"-th malloc. The result should be "0" if no malloc failure
# occurs or "-1" if there is a malloc failure.
#
proc bitvec_malloc_test {label failcnt n program} {
do_test $label [subst {
sqlite3_memdebug_fail $failcnt
set x \[sqlite3BitvecBuiltinTest $n [list $program]\]
set nFail \[sqlite3_memdebug_fail -1\]
if {\$nFail==0} {
set ::go 0
set x -1
}
set x
}] -1
}
# Make sure malloc failures are handled sanely.
#
unset -nocomplain n
unset -nocomplain go
set go 1
save_prng_state
for {set n 0} {$go} {incr n} {
restore_prng_state
bitvec_malloc_test bitvec-3.1.$n $n 5000 {
3 60 2 5000 1 1 3 60 2 5000 1 1 3 60 2 5000 1 1 0
}
}
set go 1
for {set n 0} {$go} {incr n} {
restore_prng_state
bitvec_malloc_test bitvec-3.2.$n $n 5000 {
3 600 2 5000 1 1 3 600 2 5000 1 1 3 600 2 5000 1 1 0
}
}
set go 1
for {set n 1} {$go} {incr n} {
bitvec_malloc_test bitvec-3.3.$n $n 50000 {1 50000 1 1 0}
}
finish_test
return

View File

@ -0,0 +1,147 @@
# 2001 September 15
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for SQLite library.
#
# $Id: blob.test,v 1.8 2009/04/28 18:00:27 drh Exp $
set testdir [file dirname $argv0]
source $testdir/tester.tcl
ifcapable {!bloblit} {
finish_test
return
}
proc bin_to_hex {blob} {
set bytes {}
binary scan $blob \c* bytes
set bytes2 [list]
foreach b $bytes {lappend bytes2 [format %02X [expr $b & 0xFF]]}
join $bytes2 {}
}
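# For example (illustration only): bin_to_hex "\x01\xab" returns "01AB".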
# Simplest possible case. Specify a blob literal
do_test blob-1.0 {
set blob [execsql {SELECT X'01020304';}]
bin_to_hex [lindex $blob 0]
} {01020304}
do_test blob-1.1 {
set blob [execsql {SELECT x'ABCDEF';}]
bin_to_hex [lindex $blob 0]
} {ABCDEF}
do_test blob-1.2 {
set blob [execsql {SELECT x'';}]
bin_to_hex [lindex $blob 0]
} {}
do_test blob-1.3 {
set blob [execsql {SELECT x'abcdEF12';}]
bin_to_hex [lindex $blob 0]
} {ABCDEF12}
do_test blob-1.3.2 {
set blob [execsql {SELECT x'0123456789abcdefABCDEF';}]
bin_to_hex [lindex $blob 0]
} {0123456789ABCDEFABCDEF}
# Try some syntax errors in blob literals.
do_test blob-1.4 {
catchsql {SELECT X'01020k304', 100}
} {1 {unrecognized token: "X'01020k304'"}}
do_test blob-1.5 {
catchsql {SELECT X'01020, 100}
} {1 {unrecognized token: "X'01020, 100"}}
do_test blob-1.6 {
catchsql {SELECT X'01020 100'}
} {1 {unrecognized token: "X'01020 100'"}}
do_test blob-1.7 {
catchsql {SELECT X'01001'}
} {1 {unrecognized token: "X'01001'"}}
do_test blob-1.8 {
catchsql {SELECT x'012/45'}
} {1 {unrecognized token: "x'012/45'"}}
do_test blob-1.9 {
catchsql {SELECT x'012:45'}
} {1 {unrecognized token: "x'012:45'"}}
do_test blob-1.10 {
catchsql {SELECT x'012@45'}
} {1 {unrecognized token: "x'012@45'"}}
do_test blob-1.11 {
catchsql {SELECT x'012G45'}
} {1 {unrecognized token: "x'012G45'"}}
do_test blob-1.12 {
catchsql {SELECT x'012`45'}
} {1 {unrecognized token: "x'012`45'"}}
do_test blob-1.13 {
catchsql {SELECT x'012g45'}
} {1 {unrecognized token: "x'012g45'"}}
# Insert a blob into a table and retrieve it.
do_test blob-2.0 {
execsql {
CREATE TABLE t1(a BLOB, b BLOB);
INSERT INTO t1 VALUES(X'123456', x'7890ab');
INSERT INTO t1 VALUES(X'CDEF12', x'345678');
}
set blobs [execsql {SELECT * FROM t1}]
set blobs2 [list]
foreach b $blobs {lappend blobs2 [bin_to_hex $b]}
set blobs2
} {123456 7890AB CDEF12 345678}
# An index on a blob column
do_test blob-2.1 {
execsql {
CREATE INDEX i1 ON t1(a);
}
set blobs [execsql {SELECT * FROM t1}]
set blobs2 [list]
foreach b $blobs {lappend blobs2 [bin_to_hex $b]}
set blobs2
} {123456 7890AB CDEF12 345678}
do_test blob-2.2 {
set blobs [execsql {SELECT * FROM t1 where a = X'123456'}]
set blobs2 [list]
foreach b $blobs {lappend blobs2 [bin_to_hex $b]}
set blobs2
} {123456 7890AB}
do_test blob-2.3 {
set blobs [execsql {SELECT * FROM t1 where a = X'CDEF12'}]
set blobs2 [list]
foreach b $blobs {lappend blobs2 [bin_to_hex $b]}
set blobs2
} {CDEF12 345678}
do_test blob-2.4 {
set blobs [execsql {SELECT * FROM t1 where a = X'CD12'}]
set blobs2 [list]
foreach b $blobs {lappend blobs2 [bin_to_hex $b]}
set blobs2
} {}
# Try to bind a blob value to a prepared statement.
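# The 3-byte blob bound below is the same value as the X'123456' literal
# inserted earlier, so the DELETE removes that row; blob-3.2 confirms that
# only the X'CDEF12'/X'345678' row remains.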
do_test blob-3.0 {
sqlite3 db2 test.db
set DB [sqlite3_connection_pointer db2]
set STMT [sqlite3_prepare $DB "DELETE FROM t1 WHERE a = ?" -1 DUMMY]
sqlite3_bind_blob $STMT 1 "\x12\x34\x56" 3
sqlite3_step $STMT
} {SQLITE_DONE}
do_test blob-3.1 {
sqlite3_finalize $STMT
db2 close
} {}
do_test blob-3.2 {
set blobs [execsql {SELECT * FROM t1}]
set blobs2 [list]
foreach b $blobs {lappend blobs2 [bin_to_hex $b]}
set blobs2
} {CDEF12 345678}
finish_test

View File

@ -0,0 +1,289 @@
puts {# 2008 December 11
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for SQLite library.
#
# This file is automatically generated from a separate TCL script.
# This file seeks to exercise integer boundary values.
#
# $Id: boundary1.tcl,v 1.3 2009/01/02 15:45:48 shane Exp $
set testdir [file dirname $argv0]
source $testdir/tester.tcl
# Many of the boundary tests depend on a working 64-bit implementation.
if {![working_64bit_int]} { finish_test; return }
}
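# Note: this generator writes a complete test script to standard output; the
# checked-in boundary1.test is presumably produced by capturing that output,
# e.g. with something like "tclsh boundary1.tcl > boundary1.test".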
expr srand(0)
# Generate interesting boundary numbers
#
foreach x {
0
1
0x7f
0x7fff
0x7fffff
0x7fffffff
0x7fffffffff
0x7fffffffffff
0x7fffffffffffff
0x7fffffffffffffff
} {
set x [expr {wide($x)}]
set boundarynum($x) 1
set boundarynum([expr {$x+1}]) 1
set boundarynum([expr {-($x+1)}]) 1
set boundarynum([expr {-($x+2)}]) 1
set boundarynum([expr {$x+$x+1}]) 1
set boundarynum([expr {$x+$x+2}]) 1
}
set x [expr {wide(127)}]
for {set i 1} {$i<=9} {incr i} {
set boundarynum($x) 1
set boundarynum([expr {$x+1}]) 1
set x [expr {wide($x*128 + 127)}]
}
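# For example, x = 0x7f in the foreach above contributes 127, 128, -128,
# -129, 255 and 256, while the loop just above adds 127/128, 16383/16384,
# 2097151/2097152 and so on (the largest values representable in 1, 2, 3,
# ... bytes of a 7-bits-per-byte varint encoding).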
# Scramble the $inlist into a random order.
#
proc scramble {inlist} {
set y {}
foreach x $inlist {
lappend y [list [expr {rand()}] $x]
}
set y [lsort $y]
set outlist {}
foreach x $y {
lappend outlist [lindex $x 1]
}
return $outlist
}
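# e.g. scramble {a b c d} returns the same four elements in a pseudo-random
# order such as {c a d b}; the order is repeatable because of srand(0) above.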
# A simple selection sort. Not trying to be efficient.
#
proc sort {inlist} {
set outlist {}
set mn [lindex $inlist 0]
foreach x $inlist {
if {$x<$mn} {set mn $x}
}
set outlist $mn
set mx $mn
while {1} {
set valid 0
foreach x $inlist {
if {$x>$mx && (!$valid || $mn>$x)} {
set mn $x
set valid 1
}
}
if {!$valid} break
lappend outlist $mn
set mx $mn
}
return $outlist
}
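# e.g. sort {3 -1 2} returns {-1 2 3}.  Duplicate values would be dropped,
# but the boundary numbers fed to it are distinct.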
# Reverse the order of a list
#
proc reverse {inlist} {
set i [llength $inlist]
set outlist {}
for {incr i -1} {$i>=0} {incr i -1} {
lappend outlist [lindex $inlist $i]
}
return $outlist
}
set nums1 [scramble [array names boundarynum]]
set nums2 [scramble [array names boundarynum]]
set tname boundary1
puts "do_test $tname-1.1 \173"
puts " db eval \173"
puts " CREATE TABLE t1(a,x);"
set a 0
foreach r $nums1 {
incr a
set t1ra($r) $a
set t1ar($a) $r
set x [format %08x%08x [expr {wide($r)>>32}] $r]
set t1rx($r) $x
set t1xr($x) $r
puts " INSERT INTO t1(oid,a,x) VALUES($r,$a,'$x');"
}
puts " CREATE INDEX t1i1 ON t1(a);"
puts " CREATE INDEX t1i2 ON t1(x);"
puts " \175"
puts "\175 {}"
puts "do_test $tname-1.2 \173"
puts " db eval \173"
puts " SELECT count(*) FROM t1"
puts " \175"
puts "\175 {64}"
set nums3 $nums2
lappend nums3 9.22337303685477580800e+18
lappend nums3 -9.22337303685477580800e+18
set i 0
foreach r $nums3 {
incr i
if {abs($r)<9.22337203685477580800e+18} {
set x $t1rx($r)
set a $t1ra($r)
set r5 $r.5
set r0 $r.0
puts "do_test $tname-2.$i.1 \173"
puts " db eval \173"
puts " SELECT * FROM t1 WHERE rowid=$r"
puts " \175"
puts "\175 {$a $x}"
puts "do_test $tname-2.$i.2 \173"
puts " db eval \173"
puts " SELECT rowid, a FROM t1 WHERE x='$x'"
puts " \175"
puts "\175 {$r $a}"
puts "do_test $tname-2.$i.3 \173"
puts " db eval \173"
puts " SELECT rowid, x FROM t1 WHERE a=$a"
puts " \175"
puts "\175 {$r $x}"
}
foreach op {> >= < <=} subno {gt ge lt le} {
################################################################ 2.x.y.1
set rset {}
set aset {}
foreach rx $nums2 {
if "\$rx $op \$r" {
lappend rset $rx
lappend aset $t1ra($rx)
}
}
puts "do_test $tname-2.$i.$subno.1 \173"
puts " db eval \173"
puts " SELECT a FROM t1 WHERE rowid $op $r ORDER BY a"
puts " \175"
puts "\175 {[sort $aset]}"
################################################################ 2.x.y.2
puts "do_test $tname-2.$i.$subno.2 \173"
puts " db eval \173"
puts " SELECT a FROM t1 WHERE rowid $op $r ORDER BY a DESC"
puts " \175"
puts "\175 {[reverse [sort $aset]]}"
################################################################ 2.x.y.3
set aset {}
foreach rx [sort $rset] {
lappend aset $t1ra($rx)
}
puts "do_test $tname-2.$i.$subno.3 \173"
puts " db eval \173"
puts " SELECT a FROM t1 WHERE rowid $op $r ORDER BY rowid"
puts " \175"
puts "\175 {$aset}"
################################################################ 2.x.y.4
set aset {}
foreach rx [reverse [sort $rset]] {
lappend aset $t1ra($rx)
}
puts "do_test $tname-2.$i.$subno.4 \173"
puts " db eval \173"
puts " SELECT a FROM t1 WHERE rowid $op $r ORDER BY rowid DESC"
puts " \175"
puts "\175 {$aset}"
################################################################ 2.x.y.5
set aset {}
set xset {}
foreach rx $rset {
lappend xset $t1rx($rx)
}
foreach x [sort $xset] {
set rx $t1xr($x)
lappend aset $t1ra($rx)
}
puts "do_test $tname-2.$i.$subno.5 \173"
puts " db eval \173"
puts " SELECT a FROM t1 WHERE rowid $op $r ORDER BY x"
puts " \175"
puts "\175 {$aset}"
################################################################ 2.x.y.10
if {abs($r)>9223372036854775808 || [string length $r5]>15} continue
set rset {}
set aset {}
foreach rx $nums2 {
if "\$rx $op \$r0" {
lappend rset $rx
}
}
foreach rx [sort $rset] {
lappend aset $t1ra($rx)
}
puts "do_test $tname-2.$i.$subno.10 \173"
puts " db eval \173"
puts " SELECT a FROM t1 WHERE rowid $op $r0 ORDER BY rowid"
puts " \175"
puts "\175 {$aset}"
################################################################ 2.x.y.11
set aset {}
foreach rx [reverse [sort $rset]] {
lappend aset $t1ra($rx)
}
puts "do_test $tname-2.$i.$subno.11 \173"
puts " db eval \173"
puts " SELECT a FROM t1 WHERE rowid $op $r0 ORDER BY rowid DESC"
puts " \175"
puts "\175 {$aset}"
################################################################ 2.x.y.12
set rset {}
set aset {}
foreach rx $nums2 {
if "\$rx $op \$r5" {
lappend rset $rx
}
}
foreach rx [sort $rset] {
lappend aset $t1ra($rx)
}
puts "do_test $tname-2.$i.$subno.12 \173"
puts " db eval \173"
puts " SELECT a FROM t1 WHERE rowid $op $r5 ORDER BY rowid"
puts " \175"
puts "\175 {$aset}"
################################################################ 2.x.y.13
set aset {}
foreach rx [reverse [sort $rset]] {
lappend aset $t1ra($rx)
}
puts "do_test $tname-2.$i.$subno.13 \173"
puts " db eval \173"
puts " SELECT a FROM t1 WHERE rowid $op $r5 ORDER BY rowid DESC"
puts " \175"
puts "\175 {$aset}"
}
}
puts {finish_test}

File diff suppressed because it is too large

View File

@ -0,0 +1,445 @@
puts {# 2008 December 11
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for SQLite library.
#
# This file is automatically generated from a separate TCL script.
# This file seeks to exercise integer boundary values.
#
# $Id: boundary2.tcl,v 1.3 2009/01/02 15:45:48 shane Exp $
set testdir [file dirname $argv0]
source $testdir/tester.tcl
# Many of the boundary tests depend on a working 64-bit implementation.
if {![working_64bit_int]} { finish_test; return }
}
expr srand(0)
# Generate interesting boundary numbers
#
foreach x {
0
1
0x7f
0x7fff
0x7fffff
0x7fffffff
0x7fffffffff
0x7fffffffffff
0x7fffffffffffff
0x7fffffffffffffff
} {
set x [expr {wide($x)}]
set boundarynum($x) 1
set boundarynum([expr {$x+1}]) 1
set boundarynum([expr {-($x+1)}]) 1
set boundarynum([expr {-($x+2)}]) 1
set boundarynum([expr {$x+$x+1}]) 1
set boundarynum([expr {$x+$x+2}]) 1
}
set x [expr {wide(127)}]
for {set i 1} {$i<=9} {incr i} {
set boundarynum($x) 1
set boundarynum([expr {$x+1}]) 1
set x [expr {wide($x*128 + 127)}]
}
# Scramble the $inlist into a random order.
#
proc scramble {inlist} {
set y {}
foreach x $inlist {
lappend y [list [expr {rand()}] $x]
}
set y [lsort $y]
set outlist {}
foreach x $y {
lappend outlist [lindex $x 1]
}
return $outlist
}
# A simple selection sort. Not trying to be efficient.
#
proc sort {inlist} {
set outlist {}
set mn [lindex $inlist 0]
foreach x $inlist {
if {$x<$mn} {set mn $x}
}
set outlist $mn
set mx $mn
while {1} {
set valid 0
foreach x $inlist {
if {$x>$mx && (!$valid || $mn>$x)} {
set mn $x
set valid 1
}
}
if {!$valid} break
lappend outlist $mn
set mx $mn
}
return $outlist
}
# Reverse the order of a list
#
proc reverse {inlist} {
set i [llength $inlist]
set outlist {}
for {incr i -1} {$i>=0} {incr i -1} {
lappend outlist [lindex $inlist $i]
}
return $outlist
}
set nums1 [scramble [array names boundarynum]]
set nums2 [scramble [array names boundarynum]]
set tname boundary2
puts "do_test $tname-1.1 \173"
puts " db eval \173"
puts " CREATE TABLE t1(r INTEGER, a INTEGER, x TEXT);"
set a 0
foreach r $nums1 {
incr a
set t1ra($r) $a
set t1ar($a) $r
set x [format %08x%08x [expr {wide($r)>>32}] $r]
set t1rx($r) $x
set t1xr($x) $r
puts " INSERT INTO t1 VALUES($r,$a,'$x');"
}
puts " CREATE INDEX t1i1 ON t1(r);"
puts " CREATE INDEX t1i2 ON t1(a);"
puts " CREATE INDEX t1i3 ON t1(x);"
puts " \175"
puts "\175 {}"
puts "do_test $tname-1.2 \173"
puts " db eval \173"
puts " SELECT count(*) FROM t1"
puts " \175"
puts "\175 {64}"
set nums3 $nums2
lappend nums3 9.22337303685477580800e+18
lappend nums3 -9.22337303685477580800e+18
set i 0
foreach r $nums3 {
incr i
if {abs($r)<9.22337203685477580800e+18} {
set x $t1rx($r)
set a $t1ra($r)
set r5 $r.5
set r0 $r.0
puts "do_test $tname-2.$i.1 \173"
puts " db eval \173"
puts " SELECT * FROM t1 WHERE r=$r"
puts " \175"
puts "\175 {$r $a $x}"
puts "do_test $tname-2.$i.2 \173"
puts " db eval \173"
puts " SELECT r, a FROM t1 WHERE x='$x'"
puts " \175"
puts "\175 {$r $a}"
puts "do_test $tname-2.$i.3 \173"
puts " db eval \173"
puts " SELECT r, x FROM t1 WHERE a=$a"
puts " \175"
puts "\175 {$r $x}"
}
foreach op {> >= < <=} subno {gt ge lt le} {
################################################################ 2.x.y.1
set rset {}
set aset {}
foreach rx $nums2 {
if "\$rx $op \$r" {
lappend rset $rx
lappend aset $t1ra($rx)
}
}
puts "do_test $tname-2.$i.$subno.1 \173"
puts " db eval \173"
puts " SELECT a FROM t1 WHERE r $op $r ORDER BY a"
puts " \175"
puts "\175 {[sort $aset]}"
################################################################ 2.x.y.2
puts "do_test $tname-2.$i.$subno.2 \173"
puts " db eval \173"
puts " SELECT a FROM t1 WHERE r $op $r ORDER BY a DESC"
puts " \175"
puts "\175 {[reverse [sort $aset]]}"
################################################################ 2.x.y.3
set aset {}
foreach rx [sort $rset] {
lappend aset $t1ra($rx)
}
puts "do_test $tname-2.$i.$subno.3 \173"
puts " db eval \173"
puts " SELECT a FROM t1 WHERE r $op $r ORDER BY r"
puts " \175"
puts "\175 {$aset}"
################################################################ 2.x.y.4
set aset {}
foreach rx [reverse [sort $rset]] {
lappend aset $t1ra($rx)
}
puts "do_test $tname-2.$i.$subno.4 \173"
puts " db eval \173"
puts " SELECT a FROM t1 WHERE r $op $r ORDER BY r DESC"
puts " \175"
puts "\175 {$aset}"
################################################################ 2.x.y.5
set aset {}
set xset {}
foreach rx $rset {
lappend xset $t1rx($rx)
}
foreach x [sort $xset] {
set rx $t1xr($x)
lappend aset $t1ra($rx)
}
puts "do_test $tname-2.$i.$subno.5 \173"
puts " db eval \173"
puts " SELECT a FROM t1 WHERE r $op $r ORDER BY x"
puts " \175"
puts "\175 {$aset}"
################################################################ 2.x.y.10
if {abs($r)>9223372036854775808 || [string length $r5]>15} continue
set rset {}
set aset {}
foreach rx $nums2 {
if "\$rx $op \$r0" {
lappend rset $rx
}
}
foreach rx [sort $rset] {
lappend aset $t1ra($rx)
}
puts "do_test $tname-2.$i.$subno.10 \173"
puts " db eval \173"
puts " SELECT a FROM t1 WHERE r $op $r0 ORDER BY r"
puts " \175"
puts "\175 {$aset}"
################################################################ 2.x.y.11
set aset {}
foreach rx [reverse [sort $rset]] {
lappend aset $t1ra($rx)
}
puts "do_test $tname-2.$i.$subno.11 \173"
puts " db eval \173"
puts " SELECT a FROM t1 WHERE r $op $r0 ORDER BY r DESC"
puts " \175"
puts "\175 {$aset}"
################################################################ 2.x.y.12
set rset {}
set aset {}
foreach rx $nums2 {
if "\$rx $op \$r5" {
lappend rset $rx
}
}
foreach rx [sort $rset] {
lappend aset $t1ra($rx)
}
puts "do_test $tname-2.$i.$subno.12 \173"
puts " db eval \173"
puts " SELECT a FROM t1 WHERE r $op $r5 ORDER BY r"
puts " \175"
puts "\175 {$aset}"
################################################################ 2.x.y.13
set aset {}
foreach rx [reverse [sort $rset]] {
lappend aset $t1ra($rx)
}
puts "do_test $tname-2.$i.$subno.13 \173"
puts " db eval \173"
puts " SELECT a FROM t1 WHERE r $op $r5 ORDER BY r DESC"
puts " \175"
puts "\175 {$aset}"
}
}
puts "do_test $tname-3.1 \173"
puts " db eval \173"
puts " DROP INDEX t1i1;"
puts " DROP INDEX t1i2;"
puts " DROP INDEX t1i3;"
puts " \175"
puts "\175 {}"
set i 0
foreach r $nums3 {
incr i
if {abs($r)<9.22337203685477580800e+18} {
set x $t1rx($r)
set a $t1ra($r)
set r5 $r.5
set r0 $r.0
puts "do_test $tname-4.$i.1 \173"
puts " db eval \173"
puts " SELECT * FROM t1 WHERE r=$r"
puts " \175"
puts "\175 {$r $a $x}"
puts "do_test $tname-4.$i.2 \173"
puts " db eval \173"
puts " SELECT r, a FROM t1 WHERE x='$x'"
puts " \175"
puts "\175 {$r $a}"
puts "do_test $tname-4.$i.3 \173"
puts " db eval \173"
puts " SELECT r, x FROM t1 WHERE a=$a"
puts " \175"
puts "\175 {$r $x}"
}
foreach op {> >= < <=} subno {gt ge lt le} {
################################################################ 2.x.y.1
set rset {}
set aset {}
foreach rx $nums2 {
if "\$rx $op \$r" {
lappend rset $rx
lappend aset $t1ra($rx)
}
}
puts "do_test $tname-4.$i.$subno.1 \173"
puts " db eval \173"
puts " SELECT a FROM t1 WHERE r $op $r ORDER BY a"
puts " \175"
puts "\175 {[sort $aset]}"
################################################################ 2.x.y.2
puts "do_test $tname-4.$i.$subno.2 \173"
puts " db eval \173"
puts " SELECT a FROM t1 WHERE r $op $r ORDER BY a DESC"
puts " \175"
puts "\175 {[reverse [sort $aset]]}"
################################################################ 2.x.y.3
set aset {}
foreach rx [sort $rset] {
lappend aset $t1ra($rx)
}
puts "do_test $tname-4.$i.$subno.3 \173"
puts " db eval \173"
puts " SELECT a FROM t1 WHERE r $op $r ORDER BY r"
puts " \175"
puts "\175 {$aset}"
################################################################ 2.x.y.4
set aset {}
foreach rx [reverse [sort $rset]] {
lappend aset $t1ra($rx)
}
puts "do_test $tname-4.$i.$subno.4 \173"
puts " db eval \173"
puts " SELECT a FROM t1 WHERE r $op $r ORDER BY r DESC"
puts " \175"
puts "\175 {$aset}"
################################################################ 2.x.y.5
set aset {}
set xset {}
foreach rx $rset {
lappend xset $t1rx($rx)
}
foreach x [sort $xset] {
set rx $t1xr($x)
lappend aset $t1ra($rx)
}
puts "do_test $tname-4.$i.$subno.5 \173"
puts " db eval \173"
puts " SELECT a FROM t1 WHERE r $op $r ORDER BY x"
puts " \175"
puts "\175 {$aset}"
################################################################ 2.x.y.10
if {abs($r)>9223372036854775808 || [string length $r5]>15} continue
set rset {}
set aset {}
foreach rx $nums2 {
if "\$rx $op \$r0" {
lappend rset $rx
}
}
foreach rx [sort $rset] {
lappend aset $t1ra($rx)
}
puts "do_test $tname-4.$i.$subno.10 \173"
puts " db eval \173"
puts " SELECT a FROM t1 WHERE r $op $r0 ORDER BY r"
puts " \175"
puts "\175 {$aset}"
################################################################ 2.x.y.11
set aset {}
foreach rx [reverse [sort $rset]] {
lappend aset $t1ra($rx)
}
puts "do_test $tname-4.$i.$subno.11 \173"
puts " db eval \173"
puts " SELECT a FROM t1 WHERE r $op $r0 ORDER BY r DESC"
puts " \175"
puts "\175 {$aset}"
################################################################ 2.x.y.12
set rset {}
set aset {}
foreach rx $nums2 {
if "\$rx $op \$r5" {
lappend rset $rx
}
}
foreach rx [sort $rset] {
lappend aset $t1ra($rx)
}
puts "do_test $tname-4.$i.$subno.12 \173"
puts " db eval \173"
puts " SELECT a FROM t1 WHERE r $op $r5 ORDER BY r"
puts " \175"
puts "\175 {$aset}"
################################################################ 2.x.y.13
set aset {}
foreach rx [reverse [sort $rset]] {
lappend aset $t1ra($rx)
}
puts "do_test $tname-4.$i.$subno.13 \173"
puts " db eval \173"
puts " SELECT a FROM t1 WHERE r $op $r5 ORDER BY r DESC"
puts " \175"
puts "\175 {$aset}"
}
}
puts {finish_test}

File diff suppressed because it is too large

View File

@ -0,0 +1,289 @@
puts {# 2008 December 11
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for SQLite library.
#
# This file is automatically generated from a separate TCL script.
# This file seeks to exercise integer boundary values.
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
# Many of the boundary tests depend on a working 64-bit implementation.
if {![working_64bit_int]} { finish_test; return }
}
expr srand(0)
# Generate interesting boundary numbers
#
foreach x {
0
1
0x7f
0x7fff
0x7fffff
0x7fffffff
0x7fffffffff
0x7fffffffffff
0x7fffffffffffff
0x7fffffffffffffff
} {
set x [expr {wide($x)}]
set boundarynum($x) 1
set boundarynum([expr {wide($x+1)}]) 1
set boundarynum([expr {wide(-($x+1))}]) 1
set boundarynum([expr {wide(-($x+2))}]) 1
set boundarynum([expr {wide($x+$x+1)}]) 1
set boundarynum([expr {wide($x+$x+2)}]) 1
}
set x [expr {wide(127)}]
for {set i 1} {$i<=9} {incr i} {
set boundarynum($x) 1
set boundarynum([expr {wide($x+1)}]) 1
set x [expr {wide($x*128 + 127)}]
}
# Scramble the $inlist into a random order.
#
proc scramble {inlist} {
set y {}
foreach x $inlist {
lappend y [list [expr {rand()}] $x]
}
set y [lsort $y]
set outlist {}
foreach x $y {
lappend outlist [lindex $x 1]
}
return $outlist
}
# A simple selection sort. Not trying to be efficient.
#
proc sort {inlist} {
set outlist {}
set mn [lindex $inlist 0]
foreach x $inlist {
if {$x<$mn} {set mn $x}
}
set outlist $mn
set mx $mn
while {1} {
set valid 0
foreach x $inlist {
if {$x>$mx && (!$valid || $mn>$x)} {
set mn $x
set valid 1
}
}
if {!$valid} break
lappend outlist $mn
set mx $mn
}
return $outlist
}
# Reverse the order of a list
#
proc reverse {inlist} {
set i [llength $inlist]
set outlist {}
for {incr i -1} {$i>=0} {incr i -1} {
lappend outlist [lindex $inlist $i]
}
return $outlist
}
set nums1 [scramble [array names boundarynum]]
set nums2 [scramble [array names boundarynum]]
set tname boundary3
puts "do_test $tname-1.1 \173"
puts " db eval \173"
puts " CREATE TABLE t1(a,x);"
set a 0
foreach r $nums1 {
incr a
set t1ra($r) $a
set t1ar($a) $r
set x [format %016x [expr {wide($r)}]]
set t1rx($r) $x
set t1xr($x) $r
puts " INSERT INTO t1(oid,a,x) VALUES($r,$a,'$x');"
}
puts " CREATE INDEX t1i1 ON t1(a);"
puts " CREATE INDEX t1i2 ON t1(x);"
puts " \175"
puts "\175 {}"
puts "do_test $tname-1.2 \173"
puts " db eval \173"
puts " SELECT count(*) FROM t1"
puts " \175"
puts "\175 {64}"
puts "do_test $tname-1.3 \173"
puts " db eval \173"
puts " CREATE TABLE t2(r,a);"
puts " INSERT INTO t2 SELECT rowid, a FROM t1;"
puts " CREATE INDEX t2i1 ON t2(r);"
puts " CREATE INDEX t2i2 ON t2(a);"
puts " INSERT INTO t2 VALUES(9.22337303685477580800e+18,65);"
set t1ra(9.22337303685477580800e+18) 65
set t1ar(65) 9.22337303685477580800e+18
puts " INSERT INTO t2 VALUES(-9.22337303685477580800e+18,66);"
set t1ra(-9.22337303685477580800e+18) 66
set t1ar(66) -9.22337303685477580800e+18
puts " SELECT count(*) FROM t2;"
puts " \175"
puts "\175 {66}"
set nums3 $nums2
lappend nums3 9.22337303685477580800e+18
lappend nums3 -9.22337303685477580800e+18
set i 0
foreach r $nums3 {
incr i
set r5 $r.5
set r0 $r.0
if {abs($r)<0x7FFFFFFFFFFFFFFF || $r==-9223372036854775808} {
set x $t1rx($r)
set a $t1ra($r)
puts "do_test $tname-2.$i.1 \173"
puts " db eval \173"
puts " SELECT t1.* FROM t1, t2 WHERE t1.rowid=$r AND t2.a=t1.a"
puts " \175"
puts "\175 {$a $x}"
puts "do_test $tname-2.$i.2 \173"
puts " db eval \173"
puts " SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='$x'"
puts " \175"
puts "\175 {$r $a}"
puts "do_test $tname-2.$i.3 \173"
puts " db eval \173"
puts " SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=$a"
puts " \175"
puts "\175 {$r $x}"
}
foreach op {> >= < <=} subno {gt ge lt le} {
################################################################ 2.x.y.1
set rset {}
set aset {}
foreach rx $nums2 {
if "\$rx $op \$r" {
lappend rset $rx
lappend aset $t1ra($rx)
}
}
puts "do_test $tname-2.$i.$subno.1 \173"
puts " db eval \173"
puts " SELECT t2.a FROM t1 JOIN t2 USING(a)"
puts " WHERE t1.rowid $op $r ORDER BY t2.a"
puts " \175"
puts "\175 {[sort $aset]}"
################################################################ 2.x.y.2
puts "do_test $tname-2.$i.$subno.2 \173"
puts " db eval \173"
puts " SELECT t2.a FROM t2 NATURAL JOIN t1"
puts " WHERE t1.rowid $op $r ORDER BY t1.a DESC"
puts " \175"
puts "\175 {[reverse [sort $aset]]}"
################################################################ 2.x.y.3
set ax $t1ra($r)
set aset {}
foreach rx [sort $rset] {
lappend aset $t1ra($rx)
}
puts "do_test $tname-2.$i.$subno.3 \173"
puts " db eval \173"
puts " SELECT t1.a FROM t1 JOIN t2 ON t1.rowid $op t2.r"
puts " WHERE t2.a=$ax"
puts " ORDER BY t1.rowid"
puts " \175"
puts "\175 {$aset}"
################################################################ 2.x.y.4
set aset {}
foreach rx [reverse [sort $rset]] {
lappend aset $t1ra($rx)
}
puts "do_test $tname-2.$i.$subno.4 \173"
puts " db eval \173"
puts " SELECT t1.a FROM t1 JOIN t2 ON t1.rowid $op t2.r"
puts " WHERE t2.a=$ax"
puts " ORDER BY t1.rowid DESC"
puts " \175"
puts "\175 {$aset}"
################################################################ 2.x.y.5
set aset {}
set xset {}
foreach rx $rset {
lappend xset $t1rx($rx)
}
foreach x [sort $xset] {
set rx $t1xr($x)
lappend aset $t1ra($rx)
}
puts "do_test $tname-2.$i.$subno.5 \173"
puts " db eval \173"
puts " SELECT t1.a FROM t1 JOIN t2 ON t1.rowid $op t2.r"
puts " WHERE t2.a=$ax"
puts " ORDER BY x"
puts " \175"
puts "\175 {$aset}"
################################################################ 2.x.y.10
if {[string length $r5]>15} continue
set rset {}
set aset {}
foreach rx $nums2 {
if "\$rx $op \$r0" {
lappend rset $rx
}
}
foreach rx [sort $rset] {
lappend aset $t1ra($rx)
}
puts "do_test $tname-2.$i.$subno.10 \173"
puts " db eval \173"
puts " SELECT t1.a FROM t1 JOIN t2 ON t1.rowid $op CAST(t2.r AS real)"
puts " WHERE t2.a=$ax"
puts " ORDER BY t1.rowid"
puts " \175"
puts "\175 {$aset}"
################################################################ 2.x.y.11
set aset {}
foreach rx [reverse [sort $rset]] {
lappend aset $t1ra($rx)
}
puts "do_test $tname-2.$i.$subno.11 \173"
puts " db eval \173"
puts " SELECT t1.a FROM t1 JOIN t2 ON t1.rowid $op CAST(t2.r AS real)"
puts " WHERE t2.a=$ax"
puts " ORDER BY t1.rowid DESC"
puts " \175"
puts "\175 {$aset}"
}
}
puts {finish_test}

File diff suppressed because it is too large

View File

@ -0,0 +1,340 @@
puts {# 2008 December 11
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for SQLite library.
#
# This file is automatically generated from a separate TCL script.
# This file seeks to exercise integer boundary values.
#
# $Id: boundary4.tcl,v 1.3 2009/01/02 15:45:48 shane Exp $
set testdir [file dirname $argv0]
source $testdir/tester.tcl
# Many of the boundary tests depend on a working 64-bit implementation.
if {![working_64bit_int]} { finish_test; return }
ifcapable !altertable { finish_test; return }
}
expr srand(0)
# Generate interesting boundary numbers
#
foreach x {
0x7f
0x7fff
0x7fffff
0x7fffffff
0x7fffffffff
0x7fffffffffff
0x7fffffffffffff
0x7fffffffffffffff
} {
set x [expr {wide($x)}]
set boundarynum($x) 1
set boundarynum([expr {$x+1}]) 1
set boundarynum([expr {-($x+1)}]) 1
set boundarynum([expr {-($x+2)}]) 1
set boundarynum([expr {$x+$x+1}]) 1
set boundarynum([expr {$x+$x+2}]) 1
}
set x [expr {wide(127)}]
for {set i 1} {$i<=9} {incr i} {
set boundarynum($x) 1
set boundarynum([expr {$x+1}]) 1
set x [expr {wide($x*128 + 127)}]
}
# Scramble the $inlist into a random order.
#
proc scramble {inlist} {
set y {}
foreach x $inlist {
lappend y [list [expr {rand()}] $x]
}
set y [lsort $y]
set outlist {}
foreach x $y {
lappend outlist [lindex $x 1]
}
return $outlist
}
# A simple selection sort. Not trying to be efficient.
#
proc sort {inlist} {
set outlist {}
set mn [lindex $inlist 0]
foreach x $inlist {
if {$x<$mn} {set mn $x}
}
set outlist $mn
set mx $mn
while {1} {
set valid 0
foreach x $inlist {
if {$x>$mx && (!$valid || $mn>$x)} {
set mn $x
set valid 1
}
}
if {!$valid} break
lappend outlist $mn
set mx $mn
}
return $outlist
}
# Reverse the order of a list
#
proc reverse {inlist} {
set i [llength $inlist]
set outlist {}
for {incr i -1} {$i>=0} {incr i -1} {
lappend outlist [lindex $inlist $i]
}
return $outlist
}
set nums1 [scramble [array names boundarynum]]
set nums2 [scramble [array names boundarynum]]
set tname boundary4
puts "do_test $tname-1.1 \173"
puts " db eval \173"
puts " CREATE TABLE t1(a,x);"
set a 0
set all_rowid {}
set all_a {}
set all_x {}
foreach r $nums1 {
incr a
set t1ra($r) $a
set t1ar($a) $r
set x [format %08x%08x [expr {wide($r)>>32}] $r]
set t1rx($r) $x
set t1xr($x) $r
puts " INSERT INTO t1(oid,a,x) VALUES($r,$a,'$x');"
lappend all_rowid $r
lappend all_a $a
lappend all_x $x
}
puts " CREATE INDEX t1i1 ON t1(a);"
puts " CREATE INDEX t1i2 ON t1(x);"
puts " \175"
puts "\175 {}"
puts "do_test $tname-1.2 \173"
puts " db eval \173"
puts " SELECT count(*) FROM t1"
puts " \175"
puts "\175 {[llength $nums1]}"
proc maketest {tnum sql answer} {
puts "do_test $::tname-$tnum \173"
puts " db eval \173"
puts " $sql"
puts " \175"
puts "\175 {$answer}"
}
set ans {}
foreach r [sort $all_rowid] {
lappend ans $r $t1ra($r) $t1rx($r)
}
maketest 1.3 {SELECT rowid, a, x FROM t1 ORDER BY +rowid} $ans
maketest 1.4 {SELECT rowid, a, x FROM t1 ORDER BY rowid} $ans
set ans {}
foreach r [reverse [sort $all_rowid]] {
lappend ans $r $t1ra($r) $t1rx($r)
}
maketest 1.5 {SELECT rowid, a, x FROM t1 ORDER BY +rowid DESC} $ans
maketest 1.6 {SELECT rowid, a, x FROM t1 ORDER BY rowid DESC} $ans
set ans {}
foreach a [sort $all_a] {
set r $t1ar($a)
lappend ans $r $a $t1rx($r)
}
maketest 1.7 {SELECT rowid, a, x FROM t1 ORDER BY +a} $ans
maketest 1.8 {SELECT rowid, a, x FROM t1 ORDER BY a} $ans
set ans {}
foreach a [reverse [sort $all_a]] {
set r $t1ar($a)
lappend ans $r $a $t1rx($r)
}
maketest 1.9 {SELECT rowid, a, x FROM t1 ORDER BY +a DESC} $ans
maketest 1.10 {SELECT rowid, a, x FROM t1 ORDER BY a DESC} $ans
set ans {}
foreach x [sort $all_x] {
set r $t1xr($x)
lappend ans $r $t1ra($r) $x
}
maketest 1.11 {SELECT rowid, a, x FROM t1 ORDER BY +x} $ans
maketest 1.12 {SELECT rowid, a, x FROM t1 ORDER BY x} $ans
set ans {}
foreach x [reverse [sort $all_x]] {
set r $t1xr($x)
lappend ans $r $t1ra($r) $x
}
maketest 1.13 {SELECT rowid, a, x FROM t1 ORDER BY +x DESC} $ans
maketest 1.14 {SELECT rowid, a, x FROM t1 ORDER BY x DESC} $ans
maketest 2.1 {UPDATE t1 SET rowid=a, a=rowid} {}
set ans {}
foreach r [sort $all_rowid] {
lappend ans $r $t1ra($r) $t1rx($r)
}
maketest 2.3 {SELECT a, rowid, x FROM t1 ORDER BY +a} $ans
maketest 2.4 {SELECT a, rowid, x FROM t1 ORDER BY a} $ans
set ans {}
foreach r [reverse [sort $all_rowid]] {
lappend ans $r $t1ra($r) $t1rx($r)
}
maketest 2.5 {SELECT a, rowid, x FROM t1 ORDER BY +a DESC} $ans
maketest 2.6 {SELECT a, rowid, x FROM t1 ORDER BY a DESC} $ans
set ans {}
foreach a [sort $all_a] {
set r $t1ar($a)
lappend ans $r $a $t1rx($r)
}
maketest 2.7 {SELECT a, rowid, x FROM t1 ORDER BY +rowid} $ans
maketest 2.8 {SELECT a, rowid, x FROM t1 ORDER BY rowid} $ans
set ans {}
foreach a [reverse [sort $all_a]] {
set r $t1ar($a)
lappend ans $r $a $t1rx($r)
}
maketest 2.9 {SELECT a, rowid, x FROM t1 ORDER BY +rowid DESC} $ans
maketest 2.10 {SELECT a, rowid, x FROM t1 ORDER BY rowid DESC} $ans
set ans {}
foreach x [sort $all_x] {
set r $t1xr($x)
lappend ans $r $t1ra($r) $x
}
maketest 2.11 {SELECT a, rowid, x FROM t1 ORDER BY +x} $ans
maketest 2.12 {SELECT a, rowid, x FROM t1 ORDER BY x} $ans
set ans {}
foreach x [reverse [sort $all_x]] {
set r $t1xr($x)
lappend ans $r $t1ra($r) $x
}
maketest 2.13 {SELECT a, rowid, x FROM t1 ORDER BY +x DESC} $ans
maketest 2.14 {SELECT a, rowid, x FROM t1 ORDER BY x DESC} $ans
maketest 3.1 {UPDATE t1 SET rowid=a, a=rowid} {}
maketest 3.2 {ALTER TABLE t1 ADD COLUMN z; UPDATE t1 SET z=zeroblob(600)} {}
set ans {}
foreach r [sort $all_rowid] {
lappend ans $r $t1ra($r) $t1rx($r)
}
maketest 3.3 {SELECT rowid, a, x FROM t1 ORDER BY +rowid} $ans
maketest 3.4 {SELECT rowid, a, x FROM t1 ORDER BY rowid} $ans
set ans {}
foreach r [reverse [sort $all_rowid]] {
lappend ans $r $t1ra($r) $t1rx($r)
}
maketest 3.5 {SELECT rowid, a, x FROM t1 ORDER BY +rowid DESC} $ans
maketest 3.6 {SELECT rowid, a, x FROM t1 ORDER BY rowid DESC} $ans
set ans {}
foreach a [sort $all_a] {
set r $t1ar($a)
lappend ans $r $a $t1rx($r)
}
maketest 3.7 {SELECT rowid, a, x FROM t1 ORDER BY +a} $ans
maketest 3.8 {SELECT rowid, a, x FROM t1 ORDER BY a} $ans
set ans {}
foreach a [reverse [sort $all_a]] {
set r $t1ar($a)
lappend ans $r $a $t1rx($r)
}
maketest 3.9 {SELECT rowid, a, x FROM t1 ORDER BY +a DESC} $ans
maketest 3.10 {SELECT rowid, a, x FROM t1 ORDER BY a DESC} $ans
set ans {}
foreach x [sort $all_x] {
set r $t1xr($x)
lappend ans $r $t1ra($r) $x
}
maketest 3.11 {SELECT rowid, a, x FROM t1 ORDER BY +x} $ans
maketest 3.12 {SELECT rowid, a, x FROM t1 ORDER BY x} $ans
set ans {}
foreach x [reverse [sort $all_x]] {
set r $t1xr($x)
lappend ans $r $t1ra($r) $x
}
maketest 3.13 {SELECT rowid, a, x FROM t1 ORDER BY +x DESC} $ans
maketest 3.14 {SELECT rowid, a, x FROM t1 ORDER BY x DESC} $ans
maketest 4.1 {UPDATE t1 SET rowid=a, a=rowid, x=z, z=x} {}
set ans {}
foreach r [sort $all_rowid] {
lappend ans $r $t1ra($r) $t1rx($r)
}
maketest 4.3 {SELECT a, rowid, z FROM t1 ORDER BY +a} $ans
maketest 4.4 {SELECT a, rowid, z FROM t1 ORDER BY a} $ans
set ans {}
foreach r [reverse [sort $all_rowid]] {
lappend ans $r $t1ra($r) $t1rx($r)
}
maketest 4.5 {SELECT a, rowid, z FROM t1 ORDER BY +a DESC} $ans
maketest 4.6 {SELECT a, rowid, z FROM t1 ORDER BY a DESC} $ans
set ans {}
foreach a [sort $all_a] {
set r $t1ar($a)
lappend ans $r $a $t1rx($r)
}
maketest 4.7 {SELECT a, rowid, z FROM t1 ORDER BY +rowid} $ans
maketest 4.8 {SELECT a, rowid, z FROM t1 ORDER BY rowid} $ans
set ans {}
foreach a [reverse [sort $all_a]] {
set r $t1ar($a)
lappend ans $r $a $t1rx($r)
}
maketest 4.9 {SELECT a, rowid, z FROM t1 ORDER BY +rowid DESC} $ans
maketest 4.10 {SELECT a, rowid, z FROM t1 ORDER BY rowid DESC} $ans
set ans {}
foreach x [sort $all_x] {
set r $t1xr($x)
lappend ans $r $t1ra($r) $x
}
maketest 4.11 {SELECT a, rowid, z FROM t1 ORDER BY +z} $ans
maketest 4.12 {SELECT a, rowid, z FROM t1 ORDER BY z} $ans
set ans {}
foreach x [reverse [sort $all_x]] {
set r $t1xr($x)
lappend ans $r $t1ra($r) $x
}
maketest 4.13 {SELECT a, rowid, z FROM t1 ORDER BY +z DESC} $ans
maketest 4.14 {SELECT a, rowid, z FROM t1 ORDER BY z DESC} $ans
puts {finish_test}

View File

@ -0,0 +1,343 @@
# 2008 December 11
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for SQLite library.
#
# This file is automatically generated from a separate TCL script.
# This file seeks to exercise integer boundary values.
#
# $Id: boundary4.test,v 1.2 2009/01/02 15:45:48 shane Exp $

set testdir [file dirname $argv0]
source $testdir/tester.tcl

# Many of the boundary tests depend on a working 64-bit implementation.
if {![working_64bit_int]} { finish_test; return }

ifcapable !altertable { finish_test; return }

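# The table built below holds one row per signed-integer boundary value: each
# rowid is 0, -1, or a value of the form 2^N-1, 2^N, -2^N or -(2^N+1) at a
# byte-width boundary, column a is a sequence number, and column x is the
# 16-digit hex rendering of the rowid as a 64-bit two's-complement value.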
do_test boundary4-1.1 {
db eval {
CREATE TABLE t1(a,x);
INSERT INTO t1(oid,a,x) VALUES(549755813887,1,'0000007fffffffff');
INSERT INTO t1(oid,a,x) VALUES(-8388608,2,'ffffffffff800000');
INSERT INTO t1(oid,a,x) VALUES(0,3,'0000000000000000');
INSERT INTO t1(oid,a,x) VALUES(-129,4,'ffffffffffffff7f');
INSERT INTO t1(oid,a,x) VALUES(8388608,5,'0000000000800000');
INSERT INTO t1(oid,a,x) VALUES(65535,6,'000000000000ffff');
INSERT INTO t1(oid,a,x) VALUES(8388607,7,'00000000007fffff');
INSERT INTO t1(oid,a,x) VALUES(1099511627776,8,'0000010000000000');
INSERT INTO t1(oid,a,x) VALUES(16777215,9,'0000000000ffffff');
INSERT INTO t1(oid,a,x) VALUES(32767,10,'0000000000007fff');
INSERT INTO t1(oid,a,x) VALUES(4294967296,11,'0000000100000000');
INSERT INTO t1(oid,a,x) VALUES(-549755813888,12,'ffffff8000000000');
INSERT INTO t1(oid,a,x) VALUES(-140737488355328,13,'ffff800000000000');
INSERT INTO t1(oid,a,x) VALUES(256,14,'0000000000000100');
INSERT INTO t1(oid,a,x) VALUES(16777216,15,'0000000001000000');
INSERT INTO t1(oid,a,x) VALUES(72057594037927936,16,'0100000000000000');
INSERT INTO t1(oid,a,x) VALUES(-1,17,'ffffffffffffffff');
INSERT INTO t1(oid,a,x) VALUES(9223372036854775807,18,'7fffffffffffffff');
INSERT INTO t1(oid,a,x) VALUES(281474976710655,19,'0000ffffffffffff');
INSERT INTO t1(oid,a,x) VALUES(1099511627775,20,'000000ffffffffff');
INSERT INTO t1(oid,a,x) VALUES(-8388609,21,'ffffffffff7fffff');
INSERT INTO t1(oid,a,x) VALUES(32768,22,'0000000000008000');
INSERT INTO t1(oid,a,x) VALUES(36028797018963968,23,'0080000000000000');
INSERT INTO t1(oid,a,x) VALUES(-32769,24,'ffffffffffff7fff');
INSERT INTO t1(oid,a,x) VALUES(127,25,'000000000000007f');
INSERT INTO t1(oid,a,x) VALUES(-9223372036854775808,26,'8000000000000000');
INSERT INTO t1(oid,a,x) VALUES(72057594037927935,27,'00ffffffffffffff');
INSERT INTO t1(oid,a,x) VALUES(-549755813889,28,'ffffff7fffffffff');
INSERT INTO t1(oid,a,x) VALUES(255,29,'00000000000000ff');
INSERT INTO t1(oid,a,x) VALUES(-36028797018963969,30,'ff7fffffffffffff');
INSERT INTO t1(oid,a,x) VALUES(-2147483648,31,'ffffffff80000000');
INSERT INTO t1(oid,a,x) VALUES(281474976710656,32,'0001000000000000');
INSERT INTO t1(oid,a,x) VALUES(65536,33,'0000000000010000');
INSERT INTO t1(oid,a,x) VALUES(140737488355328,34,'0000800000000000');
INSERT INTO t1(oid,a,x) VALUES(549755813888,35,'0000008000000000');
INSERT INTO t1(oid,a,x) VALUES(2147483648,36,'0000000080000000');
INSERT INTO t1(oid,a,x) VALUES(4294967295,37,'00000000ffffffff');
INSERT INTO t1(oid,a,x) VALUES(140737488355327,38,'00007fffffffffff');
INSERT INTO t1(oid,a,x) VALUES(-2147483649,39,'ffffffff7fffffff');
INSERT INTO t1(oid,a,x) VALUES(36028797018963967,40,'007fffffffffffff');
INSERT INTO t1(oid,a,x) VALUES(128,41,'0000000000000080');
INSERT INTO t1(oid,a,x) VALUES(-32768,42,'ffffffffffff8000');
INSERT INTO t1(oid,a,x) VALUES(-36028797018963968,43,'ff80000000000000');
INSERT INTO t1(oid,a,x) VALUES(-140737488355329,44,'ffff7fffffffffff');
INSERT INTO t1(oid,a,x) VALUES(-128,45,'ffffffffffffff80');
INSERT INTO t1(oid,a,x) VALUES(2147483647,46,'000000007fffffff');
CREATE INDEX t1i1 ON t1(a);
CREATE INDEX t1i2 ON t1(x);
}
} {}
do_test boundary4-1.2 {
db eval {
SELECT count(*) FROM t1
}
} {46}
do_test boundary4-1.3 {
db eval {
SELECT rowid, a, x FROM t1 ORDER BY +rowid
}
} {-9223372036854775808 26 8000000000000000 -36028797018963969 30 ff7fffffffffffff -36028797018963968 43 ff80000000000000 -140737488355329 44 ffff7fffffffffff -140737488355328 13 ffff800000000000 -549755813889 28 ffffff7fffffffff -549755813888 12 ffffff8000000000 -2147483649 39 ffffffff7fffffff -2147483648 31 ffffffff80000000 -8388609 21 ffffffffff7fffff -8388608 2 ffffffffff800000 -32769 24 ffffffffffff7fff -32768 42 ffffffffffff8000 -129 4 ffffffffffffff7f -128 45 ffffffffffffff80 -1 17 ffffffffffffffff 0 3 0000000000000000 127 25 000000000000007f 128 41 0000000000000080 255 29 00000000000000ff 256 14 0000000000000100 32767 10 0000000000007fff 32768 22 0000000000008000 65535 6 000000000000ffff 65536 33 0000000000010000 8388607 7 00000000007fffff 8388608 5 0000000000800000 16777215 9 0000000000ffffff 16777216 15 0000000001000000 2147483647 46 000000007fffffff 2147483648 36 0000000080000000 4294967295 37 00000000ffffffff 4294967296 11 0000000100000000 549755813887 1 0000007fffffffff 549755813888 35 0000008000000000 1099511627775 20 000000ffffffffff 1099511627776 8 0000010000000000 140737488355327 38 00007fffffffffff 140737488355328 34 0000800000000000 281474976710655 19 0000ffffffffffff 281474976710656 32 0001000000000000 36028797018963967 40 007fffffffffffff 36028797018963968 23 0080000000000000 72057594037927935 27 00ffffffffffffff 72057594037927936 16 0100000000000000 9223372036854775807 18 7fffffffffffffff}
do_test boundary4-1.4 {
db eval {
SELECT rowid, a, x FROM t1 ORDER BY rowid
}
} {-9223372036854775808 26 8000000000000000 -36028797018963969 30 ff7fffffffffffff -36028797018963968 43 ff80000000000000 -140737488355329 44 ffff7fffffffffff -140737488355328 13 ffff800000000000 -549755813889 28 ffffff7fffffffff -549755813888 12 ffffff8000000000 -2147483649 39 ffffffff7fffffff -2147483648 31 ffffffff80000000 -8388609 21 ffffffffff7fffff -8388608 2 ffffffffff800000 -32769 24 ffffffffffff7fff -32768 42 ffffffffffff8000 -129 4 ffffffffffffff7f -128 45 ffffffffffffff80 -1 17 ffffffffffffffff 0 3 0000000000000000 127 25 000000000000007f 128 41 0000000000000080 255 29 00000000000000ff 256 14 0000000000000100 32767 10 0000000000007fff 32768 22 0000000000008000 65535 6 000000000000ffff 65536 33 0000000000010000 8388607 7 00000000007fffff 8388608 5 0000000000800000 16777215 9 0000000000ffffff 16777216 15 0000000001000000 2147483647 46 000000007fffffff 2147483648 36 0000000080000000 4294967295 37 00000000ffffffff 4294967296 11 0000000100000000 549755813887 1 0000007fffffffff 549755813888 35 0000008000000000 1099511627775 20 000000ffffffffff 1099511627776 8 0000010000000000 140737488355327 38 00007fffffffffff 140737488355328 34 0000800000000000 281474976710655 19 0000ffffffffffff 281474976710656 32 0001000000000000 36028797018963967 40 007fffffffffffff 36028797018963968 23 0080000000000000 72057594037927935 27 00ffffffffffffff 72057594037927936 16 0100000000000000 9223372036854775807 18 7fffffffffffffff}
do_test boundary4-1.5 {
db eval {
SELECT rowid, a, x FROM t1 ORDER BY +rowid DESC
}
} {9223372036854775807 18 7fffffffffffffff 72057594037927936 16 0100000000000000 72057594037927935 27 00ffffffffffffff 36028797018963968 23 0080000000000000 36028797018963967 40 007fffffffffffff 281474976710656 32 0001000000000000 281474976710655 19 0000ffffffffffff 140737488355328 34 0000800000000000 140737488355327 38 00007fffffffffff 1099511627776 8 0000010000000000 1099511627775 20 000000ffffffffff 549755813888 35 0000008000000000 549755813887 1 0000007fffffffff 4294967296 11 0000000100000000 4294967295 37 00000000ffffffff 2147483648 36 0000000080000000 2147483647 46 000000007fffffff 16777216 15 0000000001000000 16777215 9 0000000000ffffff 8388608 5 0000000000800000 8388607 7 00000000007fffff 65536 33 0000000000010000 65535 6 000000000000ffff 32768 22 0000000000008000 32767 10 0000000000007fff 256 14 0000000000000100 255 29 00000000000000ff 128 41 0000000000000080 127 25 000000000000007f 0 3 0000000000000000 -1 17 ffffffffffffffff -128 45 ffffffffffffff80 -129 4 ffffffffffffff7f -32768 42 ffffffffffff8000 -32769 24 ffffffffffff7fff -8388608 2 ffffffffff800000 -8388609 21 ffffffffff7fffff -2147483648 31 ffffffff80000000 -2147483649 39 ffffffff7fffffff -549755813888 12 ffffff8000000000 -549755813889 28 ffffff7fffffffff -140737488355328 13 ffff800000000000 -140737488355329 44 ffff7fffffffffff -36028797018963968 43 ff80000000000000 -36028797018963969 30 ff7fffffffffffff -9223372036854775808 26 8000000000000000}
do_test boundary4-1.6 {
db eval {
SELECT rowid, a, x FROM t1 ORDER BY rowid DESC
}
} {9223372036854775807 18 7fffffffffffffff 72057594037927936 16 0100000000000000 72057594037927935 27 00ffffffffffffff 36028797018963968 23 0080000000000000 36028797018963967 40 007fffffffffffff 281474976710656 32 0001000000000000 281474976710655 19 0000ffffffffffff 140737488355328 34 0000800000000000 140737488355327 38 00007fffffffffff 1099511627776 8 0000010000000000 1099511627775 20 000000ffffffffff 549755813888 35 0000008000000000 549755813887 1 0000007fffffffff 4294967296 11 0000000100000000 4294967295 37 00000000ffffffff 2147483648 36 0000000080000000 2147483647 46 000000007fffffff 16777216 15 0000000001000000 16777215 9 0000000000ffffff 8388608 5 0000000000800000 8388607 7 00000000007fffff 65536 33 0000000000010000 65535 6 000000000000ffff 32768 22 0000000000008000 32767 10 0000000000007fff 256 14 0000000000000100 255 29 00000000000000ff 128 41 0000000000000080 127 25 000000000000007f 0 3 0000000000000000 -1 17 ffffffffffffffff -128 45 ffffffffffffff80 -129 4 ffffffffffffff7f -32768 42 ffffffffffff8000 -32769 24 ffffffffffff7fff -8388608 2 ffffffffff800000 -8388609 21 ffffffffff7fffff -2147483648 31 ffffffff80000000 -2147483649 39 ffffffff7fffffff -549755813888 12 ffffff8000000000 -549755813889 28 ffffff7fffffffff -140737488355328 13 ffff800000000000 -140737488355329 44 ffff7fffffffffff -36028797018963968 43 ff80000000000000 -36028797018963969 30 ff7fffffffffffff -9223372036854775808 26 8000000000000000}
do_test boundary4-1.7 {
db eval {
SELECT rowid, a, x FROM t1 ORDER BY +a
}
} {549755813887 1 0000007fffffffff -8388608 2 ffffffffff800000 0 3 0000000000000000 -129 4 ffffffffffffff7f 8388608 5 0000000000800000 65535 6 000000000000ffff 8388607 7 00000000007fffff 1099511627776 8 0000010000000000 16777215 9 0000000000ffffff 32767 10 0000000000007fff 4294967296 11 0000000100000000 -549755813888 12 ffffff8000000000 -140737488355328 13 ffff800000000000 256 14 0000000000000100 16777216 15 0000000001000000 72057594037927936 16 0100000000000000 -1 17 ffffffffffffffff 9223372036854775807 18 7fffffffffffffff 281474976710655 19 0000ffffffffffff 1099511627775 20 000000ffffffffff -8388609 21 ffffffffff7fffff 32768 22 0000000000008000 36028797018963968 23 0080000000000000 -32769 24 ffffffffffff7fff 127 25 000000000000007f -9223372036854775808 26 8000000000000000 72057594037927935 27 00ffffffffffffff -549755813889 28 ffffff7fffffffff 255 29 00000000000000ff -36028797018963969 30 ff7fffffffffffff -2147483648 31 ffffffff80000000 281474976710656 32 0001000000000000 65536 33 0000000000010000 140737488355328 34 0000800000000000 549755813888 35 0000008000000000 2147483648 36 0000000080000000 4294967295 37 00000000ffffffff 140737488355327 38 00007fffffffffff -2147483649 39 ffffffff7fffffff 36028797018963967 40 007fffffffffffff 128 41 0000000000000080 -32768 42 ffffffffffff8000 -36028797018963968 43 ff80000000000000 -140737488355329 44 ffff7fffffffffff -128 45 ffffffffffffff80 2147483647 46 000000007fffffff}
do_test boundary4-1.8 {
db eval {
SELECT rowid, a, x FROM t1 ORDER BY a
}
} {549755813887 1 0000007fffffffff -8388608 2 ffffffffff800000 0 3 0000000000000000 -129 4 ffffffffffffff7f 8388608 5 0000000000800000 65535 6 000000000000ffff 8388607 7 00000000007fffff 1099511627776 8 0000010000000000 16777215 9 0000000000ffffff 32767 10 0000000000007fff 4294967296 11 0000000100000000 -549755813888 12 ffffff8000000000 -140737488355328 13 ffff800000000000 256 14 0000000000000100 16777216 15 0000000001000000 72057594037927936 16 0100000000000000 -1 17 ffffffffffffffff 9223372036854775807 18 7fffffffffffffff 281474976710655 19 0000ffffffffffff 1099511627775 20 000000ffffffffff -8388609 21 ffffffffff7fffff 32768 22 0000000000008000 36028797018963968 23 0080000000000000 -32769 24 ffffffffffff7fff 127 25 000000000000007f -9223372036854775808 26 8000000000000000 72057594037927935 27 00ffffffffffffff -549755813889 28 ffffff7fffffffff 255 29 00000000000000ff -36028797018963969 30 ff7fffffffffffff -2147483648 31 ffffffff80000000 281474976710656 32 0001000000000000 65536 33 0000000000010000 140737488355328 34 0000800000000000 549755813888 35 0000008000000000 2147483648 36 0000000080000000 4294967295 37 00000000ffffffff 140737488355327 38 00007fffffffffff -2147483649 39 ffffffff7fffffff 36028797018963967 40 007fffffffffffff 128 41 0000000000000080 -32768 42 ffffffffffff8000 -36028797018963968 43 ff80000000000000 -140737488355329 44 ffff7fffffffffff -128 45 ffffffffffffff80 2147483647 46 000000007fffffff}
do_test boundary4-1.9 {
db eval {
SELECT rowid, a, x FROM t1 ORDER BY +a DESC
}
} {2147483647 46 000000007fffffff -128 45 ffffffffffffff80 -140737488355329 44 ffff7fffffffffff -36028797018963968 43 ff80000000000000 -32768 42 ffffffffffff8000 128 41 0000000000000080 36028797018963967 40 007fffffffffffff -2147483649 39 ffffffff7fffffff 140737488355327 38 00007fffffffffff 4294967295 37 00000000ffffffff 2147483648 36 0000000080000000 549755813888 35 0000008000000000 140737488355328 34 0000800000000000 65536 33 0000000000010000 281474976710656 32 0001000000000000 -2147483648 31 ffffffff80000000 -36028797018963969 30 ff7fffffffffffff 255 29 00000000000000ff -549755813889 28 ffffff7fffffffff 72057594037927935 27 00ffffffffffffff -9223372036854775808 26 8000000000000000 127 25 000000000000007f -32769 24 ffffffffffff7fff 36028797018963968 23 0080000000000000 32768 22 0000000000008000 -8388609 21 ffffffffff7fffff 1099511627775 20 000000ffffffffff 281474976710655 19 0000ffffffffffff 9223372036854775807 18 7fffffffffffffff -1 17 ffffffffffffffff 72057594037927936 16 0100000000000000 16777216 15 0000000001000000 256 14 0000000000000100 -140737488355328 13 ffff800000000000 -549755813888 12 ffffff8000000000 4294967296 11 0000000100000000 32767 10 0000000000007fff 16777215 9 0000000000ffffff 1099511627776 8 0000010000000000 8388607 7 00000000007fffff 65535 6 000000000000ffff 8388608 5 0000000000800000 -129 4 ffffffffffffff7f 0 3 0000000000000000 -8388608 2 ffffffffff800000 549755813887 1 0000007fffffffff}
do_test boundary4-1.10 {
db eval {
SELECT rowid, a, x FROM t1 ORDER BY a DESC
}
} {2147483647 46 000000007fffffff -128 45 ffffffffffffff80 -140737488355329 44 ffff7fffffffffff -36028797018963968 43 ff80000000000000 -32768 42 ffffffffffff8000 128 41 0000000000000080 36028797018963967 40 007fffffffffffff -2147483649 39 ffffffff7fffffff 140737488355327 38 00007fffffffffff 4294967295 37 00000000ffffffff 2147483648 36 0000000080000000 549755813888 35 0000008000000000 140737488355328 34 0000800000000000 65536 33 0000000000010000 281474976710656 32 0001000000000000 -2147483648 31 ffffffff80000000 -36028797018963969 30 ff7fffffffffffff 255 29 00000000000000ff -549755813889 28 ffffff7fffffffff 72057594037927935 27 00ffffffffffffff -9223372036854775808 26 8000000000000000 127 25 000000000000007f -32769 24 ffffffffffff7fff 36028797018963968 23 0080000000000000 32768 22 0000000000008000 -8388609 21 ffffffffff7fffff 1099511627775 20 000000ffffffffff 281474976710655 19 0000ffffffffffff 9223372036854775807 18 7fffffffffffffff -1 17 ffffffffffffffff 72057594037927936 16 0100000000000000 16777216 15 0000000001000000 256 14 0000000000000100 -140737488355328 13 ffff800000000000 -549755813888 12 ffffff8000000000 4294967296 11 0000000100000000 32767 10 0000000000007fff 16777215 9 0000000000ffffff 1099511627776 8 0000010000000000 8388607 7 00000000007fffff 65535 6 000000000000ffff 8388608 5 0000000000800000 -129 4 ffffffffffffff7f 0 3 0000000000000000 -8388608 2 ffffffffff800000 549755813887 1 0000007fffffffff}
do_test boundary4-1.11 {
db eval {
SELECT rowid, a, x FROM t1 ORDER BY +x
}
} {0 3 0000000000000000 127 25 000000000000007f 128 41 0000000000000080 255 29 00000000000000ff 256 14 0000000000000100 32767 10 0000000000007fff 32768 22 0000000000008000 65535 6 000000000000ffff 65536 33 0000000000010000 8388607 7 00000000007fffff 8388608 5 0000000000800000 16777215 9 0000000000ffffff 16777216 15 0000000001000000 2147483647 46 000000007fffffff 2147483648 36 0000000080000000 4294967295 37 00000000ffffffff 4294967296 11 0000000100000000 549755813887 1 0000007fffffffff 549755813888 35 0000008000000000 1099511627775 20 000000ffffffffff 1099511627776 8 0000010000000000 140737488355327 38 00007fffffffffff 140737488355328 34 0000800000000000 281474976710655 19 0000ffffffffffff 281474976710656 32 0001000000000000 36028797018963967 40 007fffffffffffff 36028797018963968 23 0080000000000000 72057594037927935 27 00ffffffffffffff 72057594037927936 16 0100000000000000 9223372036854775807 18 7fffffffffffffff -9223372036854775808 26 8000000000000000 -36028797018963969 30 ff7fffffffffffff -36028797018963968 43 ff80000000000000 -140737488355329 44 ffff7fffffffffff -140737488355328 13 ffff800000000000 -549755813889 28 ffffff7fffffffff -549755813888 12 ffffff8000000000 -2147483649 39 ffffffff7fffffff -2147483648 31 ffffffff80000000 -8388609 21 ffffffffff7fffff -8388608 2 ffffffffff800000 -32769 24 ffffffffffff7fff -32768 42 ffffffffffff8000 -129 4 ffffffffffffff7f -128 45 ffffffffffffff80 -1 17 ffffffffffffffff}
do_test boundary4-1.12 {
db eval {
SELECT rowid, a, x FROM t1 ORDER BY x
}
} {0 3 0000000000000000 127 25 000000000000007f 128 41 0000000000000080 255 29 00000000000000ff 256 14 0000000000000100 32767 10 0000000000007fff 32768 22 0000000000008000 65535 6 000000000000ffff 65536 33 0000000000010000 8388607 7 00000000007fffff 8388608 5 0000000000800000 16777215 9 0000000000ffffff 16777216 15 0000000001000000 2147483647 46 000000007fffffff 2147483648 36 0000000080000000 4294967295 37 00000000ffffffff 4294967296 11 0000000100000000 549755813887 1 0000007fffffffff 549755813888 35 0000008000000000 1099511627775 20 000000ffffffffff 1099511627776 8 0000010000000000 140737488355327 38 00007fffffffffff 140737488355328 34 0000800000000000 281474976710655 19 0000ffffffffffff 281474976710656 32 0001000000000000 36028797018963967 40 007fffffffffffff 36028797018963968 23 0080000000000000 72057594037927935 27 00ffffffffffffff 72057594037927936 16 0100000000000000 9223372036854775807 18 7fffffffffffffff -9223372036854775808 26 8000000000000000 -36028797018963969 30 ff7fffffffffffff -36028797018963968 43 ff80000000000000 -140737488355329 44 ffff7fffffffffff -140737488355328 13 ffff800000000000 -549755813889 28 ffffff7fffffffff -549755813888 12 ffffff8000000000 -2147483649 39 ffffffff7fffffff -2147483648 31 ffffffff80000000 -8388609 21 ffffffffff7fffff -8388608 2 ffffffffff800000 -32769 24 ffffffffffff7fff -32768 42 ffffffffffff8000 -129 4 ffffffffffffff7f -128 45 ffffffffffffff80 -1 17 ffffffffffffffff}
do_test boundary4-1.13 {
db eval {
SELECT rowid, a, x FROM t1 ORDER BY +x DESC
}
} {-1 17 ffffffffffffffff -128 45 ffffffffffffff80 -129 4 ffffffffffffff7f -32768 42 ffffffffffff8000 -32769 24 ffffffffffff7fff -8388608 2 ffffffffff800000 -8388609 21 ffffffffff7fffff -2147483648 31 ffffffff80000000 -2147483649 39 ffffffff7fffffff -549755813888 12 ffffff8000000000 -549755813889 28 ffffff7fffffffff -140737488355328 13 ffff800000000000 -140737488355329 44 ffff7fffffffffff -36028797018963968 43 ff80000000000000 -36028797018963969 30 ff7fffffffffffff -9223372036854775808 26 8000000000000000 9223372036854775807 18 7fffffffffffffff 72057594037927936 16 0100000000000000 72057594037927935 27 00ffffffffffffff 36028797018963968 23 0080000000000000 36028797018963967 40 007fffffffffffff 281474976710656 32 0001000000000000 281474976710655 19 0000ffffffffffff 140737488355328 34 0000800000000000 140737488355327 38 00007fffffffffff 1099511627776 8 0000010000000000 1099511627775 20 000000ffffffffff 549755813888 35 0000008000000000 549755813887 1 0000007fffffffff 4294967296 11 0000000100000000 4294967295 37 00000000ffffffff 2147483648 36 0000000080000000 2147483647 46 000000007fffffff 16777216 15 0000000001000000 16777215 9 0000000000ffffff 8388608 5 0000000000800000 8388607 7 00000000007fffff 65536 33 0000000000010000 65535 6 000000000000ffff 32768 22 0000000000008000 32767 10 0000000000007fff 256 14 0000000000000100 255 29 00000000000000ff 128 41 0000000000000080 127 25 000000000000007f 0 3 0000000000000000}
do_test boundary4-1.14 {
db eval {
SELECT rowid, a, x FROM t1 ORDER BY x DESC
}
} {-1 17 ffffffffffffffff -128 45 ffffffffffffff80 -129 4 ffffffffffffff7f -32768 42 ffffffffffff8000 -32769 24 ffffffffffff7fff -8388608 2 ffffffffff800000 -8388609 21 ffffffffff7fffff -2147483648 31 ffffffff80000000 -2147483649 39 ffffffff7fffffff -549755813888 12 ffffff8000000000 -549755813889 28 ffffff7fffffffff -140737488355328 13 ffff800000000000 -140737488355329 44 ffff7fffffffffff -36028797018963968 43 ff80000000000000 -36028797018963969 30 ff7fffffffffffff -9223372036854775808 26 8000000000000000 9223372036854775807 18 7fffffffffffffff 72057594037927936 16 0100000000000000 72057594037927935 27 00ffffffffffffff 36028797018963968 23 0080000000000000 36028797018963967 40 007fffffffffffff 281474976710656 32 0001000000000000 281474976710655 19 0000ffffffffffff 140737488355328 34 0000800000000000 140737488355327 38 00007fffffffffff 1099511627776 8 0000010000000000 1099511627775 20 000000ffffffffff 549755813888 35 0000008000000000 549755813887 1 0000007fffffffff 4294967296 11 0000000100000000 4294967295 37 00000000ffffffff 2147483648 36 0000000080000000 2147483647 46 000000007fffffff 16777216 15 0000000001000000 16777215 9 0000000000ffffff 8388608 5 0000000000800000 8388607 7 00000000007fffff 65536 33 0000000000010000 65535 6 000000000000ffff 32768 22 0000000000008000 32767 10 0000000000007fff 256 14 0000000000000100 255 29 00000000000000ff 128 41 0000000000000080 127 25 000000000000007f 0 3 0000000000000000}
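# Swap the values of rowid and a, then repeat the ordering checks with the
# columns selected as (a, rowid, x).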
do_test boundary4-2.1 {
db eval {
UPDATE t1 SET rowid=a, a=rowid
}
} {}
do_test boundary4-2.3 {
db eval {
SELECT a, rowid, x FROM t1 ORDER BY +a
}
} {-9223372036854775808 26 8000000000000000 -36028797018963969 30 ff7fffffffffffff -36028797018963968 43 ff80000000000000 -140737488355329 44 ffff7fffffffffff -140737488355328 13 ffff800000000000 -549755813889 28 ffffff7fffffffff -549755813888 12 ffffff8000000000 -2147483649 39 ffffffff7fffffff -2147483648 31 ffffffff80000000 -8388609 21 ffffffffff7fffff -8388608 2 ffffffffff800000 -32769 24 ffffffffffff7fff -32768 42 ffffffffffff8000 -129 4 ffffffffffffff7f -128 45 ffffffffffffff80 -1 17 ffffffffffffffff 0 3 0000000000000000 127 25 000000000000007f 128 41 0000000000000080 255 29 00000000000000ff 256 14 0000000000000100 32767 10 0000000000007fff 32768 22 0000000000008000 65535 6 000000000000ffff 65536 33 0000000000010000 8388607 7 00000000007fffff 8388608 5 0000000000800000 16777215 9 0000000000ffffff 16777216 15 0000000001000000 2147483647 46 000000007fffffff 2147483648 36 0000000080000000 4294967295 37 00000000ffffffff 4294967296 11 0000000100000000 549755813887 1 0000007fffffffff 549755813888 35 0000008000000000 1099511627775 20 000000ffffffffff 1099511627776 8 0000010000000000 140737488355327 38 00007fffffffffff 140737488355328 34 0000800000000000 281474976710655 19 0000ffffffffffff 281474976710656 32 0001000000000000 36028797018963967 40 007fffffffffffff 36028797018963968 23 0080000000000000 72057594037927935 27 00ffffffffffffff 72057594037927936 16 0100000000000000 9223372036854775807 18 7fffffffffffffff}
do_test boundary4-2.4 {
db eval {
SELECT a, rowid, x FROM t1 ORDER BY a
}
} {-9223372036854775808 26 8000000000000000 -36028797018963969 30 ff7fffffffffffff -36028797018963968 43 ff80000000000000 -140737488355329 44 ffff7fffffffffff -140737488355328 13 ffff800000000000 -549755813889 28 ffffff7fffffffff -549755813888 12 ffffff8000000000 -2147483649 39 ffffffff7fffffff -2147483648 31 ffffffff80000000 -8388609 21 ffffffffff7fffff -8388608 2 ffffffffff800000 -32769 24 ffffffffffff7fff -32768 42 ffffffffffff8000 -129 4 ffffffffffffff7f -128 45 ffffffffffffff80 -1 17 ffffffffffffffff 0 3 0000000000000000 127 25 000000000000007f 128 41 0000000000000080 255 29 00000000000000ff 256 14 0000000000000100 32767 10 0000000000007fff 32768 22 0000000000008000 65535 6 000000000000ffff 65536 33 0000000000010000 8388607 7 00000000007fffff 8388608 5 0000000000800000 16777215 9 0000000000ffffff 16777216 15 0000000001000000 2147483647 46 000000007fffffff 2147483648 36 0000000080000000 4294967295 37 00000000ffffffff 4294967296 11 0000000100000000 549755813887 1 0000007fffffffff 549755813888 35 0000008000000000 1099511627775 20 000000ffffffffff 1099511627776 8 0000010000000000 140737488355327 38 00007fffffffffff 140737488355328 34 0000800000000000 281474976710655 19 0000ffffffffffff 281474976710656 32 0001000000000000 36028797018963967 40 007fffffffffffff 36028797018963968 23 0080000000000000 72057594037927935 27 00ffffffffffffff 72057594037927936 16 0100000000000000 9223372036854775807 18 7fffffffffffffff}
do_test boundary4-2.5 {
db eval {
SELECT a, rowid, x FROM t1 ORDER BY +a DESC
}
} {9223372036854775807 18 7fffffffffffffff 72057594037927936 16 0100000000000000 72057594037927935 27 00ffffffffffffff 36028797018963968 23 0080000000000000 36028797018963967 40 007fffffffffffff 281474976710656 32 0001000000000000 281474976710655 19 0000ffffffffffff 140737488355328 34 0000800000000000 140737488355327 38 00007fffffffffff 1099511627776 8 0000010000000000 1099511627775 20 000000ffffffffff 549755813888 35 0000008000000000 549755813887 1 0000007fffffffff 4294967296 11 0000000100000000 4294967295 37 00000000ffffffff 2147483648 36 0000000080000000 2147483647 46 000000007fffffff 16777216 15 0000000001000000 16777215 9 0000000000ffffff 8388608 5 0000000000800000 8388607 7 00000000007fffff 65536 33 0000000000010000 65535 6 000000000000ffff 32768 22 0000000000008000 32767 10 0000000000007fff 256 14 0000000000000100 255 29 00000000000000ff 128 41 0000000000000080 127 25 000000000000007f 0 3 0000000000000000 -1 17 ffffffffffffffff -128 45 ffffffffffffff80 -129 4 ffffffffffffff7f -32768 42 ffffffffffff8000 -32769 24 ffffffffffff7fff -8388608 2 ffffffffff800000 -8388609 21 ffffffffff7fffff -2147483648 31 ffffffff80000000 -2147483649 39 ffffffff7fffffff -549755813888 12 ffffff8000000000 -549755813889 28 ffffff7fffffffff -140737488355328 13 ffff800000000000 -140737488355329 44 ffff7fffffffffff -36028797018963968 43 ff80000000000000 -36028797018963969 30 ff7fffffffffffff -9223372036854775808 26 8000000000000000}
do_test boundary4-2.6 {
db eval {
SELECT a, rowid, x FROM t1 ORDER BY a DESC
}
} {9223372036854775807 18 7fffffffffffffff 72057594037927936 16 0100000000000000 72057594037927935 27 00ffffffffffffff 36028797018963968 23 0080000000000000 36028797018963967 40 007fffffffffffff 281474976710656 32 0001000000000000 281474976710655 19 0000ffffffffffff 140737488355328 34 0000800000000000 140737488355327 38 00007fffffffffff 1099511627776 8 0000010000000000 1099511627775 20 000000ffffffffff 549755813888 35 0000008000000000 549755813887 1 0000007fffffffff 4294967296 11 0000000100000000 4294967295 37 00000000ffffffff 2147483648 36 0000000080000000 2147483647 46 000000007fffffff 16777216 15 0000000001000000 16777215 9 0000000000ffffff 8388608 5 0000000000800000 8388607 7 00000000007fffff 65536 33 0000000000010000 65535 6 000000000000ffff 32768 22 0000000000008000 32767 10 0000000000007fff 256 14 0000000000000100 255 29 00000000000000ff 128 41 0000000000000080 127 25 000000000000007f 0 3 0000000000000000 -1 17 ffffffffffffffff -128 45 ffffffffffffff80 -129 4 ffffffffffffff7f -32768 42 ffffffffffff8000 -32769 24 ffffffffffff7fff -8388608 2 ffffffffff800000 -8388609 21 ffffffffff7fffff -2147483648 31 ffffffff80000000 -2147483649 39 ffffffff7fffffff -549755813888 12 ffffff8000000000 -549755813889 28 ffffff7fffffffff -140737488355328 13 ffff800000000000 -140737488355329 44 ffff7fffffffffff -36028797018963968 43 ff80000000000000 -36028797018963969 30 ff7fffffffffffff -9223372036854775808 26 8000000000000000}
do_test boundary4-2.7 {
db eval {
SELECT a, rowid, x FROM t1 ORDER BY +rowid
}
} {549755813887 1 0000007fffffffff -8388608 2 ffffffffff800000 0 3 0000000000000000 -129 4 ffffffffffffff7f 8388608 5 0000000000800000 65535 6 000000000000ffff 8388607 7 00000000007fffff 1099511627776 8 0000010000000000 16777215 9 0000000000ffffff 32767 10 0000000000007fff 4294967296 11 0000000100000000 -549755813888 12 ffffff8000000000 -140737488355328 13 ffff800000000000 256 14 0000000000000100 16777216 15 0000000001000000 72057594037927936 16 0100000000000000 -1 17 ffffffffffffffff 9223372036854775807 18 7fffffffffffffff 281474976710655 19 0000ffffffffffff 1099511627775 20 000000ffffffffff -8388609 21 ffffffffff7fffff 32768 22 0000000000008000 36028797018963968 23 0080000000000000 -32769 24 ffffffffffff7fff 127 25 000000000000007f -9223372036854775808 26 8000000000000000 72057594037927935 27 00ffffffffffffff -549755813889 28 ffffff7fffffffff 255 29 00000000000000ff -36028797018963969 30 ff7fffffffffffff -2147483648 31 ffffffff80000000 281474976710656 32 0001000000000000 65536 33 0000000000010000 140737488355328 34 0000800000000000 549755813888 35 0000008000000000 2147483648 36 0000000080000000 4294967295 37 00000000ffffffff 140737488355327 38 00007fffffffffff -2147483649 39 ffffffff7fffffff 36028797018963967 40 007fffffffffffff 128 41 0000000000000080 -32768 42 ffffffffffff8000 -36028797018963968 43 ff80000000000000 -140737488355329 44 ffff7fffffffffff -128 45 ffffffffffffff80 2147483647 46 000000007fffffff}
do_test boundary4-2.8 {
db eval {
SELECT a, rowid, x FROM t1 ORDER BY rowid
}
} {549755813887 1 0000007fffffffff -8388608 2 ffffffffff800000 0 3 0000000000000000 -129 4 ffffffffffffff7f 8388608 5 0000000000800000 65535 6 000000000000ffff 8388607 7 00000000007fffff 1099511627776 8 0000010000000000 16777215 9 0000000000ffffff 32767 10 0000000000007fff 4294967296 11 0000000100000000 -549755813888 12 ffffff8000000000 -140737488355328 13 ffff800000000000 256 14 0000000000000100 16777216 15 0000000001000000 72057594037927936 16 0100000000000000 -1 17 ffffffffffffffff 9223372036854775807 18 7fffffffffffffff 281474976710655 19 0000ffffffffffff 1099511627775 20 000000ffffffffff -8388609 21 ffffffffff7fffff 32768 22 0000000000008000 36028797018963968 23 0080000000000000 -32769 24 ffffffffffff7fff 127 25 000000000000007f -9223372036854775808 26 8000000000000000 72057594037927935 27 00ffffffffffffff -549755813889 28 ffffff7fffffffff 255 29 00000000000000ff -36028797018963969 30 ff7fffffffffffff -2147483648 31 ffffffff80000000 281474976710656 32 0001000000000000 65536 33 0000000000010000 140737488355328 34 0000800000000000 549755813888 35 0000008000000000 2147483648 36 0000000080000000 4294967295 37 00000000ffffffff 140737488355327 38 00007fffffffffff -2147483649 39 ffffffff7fffffff 36028797018963967 40 007fffffffffffff 128 41 0000000000000080 -32768 42 ffffffffffff8000 -36028797018963968 43 ff80000000000000 -140737488355329 44 ffff7fffffffffff -128 45 ffffffffffffff80 2147483647 46 000000007fffffff}
do_test boundary4-2.9 {
db eval {
SELECT a, rowid, x FROM t1 ORDER BY +rowid DESC
}
} {2147483647 46 000000007fffffff -128 45 ffffffffffffff80 -140737488355329 44 ffff7fffffffffff -36028797018963968 43 ff80000000000000 -32768 42 ffffffffffff8000 128 41 0000000000000080 36028797018963967 40 007fffffffffffff -2147483649 39 ffffffff7fffffff 140737488355327 38 00007fffffffffff 4294967295 37 00000000ffffffff 2147483648 36 0000000080000000 549755813888 35 0000008000000000 140737488355328 34 0000800000000000 65536 33 0000000000010000 281474976710656 32 0001000000000000 -2147483648 31 ffffffff80000000 -36028797018963969 30 ff7fffffffffffff 255 29 00000000000000ff -549755813889 28 ffffff7fffffffff 72057594037927935 27 00ffffffffffffff -9223372036854775808 26 8000000000000000 127 25 000000000000007f -32769 24 ffffffffffff7fff 36028797018963968 23 0080000000000000 32768 22 0000000000008000 -8388609 21 ffffffffff7fffff 1099511627775 20 000000ffffffffff 281474976710655 19 0000ffffffffffff 9223372036854775807 18 7fffffffffffffff -1 17 ffffffffffffffff 72057594037927936 16 0100000000000000 16777216 15 0000000001000000 256 14 0000000000000100 -140737488355328 13 ffff800000000000 -549755813888 12 ffffff8000000000 4294967296 11 0000000100000000 32767 10 0000000000007fff 16777215 9 0000000000ffffff 1099511627776 8 0000010000000000 8388607 7 00000000007fffff 65535 6 000000000000ffff 8388608 5 0000000000800000 -129 4 ffffffffffffff7f 0 3 0000000000000000 -8388608 2 ffffffffff800000 549755813887 1 0000007fffffffff}
do_test boundary4-2.10 {
db eval {
SELECT a, rowid, x FROM t1 ORDER BY rowid DESC
}
} {2147483647 46 000000007fffffff -128 45 ffffffffffffff80 -140737488355329 44 ffff7fffffffffff -36028797018963968 43 ff80000000000000 -32768 42 ffffffffffff8000 128 41 0000000000000080 36028797018963967 40 007fffffffffffff -2147483649 39 ffffffff7fffffff 140737488355327 38 00007fffffffffff 4294967295 37 00000000ffffffff 2147483648 36 0000000080000000 549755813888 35 0000008000000000 140737488355328 34 0000800000000000 65536 33 0000000000010000 281474976710656 32 0001000000000000 -2147483648 31 ffffffff80000000 -36028797018963969 30 ff7fffffffffffff 255 29 00000000000000ff -549755813889 28 ffffff7fffffffff 72057594037927935 27 00ffffffffffffff -9223372036854775808 26 8000000000000000 127 25 000000000000007f -32769 24 ffffffffffff7fff 36028797018963968 23 0080000000000000 32768 22 0000000000008000 -8388609 21 ffffffffff7fffff 1099511627775 20 000000ffffffffff 281474976710655 19 0000ffffffffffff 9223372036854775807 18 7fffffffffffffff -1 17 ffffffffffffffff 72057594037927936 16 0100000000000000 16777216 15 0000000001000000 256 14 0000000000000100 -140737488355328 13 ffff800000000000 -549755813888 12 ffffff8000000000 4294967296 11 0000000100000000 32767 10 0000000000007fff 16777215 9 0000000000ffffff 1099511627776 8 0000010000000000 8388607 7 00000000007fffff 65535 6 000000000000ffff 8388608 5 0000000000800000 -129 4 ffffffffffffff7f 0 3 0000000000000000 -8388608 2 ffffffffff800000 549755813887 1 0000007fffffffff}
do_test boundary4-2.11 {
db eval {
SELECT a, rowid, x FROM t1 ORDER BY +x
}
} {0 3 0000000000000000 127 25 000000000000007f 128 41 0000000000000080 255 29 00000000000000ff 256 14 0000000000000100 32767 10 0000000000007fff 32768 22 0000000000008000 65535 6 000000000000ffff 65536 33 0000000000010000 8388607 7 00000000007fffff 8388608 5 0000000000800000 16777215 9 0000000000ffffff 16777216 15 0000000001000000 2147483647 46 000000007fffffff 2147483648 36 0000000080000000 4294967295 37 00000000ffffffff 4294967296 11 0000000100000000 549755813887 1 0000007fffffffff 549755813888 35 0000008000000000 1099511627775 20 000000ffffffffff 1099511627776 8 0000010000000000 140737488355327 38 00007fffffffffff 140737488355328 34 0000800000000000 281474976710655 19 0000ffffffffffff 281474976710656 32 0001000000000000 36028797018963967 40 007fffffffffffff 36028797018963968 23 0080000000000000 72057594037927935 27 00ffffffffffffff 72057594037927936 16 0100000000000000 9223372036854775807 18 7fffffffffffffff -9223372036854775808 26 8000000000000000 -36028797018963969 30 ff7fffffffffffff -36028797018963968 43 ff80000000000000 -140737488355329 44 ffff7fffffffffff -140737488355328 13 ffff800000000000 -549755813889 28 ffffff7fffffffff -549755813888 12 ffffff8000000000 -2147483649 39 ffffffff7fffffff -2147483648 31 ffffffff80000000 -8388609 21 ffffffffff7fffff -8388608 2 ffffffffff800000 -32769 24 ffffffffffff7fff -32768 42 ffffffffffff8000 -129 4 ffffffffffffff7f -128 45 ffffffffffffff80 -1 17 ffffffffffffffff}
do_test boundary4-2.12 {
db eval {
SELECT a, rowid, x FROM t1 ORDER BY x
}
} {0 3 0000000000000000 127 25 000000000000007f 128 41 0000000000000080 255 29 00000000000000ff 256 14 0000000000000100 32767 10 0000000000007fff 32768 22 0000000000008000 65535 6 000000000000ffff 65536 33 0000000000010000 8388607 7 00000000007fffff 8388608 5 0000000000800000 16777215 9 0000000000ffffff 16777216 15 0000000001000000 2147483647 46 000000007fffffff 2147483648 36 0000000080000000 4294967295 37 00000000ffffffff 4294967296 11 0000000100000000 549755813887 1 0000007fffffffff 549755813888 35 0000008000000000 1099511627775 20 000000ffffffffff 1099511627776 8 0000010000000000 140737488355327 38 00007fffffffffff 140737488355328 34 0000800000000000 281474976710655 19 0000ffffffffffff 281474976710656 32 0001000000000000 36028797018963967 40 007fffffffffffff 36028797018963968 23 0080000000000000 72057594037927935 27 00ffffffffffffff 72057594037927936 16 0100000000000000 9223372036854775807 18 7fffffffffffffff -9223372036854775808 26 8000000000000000 -36028797018963969 30 ff7fffffffffffff -36028797018963968 43 ff80000000000000 -140737488355329 44 ffff7fffffffffff -140737488355328 13 ffff800000000000 -549755813889 28 ffffff7fffffffff -549755813888 12 ffffff8000000000 -2147483649 39 ffffffff7fffffff -2147483648 31 ffffffff80000000 -8388609 21 ffffffffff7fffff -8388608 2 ffffffffff800000 -32769 24 ffffffffffff7fff -32768 42 ffffffffffff8000 -129 4 ffffffffffffff7f -128 45 ffffffffffffff80 -1 17 ffffffffffffffff}
do_test boundary4-2.13 {
db eval {
SELECT a, rowid, x FROM t1 ORDER BY +x DESC
}
} {-1 17 ffffffffffffffff -128 45 ffffffffffffff80 -129 4 ffffffffffffff7f -32768 42 ffffffffffff8000 -32769 24 ffffffffffff7fff -8388608 2 ffffffffff800000 -8388609 21 ffffffffff7fffff -2147483648 31 ffffffff80000000 -2147483649 39 ffffffff7fffffff -549755813888 12 ffffff8000000000 -549755813889 28 ffffff7fffffffff -140737488355328 13 ffff800000000000 -140737488355329 44 ffff7fffffffffff -36028797018963968 43 ff80000000000000 -36028797018963969 30 ff7fffffffffffff -9223372036854775808 26 8000000000000000 9223372036854775807 18 7fffffffffffffff 72057594037927936 16 0100000000000000 72057594037927935 27 00ffffffffffffff 36028797018963968 23 0080000000000000 36028797018963967 40 007fffffffffffff 281474976710656 32 0001000000000000 281474976710655 19 0000ffffffffffff 140737488355328 34 0000800000000000 140737488355327 38 00007fffffffffff 1099511627776 8 0000010000000000 1099511627775 20 000000ffffffffff 549755813888 35 0000008000000000 549755813887 1 0000007fffffffff 4294967296 11 0000000100000000 4294967295 37 00000000ffffffff 2147483648 36 0000000080000000 2147483647 46 000000007fffffff 16777216 15 0000000001000000 16777215 9 0000000000ffffff 8388608 5 0000000000800000 8388607 7 00000000007fffff 65536 33 0000000000010000 65535 6 000000000000ffff 32768 22 0000000000008000 32767 10 0000000000007fff 256 14 0000000000000100 255 29 00000000000000ff 128 41 0000000000000080 127 25 000000000000007f 0 3 0000000000000000}
do_test boundary4-2.14 {
db eval {
SELECT a, rowid, x FROM t1 ORDER BY x DESC
}
} {-1 17 ffffffffffffffff -128 45 ffffffffffffff80 -129 4 ffffffffffffff7f -32768 42 ffffffffffff8000 -32769 24 ffffffffffff7fff -8388608 2 ffffffffff800000 -8388609 21 ffffffffff7fffff -2147483648 31 ffffffff80000000 -2147483649 39 ffffffff7fffffff -549755813888 12 ffffff8000000000 -549755813889 28 ffffff7fffffffff -140737488355328 13 ffff800000000000 -140737488355329 44 ffff7fffffffffff -36028797018963968 43 ff80000000000000 -36028797018963969 30 ff7fffffffffffff -9223372036854775808 26 8000000000000000 9223372036854775807 18 7fffffffffffffff 72057594037927936 16 0100000000000000 72057594037927935 27 00ffffffffffffff 36028797018963968 23 0080000000000000 36028797018963967 40 007fffffffffffff 281474976710656 32 0001000000000000 281474976710655 19 0000ffffffffffff 140737488355328 34 0000800000000000 140737488355327 38 00007fffffffffff 1099511627776 8 0000010000000000 1099511627775 20 000000ffffffffff 549755813888 35 0000008000000000 549755813887 1 0000007fffffffff 4294967296 11 0000000100000000 4294967295 37 00000000ffffffff 2147483648 36 0000000080000000 2147483647 46 000000007fffffff 16777216 15 0000000001000000 16777215 9 0000000000ffffff 8388608 5 0000000000800000 8388607 7 00000000007fffff 65536 33 0000000000010000 65535 6 000000000000ffff 32768 22 0000000000008000 32767 10 0000000000007fff 256 14 0000000000000100 255 29 00000000000000ff 128 41 0000000000000080 127 25 000000000000007f 0 3 0000000000000000}
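# Swap rowid and a back to their original values, then add a column z holding
# a 600-byte zeroblob in every row (presumably to exercise the same boundary
# rowids with much larger records) and re-run the ordering checks.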
do_test boundary4-3.1 {
db eval {
UPDATE t1 SET rowid=a, a=rowid
}
} {}
do_test boundary4-3.2 {
db eval {
ALTER TABLE t1 ADD COLUMN z; UPDATE t1 SET z=zeroblob(600)
}
} {}
do_test boundary4-3.3 {
db eval {
SELECT rowid, a, x FROM t1 ORDER BY +rowid
}
} {-9223372036854775808 26 8000000000000000 -36028797018963969 30 ff7fffffffffffff -36028797018963968 43 ff80000000000000 -140737488355329 44 ffff7fffffffffff -140737488355328 13 ffff800000000000 -549755813889 28 ffffff7fffffffff -549755813888 12 ffffff8000000000 -2147483649 39 ffffffff7fffffff -2147483648 31 ffffffff80000000 -8388609 21 ffffffffff7fffff -8388608 2 ffffffffff800000 -32769 24 ffffffffffff7fff -32768 42 ffffffffffff8000 -129 4 ffffffffffffff7f -128 45 ffffffffffffff80 -1 17 ffffffffffffffff 0 3 0000000000000000 127 25 000000000000007f 128 41 0000000000000080 255 29 00000000000000ff 256 14 0000000000000100 32767 10 0000000000007fff 32768 22 0000000000008000 65535 6 000000000000ffff 65536 33 0000000000010000 8388607 7 00000000007fffff 8388608 5 0000000000800000 16777215 9 0000000000ffffff 16777216 15 0000000001000000 2147483647 46 000000007fffffff 2147483648 36 0000000080000000 4294967295 37 00000000ffffffff 4294967296 11 0000000100000000 549755813887 1 0000007fffffffff 549755813888 35 0000008000000000 1099511627775 20 000000ffffffffff 1099511627776 8 0000010000000000 140737488355327 38 00007fffffffffff 140737488355328 34 0000800000000000 281474976710655 19 0000ffffffffffff 281474976710656 32 0001000000000000 36028797018963967 40 007fffffffffffff 36028797018963968 23 0080000000000000 72057594037927935 27 00ffffffffffffff 72057594037927936 16 0100000000000000 9223372036854775807 18 7fffffffffffffff}
do_test boundary4-3.4 {
db eval {
SELECT rowid, a, x FROM t1 ORDER BY rowid
}
} {-9223372036854775808 26 8000000000000000 -36028797018963969 30 ff7fffffffffffff -36028797018963968 43 ff80000000000000 -140737488355329 44 ffff7fffffffffff -140737488355328 13 ffff800000000000 -549755813889 28 ffffff7fffffffff -549755813888 12 ffffff8000000000 -2147483649 39 ffffffff7fffffff -2147483648 31 ffffffff80000000 -8388609 21 ffffffffff7fffff -8388608 2 ffffffffff800000 -32769 24 ffffffffffff7fff -32768 42 ffffffffffff8000 -129 4 ffffffffffffff7f -128 45 ffffffffffffff80 -1 17 ffffffffffffffff 0 3 0000000000000000 127 25 000000000000007f 128 41 0000000000000080 255 29 00000000000000ff 256 14 0000000000000100 32767 10 0000000000007fff 32768 22 0000000000008000 65535 6 000000000000ffff 65536 33 0000000000010000 8388607 7 00000000007fffff 8388608 5 0000000000800000 16777215 9 0000000000ffffff 16777216 15 0000000001000000 2147483647 46 000000007fffffff 2147483648 36 0000000080000000 4294967295 37 00000000ffffffff 4294967296 11 0000000100000000 549755813887 1 0000007fffffffff 549755813888 35 0000008000000000 1099511627775 20 000000ffffffffff 1099511627776 8 0000010000000000 140737488355327 38 00007fffffffffff 140737488355328 34 0000800000000000 281474976710655 19 0000ffffffffffff 281474976710656 32 0001000000000000 36028797018963967 40 007fffffffffffff 36028797018963968 23 0080000000000000 72057594037927935 27 00ffffffffffffff 72057594037927936 16 0100000000000000 9223372036854775807 18 7fffffffffffffff}
do_test boundary4-3.5 {
db eval {
SELECT rowid, a, x FROM t1 ORDER BY +rowid DESC
}
} {9223372036854775807 18 7fffffffffffffff 72057594037927936 16 0100000000000000 72057594037927935 27 00ffffffffffffff 36028797018963968 23 0080000000000000 36028797018963967 40 007fffffffffffff 281474976710656 32 0001000000000000 281474976710655 19 0000ffffffffffff 140737488355328 34 0000800000000000 140737488355327 38 00007fffffffffff 1099511627776 8 0000010000000000 1099511627775 20 000000ffffffffff 549755813888 35 0000008000000000 549755813887 1 0000007fffffffff 4294967296 11 0000000100000000 4294967295 37 00000000ffffffff 2147483648 36 0000000080000000 2147483647 46 000000007fffffff 16777216 15 0000000001000000 16777215 9 0000000000ffffff 8388608 5 0000000000800000 8388607 7 00000000007fffff 65536 33 0000000000010000 65535 6 000000000000ffff 32768 22 0000000000008000 32767 10 0000000000007fff 256 14 0000000000000100 255 29 00000000000000ff 128 41 0000000000000080 127 25 000000000000007f 0 3 0000000000000000 -1 17 ffffffffffffffff -128 45 ffffffffffffff80 -129 4 ffffffffffffff7f -32768 42 ffffffffffff8000 -32769 24 ffffffffffff7fff -8388608 2 ffffffffff800000 -8388609 21 ffffffffff7fffff -2147483648 31 ffffffff80000000 -2147483649 39 ffffffff7fffffff -549755813888 12 ffffff8000000000 -549755813889 28 ffffff7fffffffff -140737488355328 13 ffff800000000000 -140737488355329 44 ffff7fffffffffff -36028797018963968 43 ff80000000000000 -36028797018963969 30 ff7fffffffffffff -9223372036854775808 26 8000000000000000}
do_test boundary4-3.6 {
db eval {
SELECT rowid, a, x FROM t1 ORDER BY rowid DESC
}
} {9223372036854775807 18 7fffffffffffffff 72057594037927936 16 0100000000000000 72057594037927935 27 00ffffffffffffff 36028797018963968 23 0080000000000000 36028797018963967 40 007fffffffffffff 281474976710656 32 0001000000000000 281474976710655 19 0000ffffffffffff 140737488355328 34 0000800000000000 140737488355327 38 00007fffffffffff 1099511627776 8 0000010000000000 1099511627775 20 000000ffffffffff 549755813888 35 0000008000000000 549755813887 1 0000007fffffffff 4294967296 11 0000000100000000 4294967295 37 00000000ffffffff 2147483648 36 0000000080000000 2147483647 46 000000007fffffff 16777216 15 0000000001000000 16777215 9 0000000000ffffff 8388608 5 0000000000800000 8388607 7 00000000007fffff 65536 33 0000000000010000 65535 6 000000000000ffff 32768 22 0000000000008000 32767 10 0000000000007fff 256 14 0000000000000100 255 29 00000000000000ff 128 41 0000000000000080 127 25 000000000000007f 0 3 0000000000000000 -1 17 ffffffffffffffff -128 45 ffffffffffffff80 -129 4 ffffffffffffff7f -32768 42 ffffffffffff8000 -32769 24 ffffffffffff7fff -8388608 2 ffffffffff800000 -8388609 21 ffffffffff7fffff -2147483648 31 ffffffff80000000 -2147483649 39 ffffffff7fffffff -549755813888 12 ffffff8000000000 -549755813889 28 ffffff7fffffffff -140737488355328 13 ffff800000000000 -140737488355329 44 ffff7fffffffffff -36028797018963968 43 ff80000000000000 -36028797018963969 30 ff7fffffffffffff -9223372036854775808 26 8000000000000000}
do_test boundary4-3.7 {
db eval {
SELECT rowid, a, x FROM t1 ORDER BY +a
}
} {549755813887 1 0000007fffffffff -8388608 2 ffffffffff800000 0 3 0000000000000000 -129 4 ffffffffffffff7f 8388608 5 0000000000800000 65535 6 000000000000ffff 8388607 7 00000000007fffff 1099511627776 8 0000010000000000 16777215 9 0000000000ffffff 32767 10 0000000000007fff 4294967296 11 0000000100000000 -549755813888 12 ffffff8000000000 -140737488355328 13 ffff800000000000 256 14 0000000000000100 16777216 15 0000000001000000 72057594037927936 16 0100000000000000 -1 17 ffffffffffffffff 9223372036854775807 18 7fffffffffffffff 281474976710655 19 0000ffffffffffff 1099511627775 20 000000ffffffffff -8388609 21 ffffffffff7fffff 32768 22 0000000000008000 36028797018963968 23 0080000000000000 -32769 24 ffffffffffff7fff 127 25 000000000000007f -9223372036854775808 26 8000000000000000 72057594037927935 27 00ffffffffffffff -549755813889 28 ffffff7fffffffff 255 29 00000000000000ff -36028797018963969 30 ff7fffffffffffff -2147483648 31 ffffffff80000000 281474976710656 32 0001000000000000 65536 33 0000000000010000 140737488355328 34 0000800000000000 549755813888 35 0000008000000000 2147483648 36 0000000080000000 4294967295 37 00000000ffffffff 140737488355327 38 00007fffffffffff -2147483649 39 ffffffff7fffffff 36028797018963967 40 007fffffffffffff 128 41 0000000000000080 -32768 42 ffffffffffff8000 -36028797018963968 43 ff80000000000000 -140737488355329 44 ffff7fffffffffff -128 45 ffffffffffffff80 2147483647 46 000000007fffffff}
do_test boundary4-3.8 {
db eval {
SELECT rowid, a, x FROM t1 ORDER BY a
}
} {549755813887 1 0000007fffffffff -8388608 2 ffffffffff800000 0 3 0000000000000000 -129 4 ffffffffffffff7f 8388608 5 0000000000800000 65535 6 000000000000ffff 8388607 7 00000000007fffff 1099511627776 8 0000010000000000 16777215 9 0000000000ffffff 32767 10 0000000000007fff 4294967296 11 0000000100000000 -549755813888 12 ffffff8000000000 -140737488355328 13 ffff800000000000 256 14 0000000000000100 16777216 15 0000000001000000 72057594037927936 16 0100000000000000 -1 17 ffffffffffffffff 9223372036854775807 18 7fffffffffffffff 281474976710655 19 0000ffffffffffff 1099511627775 20 000000ffffffffff -8388609 21 ffffffffff7fffff 32768 22 0000000000008000 36028797018963968 23 0080000000000000 -32769 24 ffffffffffff7fff 127 25 000000000000007f -9223372036854775808 26 8000000000000000 72057594037927935 27 00ffffffffffffff -549755813889 28 ffffff7fffffffff 255 29 00000000000000ff -36028797018963969 30 ff7fffffffffffff -2147483648 31 ffffffff80000000 281474976710656 32 0001000000000000 65536 33 0000000000010000 140737488355328 34 0000800000000000 549755813888 35 0000008000000000 2147483648 36 0000000080000000 4294967295 37 00000000ffffffff 140737488355327 38 00007fffffffffff -2147483649 39 ffffffff7fffffff 36028797018963967 40 007fffffffffffff 128 41 0000000000000080 -32768 42 ffffffffffff8000 -36028797018963968 43 ff80000000000000 -140737488355329 44 ffff7fffffffffff -128 45 ffffffffffffff80 2147483647 46 000000007fffffff}
do_test boundary4-3.9 {
db eval {
SELECT rowid, a, x FROM t1 ORDER BY +a DESC
}
} {2147483647 46 000000007fffffff -128 45 ffffffffffffff80 -140737488355329 44 ffff7fffffffffff -36028797018963968 43 ff80000000000000 -32768 42 ffffffffffff8000 128 41 0000000000000080 36028797018963967 40 007fffffffffffff -2147483649 39 ffffffff7fffffff 140737488355327 38 00007fffffffffff 4294967295 37 00000000ffffffff 2147483648 36 0000000080000000 549755813888 35 0000008000000000 140737488355328 34 0000800000000000 65536 33 0000000000010000 281474976710656 32 0001000000000000 -2147483648 31 ffffffff80000000 -36028797018963969 30 ff7fffffffffffff 255 29 00000000000000ff -549755813889 28 ffffff7fffffffff 72057594037927935 27 00ffffffffffffff -9223372036854775808 26 8000000000000000 127 25 000000000000007f -32769 24 ffffffffffff7fff 36028797018963968 23 0080000000000000 32768 22 0000000000008000 -8388609 21 ffffffffff7fffff 1099511627775 20 000000ffffffffff 281474976710655 19 0000ffffffffffff 9223372036854775807 18 7fffffffffffffff -1 17 ffffffffffffffff 72057594037927936 16 0100000000000000 16777216 15 0000000001000000 256 14 0000000000000100 -140737488355328 13 ffff800000000000 -549755813888 12 ffffff8000000000 4294967296 11 0000000100000000 32767 10 0000000000007fff 16777215 9 0000000000ffffff 1099511627776 8 0000010000000000 8388607 7 00000000007fffff 65535 6 000000000000ffff 8388608 5 0000000000800000 -129 4 ffffffffffffff7f 0 3 0000000000000000 -8388608 2 ffffffffff800000 549755813887 1 0000007fffffffff}
do_test boundary4-3.10 {
db eval {
SELECT rowid, a, x FROM t1 ORDER BY a DESC
}
} {2147483647 46 000000007fffffff -128 45 ffffffffffffff80 -140737488355329 44 ffff7fffffffffff -36028797018963968 43 ff80000000000000 -32768 42 ffffffffffff8000 128 41 0000000000000080 36028797018963967 40 007fffffffffffff -2147483649 39 ffffffff7fffffff 140737488355327 38 00007fffffffffff 4294967295 37 00000000ffffffff 2147483648 36 0000000080000000 549755813888 35 0000008000000000 140737488355328 34 0000800000000000 65536 33 0000000000010000 281474976710656 32 0001000000000000 -2147483648 31 ffffffff80000000 -36028797018963969 30 ff7fffffffffffff 255 29 00000000000000ff -549755813889 28 ffffff7fffffffff 72057594037927935 27 00ffffffffffffff -9223372036854775808 26 8000000000000000 127 25 000000000000007f -32769 24 ffffffffffff7fff 36028797018963968 23 0080000000000000 32768 22 0000000000008000 -8388609 21 ffffffffff7fffff 1099511627775 20 000000ffffffffff 281474976710655 19 0000ffffffffffff 9223372036854775807 18 7fffffffffffffff -1 17 ffffffffffffffff 72057594037927936 16 0100000000000000 16777216 15 0000000001000000 256 14 0000000000000100 -140737488355328 13 ffff800000000000 -549755813888 12 ffffff8000000000 4294967296 11 0000000100000000 32767 10 0000000000007fff 16777215 9 0000000000ffffff 1099511627776 8 0000010000000000 8388607 7 00000000007fffff 65535 6 000000000000ffff 8388608 5 0000000000800000 -129 4 ffffffffffffff7f 0 3 0000000000000000 -8388608 2 ffffffffff800000 549755813887 1 0000007fffffffff}
do_test boundary4-3.11 {
db eval {
SELECT rowid, a, x FROM t1 ORDER BY +x
}
} {0 3 0000000000000000 127 25 000000000000007f 128 41 0000000000000080 255 29 00000000000000ff 256 14 0000000000000100 32767 10 0000000000007fff 32768 22 0000000000008000 65535 6 000000000000ffff 65536 33 0000000000010000 8388607 7 00000000007fffff 8388608 5 0000000000800000 16777215 9 0000000000ffffff 16777216 15 0000000001000000 2147483647 46 000000007fffffff 2147483648 36 0000000080000000 4294967295 37 00000000ffffffff 4294967296 11 0000000100000000 549755813887 1 0000007fffffffff 549755813888 35 0000008000000000 1099511627775 20 000000ffffffffff 1099511627776 8 0000010000000000 140737488355327 38 00007fffffffffff 140737488355328 34 0000800000000000 281474976710655 19 0000ffffffffffff 281474976710656 32 0001000000000000 36028797018963967 40 007fffffffffffff 36028797018963968 23 0080000000000000 72057594037927935 27 00ffffffffffffff 72057594037927936 16 0100000000000000 9223372036854775807 18 7fffffffffffffff -9223372036854775808 26 8000000000000000 -36028797018963969 30 ff7fffffffffffff -36028797018963968 43 ff80000000000000 -140737488355329 44 ffff7fffffffffff -140737488355328 13 ffff800000000000 -549755813889 28 ffffff7fffffffff -549755813888 12 ffffff8000000000 -2147483649 39 ffffffff7fffffff -2147483648 31 ffffffff80000000 -8388609 21 ffffffffff7fffff -8388608 2 ffffffffff800000 -32769 24 ffffffffffff7fff -32768 42 ffffffffffff8000 -129 4 ffffffffffffff7f -128 45 ffffffffffffff80 -1 17 ffffffffffffffff}
do_test boundary4-3.12 {
db eval {
SELECT rowid, a, x FROM t1 ORDER BY x
}
} {0 3 0000000000000000 127 25 000000000000007f 128 41 0000000000000080 255 29 00000000000000ff 256 14 0000000000000100 32767 10 0000000000007fff 32768 22 0000000000008000 65535 6 000000000000ffff 65536 33 0000000000010000 8388607 7 00000000007fffff 8388608 5 0000000000800000 16777215 9 0000000000ffffff 16777216 15 0000000001000000 2147483647 46 000000007fffffff 2147483648 36 0000000080000000 4294967295 37 00000000ffffffff 4294967296 11 0000000100000000 549755813887 1 0000007fffffffff 549755813888 35 0000008000000000 1099511627775 20 000000ffffffffff 1099511627776 8 0000010000000000 140737488355327 38 00007fffffffffff 140737488355328 34 0000800000000000 281474976710655 19 0000ffffffffffff 281474976710656 32 0001000000000000 36028797018963967 40 007fffffffffffff 36028797018963968 23 0080000000000000 72057594037927935 27 00ffffffffffffff 72057594037927936 16 0100000000000000 9223372036854775807 18 7fffffffffffffff -9223372036854775808 26 8000000000000000 -36028797018963969 30 ff7fffffffffffff -36028797018963968 43 ff80000000000000 -140737488355329 44 ffff7fffffffffff -140737488355328 13 ffff800000000000 -549755813889 28 ffffff7fffffffff -549755813888 12 ffffff8000000000 -2147483649 39 ffffffff7fffffff -2147483648 31 ffffffff80000000 -8388609 21 ffffffffff7fffff -8388608 2 ffffffffff800000 -32769 24 ffffffffffff7fff -32768 42 ffffffffffff8000 -129 4 ffffffffffffff7f -128 45 ffffffffffffff80 -1 17 ffffffffffffffff}
do_test boundary4-3.13 {
db eval {
SELECT rowid, a, x FROM t1 ORDER BY +x DESC
}
} {-1 17 ffffffffffffffff -128 45 ffffffffffffff80 -129 4 ffffffffffffff7f -32768 42 ffffffffffff8000 -32769 24 ffffffffffff7fff -8388608 2 ffffffffff800000 -8388609 21 ffffffffff7fffff -2147483648 31 ffffffff80000000 -2147483649 39 ffffffff7fffffff -549755813888 12 ffffff8000000000 -549755813889 28 ffffff7fffffffff -140737488355328 13 ffff800000000000 -140737488355329 44 ffff7fffffffffff -36028797018963968 43 ff80000000000000 -36028797018963969 30 ff7fffffffffffff -9223372036854775808 26 8000000000000000 9223372036854775807 18 7fffffffffffffff 72057594037927936 16 0100000000000000 72057594037927935 27 00ffffffffffffff 36028797018963968 23 0080000000000000 36028797018963967 40 007fffffffffffff 281474976710656 32 0001000000000000 281474976710655 19 0000ffffffffffff 140737488355328 34 0000800000000000 140737488355327 38 00007fffffffffff 1099511627776 8 0000010000000000 1099511627775 20 000000ffffffffff 549755813888 35 0000008000000000 549755813887 1 0000007fffffffff 4294967296 11 0000000100000000 4294967295 37 00000000ffffffff 2147483648 36 0000000080000000 2147483647 46 000000007fffffff 16777216 15 0000000001000000 16777215 9 0000000000ffffff 8388608 5 0000000000800000 8388607 7 00000000007fffff 65536 33 0000000000010000 65535 6 000000000000ffff 32768 22 0000000000008000 32767 10 0000000000007fff 256 14 0000000000000100 255 29 00000000000000ff 128 41 0000000000000080 127 25 000000000000007f 0 3 0000000000000000}
do_test boundary4-3.14 {
db eval {
SELECT rowid, a, x FROM t1 ORDER BY x DESC
}
} {-1 17 ffffffffffffffff -128 45 ffffffffffffff80 -129 4 ffffffffffffff7f -32768 42 ffffffffffff8000 -32769 24 ffffffffffff7fff -8388608 2 ffffffffff800000 -8388609 21 ffffffffff7fffff -2147483648 31 ffffffff80000000 -2147483649 39 ffffffff7fffffff -549755813888 12 ffffff8000000000 -549755813889 28 ffffff7fffffffff -140737488355328 13 ffff800000000000 -140737488355329 44 ffff7fffffffffff -36028797018963968 43 ff80000000000000 -36028797018963969 30 ff7fffffffffffff -9223372036854775808 26 8000000000000000 9223372036854775807 18 7fffffffffffffff 72057594037927936 16 0100000000000000 72057594037927935 27 00ffffffffffffff 36028797018963968 23 0080000000000000 36028797018963967 40 007fffffffffffff 281474976710656 32 0001000000000000 281474976710655 19 0000ffffffffffff 140737488355328 34 0000800000000000 140737488355327 38 00007fffffffffff 1099511627776 8 0000010000000000 1099511627775 20 000000ffffffffff 549755813888 35 0000008000000000 549755813887 1 0000007fffffffff 4294967296 11 0000000100000000 4294967295 37 00000000ffffffff 2147483648 36 0000000080000000 2147483647 46 000000007fffffff 16777216 15 0000000001000000 16777215 9 0000000000ffffff 8388608 5 0000000000800000 8388607 7 00000000007fffff 65536 33 0000000000010000 65535 6 000000000000ffff 32768 22 0000000000008000 32767 10 0000000000007fff 256 14 0000000000000100 255 29 00000000000000ff 128 41 0000000000000080 127 25 000000000000007f 0 3 0000000000000000}
do_test boundary4-4.1 {
db eval {
UPDATE t1 SET rowid=a, a=rowid, x=z, z=x
}
} {}
do_test boundary4-4.3 {
db eval {
SELECT a, rowid, z FROM t1 ORDER BY +a
}
} {-9223372036854775808 26 8000000000000000 -36028797018963969 30 ff7fffffffffffff -36028797018963968 43 ff80000000000000 -140737488355329 44 ffff7fffffffffff -140737488355328 13 ffff800000000000 -549755813889 28 ffffff7fffffffff -549755813888 12 ffffff8000000000 -2147483649 39 ffffffff7fffffff -2147483648 31 ffffffff80000000 -8388609 21 ffffffffff7fffff -8388608 2 ffffffffff800000 -32769 24 ffffffffffff7fff -32768 42 ffffffffffff8000 -129 4 ffffffffffffff7f -128 45 ffffffffffffff80 -1 17 ffffffffffffffff 0 3 0000000000000000 127 25 000000000000007f 128 41 0000000000000080 255 29 00000000000000ff 256 14 0000000000000100 32767 10 0000000000007fff 32768 22 0000000000008000 65535 6 000000000000ffff 65536 33 0000000000010000 8388607 7 00000000007fffff 8388608 5 0000000000800000 16777215 9 0000000000ffffff 16777216 15 0000000001000000 2147483647 46 000000007fffffff 2147483648 36 0000000080000000 4294967295 37 00000000ffffffff 4294967296 11 0000000100000000 549755813887 1 0000007fffffffff 549755813888 35 0000008000000000 1099511627775 20 000000ffffffffff 1099511627776 8 0000010000000000 140737488355327 38 00007fffffffffff 140737488355328 34 0000800000000000 281474976710655 19 0000ffffffffffff 281474976710656 32 0001000000000000 36028797018963967 40 007fffffffffffff 36028797018963968 23 0080000000000000 72057594037927935 27 00ffffffffffffff 72057594037927936 16 0100000000000000 9223372036854775807 18 7fffffffffffffff}
do_test boundary4-4.4 {
db eval {
SELECT a, rowid, z FROM t1 ORDER BY a
}
} {-9223372036854775808 26 8000000000000000 -36028797018963969 30 ff7fffffffffffff -36028797018963968 43 ff80000000000000 -140737488355329 44 ffff7fffffffffff -140737488355328 13 ffff800000000000 -549755813889 28 ffffff7fffffffff -549755813888 12 ffffff8000000000 -2147483649 39 ffffffff7fffffff -2147483648 31 ffffffff80000000 -8388609 21 ffffffffff7fffff -8388608 2 ffffffffff800000 -32769 24 ffffffffffff7fff -32768 42 ffffffffffff8000 -129 4 ffffffffffffff7f -128 45 ffffffffffffff80 -1 17 ffffffffffffffff 0 3 0000000000000000 127 25 000000000000007f 128 41 0000000000000080 255 29 00000000000000ff 256 14 0000000000000100 32767 10 0000000000007fff 32768 22 0000000000008000 65535 6 000000000000ffff 65536 33 0000000000010000 8388607 7 00000000007fffff 8388608 5 0000000000800000 16777215 9 0000000000ffffff 16777216 15 0000000001000000 2147483647 46 000000007fffffff 2147483648 36 0000000080000000 4294967295 37 00000000ffffffff 4294967296 11 0000000100000000 549755813887 1 0000007fffffffff 549755813888 35 0000008000000000 1099511627775 20 000000ffffffffff 1099511627776 8 0000010000000000 140737488355327 38 00007fffffffffff 140737488355328 34 0000800000000000 281474976710655 19 0000ffffffffffff 281474976710656 32 0001000000000000 36028797018963967 40 007fffffffffffff 36028797018963968 23 0080000000000000 72057594037927935 27 00ffffffffffffff 72057594037927936 16 0100000000000000 9223372036854775807 18 7fffffffffffffff}
do_test boundary4-4.5 {
db eval {
SELECT a, rowid, z FROM t1 ORDER BY +a DESC
}
} {9223372036854775807 18 7fffffffffffffff 72057594037927936 16 0100000000000000 72057594037927935 27 00ffffffffffffff 36028797018963968 23 0080000000000000 36028797018963967 40 007fffffffffffff 281474976710656 32 0001000000000000 281474976710655 19 0000ffffffffffff 140737488355328 34 0000800000000000 140737488355327 38 00007fffffffffff 1099511627776 8 0000010000000000 1099511627775 20 000000ffffffffff 549755813888 35 0000008000000000 549755813887 1 0000007fffffffff 4294967296 11 0000000100000000 4294967295 37 00000000ffffffff 2147483648 36 0000000080000000 2147483647 46 000000007fffffff 16777216 15 0000000001000000 16777215 9 0000000000ffffff 8388608 5 0000000000800000 8388607 7 00000000007fffff 65536 33 0000000000010000 65535 6 000000000000ffff 32768 22 0000000000008000 32767 10 0000000000007fff 256 14 0000000000000100 255 29 00000000000000ff 128 41 0000000000000080 127 25 000000000000007f 0 3 0000000000000000 -1 17 ffffffffffffffff -128 45 ffffffffffffff80 -129 4 ffffffffffffff7f -32768 42 ffffffffffff8000 -32769 24 ffffffffffff7fff -8388608 2 ffffffffff800000 -8388609 21 ffffffffff7fffff -2147483648 31 ffffffff80000000 -2147483649 39 ffffffff7fffffff -549755813888 12 ffffff8000000000 -549755813889 28 ffffff7fffffffff -140737488355328 13 ffff800000000000 -140737488355329 44 ffff7fffffffffff -36028797018963968 43 ff80000000000000 -36028797018963969 30 ff7fffffffffffff -9223372036854775808 26 8000000000000000}
do_test boundary4-4.6 {
db eval {
SELECT a, rowid, z FROM t1 ORDER BY a DESC
}
} {9223372036854775807 18 7fffffffffffffff 72057594037927936 16 0100000000000000 72057594037927935 27 00ffffffffffffff 36028797018963968 23 0080000000000000 36028797018963967 40 007fffffffffffff 281474976710656 32 0001000000000000 281474976710655 19 0000ffffffffffff 140737488355328 34 0000800000000000 140737488355327 38 00007fffffffffff 1099511627776 8 0000010000000000 1099511627775 20 000000ffffffffff 549755813888 35 0000008000000000 549755813887 1 0000007fffffffff 4294967296 11 0000000100000000 4294967295 37 00000000ffffffff 2147483648 36 0000000080000000 2147483647 46 000000007fffffff 16777216 15 0000000001000000 16777215 9 0000000000ffffff 8388608 5 0000000000800000 8388607 7 00000000007fffff 65536 33 0000000000010000 65535 6 000000000000ffff 32768 22 0000000000008000 32767 10 0000000000007fff 256 14 0000000000000100 255 29 00000000000000ff 128 41 0000000000000080 127 25 000000000000007f 0 3 0000000000000000 -1 17 ffffffffffffffff -128 45 ffffffffffffff80 -129 4 ffffffffffffff7f -32768 42 ffffffffffff8000 -32769 24 ffffffffffff7fff -8388608 2 ffffffffff800000 -8388609 21 ffffffffff7fffff -2147483648 31 ffffffff80000000 -2147483649 39 ffffffff7fffffff -549755813888 12 ffffff8000000000 -549755813889 28 ffffff7fffffffff -140737488355328 13 ffff800000000000 -140737488355329 44 ffff7fffffffffff -36028797018963968 43 ff80000000000000 -36028797018963969 30 ff7fffffffffffff -9223372036854775808 26 8000000000000000}
do_test boundary4-4.7 {
db eval {
SELECT a, rowid, z FROM t1 ORDER BY +rowid
}
} {549755813887 1 0000007fffffffff -8388608 2 ffffffffff800000 0 3 0000000000000000 -129 4 ffffffffffffff7f 8388608 5 0000000000800000 65535 6 000000000000ffff 8388607 7 00000000007fffff 1099511627776 8 0000010000000000 16777215 9 0000000000ffffff 32767 10 0000000000007fff 4294967296 11 0000000100000000 -549755813888 12 ffffff8000000000 -140737488355328 13 ffff800000000000 256 14 0000000000000100 16777216 15 0000000001000000 72057594037927936 16 0100000000000000 -1 17 ffffffffffffffff 9223372036854775807 18 7fffffffffffffff 281474976710655 19 0000ffffffffffff 1099511627775 20 000000ffffffffff -8388609 21 ffffffffff7fffff 32768 22 0000000000008000 36028797018963968 23 0080000000000000 -32769 24 ffffffffffff7fff 127 25 000000000000007f -9223372036854775808 26 8000000000000000 72057594037927935 27 00ffffffffffffff -549755813889 28 ffffff7fffffffff 255 29 00000000000000ff -36028797018963969 30 ff7fffffffffffff -2147483648 31 ffffffff80000000 281474976710656 32 0001000000000000 65536 33 0000000000010000 140737488355328 34 0000800000000000 549755813888 35 0000008000000000 2147483648 36 0000000080000000 4294967295 37 00000000ffffffff 140737488355327 38 00007fffffffffff -2147483649 39 ffffffff7fffffff 36028797018963967 40 007fffffffffffff 128 41 0000000000000080 -32768 42 ffffffffffff8000 -36028797018963968 43 ff80000000000000 -140737488355329 44 ffff7fffffffffff -128 45 ffffffffffffff80 2147483647 46 000000007fffffff}
do_test boundary4-4.8 {
db eval {
SELECT a, rowid, z FROM t1 ORDER BY rowid
}
} {549755813887 1 0000007fffffffff -8388608 2 ffffffffff800000 0 3 0000000000000000 -129 4 ffffffffffffff7f 8388608 5 0000000000800000 65535 6 000000000000ffff 8388607 7 00000000007fffff 1099511627776 8 0000010000000000 16777215 9 0000000000ffffff 32767 10 0000000000007fff 4294967296 11 0000000100000000 -549755813888 12 ffffff8000000000 -140737488355328 13 ffff800000000000 256 14 0000000000000100 16777216 15 0000000001000000 72057594037927936 16 0100000000000000 -1 17 ffffffffffffffff 9223372036854775807 18 7fffffffffffffff 281474976710655 19 0000ffffffffffff 1099511627775 20 000000ffffffffff -8388609 21 ffffffffff7fffff 32768 22 0000000000008000 36028797018963968 23 0080000000000000 -32769 24 ffffffffffff7fff 127 25 000000000000007f -9223372036854775808 26 8000000000000000 72057594037927935 27 00ffffffffffffff -549755813889 28 ffffff7fffffffff 255 29 00000000000000ff -36028797018963969 30 ff7fffffffffffff -2147483648 31 ffffffff80000000 281474976710656 32 0001000000000000 65536 33 0000000000010000 140737488355328 34 0000800000000000 549755813888 35 0000008000000000 2147483648 36 0000000080000000 4294967295 37 00000000ffffffff 140737488355327 38 00007fffffffffff -2147483649 39 ffffffff7fffffff 36028797018963967 40 007fffffffffffff 128 41 0000000000000080 -32768 42 ffffffffffff8000 -36028797018963968 43 ff80000000000000 -140737488355329 44 ffff7fffffffffff -128 45 ffffffffffffff80 2147483647 46 000000007fffffff}
do_test boundary4-4.9 {
db eval {
SELECT a, rowid, z FROM t1 ORDER BY +rowid DESC
}
} {2147483647 46 000000007fffffff -128 45 ffffffffffffff80 -140737488355329 44 ffff7fffffffffff -36028797018963968 43 ff80000000000000 -32768 42 ffffffffffff8000 128 41 0000000000000080 36028797018963967 40 007fffffffffffff -2147483649 39 ffffffff7fffffff 140737488355327 38 00007fffffffffff 4294967295 37 00000000ffffffff 2147483648 36 0000000080000000 549755813888 35 0000008000000000 140737488355328 34 0000800000000000 65536 33 0000000000010000 281474976710656 32 0001000000000000 -2147483648 31 ffffffff80000000 -36028797018963969 30 ff7fffffffffffff 255 29 00000000000000ff -549755813889 28 ffffff7fffffffff 72057594037927935 27 00ffffffffffffff -9223372036854775808 26 8000000000000000 127 25 000000000000007f -32769 24 ffffffffffff7fff 36028797018963968 23 0080000000000000 32768 22 0000000000008000 -8388609 21 ffffffffff7fffff 1099511627775 20 000000ffffffffff 281474976710655 19 0000ffffffffffff 9223372036854775807 18 7fffffffffffffff -1 17 ffffffffffffffff 72057594037927936 16 0100000000000000 16777216 15 0000000001000000 256 14 0000000000000100 -140737488355328 13 ffff800000000000 -549755813888 12 ffffff8000000000 4294967296 11 0000000100000000 32767 10 0000000000007fff 16777215 9 0000000000ffffff 1099511627776 8 0000010000000000 8388607 7 00000000007fffff 65535 6 000000000000ffff 8388608 5 0000000000800000 -129 4 ffffffffffffff7f 0 3 0000000000000000 -8388608 2 ffffffffff800000 549755813887 1 0000007fffffffff}
do_test boundary4-4.10 {
db eval {
SELECT a, rowid, z FROM t1 ORDER BY rowid DESC
}
} {2147483647 46 000000007fffffff -128 45 ffffffffffffff80 -140737488355329 44 ffff7fffffffffff -36028797018963968 43 ff80000000000000 -32768 42 ffffffffffff8000 128 41 0000000000000080 36028797018963967 40 007fffffffffffff -2147483649 39 ffffffff7fffffff 140737488355327 38 00007fffffffffff 4294967295 37 00000000ffffffff 2147483648 36 0000000080000000 549755813888 35 0000008000000000 140737488355328 34 0000800000000000 65536 33 0000000000010000 281474976710656 32 0001000000000000 -2147483648 31 ffffffff80000000 -36028797018963969 30 ff7fffffffffffff 255 29 00000000000000ff -549755813889 28 ffffff7fffffffff 72057594037927935 27 00ffffffffffffff -9223372036854775808 26 8000000000000000 127 25 000000000000007f -32769 24 ffffffffffff7fff 36028797018963968 23 0080000000000000 32768 22 0000000000008000 -8388609 21 ffffffffff7fffff 1099511627775 20 000000ffffffffff 281474976710655 19 0000ffffffffffff 9223372036854775807 18 7fffffffffffffff -1 17 ffffffffffffffff 72057594037927936 16 0100000000000000 16777216 15 0000000001000000 256 14 0000000000000100 -140737488355328 13 ffff800000000000 -549755813888 12 ffffff8000000000 4294967296 11 0000000100000000 32767 10 0000000000007fff 16777215 9 0000000000ffffff 1099511627776 8 0000010000000000 8388607 7 00000000007fffff 65535 6 000000000000ffff 8388608 5 0000000000800000 -129 4 ffffffffffffff7f 0 3 0000000000000000 -8388608 2 ffffffffff800000 549755813887 1 0000007fffffffff}
do_test boundary4-4.11 {
db eval {
SELECT a, rowid, z FROM t1 ORDER BY +z
}
} {0 3 0000000000000000 127 25 000000000000007f 128 41 0000000000000080 255 29 00000000000000ff 256 14 0000000000000100 32767 10 0000000000007fff 32768 22 0000000000008000 65535 6 000000000000ffff 65536 33 0000000000010000 8388607 7 00000000007fffff 8388608 5 0000000000800000 16777215 9 0000000000ffffff 16777216 15 0000000001000000 2147483647 46 000000007fffffff 2147483648 36 0000000080000000 4294967295 37 00000000ffffffff 4294967296 11 0000000100000000 549755813887 1 0000007fffffffff 549755813888 35 0000008000000000 1099511627775 20 000000ffffffffff 1099511627776 8 0000010000000000 140737488355327 38 00007fffffffffff 140737488355328 34 0000800000000000 281474976710655 19 0000ffffffffffff 281474976710656 32 0001000000000000 36028797018963967 40 007fffffffffffff 36028797018963968 23 0080000000000000 72057594037927935 27 00ffffffffffffff 72057594037927936 16 0100000000000000 9223372036854775807 18 7fffffffffffffff -9223372036854775808 26 8000000000000000 -36028797018963969 30 ff7fffffffffffff -36028797018963968 43 ff80000000000000 -140737488355329 44 ffff7fffffffffff -140737488355328 13 ffff800000000000 -549755813889 28 ffffff7fffffffff -549755813888 12 ffffff8000000000 -2147483649 39 ffffffff7fffffff -2147483648 31 ffffffff80000000 -8388609 21 ffffffffff7fffff -8388608 2 ffffffffff800000 -32769 24 ffffffffffff7fff -32768 42 ffffffffffff8000 -129 4 ffffffffffffff7f -128 45 ffffffffffffff80 -1 17 ffffffffffffffff}
do_test boundary4-4.12 {
db eval {
SELECT a, rowid, z FROM t1 ORDER BY z
}
} {0 3 0000000000000000 127 25 000000000000007f 128 41 0000000000000080 255 29 00000000000000ff 256 14 0000000000000100 32767 10 0000000000007fff 32768 22 0000000000008000 65535 6 000000000000ffff 65536 33 0000000000010000 8388607 7 00000000007fffff 8388608 5 0000000000800000 16777215 9 0000000000ffffff 16777216 15 0000000001000000 2147483647 46 000000007fffffff 2147483648 36 0000000080000000 4294967295 37 00000000ffffffff 4294967296 11 0000000100000000 549755813887 1 0000007fffffffff 549755813888 35 0000008000000000 1099511627775 20 000000ffffffffff 1099511627776 8 0000010000000000 140737488355327 38 00007fffffffffff 140737488355328 34 0000800000000000 281474976710655 19 0000ffffffffffff 281474976710656 32 0001000000000000 36028797018963967 40 007fffffffffffff 36028797018963968 23 0080000000000000 72057594037927935 27 00ffffffffffffff 72057594037927936 16 0100000000000000 9223372036854775807 18 7fffffffffffffff -9223372036854775808 26 8000000000000000 -36028797018963969 30 ff7fffffffffffff -36028797018963968 43 ff80000000000000 -140737488355329 44 ffff7fffffffffff -140737488355328 13 ffff800000000000 -549755813889 28 ffffff7fffffffff -549755813888 12 ffffff8000000000 -2147483649 39 ffffffff7fffffff -2147483648 31 ffffffff80000000 -8388609 21 ffffffffff7fffff -8388608 2 ffffffffff800000 -32769 24 ffffffffffff7fff -32768 42 ffffffffffff8000 -129 4 ffffffffffffff7f -128 45 ffffffffffffff80 -1 17 ffffffffffffffff}
do_test boundary4-4.13 {
db eval {
SELECT a, rowid, z FROM t1 ORDER BY +z DESC
}
} {-1 17 ffffffffffffffff -128 45 ffffffffffffff80 -129 4 ffffffffffffff7f -32768 42 ffffffffffff8000 -32769 24 ffffffffffff7fff -8388608 2 ffffffffff800000 -8388609 21 ffffffffff7fffff -2147483648 31 ffffffff80000000 -2147483649 39 ffffffff7fffffff -549755813888 12 ffffff8000000000 -549755813889 28 ffffff7fffffffff -140737488355328 13 ffff800000000000 -140737488355329 44 ffff7fffffffffff -36028797018963968 43 ff80000000000000 -36028797018963969 30 ff7fffffffffffff -9223372036854775808 26 8000000000000000 9223372036854775807 18 7fffffffffffffff 72057594037927936 16 0100000000000000 72057594037927935 27 00ffffffffffffff 36028797018963968 23 0080000000000000 36028797018963967 40 007fffffffffffff 281474976710656 32 0001000000000000 281474976710655 19 0000ffffffffffff 140737488355328 34 0000800000000000 140737488355327 38 00007fffffffffff 1099511627776 8 0000010000000000 1099511627775 20 000000ffffffffff 549755813888 35 0000008000000000 549755813887 1 0000007fffffffff 4294967296 11 0000000100000000 4294967295 37 00000000ffffffff 2147483648 36 0000000080000000 2147483647 46 000000007fffffff 16777216 15 0000000001000000 16777215 9 0000000000ffffff 8388608 5 0000000000800000 8388607 7 00000000007fffff 65536 33 0000000000010000 65535 6 000000000000ffff 32768 22 0000000000008000 32767 10 0000000000007fff 256 14 0000000000000100 255 29 00000000000000ff 128 41 0000000000000080 127 25 000000000000007f 0 3 0000000000000000}
do_test boundary4-4.14 {
db eval {
SELECT a, rowid, z FROM t1 ORDER BY z DESC
}
} {-1 17 ffffffffffffffff -128 45 ffffffffffffff80 -129 4 ffffffffffffff7f -32768 42 ffffffffffff8000 -32769 24 ffffffffffff7fff -8388608 2 ffffffffff800000 -8388609 21 ffffffffff7fffff -2147483648 31 ffffffff80000000 -2147483649 39 ffffffff7fffffff -549755813888 12 ffffff8000000000 -549755813889 28 ffffff7fffffffff -140737488355328 13 ffff800000000000 -140737488355329 44 ffff7fffffffffff -36028797018963968 43 ff80000000000000 -36028797018963969 30 ff7fffffffffffff -9223372036854775808 26 8000000000000000 9223372036854775807 18 7fffffffffffffff 72057594037927936 16 0100000000000000 72057594037927935 27 00ffffffffffffff 36028797018963968 23 0080000000000000 36028797018963967 40 007fffffffffffff 281474976710656 32 0001000000000000 281474976710655 19 0000ffffffffffff 140737488355328 34 0000800000000000 140737488355327 38 00007fffffffffff 1099511627776 8 0000010000000000 1099511627775 20 000000ffffffffff 549755813888 35 0000008000000000 549755813887 1 0000007fffffffff 4294967296 11 0000000100000000 4294967295 37 00000000ffffffff 2147483648 36 0000000080000000 2147483647 46 000000007fffffff 16777216 15 0000000001000000 16777215 9 0000000000ffffff 8388608 5 0000000000800000 8388607 7 00000000007fffff 65536 33 0000000000010000 65535 6 000000000000ffff 32768 22 0000000000008000 32767 10 0000000000007fff 256 14 0000000000000100 255 29 00000000000000ff 128 41 0000000000000080 127 25 000000000000007f 0 3 0000000000000000}
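# A note on the two ORDER BY forms paired throughout this file: a bare
# "ORDER BY x" may be satisfied by walking the table or an index in the
# requested order, while the unary "+" in "ORDER BY +x" keeps the
# column from being used that way and forces an explicit sorting pass.
# Each pair of tests above checks that both strategies produce the same
# ordering.  Rough illustration, kept as a comment because the query
# plan text varies between SQLite versions:
#
#   db eval {EXPLAIN QUERY PLAN SELECT rowid, a, z FROM t1 ORDER BY rowid}
#   db eval {EXPLAIN QUERY PLAN SELECT rowid, a, z FROM t1 ORDER BY +rowid}
#
# The first form can deliver rows in rowid order directly; the second
# is expected to show a temporary b-tree being used for the ORDER BY.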
finish_test


@ -0,0 +1,132 @@
# 2014-11-27
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
#
# This file contains test cases for b-tree logic.
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
set testprefix btree01
# The refactoring on the b-tree balance() routine in check-in
# http://www.sqlite.org/src/info/face33bea1ba3a (2014-10-27)
# caused the integrity_check on the following SQL to fail.
#
do_execsql_test btree01-1.1 {
PRAGMA page_size=65536;
CREATE TABLE t1(a INTEGER PRIMARY KEY, b BLOB);
WITH RECURSIVE
c(i) AS (VALUES(1) UNION ALL SELECT i+1 FROM c WHERE i<30)
INSERT INTO t1(a,b) SELECT i, zeroblob(6500) FROM c;
UPDATE t1 SET b=zeroblob(3000);
UPDATE t1 SET b=zeroblob(64000) WHERE a=2;
PRAGMA integrity_check;
} {ok}
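# The sizes used above appear tuned to the 64 KiB page size selected by
# the PRAGMA: thirty zeroblob(6500) rows occupy only a few leaf pages,
# and growing a single row to zeroblob(64000) makes that one cell close
# to a full page (spilling into overflow pages), so balance() has to
# redistribute cells between sibling pages, which is the code path the
# 2014-10-27 refactoring changed.  The loops that follow repeat the
# pattern while varying which row is grown and how far the other rows
# are shrunk first.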
# The previous test is sufficient to prevent a regression. But we
# add a number of additional tests to stress the balancer in similar
# ways, looking for related problems.
#
for {set i 1} {$i<=30} {incr i} {
do_test btree01-1.2.$i {
db eval {
DELETE FROM t1;
WITH RECURSIVE
c(i) AS (VALUES(1) UNION ALL SELECT i+1 FROM c WHERE i<30)
INSERT INTO t1(a,b) SELECT i, zeroblob(6500) FROM c;
UPDATE t1 SET b=zeroblob(3000);
UPDATE t1 SET b=zeroblob(64000) WHERE a=$::i;
PRAGMA integrity_check;
}
} {ok}
}
for {set i 1} {$i<=30} {incr i} {
do_test btree01-1.3.$i {
db eval {
DELETE FROM t1;
WITH RECURSIVE
c(i) AS (VALUES(1) UNION ALL SELECT i+1 FROM c WHERE i<30)
INSERT INTO t1(a,b) SELECT i, zeroblob(6500) FROM c;
UPDATE t1 SET b=zeroblob(2000);
UPDATE t1 SET b=zeroblob(64000) WHERE a=$::i;
PRAGMA integrity_check;
}
} {ok}
}
for {set i 1} {$i<=30} {incr i} {
do_test btree01-1.4.$i {
db eval {
DELETE FROM t1;
WITH RECURSIVE
c(i) AS (VALUES(1) UNION ALL SELECT i+1 FROM c WHERE i<30)
INSERT INTO t1(a,b) SELECT i, zeroblob(6500) FROM c;
UPDATE t1 SET b=zeroblob(6499) WHERE (a%3)==0;
UPDATE t1 SET b=zeroblob(6499) WHERE (a%3)==1;
UPDATE t1 SET b=zeroblob(6499) WHERE (a%3)==2;
UPDATE t1 SET b=zeroblob(64000) WHERE a=$::i;
PRAGMA integrity_check;
}
} {ok}
}
for {set i 1} {$i<=30} {incr i} {
do_test btree01-1.5.$i {
db eval {
DELETE FROM t1;
WITH RECURSIVE
c(i) AS (VALUES(1) UNION ALL SELECT i+1 FROM c WHERE i<30)
INSERT INTO t1(a,b) SELECT i, zeroblob(6542) FROM c;
UPDATE t1 SET b=zeroblob(2331);
UPDATE t1 SET b=zeroblob(65496) WHERE a=$::i;
PRAGMA integrity_check;
}
} {ok}
}
for {set i 1} {$i<=30} {incr i} {
do_test btree01-1.6.$i {
db eval {
DELETE FROM t1;
WITH RECURSIVE
c(i) AS (VALUES(1) UNION ALL SELECT i+1 FROM c WHERE i<30)
INSERT INTO t1(a,b) SELECT i, zeroblob(6542) FROM c;
UPDATE t1 SET b=zeroblob(2332);
UPDATE t1 SET b=zeroblob(65496) WHERE a=$::i;
PRAGMA integrity_check;
}
} {ok}
}
for {set i 1} {$i<=30} {incr i} {
do_test btree01-1.7.$i {
db eval {
DELETE FROM t1;
WITH RECURSIVE
c(i) AS (VALUES(1) UNION ALL SELECT i+1 FROM c WHERE i<30)
INSERT INTO t1(a,b) SELECT i, zeroblob(6500) FROM c;
UPDATE t1 SET b=zeroblob(1);
UPDATE t1 SET b=zeroblob(65000) WHERE a=$::i;
PRAGMA integrity_check;
}
} {ok}
}
for {set i 1} {$i<=31} {incr i} {
do_test btree01-1.8.$i {
db eval {
DELETE FROM t1;
WITH RECURSIVE
c(i) AS (VALUES(1) UNION ALL SELECT i+1 FROM c WHERE i<31)
INSERT INTO t1(a,b) SELECT i, zeroblob(6500) FROM c;
UPDATE t1 SET b=zeroblob(4000);
UPDATE t1 SET b=zeroblob(65000) WHERE a=$::i;
PRAGMA integrity_check;
}
} {ok}
}
finish_test


@ -0,0 +1,52 @@
# 2015-03-25
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for the SQLite library.
#
# The focus of this script is making multiple calls to saveCursorPosition()
# and restoreCursorPosition() when cursors have eState==CURSOR_SKIPNEXT
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
load_static_extension db eval
do_execsql_test btree02-100 {
CREATE TABLE t1(a TEXT, ax INTEGER, b INT, PRIMARY KEY(a,ax)) WITHOUT ROWID;
WITH RECURSIVE c(i) AS (VALUES(1) UNION ALL SELECT i+1 FROM c WHERE i<10)
INSERT INTO t1(a,ax,b) SELECT printf('%02x',i), random(), i FROM c;
CREATE INDEX t1a ON t1(a);
CREATE TABLE t2(x,y);
CREATE TABLE t3(cnt);
WITH RECURSIVE c(i) AS (VALUES(1) UNION ALL SELECT i+1 FROM c WHERE i<4)
INSERT INTO t3(cnt) SELECT i FROM c;
SELECT count(*) FROM t1;
} {10}
do_test btree02-110 {
db eval BEGIN
set i 0
db eval {SELECT a, ax, b, cnt FROM t1 CROSS JOIN t3 WHERE b IS NOT NULL} {
db eval {INSERT INTO t2(x,y) VALUES($b,$cnt)}
# puts "a,b,cnt = ($a,$b,$cnt)"
incr i
if {$i%2==1} {
set bx [expr {$b+1000}]
# puts "INSERT ($a),$bx"
db eval {INSERT INTO t1(a,ax,b) VALUES(printf('(%s)',$a),random(),$bx)}
} else {
# puts "DELETE a=$a"
db eval {DELETE FROM t1 WHERE a=$a}
}
db eval {COMMIT; BEGIN}
}
db one {COMMIT; SELECT count(*) FROM t1;}
} {20}
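# What the loop above exercises: the outer [db eval {SELECT ...}] keeps
# a read cursor open on t1 while the body inserts into and deletes from
# the same table and then runs COMMIT;BEGIN.  Each write and each
# transaction boundary forces the open cursor's position to be saved
# and later restored, and when the row the cursor was sitting on has
# just been deleted the restored cursor is expected to end up in the
# CURSOR_SKIPNEXT state named in the header comment.  A stripped-down
# version of the same pattern, kept as a comment (table s1 is purely
# illustrative):
#
#   db eval {CREATE TABLE s1(a)}
#   db eval {INSERT INTO s1 VALUES(1),(2),(3)}
#   db eval {SELECT a FROM s1} {
#     db eval {DELETE FROM s1 WHERE a=$a}  ;# invalidates the open cursor
#   }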
finish_test


@ -0,0 +1,57 @@
# 2013 April 02
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
#
# This file contains fault injection tests designed to test the btree.c
# module.
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
source $testdir/malloc_common.tcl
set testprefix btreefault
# This test will not work with an in-memory journal, as the database will
# become corrupt if an error is injected into a transaction after it starts
# writing data out to the db file.
if {[permutation]=="inmemory_journal"} {
finish_test
return
}
do_test 1-pre1 {
execsql {
PRAGMA auto_vacuum = incremental;
PRAGMA journal_mode = DELETE;
CREATE TABLE t1(a PRIMARY KEY, b);
INSERT INTO t1 VALUES(randomblob(1000), randomblob(100));
INSERT INTO t1 SELECT randomblob(1000), randomblob(1000) FROM t1;
INSERT INTO t1 SELECT randomblob(1000), randomblob(1000) FROM t1;
INSERT INTO t1 SELECT randomblob(1000), randomblob(1000) FROM t1;
INSERT INTO t1 SELECT randomblob(1000), randomblob(1000) FROM t1;
DELETE FROM t1 WHERE rowid%2;
}
faultsim_save_and_close
} {}
do_faultsim_test 1 -prep {
faultsim_restore_and_reopen
set ::STMT [sqlite3_prepare db "SELECT * FROM t1 ORDER BY a" -1 DUMMY]
sqlite3_step $::STMT
sqlite3_step $::STMT
} -body {
execsql { PRAGMA incremental_vacuum = 10 }
} -test {
sqlite3_finalize $::STMT
faultsim_test_result {0 {}}
faultsim_integrity_check
}
finish_test

View File

@ -0,0 +1,61 @@
# 2005 July 8
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file tests the busy handler
#
# $Id: busy.test,v 1.3 2008/03/15 02:09:22 drh Exp $
set testdir [file dirname $argv0]
source $testdir/tester.tcl
do_test busy-1.1 {
sqlite3 db2 test.db
execsql {
CREATE TABLE t1(x);
INSERT INTO t1 VALUES(1);
SELECT * FROM t1
}
} 1
proc busy x {
lappend ::busyargs $x
if {$x>2} {return 1}
return 0
}
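# In the Tcl interface the busy callback receives the number of times
# it has already been invoked for the current lock; returning 0 asks
# for another attempt and returning non-zero gives up, after which the
# statement fails with "database is locked".  The proc above therefore
# allows exactly four attempts (arguments 0 through 3), which is what
# the ::busyargs lists checked below record.  A plain retry window
# needs no callback at all; illustrative alternative, kept as a
# comment:
#
#   db timeout 200   ;# retry for up to roughly 200 ms, then give up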
set busyargs {}
do_test busy-1.2 {
db busy busy
db2 eval {BEGIN EXCLUSIVE}
catchsql {BEGIN IMMEDIATE}
} {1 {database is locked}}
do_test busy-1.3 {
set busyargs
} {0 1 2 3}
do_test busy-1.4 {
set busyargs {}
catchsql {BEGIN IMMEDIATE}
set busyargs
} {0 1 2 3}
do_test busy-2.1 {
db2 eval {COMMIT}
db eval {BEGIN; INSERT INTO t1 VALUES(5)}
db2 eval {BEGIN; SELECT * FROM t1}
set busyargs {}
catchsql COMMIT
} {1 {database is locked}}
do_test busy-2.2 {
set busyargs
} {0 1 2 3}
db2 close
finish_test


@ -0,0 +1,140 @@
# 2007 March 24
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
#
# $Id: cache.test,v 1.4 2007/08/22 02:56:44 drh Exp $
set testdir [file dirname $argv0]
source $testdir/tester.tcl
ifcapable !pager_pragmas||!compound {
finish_test
return
}
sqlite3_soft_heap_limit 0
proc pager_cache_size {db} {
set bt [btree_from_db $db]
db_enter $db
array set stats [btree_pager_stats $bt]
db_leave $db
return $stats(page)
}
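# pager_cache_size uses the test-only commands [btree_from_db] and
# [btree_pager_stats] to report how many pages the pager of the "main"
# database currently holds in its cache; the tests below compare that
# number against the configured PRAGMA cache_size.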
if {[permutation] == ""} {
do_test cache-1.1 { pager_cache_size db } {0}
}
do_test cache-1.2 {
execsql {
PRAGMA auto_vacuum=OFF;
CREATE TABLE abc(a, b, c);
INSERT INTO abc VALUES(1, 2, 3);
}
pager_cache_size db
} {2}
# At one point, repeatedly locking and unlocking the cache was causing
# a resource leak of one page per repetition. The page wasn't actually
# leaked, but would not be reused until the pager-cache was full (i.e.
# 2000 pages by default).
#
# This tests that once the pager-cache is initialized, it can be locked
# and unlocked repeatedly without internally allocating any new pages.
#
set cache_size [pager_cache_size db]
for {set ii 0} {$ii < 10} {incr ii} {
do_test cache-1.3.$ii {
execsql {SELECT * FROM abc}
pager_cache_size db
} $::cache_size
}
#-------------------------------------------------------------------------
# This block of tests checks that it is possible to set the cache_size of a
# database to a small (< 10) value. More specifically:
#
# cache-2.1.*: Test that "PRAGMA cache_size" appears to work with small
# values.
# cache-2.2.*: Test that "PRAGMA main.cache_size" appears to work with
# small values.
# cache-2.3.*: Test cache_size=1 correctly spills/flushes the cache.
# cache-2.4.*: Test cache_size=0 correctly spills/flushes the cache.
#
#
db_delete_and_reopen
do_execsql_test cache-2.0 {
PRAGMA auto_vacuum=OFF;
PRAGMA journal_mode=DELETE;
CREATE TABLE t1(a, b);
CREATE TABLE t2(c, d);
INSERT INTO t1 VALUES('x', 'y');
INSERT INTO t2 VALUES('i', 'j');
} {delete}
for {set i 0} {$i < 20} {incr i} {
do_execsql_test cache-2.1.$i.1 "PRAGMA cache_size = $i"
do_execsql_test cache-2.1.$i.2 "PRAGMA cache_size" $i
do_execsql_test cache-2.1.$i.3 "SELECT * FROM t1" {x y}
do_execsql_test cache-2.1.$i.4 "PRAGMA cache_size" $i
}
for {set i 0} {$i < 20} {incr i} {
do_execsql_test cache-2.2.$i.1 "PRAGMA main.cache_size = $i"
do_execsql_test cache-2.2.$i.2 "PRAGMA main.cache_size" $i
do_execsql_test cache-2.2.$i.3 "SELECT * FROM t1" {x y}
do_execsql_test cache-2.2.$i.4 "PRAGMA main.cache_size" $i
}
# Tests for cache_size = 1.
#
do_execsql_test cache-2.3.1 {
PRAGMA cache_size = 1;
BEGIN;
INSERT INTO t1 VALUES(1, 2);
PRAGMA lock_status;
} {main reserved temp closed}
do_test cache-2.3.2 { pager_cache_size db } 2
do_execsql_test cache-2.3.3 {
INSERT INTO t2 VALUES(1, 2);
PRAGMA lock_status;
} {main exclusive temp closed}
do_test cache-2.3.4 { pager_cache_size db } 2
do_execsql_test cache-2.3.5 COMMIT
do_test cache-2.3.6 { pager_cache_size db } 1
do_execsql_test cache-2.3.7 {
SELECT * FROM t1 UNION SELECT * FROM t2;
} {1 2 i j x y}
do_test cache-2.3.8 { pager_cache_size db } 1
# Tests for cache_size = 0.
#
do_execsql_test cache-2.4.1 {
PRAGMA cache_size = 0;
BEGIN;
INSERT INTO t1 VALUES(1, 2);
PRAGMA lock_status;
} {main reserved temp closed}
do_test cache-2.4.2 { pager_cache_size db } 2
do_execsql_test cache-2.4.3 {
INSERT INTO t2 VALUES(1, 2);
PRAGMA lock_status;
} {main exclusive temp closed}
do_test cache-2.4.4 { pager_cache_size db } 2
do_execsql_test cache-2.4.5 COMMIT
do_test cache-2.4.6 { pager_cache_size db } 0
do_execsql_test cache-2.4.7 {
SELECT * FROM t1 UNION SELECT * FROM t2;
} {1 2 i j x y}
do_test cache-2.4.8 { pager_cache_size db } 0
sqlite3_soft_heap_limit $cmdlinearg(soft-heap-limit)
finish_test


@ -0,0 +1,323 @@
# 2011 November 16
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
#
# This file contains test cases for sqlite3_db_cacheflush API.
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
set testprefix cacheflush
test_set_config_pagecache 0 0
# Run the supplied SQL on a copy of the database currently stored on
# disk in file $dbfile.
proc diskquery {dbfile sql} {
forcecopy $dbfile dq.db
sqlite3 dq dq.db
set res [execsql $sql dq]
dq close
set res
}
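# diskquery runs its SQL against a throw-away copy of the file, so it
# reports what has physically reached the database file without
# touching the page cache or the open transaction of the connection
# under test.  Typical use, as in the tests below (kept as a comment):
#
#   diskquery test.db { SELECT * FROM t1 }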
# Simplest possible test.
#
do_execsql_test 1.1.0 {
CREATE TABLE t1(a, b);
INSERT INTO t1 VALUES(1, 2);
BEGIN;
INSERT INTO t1 VALUES(3, 4);
}
do_test 1.1.1 {
diskquery test.db { SELECT * FROM t1 }
} {1 2}
do_test 1.1.2 {
sqlite3_db_cacheflush db
diskquery test.db { SELECT * FROM t1 }
} {1 2 3 4}
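# sqlite3_db_cacheflush writes the dirty pages held in the pager cache
# out to the database file without committing: the transaction opened
# above is still active, yet the on-disk copy already shows the (3, 4)
# row.  The tests that follow extend this to multi-page transactions,
# pages pinned by an active statement (which are skipped), conflicting
# read locks, and attached databases.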
# Test that multiple pages may be flushed to disk.
#
do_execsql_test 1.2.0 {
COMMIT;
CREATE TABLE t2(a, b);
BEGIN;
INSERT INTO t1 VALUES(5, 6);
INSERT INTO t2 VALUES('a', 'b');
}
do_test 1.2.1 {
diskquery test.db {
SELECT * FROM t1;
SELECT * FROM t2;
}
} {1 2 3 4}
do_test 1.2.2 {
sqlite3_db_cacheflush db
diskquery test.db {
SELECT * FROM t1;
SELECT * FROM t2;
}
} {1 2 3 4 5 6 a b}
# Test that pages with nRef!=0 are not flushed to disk.
#
do_execsql_test 1.3.0 {
COMMIT;
CREATE TABLE t3(a, b);
BEGIN;
INSERT INTO t1 VALUES(7, 8);
INSERT INTO t2 VALUES('c', 'd');
INSERT INTO t3 VALUES('i', 'ii');
}
do_test 1.3.1 {
diskquery test.db {
SELECT * FROM t1;
SELECT * FROM t2;
SELECT * FROM t3;
}
} {1 2 3 4 5 6 a b}
do_test 1.3.2 {
db eval { SELECT a FROM t1 } {
if {$a==3} {
sqlite3_db_cacheflush db
}
}
diskquery test.db {
SELECT * FROM t1;
SELECT * FROM t2;
SELECT * FROM t3;
}
} {1 2 3 4 5 6 a b c d i ii}
do_test 1.3.3 {
sqlite3_db_cacheflush db
diskquery test.db {
SELECT * FROM t1;
SELECT * FROM t2;
SELECT * FROM t3;
}
} {1 2 3 4 5 6 7 8 a b c d i ii}
# Check that SQLITE_BUSY is returned if pages cannot be flushed due to
# conflicting read locks.
#
do_execsql_test 1.4.0 {
COMMIT;
BEGIN;
INSERT INTO t1 VALUES(9, 10);
}
do_test 1.4.1 {
sqlite3 db2 test.db
db2 eval {
BEGIN;
SELECT * FROM t1;
}
diskquery test.db {
SELECT * FROM t1;
}
} {1 2 3 4 5 6 7 8}
do_test 1.4.2 {
list [catch { sqlite3_db_cacheflush db } msg] $msg
} {1 {database is locked}}
do_test 1.4.3 {
diskquery test.db {
SELECT * FROM t1;
}
} {1 2 3 4 5 6 7 8}
do_test 1.4.4 {
db2 close
sqlite3_db_cacheflush db
diskquery test.db {
SELECT * FROM t1;
}
} {1 2 3 4 5 6 7 8 9 10}
do_execsql_test 1.4.5 { COMMIT }
#-------------------------------------------------------------------------
# Test that ATTACHed database caches are also flushed.
#
forcedelete test.db2
do_execsql_test 2.1.0 {
ATTACH 'test.db2' AS aux;
CREATE TABLE aux.t4(x, y);
INSERT INTO t4 VALUES('A', 'B');
BEGIN;
INSERT INTO t1 VALUES(11, 12);
INSERT INTO t4 VALUES('C', 'D');
}
do_test 2.1.1 {
diskquery test.db { SELECT * FROM t1; }
} {1 2 3 4 5 6 7 8 9 10}
do_test 2.1.2 {
diskquery test.db2 { SELECT * FROM t4; }
} {A B}
do_test 2.1.3 {
sqlite3_db_cacheflush db
diskquery test.db { SELECT * FROM t1; }
} {1 2 3 4 5 6 7 8 9 10 11 12}
do_test 2.1.4 {
sqlite3_db_cacheflush db
diskquery test.db2 { SELECT * FROM t4; }
} {A B C D}
do_execsql_test 2.1.5 { COMMIT }
# And that hitting an SQLITE_BUSY when flushing "main" does not stop
# SQLite from going on to flush "aux".
#
do_execsql_test 2.2.0 {
BEGIN;
INSERT INTO t1 VALUES(13, 14);
INSERT INTO t4 VALUES('E', 'F');
}
do_test 2.2.1 {
diskquery test.db { SELECT * FROM t1; }
} {1 2 3 4 5 6 7 8 9 10 11 12}
do_test 2.2.2 {
diskquery test.db2 { SELECT * FROM t4; }
} {A B C D}
do_test 2.2.3 {
sqlite3 db2 test.db
execsql {
BEGIN;
SELECT * FROM t1;
} db2
list [catch { sqlite3_db_cacheflush db } msg] $msg
} {1 {database is locked}}
do_test 2.2.4 {
diskquery test.db { SELECT * FROM t1; }
} {1 2 3 4 5 6 7 8 9 10 11 12}
do_test 2.2.5 {
diskquery test.db2 { SELECT * FROM t4; }
} {A B C D E F}
do_test 2.2.6 {
db2 close
sqlite3_db_cacheflush db
diskquery test.db { SELECT * FROM t1; }
} {1 2 3 4 5 6 7 8 9 10 11 12 13 14}
do_execsql_test 2.2.7 { COMMIT }
#-------------------------------------------------------------------------
# Test that nothing terrible happens if sqlite3_db_cacheflush() is
# called on an in-memory database.
#
do_test 3.0 {
db close
sqlite3 db :memory:
db eval {
CREATE TABLE t1(x PRIMARY KEY);
CREATE TABLE t2(y PRIMARY KEY);
BEGIN;
INSERT INTO t1 VALUES(randomblob(100));
INSERT INTO t2 VALUES(randomblob(100));
INSERT INTO t1 VALUES(randomblob(100));
INSERT INTO t2 VALUES(randomblob(100));
}
sqlite3_db_cacheflush db
} {}
do_execsql_test 3.1 { PRAGMA integrity_check } ok
do_execsql_test 3.2 { COMMIT }
do_execsql_test 3.3 { PRAGMA integrity_check } ok
do_execsql_test 3.4 {
SELECT count(*) FROM t1;
SELECT count(*) FROM t2;
} {2 2}
#-------------------------------------------------------------------------
# Test that calling sqlite3_db_cacheflush() does not interfere with
# savepoint transactions.
#
do_test 4.0 {
reset_db
execsql {
CREATE TABLE ta(a, aa);
CREATE TABLE tb(b, bb);
INSERT INTO ta VALUES('a', randomblob(500));
INSERT INTO tb VALUES('b', randomblob(500));
BEGIN;
UPDATE ta SET a = 'A';
SAVEPOINT one;
UPDATE tb SET b = 'B';
}
sqlite3_db_cacheflush db
diskquery test.db {
SELECT a FROM ta;
SELECT b FROM tb;
}
} {A B}
do_test 4.1 {
execsql {
ROLLBACK TO one;
}
sqlite3_db_cacheflush db
diskquery test.db {
SELECT a FROM ta;
SELECT b FROM tb;
}
} {A b}
do_test 4.2 {
execsql {
INSERT INTO tb VALUES('c', randomblob(10));
INSERT INTO tb VALUES('d', randomblob(10));
INSERT INTO tb VALUES('e', randomblob(10));
}
sqlite3_db_cacheflush db
diskquery test.db {
SELECT a FROM ta;
SELECT b FROM tb;
}
} {A b c d e}
do_test 4.3 {
execsql {
SAVEPOINT two;
UPDATE tb SET b = upper(b);
}
sqlite3_db_cacheflush db
diskquery test.db {
SELECT a FROM ta;
SELECT b FROM tb;
}
} {A B C D E}
do_test 4.4 {
execsql {
ROLLBACK TO two;
}
sqlite3_db_cacheflush db
diskquery test.db {
SELECT a FROM ta;
SELECT b FROM tb;
}
} {A b c d e}
do_test 4.4b {
execsql {
ROLLBACK TO one;
}
sqlite3_db_cacheflush db
diskquery test.db {
SELECT a FROM ta;
SELECT b FROM tb;
}
} {A b}
do_test 4.5 {
execsql {
ROLLBACK;
SELECT a FROM ta;
SELECT b FROM tb;
}
} {a b}
test_restore_config_pagecache
finish_test


@ -0,0 +1,803 @@
# 2003 January 29
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for the SQLite library. The
# focus of this script is testing the callback-free C/C++ API.
#
# $Id: capi2.test,v 1.37 2008/12/30 17:55:00 drh Exp $
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
# Return the text values from the current row pointed at by STMT as a list.
proc get_row_values {STMT} {
set VALUES [list]
for {set i 0} {$i < [sqlite3_data_count $STMT]} {incr i} {
lappend VALUES [sqlite3_column_text $STMT $i]
}
return $VALUES
}
# Return the column names followed by declaration types for the result set
# of the SQL statement STMT.
#
# i.e. for:
# CREATE TABLE abc(a text, b integer);
# SELECT * FROM abc;
#
# The result is {a b text integer}
proc get_column_names {STMT} {
set VALUES [list]
for {set i 0} {$i < [sqlite3_column_count $STMT]} {incr i} {
lappend VALUES [sqlite3_column_name $STMT $i]
}
for {set i 0} {$i < [sqlite3_column_count $STMT]} {incr i} {
lappend VALUES [sqlite3_column_decltype $STMT $i]
}
return $VALUES
}
# Check basic functionality
#
do_test capi2-1.1 {
set DB [sqlite3_connection_pointer db]
execsql {CREATE TABLE t1(a,b,c)}
set VM [sqlite3_prepare $DB {SELECT name, rowid FROM sqlite_master} -1 TAIL]
set TAIL
} {}
do_test capi2-1.2 {
sqlite3_step $VM
} {SQLITE_ROW}
do_test capi2-1.3 {
sqlite3_data_count $VM
} {2}
do_test capi2-1.4 {
get_row_values $VM
} {t1 1}
do_test capi2-1.5 {
get_column_names $VM
} {name rowid text INTEGER}
do_test capi2-1.6 {
sqlite3_step $VM
} {SQLITE_DONE}
do_test capi2-1.7 {
list [sqlite3_column_count $VM] [get_row_values $VM] [get_column_names $VM]
} {2 {} {name rowid text INTEGER}}
# This used to be SQLITE_MISUSE. But now we automatically reset prepared
# statements.
ifcapable autoreset {
do_test capi2-1.8 {
sqlite3_step $VM
} {SQLITE_ROW}
} else {
do_test capi2-1.8 {
sqlite3_step $VM
} {SQLITE_MISUSE}
}
# Update: In v2, once SQLITE_MISUSE is returned the statement handle cannot
# be interrogated for more information. However in v3, since the column
# count, names and types are determined at compile time, these are still
# accessible after an SQLITE_MISUSE error.
do_test capi2-1.9 {
sqlite3_reset $VM
list [sqlite3_column_count $VM] [get_row_values $VM] [get_column_names $VM]
} {2 {} {name rowid text INTEGER}}
do_test capi2-1.10 {
sqlite3_data_count $VM
} {0}
do_test capi2-1.11 {
sqlite3_finalize $VM
} {SQLITE_OK}
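# Taken together, capi2-1.* walk the basic life cycle of a statement
# handle: sqlite3_prepare compiles the SQL, sqlite3_step advances one
# row at a time (SQLITE_ROW, then SQLITE_DONE), the column APIs read
# the current row and its metadata, sqlite3_reset rearms the statement
# and sqlite3_finalize releases it.  Condensed sketch, kept as a
# comment:
#
#   set names {}
#   set STMT [sqlite3_prepare $DB {SELECT name FROM sqlite_master} -1 TAIL]
#   while {[sqlite3_step $STMT] eq "SQLITE_ROW"} {
#     lappend names [sqlite3_column_text $STMT 0]
#   }
#   sqlite3_finalize $STMT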
# Check to make sure that the "tail" of a multi-statement SQL script
# is returned by sqlite3_prepare.
#
do_test capi2-2.1 {
set SQL {
SELECT name, rowid FROM sqlite_master;
SELECT name, rowid FROM sqlite_master WHERE 0;
-- A comment at the end
}
set VM [sqlite3_prepare $DB $SQL -1 SQL]
set SQL
} {
SELECT name, rowid FROM sqlite_master WHERE 0;
-- A comment at the end
}
do_test capi2-2.2 {
set r [sqlite3_step $VM]
lappend r [sqlite3_column_count $VM] \
[get_row_values $VM] \
[get_column_names $VM]
} {SQLITE_ROW 2 {t1 1} {name rowid text INTEGER}}
do_test capi2-2.3 {
set r [sqlite3_step $VM]
lappend r [sqlite3_column_count $VM] \
[get_row_values $VM] \
[get_column_names $VM]
} {SQLITE_DONE 2 {} {name rowid text INTEGER}}
do_test capi2-2.4 {
sqlite3_finalize $VM
} {SQLITE_OK}
do_test capi2-2.5 {
set VM [sqlite3_prepare $DB $SQL -1 SQL]
set SQL
} {
-- A comment at the end
}
do_test capi2-2.6 {
set r [sqlite3_step $VM]
lappend r [sqlite3_column_count $VM] \
[get_row_values $VM] \
[get_column_names $VM]
} {SQLITE_DONE 2 {} {name rowid text INTEGER}}
do_test capi2-2.7 {
sqlite3_finalize $VM
} {SQLITE_OK}
do_test capi2-2.8 {
set VM [sqlite3_prepare $DB $SQL -1 SQL]
list $SQL $VM
} {{} {}}
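# Because sqlite3_prepare compiles only the first statement and hands
# the unparsed remainder back through its TAIL argument, a script of
# several statements can be consumed with a loop of roughly this shape
# (kept as a comment; capi2-2.8 above shows that a remainder holding
# only comments and whitespace yields an empty statement handle):
#
#   while {[string length [string trim $SQL]]} {
#     set VM [sqlite3_prepare $DB $SQL -1 SQL]
#     if {$VM eq ""} break
#     while {[sqlite3_step $VM] eq "SQLITE_ROW"} {}
#     sqlite3_finalize $VM
#   }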
# Check the error handling.
#
do_test capi2-3.1 {
set rc [catch {
sqlite3_prepare $DB {select bogus from sqlite_master} -1 TAIL
} msg]
lappend rc $msg $TAIL
} {1 {(1) no such column: bogus} {}}
do_test capi2-3.2 {
set rc [catch {
sqlite3_prepare $DB {select bogus from } -1 TAIL
} msg]
lappend rc $msg $TAIL
} {1 {(1) near " ": syntax error} {}}
do_test capi2-3.3 {
set rc [catch {
sqlite3_prepare $DB {;;;;select bogus from sqlite_master} -1 TAIL
} msg]
lappend rc $msg $TAIL
} {1 {(1) no such column: bogus} {}}
do_test capi2-3.4 {
set rc [catch {
sqlite3_prepare $DB {select bogus from sqlite_master;x;} -1 TAIL
} msg]
lappend rc $msg $TAIL
} {1 {(1) no such column: bogus} {x;}}
do_test capi2-3.5 {
set rc [catch {
sqlite3_prepare $DB {select bogus from sqlite_master;;;x;} -1 TAIL
} msg]
lappend rc $msg $TAIL
} {1 {(1) no such column: bogus} {;;x;}}
do_test capi2-3.6 {
set rc [catch {
sqlite3_prepare $DB {select 5/0} -1 TAIL
} VM]
lappend rc $TAIL
} {0 {}}
do_test capi2-3.7 {
list [sqlite3_step $VM] \
[sqlite3_column_count $VM] \
[get_row_values $VM] \
[get_column_names $VM]
} {SQLITE_ROW 1 {{}} {5/0 {}}}
do_test capi2-3.8 {
sqlite3_finalize $VM
} {SQLITE_OK}
do_test capi2-3.9 {
execsql {CREATE UNIQUE INDEX i1 ON t1(a)}
set VM [sqlite3_prepare $DB {INSERT INTO t1 VALUES(1,2,3)} -1 TAIL]
set TAIL
} {}
do_test capi2-3.9b {db changes} {0}
do_test capi2-3.10 {
list [sqlite3_step $VM] \
[sqlite3_column_count $VM] \
[get_row_values $VM] \
[get_column_names $VM]
} {SQLITE_DONE 0 {} {}}
# Update for v3 - the change has not actually happened until the query is
# finalized. Is this going to cause trouble for anyone? Lee Nelson maybe?
# (Later:) The change now happens just before SQLITE_DONE is returned.
do_test capi2-3.10b {db changes} {1}
do_test capi2-3.11 {
sqlite3_finalize $VM
} {SQLITE_OK}
do_test capi2-3.11b {db changes} {1}
#do_test capi2-3.12-misuse {
# sqlite3_finalize $VM
#} {SQLITE_MISUSE}
do_test capi2-3.13 {
set VM [sqlite3_prepare $DB {INSERT INTO t1 VALUES(1,3,4)} -1 TAIL]
list [sqlite3_step $VM] \
[sqlite3_column_count $VM] \
[get_row_values $VM] \
[get_column_names $VM]
} {SQLITE_ERROR 0 {} {}}
# Update for v3: Preparing a statement does not affect the change counter.
# (Test result changes from 0 to 1). (Later:) change counter updates occur
# when sqlite3_step returns, not at finalize time.
do_test capi2-3.13b {db changes} {0}
do_test capi2-3.14 {
list [sqlite3_finalize $VM] [sqlite3_errmsg $DB] \
[sqlite3_extended_errcode $DB]
} {SQLITE_CONSTRAINT {UNIQUE constraint failed: t1.a} SQLITE_CONSTRAINT_UNIQUE}
do_test capi2-3.15 {
set VM [sqlite3_prepare $DB {CREATE TABLE t2(a NOT NULL, b)} -1 TAIL]
set TAIL
} {}
do_test capi2-3.16 {
list [sqlite3_step $VM] \
[sqlite3_column_count $VM] \
[get_row_values $VM] \
[get_column_names $VM]
} {SQLITE_DONE 0 {} {}}
do_test capi2-3.17 {
list [sqlite3_finalize $VM] [sqlite3_errmsg $DB]
} {SQLITE_OK {not an error}}
do_test capi2-3.18 {
set VM [sqlite3_prepare $DB {INSERT INTO t2 VALUES(NULL,2)} -1 TAIL]
list [sqlite3_step $VM] \
[sqlite3_column_count $VM] \
[get_row_values $VM] \
[get_column_names $VM]
} {SQLITE_ERROR 0 {} {}}
do_test capi2-3.19 {
list [sqlite3_finalize $VM] [sqlite3_errmsg $DB] \
[sqlite3_extended_errcode $DB]
} {SQLITE_CONSTRAINT {NOT NULL constraint failed: t2.a} SQLITE_CONSTRAINT_NOTNULL}
do_test capi2-3.20 {
execsql {
CREATE TABLE a1(message_id, name, UNIQUE(message_id, name));
INSERT INTO a1 VALUES(1, 1);
}
} {}
do_test capi2-3.21 {
set VM [sqlite3_prepare $DB {INSERT INTO a1 VALUES(1, 1)} -1 TAIL]
sqlite3_step $VM
} {SQLITE_ERROR}
do_test capi2-3.22 {
sqlite3_errcode $DB
} {SQLITE_ERROR}
do_test capi2-3.23 {
sqlite3_finalize $VM
} {SQLITE_CONSTRAINT}
do_test capi2-3.24 {
list [sqlite3_errcode $DB] [sqlite3_extended_errcode $DB]
} {SQLITE_CONSTRAINT SQLITE_CONSTRAINT_UNIQUE}
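# Note the split error reporting in capi2-3.21 through capi2-3.24:
# statements compiled with the legacy sqlite3_prepare() report a
# generic SQLITE_ERROR from sqlite3_step(), and the specific code
# (here SQLITE_CONSTRAINT with extended code SQLITE_CONSTRAINT_UNIQUE)
# only surfaces once the statement is finalized or reset, after which
# sqlite3_errcode() and sqlite3_extended_errcode() report it as well.
# Statements prepared with sqlite3_prepare_v2() would return the
# detailed code from sqlite3_step() directly.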
# Two or more virtual machines exists at the same time.
#
do_test capi2-4.1 {
set VM1 [sqlite3_prepare $DB {INSERT INTO t2 VALUES(1,2)} -1 TAIL]
set TAIL
} {}
do_test capi2-4.2 {
set VM2 [sqlite3_prepare $DB {INSERT INTO t2 VALUES(2,3)} -1 TAIL]
set TAIL
} {}
do_test capi2-4.3 {
set VM3 [sqlite3_prepare $DB {INSERT INTO t2 VALUES(3,4)} -1 TAIL]
set TAIL
} {}
do_test capi2-4.4 {
list [sqlite3_step $VM2] \
[sqlite3_column_count $VM2] \
[get_row_values $VM2] \
[get_column_names $VM2]
} {SQLITE_DONE 0 {} {}}
do_test capi2-4.5 {
execsql {SELECT * FROM t2 ORDER BY a}
} {2 3}
do_test capi2-4.6 {
sqlite3_finalize $VM2
} {SQLITE_OK}
do_test capi2-4.7 {
list [sqlite3_step $VM3] \
[sqlite3_column_count $VM3] \
[get_row_values $VM3] \
[get_column_names $VM3]
} {SQLITE_DONE 0 {} {}}
do_test capi2-4.8 {
execsql {SELECT * FROM t2 ORDER BY a}
} {2 3 3 4}
do_test capi2-4.9 {
sqlite3_finalize $VM3
} {SQLITE_OK}
do_test capi2-4.10 {
list [sqlite3_step $VM1] \
[sqlite3_column_count $VM1] \
[get_row_values $VM1] \
[get_column_names $VM1]
} {SQLITE_DONE 0 {} {}}
do_test capi2-4.11 {
execsql {SELECT * FROM t2 ORDER BY a}
} {1 2 2 3 3 4}
do_test capi2-4.12 {
sqlite3_finalize $VM1
} {SQLITE_OK}
# Interleaved SELECTs
#
do_test capi2-5.1 {
set VM1 [sqlite3_prepare $DB {SELECT * FROM t2} -1 TAIL]
set VM2 [sqlite3_prepare $DB {SELECT * FROM t2} -1 TAIL]
set VM3 [sqlite3_prepare $DB {SELECT * FROM t2} -1 TAIL]
list [sqlite3_step $VM1] \
[sqlite3_column_count $VM1] \
[get_row_values $VM1] \
[get_column_names $VM1]
} {SQLITE_ROW 2 {2 3} {a b {} {}}}
do_test capi2-5.2 {
list [sqlite3_step $VM2] \
[sqlite3_column_count $VM2] \
[get_row_values $VM2] \
[get_column_names $VM2]
} {SQLITE_ROW 2 {2 3} {a b {} {}}}
do_test capi2-5.3 {
list [sqlite3_step $VM1] \
[sqlite3_column_count $VM1] \
[get_row_values $VM1] \
[get_column_names $VM1]
} {SQLITE_ROW 2 {3 4} {a b {} {}}}
do_test capi2-5.4 {
list [sqlite3_step $VM3] \
[sqlite3_column_count $VM3] \
[get_row_values $VM3] \
[get_column_names $VM3]
} {SQLITE_ROW 2 {2 3} {a b {} {}}}
do_test capi2-5.5 {
list [sqlite3_step $VM3] \
[sqlite3_column_count $VM3] \
[get_row_values $VM3] \
[get_column_names $VM3]
} {SQLITE_ROW 2 {3 4} {a b {} {}}}
do_test capi2-5.6 {
list [sqlite3_step $VM3] \
[sqlite3_column_count $VM3] \
[get_row_values $VM3] \
[get_column_names $VM3]
} {SQLITE_ROW 2 {1 2} {a b {} {}}}
do_test capi2-5.7 {
list [sqlite3_step $VM3] \
[sqlite3_column_count $VM3] \
[get_row_values $VM3] \
[get_column_names $VM3]
} {SQLITE_DONE 2 {} {a b {} {}}}
do_test capi2-5.8 {
sqlite3_finalize $VM3
} {SQLITE_OK}
do_test capi2-5.9 {
list [sqlite3_step $VM1] \
[sqlite3_column_count $VM1] \
[get_row_values $VM1] \
[get_column_names $VM1]
} {SQLITE_ROW 2 {1 2} {a b {} {}}}
do_test capi2-5.10 {
sqlite3_finalize $VM1
} {SQLITE_OK}
do_test capi2-5.11 {
list [sqlite3_step $VM2] \
[sqlite3_column_count $VM2] \
[get_row_values $VM2] \
[get_column_names $VM2]
} {SQLITE_ROW 2 {3 4} {a b {} {}}}
do_test capi2-5.12 {
list [sqlite3_step $VM2] \
[sqlite3_column_count $VM2] \
[get_row_values $VM2] \
[get_column_names $VM2]
} {SQLITE_ROW 2 {1 2} {a b {} {}}}
do_test capi2-5.13 {
sqlite3_finalize $VM2
} {SQLITE_OK}
# Check for proper SQLITE_BUSY returns.
#
do_test capi2-6.1 {
execsql {
BEGIN;
CREATE TABLE t3(x counter);
INSERT INTO t3 VALUES(1);
INSERT INTO t3 VALUES(2);
INSERT INTO t3 SELECT x+2 FROM t3;
INSERT INTO t3 SELECT x+4 FROM t3;
INSERT INTO t3 SELECT x+8 FROM t3;
COMMIT;
}
set VM1 [sqlite3_prepare $DB {SELECT * FROM t3} -1 TAIL]
sqlite3 db2 test.db
execsql {BEGIN} db2
} {}
# Update for v3: BEGIN doesn't write-lock the database. It is quite
# difficult to get v3 to write-lock the database, which causes a few
# problems for test scripts.
#
# do_test capi2-6.2 {
# list [sqlite3_step $VM1] \
# [sqlite3_column_count $VM1] \
# [get_row_values $VM1] \
# [get_column_names $VM1]
# } {SQLITE_BUSY 0 {} {}}
do_test capi2-6.3 {
execsql {COMMIT} db2
} {}
do_test capi2-6.4 {
list [sqlite3_step $VM1] \
[sqlite3_column_count $VM1] \
[get_row_values $VM1] \
[get_column_names $VM1]
} {SQLITE_ROW 1 1 {x counter}}
do_test capi2-6.5 {
catchsql {INSERT INTO t3 VALUES(10);} db2
} {1 {database is locked}}
do_test capi2-6.6 {
list [sqlite3_step $VM1] \
[sqlite3_column_count $VM1] \
[get_row_values $VM1] \
[get_column_names $VM1]
} {SQLITE_ROW 1 2 {x counter}}
do_test capi2-6.7 {
execsql {SELECT * FROM t2} db2
} {2 3 3 4 1 2}
do_test capi2-6.8 {
list [sqlite3_step $VM1] \
[sqlite3_column_count $VM1] \
[get_row_values $VM1] \
[get_column_names $VM1]
} {SQLITE_ROW 1 3 {x counter}}
do_test capi2-6.9 {
execsql {SELECT * FROM t2}
} {2 3 3 4 1 2}
do_test capi2-6.10 {
list [sqlite3_step $VM1] \
[sqlite3_column_count $VM1] \
[get_row_values $VM1] \
[get_column_names $VM1]
} {SQLITE_ROW 1 4 {x counter}}
do_test capi2-6.11 {
execsql {BEGIN}
} {}
do_test capi2-6.12 {
list [sqlite3_step $VM1] \
[sqlite3_column_count $VM1] \
[get_row_values $VM1] \
[get_column_names $VM1]
} {SQLITE_ROW 1 5 {x counter}}
# A read no longer blocks a write in the same connection.
#do_test capi2-6.13 {
# catchsql {UPDATE t3 SET x=x+1}
#} {1 {database table is locked}}
do_test capi2-6.14 {
list [sqlite3_step $VM1] \
[sqlite3_column_count $VM1] \
[get_row_values $VM1] \
[get_column_names $VM1]
} {SQLITE_ROW 1 6 {x counter}}
do_test capi2-6.15 {
execsql {SELECT * FROM t1}
} {1 2 3}
do_test capi2-6.16 {
list [sqlite3_step $VM1] \
[sqlite3_column_count $VM1] \
[get_row_values $VM1] \
[get_column_names $VM1]
} {SQLITE_ROW 1 7 {x counter}}
do_test capi2-6.17 {
catchsql {UPDATE t1 SET b=b+1}
} {0 {}}
do_test capi2-6.18 {
list [sqlite3_step $VM1] \
[sqlite3_column_count $VM1] \
[get_row_values $VM1] \
[get_column_names $VM1]
} {SQLITE_ROW 1 8 {x counter}}
do_test capi2-6.19 {
execsql {SELECT * FROM t1}
} {1 3 3}
do_test capi2-6.20 {
list [sqlite3_step $VM1] \
[sqlite3_column_count $VM1] \
[get_row_values $VM1] \
[get_column_names $VM1]
} {SQLITE_ROW 1 9 {x counter}}
#do_test capi2-6.21 {
# execsql {ROLLBACK; SELECT * FROM t1}
#} {1 2 3}
do_test capi2-6.22 {
list [sqlite3_step $VM1] \
[sqlite3_column_count $VM1] \
[get_row_values $VM1] \
[get_column_names $VM1]
} {SQLITE_ROW 1 10 {x counter}}
#do_test capi2-6.23 {
# execsql {BEGIN TRANSACTION;}
#} {}
do_test capi2-6.24 {
list [sqlite3_step $VM1] \
[sqlite3_column_count $VM1] \
[get_row_values $VM1] \
[get_column_names $VM1]
} {SQLITE_ROW 1 11 {x counter}}
do_test capi2-6.25 {
execsql {
INSERT INTO t1 VALUES(2,3,4);
SELECT * FROM t1;
}
} {1 3 3 2 3 4}
do_test capi2-6.26 {
list [sqlite3_step $VM1] \
[sqlite3_column_count $VM1] \
[get_row_values $VM1] \
[get_column_names $VM1]
} {SQLITE_ROW 1 12 {x counter}}
do_test capi2-6.27 {
catchsql {
INSERT INTO t1 VALUES(2,4,5);
SELECT * FROM t1;
}
} {1 {UNIQUE constraint failed: t1.a}}
do_test capi2-6.28 {
list [sqlite3_step $VM1] \
[sqlite3_column_count $VM1] \
[get_row_values $VM1] \
[get_column_names $VM1]
} {SQLITE_ROW 1 13 {x counter}}
do_test capi2-6.99 {
sqlite3_finalize $VM1
} {SQLITE_OK}
catchsql {ROLLBACK}
do_test capi2-7.1 {
stepsql $DB {
SELECT * FROM t1
}
} {0 1 2 3}
do_test capi2-7.2 {
stepsql $DB {
PRAGMA count_changes=on
}
} {0}
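# With PRAGMA count_changes=on, every INSERT, UPDATE and DELETE also
# returns a single row holding the number of rows it changed.  That is
# why the stepsql results for the write statements below look like
# {0 1} or {0 2}: the leading 0 is the stepsql return code and the
# remaining values are the rows produced by the statement.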
do_test capi2-7.3 {
stepsql $DB {
UPDATE t1 SET a=a+10;
}
} {0 1}
do_test capi2-7.4 {
stepsql $DB {
INSERT INTO t1 SELECT a+1,b+1,c+1 FROM t1;
}
} {0 1}
do_test capi2-7.4b {sqlite3_changes $DB} {1}
do_test capi2-7.5 {
stepsql $DB {
UPDATE t1 SET a=a+10;
}
} {0 2}
do_test capi2-7.5b {sqlite3_changes $DB} {2}
do_test capi2-7.6 {
stepsql $DB {
SELECT * FROM t1;
}
} {0 21 2 3 22 3 4}
do_test capi2-7.7 {
stepsql $DB {
INSERT INTO t1 SELECT a+2,b+2,c+2 FROM t1;
}
} {0 2}
do_test capi2-7.8 {
sqlite3_changes $DB
} {2}
do_test capi2-7.9 {
stepsql $DB {
SELECT * FROM t1;
}
} {0 21 2 3 22 3 4 23 4 5 24 5 6}
do_test capi2-7.10 {
stepsql $DB {
UPDATE t1 SET a=a-20;
SELECT * FROM t1;
}
} {0 4 1 2 3 2 3 4 3 4 5 4 5 6}
# Update for version 3: A SELECT statement no longer resets the change
# counter (Test result changes from 0 to 4).
do_test capi2-7.11 {
sqlite3_changes $DB
} {4}
do_test capi2-7.11a {
execsql {SELECT count(*) FROM t1}
} {4}
ifcapable {explain} {
do_test capi2-7.12 {
set x [stepsql $DB {EXPLAIN SELECT * FROM t1}]
lindex $x 0
} {0}
}
# Ticket #261 - make sure we can finalize before the end of a query.
#
do_test capi2-8.1 {
set VM1 [sqlite3_prepare $DB {SELECT * FROM t2} -1 TAIL]
sqlite3_finalize $VM1
} {SQLITE_OK}
# Tickets #384 and #385 - make sure the TAIL argument to sqlite3_prepare
# and all of the return pointers in sqlite_step can be null.
#
do_test capi2-9.1 {
set VM1 [sqlite3_prepare $DB {SELECT * FROM t2} -1 DUMMY]
sqlite3_step $VM1
sqlite3_finalize $VM1
} {SQLITE_OK}
# Test that passing a NULL pointer to sqlite3_finalize() or sqlite3_reset
# does not cause an error.
do_test capi2-10.1 {
sqlite3_finalize 0
} {SQLITE_OK}
do_test capi2-10.2 {
sqlite3_reset 0
} {SQLITE_OK}
#---------------------------------------------------------------------------
# The following tests - capi2-11.* - test the "column origin" APIs.
#
# sqlite3_column_origin_name()
# sqlite3_column_database_name()
# sqlite3_column_table_name()
#
ifcapable columnmetadata {
# This proc uses the database handle $::DB to compile the SQL statement passed
# as a parameter. The return value of this procedure is a list with one
# element for each column returned by the compiled statement. Each element of
# this list is itself a list of length three, consisting of the origin
# database, table and column for the corresponding returned column.
proc check_origins {sql} {
set ret [list]
set ::STMT [sqlite3_prepare $::DB $sql -1 dummy]
for {set i 0} {$i < [sqlite3_column_count $::STMT]} {incr i} {
lappend ret [list \
[sqlite3_column_database_name $::STMT $i] \
[sqlite3_column_table_name $::STMT $i] \
[sqlite3_column_origin_name $::STMT $i] \
]
}
sqlite3_finalize $::STMT
return $ret
}
do_test capi2-11.1 {
execsql {
CREATE TABLE tab1(col1, col2);
}
} {}
do_test capi2-11.2 {
check_origins {SELECT col2, col1 FROM tab1}
} [list {main tab1 col2} {main tab1 col1}]
do_test capi2-11.3 {
check_origins {SELECT col2 AS hello, col1 AS world FROM tab1}
} [list {main tab1 col2} {main tab1 col1}]
ifcapable subquery {
do_test capi2-11.4 {
check_origins {SELECT b, a FROM (SELECT col1 AS a, col2 AS b FROM tab1)}
} [list {main tab1 col2} {main tab1 col1}]
do_test capi2-11.5 {
check_origins {SELECT (SELECT col2 FROM tab1), (SELECT col1 FROM tab1)}
} [list {main tab1 col2} {main tab1 col1}]
do_test capi2-11.6 {
check_origins {SELECT (SELECT col2), (SELECT col1) FROM tab1}
} [list {main tab1 col2} {main tab1 col1}]
do_test capi2-11.7 {
check_origins {SELECT * FROM tab1}
} [list {main tab1 col1} {main tab1 col2}]
do_test capi2-11.8 {
check_origins {SELECT * FROM (SELECT * FROM tab1)}
} [list {main tab1 col1} {main tab1 col2}]
}
ifcapable view&&subquery {
do_test capi2-12.1 {
execsql {
CREATE VIEW view1 AS SELECT * FROM tab1;
}
} {}
do_test capi2-12.2 {
check_origins {SELECT col2, col1 FROM view1}
} [list {main tab1 col2} {main tab1 col1}]
do_test capi2-12.3 {
check_origins {SELECT col2 AS hello, col1 AS world FROM view1}
} [list {main tab1 col2} {main tab1 col1}]
do_test capi2-12.4 {
check_origins {SELECT b, a FROM (SELECT col1 AS a, col2 AS b FROM view1)}
} [list {main tab1 col2} {main tab1 col1}]
do_test capi2-12.5 {
check_origins {SELECT (SELECT col2 FROM view1), (SELECT col1 FROM view1)}
} [list {main tab1 col2} {main tab1 col1}]
do_test capi2-12.6 {
check_origins {SELECT (SELECT col2), (SELECT col1) FROM view1}
} [list {main tab1 col2} {main tab1 col1}]
do_test capi2-12.7 {
check_origins {SELECT * FROM view1}
} [list {main tab1 col1} {main tab1 col2}]
do_test capi2-12.8 {
check_origins {select * from (select * from view1)}
} [list {main tab1 col1} {main tab1 col2}]
do_test capi2-12.9 {
check_origins {select * from (select * from (select * from view1))}
} [list {main tab1 col1} {main tab1 col2}]
do_test capi2-12.10 {
db close
sqlite3 db test.db
set ::DB [sqlite3_connection_pointer db]
check_origins {select * from (select * from (select * from view1))}
} [list {main tab1 col1} {main tab1 col2}]
# This view will thwart the flattening optimization.
do_test capi2-13.1 {
execsql {
CREATE VIEW view2 AS SELECT * FROM tab1 limit 10 offset 10;
}
} {}
do_test capi2-13.2 {
check_origins {SELECT col2, col1 FROM view2}
} [list {main tab1 col2} {main tab1 col1}]
do_test capi2-13.3 {
check_origins {SELECT col2 AS hello, col1 AS world FROM view2}
} [list {main tab1 col2} {main tab1 col1}]
do_test capi2-13.4 {
check_origins {SELECT b, a FROM (SELECT col1 AS a, col2 AS b FROM view2)}
} [list {main tab1 col2} {main tab1 col1}]
do_test capi2-13.5 {
check_origins {SELECT (SELECT col2 FROM view2), (SELECT col1 FROM view2)}
} [list {main tab1 col2} {main tab1 col1}]
do_test capi2-13.6 {
check_origins {SELECT (SELECT col2), (SELECT col1) FROM view2}
} [list {main tab1 col2} {main tab1 col1}]
do_test capi2-13.7 {
check_origins {SELECT * FROM view2}
} [list {main tab1 col1} {main tab1 col2}]
do_test capi2-13.8 {
check_origins {select * from (select * from view2)}
} [list {main tab1 col1} {main tab1 col2}]
do_test capi2-13.9 {
check_origins {select * from (select * from (select * from view2))}
} [list {main tab1 col1} {main tab1 col2}]
do_test capi2-13.10 {
db close
sqlite3 db test.db
set ::DB [sqlite3_connection_pointer db]
check_origins {select * from (select * from (select * from view2))}
} [list {main tab1 col1} {main tab1 col2}]
do_test capi2-13.11 {
check_origins {select * from (select * from tab1 limit 10 offset 10)}
} [list {main tab1 col1} {main tab1 col2}]
}
} ;# ifcapable columnmetadata
db2 close
finish_test

File diff suppressed because it is too large

View File

@ -0,0 +1,145 @@
# 2004 September 2
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for SQLite library. The
# focus of this script is testing the callback-free C/C++ API and in
# particular the behavior of sqlite3_step() when trying to commit
# with lock contention.
#
# $Id: capi3b.test,v 1.4 2007/08/10 19:46:14 drh Exp $
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
# These tests depend on the pager holding changes in cache
# until it is time to commit. But that won't happen if the
# soft-heap-limit is set too low. So disable the soft heap limit
# for the duration of this test.
#
sqlite3_soft_heap_limit 0
set DB [sqlite3_connection_pointer db]
sqlite3 db2 test.db
set DB2 [sqlite3_connection_pointer db2]
# Create some data in the database
#
do_test capi3b-1.1 {
execsql {
CREATE TABLE t1(x);
INSERT INTO t1 VALUES(1);
INSERT INTO t1 VALUES(2);
SELECT * FROM t1
}
} {1 2}
# Make sure the second database connection can see the data
#
do_test capi3b-1.2 {
execsql {
SELECT * FROM t1
} db2
} {1 2}
# First database connection acquires a shared lock
#
do_test capi3b-1.3 {
execsql {
BEGIN;
SELECT * FROM t1;
}
} {1 2}
# Second database connection tries to write. The sqlite3_step()
# function returns SQLITE_BUSY because it cannot commit.
#
do_test capi3b-1.4 {
set VM [sqlite3_prepare $DB2 {INSERT INTO t1 VALUES(3)} -1 TAIL]
sqlite3_step $VM
} SQLITE_BUSY
# The sqlite3_step call can be repeated multiple times.
#
do_test capi3b-1.5.1 {
sqlite3_step $VM
} SQLITE_BUSY
do_test capi3b-1.5.2 {
sqlite3_step $VM
} SQLITE_BUSY
# The first connection closes its transaction. This allows the second
# connection's sqlite3_step to succeed.
#
do_test capi3b-1.6 {
execsql COMMIT
sqlite3_step $VM
} SQLITE_DONE
do_test capi3b-1.7 {
sqlite3_finalize $VM
} SQLITE_OK
do_test capi3b-1.8 {
execsql {SELECT * FROM t1} db2
} {1 2 3}
do_test capi3b-1.9 {
execsql {SELECT * FROM t1}
} {1 2 3}
# Start doing a SELECT with one connection. This gets a SHARED lock.
# Then do an INSERT with the other connection. The INSERT should
# not be able to complete until the SELECT finishes.
#
do_test capi3b-2.1 {
set VM1 [sqlite3_prepare $DB {SELECT * FROM t1} -1 TAIL]
sqlite3_step $VM1
} SQLITE_ROW
do_test capi3b-2.2 {
sqlite3_column_text $VM1 0
} 1
do_test capi3b-2.3 {
set VM2 [sqlite3_prepare $DB2 {INSERT INTO t1 VALUES(4)} -1 TAIL]
sqlite3_step $VM2
} SQLITE_BUSY
do_test capi3b-2.4 {
sqlite3_step $VM1
} SQLITE_ROW
do_test capi3b-2.5 {
sqlite3_column_text $VM1 0
} 2
do_test capi3b-2.6 {
sqlite3_step $VM2
} SQLITE_BUSY
do_test capi3b-2.7 {
sqlite3_step $VM1
} SQLITE_ROW
do_test capi3b-2.8 {
sqlite3_column_text $VM1 0
} 3
do_test capi3b-2.9 {
sqlite3_step $VM2
} SQLITE_BUSY
do_test capi3b-2.10 {
sqlite3_step $VM1
} SQLITE_DONE
do_test capi3b-2.11 {
sqlite3_step $VM2
} SQLITE_DONE
do_test capi3b-2.12 {
sqlite3_finalize $VM1
sqlite3_finalize $VM2
execsql {SELECT * FROM t1}
} {1 2 3 4}
catch {db2 close}
sqlite3_soft_heap_limit $cmdlinearg(soft-heap-limit)
finish_test

File diff suppressed because it is too large

View File

@ -0,0 +1,183 @@
# 2008 June 18
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for SQLite library.
#
# This file is devoted to testing the sqlite3_next_stmt and
# sqlite3_stmt_readonly and sqlite3_stmt_busy interfaces.
#
# $Id: capi3d.test,v 1.2 2008/07/14 15:11:20 drh Exp $
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
# Create N prepared statements against database connection db
# and return a list of all the generated prepared statements.
#
proc make_prepared_statements {N} {
set plist {}
for {set i 0} {$i<$N} {incr i} {
set sql "SELECT $i FROM sqlite_master WHERE name LIKE '%$i%'"
if {rand()<0.33} {
set s [sqlite3_prepare_v2 db $sql -1 notused]
} else {
ifcapable utf16 {
if {rand()<0.5} {
set sql [encoding convertto unicode $sql]\x00\x00
set s [sqlite3_prepare16 db $sql -1 notused]
} else {
set s [sqlite3_prepare db $sql -1 notused]
}
}
ifcapable !utf16 {
set s [sqlite3_prepare db $sql -1 notused]
}
}
lappend plist $s
}
return $plist
}
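# Illustrative sketch (not part of the numbered test sequence): the proc
# can be exercised on its own, e.g. by creating three statements and
# finalizing them again:
#
#   set stmts [make_prepared_statements 3]
#   foreach s $stmts { sqlite3_finalize $s }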
# Scramble the $inlist into a random order.
#
proc scramble {inlist} {
set y {}
foreach x $inlist {
lappend y [list [expr {rand()}] $x]
}
set y [lsort $y]
set outlist {}
foreach x $y {
lappend outlist [lindex $x 1]
}
return $outlist
}
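# For example (illustrative only), [scramble {a b c d}] returns the same
# four elements in some random order; the loop below uses this so that
# statements are finalized in an order unrelated to their creation order.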
# Database initially has no prepared statements.
#
do_test capi3d-1.1 {
db cache flush
sqlite3_next_stmt db 0
} {}
# Run the following tests for between 1 and 100 prepared statements.
#
for {set i 1} {$i<=100} {incr i} {
set stmtlist [make_prepared_statements $i]
do_test capi3d-1.2.$i.1 {
set p [sqlite3_next_stmt db 0]
set x {}
while {$p!=""} {
lappend x $p
set p [sqlite3_next_stmt db $p]
}
lsort $x
} [lsort $stmtlist]
  do_test capi3d-1.2.$i.2 {
foreach p [scramble $::stmtlist] {
sqlite3_finalize $p
}
sqlite3_next_stmt db 0
} {}
}
# Tests for the is-read-only interface.
#
proc test_is_readonly {testname sql truth} {
do_test $testname [format {
set DB [sqlite3_connection_pointer db]
set STMT [sqlite3_prepare $DB {%s} -1 TAIL]
set rc [sqlite3_stmt_readonly $STMT]
sqlite3_finalize $STMT
set rc
} $sql] $truth
}
test_is_readonly capi3d-2.1 {SELECT * FROM sqlite_master} 1
test_is_readonly capi3d-2.2 {CREATE TABLE t1(x)} 0
db eval {CREATE TABLE t1(x)}
test_is_readonly capi3d-2.3 {INSERT INTO t1 VALUES(5)} 0
test_is_readonly capi3d-2.4 {UPDATE t1 SET x=x+1 WHERE x<0} 0
test_is_readonly capi3d-2.5 {SELECT * FROM t1} 1
ifcapable wal {
test_is_readonly capi3d-2.6 {PRAGMA journal_mode=WAL} 0
test_is_readonly capi3d-2.7 {PRAGMA wal_checkpoint} 0
}
test_is_readonly capi3d-2.8 {PRAGMA application_id=1234} 0
test_is_readonly capi3d-2.9 {VACUUM} 0
test_is_readonly capi3d-2.10 {PRAGMA integrity_check} 1
do_test capi3d-2.99 {
sqlite3_stmt_readonly 0
} 1
# Tests for sqlite3_stmt_busy
#
do_test capi3d-3.1 {
db eval {INSERT INTO t1 VALUES(6); INSERT INTO t1 VALUES(7);}
set STMT [sqlite3_prepare db {SELECT * FROM t1} -1 TAIL]
sqlite3_stmt_busy $STMT
} {0}
do_test capi3d-3.2 {
sqlite3_step $STMT
sqlite3_stmt_busy $STMT
} {1}
do_test capi3d-3.3 {
sqlite3_step $STMT
sqlite3_stmt_busy $STMT
} {1}
do_test capi3d-3.4 {
sqlite3_reset $STMT
sqlite3_stmt_busy $STMT
} {0}
do_test capi3d-3.99 {
sqlite3_finalize $STMT
sqlite3_stmt_busy 0
} {0}
#--------------------------------------------------------------------------
# Test the sqlite3_stmt_busy() function with ROLLBACK statements.
#
reset_db
do_execsql_test capi3d-4.1 {
CREATE TABLE t4(x,y);
BEGIN;
}
do_test capi3d-4.2.1 {
set ::s1 [sqlite3_prepare_v2 db "ROLLBACK" -1 notused]
sqlite3_step $::s1
} {SQLITE_DONE}
do_test capi3d-4.2.2 {
sqlite3_stmt_busy $::s1
} {0}
do_catchsql_test capi3d-4.2.3 {
VACUUM
} {0 {}}
do_test capi3d-4.2.4 {
sqlite3_reset $::s1
} {SQLITE_OK}
do_catchsql_test capi3d-4.2.5 {
VACUUM
} {0 {}}
do_test capi3d-4.2.6 {
sqlite3_finalize $::s1
} {SQLITE_OK}
finish_test

View File

@ -0,0 +1,126 @@
# 2010 November 18
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for SQLite library. The
# focus of this script is testing the callback-free C/C++ API.
#
# $Id: capi3e.test,v 1.70 2009/01/09 02:49:32 drh Exp $
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
# Make sure the system encoding is utf-8. Otherwise, if the system encoding
# is other than utf-8, [file isfile $x] may not refer to the same file
# as [sqlite3 db $x].
#
# This is no longer needed here because it should be done within the test
# fixture executable itself, via Tcl_SetSystemEncoding.
#
# encoding system utf-8
# Do not use a codec for tests in this file, as the database file is
# manipulated directly using tcl scripts (using the [hexio_write] command).
#
do_not_use_codec
# Return the UTF-16 representation of the supplied UTF-8 string $str.
# If $nt is true, append two 0x00 bytes as a nul terminator.
proc utf16 {str {nt 1}} {
set r [encoding convertto unicode $str]
if {$nt} {
append r "\x00\x00"
}
return $r
}
# Return the UTF-8 representation of the supplied UTF-16 string $str.
proc utf8 {str} {
# If $str ends in two 0x00 0x00 bytes, knock these off before
# converting to UTF-8 using TCL.
binary scan $str \c* vals
if {[lindex $vals end]==0 && [lindex $vals end-1]==0} {
set str [binary format \c* [lrange $vals 0 end-2]]
}
set r [encoding convertfrom unicode $str]
return $r
}
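# Round-trip sketch (illustrative, not executed by the tests): utf16
# appends the two-byte terminator that utf8 strips back off, so
# [utf8 [utf16 abc]] is expected to yield "abc" again.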
# These tests complement those in capi2.test. They are organized
# as follows:
#
# capi3e-1.*: Test sqlite3_open with various UTF8 filenames
# capi3e-2.*: Test sqlite3_open16 with various UTF8 filenames
# capi3e-3.*: Test ATTACH with various UTF8 filenames
db close
# here's the list of file names we're testing
set names {t 1 t. 1. t.d 1.d t-1 1-1 t.db ä.db ë.db ö.db ü.db ÿ.db}
set i 0
foreach name $names {
incr i
do_test capi3e-1.1.$i {
set db2 [sqlite3_open $name {}]
sqlite3_errcode $db2
} {SQLITE_OK}
do_test capi3e-1.2.$i {
sqlite3_close $db2
} {SQLITE_OK}
do_test capi3e-1.3.$i {
file isfile $name
} {1}
}
ifcapable {utf16} {
set i 0
foreach name $names {
incr i
do_test capi3e-2.1.$i {
set db2 [sqlite3_open16 [utf16 $name] {}]
sqlite3_errcode $db2
} {SQLITE_OK}
do_test capi3e-2.2.$i {
sqlite3_close $db2
} {SQLITE_OK}
do_test capi3e-2.3.$i {
file isfile $name
} {1}
}
}
ifcapable attach {
do_test capi3e-3.1 {
sqlite3 db2 base.db
} {}
set i 0
foreach name $names {
incr i
do_test capi3e-3.2.$i {
db2 eval "ATTACH DATABASE '$name' AS db$i;"
} {}
do_test capi3e-3.3.$i {
db2 eval "DETACH DATABASE db$i;"
} {}
}
do_test capi3e-3.4 {
db2 close
} {}
}
# clean up
forcedelete base.db
foreach name $names {
forcedelete $name
}
finish_test

View File

@ -0,0 +1,346 @@
# 2005 June 25
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for SQLite library. The
# focus of this file is testing the CAST operator.
#
# $Id: cast.test,v 1.10 2008/11/06 15:33:04 drh Exp $
set testdir [file dirname $argv0]
source $testdir/tester.tcl
# Only run these tests if the build includes the CAST operator
ifcapable !cast {
finish_test
return
}
# Tests for the CAST( AS blob), CAST( AS text) and CAST( AS numeric) built-ins
#
ifcapable bloblit {
do_test cast-1.1 {
execsql {SELECT x'616263'}
} abc
do_test cast-1.2 {
execsql {SELECT typeof(x'616263')}
} blob
do_test cast-1.3 {
execsql {SELECT CAST(x'616263' AS text)}
} abc
do_test cast-1.4 {
execsql {SELECT typeof(CAST(x'616263' AS text))}
} text
do_test cast-1.5 {
execsql {SELECT CAST(x'616263' AS numeric)}
} 0
do_test cast-1.6 {
execsql {SELECT typeof(CAST(x'616263' AS numeric))}
} integer
do_test cast-1.7 {
execsql {SELECT CAST(x'616263' AS blob)}
} abc
do_test cast-1.8 {
execsql {SELECT typeof(CAST(x'616263' AS blob))}
} blob
do_test cast-1.9 {
execsql {SELECT CAST(x'616263' AS integer)}
} 0
do_test cast-1.10 {
execsql {SELECT typeof(CAST(x'616263' AS integer))}
} integer
}
do_test cast-1.11 {
execsql {SELECT null}
} {{}}
do_test cast-1.12 {
execsql {SELECT typeof(NULL)}
} null
do_test cast-1.13 {
execsql {SELECT CAST(NULL AS text)}
} {{}}
do_test cast-1.14 {
execsql {SELECT typeof(CAST(NULL AS text))}
} null
do_test cast-1.15 {
execsql {SELECT CAST(NULL AS numeric)}
} {{}}
do_test cast-1.16 {
execsql {SELECT typeof(CAST(NULL AS numeric))}
} null
do_test cast-1.17 {
execsql {SELECT CAST(NULL AS blob)}
} {{}}
do_test cast-1.18 {
execsql {SELECT typeof(CAST(NULL AS blob))}
} null
do_test cast-1.19 {
execsql {SELECT CAST(NULL AS integer)}
} {{}}
do_test cast-1.20 {
execsql {SELECT typeof(CAST(NULL AS integer))}
} null
do_test cast-1.21 {
execsql {SELECT 123}
} {123}
do_test cast-1.22 {
execsql {SELECT typeof(123)}
} integer
do_test cast-1.23 {
execsql {SELECT CAST(123 AS text)}
} {123}
do_test cast-1.24 {
execsql {SELECT typeof(CAST(123 AS text))}
} text
do_test cast-1.25 {
execsql {SELECT CAST(123 AS numeric)}
} 123
do_test cast-1.26 {
execsql {SELECT typeof(CAST(123 AS numeric))}
} integer
do_test cast-1.27 {
execsql {SELECT CAST(123 AS blob)}
} {123}
do_test cast-1.28 {
execsql {SELECT typeof(CAST(123 AS blob))}
} blob
do_test cast-1.29 {
execsql {SELECT CAST(123 AS integer)}
} {123}
do_test cast-1.30 {
execsql {SELECT typeof(CAST(123 AS integer))}
} integer
do_test cast-1.31 {
execsql {SELECT 123.456}
} {123.456}
do_test cast-1.32 {
execsql {SELECT typeof(123.456)}
} real
do_test cast-1.33 {
execsql {SELECT CAST(123.456 AS text)}
} {123.456}
do_test cast-1.34 {
execsql {SELECT typeof(CAST(123.456 AS text))}
} text
do_test cast-1.35 {
execsql {SELECT CAST(123.456 AS numeric)}
} 123.456
do_test cast-1.36 {
execsql {SELECT typeof(CAST(123.456 AS numeric))}
} real
do_test cast-1.37 {
execsql {SELECT CAST(123.456 AS blob)}
} {123.456}
do_test cast-1.38 {
execsql {SELECT typeof(CAST(123.456 AS blob))}
} blob
do_test cast-1.39 {
execsql {SELECT CAST(123.456 AS integer)}
} {123}
do_test cast-1.40 {
execsql {SELECT typeof(CAST(123.456 AS integer))}
} integer
do_test cast-1.41 {
execsql {SELECT '123abc'}
} {123abc}
do_test cast-1.42 {
execsql {SELECT typeof('123abc')}
} text
do_test cast-1.43 {
execsql {SELECT CAST('123abc' AS text)}
} {123abc}
do_test cast-1.44 {
execsql {SELECT typeof(CAST('123abc' AS text))}
} text
do_test cast-1.45 {
execsql {SELECT CAST('123abc' AS numeric)}
} 123
do_test cast-1.46 {
execsql {SELECT typeof(CAST('123abc' AS numeric))}
} integer
do_test cast-1.47 {
execsql {SELECT CAST('123abc' AS blob)}
} {123abc}
do_test cast-1.48 {
execsql {SELECT typeof(CAST('123abc' AS blob))}
} blob
do_test cast-1.49 {
execsql {SELECT CAST('123abc' AS integer)}
} 123
do_test cast-1.50 {
execsql {SELECT typeof(CAST('123abc' AS integer))}
} integer
do_test cast-1.51 {
execsql {SELECT CAST('123.5abc' AS numeric)}
} 123.5
do_test cast-1.53 {
execsql {SELECT CAST('123.5abc' AS integer)}
} 123
do_test case-1.60 {
execsql {SELECT CAST(null AS REAL)}
} {{}}
do_test case-1.61 {
execsql {SELECT typeof(CAST(null AS REAL))}
} {null}
do_test case-1.62 {
execsql {SELECT CAST(1 AS REAL)}
} {1.0}
do_test case-1.63 {
execsql {SELECT typeof(CAST(1 AS REAL))}
} {real}
do_test case-1.64 {
execsql {SELECT CAST('1' AS REAL)}
} {1.0}
do_test case-1.65 {
execsql {SELECT typeof(CAST('1' AS REAL))}
} {real}
do_test case-1.66 {
execsql {SELECT CAST('abc' AS REAL)}
} {0.0}
do_test case-1.67 {
execsql {SELECT typeof(CAST('abc' AS REAL))}
} {real}
do_test case-1.68 {
execsql {SELECT CAST(x'31' AS REAL)}
} {1.0}
do_test case-1.69 {
execsql {SELECT typeof(CAST(x'31' AS REAL))}
} {real}
# Ticket #1662. Ignore leading spaces in numbers when casting.
#
do_test cast-2.1 {
execsql {SELECT CAST(' 123' AS integer)}
} 123
do_test cast-2.2 {
execsql {SELECT CAST(' -123.456' AS real)}
} -123.456
# Ticket #2364. Use full-precision integers if possible when casting
# to numeric. Do not fall back to real (and the corresponding 48-bit
# mantissa) unless absolutely necessary.
#
do_test cast-3.1 {
execsql {SELECT CAST(9223372036854774800 AS integer)}
} 9223372036854774800
do_test cast-3.2 {
execsql {SELECT CAST(9223372036854774800 AS numeric)}
} 9223372036854774800
do_realnum_test cast-3.3 {
execsql {SELECT CAST(9223372036854774800 AS real)}
} 9.22337203685477e+18
do_test cast-3.4 {
execsql {SELECT CAST(CAST(9223372036854774800 AS real) AS integer)}
} 9223372036854774784
do_test cast-3.5 {
execsql {SELECT CAST(-9223372036854774800 AS integer)}
} -9223372036854774800
do_test cast-3.6 {
execsql {SELECT CAST(-9223372036854774800 AS numeric)}
} -9223372036854774800
do_realnum_test cast-3.7 {
execsql {SELECT CAST(-9223372036854774800 AS real)}
} -9.22337203685477e+18
do_test cast-3.8 {
execsql {SELECT CAST(CAST(-9223372036854774800 AS real) AS integer)}
} -9223372036854774784
do_test cast-3.11 {
execsql {SELECT CAST('9223372036854774800' AS integer)}
} 9223372036854774800
do_test cast-3.12 {
execsql {SELECT CAST('9223372036854774800' AS numeric)}
} 9223372036854774800
do_realnum_test cast-3.13 {
execsql {SELECT CAST('9223372036854774800' AS real)}
} 9.22337203685477e+18
ifcapable long_double {
do_test cast-3.14 {
execsql {SELECT CAST(CAST('9223372036854774800' AS real) AS integer)}
} 9223372036854774784
}
do_test cast-3.15 {
execsql {SELECT CAST('-9223372036854774800' AS integer)}
} -9223372036854774800
do_test cast-3.16 {
execsql {SELECT CAST('-9223372036854774800' AS numeric)}
} -9223372036854774800
do_realnum_test cast-3.17 {
execsql {SELECT CAST('-9223372036854774800' AS real)}
} -9.22337203685477e+18
ifcapable long_double {
do_test cast-3.18 {
execsql {SELECT CAST(CAST('-9223372036854774800' AS real) AS integer)}
} -9223372036854774784
}
if {[db eval {PRAGMA encoding}]=="UTF-8"} {
do_test cast-3.21 {
execsql {SELECT CAST(x'39323233333732303336383534373734383030' AS integer)}
} 9223372036854774800
do_test cast-3.22 {
execsql {SELECT CAST(x'39323233333732303336383534373734383030' AS numeric)}
} 9223372036854774800
do_realnum_test cast-3.23 {
execsql {SELECT CAST(x'39323233333732303336383534373734383030' AS real)}
} 9.22337203685477e+18
ifcapable long_double {
do_test cast-3.24 {
execsql {
SELECT CAST(CAST(x'39323233333732303336383534373734383030' AS real)
AS integer)
}
} 9223372036854774784
}
}
do_test case-3.31 {
execsql {SELECT CAST(NULL AS numeric)}
} {{}}
# Test to see if it is possible to trick SQLite into reading past
# the end of a blob when converting it to a number.
do_test cast-3.32.1 {
set blob "1234567890"
set DB [sqlite3_connection_pointer db]
set ::STMT [sqlite3_prepare $DB {SELECT CAST(? AS real)} -1 TAIL]
sqlite3_bind_blob -static $::STMT 1 $blob 5
sqlite3_step $::STMT
} {SQLITE_ROW}
do_test cast-3.32.2 {
sqlite3_column_int $::STMT 0
} {12345}
do_test cast-3.32.3 {
sqlite3_finalize $::STMT
} {SQLITE_OK}
do_test cast-4.1 {
db eval {
CREATE TABLE t1(a);
INSERT INTO t1 VALUES('abc');
SELECT a, CAST(a AS integer) FROM t1;
}
} {abc 0}
do_test cast-4.2 {
db eval {
SELECT CAST(a AS integer), a FROM t1;
}
} {0 abc}
do_test cast-4.3 {
db eval {
SELECT a, CAST(a AS integer), a FROM t1;
}
} {abc 0 abc}
do_test cast-4.4 {
db eval {
SELECT CAST(a AS integer), a, CAST(a AS real), a FROM t1;
}
} {0 abc 0.0 abc}
finish_test

View File

@ -0,0 +1,158 @@
# 2011 November 16
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
#
# This file contains fault-injection test cases for the
# sqlite3_db_cacheflush API.
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
set testprefix cacheflush
source $testdir/malloc_common.tcl
# Run the supplied SQL on a copy of the database currently stored on
# disk in file $dbfile.
proc diskquery {dbfile sql} {
forcecopy $dbfile dq.db
sqlite3 dq dq.db
set res [execsql $sql dq]
dq close
set res
}
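# Usage sketch (illustrative, not executed by the tests below): inspect
# the last committed contents of test.db without disturbing the live
# connection:
#
#   diskquery test.db { SELECT count(*) FROM t1 }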
do_execsql_test 1.0 {
CREATE TABLE t1(a PRIMARY KEY, b);
CREATE INDEX i1 ON t1(b);
INSERT INTO t1 VALUES(1, 2);
INSERT INTO t1 VALUES(3, 4);
INSERT INTO t1 VALUES(5, 6);
INSERT INTO t1 VALUES(7, 8);
}
faultsim_save_and_close
do_faultsim_test 1.1 -prep {
faultsim_restore_and_reopen
db eval {
BEGIN;
UPDATE t1 SET b=b+1;
}
} -body {
sqlite3_db_cacheflush db
} -test {
if {[sqlite3_get_autocommit db]} { error "Transaction rolled back!" }
faultsim_test_result {0 {}} {1 {disk I/O error}}
catch { db eval COMMIT }
faultsim_integrity_check
}
do_faultsim_test 1.2 -prep {
faultsim_restore_and_reopen
db eval {
BEGIN;
UPDATE t1 SET b=b+1;
}
} -body {
set result [list]
db eval { SELECT * FROM t1 } {
if {$a==5} { catch { sqlite3_db_cacheflush db } }
lappend result $a $b
}
set result
} -test {
faultsim_test_result {0 {1 3 3 5 5 7 7 9}} {1 {disk I/O error}}
catch { db eval COMMIT }
faultsim_integrity_check
}
#-------------------------------------------------------------------------
reset_db
do_execsql_test 2.0 {
CREATE TABLE t1(a PRIMARY KEY, b, c);
CREATE INDEX i1 ON t1(b);
CREATE INDEX i2 ON t1(c, b);
INSERT INTO t1 VALUES(1, 2, randomblob(600));
INSERT INTO t1 VALUES(3, 4, randomblob(600));
INSERT INTO t1 VALUES(5, 6, randomblob(600));
INSERT INTO t1 VALUES(7, 8, randomblob(600));
INSERT INTO t1 VALUES(9, 10, randomblob(600));
}
faultsim_save_and_close
do_faultsim_test 2.1 -prep {
faultsim_restore_and_reopen
db eval {
BEGIN;
UPDATE t1 SET b=b+1;
}
} -body {
set result [list]
db eval { SELECT * FROM t1 } {
if {$a==5} { catch { sqlite3_db_cacheflush db } }
lappend result $a $b
}
set result
} -test {
faultsim_test_result {0 {1 3 3 5 5 7 7 9 9 11}} {1 {disk I/O error}}
catch { db eval { INSERT INTO t1 VALUES(11, 12, randomblob(600)) } }
catch { db eval COMMIT }
faultsim_integrity_check
}
do_faultsim_test 2.2 -prep {
faultsim_restore_and_reopen
db eval {
BEGIN;
UPDATE t1 SET b=b+1;
}
} -body {
sqlite3_db_cacheflush db
} -test {
if {[sqlite3_get_autocommit db]} { error "Transaction rolled back!" }
faultsim_test_result {0 {}} {1 {disk I/O error}}
catch { db eval { SELECT * FROM t1 } }
catch { db eval COMMIT }
faultsim_integrity_check
}
do_faultsim_test 2.3 -prep {
faultsim_restore_and_reopen
db eval {
BEGIN;
UPDATE t1 SET b=b-1;
}
} -body {
sqlite3_db_cacheflush db
} -test {
if {[sqlite3_get_autocommit db]} { error "Transaction rolled back!" }
faultsim_test_result {0 {}} {1 {disk I/O error}}
catch { db eval { INSERT INTO t1 VALUES(11, 12, randomblob(600)) } }
catch { db eval COMMIT }
faultsim_integrity_check
}
do_faultsim_test 2.4 -prep {
faultsim_restore_and_reopen
db eval {
BEGIN;
UPDATE t1 SET b=b-1;
}
} -body {
catch { sqlite3_db_cacheflush db }
catch { sqlite3_db_release_memory db }
catch { sqlite3_db_cacheflush db }
execsql { SELECT a, b FROM t1 }
} -test {
faultsim_test_result {0 {1 1 3 3 5 5 7 7 9 9}} {1 {disk I/O error}}
catchsql ROLLBACK
faultsim_integrity_check
}
finish_test

View File

@ -0,0 +1,482 @@
# 2005 November 2
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for SQLite library. The
# focus of this file is testing CHECK constraints
#
# $Id: check.test,v 1.13 2009/06/05 17:09:12 drh Exp $
set testdir [file dirname $argv0]
source $testdir/tester.tcl
set ::testprefix check
# Only run these tests if the build includes support for CHECK constraints
ifcapable !check {
finish_test
return
}
do_test check-1.1 {
execsql {
CREATE TABLE t1(
x INTEGER CHECK( x<5 ),
y REAL CHECK( y>x )
);
}
} {}
do_test check-1.2 {
execsql {
INSERT INTO t1 VALUES(3,4);
SELECT * FROM t1;
}
} {3 4.0}
do_test check-1.3 {
catchsql {
INSERT INTO t1 VALUES(6,7);
}
} {1 {CHECK constraint failed: t1}}
do_test check-1.4 {
execsql {
SELECT * FROM t1;
}
} {3 4.0}
do_test check-1.5 {
catchsql {
INSERT INTO t1 VALUES(4,3);
}
} {1 {CHECK constraint failed: t1}}
do_test check-1.6 {
execsql {
SELECT * FROM t1;
}
} {3 4.0}
do_test check-1.7 {
catchsql {
INSERT INTO t1 VALUES(NULL,6);
}
} {0 {}}
do_test check-1.8 {
execsql {
SELECT * FROM t1;
}
} {3 4.0 {} 6.0}
do_test check-1.9 {
catchsql {
INSERT INTO t1 VALUES(2,NULL);
}
} {0 {}}
do_test check-1.10 {
execsql {
SELECT * FROM t1;
}
} {3 4.0 {} 6.0 2 {}}
do_test check-1.11 {
execsql {
DELETE FROM t1 WHERE x IS NULL OR x!=3;
UPDATE t1 SET x=2 WHERE x==3;
SELECT * FROM t1;
}
} {2 4.0}
do_test check-1.12 {
catchsql {
UPDATE t1 SET x=7 WHERE x==2
}
} {1 {CHECK constraint failed: t1}}
do_test check-1.13 {
execsql {
SELECT * FROM t1;
}
} {2 4.0}
do_test check-1.14 {
catchsql {
UPDATE t1 SET x=5 WHERE x==2
}
} {1 {CHECK constraint failed: t1}}
do_test check-1.15 {
execsql {
SELECT * FROM t1;
}
} {2 4.0}
do_test check-1.16 {
catchsql {
UPDATE t1 SET x=4, y=11 WHERE x==2
}
} {0 {}}
do_test check-1.17 {
execsql {
SELECT * FROM t1;
}
} {4 11.0}
do_test check-2.1 {
execsql {
CREATE TABLE t2(
x INTEGER CONSTRAINT one CHECK( typeof(coalesce(x,0))=="integer" ),
y REAL CONSTRAINT two CHECK( typeof(coalesce(y,0.1))=='real' ),
z TEXT CONSTRAINT three CHECK( typeof(coalesce(z,''))=='text' )
);
}
} {}
do_test check-2.2 {
execsql {
INSERT INTO t2 VALUES(1,2.2,'three');
SELECT * FROM t2;
}
} {1 2.2 three}
db close
sqlite3 db test.db
do_test check-2.3 {
execsql {
INSERT INTO t2 VALUES(NULL, NULL, NULL);
SELECT * FROM t2;
}
} {1 2.2 three {} {} {}}
do_test check-2.4 {
catchsql {
INSERT INTO t2 VALUES(1.1, NULL, NULL);
}
} {1 {CHECK constraint failed: one}}
do_test check-2.5 {
catchsql {
INSERT INTO t2 VALUES(NULL, 5, NULL);
}
} {1 {CHECK constraint failed: two}}
do_test check-2.6 {
catchsql {
INSERT INTO t2 VALUES(NULL, NULL, 3.14159);
}
} {1 {CHECK constraint failed: three}}
# Undocumented behavior: The CONSTRAINT name clause can follow a constraint.
# Such a clause is ignored. But the parser must accept it for backwards
# compatibility.
#
do_test check-2.10 {
execsql {
CREATE TABLE t2b(
x INTEGER CHECK( typeof(coalesce(x,0))=='integer' ) CONSTRAINT one,
y TEXT PRIMARY KEY constraint two,
z INTEGER,
UNIQUE(x,z) constraint three
);
}
} {}
do_test check-2.11 {
catchsql {
INSERT INTO t2b VALUES('xyzzy','hi',5);
}
} {1 {CHECK constraint failed: t2b}}
do_test check-2.12 {
execsql {
CREATE TABLE t2c(
x INTEGER CONSTRAINT x_one CONSTRAINT x_two
CHECK( typeof(coalesce(x,0))=='integer' )
CONSTRAINT x_two CONSTRAINT x_three,
y INTEGER, z INTEGER,
CONSTRAINT u_one UNIQUE(x,y,z) CONSTRAINT u_two
);
}
} {}
do_test check-2.13 {
catchsql {
INSERT INTO t2c VALUES('xyzzy',7,8);
}
} {1 {CHECK constraint failed: x_two}}
do_test check-2.cleanup {
execsql {
DROP TABLE IF EXISTS t2b;
DROP TABLE IF EXISTS t2c;
}
} {}
ifcapable subquery {
do_test check-3.1 {
catchsql {
CREATE TABLE t3(
x, y, z,
CHECK( x<(SELECT min(x) FROM t1) )
);
}
} {1 {subqueries prohibited in CHECK constraints}}
}
do_test check-3.2 {
execsql {
SELECT name FROM sqlite_master ORDER BY name
}
} {t1 t2}
do_test check-3.3 {
catchsql {
CREATE TABLE t3(
x, y, z,
CHECK( q<x )
);
}
} {1 {no such column: q}}
do_test check-3.4 {
execsql {
SELECT name FROM sqlite_master ORDER BY name
}
} {t1 t2}
do_test check-3.5 {
catchsql {
CREATE TABLE t3(
x, y, z,
CHECK( t2.x<x )
);
}
} {1 {no such column: t2.x}}
do_test check-3.6 {
execsql {
SELECT name FROM sqlite_master ORDER BY name
}
} {t1 t2}
do_test check-3.7 {
catchsql {
CREATE TABLE t3(
x, y, z,
CHECK( t3.x<25 )
);
}
} {0 {}}
do_test check-3.8 {
execsql {
INSERT INTO t3 VALUES(1,2,3);
SELECT * FROM t3;
}
} {1 2 3}
do_test check-3.9 {
catchsql {
INSERT INTO t3 VALUES(111,222,333);
}
} {1 {CHECK constraint failed: t3}}
do_test check-4.1 {
execsql {
CREATE TABLE t4(x, y,
CHECK (
x+y==11
OR x*y==12
OR x/y BETWEEN 5 AND 8
OR -x==y+10
)
);
}
} {}
do_test check-4.2 {
execsql {
INSERT INTO t4 VALUES(1,10);
SELECT * FROM t4
}
} {1 10}
do_test check-4.3 {
execsql {
UPDATE t4 SET x=4, y=3;
SELECT * FROM t4
}
} {4 3}
do_test check-4.4 {
execsql {
UPDATE t4 SET x=12, y=2;
SELECT * FROM t4
}
} {12 2}
do_test check-4.5 {
execsql {
UPDATE t4 SET x=12, y=-22;
SELECT * FROM t4
}
} {12 -22}
do_test check-4.6 {
catchsql {
UPDATE t4 SET x=0, y=1;
}
} {1 {CHECK constraint failed: t4}}
do_test check-4.7 {
execsql {
SELECT * FROM t4;
}
} {12 -22}
do_test check-4.8 {
execsql {
PRAGMA ignore_check_constraints=ON;
UPDATE t4 SET x=0, y=1;
SELECT * FROM t4;
}
} {0 1}
do_test check-4.9 {
catchsql {
PRAGMA ignore_check_constraints=OFF;
UPDATE t4 SET x=0, y=2;
}
} {1 {CHECK constraint failed: t4}}
ifcapable vacuum {
  do_test check-4.10 {
catchsql {
VACUUM
}
} {0 {}}
}
do_test check-5.1 {
catchsql {
CREATE TABLE t5(x, y,
CHECK( x*y<:abc )
);
}
} {1 {parameters prohibited in CHECK constraints}}
do_test check-5.2 {
catchsql {
CREATE TABLE t5(x, y,
CHECK( x*y<? )
);
}
} {1 {parameters prohibited in CHECK constraints}}
ifcapable conflict {
do_test check-6.1 {
execsql {SELECT * FROM t1}
} {4 11.0}
do_test check-6.2 {
execsql {
UPDATE OR IGNORE t1 SET x=5;
SELECT * FROM t1;
}
} {4 11.0}
do_test check-6.3 {
execsql {
INSERT OR IGNORE INTO t1 VALUES(5,4.0);
SELECT * FROM t1;
}
} {4 11.0}
do_test check-6.4 {
execsql {
INSERT OR IGNORE INTO t1 VALUES(2,20.0);
SELECT * FROM t1;
}
} {4 11.0 2 20.0}
do_test check-6.5 {
catchsql {
UPDATE OR FAIL t1 SET x=7-x, y=y+1;
}
} {1 {CHECK constraint failed: t1}}
do_test check-6.6 {
execsql {
SELECT * FROM t1;
}
} {3 12.0 2 20.0}
do_test check-6.7 {
catchsql {
BEGIN;
INSERT INTO t1 VALUES(1,30.0);
INSERT OR ROLLBACK INTO t1 VALUES(8,40.0);
}
} {1 {CHECK constraint failed: t1}}
do_test check-6.8 {
catchsql {
COMMIT;
}
} {1 {cannot commit - no transaction is active}}
do_test check-6.9 {
execsql {
SELECT * FROM t1
}
} {3 12.0 2 20.0}
do_test check-6.11 {
execsql {SELECT * FROM t1}
} {3 12.0 2 20.0}
do_test check-6.12 {
catchsql {
REPLACE INTO t1 VALUES(6,7);
}
} {1 {CHECK constraint failed: t1}}
do_test check-6.13 {
execsql {SELECT * FROM t1}
} {3 12.0 2 20.0}
do_test check-6.14 {
catchsql {
INSERT OR IGNORE INTO t1 VALUES(6,7);
}
} {0 {}}
do_test check-6.15 {
execsql {SELECT * FROM t1}
} {3 12.0 2 20.0}
}
#--------------------------------------------------------------------------
# If a connection opens a database that contains a CHECK constraint that
# uses an unknown UDF, the schema should not be considered malformed.
# Attempting to modify the table should fail (since the CHECK constraint
# cannot be tested).
#
reset_db
proc myfunc {x} {expr $x < 10}
db func myfunc myfunc
do_execsql_test 7.1 { CREATE TABLE t6(a CHECK (myfunc(a))) }
do_execsql_test 7.2 { INSERT INTO t6 VALUES(9) }
do_catchsql_test 7.3 { INSERT INTO t6 VALUES(11) } \
{1 {CHECK constraint failed: t6}}
do_test 7.4 {
sqlite3 db2 test.db
execsql { SELECT * FROM t6 } db2
} {9}
do_test 7.5 {
catchsql { INSERT INTO t6 VALUES(8) } db2
} {1 {unknown function: myfunc()}}
do_test 7.6 {
catchsql { CREATE TABLE t7(a CHECK (myfunc(a))) } db2
} {1 {no such function: myfunc}}
do_test 7.7 {
db2 func myfunc myfunc
execsql { INSERT INTO t6 VALUES(8) } db2
} {}
do_test 7.8 {
db2 func myfunc myfunc
catchsql { INSERT INTO t6 VALUES(12) } db2
} {1 {CHECK constraint failed: t6}}
# 2013-08-02: Silently ignore database name qualifiers in CHECK constraints.
#
do_execsql_test 8.1 {
CREATE TABLE t810(a, CHECK( main.t810.a>0 ));
CREATE TABLE t811(b, CHECK( xyzzy.t811.b BETWEEN 5 AND 10 ));
} {}
# Make sure check constraints involving the ROWID are not ignored
#
do_execsql_test 9.1 {
CREATE TABLE t1(
a INTEGER PRIMARY KEY,
b INTEGER NOT NULL CONSTRAINT 'b-check' CHECK( b>a ),
c INTEGER NOT NULL CONSTRAINT 'c-check' CHECK( c>rowid*2 ),
d INTEGER NOT NULL CONSTRAINT 'd-check' CHECK( d BETWEEN b AND c )
);
INSERT INTO t1(a,b,c,d) VALUES(1,2,4,3),(2,4,6,5),(3,10,30,20);
} {}
do_catchsql_test 9.2 {
UPDATE t1 SET b=0 WHERE a=1;
} {1 {CHECK constraint failed: b-check}}
do_catchsql_test 9.3 {
UPDATE t1 SET c=a*2 WHERE a=1;
} {1 {CHECK constraint failed: c-check}}
finish_test

View File

@ -0,0 +1,78 @@
# 2013 May 14
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
#
# Test some specific circumstances to do with the sqlite3_close_v2() API.
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
set ::testprefix close
do_execsql_test 1.0 {
CREATE TABLE t1(x);
INSERT INTO t1 VALUES('one');
INSERT INTO t1 VALUES('two');
INSERT INTO t1 VALUES('three');
}
db close
do_test 1.1 {
set DB [sqlite3_open test.db]
sqlite3_close_v2 $DB
} {SQLITE_OK}
do_test 1.2.1 {
set DB [sqlite3_open test.db]
set STMT [sqlite3_prepare $DB "SELECT * FROM t1" -1 dummy]
sqlite3_close_v2 $DB
} {SQLITE_OK}
do_test 1.2.2 {
sqlite3_finalize $STMT
} {SQLITE_OK}
do_test 1.3.1 {
set DB [sqlite3_open test.db]
set STMT [sqlite3_prepare $DB "SELECT * FROM t1" -1 dummy]
sqlite3_step $STMT
sqlite3_close_v2 $DB
} {SQLITE_OK}
do_test 1.3.2 {
sqlite3_column_text $STMT 0
} {one}
do_test 1.3.3 {
sqlite3_finalize $STMT
} {SQLITE_OK}
do_test 1.4.1 {
set DB [sqlite3_open test.db]
set STMT [sqlite3_prepare $DB "SELECT * FROM t1" -1 dummy]
sqlite3_step $STMT
sqlite3_close_v2 $DB
} {SQLITE_OK}
do_test 1.4.2 {
list [sqlite3_step $STMT] [sqlite3_column_text $STMT 0]
} {SQLITE_ROW two}
do_test 1.4.3 {
list [catch {
sqlite3_prepare $DB "SELECT * FROM sqlite_master" -1 dummy
} msg] $msg
} {1 {(21) library routine called out of sequence}}
do_test 1.4.4 {
sqlite3_finalize $STMT
} {SQLITE_OK}
finish_test

View File

@ -0,0 +1,276 @@
# 2013-04-25
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
#
# Test cases for transitive_closure virtual table.
set testdir [file dirname $argv0]
source $testdir/tester.tcl
set testprefix closure01
ifcapable !vtab||!cte { finish_test ; return }
load_static_extension db closure
do_execsql_test 1.0 {
BEGIN;
CREATE TABLE t1(x INTEGER PRIMARY KEY, y INTEGER);
WITH RECURSIVE
cnt(i) AS (VALUES(1) UNION ALL SELECT i+1 FROM cnt LIMIT 131072)
INSERT INTO t1(x, y) SELECT i, nullif(i,1)/2 FROM cnt;
CREATE INDEX t1y ON t1(y);
COMMIT;
CREATE VIRTUAL TABLE cx
USING transitive_closure(tablename=t1, idcolumn=x, parentcolumn=y);
} {}
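# The cx table is queried by constraining "root" (and optionally "depth");
# each returned row is an id reachable from root together with its depth.
# The tablename, idcolumn and parentcolumn arguments fixed above can also
# be overridden per query, as tests 1.5 and 3.1 below demonstrate.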
# The entire table
do_timed_execsql_test 1.1 {
SELECT count(*), depth FROM cx WHERE root=1 GROUP BY depth ORDER BY 1;
} {/1 0 1 17 2 1 4 2 8 3 16 4 .* 65536 16/}
do_timed_execsql_test 1.1-cte {
WITH RECURSIVE
below(id,depth) AS (
VALUES(1,0)
UNION ALL
SELECT t1.x, below.depth+1
FROM t1 JOIN below on t1.y=below.id
)
SELECT count(*), depth FROM below GROUP BY depth ORDER BY 1;
} {/1 0 1 17 2 1 4 2 8 3 16 4 .* 65536 16/}
# descendents of 32768
do_timed_execsql_test 1.2 {
SELECT * FROM cx WHERE root=32768 ORDER BY id;
} {32768 0 65536 1 65537 1 131072 2}
do_timed_execsql_test 1.2-cte {
WITH RECURSIVE
below(id,depth) AS (
VALUES(32768,0)
UNION ALL
SELECT t1.x, below.depth+1
FROM t1 JOIN below on t1.y=below.id
WHERE below.depth<2
)
SELECT id, depth FROM below ORDER BY id;
} {32768 0 65536 1 65537 1 131072 2}
# descendents of 16384
do_timed_execsql_test 1.3 {
SELECT * FROM cx WHERE root=16384 AND depth<=2 ORDER BY id;
} {16384 0 32768 1 32769 1 65536 2 65537 2 65538 2 65539 2}
do_timed_execsql_test 1.3-cte {
WITH RECURSIVE
below(id,depth) AS (
VALUES(16384,0)
UNION ALL
SELECT t1.x, below.depth+1
FROM t1 JOIN below on t1.y=below.id
WHERE below.depth<2
)
SELECT id, depth FROM below ORDER BY id;
} {16384 0 32768 1 32769 1 65536 2 65537 2 65538 2 65539 2}
# children of 16384
do_execsql_test 1.4 {
SELECT id, depth, root, tablename, idcolumn, parentcolumn FROM cx
WHERE root=16384
AND depth=1
ORDER BY id;
} {32768 1 {} t1 x y 32769 1 {} t1 x y}
# great-grandparent of 16384
do_timed_execsql_test 1.5 {
SELECT id, depth, root, tablename, idcolumn, parentcolumn FROM cx
WHERE root=16384
AND depth=3
AND idcolumn='Y'
AND parentcolumn='X';
} {2048 3 {} t1 Y X}
do_timed_execsql_test 1.5-cte {
WITH RECURSIVE
above(id,depth) AS (
VALUES(16384,0)
UNION ALL
SELECT t1.y, above.depth+1
FROM t1 JOIN above ON t1.x=above.id
WHERE above.depth<3
)
SELECT id FROM above WHERE depth=3;
} {2048}
# depth<5
do_timed_execsql_test 1.6 {
SELECT count(*), depth FROM cx WHERE root=1 AND depth<5
GROUP BY depth ORDER BY 1;
} {1 0 2 1 4 2 8 3 16 4}
do_timed_execsql_test 1.6-cte {
WITH RECURSIVE
below(id,depth) AS (
VALUES(1,0)
UNION ALL
SELECT t1.x, below.depth+1
FROM t1 JOIN below ON t1.y=below.id
WHERE below.depth<4
)
SELECT count(*), depth FROM below GROUP BY depth ORDER BY 1;
} {1 0 2 1 4 2 8 3 16 4}
# depth<=5
do_execsql_test 1.7 {
SELECT count(*), depth FROM cx WHERE root=1 AND depth<=5
GROUP BY depth ORDER BY 1;
} {1 0 2 1 4 2 8 3 16 4 32 5}
# depth==5
do_execsql_test 1.8 {
SELECT count(*), depth FROM cx WHERE root=1 AND depth=5
GROUP BY depth ORDER BY 1;
} {32 5}
# depth BETWEEN 3 AND 5
do_execsql_test 1.9 {
SELECT count(*), depth FROM cx WHERE root=1 AND depth BETWEEN 3 AND 5
GROUP BY depth ORDER BY 1;
} {8 3 16 4 32 5}
# depth==5 with min() and max()
do_timed_execsql_test 1.10 {
SELECT count(*), min(id), max(id) FROM cx WHERE root=1 AND depth=5;
} {32 32 63}
do_timed_execsql_test 1.10-cte {
WITH RECURSIVE
below(id,depth) AS (
VALUES(1,0)
UNION ALL
SELECT t1.x, below.depth+1
FROM t1 JOIN below ON t1.y=below.id
WHERE below.depth<5
)
SELECT count(*), min(id), max(id) FROM below WHERE depth=5;
} {32 32 63}
# Create a much smaller table t2 with only 32 elements
db eval {
CREATE TABLE t2(x INTEGER PRIMARY KEY, y INTEGER);
INSERT INTO t2 SELECT x, y FROM t1 WHERE x<32;
CREATE INDEX t2y ON t2(y);
CREATE VIRTUAL TABLE c2
USING transitive_closure(tablename=t2, idcolumn=x, parentcolumn=y);
}
# t2 full-table
do_execsql_test 2.1 {
SELECT count(*), min(id), max(id) FROM c2 WHERE root=1;
} {31 1 31}
# t2 root=10
do_execsql_test 2.2 {
SELECT id FROM c2 WHERE root=10;
} {10 20 21}
# t2 root=11
do_execsql_test 2.3 {
SELECT id FROM c2 WHERE root=12;
} {12 24 25}
# t2 root IN [10,12]
do_execsql_test 2.4 {
SELECT id FROM c2 WHERE root IN (10,12) ORDER BY id;
} {10 12 20 21 24 25}
# t2 root IN [10,12] (sorted)
do_execsql_test 2.5 {
SELECT id FROM c2 WHERE root IN (10,12) ORDER BY +id;
} {10 12 20 21 24 25}
# t2 c2up from 20
do_execsql_test 3.0 {
CREATE VIRTUAL TABLE c2up USING transitive_closure(
tablename = t2,
idcolumn = y,
parentcolumn = x
);
SELECT id FROM c2up WHERE root=20;
} {1 2 5 10 20}
# cx as c2up
do_execsql_test 3.1 {
SELECT id FROM cx
WHERE root=20
AND tablename='t2'
AND idcolumn='y'
AND parentcolumn='x';
} {1 2 5 10 20}
# t2 first cousins of 20
do_execsql_test 3.2 {
SELECT DISTINCT id FROM c2
WHERE root IN (SELECT id FROM c2up
WHERE root=20 AND depth<=2)
ORDER BY id;
} {5 10 11 20 21 22 23}
# t2 first cousins of 20
do_execsql_test 3.3 {
SELECT id FROM c2
WHERE root=(SELECT id FROM c2up
WHERE root=20 AND depth=2)
AND depth=2
EXCEPT
SELECT id FROM c2
WHERE root=(SELECT id FROM c2up
WHERE root=20 AND depth=1)
AND depth<=1
ORDER BY id;
} {22 23}
# missing tablename.
do_test 4.1 {
catchsql {
SELECT id FROM cx
WHERE root=20
AND tablename='t3'
AND idcolumn='y'
AND parentcolumn='x';
}
} {1 {no such table: t3}}
# missing idcolumn
do_test 4.2 {
catchsql {
SELECT id FROM cx
WHERE root=20
AND tablename='t2'
AND idcolumn='xyz'
AND parentcolumn='x';
}
} {1 {no such column: t2.xyz}}
# missing parentcolumn
do_test 4.3 {
catchsql {
SELECT id FROM cx
WHERE root=20
AND tablename='t2'
AND idcolumn='x'
AND parentcolumn='pqr';
}
} {1 {no such column: t2.pqr}}
# generic closure
do_execsql_test 5.1 {
CREATE VIRTUAL TABLE temp.closure USING transitive_closure;
SELECT id FROM closure
WHERE root=1
AND depth=3
AND tablename='t1'
AND idcolumn='x'
AND parentcolumn='y'
ORDER BY id;
} {8 9 10 11 12 13 14 15}
finish_test

View File

@ -0,0 +1,84 @@
# 2009 November 10
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# Additional test cases for the COALESCE() and IFNULL() functions.
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
do_test coalesce-1.0 {
db eval {
CREATE TABLE t1(a INTEGER PRIMARY KEY, b, c, d);
INSERT INTO t1 VALUES(1, null, null, null);
INSERT INTO t1 VALUES(2, 2, 99, 99);
INSERT INTO t1 VALUES(3, null, 3, 99);
INSERT INTO t1 VALUES(4, null, null, 4);
INSERT INTO t1 VALUES(5, null, null, null);
INSERT INTO t1 VALUES(6, 22, 99, 99);
INSERT INTO t1 VALUES(7, null, 33, 99);
INSERT INTO t1 VALUES(8, null, null, 44);
SELECT coalesce(b,c,d) FROM t1 ORDER BY a;
}
} {{} 2 3 4 {} 22 33 44}
do_test coalesce-1.1 {
db eval {
SELECT coalesce(d+c+b,d+c,d) FROM t1 ORDER BY a;
}
} {{} 200 102 4 {} 220 132 44}
do_test coalesce-1.2 {
db eval {
SELECT ifnull(d+c+b,ifnull(d+c,d)) FROM t1 ORDER BY a;
}
} {{} 200 102 4 {} 220 132 44}
do_test coalesce-1.3 {
db eval {
SELECT ifnull(ifnull(d+c+b,d+c),d) FROM t1 ORDER BY a;
}
} {{} 200 102 4 {} 220 132 44}
do_test coalesce-1.4 {
db eval {
SELECT ifnull(ifnull(b,c),d) FROM t1 ORDER BY a;
}
} {{} 2 3 4 {} 22 33 44}
do_test coalesce-1.5 {
db eval {
SELECT ifnull(b,ifnull(c,d)) FROM t1 ORDER BY a;
}
} {{} 2 3 4 {} 22 33 44}
do_test coalesce-1.6 {
db eval {
SELECT coalesce(b,NOT b,-b,abs(b),lower(b),length(b),min(b,5),b*123,c)
FROM t1 ORDER BY a;
}
} {{} 2 3 {} {} 22 33 {}}
do_test coalesce-1.7 {
db eval {
SELECT ifnull(nullif(a,4),99)
FROM t1 ORDER BY a;
}
} {1 2 3 99 5 6 7 8}
do_test coalesce-1.8 {
db eval {
pragma vdbe_listing=on;
SELECT coalesce(
CASE WHEN b=2 THEN 123 END,
CASE WHEN b=3 THEN 234 END,
CASE WHEN c=3 THEN 345 WHEN c=33 THEN 456 END,
d
)
FROM t1 ORDER BY a;
}
} {{} 123 345 4 {} 99 456 44}
finish_test

View File

@ -0,0 +1,404 @@
#
# 2001 September 15
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for SQLite library. The
# focus of this script is testing collation sequences.
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
set testprefix collate1
#
# Tests are roughly organised as follows:
#
# collate1-1.* - Single-field ORDER BY with an explicit COLLATE clause.
# collate1-2.* - Multi-field ORDER BY with an explicit COLLATE clause.
# collate1-3.* - ORDER BY using a default collation type. Also that an
# explicit collate type overrides a default collate type.
# collate1-4.* - ORDER BY using a data type.
#
#
# Collation type 'HEX'. If an argument can be interpreted as a hexadecimal
# number, then it is converted to one before the comparison is performed.
# Numbers are less than other strings. If neither argument is a number,
# [string compare] is used.
#
db collate HEX hex_collate
proc hex_collate {lhs rhs} {
set lhs_ishex [regexp {^(0x|)[1234567890abcdefABCDEF]+$} $lhs]
set rhs_ishex [regexp {^(0x|)[1234567890abcdefABCDEF]+$} $rhs]
if {$lhs_ishex && $rhs_ishex} {
set lhsx [scan $lhs %x]
set rhsx [scan $rhs %x]
if {$lhs < $rhs} {return -1}
if {$lhs == $rhs} {return 0}
if {$lhs > $rhs} {return 1}
}
if {$lhs_ishex} {
return -1;
}
if {$rhs_ishex} {
return 1;
}
return [string compare $lhs $rhs]
}
db function hex {format 0x%X}
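# Under HEX, for example, '0x2D' sorts before '0x119' because the values
# compare numerically as 45 < 281, whereas BINARY text ordering puts
# '0x119' first. Illustrative check (not one of the numbered tests):
#
#   hex_collate 0x2D 0x119   ;# expected to return -1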
# Mimic the SQLite 2 collation type NUMERIC.
db collate numeric numeric_collate
proc numeric_collate {lhs rhs} {
if {$lhs == $rhs} {return 0}
return [expr ($lhs>$rhs)?1:-1]
}
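# For instance (illustrative), under NUMERIC the strings '5', '11', '101'
# order as 5, 11, 101, while the default BINARY text ordering gives
# '101', '11', '5'; compare tests collate1-2.2 and collate1-2.3 below.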
do_test collate1-1.0 {
execsql {
CREATE TABLE collate1t1(c1, c2);
INSERT INTO collate1t1 VALUES(45, hex(45));
INSERT INTO collate1t1 VALUES(NULL, NULL);
INSERT INTO collate1t1 VALUES(281, hex(281));
}
} {}
do_test collate1-1.1 {
execsql {
SELECT c2 FROM collate1t1 ORDER BY 1;
}
} {{} 0x119 0x2D}
do_test collate1-1.2 {
execsql {
SELECT c2 FROM collate1t1 ORDER BY 1 COLLATE hex;
}
} {{} 0x2D 0x119}
do_test collate1-1.3 {
execsql {
SELECT c2 FROM collate1t1 ORDER BY 1 COLLATE hex DESC;
}
} {0x119 0x2D {}}
do_test collate1-1.4 {
execsql {
SELECT c2 FROM collate1t1 ORDER BY 1 COLLATE hex ASC;
}
} {{} 0x2D 0x119}
do_test collate1-1.5 {
execsql {
SELECT c2 COLLATE hex FROM collate1t1 ORDER BY 1
}
} {{} 0x2D 0x119}
do_test collate1-1.6 {
execsql {
SELECT c2 COLLATE hex FROM collate1t1 ORDER BY 1 ASC
}
} {{} 0x2D 0x119}
do_test collate1-1.7 {
execsql {
SELECT c2 COLLATE hex FROM collate1t1 ORDER BY 1 DESC
}
} {0x119 0x2D {}}
do_test collate1-1.99 {
execsql {
DROP TABLE collate1t1;
}
} {}
do_test collate1-2.0 {
execsql {
CREATE TABLE collate1t1(c1, c2);
INSERT INTO collate1t1 VALUES('5', '0x11');
INSERT INTO collate1t1 VALUES('5', '0xA');
INSERT INTO collate1t1 VALUES(NULL, NULL);
INSERT INTO collate1t1 VALUES('7', '0xA');
INSERT INTO collate1t1 VALUES('11', '0x11');
INSERT INTO collate1t1 VALUES('11', '0x101');
}
} {}
do_test collate1-2.2 {
execsql {
SELECT c1, c2 FROM collate1t1 ORDER BY 1 COLLATE numeric, 2 COLLATE hex;
}
} {{} {} 5 0xA 5 0x11 7 0xA 11 0x11 11 0x101}
do_test collate1-2.3 {
execsql {
SELECT c1, c2 FROM collate1t1 ORDER BY 1 COLLATE binary, 2 COLLATE hex;
}
} {{} {} 11 0x11 11 0x101 5 0xA 5 0x11 7 0xA}
do_test collate1-2.4 {
execsql {
SELECT c1, c2 FROM collate1t1 ORDER BY 1 COLLATE binary DESC, 2 COLLATE hex;
}
} {7 0xA 5 0xA 5 0x11 11 0x11 11 0x101 {} {}}
do_test collate1-2.5 {
execsql {
SELECT c1, c2 FROM collate1t1
ORDER BY 1 COLLATE binary DESC, 2 COLLATE hex DESC;
}
} {7 0xA 5 0x11 5 0xA 11 0x101 11 0x11 {} {}}
do_test collate1-2.6 {
execsql {
SELECT c1, c2 FROM collate1t1
ORDER BY 1 COLLATE binary ASC, 2 COLLATE hex ASC;
}
} {{} {} 11 0x11 11 0x101 5 0xA 5 0x11 7 0xA}
do_test collate1-2.12.1 {
execsql {
SELECT c1 COLLATE numeric, c2 FROM collate1t1
ORDER BY 1, 2 COLLATE hex;
}
} {{} {} 5 0xA 5 0x11 7 0xA 11 0x11 11 0x101}
do_test collate1-2.12.2 {
execsql {
SELECT c1 COLLATE hex, c2 FROM collate1t1
ORDER BY 1 COLLATE numeric, 2 COLLATE hex;
}
} {{} {} 5 0xA 5 0x11 7 0xA 11 0x11 11 0x101}
do_test collate1-2.12.3 {
execsql {
SELECT c1, c2 COLLATE hex FROM collate1t1
ORDER BY 1 COLLATE numeric, 2;
}
} {{} {} 5 0xA 5 0x11 7 0xA 11 0x11 11 0x101}
do_test collate1-2.12.4 {
execsql {
SELECT c1 COLLATE numeric, c2 COLLATE hex
FROM collate1t1
ORDER BY 1, 2;
}
} {{} {} 5 0xA 5 0x11 7 0xA 11 0x11 11 0x101}
do_test collate1-2.13 {
execsql {
SELECT c1 COLLATE binary, c2 COLLATE hex
FROM collate1t1
ORDER BY 1, 2;
}
} {{} {} 11 0x11 11 0x101 5 0xA 5 0x11 7 0xA}
do_test collate1-2.14 {
execsql {
SELECT c1, c2
FROM collate1t1 ORDER BY 1 COLLATE binary DESC, 2 COLLATE hex;
}
} {7 0xA 5 0xA 5 0x11 11 0x11 11 0x101 {} {}}
do_test collate1-2.15 {
execsql {
SELECT c1 COLLATE binary, c2 COLLATE hex
FROM collate1t1
ORDER BY 1 DESC, 2 DESC;
}
} {7 0xA 5 0x11 5 0xA 11 0x101 11 0x11 {} {}}
do_test collate1-2.16 {
execsql {
SELECT c1 COLLATE hex, c2 COLLATE binary
FROM collate1t1
ORDER BY 1 COLLATE binary ASC, 2 COLLATE hex ASC;
}
} {{} {} 11 0x11 11 0x101 5 0xA 5 0x11 7 0xA}
do_test collate1-2.99 {
execsql {
DROP TABLE collate1t1;
}
} {}
#
# These tests ensure that the default collation type for a column is used
# by an ORDER BY clause correctly. The focus is all the different ways
# the column can be referenced, i.e. a, collate1t1.a, main.collate1t1.a etc.
#
do_test collate1-3.0 {
execsql {
CREATE TABLE collate1t1(a COLLATE hex, b);
INSERT INTO collate1t1 VALUES( '0x5', 5 );
INSERT INTO collate1t1 VALUES( '1', 1 );
INSERT INTO collate1t1 VALUES( '0x45', 69 );
INSERT INTO collate1t1 VALUES( NULL, NULL );
SELECT * FROM collate1t1 ORDER BY a;
}
} {{} {} 1 1 0x5 5 0x45 69}
do_test collate1-3.1 {
execsql {
SELECT * FROM collate1t1 ORDER BY 1;
}
} {{} {} 1 1 0x5 5 0x45 69}
do_test collate1-3.2 {
execsql {
SELECT * FROM collate1t1 ORDER BY collate1t1.a;
}
} {{} {} 1 1 0x5 5 0x45 69}
do_test collate1-3.3 {
execsql {
SELECT * FROM collate1t1 ORDER BY main.collate1t1.a;
}
} {{} {} 1 1 0x5 5 0x45 69}
do_test collate1-3.4 {
execsql {
SELECT a as c1, b as c2 FROM collate1t1 ORDER BY c1;
}
} {{} {} 1 1 0x5 5 0x45 69}
do_test collate1-3.5 {
execsql {
SELECT a as c1, b as c2 FROM collate1t1 ORDER BY c1 COLLATE binary;
}
} {{} {} 0x45 69 0x5 5 1 1}
do_test collate1-3.5.1 {
execsql {
SELECT a COLLATE binary as c1, b as c2
FROM collate1t1 ORDER BY c1;
}
} {{} {} 0x45 69 0x5 5 1 1}
do_test collate1-3.6 {
execsql {
DROP TABLE collate1t1;
}
} {}
# Update for SQLite version 3. The collate1-4.* test cases were written
# before manifest types were introduced. The following test cases still
# work, due to the 'affinity' mechanism, but they don't prove anything
# about collation sequences.
#
do_test collate1-4.0 {
execsql {
CREATE TABLE collate1t1(c1 numeric, c2 text);
INSERT INTO collate1t1 VALUES(1, 1);
INSERT INTO collate1t1 VALUES(12, 12);
INSERT INTO collate1t1 VALUES(NULL, NULL);
INSERT INTO collate1t1 VALUES(101, 101);
}
} {}
do_test collate1-4.1 {
execsql {
SELECT c1 FROM collate1t1 ORDER BY 1;
}
} {{} 1 12 101}
do_test collate1-4.2 {
execsql {
SELECT c2 FROM collate1t1 ORDER BY 1;
}
} {{} 1 101 12}
do_test collate1-4.3 {
execsql {
SELECT c2+0 FROM collate1t1 ORDER BY 1;
}
} {{} 1 12 101}
do_test collate1-4.4 {
execsql {
SELECT c1||'' FROM collate1t1 ORDER BY 1;
}
} {{} 1 101 12}
do_test collate1-4.4.1 {
execsql {
SELECT (c1||'') COLLATE numeric FROM collate1t1 ORDER BY 1;
}
} {{} 1 12 101}
do_test collate1-4.5 {
execsql {
DROP TABLE collate1t1;
}
} {}
# A problem reported on the mailing list: A CREATE TABLE statement
# is allowed to have two or more COLLATE clauses on the same column.
# That probably ought to be an error, but we allow it for backwards
# compatibility. Just make sure it works and doesn't leak memory.
#
do_test collate1-5.1 {
execsql {
CREATE TABLE c5(
id INTEGER PRIMARY KEY,
a TEXT COLLATE binary COLLATE nocase COLLATE rtrim,
b TEXT COLLATE nocase COLLATE binary,
c TEXT COLLATE rtrim COLLATE binary COLLATE rtrim COLLATE nocase
);
INSERT INTO c5 VALUES(1, 'abc','abc','abc');
INSERT INTO c5 VALUES(2, 'abc ','ABC','ABC');
SELECT id FROM c5 WHERE a='abc' ORDER BY id;
}
} {1 2}
do_test collate1-5.2 {
execsql {
SELECT id FROM c5 WHERE b='abc' ORDER BY id;
}
} {1}
do_test collate1-5.3 {
execsql {
SELECT id FROM c5 WHERE c='abc' ORDER BY id;
}
} {1 2}
#-------------------------------------------------------------------------
# Fix problems with handling collation sequences named '"""'.
#
do_execsql_test 6.1 {
SELECT """""""";
} {\"\"\"}
do_catchsql_test 6.2 {
CREATE TABLE x1(a);
SELECT a FROM x1 ORDER BY a COLLATE """""""";
} {1 {no such collation sequence: """}}
do_catchsql_test 6.3 {
SELECT a FROM x1 ORDER BY 1 COLLATE """""""";
} {1 {no such collation sequence: """}}
do_catchsql_test 6.4 {
SELECT 0 UNION SELECT 0 ORDER BY 1 COLLATE """""""";
} {1 {no such collation sequence: """}}
db collate {"""} [list string compare -nocase]
do_execsql_test 6.5 {
PRAGMA foreign_keys = ON;
CREATE TABLE p1(a PRIMARY KEY COLLATE '"""');
CREATE TABLE c1(x, y REFERENCES p1);
} {}
do_execsql_test 6.6 {
INSERT INTO p1 VALUES('abc');
INSERT INTO c1 VALUES(1, 'ABC');
}
ifcapable foreignkey {
do_catchsql_test 6.7 {
DELETE FROM p1 WHERE rowid = 1
} {1 {FOREIGN KEY constraint failed}}
}
do_execsql_test 6.8 {
INSERT INTO p1 VALUES('abb');
INSERT INTO p1 VALUES('wxz');
INSERT INTO p1 VALUES('wxy');
INSERT INTO c1 VALUES(2, 'abb');
INSERT INTO c1 VALUES(3, 'wxz');
INSERT INTO c1 VALUES(4, 'WXY');
SELECT x, y FROM c1 ORDER BY y COLLATE """""""";
} {2 abb 1 ABC 4 WXY 3 wxz}
# 2015-04-15: Nested COLLATE operators
#
do_execsql_test 7.0 {
SELECT 'abc' UNION ALL SELECT 'DEF'
ORDER BY 1 COLLATE nocase COLLATE nocase COLLATE nocase COLLATE nocase;
} {abc DEF}
do_execsql_test 7.1 {
SELECT 'abc' UNION ALL SELECT 'DEF'
ORDER BY 1 COLLATE nocase COLLATE nocase COLLATE nocase COLLATE binary;
} {DEF abc}
do_execsql_test 7.2 {
SELECT 'abc' UNION ALL SELECT 'DEF'
ORDER BY 1 COLLATE binary COLLATE binary COLLATE binary COLLATE nocase;
} {abc DEF}
finish_test

View File

@ -0,0 +1,724 @@
#
# 2001 September 15
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for SQLite library. The
# focus of this script is testing collation sequences.
#
# $Id: collate2.test,v 1.6 2008/08/20 16:35:10 drh Exp $
set testdir [file dirname $argv0]
source $testdir/tester.tcl
set ::testprefix collate2
#
# Tests are organised as follows:
#
# collate2-1.* WHERE <expr> expressions (sqliteExprIfTrue).
# collate2-2.* WHERE NOT <expr> expressions (sqliteExprIfFalse).
# collate2-3.* SELECT <expr> expressions (sqliteExprCode).
# collate2-4.* Precedence of collation/data types in binary comparisons
# collate2-5.* JOIN syntax.
#
# Create a collation type BACKWARDS for use in testing. This collation type
# is similar to the built-in TEXT collation type except the order of
# characters in each string is reversed before the comparison is performed.
db collate BACKWARDS backwards_collate
proc backwards_collate {a b} {
set ra {};
set rb {}
foreach c [split $a {}] { set ra $c$ra }
foreach c [split $b {}] { set rb $c$rb }
return [string compare $ra $rb]
}
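# Illustrative example (not one of the numbered tests): under BACKWARDS the
# strings 'ab' and 'ba' are compared as their reversals 'ba' and 'ab', so
# 'ba' sorts before 'ab':
#
#   backwards_collate ab ba   ;# expected to return 1, i.e. 'ab' > 'ba'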
# The following values are used in these tests:
# NULL aa ab ba bb aA aB bA bB Aa Ab Ba Bb AA AB BA BB
#
# The collation orders for each of the tested collation types are:
#
# BINARY: NULL AA AB Aa Ab BA BB Ba Bb aA aB aa ab bA bB ba bb
# NOCASE: NULL aa aA Aa AA ab aB Ab AB ba bA Ba BA bb bB Bb BB
# BACKWARDS: NULL AA BA aA bA AB BB aB bB Aa Ba aa ba Ab Bb ab bb
#
# These tests verify that the default collation type for a column is used
# for comparison operators (<, >, <=, >=, =) involving that column and
# an expression that is not a column with a default collation type.
#
# The collation sequences BINARY and NOCASE are built-in, the BACKWARDS
# collation sequence is implemented by the TCL proc backwards_collate
# above.
#
do_test collate2-1.0 {
execsql {
CREATE TABLE collate2t1(
a COLLATE BINARY,
b COLLATE NOCASE,
c COLLATE BACKWARDS
);
INSERT INTO collate2t1 VALUES( NULL, NULL, NULL );
INSERT INTO collate2t1 VALUES( 'aa', 'aa', 'aa' );
INSERT INTO collate2t1 VALUES( 'ab', 'ab', 'ab' );
INSERT INTO collate2t1 VALUES( 'ba', 'ba', 'ba' );
INSERT INTO collate2t1 VALUES( 'bb', 'bb', 'bb' );
INSERT INTO collate2t1 VALUES( 'aA', 'aA', 'aA' );
INSERT INTO collate2t1 VALUES( 'aB', 'aB', 'aB' );
INSERT INTO collate2t1 VALUES( 'bA', 'bA', 'bA' );
INSERT INTO collate2t1 VALUES( 'bB', 'bB', 'bB' );
INSERT INTO collate2t1 VALUES( 'Aa', 'Aa', 'Aa' );
INSERT INTO collate2t1 VALUES( 'Ab', 'Ab', 'Ab' );
INSERT INTO collate2t1 VALUES( 'Ba', 'Ba', 'Ba' );
INSERT INTO collate2t1 VALUES( 'Bb', 'Bb', 'Bb' );
INSERT INTO collate2t1 VALUES( 'AA', 'AA', 'AA' );
INSERT INTO collate2t1 VALUES( 'AB', 'AB', 'AB' );
INSERT INTO collate2t1 VALUES( 'BA', 'BA', 'BA' );
INSERT INTO collate2t1 VALUES( 'BB', 'BB', 'BB' );
}
if {[info exists collate_test_use_index]} {
execsql {
CREATE INDEX collate2t1_i1 ON collate2t1(a);
CREATE INDEX collate2t1_i2 ON collate2t1(b);
CREATE INDEX collate2t1_i3 ON collate2t1(c);
}
}
} {}
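# Illustrative addition (not part of the upstream file; the test name
# collate2-1.0.1 is made up for this sketch).  Column b has the NOCASE
# default collation, so a comparison between b and a plain text literal
# is carried out case-insensitively and matches four of the rows above.
do_test collate2-1.0.1 {
  execsql { SELECT count(*) FROM collate2t1 WHERE b = 'AA' }
} {4}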
do_test collate2-1.1 {
execsql {
SELECT a FROM collate2t1 WHERE a > 'aa' ORDER BY 1;
}
} {ab bA bB ba bb}
do_test collate2-1.1.1 {
execsql {
SELECT a FROM collate2t1 WHERE a COLLATE binary > 'aa' ORDER BY 1;
}
} {ab bA bB ba bb}
do_test collate2-1.1.2 {
execsql {
SELECT a FROM collate2t1 WHERE b COLLATE binary > 'aa' ORDER BY 1;
}
} {ab bA bB ba bb}
do_test collate2-1.1.3 {
execsql {
SELECT a FROM collate2t1 WHERE c COLLATE binary > 'aa' ORDER BY 1;
}
} {ab bA bB ba bb}
do_test collate2-1.2 {
execsql {
SELECT b FROM collate2t1 WHERE b > 'aa' ORDER BY 1, oid;
}
} {ab aB Ab AB ba bA Ba BA bb bB Bb BB}
do_test collate2-1.2.1 {
execsql {
SELECT b FROM collate2t1 WHERE a COLLATE nocase > 'aa'
ORDER BY 1, oid;
}
} {ab aB Ab AB ba bA Ba BA bb bB Bb BB}
do_test collate2-1.2.2 {
execsql {
SELECT b FROM collate2t1 WHERE b COLLATE nocase > 'aa'
ORDER BY 1, oid;
}
} {ab aB Ab AB ba bA Ba BA bb bB Bb BB}
do_test collate2-1.2.3 {
execsql {
SELECT b FROM collate2t1 WHERE c COLLATE nocase > 'aa'
ORDER BY 1, oid;
}
} {ab aB Ab AB ba bA Ba BA bb bB Bb BB}
do_test collate2-1.2.4 {
execsql {
SELECT b FROM collate2t1 WHERE b > 'aa' ORDER BY +b;
}
} {ab aB Ab AB ba bA Ba BA bb bB Bb BB}
do_test collate2-1.2.5 {
execsql {
SELECT b FROM collate2t1 WHERE a COLLATE nocase > 'aa' ORDER BY +b;
}
} {ab aB Ab AB ba bA Ba BA bb bB Bb BB}
do_test collate2-1.2.6 {
execsql {
SELECT b FROM collate2t1 WHERE b COLLATE nocase > 'aa' ORDER BY +b;
}
} {ab aB Ab AB ba bA Ba BA bb bB Bb BB}
do_test collate2-1.2.7 {
execsql {
SELECT b FROM collate2t1 WHERE c COLLATE nocase > 'aa' ORDER BY +b;
}
} {ab aB Ab AB ba bA Ba BA bb bB Bb BB}
do_test collate2-1.3 {
execsql {
SELECT c FROM collate2t1 WHERE c > 'aa' ORDER BY 1;
}
} {ba Ab Bb ab bb}
do_test collate2-1.3.1 {
execsql {
SELECT c FROM collate2t1 WHERE a COLLATE backwards > 'aa'
ORDER BY 1;
}
} {ba Ab Bb ab bb}
do_test collate2-1.3.2 {
execsql {
SELECT c FROM collate2t1 WHERE b COLLATE backwards > 'aa'
ORDER BY 1;
}
} {ba Ab Bb ab bb}
do_test collate2-1.3.3 {
execsql {
SELECT c FROM collate2t1 WHERE c COLLATE backwards > 'aa'
ORDER BY 1;
}
} {ba Ab Bb ab bb}
do_test collate2-1.4 {
execsql {
SELECT a FROM collate2t1 WHERE a < 'aa' ORDER BY 1;
}
} {AA AB Aa Ab BA BB Ba Bb aA aB}
do_test collate2-1.5 {
execsql {
SELECT b FROM collate2t1 WHERE b < 'aa' ORDER BY 1, oid;
}
} {}
do_test collate2-1.5.1 {
execsql {
SELECT b FROM collate2t1 WHERE b < 'aa' ORDER BY +b;
}
} {}
do_test collate2-1.6 {
execsql {
SELECT c FROM collate2t1 WHERE c < 'aa' ORDER BY 1;
}
} {AA BA aA bA AB BB aB bB Aa Ba}
do_test collate2-1.7 {
execsql {
SELECT a FROM collate2t1 WHERE a = 'aa';
}
} {aa}
do_test collate2-1.8 {
execsql {
SELECT b FROM collate2t1 WHERE b = 'aa' ORDER BY oid;
}
} {aa aA Aa AA}
do_test collate2-1.9 {
execsql {
SELECT c FROM collate2t1 WHERE c = 'aa';
}
} {aa}
do_test collate2-1.10 {
execsql {
SELECT a FROM collate2t1 WHERE a >= 'aa' ORDER BY 1;
}
} {aa ab bA bB ba bb}
do_test collate2-1.11 {
execsql {
SELECT b FROM collate2t1 WHERE b >= 'aa' ORDER BY 1, oid;
}
} {aa aA Aa AA ab aB Ab AB ba bA Ba BA bb bB Bb BB}
do_test collate2-1.12 {
execsql {
SELECT c FROM collate2t1 WHERE c >= 'aa' ORDER BY 1;
}
} {aa ba Ab Bb ab bb}
do_test collate2-1.13 {
execsql {
SELECT a FROM collate2t1 WHERE a <= 'aa' ORDER BY 1;
}
} {AA AB Aa Ab BA BB Ba Bb aA aB aa}
do_test collate2-1.14 {
execsql {
SELECT b FROM collate2t1 WHERE b <= 'aa' ORDER BY 1, oid;
}
} {aa aA Aa AA}
do_test collate2-1.15 {
execsql {
SELECT c FROM collate2t1 WHERE c <= 'aa' ORDER BY 1;
}
} {AA BA aA bA AB BB aB bB Aa Ba aa}
do_test collate2-1.16 {
execsql {
SELECT a FROM collate2t1 WHERE a BETWEEN 'Aa' AND 'Bb' ORDER BY 1;
}
} {Aa Ab BA BB Ba Bb}
do_test collate2-1.17 {
execsql {
SELECT b FROM collate2t1 WHERE b BETWEEN 'Aa' AND 'Bb' ORDER BY 1, oid;
}
} {aa aA Aa AA ab aB Ab AB ba bA Ba BA bb bB Bb BB}
do_test collate2-1.17.1 {
execsql {
SELECT b FROM collate2t1 WHERE b BETWEEN 'Aa' AND 'Bb' ORDER BY +b;
}
} {aa aA Aa AA ab aB Ab AB ba bA Ba BA bb bB Bb BB}
do_test collate2-1.18 {
execsql {
SELECT c FROM collate2t1 WHERE c BETWEEN 'Aa' AND 'Bb' ORDER BY 1;
}
} {Aa Ba aa ba Ab Bb}
do_test collate2-1.19 {
execsql {
SELECT a FROM collate2t1 WHERE
CASE a WHEN 'aa' THEN 1 ELSE 0 END
ORDER BY 1, oid;
}
} {aa}
do_test collate2-1.20 {
execsql {
SELECT b FROM collate2t1 WHERE
CASE b WHEN 'aa' THEN 1 ELSE 0 END
ORDER BY 1, oid;
}
} {aa aA Aa AA}
do_test collate2-1.21 {
execsql {
SELECT c FROM collate2t1 WHERE
CASE c WHEN 'aa' THEN 1 ELSE 0 END
ORDER BY 1, oid;
}
} {aa}
ifcapable subquery {
do_test collate2-1.22 {
execsql {
SELECT a FROM collate2t1 WHERE a IN ('aa', 'bb') ORDER BY 1, oid;
}
} {aa bb}
do_test collate2-1.23 {
execsql {
SELECT b FROM collate2t1 WHERE b IN ('aa', 'bb') ORDER BY 1, oid;
}
} {aa aA Aa AA bb bB Bb BB}
do_test collate2-1.24 {
execsql {
SELECT c FROM collate2t1 WHERE c IN ('aa', 'bb') ORDER BY 1, oid;
}
} {aa bb}
do_test collate2-1.25 {
execsql {
SELECT a FROM collate2t1
WHERE a IN (SELECT a FROM collate2t1 WHERE a IN ('aa', 'bb'));
}
} {aa bb}
do_test collate2-1.26 {
execsql {
SELECT b FROM collate2t1
WHERE b IN (SELECT a FROM collate2t1 WHERE a IN ('aa', 'bb'));
}
} {aa bb aA bB Aa Bb AA BB}
do_test collate2-1.27 {
execsql {
SELECT c FROM collate2t1
WHERE c IN (SELECT a FROM collate2t1 WHERE a IN ('aa', 'bb'));
}
} {aa bb}
} ;# ifcapable subquery
do_test collate2-2.1 {
execsql {
SELECT a FROM collate2t1 WHERE NOT a > 'aa' ORDER BY 1;
}
} {AA AB Aa Ab BA BB Ba Bb aA aB aa}
do_test collate2-2.2 {
execsql {
SELECT b FROM collate2t1 WHERE NOT b > 'aa' ORDER BY 1, oid;
}
} {aa aA Aa AA}
do_test collate2-2.3 {
execsql {
SELECT c FROM collate2t1 WHERE NOT c > 'aa' ORDER BY 1;
}
} {AA BA aA bA AB BB aB bB Aa Ba aa}
do_test collate2-2.4 {
execsql {
SELECT a FROM collate2t1 WHERE NOT a < 'aa' ORDER BY 1;
}
} {aa ab bA bB ba bb}
do_test collate2-2.5 {
execsql {
SELECT b FROM collate2t1 WHERE NOT b < 'aa' ORDER BY 1, oid;
}
} {aa aA Aa AA ab aB Ab AB ba bA Ba BA bb bB Bb BB}
do_test collate2-2.6 {
execsql {
SELECT c FROM collate2t1 WHERE NOT c < 'aa' ORDER BY 1;
}
} {aa ba Ab Bb ab bb}
do_test collate2-2.7 {
execsql {
SELECT a FROM collate2t1 WHERE NOT a = 'aa';
}
} {ab ba bb aA aB bA bB Aa Ab Ba Bb AA AB BA BB}
do_test collate2-2.8 {
execsql {
SELECT b FROM collate2t1 WHERE NOT b = 'aa';
}
} {ab ba bb aB bA bB Ab Ba Bb AB BA BB}
do_test collate2-2.9 {
execsql {
SELECT c FROM collate2t1 WHERE NOT c = 'aa';
}
} {ab ba bb aA aB bA bB Aa Ab Ba Bb AA AB BA BB}
do_test collate2-2.10 {
execsql {
SELECT a FROM collate2t1 WHERE NOT a >= 'aa' ORDER BY 1;
}
} {AA AB Aa Ab BA BB Ba Bb aA aB}
do_test collate2-2.11 {
execsql {
SELECT b FROM collate2t1 WHERE NOT b >= 'aa' ORDER BY 1, oid;
}
} {}
do_test collate2-2.12 {
execsql {
SELECT c FROM collate2t1 WHERE NOT c >= 'aa' ORDER BY 1;
}
} {AA BA aA bA AB BB aB bB Aa Ba}
do_test collate2-2.13 {
execsql {
SELECT a FROM collate2t1 WHERE NOT a <= 'aa' ORDER BY 1;
}
} {ab bA bB ba bb}
do_test collate2-2.14 {
execsql {
SELECT b FROM collate2t1 WHERE NOT b <= 'aa' ORDER BY 1, oid;
}
} {ab aB Ab AB ba bA Ba BA bb bB Bb BB}
do_test collate2-2.15 {
execsql {
SELECT c FROM collate2t1 WHERE NOT c <= 'aa' ORDER BY 1;
}
} {ba Ab Bb ab bb}
do_test collate2-2.16 {
execsql {
SELECT a FROM collate2t1 WHERE a NOT BETWEEN 'Aa' AND 'Bb' ORDER BY 1;
}
} {AA AB aA aB aa ab bA bB ba bb}
do_test collate2-2.17 {
execsql {
SELECT b FROM collate2t1 WHERE b NOT BETWEEN 'Aa' AND 'Bb' ORDER BY 1, oid;
}
} {}
do_test collate2-2.18 {
execsql {
SELECT c FROM collate2t1 WHERE c NOT BETWEEN 'Aa' AND 'Bb' ORDER BY 1;
}
} {AA BA aA bA AB BB aB bB ab bb}
do_test collate2-2.19 {
execsql {
SELECT a FROM collate2t1 WHERE NOT CASE a WHEN 'aa' THEN 1 ELSE 0 END;
}
} {{} ab ba bb aA aB bA bB Aa Ab Ba Bb AA AB BA BB}
do_test collate2-2.20 {
execsql {
SELECT b FROM collate2t1 WHERE NOT CASE b WHEN 'aa' THEN 1 ELSE 0 END;
}
} {{} ab ba bb aB bA bB Ab Ba Bb AB BA BB}
do_test collate2-2.21 {
execsql {
SELECT c FROM collate2t1 WHERE NOT CASE c WHEN 'aa' THEN 1 ELSE 0 END;
}
} {{} ab ba bb aA aB bA bB Aa Ab Ba Bb AA AB BA BB}
ifcapable subquery {
do_test collate2-2.22 {
execsql {
SELECT a FROM collate2t1 WHERE NOT a IN ('aa', 'bb');
}
} {ab ba aA aB bA bB Aa Ab Ba Bb AA AB BA BB}
do_test collate2-2.23 {
execsql {
SELECT b FROM collate2t1 WHERE NOT b IN ('aa', 'bb');
}
} {ab ba aB bA Ab Ba AB BA}
do_test collate2-2.24 {
execsql {
SELECT c FROM collate2t1 WHERE NOT c IN ('aa', 'bb');
}
} {ab ba aA aB bA bB Aa Ab Ba Bb AA AB BA BB}
do_test collate2-2.25 {
execsql {
SELECT a FROM collate2t1
WHERE NOT a IN (SELECT a FROM collate2t1 WHERE a IN ('aa', 'bb'));
}
} {ab ba aA aB bA bB Aa Ab Ba Bb AA AB BA BB}
do_test collate2-2.26 {
execsql {
SELECT b FROM collate2t1
WHERE NOT b IN (SELECT a FROM collate2t1 WHERE a IN ('aa', 'bb'));
}
} {ab ba aB bA Ab Ba AB BA}
do_test collate2-2.27 {
execsql {
SELECT c FROM collate2t1
WHERE NOT c IN (SELECT a FROM collate2t1 WHERE a IN ('aa', 'bb'));
}
} {ab ba aA aB bA bB Aa Ab Ba Bb AA AB BA BB}
}
do_test collate2-3.1 {
execsql {
SELECT a > 'aa' FROM collate2t1;
}
} {{} 0 1 1 1 0 0 1 1 0 0 0 0 0 0 0 0}
do_test collate2-3.2 {
execsql {
SELECT b > 'aa' FROM collate2t1;
}
} {{} 0 1 1 1 0 1 1 1 0 1 1 1 0 1 1 1}
do_test collate2-3.3 {
execsql {
SELECT c > 'aa' FROM collate2t1;
}
} {{} 0 1 1 1 0 0 0 0 0 1 0 1 0 0 0 0}
do_test collate2-3.4 {
execsql {
SELECT a < 'aa' FROM collate2t1;
}
} {{} 0 0 0 0 1 1 0 0 1 1 1 1 1 1 1 1}
do_test collate2-3.5 {
execsql {
SELECT b < 'aa' FROM collate2t1;
}
} {{} 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0}
do_test collate2-3.6 {
execsql {
SELECT c < 'aa' FROM collate2t1;
}
} {{} 0 0 0 0 1 1 1 1 1 0 1 0 1 1 1 1}
do_test collate2-3.7 {
execsql {
SELECT a = 'aa' FROM collate2t1;
}
} {{} 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0}
do_test collate2-3.8 {
execsql {
SELECT b = 'aa' FROM collate2t1;
}
} {{} 1 0 0 0 1 0 0 0 1 0 0 0 1 0 0 0}
do_test collate2-3.9 {
execsql {
SELECT c = 'aa' FROM collate2t1;
}
} {{} 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0}
do_test collate2-3.10 {
execsql {
SELECT a <= 'aa' FROM collate2t1;
}
} {{} 1 0 0 0 1 1 0 0 1 1 1 1 1 1 1 1}
do_test collate2-3.11 {
execsql {
SELECT b <= 'aa' FROM collate2t1;
}
} {{} 1 0 0 0 1 0 0 0 1 0 0 0 1 0 0 0}
do_test collate2-3.12 {
execsql {
SELECT c <= 'aa' FROM collate2t1;
}
} {{} 1 0 0 0 1 1 1 1 1 0 1 0 1 1 1 1}
do_test collate2-3.13 {
execsql {
SELECT a >= 'aa' FROM collate2t1;
}
} {{} 1 1 1 1 0 0 1 1 0 0 0 0 0 0 0 0}
do_test collate2-3.14 {
execsql {
SELECT b >= 'aa' FROM collate2t1;
}
} {{} 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1}
do_test collate2-3.15 {
execsql {
SELECT c >= 'aa' FROM collate2t1;
}
} {{} 1 1 1 1 0 0 0 0 0 1 0 1 0 0 0 0}
do_test collate2-3.16 {
execsql {
SELECT a BETWEEN 'Aa' AND 'Bb' FROM collate2t1;
}
} {{} 0 0 0 0 0 0 0 0 1 1 1 1 0 0 1 1}
do_test collate2-3.17 {
execsql {
SELECT b BETWEEN 'Aa' AND 'Bb' FROM collate2t1;
}
} {{} 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1}
do_test collate2-3.18 {
execsql {
SELECT c BETWEEN 'Aa' AND 'Bb' FROM collate2t1;
}
} {{} 1 0 1 0 0 0 0 0 1 1 1 1 0 0 0 0}
do_test collate2-3.19 {
execsql {
SELECT CASE a WHEN 'aa' THEN 1 ELSE 0 END FROM collate2t1;
}
} {0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0}
do_test collate2-3.20 {
execsql {
SELECT CASE b WHEN 'aa' THEN 1 ELSE 0 END FROM collate2t1;
}
} {0 1 0 0 0 1 0 0 0 1 0 0 0 1 0 0 0}
do_test collate2-3.21 {
execsql {
SELECT CASE c WHEN 'aa' THEN 1 ELSE 0 END FROM collate2t1;
}
} {0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0}
ifcapable subquery {
do_test collate2-3.22 {
execsql {
SELECT a IN ('aa', 'bb') FROM collate2t1;
}
} {{} 1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0}
do_test collate2-3.23 {
execsql {
SELECT b IN ('aa', 'bb') FROM collate2t1;
}
} {{} 1 0 0 1 1 0 0 1 1 0 0 1 1 0 0 1}
do_test collate2-3.24 {
execsql {
SELECT c IN ('aa', 'bb') FROM collate2t1;
}
} {{} 1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0}
do_test collate2-3.25 {
execsql {
SELECT a IN (SELECT a FROM collate2t1 WHERE a IN ('aa', 'bb'))
FROM collate2t1;
}
} {{} 1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0}
do_test collate2-3.26 {
execsql {
SELECT b IN (SELECT a FROM collate2t1 WHERE a IN ('aa', 'bb'))
FROM collate2t1;
}
} {{} 1 0 0 1 1 0 0 1 1 0 0 1 1 0 0 1}
do_test collate2-3.27 {
execsql {
SELECT c IN (SELECT a FROM collate2t1 WHERE a IN ('aa', 'bb'))
FROM collate2t1;
}
} {{} 1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0}
}
do_test collate2-4.0 {
execsql {
CREATE TABLE collate2t2(b COLLATE binary);
CREATE TABLE collate2t3(b text);
INSERT INTO collate2t2 VALUES('aa');
INSERT INTO collate2t3 VALUES('aa');
}
} {}
# Test that when both sides of a binary comparison operator have
# default collation types, the collate type for the leftmost term
# is used.
do_test collate2-4.1 {
execsql {
SELECT collate2t1.a FROM collate2t1, collate2t2
WHERE collate2t1.b = collate2t2.b;
}
} {aa aA Aa AA}
do_test collate2-4.2 {
execsql {
SELECT collate2t1.a FROM collate2t1, collate2t2
WHERE collate2t2.b = collate2t1.b;
}
} {aa}
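# (Illustrative note, not part of the upstream file: collate2t1.b uses
# NOCASE and collate2t2.b uses BINARY, so with collate2t1.b on the left
# the comparison is case-insensitive and matches four rows, while with
# collate2t2.b on the left it is case-sensitive and matches only 'aa'.)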
# Test that when one side of the comparison has a default collation type and
# the other does not, that collation type is used.
do_test collate2-4.3 {
execsql {
SELECT collate2t1.a FROM collate2t1, collate2t3
WHERE collate2t1.b = collate2t3.b||''
ORDER BY +collate2t1.a DESC;
}
} {aa aA Aa AA}
do_test collate2-4.4 {
execsql {
SELECT collate2t1.a FROM collate2t1, collate2t3
WHERE collate2t3.b||'' = collate2t1.b
ORDER BY +collate2t1.a DESC;
}
} {aa aA Aa AA}
do_test collate2-4.5 {
execsql {
DROP TABLE collate2t3;
}
} {}
#
# Test that the default collation types are used when the JOIN syntax
# is used in place of a WHERE clause.
#
# SQLite transforms the JOIN syntax into a WHERE clause internally, so
# the focus of these tests is to ensure that the table on the left-hand side
# of the join determines the collation type used.
#
do_test collate2-5.0 {
execsql {
SELECT collate2t1.b FROM collate2t1 JOIN collate2t2 USING (b);
}
} {aa aA Aa AA}
do_test collate2-5.1 {
execsql {
SELECT collate2t1.b FROM collate2t2 JOIN collate2t1 USING (b);
}
} {aa}
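# (Illustrative note, not part of the upstream file: the same rule holds
# for JOIN ... USING (b); the collation of the left-hand table's column
# decides whether the match is case-insensitive (collate2t1 on the left,
# four rows) or binary (collate2t2 on the left, one row).)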
do_test collate2-5.2 {
execsql {
SELECT collate2t1.b FROM collate2t1 NATURAL JOIN collate2t2;
}
} {aa aA Aa AA}
do_test collate2-5.3 {
execsql {
SELECT collate2t1.b FROM collate2t2 NATURAL JOIN collate2t1;
}
} {aa}
do_test collate2-5.4 {
execsql {
SELECT collate2t2.b FROM collate2t1 LEFT OUTER JOIN collate2t2 USING (b) order by collate2t1.oid;
}
} {{} aa {} {} {} aa {} {} {} aa {} {} {} aa {} {} {}}
do_test collate2-5.5 {
execsql {
SELECT collate2t1.b, collate2t2.b FROM collate2t2 LEFT OUTER JOIN collate2t1 USING (b);
}
} {aa aa}
do_execsql_test 6.1 {
CREATE TABLE t1(x);
INSERT INTO t1 VALUES('b');
INSERT INTO t1 VALUES('B');
}
do_execsql_test 6.2 {
SELECT * FROM t1 WHERE x COLLATE nocase BETWEEN 'a' AND 'c';
} {b B}
do_execsql_test 6.3 {
SELECT * FROM t1 WHERE x BETWEEN 'a' COLLATE nocase AND 'c' COLLATE nocase;
} {b B}
do_execsql_test 6.4 {
SELECT * FROM t1
WHERE x COLLATE nocase BETWEEN 'a' COLLATE nocase AND 'c' COLLATE nocase;
} {b B}
do_execsql_test 6.5 {
SELECT * FROM t1 WHERE +x COLLATE nocase BETWEEN 'a' AND 'c';
} {b B}
do_execsql_test 6.6 {
SELECT * FROM t1 WHERE +x BETWEEN 'a' COLLATE nocase AND 'c' COLLATE nocase;
} {b B}
do_execsql_test 6.7 {
SELECT * FROM t1
WHERE +x COLLATE nocase BETWEEN 'a' COLLATE nocase AND 'c' COLLATE nocase;
} {b B}
finish_test

View File

@ -0,0 +1,531 @@
# 2001 September 15
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for the SQLite library.  The
# focus of this script is the handling of collation sequences.
#
# $Id: collate3.test,v 1.13 2008/08/20 16:35:10 drh Exp $
set testdir [file dirname $argv0]
source $testdir/tester.tcl
#
# Tests are organised as follows:
#
# collate3-1.* - Errors related to unknown collation sequences.
# collate3-2.* - Errors related to undefined collation sequences.
# collate3-3.* - Writing to a table that has an index with an undefined
#                collation sequence.
# collate3-4.* - Misc errors.
# collate3-5.* - Collation factory.
#
#
# These tests ensure that when a user executes a statement with an
# unknown collation sequence an error is returned.
#
do_test collate3-1.0 {
execsql {
CREATE TABLE collate3t1(c1 UNIQUE);
}
} {}
do_test collate3-1.1 {
catchsql {
SELECT * FROM collate3t1 ORDER BY 1 collate garbage;
}
} {1 {no such collation sequence: garbage}}
do_test collate3-1.1.2 {
catchsql {
SELECT DISTINCT c1 COLLATE garbage FROM collate3t1;
}
} {1 {no such collation sequence: garbage}}
do_test collate3-1.2 {
catchsql {
CREATE TABLE collate3t2(c1 collate garbage);
}
} {1 {no such collation sequence: garbage}}
do_test collate3-1.3 {
catchsql {
CREATE INDEX collate3i1 ON collate3t1(c1 COLLATE garbage);
}
} {1 {no such collation sequence: garbage}}
execsql {
DROP TABLE collate3t1;
}
proc caseless {a b} { string compare -nocase $a $b }
do_test collate3-1.4 {
db collate caseless caseless
execsql {
CREATE TABLE t1(a COLLATE caseless);
INSERT INTO t1 VALUES('Abc2');
INSERT INTO t1 VALUES('abc1');
INSERT INTO t1 VALUES('aBc3');
}
execsql { SELECT * FROM t1 ORDER BY a }
} {abc1 Abc2 aBc3}
do_test collate3-1.5 {
db close
sqlite3 db test.db
catchsql { SELECT * FROM t1 ORDER BY a }
} {1 {no such collation sequence: caseless}}
do_test collate3-1.6.1 {
db collate caseless caseless
execsql { CREATE INDEX i1 ON t1(a) }
execsql { SELECT * FROM t1 ORDER BY a }
} {abc1 Abc2 aBc3}
do_test collate3-1.6.2 {
db close
sqlite3 db test.db
catchsql { SELECT * FROM t1 ORDER BY a }
} {1 {no such collation sequence: caseless}}
do_test collate3-1.6.3 {
db close
sqlite3 db test.db
catchsql { PRAGMA integrity_check }
} {1 {no such collation sequence: caseless}}
do_test collate3-1.6.4 {
db close
sqlite3 db test.db
catchsql { REINDEX }
} {1 {no such collation sequence: caseless}}
do_test collate3-1.7.1 {
db collate caseless caseless
execsql {
DROP TABLE t1;
CREATE TABLE t1(a);
CREATE INDEX i1 ON t1(a COLLATE caseless);
INSERT INTO t1 VALUES('Abc2');
INSERT INTO t1 VALUES('abc1');
INSERT INTO t1 VALUES('aBc3');
SELECT * FROM t1 ORDER BY a COLLATE caseless;
}
} {abc1 Abc2 aBc3}
do_test collate3-1.7.2 {
db close
sqlite3 db test.db
catchsql { SELECT * FROM t1 ORDER BY a COLLATE caseless}
} {1 {no such collation sequence: caseless}}
do_test collate3-1.7.3 {
db close
sqlite3 db test.db
catchsql { PRAGMA integrity_check }
} {1 {no such collation sequence: caseless}}
do_test collate3-1.7.4 {
db close
sqlite3 db test.db
catchsql { REINDEX }
} {1 {no such collation sequence: caseless}}
do_test collate3-1.7.5 {
db close
sqlite3 db test.db
db collate caseless caseless
catchsql { PRAGMA integrity_check }
} {0 ok}
proc needed {nm} { db collate caseless caseless }
do_test collate3-1.7.6 {
db close
sqlite3 db test.db
db collation_needed needed
catchsql { PRAGMA integrity_check }
} {0 ok}
do_test collate3-1.8 {
execsql { DROP TABLE t1 }
} {}
#
# Create a table with a default collation sequence, then close
# and re-open the database without re-registering the collation
# sequence. Then make sure the library stops us from using
# the collation sequence in:
# * an explicitly collated ORDER BY
# * an ORDER BY that uses the default collation sequence
# * an expression (=)
# * a CREATE TABLE statement
# * a CREATE INDEX statement that uses a default collation sequence
# * a GROUP BY that uses the default collation sequence
# * a SELECT DISTINCT that uses the default collation sequence
# * Compound SELECTs that uses the default collation sequence
# * An ORDER BY on a compound SELECT with an explicit ORDER BY.
#
do_test collate3-2.0 {
db collate string_compare {string compare}
execsql {
CREATE TABLE collate3t1(c1 COLLATE string_compare, c2);
}
db close
sqlite3 db test.db
expr 0
} 0
do_test collate3-2.1 {
catchsql {
SELECT * FROM collate3t1 ORDER BY 1 COLLATE string_compare;
}
} {1 {no such collation sequence: string_compare}}
do_test collate3-2.2 {
catchsql {
SELECT * FROM collate3t1 ORDER BY c1;
}
} {1 {no such collation sequence: string_compare}}
do_test collate3-2.3 {
catchsql {
SELECT * FROM collate3t1 WHERE c1 = 'xxx';
}
} {1 {no such collation sequence: string_compare}}
do_test collate3-2.4 {
catchsql {
CREATE TABLE collate3t2(c1 COLLATE string_compare);
}
} {1 {no such collation sequence: string_compare}}
do_test collate3-2.5 {
catchsql {
CREATE INDEX collate3t1_i1 ON collate3t1(c1);
}
} {1 {no such collation sequence: string_compare}}
do_test collate3-2.6 {
catchsql {
SELECT * FROM collate3t1;
}
} {0 {}}
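# (Illustrative note, not part of the upstream file: a bare SELECT
# succeeds even though string_compare is unregistered, because no
# comparison or ordering is performed, so the collation sequence is
# never actually needed.)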
do_test collate3-2.7.1 {
catchsql {
SELECT count(*) FROM collate3t1 GROUP BY c1;
}
} {1 {no such collation sequence: string_compare}}
# do_test collate3-2.7.2 {
# catchsql {
# SELECT * FROM collate3t1 GROUP BY c1;
# }
# } {1 {GROUP BY may only be used on aggregate queries}}
do_test collate3-2.7.2 {
catchsql {
SELECT * FROM collate3t1 GROUP BY c1;
}
} {1 {no such collation sequence: string_compare}}
do_test collate3-2.8 {
catchsql {
SELECT DISTINCT c1 FROM collate3t1;
}
} {1 {no such collation sequence: string_compare}}
ifcapable compound {
do_test collate3-2.9 {
catchsql {
SELECT c1 FROM collate3t1 UNION SELECT c1 FROM collate3t1;
}
} {1 {no such collation sequence: string_compare}}
do_test collate3-2.10 {
catchsql {
SELECT c1 FROM collate3t1 EXCEPT SELECT c1 FROM collate3t1;
}
} {1 {no such collation sequence: string_compare}}
do_test collate3-2.11 {
catchsql {
SELECT c1 FROM collate3t1 INTERSECT SELECT c1 FROM collate3t1;
}
} {1 {no such collation sequence: string_compare}}
do_test collate3-2.12 {
catchsql {
SELECT c1 FROM collate3t1 UNION ALL SELECT c1 FROM collate3t1;
}
} {0 {}}
do_test collate3-2.13 {
catchsql {
SELECT 10 UNION ALL SELECT 20 ORDER BY 1 COLLATE string_compare;
}
} {1 {no such collation sequence: string_compare}}
do_test collate3-2.14 {
catchsql {
SELECT 10 INTERSECT SELECT 20 ORDER BY 1 COLLATE string_compare;
}
} {1 {no such collation sequence: string_compare}}
do_test collate3-2.15 {
catchsql {
SELECT 10 EXCEPT SELECT 20 ORDER BY 1 COLLATE string_compare;
}
} {1 {no such collation sequence: string_compare}}
do_test collate3-2.16 {
catchsql {
SELECT 10 UNION SELECT 20 ORDER BY 1 COLLATE string_compare;
}
} {1 {no such collation sequence: string_compare}}
do_test collate3-2.17 {
catchsql {
SELECT c1 FROM collate3t1 UNION ALL SELECT c1 FROM collate3t1 ORDER BY 1;
}
} {1 {no such collation sequence: string_compare}}
} ;# ifcapable compound
#
# Create an index that uses a collation sequence then close and
# re-open the database without re-registering the collation
# sequence. Then check that for the table with the index
# * An INSERT fails,
# * An UPDATE on the column with the index fails,
# * An UPDATE on a different column succeeds.
# * A DELETE with a WHERE clause fails
# * A DELETE without a WHERE clause succeeds
#
# Also, ensure that the restrictions tested by collate3-2.* still
# apply after the index has been created.
#
do_test collate3-3.0 {
db collate string_compare {string compare}
execsql {
CREATE INDEX collate3t1_i1 ON collate3t1(c1);
INSERT INTO collate3t1 VALUES('xxx', 'yyy');
}
db close
sqlite3 db test.db
expr 0
} 0
db eval {select * from collate3t1}
do_test collate3-3.1 {
catchsql {
INSERT INTO collate3t1 VALUES('xxx', 0);
}
} {1 {no such collation sequence: string_compare}}
do_test collate3-3.2 {
catchsql {
UPDATE collate3t1 SET c1 = 'xxx';
}
} {1 {no such collation sequence: string_compare}}
do_test collate3-3.3 {
catchsql {
UPDATE collate3t1 SET c2 = 'xxx';
}
} {0 {}}
do_test collate3-3.4 {
catchsql {
DELETE FROM collate3t1 WHERE 1;
}
} {1 {no such collation sequence: string_compare}}
do_test collate3-3.5 {
catchsql {
SELECT * FROM collate3t1;
}
} {0 {xxx xxx}}
do_test collate3-3.6 {
catchsql {
DELETE FROM collate3t1;
}
} {0 {}}
ifcapable {integrityck} {
do_test collate3-3.8 {
catchsql {
PRAGMA integrity_check
}
} {1 {no such collation sequence: string_compare}}
}
do_test collate3-3.9 {
catchsql {
SELECT * FROM collate3t1;
}
} {0 {}}
do_test collate3-3.10 {
catchsql {
SELECT * FROM collate3t1 ORDER BY 1 COLLATE string_compare;
}
} {1 {no such collation sequence: string_compare}}
do_test collate3-3.11 {
catchsql {
SELECT * FROM collate3t1 ORDER BY c1;
}
} {1 {no such collation sequence: string_compare}}
do_test collate3-3.12 {
catchsql {
SELECT * FROM collate3t1 WHERE c1 = 'xxx';
}
} {1 {no such collation sequence: string_compare}}
do_test collate3-3.13 {
catchsql {
CREATE TABLE collate3t2(c1 COLLATE string_compare);
}
} {1 {no such collation sequence: string_compare}}
do_test collate3-3.14 {
catchsql {
CREATE INDEX collate3t1_i2 ON collate3t1(c1);
}
} {1 {no such collation sequence: string_compare}}
do_test collate3-3.15 {
execsql {
DROP TABLE collate3t1;
}
} {}
# Check we can create an index that uses an explicit collation
# sequence and then close and re-open the database.
do_test collate3-4.6 {
db collate user_defined "string compare"
execsql {
CREATE TABLE collate3t1(a, b);
INSERT INTO collate3t1 VALUES('hello', NULL);
CREATE INDEX collate3i1 ON collate3t1(a COLLATE user_defined);
}
} {}
do_test collate3-4.7 {
db close
sqlite3 db test.db
catchsql {
SELECT * FROM collate3t1 ORDER BY a COLLATE user_defined;
}
} {1 {no such collation sequence: user_defined}}
do_test collate3-4.8.1 {
db collate user_defined "string compare"
catchsql {
SELECT * FROM collate3t1 ORDER BY a COLLATE user_defined;
}
} {0 {hello {}}}
do_test collate3-4.8.2 {
db close
lindex [catch {
sqlite3 db test.db
}] 0
} {0}
do_test collate3-4.8.3 {
execsql {
DROP TABLE collate3t1;
}
} {}
# Compare strings as numbers.
proc numeric_compare {lhs rhs} {
if {$rhs > $lhs} {
set res -1
} else {
set res [expr ($lhs > $rhs)?1:0]
}
return $res
}
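# Illustrative sanity check (not part of the upstream file; the test name
# collate3-4.8.4 is made up for this sketch).  numeric_compare orders
# values by numeric magnitude, so '12' sorts before '101' even though it
# is the larger string.
do_test collate3-4.8.4 {
  list [numeric_compare 12 101] [numeric_compare 101 12] [numeric_compare 7 7]
} {-1 1 0}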
# Check we can create a view that uses an explicit collation
# sequence and then close and re-open the database.
ifcapable view {
do_test collate3-4.9 {
db collate user_defined numeric_compare
execsql {
CREATE TABLE collate3t1(a, b);
INSERT INTO collate3t1 VALUES('2', NULL);
INSERT INTO collate3t1 VALUES('101', NULL);
INSERT INTO collate3t1 VALUES('12', NULL);
CREATE VIEW collate3v1 AS SELECT * FROM collate3t1
ORDER BY 1 COLLATE user_defined;
SELECT * FROM collate3v1;
}
} {2 {} 12 {} 101 {}}
do_test collate3-4.10 {
db close
sqlite3 db test.db
catchsql {
SELECT * FROM collate3v1;
}
} {1 {no such collation sequence: user_defined}}
do_test collate3-4.11 {
db collate user_defined numeric_compare
catchsql {
SELECT * FROM collate3v1;
}
} {0 {2 {} 12 {} 101 {}}}
do_test collate3-4.12 {
execsql {
DROP TABLE collate3t1;
}
} {}
} ;# ifcapable view
#
# Test the collation factory. In the code, the "no such collation sequence"
# message is only generated in two places. So these tests just test that
# the collation factory can be called once from each of those points.
#
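# Illustrative sketch of the mechanism (not part of the upstream file):
# the "collation factory" is the collation_needed callback.  A handler
# registered with [db collation_needed] is invoked with the name of any
# collation sequence that is requested but not yet registered, giving it
# a chance to supply one on demand, e.g.:
#
#   proc supply_collation {nm} { db collate $nm {string compare} }
#   db collation_needed supply_collation
#
# The tests below register such a handler (proc cfact) and count how
# many times it is invoked.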
do_test collate3-5.0 {
catchsql {
CREATE TABLE collate3t1(a);
INSERT INTO collate3t1 VALUES(10);
SELECT a FROM collate3t1 ORDER BY 1 COLLATE unk;
}
} {1 {no such collation sequence: unk}}
do_test collate3-5.1 {
set ::cfact_cnt 0
proc cfact {nm} {
db collate $nm {string compare}
incr ::cfact_cnt
}
db collation_needed cfact
} {}
do_test collate3-5.2 {
catchsql {
SELECT a FROM collate3t1 ORDER BY 1 COLLATE unk;
}
} {0 10}
do_test collate3-5.3 {
set ::cfact_cnt
} {1}
do_test collate3-5.4 {
catchsql {
SELECT a FROM collate3t1 ORDER BY 1 COLLATE unk;
}
} {0 10}
do_test collate3-5.5 {
set ::cfact_cnt
} {1}
do_test collate3-5.6 {
catchsql {
SELECT a FROM collate3t1 ORDER BY 1 COLLATE unk;
}
} {0 10}
do_test collate3-5.7 {
execsql {
DROP TABLE collate3t1;
CREATE TABLE collate3t1(a COLLATE unk);
}
db close
sqlite3 db test.db
catchsql {
SELECT a FROM collate3t1 ORDER BY 1;
}
} {1 {no such collation sequence: unk}}
do_test collate3-5.8 {
set ::cfact_cnt 0
proc cfact {nm} {
db collate $nm {string compare}
incr ::cfact_cnt
}
db collation_needed cfact
catchsql {
SELECT a FROM collate3t1 ORDER BY 1;
}
} {0 {}}
do_test collate3-5.9 {
execsql {
DROP TABLE collate3t1;
}
} {}
finish_test

Some files were not shown because too many files have changed in this diff.