MXS-2732 Rename sqlite-src-3110100 to sqlite-src-3110100.old

Originally, the sqlite installation was imported into the MaxScale
repository in the one gigantic MaxScale 1.4 -> 2.0 commit.

Consequently, there is no import commit to compare to if you want
to extract all MaxScale specific changes. To make it simpler in the
future, sqlite will now be imported in a commit of its own.
This commit is contained in:
Johan Wikman
2019-10-30 10:37:21 +02:00
parent 290d38c67f
commit 81e78726eb
497 changed files with 3 additions and 3 deletions

View File

@ -1,450 +0,0 @@
/*
** 2015 October 7
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
** This file contains C# code to download a single file based on a URI.
*/
using System;
using System.ComponentModel;
using System.Diagnostics;
using System.IO;
using System.Net;
using System.Reflection;
using System.Runtime.InteropServices;
using System.Threading;
///////////////////////////////////////////////////////////////////////////////
#region Assembly Metadata
[assembly: AssemblyTitle("GetFile Tool")]
[assembly: AssemblyDescription("Download a single file based on a URI.")]
[assembly: AssemblyCompany("SQLite Development Team")]
[assembly: AssemblyProduct("SQLite")]
[assembly: AssemblyCopyright("Public Domain")]
[assembly: ComVisible(false)]
[assembly: Guid("5c4b3728-1693-4a33-a218-8e6973ca15a6")]
[assembly: AssemblyVersion("1.0.*")]
#if DEBUG
[assembly: AssemblyConfiguration("Debug")]
#else
[assembly: AssemblyConfiguration("Release")]
#endif
#endregion
///////////////////////////////////////////////////////////////////////////////
namespace GetFile
{
/// <summary>
/// This enumeration is used to represent all the possible exit codes from
/// this tool.
/// </summary>
internal enum ExitCode
{
/// <summary>
/// The file download was a success.
/// </summary>
Success = 0,
/// <summary>
/// The command line arguments are missing (i.e. null). Generally,
/// this should not happen.
/// </summary>
MissingArgs = 1,
/// <summary>
/// The wrong number of command line arguments was supplied.
/// </summary>
WrongNumArgs = 2,
/// <summary>
/// The URI specified on the command line could not be parsed as a
/// supported absolute URI.
/// </summary>
BadUri = 3,
/// <summary>
/// The file name portion of the URI specified on the command line
/// could not be extracted from it.
/// </summary>
BadFileName = 4,
/// <summary>
/// The temporary directory is either invalid (i.e. null) or does not
/// represent an available directory.
/// </summary>
BadTempPath = 5,
/// <summary>
/// An exception was caught in <see cref="Main" />. Generally, this
/// should not happen.
/// </summary>
Exception = 6,
/// <summary>
/// The file download was canceled. This tool does not make use of
/// the <see cref="WebClient.CancelAsync" /> method; therefore, this
/// should not happen.
/// </summary>
DownloadCanceled = 7,
/// <summary>
/// The file download encountered an error. Further information about
/// this error should be displayed on the console.
/// </summary>
DownloadError = 8
}
///////////////////////////////////////////////////////////////////////////
internal static class Program
{
#region Private Data
/// <summary>
/// This is used to synchronize multithreaded access to the
/// <see cref="previousPercent" /> and <see cref="exitCode"/>
/// fields.
/// </summary>
private static readonly object syncRoot = new object();
///////////////////////////////////////////////////////////////////////
/// <summary>
/// This event will be signed when the file download has completed,
/// even if the file download itself was canceled or unsuccessful.
/// </summary>
private static EventWaitHandle doneEvent;
///////////////////////////////////////////////////////////////////////
/// <summary>
/// The previous file download completion percentage seen by the
/// <see cref="DownloadProgressChanged" /> event handler. This value
/// is never decreased, nor is it ever reset to zero.
/// </summary>
private static int previousPercent = 0;
///////////////////////////////////////////////////////////////////////
/// <summary>
/// This will be the exit code returned by this tool after the file
/// download completes, successfully or otherwise. This value is only
/// changed by the <see cref="DownloadFileCompleted" /> event handler.
/// </summary>
private static ExitCode exitCode = ExitCode.Success;
#endregion
///////////////////////////////////////////////////////////////////////
#region Private Support Methods
/// <summary>
/// This method displays an error message to the console and/or
/// displays the command line usage information for this tool.
/// </summary>
/// <param name="message">
/// The error message to display, if any.
/// </param>
/// <param name="usage">
/// Non-zero to display the command line usage information.
/// </param>
private static void Error(
string message,
bool usage
)
{
if (message != null)
Console.WriteLine(message);
string fileName = Path.GetFileName(
Process.GetCurrentProcess().MainModule.FileName);
Console.WriteLine(String.Format("usage: {0} <uri>", fileName));
}
///////////////////////////////////////////////////////////////////////
/// <summary>
/// This method attempts to determine the file name portion of the
/// specified URI.
/// </summary>
/// <param name="uri">
/// The URI to process.
/// </param>
/// <returns>
/// The file name portion of the specified URI -OR- null if it cannot
/// be determined.
/// </returns>
private static string GetFileName(
Uri uri
)
{
if (uri == null)
return null;
string pathAndQuery = uri.PathAndQuery;
if (String.IsNullOrEmpty(pathAndQuery))
return null;
int index = pathAndQuery.LastIndexOf('/');
if ((index < 0) || (index == pathAndQuery.Length))
return null;
return pathAndQuery.Substring(index + 1);
}
#endregion
///////////////////////////////////////////////////////////////////////
#region Private Event Handlers
/// <summary>
/// This method is an event handler that is called when the file
/// download completion percentage changes. It will display progress
/// on the console. Special care is taken to make sure that progress
/// events are not displayed out-of-order, even if duplicate and/or
/// out-of-order events are received.
/// </summary>
/// <param name="sender">
/// The source of the event.
/// </param>
/// <param name="e">
/// Information for the event being processed.
/// </param>
private static void DownloadProgressChanged(
object sender,
DownloadProgressChangedEventArgs e
)
{
if (e != null)
{
int percent = e.ProgressPercentage;
lock (syncRoot)
{
if (percent > previousPercent)
{
Console.Write('.');
if ((percent % 10) == 0)
Console.Write(" {0}% ", percent);
previousPercent = percent;
}
}
}
}
///////////////////////////////////////////////////////////////////////
/// <summary>
/// This method is an event handler that is called when the file
/// download has completed, successfully or otherwise. It will
/// display the overall result of the file download on the console,
/// including any <see cref="Exception" /> information, if applicable.
/// The <see cref="exitCode" /> field is changed by this method to
/// indicate the overall result of the file download and the event
/// within the <see cref="doneEvent" /> field will be signaled.
/// </summary>
/// <param name="sender">
/// The source of the event.
/// </param>
/// <param name="e">
/// Information for the event being processed.
/// </param>
private static void DownloadFileCompleted(
object sender,
AsyncCompletedEventArgs e
)
{
if (e != null)
{
lock (syncRoot)
{
if (previousPercent < 100)
Console.Write(' ');
}
if (e.Cancelled)
{
Console.WriteLine("Canceled");
lock (syncRoot)
{
exitCode = ExitCode.DownloadCanceled;
}
}
else
{
Exception error = e.Error;
if (error != null)
{
Console.WriteLine("Error: {0}", error);
lock (syncRoot)
{
exitCode = ExitCode.DownloadError;
}
}
else
{
Console.WriteLine("Done");
}
}
}
if (doneEvent != null)
doneEvent.Set();
}
#endregion
///////////////////////////////////////////////////////////////////////
#region Program Entry Point
/// <summary>
/// This is the entry-point for this tool. It handles processing the
/// command line arguments, setting up the web client, downloading the
/// file, and saving it to the file system.
/// </summary>
/// <param name="args">
/// The command line arguments.
/// </param>
/// <returns>
/// Zero upon success; non-zero on failure. This will be one of the
/// values from the <see cref="ExitCode" /> enumeration.
/// </returns>
private static int Main(
string[] args
)
{
//
// NOTE: Sanity check the command line arguments.
//
if (args == null)
{
Error(null, true);
return (int)ExitCode.MissingArgs;
}
if (args.Length != 1)
{
Error(null, true);
return (int)ExitCode.WrongNumArgs;
}
//
// NOTE: Attempt to convert the first (and only) command line
// argument to an absolute URI.
//
Uri uri;
if (!Uri.TryCreate(args[0], UriKind.Absolute, out uri))
{
Error("Could not create absolute URI from argument.", false);
return (int)ExitCode.BadUri;
}
//
// NOTE: Attempt to extract the file name portion of the URI we
// just created.
//
string fileName = GetFileName(uri);
if (fileName == null)
{
Error("Could not extract the file name from the URI.", false);
return (int)ExitCode.BadFileName;
}
//
// NOTE: Grab the temporary path setup for this process. If it is
// unavailable, we will not continue.
//
string directory = Path.GetTempPath();
if (String.IsNullOrEmpty(directory) ||
!Directory.Exists(directory))
{
Error("Temporary directory is invalid or unavailable.", false);
return (int)ExitCode.BadTempPath;
}
try
{
using (WebClient webClient = new WebClient())
{
//
// NOTE: Create the event used to signal completion of the
// file download.
//
doneEvent = new ManualResetEvent(false);
//
// NOTE: Hookup the event handlers we care about on the web
// client. These are necessary because the file is
// downloaded asynchronously.
//
webClient.DownloadProgressChanged +=
new DownloadProgressChangedEventHandler(
DownloadProgressChanged);
webClient.DownloadFileCompleted +=
new AsyncCompletedEventHandler(
DownloadFileCompleted);
//
// NOTE: Build the fully qualified path and file name,
// within the temporary directory, where the file to
// be downloaded will be saved.
//
fileName = Path.Combine(directory, fileName);
//
// NOTE: If the file name already exists (in the temporary)
// directory, delete it.
//
// TODO: Perhaps an error should be raised here instead?
//
if (File.Exists(fileName))
File.Delete(fileName);
//
// NOTE: After kicking off the asynchronous file download
// process, wait [forever] until the "done" event is
// signaled.
//
Console.WriteLine(
"Downloading \"{0}\" to \"{1}\"...", uri, fileName);
webClient.DownloadFileAsync(uri, fileName);
doneEvent.WaitOne();
}
lock (syncRoot)
{
return (int)exitCode;
}
}
catch (Exception e)
{
//
// NOTE: An exception was caught. Report it via the console
// and return failure.
//
Error(e.ToString(), false);
return (int)ExitCode.Exception;
}
}
#endregion
}
}

View File

@ -1,273 +0,0 @@
@ECHO OFF
::
:: GetTclKit.bat --
::
:: TclKit Download Tool
::
SETLOCAL
REM SET __ECHO=ECHO
REM SET __ECHO2=ECHO
REM SET __ECHO3=ECHO
IF NOT DEFINED _AECHO (SET _AECHO=REM)
IF NOT DEFINED _CECHO (SET _CECHO=REM)
IF NOT DEFINED _VECHO (SET _VECHO=REM)
SET OVERWRITE=^>
IF DEFINED __ECHO SET OVERWRITE=^^^>
SET APPEND=^>^>
IF DEFINED __ECHO SET APPEND=^^^>^^^>
SET PROCESSOR=%1
IF DEFINED PROCESSOR (
CALL :fn_UnquoteVariable PROCESSOR
) ELSE (
GOTO usage
)
%_VECHO% Processor = '%PROCESSOR%'
SET DUMMY2=%2
IF DEFINED DUMMY2 (
GOTO usage
)
SET ROOT=%~dp0\..
SET ROOT=%ROOT:\\=\%
%_VECHO% Root = '%ROOT%'
SET TOOLS=%~dp0
SET TOOLS=%TOOLS:~0,-1%
%_VECHO% Tools = '%TOOLS%'
IF NOT DEFINED windir (
ECHO The windir environment variable must be set first.
GOTO errors
)
%_VECHO% WinDir = '%windir%'
IF NOT DEFINED TEMP (
ECHO The TEMP environment variable must be set first.
GOTO errors
)
%_VECHO% Temp = '%TEMP%'
IF NOT DEFINED TCLKIT_URI (
SET TCLKIT_URI=https://tclsh.com/
)
%_VECHO% TclKitUri = '%TCLKIT_URI%'
IF /I "%PROCESSOR%" == "x86" (
CALL :fn_TclKitX86Variables
) ELSE IF /I "%PROCESSOR%" == "x64" (
CALL :fn_TclKitX64Variables
) ELSE (
GOTO usage
)
%_VECHO% TclKitVersion = '%TCLKIT_VERSION%'
%_VECHO% TclKitPatchLevel = '%TCLKIT_PATCHLEVEL%'
%_VECHO% TclKitNoSdk = '%TCLKIT_NOSDK%'
%_VECHO% TclKitExe = '%TCLKIT_EXE%'
%_VECHO% TclKitLib = '%TCLKIT_LIB%'
%_VECHO% TclKitLibStub = '%TCLKIT_LIB_STUB%'
%_VECHO% TclKitSdk = '%TCLKIT_SDK%'
%_VECHO% TclKitSdkZip = '%TCLKIT_SDK_ZIP%'
%_VECHO% TclKitFiles = '%TCLKIT_FILES%'
CALL :fn_ResetErrorLevel
FOR %%T IN (csc.exe) DO (
SET %%T_PATH=%%~dp$PATH:T
)
%_VECHO% Csc.exe_PATH = '%csc.exe_PATH%'
IF DEFINED csc.exe_PATH (
GOTO skip_addToPath
)
IF DEFINED FRAMEWORKDIR (
REM Use the existing .NET Framework directory...
) ELSE IF EXIST "%windir%\Microsoft.NET\Framework64\v2.0.50727" (
SET FRAMEWORKDIR=%windir%\Microsoft.NET\Framework64\v2.0.50727
) ELSE IF EXIST "%windir%\Microsoft.NET\Framework64\v3.5" (
SET FRAMEWORKDIR=%windir%\Microsoft.NET\Framework64\v3.5
) ELSE IF EXIST "%windir%\Microsoft.NET\Framework64\v4.0.30319" (
SET FRAMEWORKDIR=%windir%\Microsoft.NET\Framework64\v4.0.30319
) ELSE IF EXIST "%windir%\Microsoft.NET\Framework\v2.0.50727" (
SET FRAMEWORKDIR=%windir%\Microsoft.NET\Framework\v2.0.50727
) ELSE IF EXIST "%windir%\Microsoft.NET\Framework\v3.5" (
SET FRAMEWORKDIR=%windir%\Microsoft.NET\Framework\v3.5
) ELSE IF EXIST "%windir%\Microsoft.NET\Framework\v4.0.30319" (
SET FRAMEWORKDIR=%windir%\Microsoft.NET\Framework\v4.0.30319
) ELSE (
ECHO No suitable version of the .NET Framework appears to be installed.
GOTO errors
)
%_VECHO% FrameworkDir = '%FRAMEWORKDIR%'
IF NOT EXIST "%FRAMEWORKDIR%\csc.exe" (
ECHO The file "%FRAMEWORKDIR%\csc.exe" is missing.
GOTO errors
)
SET PATH=%FRAMEWORKDIR%;%PATH%
:skip_addToPath
IF NOT EXIST "%TEMP%\GetFile.exe" (
%__ECHO% csc.exe "/out:%TEMP%\GetFile.exe" /target:exe "%TOOLS%\GetFile.cs"
IF ERRORLEVEL 1 (
ECHO Compilation of "%TOOLS%\GetFile.cs" failed.
GOTO errors
)
)
FOR %%F IN (%TCLKIT_FILES%) DO (
IF NOT EXIST "%TEMP%\%%F" (
%__ECHO% "%TEMP%\GetFile.exe" "%TCLKIT_URI%%%F"
IF ERRORLEVEL 1 (
ECHO Download of "%%F" from "%TCLKIT_URI%" failed.
GOTO errors
)
)
)
IF DEFINED TCLKIT_NOSDK GOTO skip_sdkUnZip
IF NOT EXIST "%TEMP%\%TCLKIT_SDK%" (
%__ECHO% MKDIR "%TEMP%\%TCLKIT_SDK%"
IF ERRORLEVEL 1 (
ECHO Could not create directory "%TEMP%\%TCLKIT_SDK%".
GOTO errors
)
)
%__ECHO% "%TEMP%\unzip.exe" -n "%TEMP%\%TCLKIT_SDK_ZIP%" -d "%TEMP%\%TCLKIT_SDK%"
IF ERRORLEVEL 1 (
ECHO Could not unzip "%TEMP%\%TCLKIT_SDK_ZIP%" to "%TEMP%\%TCLKIT_SDK%".
GOTO errors
)
:skip_sdkUnZip
%__ECHO% ECHO SET TCLSH_CMD=%TEMP%\%TCLKIT_EXE%%OVERWRITE%"%ROOT%\SetTclKitEnv.bat"
IF DEFINED TCLKIT_NOSDK GOTO skip_sdkVariables
%__ECHO% ECHO SET TCLINCDIR=%TEMP%\%TCLKIT_SDK%\include%APPEND%"%ROOT%\SetTclKitEnv.bat"
%__ECHO% ECHO SET TCLLIBDIR=%TEMP%\%TCLKIT_SDK%\lib%APPEND%"%ROOT%\SetTclKitEnv.bat"
%__ECHO% ECHO SET LIBTCLPATH=%TEMP%\%TCLKIT_SDK%\lib%APPEND%"%ROOT%\SetTclKitEnv.bat"
%__ECHO% ECHO SET LIBTCL=%TCLKIT_LIB%%APPEND%"%ROOT%\SetTclKitEnv.bat"
%__ECHO% ECHO SET LIBTCLSTUB=%TCLKIT_LIB_STUB%%APPEND%"%ROOT%\SetTclKitEnv.bat"
:skip_sdkVariables
ECHO.
ECHO Wrote "%ROOT%\SetTclKitEnv.bat".
ECHO Please run it to set the necessary Tcl environment variables.
ECHO.
GOTO no_errors
:fn_TclKitX86Variables
IF NOT DEFINED TCLKIT_PATCHLEVEL (
SET TCLKIT_PATCHLEVEL=8.6.4
)
SET TCLKIT_VERSION=%TCLKIT_PATCHLEVEL:.=%
SET TCLKIT_VERSION=%TCLKIT_VERSION:~0,2%
SET TCLKIT_EXE=tclkit-%TCLKIT_PATCHLEVEL%.exe
SET TCLKIT_LIB=libtclkit%TCLKIT_PATCHLEVEL:.=%.lib
SET TCLKIT_LIB_STUB=libtclstub%TCLKIT_VERSION:.=%.a
SET TCLKIT_SDK=libtclkit-sdk-x86-%TCLKIT_PATCHLEVEL%
SET TCLKIT_SDK_ZIP=%TCLKIT_SDK%.zip
SET TCLKIT_FILES=%TCLKIT_EXE%
IF NOT DEFINED TCLKIT_NOSDK (
SET TCLKIT_FILES=%TCLKIT_FILES% unzip.exe %TCLKIT_SDK_ZIP%
)
GOTO :EOF
:fn_TclKitX64Variables
IF NOT DEFINED TCLKIT_PATCHLEVEL (
REM
REM NOTE: By default, use latest available version of the TclKit SDK
REM for x64. However, the "default" TclKit executable for x86
REM is still used here because it is the only one "well-known"
REM to be available for download.
REM
SET TCLKIT_PATCHLEVEL=8.6.3
SET TCLKIT_EXE=tclkit-8.6.4.exe
) ELSE (
SET TCLKIT_EXE=tclkit-%TCLKIT_PATCHLEVEL%.exe
)
SET TCLKIT_VERSION=%TCLKIT_PATCHLEVEL:.=%
SET TCLKIT_VERSION=%TCLKIT_VERSION:~0,2%
SET TCLKIT_LIB=libtclkit%TCLKIT_PATCHLEVEL:.=%.lib
SET TCLKIT_LIB_STUB=libtclstub%TCLKIT_VERSION:.=%.a
SET TCLKIT_SDK=libtclkit-sdk-x64-%TCLKIT_PATCHLEVEL%
SET TCLKIT_SDK_ZIP=%TCLKIT_SDK%.zip
SET TCLKIT_FILES=%TCLKIT_EXE%
IF NOT DEFINED TCLKIT_NOSDK (
SET TCLKIT_FILES=%TCLKIT_FILES% unzip.exe %TCLKIT_SDK_ZIP%
)
GOTO :EOF
:fn_UnquoteVariable
IF NOT DEFINED %1 GOTO :EOF
SETLOCAL
SET __ECHO_CMD=ECHO %%%1%%
FOR /F "delims=" %%V IN ('%__ECHO_CMD%') DO (
SET VALUE=%%V
)
SET VALUE=%VALUE:"=%
REM "
ENDLOCAL && SET %1=%VALUE%
GOTO :EOF
:fn_ResetErrorLevel
VERIFY > NUL
GOTO :EOF
:fn_SetErrorLevel
VERIFY MAYBE 2> NUL
GOTO :EOF
:usage
ECHO.
ECHO Usage: %~nx0 ^<processor^>
ECHO.
ECHO The only supported values for processor are "x86" and "x64".
GOTO errors
:errors
CALL :fn_SetErrorLevel
ENDLOCAL
ECHO.
ECHO Failure, errors were encountered.
GOTO end_of_file
:no_errors
CALL :fn_ResetErrorLevel
ENDLOCAL
ECHO.
ECHO Success, no errors were encountered.
GOTO end_of_file
:end_of_file
%__ECHO% EXIT /B %ERRORLEVEL%

View File

@ -1,63 +0,0 @@
#!/usr/bin/tclsh
#
# This script appends additional token codes to the end of the
# parse.h file that lemon generates. These extra token codes are
# not used by the parser. But they are used by the tokenizer and/or
# the code generator.
#
#
set in [open [lindex $argv 0] rb]
set max 0
while {![eof $in]} {
set line [gets $in]
if {[regexp {^#define TK_} $line]} {
puts $line
set x [lindex $line 2]
if {$x>$max} {set max $x}
}
}
close $in
# The following are the extra token codes to be added. SPACE and
# ILLEGAL *must* be the last two token codes and they must be in that order.
#
set extras {
TO_TEXT
TO_BLOB
TO_NUMERIC
TO_INT
TO_REAL
ISNOT
END_OF_FILE
UNCLOSED_STRING
FUNCTION
COLUMN
AGG_FUNCTION
AGG_COLUMN
UMINUS
UPLUS
REGISTER
ASTERISK
SPACE
ILLEGAL
}
if {[lrange $extras end-1 end]!="SPACE ILLEGAL"} {
error "SPACE and ILLEGAL must be the last two token codes and they\
must be in that order"
}
foreach x $extras {
incr max
puts [format "#define TK_%-29s %4d" $x $max]
}
# Some additional #defines related to token codes.
#
puts "\n/* The token codes above must all fit in 8 bits */"
puts [format "#define %-20s %-6s" TKFLG_MASK 0xff]
puts "\n/* Flags that can be added to a token code when it is not"
puts "** being stored in a u8: */"
foreach {fg val comment} {
TKFLG_DONTFOLD 0x100 {/* Omit constant folding optimizations */}
} {
puts [format "#define %-20s %-6s %s" $fg $val $comment]
}

View File

@ -1,851 +0,0 @@
@ECHO OFF
::
:: build-all-msvc.bat --
::
:: Multi-Platform Build Tool for MSVC
::
REM
REM This batch script is used to build the SQLite DLL for multiple platforms
REM and configurations using MSVC. The built SQLite DLLs, their associated
REM import libraries, and optionally their symbols files, are placed within
REM the directory specified on the command line, in sub-directories named for
REM their respective platforms and configurations. This batch script must be
REM run from inside a Visual Studio Command Prompt for the desired version of
REM Visual Studio ^(the initial platform configured for the command prompt does
REM not really matter^). Exactly one command line argument is required, the
REM name of an existing directory to be used as the final destination directory
REM for the generated output files, which will be placed in sub-directories
REM created therein. Ideally, the directory specified should be empty.
REM
REM Example:
REM
REM CD /D C:\dev\sqlite\core
REM CALL tool\build-all-msvc.bat C:\Temp
REM
REM In the example above, "C:\dev\sqlite\core" represents the root of the
REM source tree for SQLite and "C:\Temp" represents the final destination
REM directory for the generated output files.
REM
REM Please note that the SQLite build process performed by the Makefile
REM associated with this batch script requires a Tcl shell to be present
REM in a directory contained in the PATH environment variable unless a
REM pre-existing amalgamation file is used.
REM
REM There are several environment variables that may be set to modify the
REM behavior of this batch script and its associated Makefile. The list of
REM platforms to build may be overridden by using the PLATFORMS environment
REM variable, which should contain a list of platforms ^(e.g. x86 x86_amd64
REM x86_arm^). All platforms must be supported by the version of Visual Studio
REM being used. The list of configurations to build may be overridden by
REM setting the CONFIGURATIONS environment variable, which should contain a
REM list of configurations to build ^(e.g. Debug Retail^). Neither of these
REM variable values may contain any double quotes, surrounding or embedded.
REM
REM Finally, the NCRTLIBPATH, NUCRTLIBPATH, and NSDKLIBPATH environment
REM variables may be set to specify the location of the CRT, Universal CRT, and
REM Windows SDK, respectively, that may be needed to compile executables native
REM to the architecture of the build machine during any cross-compilation that
REM may be necessary, depending on the platforms to be built. These values in
REM these three variables should be surrounded by double quotes if they contain
REM spaces.
REM
REM There are a few other environment variables that impact the build process
REM when set ^(to anything^), they are:
REM
REM USE_AUTOCONF_MAKEFILE
REM
REM When set, the "autoconf" Makefile for MSVC will be used instead of the main
REM Makefile for MSVC. It must exist at "%ROOT%\autoconf\Makefile.msc".
REM
REM NOCLEAN
REM
REM When set, the "clean" target will not be used during each build iteration.
REM However, the target binaries, if any, will still be deleted manually prior
REM to being rebuilt. Setting this environment variable is only rarely needed
REM and could cause issues in some circumstances; therefore, setting it is not
REM recommended.
REM
REM NOSYMBOLS
REM
REM When set, copying of symbol files ^(*.pdb^) created during the build will
REM be skipped and they will not appear in the final destination directory.
REM Setting this environment variable is never strictly needed and could cause
REM issues in some circumstances; therefore, setting it is not recommended.
REM
REM NOMEMDEBUG
REM
REM When set, disables use of MEMDEBUG when building binaries for the "Debug"
REM configuration.
REM
REM BUILD_ALL_SHELL
REM
REM When set, the command line shell will be built for each selected platform
REM and configuration as well. In addition, the command line shell binaries
REM will be copied, with their symbols, to the final destination directory.
REM
REM USE_WINV63_NSDKLIBPATH
REM
REM When set, modifies how the NSDKLIBPATH environment variable is built, based
REM on the WindowsSdkDir environment variable. It forces this batch script to
REM assume the Windows 8.1 SDK location should be used.
REM
REM USE_WINV100_NSDKLIBPATH
REM
REM When set, modifies how the NSDKLIBPATH environment variable is built, based
REM on the WindowsSdkDir environment variable. It causes this batch script to
REM assume the Windows 10.0 SDK location should be used.
REM
REM NMAKE_ARGS
REM NMAKE_ARGS_DEBUG
REM NMAKE_ARGS_RETAIL
REM
REM When set, these values are expanded and passed to the NMAKE command line,
REM after its other arguments. These may be used to specify additional NMAKE
REM options, for example:
REM
REM SET NMAKE_ARGS=FOR_WINRT=1
REM SET NMAKE_ARGS_DEBUG=MEMDEBUG=1
REM SET NMAKE_ARGS_RETAIL=WIN32HEAP=1
REM
REM Using the above command before running this tool will cause the compiled
REM binaries to target the WinRT environment, which provides a subset of the
REM Win32 API.
REM
REM DLL_FILE_NAME
REM DLL_PDB_FILE_NAME
REM LIB_FILE_NAME
REM EXE_FILE_NAME
REM EXE_PDB_FILE_NAME
REM
REM When set, these values will override the associated target file name used
REM for the build.
REM
SETLOCAL
REM SET __ECHO=ECHO
REM SET __ECHO2=ECHO
REM SET __ECHO3=ECHO
IF NOT DEFINED _AECHO (SET _AECHO=REM)
IF NOT DEFINED _CECHO (SET _CECHO=REM)
IF NOT DEFINED _VECHO (SET _VECHO=REM)
SET REDIRECT=^>
IF DEFINED __ECHO SET REDIRECT=^^^>
%_AECHO% Running %0 %*
REM SET DFLAGS=/L
%_VECHO% DFlags = '%DFLAGS%'
SET FFLAGS=/V /F /G /H /I /R /Y /Z
%_VECHO% FFlags = '%FFLAGS%'
SET ROOT=%~dp0\..
SET ROOT=%ROOT:\\=\%
%_VECHO% Root = '%ROOT%'
REM
REM NOTE: The first and only argument to this batch file should be the output
REM directory where the platform-specific binary directories should be
REM created.
REM
SET BINARYDIRECTORY=%1
IF NOT DEFINED BINARYDIRECTORY (
GOTO usage
)
%_VECHO% BinaryDirectory = '%BINARYDIRECTORY%'
SET DUMMY=%2
IF DEFINED DUMMY (
GOTO usage
)
REM
REM NOTE: From this point, we need a clean error level. Reset it now.
REM
CALL :fn_ResetErrorLevel
REM
REM NOTE: Change the current directory to the root of the source tree, saving
REM the current directory on the directory stack.
REM
%__ECHO2% PUSHD "%ROOT%"
IF ERRORLEVEL 1 (
ECHO Could not change directory to "%ROOT%".
GOTO errors
)
REM
REM NOTE: This batch file requires the ComSpec environment variable to be set,
REM typically to something like "C:\Windows\System32\cmd.exe".
REM
IF NOT DEFINED ComSpec (
ECHO The ComSpec environment variable must be defined.
GOTO errors
)
REM
REM NOTE: This batch file requires the VcInstallDir environment variable to be
REM set. Typically, this means this batch file needs to be run from an
REM MSVC command prompt.
REM
IF NOT DEFINED VCINSTALLDIR (
ECHO The VCINSTALLDIR environment variable must be defined.
GOTO errors
)
REM
REM NOTE: If the list of platforms is not already set, use the default list.
REM
IF NOT DEFINED PLATFORMS (
SET PLATFORMS=x86 x86_amd64 x86_arm
)
%_VECHO% Platforms = '%PLATFORMS%'
REM
REM NOTE: If the list of configurations is not already set, use the default
REM list.
REM
IF NOT DEFINED CONFIGURATIONS (
SET CONFIGURATIONS=Debug Retail
)
%_VECHO% Configurations = '%CONFIGURATIONS%'
REM
REM NOTE: If the command used to invoke NMAKE is not already set, use the
REM default.
REM
IF NOT DEFINED NMAKE_CMD (
IF DEFINED USE_AUTOCONF_MAKEFILE (
SET NMAKE_CMD=nmake -B -f autoconf\Makefile.msc
) ELSE (
SET NMAKE_CMD=nmake -B -f Makefile.msc
)
)
%_VECHO% NmakeCmd = '%NMAKE_CMD%'
%_VECHO% NmakeArgs = '%NMAKE_ARGS%'
%_VECHO% NmakeArgsDebug = '%NMAKE_ARGS_DEBUG%'
%_VECHO% NmakeArgsRetail = '%NMAKE_ARGS_RETAIL%'
REM
REM NOTE: Setup environment variables to translate between the MSVC platform
REM names and the names to be used for the platform-specific binary
REM directories.
REM
SET amd64_NAME=x64
SET arm_NAME=ARM
SET x64_NAME=x64
SET x86_NAME=x86
SET x86_amd64_NAME=x64
SET x86_arm_NAME=ARM
SET x86_x64_NAME=x64
%_VECHO% amd64_Name = '%amd64_NAME%'
%_VECHO% arm_Name = '%arm_NAME%'
%_VECHO% x64_Name = '%x64_NAME%'
%_VECHO% x86_Name = '%x86_NAME%'
%_VECHO% x86_amd64_Name = '%x86_amd64_NAME%'
%_VECHO% x86_arm_Name = '%x86_arm_NAME%'
%_VECHO% x86_x64_Name = '%x86_x64_NAME%'
REM
REM NOTE: Check for the external tools needed during the build process ^(i.e.
REM those that do not get compiled as part of the build process itself^)
REM along the PATH.
REM
IF DEFINED TCLSH_CMD (
SET TCLSH_FILE=%TCLSH_CMD%
) ELSE (
SET TCLSH_FILE=tclsh85.exe
)
FOR %%T IN (%TCLSH_FILE%) DO (
SET %%T_PATH=%%~dp$PATH:T
)
REM
REM NOTE: A Tcl shell executable is required during the SQLite build process
REM unless a pre-existing amalgamation file is used.
REM
IF NOT DEFINED %TCLSH_FILE%_PATH (
ECHO The Tcl shell executable "%TCLSH_FILE%" is required to be in the PATH.
GOTO errors
)
REM
REM NOTE: Setup the default names for the build targets we are creating. Any
REM ^(or all^) of these may end up being overridden.
REM
IF NOT DEFINED DLL_FILE_NAME (
SET DLL_FILE_NAME=sqlite3.dll
)
IF NOT DEFINED DLL_PDB_FILE_NAME (
SET DLL_PDB_FILE_NAME=sqlite3.pdb
)
IF NOT DEFINED LIB_FILE_NAME (
SET LIB_FILE_NAME=sqlite3.lib
)
IF NOT DEFINED EXE_FILE_NAME (
SET EXE_FILE_NAME=sqlite3.exe
)
IF NOT DEFINED EXE_PDB_FILE_NAME (
SET EXE_PDB_FILE_NAME=sqlite3sh.pdb
)
REM
REM NOTE: Set the TOOLPATH variable to contain all the directories where the
REM external tools were found in the search above.
REM
CALL :fn_CopyVariable %TCLSH_FILE%_PATH TOOLPATH
%_VECHO% ToolPath = '%TOOLPATH%'
REM
REM NOTE: Setting the Windows SDK library path is only required for MSVC
REM 2012, 2013, and 2015.
REM
CALL :fn_UnsetVariable SET_NSDKLIBPATH
REM
REM NOTE: Setting the Universal CRT library path is only required for MSVC
REM 2015.
REM
CALL :fn_UnsetVariable SET_NUCRTLIBPATH
REM
REM NOTE: Check for MSVC 2012, 2013, and 2015 specially because the Windows
REM SDK directory handling is slightly different for those versions.
REM
IF "%VisualStudioVersion%" == "11.0" (
REM
REM NOTE: If the Windows SDK library path has already been set, do not set
REM it to something else later on.
REM
IF NOT DEFINED NSDKLIBPATH (
SET SET_NSDKLIBPATH=1
)
) ELSE IF "%VisualStudioVersion%" == "12.0" (
REM
REM NOTE: If the Windows SDK library path has already been set, do not set
REM it to something else later on.
REM
IF NOT DEFINED NSDKLIBPATH (
SET SET_NSDKLIBPATH=1
)
) ELSE IF "%VisualStudioVersion%" == "14.0" (
REM
REM NOTE: If the Windows SDK library path has already been set, do not set
REM it to something else later on.
REM
IF NOT DEFINED NSDKLIBPATH (
SET SET_NSDKLIBPATH=1
)
REM
REM NOTE: If the Universal CRT library path has already been set, do not set
REM it to something else later on.
REM
IF NOT DEFINED NUCRTLIBPATH (
SET SET_NUCRTLIBPATH=1
)
)
REM
REM NOTE: This is the name of the sub-directory where the UCRT libraries may
REM be found. It is only used when compiling against the UCRT.
REM
IF DEFINED UCRTVersion (
SET NUCRTVER=%UCRTVersion%
) ELSE (
SET NUCRTVER=10.0.10586.0
)
REM
REM NOTE: This is the name of the sub-directory where the Windows 10.0 SDK
REM libraries may be found. It is only used when compiling with the
REM Windows 10.0 SDK.
REM
IF DEFINED WindowsSDKLibVersion (
SET WIN10SDKVER=%WindowsSDKLibVersion:\=%
) ELSE (
SET WIN10SDKVER=%NUCRTVER%
)
REM
REM NOTE: Check if this is the Windows Phone SDK. If so, a different batch
REM file is necessary to setup the build environment. Since the variable
REM values involved here may contain parenthesis, using GOTO instead of
REM an IF block is required.
REM
IF DEFINED WindowsPhoneKitDir GOTO set_vcvarsall_phone
SET VCVARSALL=%VCINSTALLDIR%\vcvarsall.bat
GOTO set_vcvarsall_done
:set_vcvarsall_phone
SET VCVARSALL=%VCINSTALLDIR%\WPSDK\WP80\vcvarsphoneall.bat
:set_vcvarsall_done
SET VCVARSALL=%VCVARSALL:\\=\%
REM
REM NOTE: This is the outer loop. There should be exactly one iteration per
REM platform.
REM
FOR %%P IN (%PLATFORMS%) DO (
REM
REM NOTE: Using the MSVC platform name, lookup the simpler platform name to
REM be used for the name of the platform-specific binary directory via
REM the environment variables setup earlier.
REM
CALL :fn_CopyVariable %%P_NAME PLATFORMNAME
REM
REM NOTE: This is the second loop. There should be exactly one iteration.
REM This loop is necessary because the PlatformName environment
REM variable was set above and that value is needed by some of the
REM commands contained in the inner loop. If these commands were
REM directly contained in the outer loop, the PlatformName environment
REM variable would be stuck with its initial empty value instead.
REM
FOR /F "tokens=2* delims==" %%D IN ('SET PLATFORMNAME') DO (
REM
REM NOTE: Attempt to clean the environment of all variables used by MSVC
REM and/or Visual Studio. This block may need to be updated in the
REM future to account for additional environment variables.
REM
CALL :fn_UnsetVariable CommandPromptType
CALL :fn_UnsetVariable DevEnvDir
CALL :fn_UnsetVariable DNX_HOME
CALL :fn_UnsetVariable ExtensionSdkDir
CALL :fn_UnsetVariable Framework35Version
CALL :fn_UnsetVariable Framework40Version
CALL :fn_UnsetVariable FrameworkDir
CALL :fn_UnsetVariable FrameworkDir32
CALL :fn_UnsetVariable FrameworkVersion
CALL :fn_UnsetVariable FrameworkVersion32
CALL :fn_UnsetVariable FSHARPINSTALLDIR
CALL :fn_UnsetVariable INCLUDE
CALL :fn_UnsetVariable LIB
CALL :fn_UnsetVariable LIBPATH
CALL :fn_UnsetVariable NETFXSDKDir
CALL :fn_UnsetVariable Platform
CALL :fn_UnsetVariable UCRTVersion
CALL :fn_UnsetVariable UniversalCRTSdkDir
REM CALL :fn_UnsetVariable VCINSTALLDIR
CALL :fn_UnsetVariable VSINSTALLDIR
CALL :fn_UnsetVariable WindowsLibPath
CALL :fn_UnsetVariable WindowsPhoneKitDir
CALL :fn_UnsetVariable WindowsSdkDir
CALL :fn_UnsetVariable WindowsSdkDir_35
CALL :fn_UnsetVariable WindowsSdkDir_old
CALL :fn_UnsetVariable WindowsSDKLibVersion
CALL :fn_UnsetVariable WindowsSDKVersion
CALL :fn_UnsetVariable WindowsSDK_ExecutablePath_x86
CALL :fn_UnsetVariable WindowsSDK_ExecutablePath_x64
REM
REM NOTE: Reset the PATH here to the absolute bare minimum required.
REM
SET PATH=%TOOLPATH%;%SystemRoot%\System32;%SystemRoot%
REM
REM NOTE: This is the inner loop. There are normally two iterations, one
REM for each supported build configuration, e.g. Debug or Retail.
REM
FOR %%B IN (%CONFIGURATIONS%) DO (
REM
REM NOTE: When preparing the debug build, set the DEBUG and MEMDEBUG
REM environment variables to be picked up by the MSVC makefile
REM itself.
REM
%_AECHO% Building the %%B configuration for platform %%P with name %%D...
IF /I "%%B" == "Debug" (
REM
REM NOTE: Using this level for the DEBUG environment variable should
REM disable all compiler optimizations and prevent use of the
REM NDEBUG define. Additionally, both SQLITE_ENABLE_API_ARMOR
REM and SQLITE_DEBUG defines should be enabled.
REM
SET DEBUG=3
REM
REM NOTE: Setting this to non-zero should enable the SQLITE_MEMDEBUG
REM define.
REM
IF NOT DEFINED NOMEMDEBUG (
SET MEMDEBUG=1
)
) ELSE (
CALL :fn_UnsetVariable DEBUG
CALL :fn_UnsetVariable MEMDEBUG
)
REM
REM NOTE: Copy the extra NMAKE arguments for this configuration into the
REM common variable used by the actual commands.
REM
CALL :fn_CopyVariable NMAKE_ARGS_%%B NMAKE_ARGS_CFG
REM
REM NOTE: Launch a nested command shell to perform the following steps:
REM
REM 1. Setup the MSVC environment for this platform using the
REM official batch file.
REM
REM 2. Make sure that no stale build output files are present.
REM
REM 3. Build the "sqlite3.dll" and "sqlite3.lib" binaries for this
REM platform.
REM
REM 4. Copy the "sqlite3.dll" and "sqlite3.lib" binaries for this
REM platform to the platform-specific directory beneath the
REM binary directory.
REM
REM 5. Unless prevented from doing so, copy the "sqlite3.pdb"
REM symbols file for this platform to the platform-specific
REM directory beneath the binary directory.
REM
"%ComSpec%" /C (
REM
REM NOTE: Attempt to setup the MSVC environment for this platform.
REM
%__ECHO3% CALL "%VCVARSALL%" %%P
IF ERRORLEVEL 1 (
ECHO Failed to call "%VCVARSALL%" for platform %%P.
GOTO errors
)
REM
REM NOTE: If this batch file is not running in "what-if" mode, check to
REM be sure we were actually able to setup the MSVC environment
REM as current versions of their official batch file do not set
REM the exit code upon failure.
REM
IF NOT DEFINED __ECHO3 (
IF NOT DEFINED WindowsPhoneKitDir (
IF NOT DEFINED WindowsSdkDir (
ECHO Cannot build, Windows SDK not found for platform %%P.
GOTO errors
)
)
)
REM
REM NOTE: When using MSVC 2012, 2013, or 2015, the native SDK path
REM cannot simply be the "lib" sub-directory beneath the location
REM specified in the WindowsSdkDir environment variable because
REM that location does not actually contain the necessary library
REM files for x86. This must be done for each iteration because
REM it relies upon the WindowsSdkDir environment variable being
REM set by the batch file used to setup the MSVC environment.
REM
IF DEFINED SET_NSDKLIBPATH (
REM
REM NOTE: The Windows Phone SDK has a slightly different directory
REM structure and must be handled specially here.
REM
IF DEFINED WindowsPhoneKitDir (
CALL :fn_CopyVariable WindowsPhoneKitDir NSDKLIBPATH
CALL :fn_AppendVariable NSDKLIBPATH \lib\x86
) ELSE IF DEFINED WindowsSdkDir (
CALL :fn_CopyVariable WindowsSdkDir NSDKLIBPATH
REM
REM NOTE: The Windows 8.x and Windows 10.0 SDKs have slightly
REM different directory naming conventions.
REM
IF DEFINED USE_WINV100_NSDKLIBPATH (
CALL :fn_AppendVariable NSDKLIBPATH \..\10\lib\%WIN10SDKVER%\um\x86
CALL :fn_CopyVariable WindowsSdkDir PSDKLIBPATH
CALL :fn_AppendVariable PSDKLIBPATH lib\%WIN10SDKVER%\um\%%D
) ELSE IF DEFINED USE_WINV63_NSDKLIBPATH (
CALL :fn_AppendVariable NSDKLIBPATH \lib\winv6.3\um\x86
) ELSE IF "%VisualStudioVersion%" == "12.0" (
CALL :fn_AppendVariable NSDKLIBPATH \..\8.0\lib\win8\um\x86
) ELSE IF "%VisualStudioVersion%" == "14.0" (
CALL :fn_AppendVariable NSDKLIBPATH \..\8.0\lib\win8\um\x86
) ELSE (
CALL :fn_AppendVariable NSDKLIBPATH \lib\win8\um\x86
)
)
)
REM
REM NOTE: When using MSVC 2015, setting the Universal CRT library path
REM for x86 may be required as well. This must also be done for
REM each iteration because it relies upon the UniversalCRTSdkDir
REM environment variable being set by the batch file used to
REM setup the MSVC environment.
REM
IF DEFINED SET_NUCRTLIBPATH (
IF DEFINED UniversalCRTSdkDir (
CALL :fn_CopyVariable UniversalCRTSdkDir NUCRTLIBPATH
CALL :fn_AppendVariable NUCRTLIBPATH \lib\%NUCRTVER%\ucrt\x86
)
)
REM
REM NOTE: Unless prevented from doing so, invoke NMAKE with the MSVC
REM makefile to clean any stale build output from previous
REM iterations of this loop and/or previous runs of this batch
REM file, etc.
REM
IF NOT DEFINED NOCLEAN (
CALL :fn_MakeClean %%D
IF ERRORLEVEL 1 (
ECHO Failed to clean for platform %%P.
GOTO errors
)
) ELSE (
REM
REM NOTE: Even when the cleaning step has been disabled, we still
REM need to remove the build output for all the files we are
REM specifically wanting to build for each platform.
REM
%_AECHO% Cleaning final core library output files only...
%__ECHO% DEL /Q *.lo "%DLL_FILE_NAME%" "%LIB_FILE_NAME%" "%DLL_PDB_FILE_NAME%" 2%REDIRECT% NUL
)
REM
REM NOTE: Call NMAKE with the MSVC makefile to build the "sqlite3.dll"
REM binary. The x86 compiler will be used to compile the native
REM command line tools needed during the build process itself.
REM Also, disable looking for and/or linking to the native Tcl
REM runtime library.
REM
CALL :fn_MakeDll %%D
IF ERRORLEVEL 1 (
ECHO Failed to build %%B "%DLL_FILE_NAME%" for platform %%P.
GOTO errors
)
REM
REM NOTE: Copy the "sqlite3.dll" file to the appropriate directory for
REM the build and platform beneath the binary directory.
REM
%__ECHO% XCOPY "%DLL_FILE_NAME%" "%BINARYDIRECTORY%\%%B\%%D\" %FFLAGS% %DFLAGS%
IF ERRORLEVEL 1 (
ECHO Failed to copy "%DLL_FILE_NAME%" to "%BINARYDIRECTORY%\%%B\%%D\".
GOTO errors
)
REM
REM NOTE: Copy the "sqlite3.lib" file to the appropriate directory for
REM the build and platform beneath the binary directory.
REM
%__ECHO% XCOPY "%LIB_FILE_NAME%" "%BINARYDIRECTORY%\%%B\%%D\" %FFLAGS% %DFLAGS%
IF ERRORLEVEL 1 (
ECHO Failed to copy "%LIB_FILE_NAME%" to "%BINARYDIRECTORY%\%%B\%%D\".
GOTO errors
)
REM
REM NOTE: Copy the "sqlite3.pdb" file to the appropriate directory for
REM the build and platform beneath the binary directory unless we
REM are prevented from doing so.
REM
IF NOT DEFINED NOSYMBOLS (
%__ECHO% XCOPY "%DLL_PDB_FILE_NAME%" "%BINARYDIRECTORY%\%%B\%%D\" %FFLAGS% %DFLAGS%
IF ERRORLEVEL 1 (
ECHO Failed to copy "%DLL_PDB_FILE_NAME%" to "%BINARYDIRECTORY%\%%B\%%D\".
GOTO errors
)
)
REM
REM NOTE: If requested, also build the shell executable.
REM
IF DEFINED BUILD_ALL_SHELL (
REM
REM NOTE: If necessary, make sure any previous build output for the
REM shell executable is deleted.
REM
IF DEFINED NOCLEAN (
REM
REM NOTE: Even when the cleaning step has been disabled, we still
REM need to remove the build output for all the files we are
REM specifically wanting to build for each platform.
REM
%_AECHO% Cleaning final shell executable output files only...
%__ECHO% DEL /Q "%EXE_FILE_NAME%" "%EXE_PDB_FILE_NAME%" 2%REDIRECT% NUL
)
REM
REM NOTE: Call NMAKE with the MSVC makefile to build the "sqlite3.exe"
REM binary. The x86 compiler will be used to compile the native
REM command line tools needed during the build process itself.
REM Also, disable looking for and/or linking to the native Tcl
REM runtime library.
REM
CALL :fn_MakeExe %%D
IF ERRORLEVEL 1 (
ECHO Failed to build %%B "%EXE_FILE_NAME%" for platform %%P.
GOTO errors
)
REM
REM NOTE: Copy the "sqlite3.exe" file to the appropriate directory
REM for the build and platform beneath the binary directory.
REM
%__ECHO% XCOPY "%EXE_FILE_NAME%" "%BINARYDIRECTORY%\%%B\%%D\" %FFLAGS% %DFLAGS%
IF ERRORLEVEL 1 (
ECHO Failed to copy "%EXE_FILE_NAME%" to "%BINARYDIRECTORY%\%%B\%%D\".
GOTO errors
)
REM
REM NOTE: Copy the "sqlite3sh.pdb" file to the appropriate directory
REM for the build and platform beneath the binary directory
REM unless we are prevented from doing so.
REM
IF NOT DEFINED NOSYMBOLS (
%__ECHO% XCOPY "%EXE_PDB_FILE_NAME%" "%BINARYDIRECTORY%\%%B\%%D\" %FFLAGS% %DFLAGS%
IF ERRORLEVEL 1 (
ECHO Failed to copy "%EXE_PDB_FILE_NAME%" to "%BINARYDIRECTORY%\%%B\%%D\".
GOTO errors
)
)
)
)
)
)
REM
REM NOTE: Handle any errors generated during the nested command shell.
REM
IF ERRORLEVEL 1 (
GOTO errors
)
)
REM
REM NOTE: Restore the saved current directory from the directory stack.
REM
%__ECHO2% POPD
IF ERRORLEVEL 1 (
ECHO Could not restore directory.
GOTO errors
)
REM
REM NOTE: If we get to this point, we have succeeded.
REM
GOTO no_errors
REM
REM NOTE: Invoke NMAKE with the MSVC makefile to clean all build output for
REM       the platform named by the first argument.
REM
:fn_MakeClean
%__ECHO% %NMAKE_CMD% clean "PLATFORM=%1" XCOMPILE=1 USE_NATIVE_LIBPATHS=1 NO_TCL=1 %NMAKE_ARGS% %NMAKE_ARGS_CFG%
GOTO :EOF
REM
REM NOTE: Invoke NMAKE with the MSVC makefile to build the core library for
REM       the platform named by the first argument.
REM
:fn_MakeDll
%__ECHO% %NMAKE_CMD% "%DLL_FILE_NAME%" "PLATFORM=%1" XCOMPILE=1 USE_NATIVE_LIBPATHS=1 NO_TCL=1 %NMAKE_ARGS% %NMAKE_ARGS_CFG%
GOTO :EOF
REM
REM NOTE: Invoke NMAKE with the MSVC makefile to build the shell executable
REM       for the platform named by the first argument.
REM
:fn_MakeExe
%__ECHO% %NMAKE_CMD% "%EXE_FILE_NAME%" "PLATFORM=%1" XCOMPILE=1 USE_NATIVE_LIBPATHS=1 NO_TCL=1 %NMAKE_ARGS% %NMAKE_ARGS_CFG%
GOTO :EOF
REM
REM NOTE: Display the value of the variable named by the second argument,
REM       labeled with the first argument, unless the variable is unset.
REM       The FOR /F over an ECHO command is used to expand the variable
REM       whose name is itself held in a variable.
REM
:fn_ShowVariable
SETLOCAL
SET __ECHO_CMD=ECHO %%%2%%
FOR /F "delims=" %%V IN ('%__ECHO_CMD%') DO (
IF NOT "%%V" == "" (
IF NOT "%%V" == "%%%2%%" (
%_VECHO% %1 = '%%V'
)
)
)
ENDLOCAL
GOTO :EOF
REM
REM NOTE: Reset ERRORLEVEL to zero; VERIFY with no bad argument succeeds.
REM
:fn_ResetErrorLevel
VERIFY > NUL
GOTO :EOF
REM
REM NOTE: Force ERRORLEVEL to non-zero; VERIFY rejects the bogus argument.
REM
:fn_SetErrorLevel
VERIFY MAYBE 2> NUL
GOTO :EOF
REM
REM NOTE: Copy the value of the variable named by the first argument into
REM       the variable named by the second argument.  The value is captured
REM       inside SETLOCAL and tunneled out on the ENDLOCAL line, which works
REM       because percent-expansion of that line happens before it executes.
REM
:fn_CopyVariable
IF NOT DEFINED %1 GOTO :EOF
IF "%2" == "" GOTO :EOF
SETLOCAL
SET __ECHO_CMD=ECHO %%%1%%
FOR /F "delims=" %%V IN ('%__ECHO_CMD%') DO (
SET VALUE=%%V
)
ENDLOCAL && SET %2=%VALUE%
GOTO :EOF
REM
REM NOTE: Unset the variable named by the first argument.  The name is held
REM       in VALUE inside SETLOCAL; because the whole IF block is percent-
REM       expanded before it runs, "SET %VALUE%=" still refers to the target
REM       name even though VALUE is cleared and ENDLOCAL has executed.
REM
:fn_UnsetVariable
SETLOCAL
SET VALUE=%1
IF DEFINED VALUE (
SET VALUE=
ENDLOCAL
SET %VALUE%=
) ELSE (
ENDLOCAL
)
CALL :fn_ResetErrorLevel
GOTO :EOF
REM
REM NOTE: Append the literal text of the second argument to the variable
REM       named by the first argument, or initialize that variable when it
REM       is not yet defined.  The %~2 form strips surrounding quotes.
REM
:fn_AppendVariable
SET __ECHO_CMD=ECHO %%%1%%
IF DEFINED %1 (
FOR /F "delims=" %%V IN ('%__ECHO_CMD%') DO (
SET %1=%%V%~2
)
) ELSE (
SET %1=%~2
)
SET __ECHO_CMD=
CALL :fn_ResetErrorLevel
GOTO :EOF
REM
REM NOTE: Display the command line usage for this tool, then bail out.
REM
:usage
ECHO.
ECHO Usage: %~nx0 ^<binaryDirectory^>
ECHO.
GOTO errors
REM
REM NOTE: Failure exit path: force a non-zero ERRORLEVEL.
REM
:errors
CALL :fn_SetErrorLevel
ENDLOCAL
ECHO.
ECHO Failure, errors were encountered.
GOTO end_of_file
REM
REM NOTE: Success exit path: make sure ERRORLEVEL is zero.
REM
:no_errors
CALL :fn_ResetErrorLevel
ENDLOCAL
ECHO.
ECHO Success, no errors were encountered.
GOTO end_of_file
:end_of_file
%__ECHO% EXIT /B %ERRORLEVEL%

View File

@ -1,22 +0,0 @@
#!/bin/sh
#
# Demonstration of a full-featured build of the sqlite3 command-line
# shell on Linux.
#
# The SQLite sources are expected in a sibling directory named "sqlite"
# (for example: sources in ~/sqlite/sqlite, this script run from
# ~/sqlite/bld).  An appropriate Makefile must exist in the current
# directory as well.
#
# First generate the amalgamation, then compile the shell against it.
make sqlite3.c
OPTS="-DSQLITE_THREADSAFE=0"
OPTS="$OPTS -DSQLITE_ENABLE_VFSTRACE"
OPTS="$OPTS -DSQLITE_ENABLE_STAT3"
OPTS="$OPTS -DSQLITE_ENABLE_FTS4"
OPTS="$OPTS -DSQLITE_ENABLE_RTREE"
OPTS="$OPTS -DHAVE_READLINE"
OPTS="$OPTS -DHAVE_USLEEP=1"
gcc -o sqlite3 -g -Os -I. $OPTS \
  ../sqlite/src/shell.c \
  ../sqlite/src/test_vfstrace.c \
  sqlite3.c -ldl -lreadline -lncurses

View File

@ -1,24 +0,0 @@
#!/usr/bin/tclsh
#
# A wrapper around cg_annotate that sets appropriate command-line options
# and rearranges the output so that annotated files occur in a consistent
# sorted order.  Used by the run-speed-test.tcl script.
#
# Launch cg_annotate as a read pipeline, passing our own arguments along
# (typically the callgrind output file to annotate).
set in [open "|cg_annotate --show=Ir --auto=yes --context=40 $argv" r]
# Output is accumulated per annotated source file in the out() array;
# the "!" key collects preamble text seen before any file section.
set dest !
set out(!) {}
while {![eof $in]} {
# Normalize tabs to single spaces before further processing.
set line [string map {\t { }} [gets $in]]
if {[regexp {^-- Auto-annotated source: (.*)} $line all name]} {
# Start of a new per-file section: switch the accumulation key.
set dest $name
} elseif {[regexp {^-- line \d+ ------} $line]} {
# NOTE(review): this treats the text line as a Tcl list in order to
# replace its third word with "#"; it assumes the separator line is
# list-well-formed -- confirm it can never contain unbalanced braces.
set line [lreplace $line 2 2 {#}]
} elseif {[regexp {^The following files chosen for } $line]} {
# Trailing summary text goes back into the preamble bucket.
set dest !
}
append out($dest) $line\n
}
# Emit the preamble first ("!" sorts before file names), then each
# annotated file in sorted order, for stable and diffable output.
foreach x [lsort [array names out]] {
puts $out($x)
}

View File

@ -1,84 +0,0 @@
/*
** This program checks for formatting problems in source code:
**
** * Any use of tab characters
** * White space at the end of a line
** * Blank lines at the end of a file
**
** Any violations are reported.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define CR_OK 0x001
#define WSEOL_OK 0x002
/*
** Scan the file named zFile and report formatting problems on standard
** output:  tab characters, carriage-return characters (unless CR_OK is
** set in flags), whitespace at end-of-line (unless WSEOL_OK is set),
** and blank lines at the end of the file.
*/
static void checkSpacing(const char *zFile, unsigned flags){
  FILE *pIn = fopen(zFile, "rb");
  char zLine[2000];       /* One line of input text */
  int lineno = 0;         /* Current line number */
  int lastNonblank = 0;   /* Last line containing a non-space character */
  int reportCR = (flags & CR_OK)==0;    /* True to complain about \r */
  int reportWS = (flags & WSEOL_OK)==0; /* True to complain about EOL spaces */

  if( pIn==0 ){
    printf("cannot open %s\n", zFile);
    return;
  }
  while( fgets(zLine, sizeof(zLine), pIn) ){
    int sawSpace = 0;  /* True if the most recent character was a space */
    int sawTab = 0;    /* True once a tab has been reported on this line */
    int i;
    lineno++;
    for(i=0; zLine[i]; i++){
      char c = zLine[i];
      if( c=='\t' && sawTab==0 ){
        /* Only the first tab on each line is reported */
        printf("%s:%d: tab (\\t) character\n", zFile, lineno);
        sawTab = 1;
      }else if( c=='\r' ){
        if( reportCR ){
          printf("%s:%d: carriage-return (\\r) character\n", zFile, lineno);
        }
      }else if( c==' ' ){
        sawSpace = 1;
      }else if( c!='\n' ){
        lastNonblank = lineno;
        sawSpace = 0;
      }
    }
    if( sawSpace && reportWS ){
      printf("%s:%d: whitespace at end-of-line\n", zFile, lineno);
    }
  }
  fclose(pIn);
  if( lastNonblank<lineno ){
    printf("%s:%d: blank lines at end of file (%d)\n",
           zFile, lineno, lineno - lastNonblank);
  }
}
/*
** Command-line entry point.  Arguments beginning with '-' adjust the
** checking flags; every other argument is a file to scan.  The default
** is to tolerate trailing whitespace (WSEOL_OK set) and to report
** carriage-returns.
*/
int main(int argc, char **argv){
  unsigned flags = WSEOL_OK;  /* Current flag settings */
  int n;
  for(n=1; n<argc; n++){
    const char *zArg = argv[n];
    if( zArg[0]!='-' ){
      /* Not an option: scan this file with the current flags */
      checkSpacing(argv[n], flags);
      continue;
    }
    while( zArg[0]=='-' ) zArg++;   /* Accept "-opt" and "--opt" alike */
    if( strcmp(zArg,"crok")==0 ){
      flags |= CR_OK;               /* Stop reporting carriage-returns */
    }else if( strcmp(zArg, "wseol")==0 ){
      flags &= ~WSEOL_OK;           /* Start reporting trailing whitespace */
    }else if( strcmp(zArg, "help")==0 ){
      printf("Usage: %s [options] FILE ...\n", argv[0]);
      printf(" --crok Do not report on carriage-returns\n");
      printf(" --wseol Complain about whitespace at end-of-line\n");
      printf(" --help This message\n");
    }else{
      printf("unknown command-line option: [%s]\n", argv[n]);
      printf("use --help for additional information\n");
    }
  }
  return 0;
}

View File

@ -1,46 +0,0 @@
/*
** Extract a range of bytes from a file.
**
** Usage:
**
** extract FILENAME OFFSET AMOUNT
**
** The bytes are written to standard output.
*/
#include <stdio.h>
#include <stdlib.h>
/*
** Command-line entry point:  extract FILENAME OFFSET AMOUNT
**
** Reads AMOUNT bytes starting at byte OFFSET of FILENAME and writes
** them to standard output.  Returns 0 on success, 1 on any error.
*/
int main(int argc, char **argv){
  FILE *f;          /* The input file */
  char *zBuf;       /* Buffer holding the extracted bytes */
  int ofst;         /* Byte offset at which extraction begins */
  int n;            /* Number of bytes to extract */
  size_t got;       /* Number of bytes actually read */
  if( argc!=4 ){
    fprintf(stderr, "Usage: %s FILENAME OFFSET AMOUNT\n", *argv);
    return 1;
  }
  f = fopen(argv[1], "rb");
  if( f==0 ){
    fprintf(stderr, "cannot open \"%s\"\n", argv[1]);
    return 1;
  }
  ofst = atoi(argv[2]);
  n = atoi(argv[3]);
  /* Reject nonsense sizes up front: a negative or zero AMOUNT would
  ** previously flow into malloc()/fread() with undefined results. */
  if( ofst<0 || n<=0 ){
    fprintf(stderr, "OFFSET must be non-negative and AMOUNT positive\n");
    fclose(f);
    return 1;
  }
  zBuf = malloc( n );
  if( zBuf==0 ){
    fprintf(stderr, "out of memory\n");
    fclose(f);
    return 1;
  }
  if( fseek(f, ofst, SEEK_SET)!=0 ){
    /* fseek was previously unchecked; a failed seek silently read
    ** from the wrong position. */
    fprintf(stderr, "cannot seek to offset %d\n", ofst);
    free(zBuf);
    fclose(f);
    return 1;
  }
  got = fread(zBuf, 1, n, f);
  fclose(f);
  if( got<(size_t)n ){
    /* Cast: passing a size_t to %d is undefined behavior on LP64 */
    fprintf(stderr, "got only %d of %d bytes\n", (int)got, n);
    free(zBuf);
    return 1;
  }
  fwrite(zBuf, 1, n, stdout);
  free(zBuf);
  return 0;
}

View File

@ -1,234 +0,0 @@
/*
** 2013-10-01
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
**
** This program implements a high-speed version of the VACUUM command.
** It repacks an SQLite database to remove as much unused space as
** possible and to relocate content sequentially in the file.
**
** This program runs faster and uses less temporary disk space than the
** built-in VACUUM command. On the other hand, this program has a number
** of important restrictions relative to the built-in VACUUM command.
**
** (1) The caller must ensure that no other processes are accessing the
** database file while the vacuum is taking place. The usual SQLite
** file locking is insufficient for this. The caller must use
** external means to make sure only this one routine is reading and
** writing the database.
**
** (2) Database reconfiguration such as page size or auto_vacuum changes
** are not supported by this utility.
**
** (3) The database file might be renamed if a power loss or crash
** occurs at just the wrong moment. Recovery must be prepared
** to deal with the possibly changed filename.
**
** This program is intended as a *Demonstration Only*. The intent of this
** program is to provide example code that application developers can use
** when creating similar functionality in their applications.
**
** To compile this program:
**
** cc fast_vacuum.c sqlite3.c
**
** Add whatever linker options are required. (Example: "-ldl -lpthread").
** Then to run the program:
**
** ./a.out file-to-vacuum
**
*/
#include "sqlite3.h"
#include <stdio.h>
#include <stdlib.h>
/*
** Finalize a prepared statement. If an error has occurred, print the
** error message and exit.
*/
/*
** Finalize prepared statement pStmt.  If the finalize reports an
** error, print the database error message to standard error and
** terminate the process.
*/
static void vacuumFinalize(sqlite3_stmt *pStmt){
  sqlite3 *dbHandle = sqlite3_db_handle(pStmt);  /* Owner of pStmt */
  int status = sqlite3_finalize(pStmt);
  if( status==0 ) return;                        /* SQLITE_OK */
  fprintf(stderr, "finalize error: %s\n", sqlite3_errmsg(dbHandle));
  exit(1);
}
/*
** Execute zSql on database db. The SQL text is printed to standard
** output. If an error occurs, print an error message and exit the
** process.
*/
/*
** Run the single SQL statement zSql against db, echoing it to standard
** output first.  A NULL zSql (an OOM result from sqlite3_mprintf() in
** the caller) or any prepare error is fatal.
*/
static void execSql(sqlite3 *db, const char *zSql){
  sqlite3_stmt *pStmt = 0;
  if( zSql==0 ){
    fprintf(stderr, "out of memory!\n");
    exit(1);
  }
  printf("%s;\n", zSql);
  if( sqlite3_prepare(db, zSql, -1, &pStmt, 0)!=SQLITE_OK ){
    fprintf(stderr, "Error: %s\n", sqlite3_errmsg(db));
    exit(1);
  }
  /* The step result is not checked here; errors are detected by the
  ** finalize call, which exits on failure. */
  sqlite3_step(pStmt);
  vacuumFinalize(pStmt);
}
/*
** Execute zSql on database db. The zSql statement returns exactly
** one column. Execute this return value as SQL on the same database.
**
** The zSql statement is printed on standard output prior to being
** run. If any errors occur, an error is printed and the process
** exits.
*/
/*
** Run zSql against db.  The statement must return a single text
** column; each returned row is itself executed as SQL via execSql().
** The original statement is echoed to standard output and any error
** terminates the process.
*/
static void execExecSql(sqlite3 *db, const char *zSql){
  sqlite3_stmt *pStmt = 0;
  printf("%s;\n", zSql);
  if( sqlite3_prepare(db, zSql, -1, &pStmt, 0)!=SQLITE_OK ){
    fprintf(stderr, "Error: %s\n", sqlite3_errmsg(db));
    exit(1);
  }
  while( sqlite3_step(pStmt)==SQLITE_ROW ){
    /* Each row's first column is a generated SQL statement to run */
    execSql(db, (char*)sqlite3_column_text(pStmt, 0));
  }
  vacuumFinalize(pStmt);
}
/*
** Command-line entry point:  fast_vacuum DATABASE
**
** Vacuums the named database by mirroring its schema and content into
** a freshly created temporary database, then swapping filenames.  Each
** SQL statement executed is echoed to standard output so the run can
** be audited.  Returns 0 on success, 1 on a usage or open error; all
** later errors abort via execSql()/execExecSql().
*/
int main(int argc, char **argv){
sqlite3 *db; /* Connection to the database file */
int rc; /* Return code from SQLite interface calls */
sqlite3_uint64 r; /* A random number */
const char *zDbToVacuum; /* Database to be vacuumed */
char *zBackupDb; /* Backup copy of the original database */
char *zTempDb; /* Temporary database */
char *zSql; /* An SQL statement */
if( argc!=2 ){
fprintf(stderr, "Usage: %s DATABASE\n", argv[0]);
return 1;
}
/* Identify the database file to be vacuumed and open it.
*/
zDbToVacuum = argv[1];
printf("-- open database file \"%s\"\n", zDbToVacuum);
rc = sqlite3_open(zDbToVacuum, &db);
if( rc ){
fprintf(stderr, "%s: %s\n", zDbToVacuum, sqlite3_errstr(rc));
return 1;
}
/* Create names for two other files.  zTempDb will be a new database
** into which we construct a vacuumed copy of zDbToVacuum.  zBackupDb
** will be a new name for zDbToVacuum after it is vacuumed.  A random
** suffix makes collisions with existing files unlikely.
*/
sqlite3_randomness(sizeof(r), &r);
zTempDb = sqlite3_mprintf("%s-vacuum-%016llx", zDbToVacuum, r);
zBackupDb = sqlite3_mprintf("%s-backup-%016llx", zDbToVacuum, r);
/* Attach the zTempDb database to the database connection.
*/
zSql = sqlite3_mprintf("ATTACH '%q' AS vacuum_db;", zTempDb);
execSql(db, zSql);
sqlite3_free(zSql);
/* TODO:
** Set the page_size and auto_vacuum mode for zTempDb here, if desired.
*/
/* The vacuum will occur inside of a transaction.  Set writable_schema
** to ON so that we can directly update the sqlite_master table in the
** zTempDb database.
*/
execSql(db, "PRAGMA writable_schema=ON");
execSql(db, "BEGIN");
/* Query the schema of the main database.  Create a mirror schema
** in the temporary database.  substr() strips the leading
** "CREATE TABLE "/"CREATE INDEX " text so the vacuum_db. prefix can
** be spliced in.
*/
execExecSql(db,
"SELECT 'CREATE TABLE vacuum_db.' || substr(sql,14) "
" FROM sqlite_master WHERE type='table' AND name!='sqlite_sequence'"
" AND rootpage>0"
);
execExecSql(db,
"SELECT 'CREATE INDEX vacuum_db.' || substr(sql,14)"
" FROM sqlite_master WHERE sql LIKE 'CREATE INDEX %'"
);
execExecSql(db,
"SELECT 'CREATE UNIQUE INDEX vacuum_db.' || substr(sql,21) "
" FROM sqlite_master WHERE sql LIKE 'CREATE UNIQUE INDEX %'"
);
/* Loop through the tables in the main database.  For each, do
** an "INSERT INTO vacuum_db.xxx SELECT * FROM main.xxx;" to copy
** the contents to the temporary database.
*/
execExecSql(db,
"SELECT 'INSERT INTO vacuum_db.' || quote(name) "
"|| ' SELECT * FROM main.' || quote(name) "
"FROM main.sqlite_master "
"WHERE type = 'table' AND name!='sqlite_sequence' "
" AND rootpage>0"
);
/* Copy over the sequence table
*/
execExecSql(db,
"SELECT 'DELETE FROM vacuum_db.' || quote(name) "
"FROM vacuum_db.sqlite_master WHERE name='sqlite_sequence'"
);
execExecSql(db,
"SELECT 'INSERT INTO vacuum_db.' || quote(name) "
"|| ' SELECT * FROM main.' || quote(name) "
"FROM vacuum_db.sqlite_master WHERE name=='sqlite_sequence'"
);
/* Copy the triggers, views, and virtual tables from the main database
** over to the temporary database.  None of these objects has any
** associated storage, so all we have to do is copy their entries
** from the SQLITE_MASTER table.
*/
execSql(db,
"INSERT INTO vacuum_db.sqlite_master "
" SELECT type, name, tbl_name, rootpage, sql"
" FROM main.sqlite_master"
" WHERE type='view' OR type='trigger'"
" OR (type='table' AND rootpage=0)"
);
/* Commit the transaction and close the database
*/
execSql(db, "COMMIT");
printf("-- close database\n");
sqlite3_close(db);
/* At this point, zDbToVacuum is unchanged.  zTempDb contains a
** vacuumed copy of zDbToVacuum.  Rearrange filenames so that
** zTempDb becomes the new zDbToVacuum.
**
** NOTE(review): a crash between the two rename() calls leaves the
** database only under the backup name; recovery must be prepared to
** handle that, per restriction (3) in the file header.  The rename()
** return values are not checked here.
*/
printf("-- rename \"%s\" to \"%s\"\n", zDbToVacuum, zBackupDb);
rename(zDbToVacuum, zBackupDb);
printf("-- rename \"%s\" to \"%s\"\n", zTempDb, zDbToVacuum);
rename(zTempDb, zDbToVacuum);
/* Release allocated memory */
sqlite3_free(zTempDb);
sqlite3_free(zBackupDb);
return 0;
}

View File

@ -1,149 +0,0 @@
# Run this TCL script using "testfixture" to get a report that shows
# the sequence of database pages used by a particular table or index.
# This information is used for fragmentation analysis.
#
# Get the name of the database to analyze
#
if {[llength $argv]!=2} {
puts stderr "Usage: $argv0 database-name table-or-index-name"
exit 1
}
set file_to_analyze [lindex $argv 0]
if {![file exists $file_to_analyze]} {
puts stderr "No such file: $file_to_analyze"
exit 1
}
if {![file readable $file_to_analyze]} {
puts stderr "File is not readable: $file_to_analyze"
exit 1
}
if {[file size $file_to_analyze]<512} {
puts stderr "Empty or malformed database: $file_to_analyze"
exit 1
}
set objname [lindex $argv 1]
# Open the database
#
sqlite3 db [lindex $argv 0]
set DB [btree_open [lindex $argv 0] 1000 0]
# This proc is a wrapper around the btree_cursor_info command. The
# second argument is an open btree cursor returned by [btree_cursor].
# The first argument is the name of an array variable that exists in
# the scope of the caller. If the third argument is non-zero, then
# info is returned for the page that lies $up entries upwards in the
# tree-structure. (i.e. $up==1 returns the parent page, $up==2 the
# grandparent etc.)
#
# The following entries in that array are filled in with information retrieved
# using [btree_cursor_info]:
#
# $arrayvar(page_no) = The page number
# $arrayvar(entry_no) = The entry number
# $arrayvar(page_entries) = Total number of entries on this page
# $arrayvar(cell_size) = Cell size (local payload + header)
# $arrayvar(page_freebytes) = Number of free bytes on this page
# $arrayvar(page_freeblocks) = Number of free blocks on the page
# $arrayvar(payload_bytes) = Total payload size (local + overflow)
# $arrayvar(header_bytes) = Header size in bytes
# $arrayvar(local_payload_bytes) = Local payload size
# $arrayvar(parent) = Parent page number
#
proc cursor_info {arrayvar csr {up 0}} {
upvar $arrayvar a
# Single-iteration foreach: a Tcl idiom for parallel assignment.  The
# eleven values returned by [btree_cursor_info] are distributed into
# the corresponding elements of the caller's array, and the trailing
# "break" stops the loop after that one pass.
foreach [list a(page_no) \
a(entry_no) \
a(page_entries) \
a(cell_size) \
a(page_freebytes) \
a(page_freeblocks) \
a(payload_bytes) \
a(header_bytes) \
a(local_payload_bytes) \
a(parent) \
a(first_ovfl) ] [btree_cursor_info $csr $up] break
}
# Determine the page-size of the database. This global variable is used
# throughout the script.
#
set pageSize [db eval {PRAGMA page_size}]
# Find the root page of table or index to be analyzed. Also find out
# if the object is a table or an index.
#
if {$objname=="sqlite_master"} {
set rootpage 1
set type table
} else {
db eval {
SELECT rootpage, type FROM sqlite_master
WHERE name=$objname
} break
if {![info exists rootpage]} {
puts stderr "no such table or index: $objname"
exit 1
}
if {$type!="table" && $type!="index"} {
puts stderr "$objname is something other than a table or index"
exit 1
}
if {![string is integer -strict $rootpage]} {
puts stderr "invalid root page for $objname: $rootpage"
exit 1
}
}
# The cursor $csr is pointing to an entry. Print out information
# about the page that $up levels above that page that contains
# the entry. If $up==0 use the page that contains the entry.
#
# If information about the page has been printed already, then
# this is a no-op.
#
proc page_info {csr up} {
global seen
cursor_info ci $csr $up
set pg $ci(page_no)
# Report each page at most once; the global seen() array records
# pages that have already been printed.
if {[info exists seen($pg)]} return
set seen($pg) 1
# Do parent pages first
#
# (Recursing before printing makes ancestors appear ahead of their
# children in the output.)
if {$ci(parent)} {
page_info $csr [expr {$up+1}]
}
# Find the depth of this page
#
# Walk upward until a page with no parent is reached, counting the
# number of levels traversed.
set depth 1
set i $up
while {$ci(parent)} {
incr i
incr depth
cursor_info ci $csr $i
}
# print the results
#
puts [format {LEVEL %d: %6d} $depth $pg]
}
# Loop through the object and print out page numbers
#
set csr [btree_cursor $DB $rootpage 0]
for {btree_first $csr} {![btree_eof $csr]} {btree_next $csr} {
page_info $csr 0
set i 1
foreach pg [btree_ovfl_info $DB $csr] {
puts [format {OVFL %3d: %6d} $i $pg]
incr i
}
}
exit 0

View File

@ -1,867 +0,0 @@
/*
** 2015-04-17
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
**
** This is a utility program designed to aid running the SQLite library
** against an external fuzzer, such as American Fuzzy Lop (AFL)
** (http://lcamtuf.coredump.cx/afl/). Basically, this program reads
** SQL text from standard input and passes it through to SQLite for evaluation,
** just like the "sqlite3" command-line shell. Differences from the
** command-line shell:
**
** (1) The complex "dot-command" extensions are omitted. This
** prevents the fuzzer from discovering that it can run things
** like ".shell rm -rf ~"
**
** (2) The database is opened with the SQLITE_OPEN_MEMORY flag so that
** no disk I/O from the database is permitted. The ATTACH command
** with a filename still uses an in-memory database.
**
** (3) The main in-memory database can be initialized from a template
** disk database so that the fuzzer starts with a database containing
** content.
**
** (4) The eval() SQL function is added, allowing the fuzzer to do
** interesting recursive operations.
**
** (5) An error is raised if there is a memory leak.
**
** The input text can be divided into separate test cases using comments
** of the form:
**
** |****<...>****|
**
** where the "..." is arbitrary text. (Except the "|" should really be "/".
** "|" is used here to avoid compiler errors about nested comments.)
** A separate in-memory SQLite database is created to run each test case.
** This feature allows the "queue" of AFL to be captured into a single big
** file using a command like this:
**
** (for i in id:*; do echo '|****<'$i'>****|'; cat $i; done) >~/all-queue.txt
**
** (Once again, change the "|" to "/") Then all elements of the AFL queue
** can be run in a single go (for regression testing, for example) by typing:
**
** fuzzershell -f ~/all-queue.txt
**
** After running each chunk of SQL, the database connection is closed. The
** program aborts if the close fails or if there is any unfreed memory after
** the close.
**
** New test cases can be appended to all-queue.txt at any time. If redundant
** test cases are added, they can be eliminated by running:
**
** fuzzershell -f ~/all-queue.txt --unique-cases ~/unique-cases.txt
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <ctype.h>
#include "sqlite3.h"
#define ISDIGIT(X) isdigit((unsigned char)(X))
/*
** All global variables are gathered into the "g" singleton.
*/
struct GlobalVars {
const char *zArgv0; /* Name of program */
sqlite3_mem_methods sOrigMem; /* Original memory methods */
sqlite3_mem_methods sOomMem; /* Memory methods with OOM simulator */
int iOomCntdown; /* Memory fails on 1 to 0 transition */
int nOomFault; /* Increments for each OOM fault */
int bOomOnce; /* Fail just once if true */
int bOomEnable; /* True to enable OOM simulation */
int nOomBrkpt; /* Number of calls to oomFault() */
char zTestName[100]; /* Name of current test */
} g;
/*
** Maximum number of iterations for an OOM test
*/
#ifndef OOM_MAX
# define OOM_MAX 625
#endif
/*
** This routine is called when a simulated OOM occurs. It exists as a
** convenient place to set a debugger breakpoint.
*/
static void oomFault(void){
g.nOomBrkpt++; /* Prevent oomFault() from being optimized out */
}
/* Versions of malloc() and realloc() that simulate OOM conditions */
/*
** Replacement for malloc() that injects a simulated out-of-memory
** failure when the global countdown transitions to zero.  Otherwise
** the request is forwarded to the original allocator.
*/
static void *oomMalloc(int nByte){
  if( nByte<=0 || !g.bOomEnable || g.iOomCntdown<=0 ){
    return g.sOrigMem.xMalloc(nByte);
  }
  g.iOomCntdown--;
  if( g.iOomCntdown!=0 ){
    return g.sOrigMem.xMalloc(nByte);
  }
  /* The countdown just hit zero: report a simulated failure */
  if( g.nOomFault==0 ) oomFault();      /* Breakpoint hook on first fault */
  g.nOomFault++;
  if( !g.bOomOnce ) g.iOomCntdown = 1;  /* Keep failing unless one-shot */
  return 0;
}
/*
** Replacement for realloc() that injects a simulated out-of-memory
** failure under the same countdown rules as oomMalloc().
*/
static void *oomRealloc(void *pOld, int nByte){
  if( nByte<=0 || !g.bOomEnable || g.iOomCntdown<=0 ){
    return g.sOrigMem.xRealloc(pOld, nByte);
  }
  g.iOomCntdown--;
  if( g.iOomCntdown!=0 ){
    return g.sOrigMem.xRealloc(pOld, nByte);
  }
  /* The countdown just hit zero: report a simulated failure */
  if( g.nOomFault==0 ) oomFault();      /* Breakpoint hook on first fault */
  g.nOomFault++;
  if( !g.bOomOnce ) g.iOomCntdown = 1;  /* Keep failing unless one-shot */
  return 0;
}
/*
** Print an error message and abort in such a way to indicate to the
** fuzzer that this counts as a crash.
*/
/*
** Report an error message and abort() so that an external fuzzer
** counts this run as a crash.  The message is prefixed with the
** program name and, when one is set, the current test name.
*/
static void abendError(const char *zFormat, ...){
  va_list ap;
  va_start(ap, zFormat);
  if( g.zTestName[0] ){
    fprintf(stderr, "%s (%s): ", g.zArgv0, g.zTestName);
  }else{
    fprintf(stderr, "%s: ", g.zArgv0);
  }
  vfprintf(stderr, zFormat, ap);
  fputs("\n", stderr);
  va_end(ap);
  abort();
}
/*
** Print an error message and quit, but not in a way that would look
** like a crash.
*/
/*
** Report an error message and exit(1) -- a clean failure that the
** fuzzer will not mistake for a crash (contrast with abendError()).
** The message carries the same program/test-name prefix.
*/
static void fatalError(const char *zFormat, ...){
  va_list ap;
  va_start(ap, zFormat);
  if( g.zTestName[0] ){
    fprintf(stderr, "%s (%s): ", g.zArgv0, g.zTestName);
  }else{
    fprintf(stderr, "%s: ", g.zArgv0);
  }
  vfprintf(stderr, zFormat, ap);
  fputs("\n", stderr);
  va_end(ap);
  exit(1);
}
/*
** Evaluate some SQL. Abort if unable.
*/
/*
** Format and run a single SQL statement against db.  Any execution
** error aborts the process (via abendError) so the fuzzer records it
** as a crash.
*/
static void sqlexec(sqlite3 *db, const char *zFormat, ...){
  char *zSql;          /* SQL text, allocated by sqlite3_vmprintf() */
  char *zErrMsg = 0;   /* Error message from sqlite3_exec(), if any */
  va_list ap;
  int rc;
  va_start(ap, zFormat);
  zSql = sqlite3_vmprintf(zFormat, ap);
  va_end(ap);
  rc = sqlite3_exec(db, zSql, 0, 0, &zErrMsg);
  if( rc!=0 ) abendError("failed sql [%s]: %s", zSql, zErrMsg);
  sqlite3_free(zSql);
}
/*
** This callback is invoked by sqlite3_log().
*/
static void shellLog(void *pNotUsed, int iErrCode, const char *zMsg){
printf("LOG: (%d) %s\n", iErrCode, zMsg);
fflush(stdout);
}
/*
** Do-nothing variant of shellLog().  Presumably installed via
** sqlite3_log() configuration when log output is suppressed --
** confirm against the option handling in main().
*/
static void shellLogNoop(void *pNotUsed, int iErrCode, const char *zMsg){
return;
}
/*
** This callback is invoked by sqlite3_exec() once per result row.
** Each row is printed with a running counter, one "name=[value]" line
** per column; NULL values render as "NULL".  Always returns 0 so the
** query continues.
*/
static int execCallback(void *NotUsed, int argc, char **argv, char **colv){
  static unsigned cnt = 0;
  int i;
  printf("ROW #%u:\n", ++cnt);
  for(i=0; i<argc; i++){
    const char *zVal = argv[i];
    printf(" %s=", colv[i]);
    if( zVal==0 ){
      printf("NULL\n");
    }else{
      printf("[%s]\n", zVal);
    }
  }
  fflush(stdout);
  return 0;
}
/*
** sqlite3_exec() callback that ignores all result rows.
*/
static int execNoop(void *NotUsed, int argc, char **argv, char **colv){
  (void)NotUsed;
  (void)argc;
  (void)argv;
  (void)colv;
  return 0;
}
#ifndef SQLITE_OMIT_TRACE
/*
** sqlite3_trace() callback: announce each SQL statement as it starts,
** flushing immediately so output survives a subsequent crash.
*/
static void traceCallback(void *NotUsed, const char *zMsg){
  fprintf(stdout, "TRACE: %s\n", zMsg);
  fflush(stdout);
}
/*
** sqlite3_trace() callback that ignores all statements.
*/
static void traceNoop(void *NotUsed, const char *zMsg){
  (void)NotUsed;
  (void)zMsg;
}
#endif
/***************************************************************************
** eval() implementation copied from ../ext/misc/eval.c
*/
/*
** Structure used to accumulate the output of SQL run by the eval() SQL
** function.  Result text is appended to z[], separated by zSep; the
** buffer grows by roughly doubling (see callback() below).
*/
struct EvalResult {
  char *z;               /* Accumulated output */
  const char *zSep;      /* Separator */
  int szSep;             /* Size of the separator string */
  sqlite3_int64 nAlloc;  /* Number of bytes allocated for z[] */
  sqlite3_int64 nUsed;   /* Number of bytes of z[] actually used */
};
/*
** Callback from sqlite_exec() for the eval() function.  Appends the
** text of every column of every result row to the EvalResult buffer,
** separated by zSep.  Returns 1 (which aborts the query) on OOM.
*/
static int callback(void *pCtx, int argc, char **argv, char **colnames){
  struct EvalResult *p = (struct EvalResult*)pCtx;
  int i;
  for(i=0; i<argc; i++){
    const char *z = argv[i] ? argv[i] : "";  /* NULL columns become "" */
    size_t sz = strlen(z);
    /* Grow the buffer when the new text plus separator will not fit */
    if( (sqlite3_int64)sz+p->nUsed+p->szSep+1 > p->nAlloc ){
      char *zNew;
      p->nAlloc = p->nAlloc*2 + sz + p->szSep + 1;
      /* Using sqlite3_realloc64() would be better, but it is a recent
      ** addition and will cause a segfault if loaded by an older version
      ** of SQLite. */
      zNew = p->nAlloc<=0x7fffffff ? sqlite3_realloc(p->z, (int)p->nAlloc) : 0;
      if( zNew==0 ){
        /* OOM: free everything and zero the struct.  The zeroed zSep
        ** field is how sqlEvalFunc() detects this failure. */
        sqlite3_free(p->z);
        memset(p, 0, sizeof(*p));
        return 1;
      }
      p->z = zNew;
    }
    /* Emit the separator before every value except the first */
    if( p->nUsed>0 ){
      memcpy(&p->z[p->nUsed], p->zSep, p->szSep);
      p->nUsed += p->szSep;
    }
    memcpy(&p->z[p->nUsed], z, sz);
    p->nUsed += sz;
  }
  return 0;
}
/*
** Implementation of the eval(X) and eval(X,Y) SQL functions.
**
** Evaluate the SQL text in X.  Return the results, using string
** Y as the separator.  If Y is omitted, use a single space character.
*/
static void sqlEvalFunc(
  sqlite3_context *context,
  int argc,
  sqlite3_value **argv
){
  const char *zSql;
  sqlite3 *db;
  char *zErr = 0;
  int rc;
  struct EvalResult x;
  memset(&x, 0, sizeof(x));
  x.zSep = " ";                 /* Default separator for eval(X) */
  zSql = (const char*)sqlite3_value_text(argv[0]);
  if( zSql==0 ) return;         /* NULL SQL produces a NULL result */
  if( argc>1 ){
    x.zSep = (const char*)sqlite3_value_text(argv[1]);
    if( x.zSep==0 ) return;
  }
  x.szSep = (int)strlen(x.zSep);
  /* Run the SQL against the same database connection that invoked eval() */
  db = sqlite3_context_db_handle(context);
  rc = sqlite3_exec(db, zSql, callback, &x, &zErr);
  if( rc!=SQLITE_OK ){
    sqlite3_result_error(context, zErr, -1);
    sqlite3_free(zErr);
  }else if( x.zSep==0 ){
    /* callback() zeroes the whole EvalResult struct on OOM */
    sqlite3_result_error_nomem(context);
    sqlite3_free(x.z);
  }else{
    /* Ownership of x.z passes to SQLite via the sqlite3_free destructor */
    sqlite3_result_text(context, x.z, (int)x.nUsed, sqlite3_free);
  }
}
/* End of the eval() implementation
******************************************************************************/
/*
** Print sketchy documentation for this utility program on standard
** output.  Keep this list in sync with the option parsing in main().
*/
static void showHelp(void){
  printf("Usage: %s [options] ?FILE...?\n", g.zArgv0);
  printf(
"Read SQL text from FILE... (or from standard input if FILE... is omitted)\n"
"and then evaluate each block of SQL contained therein.\n"
"Options:\n"
"  --autovacuum          Enable AUTOVACUUM mode\n"
"  --database FILE       Use database FILE instead of an in-memory database\n"
"  --disable-lookaside   Turn off lookaside memory\n"
"  --heap SZ MIN         Memory allocator uses SZ bytes & min allocation MIN\n"
"  --help                Show this help text\n"
"  --lookaside N SZ      Configure lookaside for N slots of SZ bytes each\n"
"  --oom                 Run each test multiple times in a simulated OOM loop\n"
"  --pagesize N          Set the page size to N\n"
"  --pcache N SZ         Configure N pages of pagecache each of size SZ bytes\n"
"  -q                    Reduced output\n"
"  --quiet               Reduced output\n"
"  --scratch N SZ        Configure scratch memory for N slots of SZ bytes each\n"
"  --unique-cases FILE   Write all unique test cases to FILE\n"
"  --utf16be             Set text encoding to UTF-16BE\n"
"  --utf16le             Set text encoding to UTF-16LE\n"
"  -v                    Increased output\n"
"  --verbose             Increased output\n"
  );
}
/*
** Return the numeric value (0..15) of the hexadecimal digit c, or -1
** if c is not a hexadecimal digit.
*/
static int hexDigitValue(char c){
  int v = -1;
  if( c>='0' && c<='9' ){
    v = c - '0';
  }else if( c>='a' && c<='f' ){
    v = c - 'a' + 10;
  }else if( c>='A' && c<='F' ){
    v = c - 'A' + 10;
  }
  return v;
}
/*
** Interpret zArg as an integer value, possibly with suffixes.
*/
static int integerValue(const char *zArg){
sqlite3_int64 v = 0;
static const struct { char *zSuffix; int iMult; } aMult[] = {
{ "KiB", 1024 },
{ "MiB", 1024*1024 },
{ "GiB", 1024*1024*1024 },
{ "KB", 1000 },
{ "MB", 1000000 },
{ "GB", 1000000000 },
{ "K", 1000 },
{ "M", 1000000 },
{ "G", 1000000000 },
};
int i;
int isNeg = 0;
if( zArg[0]=='-' ){
isNeg = 1;
zArg++;
}else if( zArg[0]=='+' ){
zArg++;
}
if( zArg[0]=='0' && zArg[1]=='x' ){
int x;
zArg += 2;
while( (x = hexDigitValue(zArg[0]))>=0 ){
v = (v<<4) + x;
zArg++;
}
}else{
while( ISDIGIT(zArg[0]) ){
v = v*10 + zArg[0] - '0';
zArg++;
}
}
for(i=0; i<sizeof(aMult)/sizeof(aMult[0]); i++){
if( sqlite3_stricmp(aMult[i].zSuffix, zArg)==0 ){
v *= aMult[i].iMult;
break;
}
}
if( v>0x7fffffff ) abendError("parameter too large - max 2147483648");
return (int)(isNeg? -v : v);
}
/*
** Return the current wall-clock time, in milliseconds, as reported by
** the default VFS.
*/
static sqlite3_int64 timeOfDay(void){
  static sqlite3_vfs *clockVfs = 0;
  sqlite3_int64 t;
  if( clockVfs==0 ) clockVfs = sqlite3_vfs_find(0);
  /* Bug fix: xCurrentTimeInt64 was added in version 2 of the sqlite3_vfs
  ** object, so the guard must require iVersion>=2 (the old test of
  ** iVersion>=1 could read a field that a version-1 VFS does not have). */
  if( clockVfs->iVersion>=2 && clockVfs->xCurrentTimeInt64!=0 ){
    clockVfs->xCurrentTimeInt64(clockVfs, &t);
  }else{
    double r;
    clockVfs->xCurrentTime(clockVfs, &r);
    t = (sqlite3_int64)(r*86400000.0);
  }
  return t;
}
int main(int argc, char **argv){
char *zIn = 0; /* Input text */
int nAlloc = 0; /* Number of bytes allocated for zIn[] */
int nIn = 0; /* Number of bytes of zIn[] used */
size_t got; /* Bytes read from input */
int rc = SQLITE_OK; /* Result codes from API functions */
int i; /* Loop counter */
int iNext; /* Next block of SQL */
sqlite3 *db; /* Open database */
char *zErrMsg = 0; /* Error message returned from sqlite3_exec() */
const char *zEncoding = 0; /* --utf16be or --utf16le */
int nHeap = 0, mnHeap = 0; /* Heap size from --heap */
int nLook = 0, szLook = 0; /* --lookaside configuration */
int nPCache = 0, szPCache = 0;/* --pcache configuration */
int nScratch = 0, szScratch=0;/* --scratch configuration */
int pageSize = 0; /* Desired page size. 0 means default */
void *pHeap = 0; /* Allocated heap space */
void *pLook = 0; /* Allocated lookaside space */
void *pPCache = 0; /* Allocated storage for pcache */
void *pScratch = 0; /* Allocated storage for scratch */
int doAutovac = 0; /* True for --autovacuum */
char *zSql; /* SQL to run */
char *zToFree = 0; /* Call sqlite3_free() on this afte running zSql */
int verboseFlag = 0; /* --verbose or -v flag */
int quietFlag = 0; /* --quiet or -q flag */
int nTest = 0; /* Number of test cases run */
int multiTest = 0; /* True if there will be multiple test cases */
int lastPct = -1; /* Previous percentage done output */
sqlite3 *dataDb = 0; /* Database holding compacted input data */
sqlite3_stmt *pStmt = 0; /* Statement to insert testcase into dataDb */
const char *zDataOut = 0; /* Write compacted data to this output file */
int nHeader = 0; /* Bytes of header comment text on input file */
int oomFlag = 0; /* --oom */
int oomCnt = 0; /* Counter for the OOM loop */
char zErrBuf[200]; /* Space for the error message */
const char *zFailCode; /* Value of the TEST_FAILURE environment var */
const char *zPrompt; /* Initial prompt when large-file fuzzing */
int nInFile = 0; /* Number of input files to read */
char **azInFile = 0; /* Array of input file names */
int jj; /* Loop counter for azInFile[] */
sqlite3_int64 iBegin; /* Start time for the whole program */
sqlite3_int64 iStart, iEnd; /* Start and end-times for a test case */
const char *zDbName = 0; /* Name of an on-disk database file to open */
iBegin = timeOfDay();
sqlite3_shutdown();
zFailCode = getenv("TEST_FAILURE");
g.zArgv0 = argv[0];
zPrompt = "<stdin>";
for(i=1; i<argc; i++){
const char *z = argv[i];
if( z[0]=='-' ){
z++;
if( z[0]=='-' ) z++;
if( strcmp(z,"autovacuum")==0 ){
doAutovac = 1;
}else
if( strcmp(z,"database")==0 ){
if( i>=argc-1 ) abendError("missing argument on %s\n", argv[i]);
zDbName = argv[i+1];
i += 1;
}else
if( strcmp(z,"disable-lookaside")==0 ){
nLook = 1;
szLook = 0;
}else
if( strcmp(z, "f")==0 && i+1<argc ){
i++;
goto addNewInFile;
}else
if( strcmp(z,"heap")==0 ){
if( i>=argc-2 ) abendError("missing arguments on %s\n", argv[i]);
nHeap = integerValue(argv[i+1]);
mnHeap = integerValue(argv[i+2]);
i += 2;
}else
if( strcmp(z,"help")==0 ){
showHelp();
return 0;
}else
if( strcmp(z,"lookaside")==0 ){
if( i>=argc-2 ) abendError("missing arguments on %s", argv[i]);
nLook = integerValue(argv[i+1]);
szLook = integerValue(argv[i+2]);
i += 2;
}else
if( strcmp(z,"oom")==0 ){
oomFlag = 1;
}else
if( strcmp(z,"pagesize")==0 ){
if( i>=argc-1 ) abendError("missing argument on %s", argv[i]);
pageSize = integerValue(argv[++i]);
}else
if( strcmp(z,"pcache")==0 ){
if( i>=argc-2 ) abendError("missing arguments on %s", argv[i]);
nPCache = integerValue(argv[i+1]);
szPCache = integerValue(argv[i+2]);
i += 2;
}else
if( strcmp(z,"quiet")==0 || strcmp(z,"q")==0 ){
quietFlag = 1;
verboseFlag = 0;
}else
if( strcmp(z,"scratch")==0 ){
if( i>=argc-2 ) abendError("missing arguments on %s", argv[i]);
nScratch = integerValue(argv[i+1]);
szScratch = integerValue(argv[i+2]);
i += 2;
}else
if( strcmp(z, "unique-cases")==0 ){
if( i>=argc-1 ) abendError("missing arguments on %s", argv[i]);
if( zDataOut ) abendError("only one --minimize allowed");
zDataOut = argv[++i];
}else
if( strcmp(z,"utf16le")==0 ){
zEncoding = "utf16le";
}else
if( strcmp(z,"utf16be")==0 ){
zEncoding = "utf16be";
}else
if( strcmp(z,"verbose")==0 || strcmp(z,"v")==0 ){
quietFlag = 0;
verboseFlag = 1;
}else
{
abendError("unknown option: %s", argv[i]);
}
}else{
addNewInFile:
nInFile++;
azInFile = realloc(azInFile, sizeof(azInFile[0])*nInFile);
if( azInFile==0 ) abendError("out of memory");
azInFile[nInFile-1] = argv[i];
}
}
/* Do global SQLite initialization */
sqlite3_config(SQLITE_CONFIG_LOG, verboseFlag ? shellLog : shellLogNoop, 0);
if( nHeap>0 ){
pHeap = malloc( nHeap );
if( pHeap==0 ) fatalError("cannot allocate %d-byte heap\n", nHeap);
rc = sqlite3_config(SQLITE_CONFIG_HEAP, pHeap, nHeap, mnHeap);
if( rc ) abendError("heap configuration failed: %d\n", rc);
}
if( oomFlag ){
sqlite3_config(SQLITE_CONFIG_GETMALLOC, &g.sOrigMem);
g.sOomMem = g.sOrigMem;
g.sOomMem.xMalloc = oomMalloc;
g.sOomMem.xRealloc = oomRealloc;
sqlite3_config(SQLITE_CONFIG_MALLOC, &g.sOomMem);
}
if( nLook>0 ){
sqlite3_config(SQLITE_CONFIG_LOOKASIDE, 0, 0);
if( szLook>0 ){
pLook = malloc( nLook*szLook );
if( pLook==0 ) fatalError("out of memory");
}
}
if( nScratch>0 && szScratch>0 ){
pScratch = malloc( nScratch*(sqlite3_int64)szScratch );
if( pScratch==0 ) fatalError("cannot allocate %lld-byte scratch",
nScratch*(sqlite3_int64)szScratch);
rc = sqlite3_config(SQLITE_CONFIG_SCRATCH, pScratch, szScratch, nScratch);
if( rc ) abendError("scratch configuration failed: %d\n", rc);
}
if( nPCache>0 && szPCache>0 ){
pPCache = malloc( nPCache*(sqlite3_int64)szPCache );
if( pPCache==0 ) fatalError("cannot allocate %lld-byte pcache",
nPCache*(sqlite3_int64)szPCache);
rc = sqlite3_config(SQLITE_CONFIG_PAGECACHE, pPCache, szPCache, nPCache);
if( rc ) abendError("pcache configuration failed: %d", rc);
}
/* If the --unique-cases option was supplied, open the database that will
** be used to gather unique test cases.
*/
if( zDataOut ){
rc = sqlite3_open(":memory:", &dataDb);
if( rc ) abendError("cannot open :memory: database");
rc = sqlite3_exec(dataDb,
"CREATE TABLE testcase(sql BLOB PRIMARY KEY, tm) WITHOUT ROWID;",0,0,0);
if( rc ) abendError("%s", sqlite3_errmsg(dataDb));
rc = sqlite3_prepare_v2(dataDb,
"INSERT OR IGNORE INTO testcase(sql,tm)VALUES(?1,?2)",
-1, &pStmt, 0);
if( rc ) abendError("%s", sqlite3_errmsg(dataDb));
}
/* Initialize the input buffer used to hold SQL text */
if( nInFile==0 ) nInFile = 1;
nAlloc = 1000;
zIn = malloc(nAlloc);
if( zIn==0 ) fatalError("out of memory");
/* Loop over all input files */
for(jj=0; jj<nInFile; jj++){
/* Read the complete content of the next input file into zIn[] */
FILE *in;
if( azInFile ){
int j, k;
in = fopen(azInFile[jj],"rb");
if( in==0 ){
abendError("cannot open %s for reading", azInFile[jj]);
}
zPrompt = azInFile[jj];
for(j=k=0; zPrompt[j]; j++) if( zPrompt[j]=='/' ) k = j+1;
zPrompt += k;
}else{
in = stdin;
zPrompt = "<stdin>";
}
while( !feof(in) ){
got = fread(zIn+nIn, 1, nAlloc-nIn-1, in);
nIn += (int)got;
zIn[nIn] = 0;
if( got==0 ) break;
if( nAlloc - nIn - 1 < 100 ){
nAlloc += nAlloc+1000;
zIn = realloc(zIn, nAlloc);
if( zIn==0 ) fatalError("out of memory");
}
}
if( in!=stdin ) fclose(in);
lastPct = -1;
/* Skip initial lines of the input file that begin with "#" */
for(i=0; i<nIn; i=iNext+1){
if( zIn[i]!='#' ) break;
for(iNext=i+1; iNext<nIn && zIn[iNext]!='\n'; iNext++){}
}
nHeader = i;
/* Process all test cases contained within the input file.
*/
for(; i<nIn; i=iNext, nTest++, g.zTestName[0]=0){
char cSaved;
if( strncmp(&zIn[i], "/****<",6)==0 ){
char *z = strstr(&zIn[i], ">****/");
if( z ){
z += 6;
sqlite3_snprintf(sizeof(g.zTestName), g.zTestName, "%.*s",
(int)(z-&zIn[i]) - 12, &zIn[i+6]);
if( verboseFlag ){
printf("%.*s\n", (int)(z-&zIn[i]), &zIn[i]);
fflush(stdout);
}
i += (int)(z-&zIn[i]);
multiTest = 1;
}
}
for(iNext=i; iNext<nIn && strncmp(&zIn[iNext],"/****<",6)!=0; iNext++){}
cSaved = zIn[iNext];
zIn[iNext] = 0;
/* Print out the SQL of the next test case is --verbose is enabled
*/
zSql = &zIn[i];
if( verboseFlag ){
printf("INPUT (offset: %d, size: %d): [%s]\n",
i, (int)strlen(&zIn[i]), &zIn[i]);
}else if( multiTest && !quietFlag ){
if( oomFlag ){
printf("%s\n", g.zTestName);
}else{
int pct = (10*iNext)/nIn;
if( pct!=lastPct ){
if( lastPct<0 ) printf("%s:", zPrompt);
printf(" %d%%", pct*10);
lastPct = pct;
}
}
}else if( nInFile>1 ){
printf("%s\n", zPrompt);
}
fflush(stdout);
/* Run the next test case. Run it multiple times in --oom mode
*/
if( oomFlag ){
oomCnt = g.iOomCntdown = 1;
g.nOomFault = 0;
g.bOomOnce = 1;
if( verboseFlag ){
printf("Once.%d\n", oomCnt);
fflush(stdout);
}
}else{
oomCnt = 0;
}
do{
if( zDbName ){
rc = sqlite3_open_v2(zDbName, &db, SQLITE_OPEN_READWRITE, 0);
if( rc!=SQLITE_OK ){
abendError("Cannot open database file %s", zDbName);
}
}else{
rc = sqlite3_open_v2(
"main.db", &db,
SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | SQLITE_OPEN_MEMORY,
0);
if( rc!=SQLITE_OK ){
abendError("Unable to open the in-memory database");
}
}
if( pLook ){
rc = sqlite3_db_config(db, SQLITE_DBCONFIG_LOOKASIDE,pLook,szLook,nLook);
if( rc!=SQLITE_OK ) abendError("lookaside configuration filed: %d", rc);
}
#ifndef SQLITE_OMIT_TRACE
sqlite3_trace(db, verboseFlag ? traceCallback : traceNoop, 0);
#endif
sqlite3_create_function(db, "eval", 1, SQLITE_UTF8, 0, sqlEvalFunc, 0, 0);
sqlite3_create_function(db, "eval", 2, SQLITE_UTF8, 0, sqlEvalFunc, 0, 0);
sqlite3_limit(db, SQLITE_LIMIT_LENGTH, 1000000);
if( zEncoding ) sqlexec(db, "PRAGMA encoding=%s", zEncoding);
if( pageSize ) sqlexec(db, "PRAGMA pagesize=%d", pageSize);
if( doAutovac ) sqlexec(db, "PRAGMA auto_vacuum=FULL");
iStart = timeOfDay();
g.bOomEnable = 1;
if( verboseFlag ){
zErrMsg = 0;
rc = sqlite3_exec(db, zSql, execCallback, 0, &zErrMsg);
if( zErrMsg ){
sqlite3_snprintf(sizeof(zErrBuf),zErrBuf,"%z", zErrMsg);
zErrMsg = 0;
}
}else {
rc = sqlite3_exec(db, zSql, execNoop, 0, 0);
}
g.bOomEnable = 0;
iEnd = timeOfDay();
rc = sqlite3_close(db);
if( rc ){
abendError("sqlite3_close() failed with rc=%d", rc);
}
if( !zDataOut && sqlite3_memory_used()>0 ){
abendError("memory in use after close: %lld bytes",sqlite3_memory_used());
}
if( oomFlag ){
/* Limit the number of iterations of the OOM loop to OOM_MAX. If the
** first pass (single failure) exceeds 2/3rds of OOM_MAX this skip the
** second pass (continuous failure after first) completely. */
if( g.nOomFault==0 || oomCnt>OOM_MAX ){
if( g.bOomOnce && oomCnt<=(OOM_MAX*2/3) ){
oomCnt = g.iOomCntdown = 1;
g.bOomOnce = 0;
}else{
oomCnt = 0;
}
}else{
g.iOomCntdown = ++oomCnt;
g.nOomFault = 0;
}
if( oomCnt ){
if( verboseFlag ){
printf("%s.%d\n", g.bOomOnce ? "Once" : "Multi", oomCnt);
fflush(stdout);
}
nTest++;
}
}
}while( oomCnt>0 );
/* Store unique test cases in the in the dataDb database if the
** --unique-cases flag is present
*/
if( zDataOut ){
sqlite3_bind_blob(pStmt, 1, &zIn[i], iNext-i, SQLITE_STATIC);
sqlite3_bind_int64(pStmt, 2, iEnd - iStart);
rc = sqlite3_step(pStmt);
if( rc!=SQLITE_DONE ) abendError("%s", sqlite3_errmsg(dataDb));
sqlite3_reset(pStmt);
}
/* Free the SQL from the current test case
*/
if( zToFree ){
sqlite3_free(zToFree);
zToFree = 0;
}
zIn[iNext] = cSaved;
/* Show test-case results in --verbose mode
*/
if( verboseFlag ){
printf("RESULT-CODE: %d\n", rc);
if( zErrMsg ){
printf("ERROR-MSG: [%s]\n", zErrBuf);
}
fflush(stdout);
}
/* Simulate an error if the TEST_FAILURE environment variable is "5".
** This is used to verify that automated test script really do spot
** errors that occur in this test program.
*/
if( zFailCode ){
if( zFailCode[0]=='5' && zFailCode[1]==0 ){
abendError("simulated failure");
}else if( zFailCode[0]!=0 ){
/* If TEST_FAILURE is something other than 5, just exit the test
** early */
printf("\nExit early due to TEST_FAILURE being set");
break;
}
}
}
if( !verboseFlag && multiTest && !quietFlag && !oomFlag ) printf("\n");
}
/* Report total number of tests run
*/
if( nTest>1 && !quietFlag ){
sqlite3_int64 iElapse = timeOfDay() - iBegin;
printf("%s: 0 errors out of %d tests in %d.%03d seconds\nSQLite %s %s\n",
g.zArgv0, nTest, (int)(iElapse/1000), (int)(iElapse%1000),
sqlite3_libversion(), sqlite3_sourceid());
}
/* Write the unique test cases if the --unique-cases flag was used
*/
if( zDataOut ){
int n = 0;
FILE *out = fopen(zDataOut, "wb");
if( out==0 ) abendError("cannot open %s for writing", zDataOut);
if( nHeader>0 ) fwrite(zIn, nHeader, 1, out);
sqlite3_finalize(pStmt);
rc = sqlite3_prepare_v2(dataDb, "SELECT sql, tm FROM testcase ORDER BY tm, sql",
-1, &pStmt, 0);
if( rc ) abendError("%s", sqlite3_errmsg(dataDb));
while( sqlite3_step(pStmt)==SQLITE_ROW ){
fprintf(out,"/****<%d:%dms>****/", ++n, sqlite3_column_int(pStmt,1));
fwrite(sqlite3_column_blob(pStmt,0),sqlite3_column_bytes(pStmt,0),1,out);
}
fclose(out);
sqlite3_finalize(pStmt);
sqlite3_close(dataDb);
}
/* Clean up and exit.
*/
free(azInFile);
free(zIn);
free(pHeap);
free(pLook);
free(pScratch);
free(pPCache);
return 0;
}

View File

@ -1,137 +0,0 @@
OVERVIEW
The SQLite library is capable of parsing SQL foreign key constraints
supplied as part of CREATE TABLE statements, but it does not actually
implement them. However, most of the features of foreign keys may be
implemented using SQL triggers, which SQLite does support. This text
file describes a feature of the SQLite shell tool (sqlite3) that
extracts foreign key definitions from an existing SQLite database and
creates the set of CREATE TRIGGER statements required to implement
the foreign key constraints.
CAPABILITIES
An SQL foreign key is a constraint that requires that each row in
the "child" table corresponds to a row in the "parent" table. For
example, the following schema:
CREATE TABLE parent(a, b, c, PRIMARY KEY(a, b));
CREATE TABLE child(d, e, f, FOREIGN KEY(d, e) REFERENCES parent(a, b));
implies that for each row in table "child", there must be a row in
"parent" for which the expression (child.d==parent.a AND child.e==parent.b)
is true. The columns in the parent table are required to be either the
primary key columns or subject to a UNIQUE constraint. There is no such
requirement for the columns of the child table.
At this time, all foreign keys are implemented as if they were
"MATCH NONE", even if the declaration specified "MATCH PARTIAL" or
"MATCH FULL". "MATCH NONE" means that if any of the key columns in
the child table are NULL, then there is no requirement for a corresponding
row in the parent table. So, taking this into account, the expression that
must be true for every row of the child table in the above example is
actually:
(child.d IS NULL) OR
(child.e IS NULL) OR
(child.d==parent.a AND child.e==parent.b)
Attempting to insert or update a row in the child table so that the
affected row violates this constraint results in an exception being
thrown.
The effect of attempting to delete or update a row in the parent table
so that the constraint becomes untrue for one or more rows in the child
table depends on the "ON DELETE" or "ON UPDATE" actions specified as
part of the foreign key definition, respectively. Three different actions
are supported: "RESTRICT" (the default), "CASCADE" and "SET NULL". SQLite
will also parse the "SET DEFAULT" action, but this is not implemented
and "RESTRICT" is used instead.
RESTRICT: Attempting to update or delete a row in the parent table so
that the constraint becomes untrue for one or more rows in
the child table is not allowed. An exception is thrown.
CASCADE: Instead of throwing an exception, all corresponding child table
rows are either deleted (if the parent row is being deleted)
or updated to match the new parent key values (if the parent
row is being updated).
SET NULL: Instead of throwing an exception, the foreign key fields of
all corresponding child table rows are set to NULL.
LIMITATIONS
Apart from those limitations described above:
* Implicit mapping to composite primary keys is not supported. If
a parent table has a composite primary key, then any child table
that refers to it must explicitly map each column. For example, given
the following definition of table "parent":
CREATE TABLE parent(a, b, c, PRIMARY KEY(a, b));
only the first of the following two definitions of table "child"
is supported:
CREATE TABLE child(d, e, f, FOREIGN KEY(d, e) REFERENCES parent(a, b));
CREATE TABLE child(d, e, f, FOREIGN KEY(d, e) REFERENCES parent);
An implicit reference to a composite primary key is detected as an
error when the program is run (see below).
* SQLite does not support recursive triggers, and therefore this program
does not support recursive CASCADE or SET NULL foreign key
relationships. If the parent and the child tables of a CASCADE or
SET NULL foreign key are the same table, the generated triggers will
malfunction. This is also true if the recursive foreign key constraint
is indirect (for example if table A references table B which references
table A with a CASCADE or SET NULL foreign key constraint).
Recursive CASCADE or SET NULL foreign key relationships are *not*
detected as errors when the program is run. Buyer beware.
USAGE
The functionality is accessed through an sqlite3 shell tool "dot-command":
.genfkey ?--no-drop? ?--ignore-errors? ?--exec?
When this command is run, it first checks the schema of the open SQLite
database for foreign key related errors or inconsistencies. For example,
a foreign key that refers to a parent table that does not exist, or
a foreign key that refers to columns in a parent table that are not
guaranteed to be unique. If such errors are found and the --ignore-errors
option was not present, a message for each one is printed to stderr and
no further processing takes place.
If errors are found and the --ignore-errors option is passed, then
no error messages are printed. No "CREATE TRIGGER" statements are generated
for foreign-key definitions that contained errors; they are silently
ignored by subsequent processing.
All triggers generated by this command have names that match the pattern
"genfkey*". Unless the --no-drop option is specified, then the program
also generates a "DROP TRIGGER" statement for each trigger that exists
in the database with a name that matches this pattern. This allows the
program to be used to upgrade a database schema for which foreign key
triggers have already been installed (i.e. after new tables are created
or existing tables dropped).
Finally, a series of SQL trigger definitions (CREATE TRIGGER statements)
that implement the foreign key constraints found in the database schema are
generated.
If the --exec option was passed, then all generated SQL is immediately
executed on the database. Otherwise, the generated SQL strings are output
in the same way as the results of SELECT queries are. Normally, this means
they will be printed to stdout, but this can be configured using other
dot-commands (i.e. ".output").
The simplest way to activate the foreign key definitions in a database
is simply to open it using the shell tool and enter the command
".genfkey --exec":
sqlite> .genfkey --exec

View File

@ -1,354 +0,0 @@
package require sqlite3
# Run the script $cmd in the caller's scope and compare its result with
# $expected.  On success print "Ok"; on mismatch print both values and
# terminate the whole script immediately.
proc do_test {name cmd expected} {
  puts -nonewline "$name ..."
  set got [uplevel $cmd]
  if {$got ne $expected} {
    puts Error
    puts "  Got: $got"
    puts "  Expected: $expected"
    exit
  }
  puts Ok
}
# Evaluate $sql against the global database handle "db" in the caller's
# scope and return the resulting value list.
proc execsql {sql} {
  uplevel [list db eval $sql]
}
# Like execsql, but trap errors.  Returns a two-element list: the catch
# return code (0 on success, 1 on error) and the result or error message.
proc catchsql {sql} {
  set rc [catch {uplevel [list db eval $sql]} msg]
  list $rc $msg
}
# Start from a clean slate: remove any database left over from a
# previous run, then open a fresh one as "db".
file delete -force test.db test.db.journal
sqlite3 db test.db

# The following tests - genfkey-1.* - test RESTRICT foreign keys.
#
do_test genfkey-1.1 {
  execsql {
    CREATE TABLE t1(a INTEGER PRIMARY KEY, b, c, UNIQUE(b, c));
    CREATE TABLE t2(e REFERENCES t1, f);
    CREATE TABLE t3(g, h, i, FOREIGN KEY (h, i) REFERENCES t1(b, c));
  }
} {}
# Install the generated enforcement triggers into the database.
do_test genfkey-1.2 {
  execsql [exec ./sqlite3 test.db .genfkey]
} {}
do_test genfkey-1.3 {
  catchsql { INSERT INTO t2 VALUES(1, 2) }
} {1 {constraint failed}}
do_test genfkey-1.4 {
  execsql {
    INSERT INTO t1 VALUES(1, 2, 3);
    INSERT INTO t2 VALUES(1, 2);
  }
} {}
do_test genfkey-1.5 {
  execsql { INSERT INTO t2 VALUES(NULL, 3) }
} {}
do_test genfkey-1.6 {
  catchsql { UPDATE t2 SET e = 5 WHERE e IS NULL }
} {1 {constraint failed}}
do_test genfkey-1.7 {
  execsql { UPDATE t2 SET e = 1 WHERE e IS NULL }
} {}
do_test genfkey-1.8 {
  execsql { UPDATE t2 SET e = NULL WHERE f = 3 }
} {}
do_test genfkey-1.9 {
  catchsql { UPDATE t1 SET a = 10 }
} {1 {constraint failed}}
do_test genfkey-1.9a {
  catchsql { UPDATE t1 SET a = NULL }
} {1 {datatype mismatch}}
do_test genfkey-1.10 {
  catchsql { DELETE FROM t1 }
} {1 {constraint failed}}
do_test genfkey-1.11 {
  execsql { UPDATE t2 SET e = NULL }
} {}
do_test genfkey-1.12 {
  execsql {
    UPDATE t1 SET a = 10 ;
    DELETE FROM t1;
    DELETE FROM t2;
  }
} {}
# MATCH NONE semantics: rows with any NULL key column are exempt.
do_test genfkey-1.13 {
  execsql {
    INSERT INTO t3 VALUES(1, NULL, NULL);
    INSERT INTO t3 VALUES(1, 2, NULL);
    INSERT INTO t3 VALUES(1, NULL, 3);
  }
} {}
do_test genfkey-1.14 {
  catchsql { INSERT INTO t3 VALUES(3, 1, 4) }
} {1 {constraint failed}}
do_test genfkey-1.15 {
  execsql {
    INSERT INTO t1 VALUES(1, 1, 4);
    INSERT INTO t3 VALUES(3, 1, 4);
  }
} {}
do_test genfkey-1.16 {
  catchsql { DELETE FROM t1 }
} {1 {constraint failed}}
do_test genfkey-1.17 {
  catchsql { UPDATE t1 SET b = 10}
} {1 {constraint failed}}
do_test genfkey-1.18 {
  execsql { UPDATE t1 SET a = 10}
} {}
do_test genfkey-1.19 {
  catchsql { UPDATE t3 SET h = 'hello' WHERE i = 3}
} {1 {constraint failed}}
do_test genfkey-1.X {
  execsql {
    DROP TABLE t1;
    DROP TABLE t2;
    DROP TABLE t3;
  }
} {}

# The following tests - genfkey-2.* - test CASCADE foreign keys.
#
do_test genfkey-2.1 {
  execsql {
    CREATE TABLE t1(a INTEGER PRIMARY KEY, b, c, UNIQUE(b, c));
    CREATE TABLE t2(e REFERENCES t1 ON UPDATE CASCADE ON DELETE CASCADE, f);
    CREATE TABLE t3(g, h, i, 
        FOREIGN KEY (h, i) 
        REFERENCES t1(b, c) ON UPDATE CASCADE ON DELETE CASCADE
    );
  }
} {}
do_test genfkey-2.2 {
  execsql [exec ./sqlite3 test.db .genfkey]
} {}
do_test genfkey-2.3 {
  execsql {
    INSERT INTO t1 VALUES(1, 2, 3);
    INSERT INTO t1 VALUES(4, 5, 6);
    INSERT INTO t2 VALUES(1, 'one');
    INSERT INTO t2 VALUES(4, 'four');
  }
} {}
do_test genfkey-2.4 {
  execsql {
    UPDATE t1 SET a = 2 WHERE a = 1;
    SELECT * FROM t2;
  }
} {2 one 4 four}
do_test genfkey-2.5 {
  execsql {
    DELETE FROM t1 WHERE a = 4;
    SELECT * FROM t2;
  }
} {2 one}
do_test genfkey-2.6 {
  execsql {
    INSERT INTO t3 VALUES('hello', 2, 3);
    UPDATE t1 SET c = 2;
    SELECT * FROM t3;
  }
} {hello 2 2}
do_test genfkey-2.7 {
  execsql {
    DELETE FROM t1;
    SELECT * FROM t3;
  }
} {}
do_test genfkey-2.X {
  execsql {
    DROP TABLE t1;
    DROP TABLE t2;
    DROP TABLE t3;
  }
} {}

# The following tests - genfkey-3.* - test SET NULL foreign keys.
#
do_test genfkey-3.1 {
  execsql {
    CREATE TABLE t1(a INTEGER PRIMARY KEY, b, c, UNIQUE(c, b));
    CREATE TABLE t2(e REFERENCES t1 ON UPDATE SET NULL ON DELETE SET NULL, f);
    CREATE TABLE t3(g, h, i, 
        FOREIGN KEY (h, i) 
        REFERENCES t1(b, c) ON UPDATE SET NULL ON DELETE SET NULL
    );
  }
} {}
do_test genfkey-3.2 {
  execsql [exec ./sqlite3 test.db .genfkey]
} {}
do_test genfkey-3.3 {
  execsql {
    INSERT INTO t1 VALUES(1, 2, 3);
    INSERT INTO t1 VALUES(4, 5, 6);
    INSERT INTO t2 VALUES(1, 'one');
    INSERT INTO t2 VALUES(4, 'four');
  }
} {}
do_test genfkey-3.4 {
  execsql {
    UPDATE t1 SET a = 2 WHERE a = 1;
    SELECT * FROM t2;
  }
} {{} one 4 four}
do_test genfkey-3.5 {
  execsql {
    DELETE FROM t1 WHERE a = 4;
    SELECT * FROM t2;
  }
} {{} one {} four}
do_test genfkey-3.6 {
  execsql {
    INSERT INTO t3 VALUES('hello', 2, 3);
    UPDATE t1 SET c = 2;
    SELECT * FROM t3;
  }
} {hello {} {}}
# NOTE(review): this test name duplicates genfkey-2.7 above; it looks
# like it was meant to be genfkey-3.7.
do_test genfkey-2.7 {
  execsql {
    UPDATE t3 SET h = 2, i = 2;
    DELETE FROM t1;
    SELECT * FROM t3;
  }
} {hello {} {}}
do_test genfkey-3.X {
  execsql {
    DROP TABLE t1;
    DROP TABLE t2;
    DROP TABLE t3;
  }
} {}

# The following tests - genfkey-4.* - test that errors in the schema 
# are detected correctly.
#
do_test genfkey-4.1 {
  execsql {
    CREATE TABLE t1(a REFERENCES nosuchtable, b);
    CREATE TABLE t2(a REFERENCES t1, b);
    CREATE TABLE t3(a, b, c, PRIMARY KEY(a, b));
    CREATE TABLE t4(a, b, c, FOREIGN KEY(c, b) REFERENCES t3);
    CREATE TABLE t5(a REFERENCES t4(d), b, c);
    CREATE TABLE t6(a REFERENCES t4(a), b, c);
    CREATE TABLE t7(a REFERENCES t3(a), b, c);
    CREATE TABLE t8(a REFERENCES nosuchtable(a), b, c);
  }
} {}
do_test genfkey-4.X {
  set rc [catch {exec ./sqlite3 test.db .genfkey} msg]
  list $rc $msg
} "1 {[string trim {
Error in table t5: foreign key columns do not exist
Error in table t8: foreign key columns do not exist
Error in table t4: implicit mapping to composite primary key
Error in table t1: implicit mapping to non-existant primary key
Error in table t2: implicit mapping to non-existant primary key
Error in table t6: foreign key is not unique
Error in table t7: foreign key is not unique
}]}"

# Test that ticket #3800 has been resolved.
#
do_test genfkey-5.1 {
  execsql {
    DROP TABLE t1; DROP TABLE t2; DROP TABLE t3;
    DROP TABLE t4; DROP TABLE t5; DROP TABLE t6;
    DROP TABLE t7; DROP TABLE t8;
  }
} {}
do_test genfkey-5.2 {
  execsql {
    CREATE TABLE "t.3" (c1 PRIMARY KEY);
    CREATE TABLE t13 (c1, foreign key(c1) references "t.3"(c1));
  }
} {}
do_test genfkey-5.3 {
  set rc [catch {exec ./sqlite3 test.db .genfkey} msg]
} {0}
do_test genfkey-5.4 {
  db eval $msg
} {}
do_test genfkey-5.5 {
  catchsql { INSERT INTO t13 VALUES(1) }
} {1 {constraint failed}}
# NOTE(review): this test name duplicates the previous one; it looks
# like it was meant to be genfkey-5.6.
do_test genfkey-5.5 {
  catchsql { 
    INSERT INTO "t.3" VALUES(1);
    INSERT INTO t13 VALUES(1);
  }
} {0 {}}

# Test also column names that require quoting.
do_test genfkey-6.1 {
  execsql {
    DROP TABLE "t.3";
    DROP TABLE t13;
    CREATE TABLE p(
      "a.1 first", "b.2 second", 
      UNIQUE("a.1 first", "b.2 second")
    );
    CREATE TABLE c(
      "c.1 I", "d.2 II", 
      FOREIGN KEY("c.1 I", "d.2 II") 
      REFERENCES p("a.1 first", "b.2 second")
      ON UPDATE CASCADE ON DELETE CASCADE
    );
  }
} {}
do_test genfkey-6.2 {
  set rc [catch {exec ./sqlite3 test.db .genfkey} msg]
} {0}
do_test genfkey-6.3 {
  execsql $msg
  execsql {
    INSERT INTO p VALUES('A', 'B');
    INSERT INTO p VALUES('C', 'D');
    INSERT INTO c VALUES('A', 'B');
    INSERT INTO c VALUES('C', 'D');
    UPDATE p SET "a.1 first" = 'X' WHERE rowid = 1;
    DELETE FROM p WHERE rowid = 2;
  }
  execsql { SELECT * FROM c }
} {X B}
do_test genfkey-6.4 {
  execsql {
    DROP TABLE p;
    DROP TABLE c;
    CREATE TABLE parent("a.1", PRIMARY KEY("a.1"));
    CREATE TABLE child("b.2", FOREIGN KEY("b.2") REFERENCES parent("a.1"));
  }
  set rc [catch {exec ./sqlite3 test.db .genfkey} msg]
} {0}
do_test genfkey-6.5 {
  execsql $msg
  execsql {
    INSERT INTO parent VALUES(1);
    INSERT INTO child VALUES(1);
  }
  catchsql { UPDATE parent SET "a.1"=0 }
} {1 {constraint failed}}
do_test genfkey-6.6 {
  catchsql { UPDATE child SET "b.2"=7 }
} {1 {constraint failed}}
do_test genfkey-6.7 {
  execsql {
    SELECT * FROM parent;
    SELECT * FROM child;
  }
} {1 1}

View File

@ -1,134 +0,0 @@
/*
** This utility program looks at an SQLite database and determines whether
** or not it is locked, the kind of lock, and who is holding this lock.
**
** This only works on unix when the posix advisory locking method is used
** (which is the default on unix) and when the PENDING_BYTE is in its
** usual place.
*/
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
/*
** Print a usage message for this tool on standard error, then
** terminate the process with a non-zero exit status.
*/
static void usage(const char *argv0){
  static const char *zFmt = "Usage: %s database\n";
  fprintf(stderr, zFmt, argv0);
  exit(1);
}
/* Probe for a conflicting POSIX advisory lock on a byte range of the
** file.  If another process holds a conflicting lock, print a message
** on standard output (labelled with zType) and return 1.  Return 0
** if the range is unobstructed.
*/
static int isLocked(
  int h,                  /* File descriptor to check */
  int type,               /* F_RDLCK or F_WRLCK */
  unsigned int iOfst,     /* First byte of the lock */
  unsigned int iCnt,      /* Number of bytes in the lock range */
  const char *zType       /* Type of lock */
){
  struct flock probe;

  /* Ask the kernel whether a lock of the given type over the given
  ** range would conflict with a lock held by another process. */
  memset(&probe, 0, sizeof(probe));
  probe.l_type = type;
  probe.l_whence = SEEK_SET;
  probe.l_start = iOfst;
  probe.l_len = iCnt;
  if( fcntl(h, F_GETLK, &probe)==(-1) ){
    fprintf(stderr, "fcntl(%d) failed: errno=%d\n", h, errno);
    exit(1);
  }
  if( probe.l_type==F_UNLCK ){
    return 0;   /* No conflicting lock */
  }
  printf("%s lock held by %d\n", zType, (int)probe.l_pid);
  return 1;
}
/*
** Location of locking bytes in the database file.  These match the
** offsets used by SQLite itself (PENDING_BYTE at 0x40000000).
*/
#define PENDING_BYTE (0x40000000)
#define RESERVED_BYTE (PENDING_BYTE+1)
#define SHARED_FIRST (PENDING_BYTE+2)   /* First byte of the SHARED range */
#define SHARED_SIZE 510                 /* Number of bytes in the SHARED range */
/*
** Lock locations for shared-memory locks used by WAL mode.  These are
** byte offsets probed within the "-shm" file.
*/
#define SHM_BASE 120
#define SHM_WRITE SHM_BASE              /* The WAL WRITE lock */
#define SHM_CHECKPOINT (SHM_BASE+1)     /* The WAL CHECKPOINT lock */
#define SHM_RECOVER (SHM_BASE+2)        /* The WAL RECOVERY lock */
#define SHM_READ_FIRST (SHM_BASE+3)     /* First of the WAL READ locks */
#define SHM_READ_SIZE 5                 /* Number of WAL READ locks */
/*
** Open the database named on the command line and report which POSIX
** advisory locks, if any, other processes hold on it.  Returns 0 on
** success; exits or returns 1 if the file cannot be inspected.
*/
int main(int argc, char **argv){
  int hDb;                   /* File descriptor for the open database file */
  int hShm;                  /* File descriptor for WAL shared-memory file */
  char *zShm;                /* Name of the shared-memory file for WAL mode */
  ssize_t got;               /* Bytes read from header */
  int isWal;                 /* True if in WAL mode */
  int nName;                 /* Length of filename */
  unsigned char aHdr[100];   /* Database header */
  int nLock = 0;             /* Number of locks held */
  int i;                     /* Loop counter */
  if( argc!=2 ) usage(argv[0]);
  hDb = open(argv[1], O_RDONLY, 0);
  if( hDb<0 ){
    fprintf(stderr, "cannot open %s\n", argv[1]);
    return 1;
  }
  /* Make sure we are dealing with an SQLite database file by checking
  ** the 16-byte magic string at the start of the header. */
  got = read(hDb, aHdr, 100);
  if( got!=100 || memcmp(aHdr, "SQLite format 3",16)!=0 ){
    fprintf(stderr, "not an SQLite database: %s\n", argv[1]);
    exit(1);
  }
  /* First check for an exclusive lock.  If a read-lock probe over the
  ** SHARED range conflicts, some process holds a write lock there,
  ** i.e. an EXCLUSIVE lock, and nothing else can be held. */
  if( isLocked(hDb, F_RDLCK, SHARED_FIRST, SHARED_SIZE, "EXCLUSIVE") ){
    return 0;
  }
  /* Header byte 18 is the write-version; per the SQLite file format,
  ** the value 2 indicates a WAL-mode database. */
  isWal = aHdr[18]==2;
  if( isWal==0 ){
    /* Rollback mode: probe the PENDING, RESERVED and SHARED bytes.
    ** The SHARED probe uses F_WRLCK so that it also detects read
    ** (SHARED) locks held by other processes. */
    if( isLocked(hDb, F_RDLCK, PENDING_BYTE, 1, "PENDING") ) return 0;
    if( isLocked(hDb, F_RDLCK, RESERVED_BYTE, 1, "RESERVED") ) return 0;
    if( isLocked(hDb, F_WRLCK, SHARED_FIRST, SHARED_SIZE, "SHARED") ){
      return 0;
    }
  }else{
    /* WAL mode: the interesting locks live in the "-shm" file */
    nName = (int)strlen(argv[1]);
    zShm = malloc( nName + 100 );   /* never freed; process exits shortly */
    if( zShm==0 ){
      fprintf(stderr, "out of memory\n");
      exit(1);
    }
    memcpy(zShm, argv[1], nName);
    memcpy(&zShm[nName], "-shm", 5);
    hShm = open(zShm, O_RDONLY, 0);
    if( hShm<0 ){
      fprintf(stderr, "cannot open %s\n", zShm);
      return 1;
    }
    if( isLocked(hShm, F_RDLCK, SHM_RECOVER, 1, "WAL-RECOVERY") ){
      return 0;
    }
    nLock += isLocked(hShm, F_RDLCK, SHM_CHECKPOINT, 1, "WAL-CHECKPOINT");
    nLock += isLocked(hShm, F_RDLCK, SHM_WRITE, 1, "WAL-WRITE");
    /* A write-lock probe on each READ slot detects readers' locks */
    for(i=0; i<SHM_READ_SIZE; i++){
      nLock += isLocked(hShm, F_WRLCK, SHM_READ_FIRST+i, 1, "WAL-READ");
    }
  }
  if( nLock==0 ){
    printf("file is not locked\n");
  }
  return 0;
}

File diff suppressed because it is too large Load Diff

View File

@ -1,918 +0,0 @@
/*
** 2000-05-29
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
** Driver template for the LEMON parser generator.
**
** The "lemon" program processes an LALR(1) input grammar file, then uses
** this template to construct a parser. The "lemon" program inserts text
** at each "%%" line. Also, any "P-a-r-s-e" identifer prefix (without the
** interstitial "-" characters) contained in this template is changed into
** the value of the %name directive from the grammar. Otherwise, the content
** of this template is copied straight through into the generate parser
** source file.
**
** The following is the concatenation of all %include directives from the
** input grammar file:
*/
#include <stdio.h>
/************ Begin %include sections from the grammar ************************/
%%
/**************** End of %include directives **********************************/
/* These constants specify the various numeric values for terminal symbols
** in a format understandable to "makeheaders". This section is blank unless
** "lemon" is run with the "-m" command-line option.
***************** Begin makeheaders token definitions *************************/
%%
/**************** End makeheaders token definitions ***************************/
/* The next sections is a series of control #defines.
** various aspects of the generated parser.
** YYCODETYPE is the data type used to store the integer codes
** that represent terminal and non-terminal symbols.
** "unsigned char" is used if there are fewer than
** 256 symbols. Larger types otherwise.
** YYNOCODE is a number of type YYCODETYPE that is not used for
** any terminal or nonterminal symbol.
** YYFALLBACK If defined, this indicates that one or more tokens
** (also known as: "terminal symbols") have fall-back
** values which should be used if the original symbol
** would not parse. This permits keywords to sometimes
** be used as identifiers, for example.
** YYACTIONTYPE is the data type used for "action codes" - numbers
** that indicate what to do in response to the next
** token.
** ParseTOKENTYPE is the data type used for minor type for terminal
** symbols. Background: A "minor type" is a semantic
** value associated with a terminal or non-terminal
** symbols. For example, for an "ID" terminal symbol,
** the minor type might be the name of the identifier.
** Each non-terminal can have a different minor type.
** Terminal symbols all have the same minor type, though.
** This macros defines the minor type for terminal
** symbols.
** YYMINORTYPE is the data type used for all minor types.
** This is typically a union of many types, one of
** which is ParseTOKENTYPE. The entry in the union
** for terminal symbols is called "yy0".
** YYSTACKDEPTH is the maximum depth of the parser's stack. If
** zero the stack is dynamically sized using realloc()
** ParseARG_SDECL A static variable declaration for the %extra_argument
** ParseARG_PDECL A parameter declaration for the %extra_argument
** ParseARG_STORE Code to store %extra_argument into yypParser
** ParseARG_FETCH Code to extract %extra_argument from yypParser
** YYERRORSYMBOL is the code number of the error symbol. If not
** defined, then do no error processing.
** YYNSTATE the combined number of states.
** YYNRULE the number of rules in the grammar
** YY_MAX_SHIFT Maximum value for shift actions
** YY_MIN_SHIFTREDUCE Minimum value for shift-reduce actions
** YY_MAX_SHIFTREDUCE Maximum value for shift-reduce actions
** YY_MIN_REDUCE Maximum value for reduce actions
** YY_ERROR_ACTION The yy_action[] code for syntax error
** YY_ACCEPT_ACTION The yy_action[] code for accept
** YY_NO_ACTION The yy_action[] code for no-op
*/
#ifndef INTERFACE
# define INTERFACE 1
#endif
/************* Begin control #defines *****************************************/
%%
/************* End control #defines *******************************************/
/* The yyzerominor constant is used to initialize instances of
** YYMINORTYPE objects to zero. */
static const YYMINORTYPE yyzerominor = { 0 };
/* Define the yytestcase() macro to be a no-op if it is not already
** defined.
**
** Applications can choose to define yytestcase() in the %include section
** to a macro that can assist in verifying code coverage.  For production
** code the yytestcase() macro should be turned off.  But it is useful
** for testing.
*/
#ifndef yytestcase
# define yytestcase(X)
#endif
/* Next are the tables used to determine what action to take based on the
** current state and lookahead token. These tables are used to implement
** functions that take a state number and lookahead value and return an
** action integer.
**
** Suppose the action integer is N. Then the action is determined as
** follows
**
** 0 <= N <= YY_MAX_SHIFT Shift N. That is, push the lookahead
** token onto the stack and goto state N.
**
** N between YY_MIN_SHIFTREDUCE Shift to an arbitrary state then
** and YY_MAX_SHIFTREDUCE reduce by rule N-YY_MIN_SHIFTREDUCE.
**
** N between YY_MIN_REDUCE Reduce by rule N-YY_MIN_REDUCE
** and YY_MAX_REDUCE
** N == YY_ERROR_ACTION A syntax error has occurred.
**
** N == YY_ACCEPT_ACTION The parser accepts its input.
**
** N == YY_NO_ACTION No such action. Denotes unused
** slots in the yy_action[] table.
**
** The action table is constructed as a single large table named yy_action[].
** Given state S and lookahead X, the action is computed as
**
** yy_action[ yy_shift_ofst[S] + X ]
**
** If the index value yy_shift_ofst[S]+X is out of range or if the value
** yy_lookahead[yy_shift_ofst[S]+X] is not equal to X or if yy_shift_ofst[S]
** is equal to YY_SHIFT_USE_DFLT, it means that the action is not in the table
** and that yy_default[S] should be used instead.
**
** The formula above is for computing the action when the lookahead is
** a terminal symbol. If the lookahead is a non-terminal (as occurs after
** a reduce action) then the yy_reduce_ofst[] array is used in place of
** the yy_shift_ofst[] array and YY_REDUCE_USE_DFLT is used in place of
** YY_SHIFT_USE_DFLT.
**
** The following are the tables generated in this section:
**
** yy_action[] A single table containing all actions.
** yy_lookahead[] A table containing the lookahead for each entry in
** yy_action. Used to detect hash collisions.
** yy_shift_ofst[] For each state, the offset into yy_action for
** shifting terminals.
** yy_reduce_ofst[] For each state, the offset into yy_action for
** shifting non-terminals after a reduce.
** yy_default[] Default action for each state.
**
*********** Begin parsing tables **********************************************/
%%
/********** End of lemon-generated parsing tables *****************************/
/* The next table maps tokens (terminal symbols) into fallback tokens.
** If a construct like the following:
**
** %fallback ID X Y Z.
**
** appears in the grammar, then ID becomes a fallback token for X, Y,
** and Z. Whenever one of the tokens X, Y, or Z is input to the parser
** but it does not parse, the type of the token is changed to ID and
** the parse is retried before an error is thrown.
**
** This feature can be used, for example, to cause some keywords in a language
** to revert to identifiers if they keyword does not apply in the context where
** it appears.
*/
#ifdef YYFALLBACK
static const YYCODETYPE yyFallback[] = {
%%
};
#endif /* YYFALLBACK */
/* The following structure represents a single element of the
** parser's stack. Information stored includes:
**
** + The state number for the parser at this level of the stack.
**
** + The value of the token stored at this level of the stack.
** (In other words, the "major" token.)
**
** + The semantic value stored at this level of the stack. This is
** the information used by the action routines in the grammar.
** It is sometimes called the "minor" token.
**
** After the "shift" half of a SHIFTREDUCE action, the stateno field
** actually contains the reduce action for the second half of the
** SHIFTREDUCE.
*/
/* One element of the parser's stack.  Note that after the "shift" half
** of a SHIFTREDUCE action, stateno actually holds the reduce action for
** the second half (see yy_reduce()). */
struct yyStackEntry {
  YYACTIONTYPE stateno;  /* The state-number, or reduce action in SHIFTREDUCE */
  YYCODETYPE major;      /* The major token value.  This is the code
                         ** number for the token at this stack level */
  YYMINORTYPE minor;     /* The user-supplied minor token value.  This
                         ** is the value of the token */
};
typedef struct yyStackEntry yyStackEntry;
/* The state of the parser is completely contained in an instance of
** the following structure */
struct yyParser {
  int yyidx;                    /* Index of top element in stack; -1 means
                                ** the stack is empty/uninitialized */
#ifdef YYTRACKMAXSTACKDEPTH
  int yyidxMax;                 /* Maximum value of yyidx */
#endif
  int yyerrcnt;                 /* Shifts left before out of the error */
  ParseARG_SDECL                /* A place to hold %extra_argument */
#if YYSTACKDEPTH<=0
  int yystksz;                  /* Current size of the stack */
  yyStackEntry *yystack;        /* The parser's stack */
#else
  yyStackEntry yystack[YYSTACKDEPTH];  /* The parser's stack */
#endif
};
typedef struct yyParser yyParser;
#ifndef NDEBUG
#include <stdio.h>
static FILE *yyTraceFILE = 0;
static char *yyTracePrompt = 0;
#endif /* NDEBUG */
#ifndef NDEBUG
/*
** Turn parser tracing on by giving a stream to which to write the trace
** and a prompt to preface each trace message. Tracing is turned off
** by making either argument NULL
**
** Inputs:
** <ul>
** <li> A FILE* to which trace output should be written.
** If NULL, then tracing is turned off.
** <li> A prefix string written at the beginning of every
** line of trace output. If NULL, then tracing is
** turned off.
** </ul>
**
** Outputs:
** None.
*/
/* Enable tracing to TraceFILE with zTracePrompt prefixing each line.
** Tracing needs both a stream and a prompt; if either is missing,
** disable both. */
void ParseTrace(FILE *TraceFILE, char *zTracePrompt){
  yyTraceFILE = TraceFILE;
  yyTracePrompt = zTracePrompt;
  if( yyTraceFILE==0 ){
    yyTracePrompt = 0;
  }else if( yyTracePrompt==0 ){
    yyTraceFILE = 0;
  }
}
#endif /* NDEBUG */
#ifndef NDEBUG
/* For tracing shifts, the names of all terminals and nonterminals
** are required. The following table supplies these names */
static const char *const yyTokenName[] = {
%%
};
#endif /* NDEBUG */
#ifndef NDEBUG
/* For tracing reduce actions, the names of all rules are required.
*/
static const char *const yyRuleName[] = {
%%
};
#endif /* NDEBUG */
#if YYSTACKDEPTH<=0
/*
** Try to increase the size of the parser stack.
*/
/* Attempt to grow the parser stack.  On an out-of-memory condition
** the old (smaller) stack is silently retained. */
static void yyGrowStack(yyParser *p){
  int newSize = p->yystksz*2 + 100;
  yyStackEntry *pNew = realloc(p->yystack, newSize*sizeof(pNew[0]));
  if( pNew==0 ) return;          /* OOM: keep the old stack */
  p->yystack = pNew;
  p->yystksz = newSize;
#ifndef NDEBUG
  if( yyTraceFILE ){
    fprintf(yyTraceFILE,"%sStack grows to %d entries!\n",
            yyTracePrompt, p->yystksz);
  }
#endif
}
#endif
/* Datatype of the argument to the memory allocated passed as the
** second argument to ParseAlloc() below. This can be changed by
** putting an appropriate #define in the %include section of the input
** grammar.
*/
#ifndef YYMALLOCARGTYPE
# define YYMALLOCARGTYPE size_t
#endif
/*
** This function allocates a new parser.
** The only argument is a pointer to a function which works like
** malloc.
**
** Inputs:
** A pointer to the function used to allocate memory.
**
** Outputs:
** A pointer to a parser. This pointer is used in subsequent calls
** to Parse and ParseFree.
*/
void *ParseAlloc(void *(*mallocProc)(YYMALLOCARGTYPE)){
yyParser *pParser;
pParser = (yyParser*)(*mallocProc)( (YYMALLOCARGTYPE)sizeof(yyParser) );
if( pParser ){
pParser->yyidx = -1;
#ifdef YYTRACKMAXSTACKDEPTH
pParser->yyidxMax = 0;
#endif
#if YYSTACKDEPTH<=0
pParser->yystack = NULL;
pParser->yystksz = 0;
yyGrowStack(pParser);
#endif
}
return pParser;
}
/* The following function deletes the "minor type" or semantic value
** associated with a symbol. The symbol can be either a terminal
** or nonterminal. "yymajor" is the symbol code, and "yypminor" is
** a pointer to the value to be deleted. The code used to do the
** deletions is derived from the %destructor and/or %token_destructor
** directives of the input grammar.
*/
static void yy_destructor(
  yyParser *yypParser,    /* The parser */
  YYCODETYPE yymajor,     /* Type code for object to destroy */
  YYMINORTYPE *yypminor   /* The object to be destroyed */
){
  ParseARG_FETCH;   /* Bring %extra_argument into scope for destructor code */
  switch( yymajor ){
    /* Here is inserted the actions which take place when a
    ** terminal or non-terminal is destroyed.  This can happen
    ** when the symbol is popped from the stack during a
    ** reduce or during error processing or when a parser is
    ** being destroyed before it is finished parsing.
    **
    ** Note: during a reduce, the only symbols destroyed are those
    ** which appear on the RHS of the rule, but which are *not* used
    ** inside the C code.
    */
/********* Begin destructor definitions ***************************************/
%%
/********* End destructor definitions *****************************************/
    default:  break;   /* If no destructor action specified: do nothing */
  }
}
/*
** Pop the parser's stack once.
**
** If there is a destructor routine associated with the token which
** is popped from the stack, then call it.
*/
/* Remove the top entry from the parser's stack, invoking the
** registered destructor for the symbol that is removed. */
static void yy_pop_parser_stack(yyParser *pParser){
  yyStackEntry *pTop;
  assert( pParser->yyidx>=0 );
  pTop = &pParser->yystack[pParser->yyidx];
  pParser->yyidx--;
#ifndef NDEBUG
  if( yyTraceFILE ){
    fprintf(yyTraceFILE,"%sPopping %s\n", yyTracePrompt,
            yyTokenName[pTop->major]);
  }
#endif
  yy_destructor(pParser, pTop->major, &pTop->minor);
}
/*
** Deallocate and destroy a parser. Destructors are called for
** all stack elements before shutting the parser down.
**
** If the YYPARSEFREENEVERNULL macro exists (for example because it
** is defined in a %include section of the input grammar) then it is
** assumed that the input pointer is never NULL.
*/
void ParseFree(
  void *p,                    /* The parser to be deleted */
  void (*freeProc)(void*)     /* Function used to reclaim memory */
){
  yyParser *pParser = (yyParser*)p;
#ifndef YYPARSEFREENEVERNULL
  if( pParser==0 ) return;
#endif
  /* Run destructors for everything still on the stack */
  while( pParser->yyidx>=0 ){
    yy_pop_parser_stack(pParser);
  }
#if YYSTACKDEPTH<=0
  free(pParser->yystack);     /* Growable stack is heap-allocated */
#endif
  freeProc((void*)pParser);
}
/*
** Return the peak depth of the stack for a parser.
*/
#ifdef YYTRACKMAXSTACKDEPTH
int ParseStackPeak(void *p){
yyParser *pParser = (yyParser*)p;
return pParser->yyidxMax;
}
#endif
/*
** Find the appropriate action for a parser given the terminal
** look-ahead token iLookAhead.
*/
static int yy_find_shift_action(
  yyParser *pParser,        /* The parser */
  YYCODETYPE iLookAhead     /* The look-ahead token */
){
  int i;
  int stateno = pParser->yystack[pParser->yyidx].stateno;
  /* States numbered >= YY_MIN_REDUCE encode their reduce action directly */
  if( stateno>=YY_MIN_REDUCE ) return stateno;
  assert( stateno <= YY_SHIFT_COUNT );
  /* The loop repeats only when a %fallback token substitution occurs;
  ** all other paths return out of it. */
  do{
    i = yy_shift_ofst[stateno];
    if( i==YY_SHIFT_USE_DFLT ) return yy_default[stateno];
    assert( iLookAhead!=YYNOCODE );
    i += iLookAhead;
    /* yy_lookahead[] disambiguates overlapping rows of the packed
    ** action table: a mismatch means the entry belongs to some other
    ** state and the default action applies. */
    if( i<0 || i>=YY_ACTTAB_COUNT || yy_lookahead[i]!=iLookAhead ){
      if( iLookAhead>0 ){
#ifdef YYFALLBACK
        YYCODETYPE iFallback;            /* Fallback token */
        if( iLookAhead<sizeof(yyFallback)/sizeof(yyFallback[0])
               && (iFallback = yyFallback[iLookAhead])!=0 ){
#ifndef NDEBUG
          if( yyTraceFILE ){
            fprintf(yyTraceFILE, "%sFALLBACK %s => %s\n",
               yyTracePrompt, yyTokenName[iLookAhead], yyTokenName[iFallback]);
          }
#endif
          assert( yyFallback[iFallback]==0 ); /* Fallback loop must terminate */
          iLookAhead = iFallback;
          continue;   /* Retry the lookup with the fallback token */
        }
#endif
#ifdef YYWILDCARD
        {
          int j = i - iLookAhead + YYWILDCARD;
          /* The range checks are compiled in only when the table
          ** geometry makes an out-of-range j possible. */
          if(
#if YY_SHIFT_MIN+YYWILDCARD<0
            j>=0 &&
#endif
#if YY_SHIFT_MAX+YYWILDCARD>=YY_ACTTAB_COUNT
            j<YY_ACTTAB_COUNT &&
#endif
            yy_lookahead[j]==YYWILDCARD
          ){
#ifndef NDEBUG
            if( yyTraceFILE ){
              fprintf(yyTraceFILE, "%sWILDCARD %s => %s\n",
                 yyTracePrompt, yyTokenName[iLookAhead],
                 yyTokenName[YYWILDCARD]);
            }
#endif /* NDEBUG */
            return yy_action[j];
          }
        }
#endif /* YYWILDCARD */
      }
      return yy_default[stateno];
    }else{
      return yy_action[i];
    }
  }while(1);
}
/*
** Find the appropriate action for a parser given the non-terminal
** look-ahead token iLookAhead.
*/
static int yy_find_reduce_action(
  int stateno,              /* Current state number */
  YYCODETYPE iLookAhead     /* The look-ahead token */
){
  int i;
  /* With error recovery enabled, out-of-range states fall back to the
  ** default action at runtime; otherwise they are compile-time
  ** impossible and only asserted. */
#ifdef YYERRORSYMBOL
  if( stateno>YY_REDUCE_COUNT ){
    return yy_default[stateno];
  }
#else
  assert( stateno<=YY_REDUCE_COUNT );
#endif
  i = yy_reduce_ofst[stateno];
  assert( i!=YY_REDUCE_USE_DFLT );
  assert( iLookAhead!=YYNOCODE );
  i += iLookAhead;
#ifdef YYERRORSYMBOL
  if( i<0 || i>=YY_ACTTAB_COUNT || yy_lookahead[i]!=iLookAhead ){
    return yy_default[stateno];
  }
#else
  assert( i>=0 && i<YY_ACTTAB_COUNT );
  assert( yy_lookahead[i]==iLookAhead );
#endif
  return yy_action[i];
}
/*
** The following routine is called if the stack overflows.
*/
static void yyStackOverflow(yyParser *yypParser, YYMINORTYPE *yypMinor){
   ParseARG_FETCH;
   yypParser->yyidx--;   /* Undo the push that triggered the overflow */
#ifndef NDEBUG
   if( yyTraceFILE ){
     fprintf(yyTraceFILE,"%sStack Overflow!\n",yyTracePrompt);
   }
#endif
   /* Abandon the parse: run destructors for everything on the stack */
   while( yypParser->yyidx>=0 ) yy_pop_parser_stack(yypParser);
   /* Here code is inserted which will execute if the parser
   ** stack ever overflows */
/******** Begin %stack_overflow code ******************************************/
%%
/******** End %stack_overflow code ********************************************/
   ParseARG_STORE; /* Suppress warning about unused %extra_argument var */
}
/*
** Print tracing information for a SHIFT action
*/
#ifndef NDEBUG
/* Write a trace line describing a SHIFT action, if tracing is enabled. */
static void yyTraceShift(yyParser *yypParser, int yyNewState){
  const char *zMajor;
  if( yyTraceFILE==0 ) return;
  zMajor = yyTokenName[yypParser->yystack[yypParser->yyidx].major];
  if( yyNewState<YYNSTATE ){
    fprintf(yyTraceFILE,"%sShift '%s', go to state %d\n",
            yyTracePrompt, zMajor, yyNewState);
  }else{
    fprintf(yyTraceFILE,"%sShift '%s'\n", yyTracePrompt, zMajor);
  }
}
#else
# define yyTraceShift(X,Y)
#endif
/*
** Perform a shift action.
*/
static void yy_shift(
  yyParser *yypParser,          /* The parser to be shifted */
  int yyNewState,               /* The new state to shift in */
  int yyMajor,                  /* The major token to shift in */
  YYMINORTYPE *yypMinor         /* Pointer to the minor token to shift in */
){
  yyStackEntry *yytos;
  yypParser->yyidx++;
#ifdef YYTRACKMAXSTACKDEPTH
  if( yypParser->yyidx>yypParser->yyidxMax ){
    yypParser->yyidxMax = yypParser->yyidx;
  }
#endif
#if YYSTACKDEPTH>0
  /* Fixed-size stack: overflow is fatal for the parse */
  if( yypParser->yyidx>=YYSTACKDEPTH ){
    yyStackOverflow(yypParser, yypMinor);
    return;
  }
#else
  /* Growable stack: try to grow once; if still too small, the
  ** allocation failed and we treat it as an overflow. */
  if( yypParser->yyidx>=yypParser->yystksz ){
    yyGrowStack(yypParser);
    if( yypParser->yyidx>=yypParser->yystksz ){
      yyStackOverflow(yypParser, yypMinor);
      return;
    }
  }
#endif
  yytos = &yypParser->yystack[yypParser->yyidx];
  yytos->stateno = (YYACTIONTYPE)yyNewState;
  yytos->major = (YYCODETYPE)yyMajor;
  yytos->minor = *yypMinor;
  yyTraceShift(yypParser, yyNewState);
}
/* The following table contains information about every rule that
** is used during the reduce.
*/
static const struct {
YYCODETYPE lhs; /* Symbol on the left-hand side of the rule */
unsigned char nrhs; /* Number of right-hand side symbols in the rule */
} yyRuleInfo[] = {
%%
};
static void yy_accept(yyParser*); /* Forward Declaration */
/*
** Perform a reduce action and the shift that must immediately
** follow the reduce.
*/
static void yy_reduce(
  yyParser *yypParser,         /* The parser */
  int yyruleno                 /* Number of the rule by which to reduce */
){
  int yygoto;                     /* The next state */
  int yyact;                      /* The next action */
  YYMINORTYPE yygotominor;        /* The LHS of the rule reduced */
  yyStackEntry *yymsp;            /* The top of the parser's stack */
  int yysize;                     /* Amount to pop the stack */
  ParseARG_FETCH;
  yymsp = &yypParser->yystack[yypParser->yyidx];
#ifndef NDEBUG
  if( yyTraceFILE && yyruleno>=0
        && yyruleno<(int)(sizeof(yyRuleName)/sizeof(yyRuleName[0])) ){
    yysize = yyRuleInfo[yyruleno].nrhs;
    fprintf(yyTraceFILE, "%sReduce [%s], go to state %d.\n", yyTracePrompt,
      yyRuleName[yyruleno], yymsp[-yysize].stateno);
  }
#endif /* NDEBUG */
  yygotominor = yyzerominor;
  switch( yyruleno ){
  /* Beginning here are the reduction cases.  A typical example
  ** follows:
  **   case 0:
  **  #line <lineno> <grammarfile>
  **     { ... }           // User supplied code
  **  #line <lineno> <thisfile>
  **     break;
  */
/********** Begin reduce actions **********************************************/
%%
/********** End reduce actions ************************************************/
  };
  assert( yyruleno>=0 && yyruleno<sizeof(yyRuleInfo)/sizeof(yyRuleInfo[0]) );
  yygoto = yyRuleInfo[yyruleno].lhs;
  yysize = yyRuleInfo[yyruleno].nrhs;
  /* Pop the RHS symbols, then shift the LHS non-terminal */
  yypParser->yyidx -= yysize;
  yyact = yy_find_reduce_action(yymsp[-yysize].stateno,(YYCODETYPE)yygoto);
  if( yyact <= YY_MAX_SHIFTREDUCE ){
    if( yyact>YY_MAX_SHIFT ) yyact += YY_MIN_REDUCE - YY_MIN_SHIFTREDUCE;
    /* If the reduce action popped at least
    ** one element off the stack, then we can push the new element back
    ** onto the stack here, and skip the stack overflow test in yy_shift().
    ** That gives a significant speed improvement. */
    if( yysize ){
      yypParser->yyidx++;
      yymsp -= yysize-1;
      yymsp->stateno = (YYACTIONTYPE)yyact;
      yymsp->major = (YYCODETYPE)yygoto;
      yymsp->minor = yygotominor;
      yyTraceShift(yypParser, yyact);
    }else{
      yy_shift(yypParser,yyact,yygoto,&yygotominor);
    }
  }else{
    assert( yyact == YY_ACCEPT_ACTION );
    yy_accept(yypParser);
  }
}
/*
** The following code executes when the parse fails
*/
#ifndef YYNOERRORRECOVERY
/* Abandon the parse: pop the whole stack, then run %parse_failure code. */
static void yy_parse_failed(
  yyParser *yypParser           /* The parser */
){
  ParseARG_FETCH;
#ifndef NDEBUG
  if( yyTraceFILE ){
    fprintf(yyTraceFILE,"%sFail!\n",yyTracePrompt);
  }
#endif
  while( yypParser->yyidx>=0 ) yy_pop_parser_stack(yypParser);
  /* Here code is inserted which will be executed whenever the
  ** parser fails */
/************ Begin %parse_failure code ***************************************/
%%
/************ End %parse_failure code *****************************************/
  ParseARG_STORE; /* Suppress warning about unused %extra_argument variable */
}
#endif /* YYNOERRORRECOVERY */
/*
** The following code executes when a syntax error first occurs.
*/
static void yy_syntax_error(
  yyParser *yypParser,           /* The parser */
  int yymajor,                   /* The major type of the error token */
  YYMINORTYPE yyminor            /* The minor type of the error token */
){
  ParseARG_FETCH;
#define TOKEN (yyminor.yy0)   /* Convenience alias for %syntax_error code */
/************ Begin %syntax_error code ****************************************/
%%
/************ End %syntax_error code ******************************************/
  ParseARG_STORE; /* Suppress warning about unused %extra_argument variable */
}
/*
** The following is executed when the parser accepts
*/
/* Finish a successful parse: pop the stack, then run %parse_accept code. */
static void yy_accept(
  yyParser *yypParser           /* The parser */
){
  ParseARG_FETCH;
#ifndef NDEBUG
  if( yyTraceFILE ){
    fprintf(yyTraceFILE,"%sAccept!\n",yyTracePrompt);
  }
#endif
  while( yypParser->yyidx>=0 ) yy_pop_parser_stack(yypParser);
  /* Here code is inserted which will be executed whenever the
  ** parser accepts */
/*********** Begin %parse_accept code *****************************************/
%%
/*********** End %parse_accept code *******************************************/
  ParseARG_STORE; /* Suppress warning about unused %extra_argument variable */
}
/* The main parser program.
** The first argument is a pointer to a structure obtained from
** "ParseAlloc" which describes the current state of the parser.
** The second argument is the major token number. The third is
** the minor token. The fourth optional argument is whatever the
** user wants (and specified in the grammar) and is available for
** use by the action routines.
**
** Inputs:
** <ul>
** <li> A pointer to the parser (an opaque structure.)
** <li> The major token number.
** <li> The minor token number.
** <li> An option argument of a grammar-specified type.
** </ul>
**
** Outputs:
** None.
*/
void Parse(
  void *yyp,                   /* The parser */
  int yymajor,                 /* The major token code number */
  ParseTOKENTYPE yyminor       /* The value for the token */
  ParseARG_PDECL               /* Optional %extra_argument parameter */
){
  YYMINORTYPE yyminorunion;
  int yyact;            /* The parser action. */
#if !defined(YYERRORSYMBOL) && !defined(YYNOERRORRECOVERY)
  int yyendofinput;     /* True if we are at the end of input */
#endif
#ifdef YYERRORSYMBOL
  int yyerrorhit = 0;   /* True if yymajor has invoked an error */
#endif
  yyParser *yypParser;  /* The parser */
  /* (re)initialize the parser, if necessary */
  yypParser = (yyParser*)yyp;
  if( yypParser->yyidx<0 ){
#if YYSTACKDEPTH<=0
    /* A growable stack whose initial allocation failed in ParseAlloc()
    ** is reported as an overflow here rather than dereferenced. */
    if( yypParser->yystksz <=0 ){
      /*memset(&yyminorunion, 0, sizeof(yyminorunion));*/
      yyminorunion = yyzerominor;
      yyStackOverflow(yypParser, &yyminorunion);
      return;
    }
#endif
    yypParser->yyidx = 0;
    yypParser->yyerrcnt = -1;
    yypParser->yystack[0].stateno = 0;
    yypParser->yystack[0].major = 0;
#ifndef NDEBUG
    if( yyTraceFILE ){
      fprintf(yyTraceFILE,"%sInitialize. Empty stack. State 0\n",
              yyTracePrompt);
    }
#endif
  }
  yyminorunion.yy0 = yyminor;
#if !defined(YYERRORSYMBOL) && !defined(YYNOERRORRECOVERY)
  yyendofinput = (yymajor==0);   /* Token code 0 marks end of input */
#endif
  ParseARG_STORE;
#ifndef NDEBUG
  if( yyTraceFILE ){
    fprintf(yyTraceFILE,"%sInput '%s'\n",yyTracePrompt,yyTokenName[yymajor]);
  }
#endif
  /* Keep applying actions until the input token has been consumed
  ** (yymajor set to YYNOCODE) or the stack has been emptied. */
  do{
    yyact = yy_find_shift_action(yypParser,(YYCODETYPE)yymajor);
    if( yyact <= YY_MAX_SHIFTREDUCE ){
      if( yyact > YY_MAX_SHIFT ) yyact += YY_MIN_REDUCE - YY_MIN_SHIFTREDUCE;
      yy_shift(yypParser,yyact,yymajor,&yyminorunion);
      yypParser->yyerrcnt--;
      yymajor = YYNOCODE;
    }else if( yyact <= YY_MAX_REDUCE ){
      yy_reduce(yypParser,yyact-YY_MIN_REDUCE);
    }else{
      assert( yyact == YY_ERROR_ACTION );
#ifdef YYERRORSYMBOL
      int yymx;
#endif
#ifndef NDEBUG
      if( yyTraceFILE ){
        fprintf(yyTraceFILE,"%sSyntax Error!\n",yyTracePrompt);
      }
#endif
#ifdef YYERRORSYMBOL
      /* A syntax error has occurred.
      ** The response to an error depends upon whether or not the
      ** grammar defines an error token "ERROR".
      **
      ** This is what we do if the grammar does define ERROR:
      **
      **  * Call the %syntax_error function.
      **
      **  * Begin popping the stack until we enter a state where
      **    it is legal to shift the error symbol, then shift
      **    the error symbol.
      **
      **  * Set the error count to three.
      **
      **  * Begin accepting and shifting new tokens.  No new error
      **    processing will occur until three tokens have been
      **    shifted successfully.
      **
      */
      if( yypParser->yyerrcnt<0 ){
        yy_syntax_error(yypParser,yymajor,yyminorunion);
      }
      yymx = yypParser->yystack[yypParser->yyidx].major;
      if( yymx==YYERRORSYMBOL || yyerrorhit ){
#ifndef NDEBUG
        if( yyTraceFILE ){
          fprintf(yyTraceFILE,"%sDiscard input token %s\n",
             yyTracePrompt,yyTokenName[yymajor]);
        }
#endif
        yy_destructor(yypParser, (YYCODETYPE)yymajor,&yyminorunion);
        yymajor = YYNOCODE;
      }else{
        /* Pop states until one is found where the error symbol can
        ** legally be shifted. */
        while(
          yypParser->yyidx >= 0 &&
          yymx != YYERRORSYMBOL &&
          (yyact = yy_find_reduce_action(
                        yypParser->yystack[yypParser->yyidx].stateno,
                        YYERRORSYMBOL)) >= YY_MIN_REDUCE
        ){
          yy_pop_parser_stack(yypParser);
        }
        if( yypParser->yyidx < 0 || yymajor==0 ){
          yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion);
          yy_parse_failed(yypParser);
          yymajor = YYNOCODE;
        }else if( yymx!=YYERRORSYMBOL ){
          YYMINORTYPE u2;
          u2.YYERRSYMDT = 0;
          yy_shift(yypParser,yyact,YYERRORSYMBOL,&u2);
        }
      }
      yypParser->yyerrcnt = 3;
      yyerrorhit = 1;
#elif defined(YYNOERRORRECOVERY)
      /* If the YYNOERRORRECOVERY macro is defined, then do not attempt to
      ** do any kind of error recovery.  Instead, simply invoke the syntax
      ** error routine and continue going as if nothing had happened.
      **
      ** Applications can set this macro (for example inside %include) if
      ** they intend to abandon the parse upon the first syntax error seen.
      */
      yy_syntax_error(yypParser,yymajor,yyminorunion);
      yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion);
      yymajor = YYNOCODE;
#else  /* YYERRORSYMBOL is not defined */
      /* This is what we do if the grammar does not define ERROR:
      **
      **  * Report an error message, and throw away the input token.
      **
      **  * If the input token is $, then fail the parse.
      **
      ** As before, subsequent error messages are suppressed until
      ** three input tokens have been successfully shifted.
      */
      if( yypParser->yyerrcnt<=0 ){
        yy_syntax_error(yypParser,yymajor,yyminorunion);
      }
      yypParser->yyerrcnt = 3;
      yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion);
      if( yyendofinput ){
        yy_parse_failed(yypParser);
      }
      yymajor = YYNOCODE;
#endif
    }
  }while( yymajor!=YYNOCODE && yypParser->yyidx>=0 );
#ifndef NDEBUG
  if( yyTraceFILE ){
    int i;
    fprintf(yyTraceFILE,"%sReturn. Stack=",yyTracePrompt);
    for(i=1; i<=yypParser->yyidx; i++)
      fprintf(yyTraceFILE,"%c%s", i==1 ? '[' : ' ',
              yyTokenName[yypParser->yystack[i].major]);
    fprintf(yyTraceFILE,"]\n");
  }
#endif
  return;
}

View File

@ -1,242 +0,0 @@
/*
** 2014-07-28
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
**
** This file implements a utility program that will load many disk
** files (all files under a given directory) into a FTS table. This is
** used for performance testing of FTS3, FTS4, and FTS5.
*/
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
#include <assert.h>
#include <string.h>
#include <errno.h>
#include <dirent.h>
#include "sqlite3.h"
/*
** Implementation of the "readtext(X)" SQL function. The entire content
** of the file named X is read and returned as a TEXT value. It is assumed
** the file contains UTF-8 text. NULL is returned if the file does not
** exist or is unreadable.
*/
static void readfileFunc(
  sqlite3_context *context,
  int argc,
  sqlite3_value **argv
){
  const char *zName;          /* Name of the file to read (SQL argument 1) */
  FILE *in;                   /* Input stream */
  long nIn;                   /* Size of the file in bytes */
  void *pBuf;                 /* Buffer holding the entire file content */

  zName = (const char*)sqlite3_value_text(argv[0]);
  if( zName==0 ) return;                /* NULL filename -> NULL result */
  in = fopen(zName, "rb");
  if( in==0 ) return;                   /* Unreadable file -> NULL result */

  /* Determine the file size.  The original code did not check these calls;
  ** on a non-seekable stream (e.g. a FIFO) ftell() returns -1, and that
  ** negative value was passed straight into sqlite3_malloc(). */
  if( fseek(in, 0, SEEK_END)!=0 ){
    fclose(in);
    return;
  }
  nIn = ftell(in);
  if( nIn<0 ){
    fclose(in);
    return;
  }
  rewind(in);

  pBuf = sqlite3_malloc( nIn );
  if( pBuf && 1==fread(pBuf, nIn, 1, in) ){
    /* Hand the buffer to SQLite; sqlite3_free is the destructor */
    sqlite3_result_text(context, pBuf, nIn, sqlite3_free);
  }else{
    sqlite3_free(pBuf);                 /* OOM or short read -> NULL result */
  }
  fclose(in);
}
/*
** Print usage text for this program and exit.
*/
static void showHelp(const char *zArgv0){
  /* This routine never returns: it prints the usage text and exits(1). */
  printf("\n"
"Usage: %s SWITCHES... DB\n"
"\n"
" This program opens the database named on the command line and attempts to\n"
" create an FTS table named \"fts\" with a single column. If successful, it\n"
" recursively traverses the directory named by the -dir option and inserts\n"
" the contents of each file into the fts table. All files are assumed to\n"
" contain UTF-8 text.\n"
"\n"
"Switches are:\n"
" -fts [345] FTS version to use (default=5)\n"
" -idx [01] Create a mapping from filename to rowid (default=0)\n"
" -dir <path> Root of directory tree to load data from (default=.)\n"
" -trans <integer> Number of inserts per transaction (default=1)\n"
, zArgv0
);
  exit(1);
}
/*
** Exit with a message based on the argument and the current value of errno.
*/
static void error_out(const char *zText){
  /* Report zText together with the current errno message, then quit. */
  const char *zErrno = strerror(errno);
  fprintf(stderr, "%s: %s\n", zText, zErrno);
  exit(-1);
}
/*
** Exit with a message based on the first argument and the error message
** currently stored in database handle db.
*/
static void sqlite_error_out(const char *zText, sqlite3 *db){
  /* Report zText together with the database handle's error message. */
  const char *zDbErr = sqlite3_errmsg(db);
  fprintf(stderr, "%s: %s\n", zText, zDbErr);
  exit(-1);
}
/*
** Context object for visit_file().
*/
typedef struct VisitContext VisitContext;
struct VisitContext {
  int nRowPerTrans;        /* Inserts per transaction (0 = no explicit txn) */
  sqlite3 *db;             /* Database handle */
  sqlite3_stmt *pInsert;   /* INSERT INTO fts VALUES(readtext(:1)) */
};
/*
** Callback used with traverse(). The first argument points to an object
** of type VisitContext. This function inserts the contents of the text
** file zPath into the FTS table.
*/
void visit_file(void *pCtx, const char *zPath){
  int rc;
  VisitContext *p = (VisitContext*)pCtx;
  /* printf("%s\n", zPath); */
  /* Bind the path; the readtext() SQL function compiled into the prepared
  ** statement does the actual file reading. */
  sqlite3_bind_text(p->pInsert, 1, zPath, -1, SQLITE_STATIC);
  sqlite3_step(p->pInsert);
  rc = sqlite3_reset(p->pInsert);  /* reset() reports any error from step() */
  if( rc!=SQLITE_OK ){
    sqlite_error_out("insert", p->db);
  }else if( p->nRowPerTrans>0
       && (sqlite3_last_insert_rowid(p->db) % p->nRowPerTrans)==0
  ){
    /* Commit and reopen the write transaction every nRowPerTrans rows */
    sqlite3_exec(p->db, "COMMIT ; BEGIN", 0, 0, 0);
  }
}
/*
** Recursively traverse directory zDir. For each file that is not a
** directory, invoke the supplied callback with its path.
*/
static void traverse(
  const char *zDir,                     /* Directory to traverse */
  void *pCtx,                           /* First argument passed to callback */
  void (*xCallback)(void*, const char *zPath)
){
  DIR *d;
  struct dirent *e;

  d = opendir(zDir);
  if( d==0 ) error_out("opendir()");

  for(e=readdir(d); e; e=readdir(d)){
    /* Skip the self and parent pseudo-entries */
    if( strcmp(e->d_name, ".")==0 || strcmp(e->d_name, "..")==0 ) continue;
    char *zPath = sqlite3_mprintf("%s/%s", zDir, e->d_name);
    /* Fix: the original tested (e->d_type & DT_DIR), but DT_DIR is an
    ** enumerated value, not a single-bit mask.  On Linux DT_BLK==6 and
    ** DT_DIR==4, so block-device entries matched the mask test and were
    ** recursed into as directories.  Test for equality instead.
    ** NOTE(review): DT_UNKNOWN entries are treated as regular files,
    ** same as the original -- confirm acceptable for the target FS. */
    if( e->d_type==DT_DIR ){
      traverse(zPath, pCtx, xCallback);
    }else{
      xCallback(pCtx, zPath);
    }
    sqlite3_free(zPath);
  }
  closedir(d);
}
/*
** Entry point.  Parse command-line switches, open (or create) the database
** named as the last argument, create an FTS table named "fts", then load
** every file under the -dir tree into it.  See showHelp() for usage.
*/
int main(int argc, char **argv){
  int iFts = 5;                   /* Value of -fts option */
  int bMap = 0;                   /* True to create mapping table */
  const char *zDir = ".";         /* Directory to scan */
  int i;
  int rc;
  int nRowPerTrans = 0;           /* Value of -trans option (0 = autocommit) */
  sqlite3 *db;
  char *zSql;
  VisitContext sCtx;
  int nCmd = 0;                   /* Number of -special commands */
  char **aCmd = 0;                /* Array of -special command strings */

  /* Switches come in pairs followed by the DB name: argc must be even */
  if( argc % 2 ) showHelp(argv[0]);
  for(i=1; i<(argc-1); i+=2){
    char *zOpt = argv[i];
    char *zArg = argv[i+1];
    if( strcmp(zOpt, "-fts")==0 ){
      iFts = atoi(zArg);
      if( iFts!=3 && iFts!=4 && iFts!= 5) showHelp(argv[0]);
    }
    else if( strcmp(zOpt, "-trans")==0 ){
      nRowPerTrans = atoi(zArg);
    }
    else if( strcmp(zOpt, "-idx")==0 ){
      /* NOTE(review): bMap is parsed and validated but never used below */
      bMap = atoi(zArg);
      if( bMap!=0 && bMap!=1 ) showHelp(argv[0]);
    }
    else if( strcmp(zOpt, "-dir")==0 ){
      zDir = zArg;
    }
    else if( strcmp(zOpt, "-special")==0 ){
      /* NOTE(review): sqlite3_realloc() result is not checked for NULL */
      nCmd++;
      aCmd = sqlite3_realloc(aCmd, sizeof(char*) * nCmd);
      aCmd[nCmd-1] = zArg;
    }
    else{
      showHelp(argv[0]);
    }
  }

  /* Open the database file */
  rc = sqlite3_open(argv[argc-1], &db);
  if( rc!=SQLITE_OK ) sqlite_error_out("sqlite3_open()", db);
  rc = sqlite3_create_function(db, "readtext", 1, SQLITE_UTF8, 0,
      readfileFunc, 0, 0);
  if( rc!=SQLITE_OK ) sqlite_error_out("sqlite3_create_function()", db);

  /* Create the FTS table */
  zSql = sqlite3_mprintf("CREATE VIRTUAL TABLE fts USING fts%d(content)", iFts);
  rc = sqlite3_exec(db, zSql, 0, 0, 0);
  if( rc!=SQLITE_OK ) sqlite_error_out("sqlite3_exec(1)", db);
  sqlite3_free(zSql);

  /* Apply any -special commands: INSERT INTO fts(fts) VALUES('cmd') */
  for(i=0; i<nCmd; i++){
    zSql = sqlite3_mprintf("INSERT INTO fts(fts) VALUES(%Q)", aCmd[i]);
    rc = sqlite3_exec(db, zSql, 0, 0, 0);
    if( rc!=SQLITE_OK ) sqlite_error_out("sqlite3_exec(1)", db);
    sqlite3_free(zSql);
  }

  /* Compile the INSERT statement to write data to the FTS table. */
  memset(&sCtx, 0, sizeof(VisitContext));
  sCtx.db = db;
  sCtx.nRowPerTrans = nRowPerTrans;
  rc = sqlite3_prepare_v2(db,
      "INSERT INTO fts VALUES(readtext(?))", -1, &sCtx.pInsert, 0
  );
  if( rc!=SQLITE_OK ) sqlite_error_out("sqlite3_prepare_v2(1)", db);

  /* Load all files in the directory hierarchy into the FTS table. */
  if( sCtx.nRowPerTrans>0 ) sqlite3_exec(db, "BEGIN", 0, 0, 0);
  traverse(zDir, (void*)&sCtx, visit_file);
  if( sCtx.nRowPerTrans>0 ) sqlite3_exec(db, "COMMIT", 0, 0, 0);

  /* Clean up and exit. */
  sqlite3_finalize(sCtx.pInsert);
  sqlite3_close(db);
  sqlite3_free(aCmd);
  return 0;
}

View File

@ -1,170 +0,0 @@
/*
** 2013-06-10
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
** This file contains a simple command-line utility for converting from
** integers and LogEst values and back again and for doing simple
** arithmetic operations (multiple and add) on LogEst values.
**
** Usage:
**
** ./LogEst ARGS
**
** See the showHelp() routine for a description of valid arguments.
** Examples:
**
** To convert 123 from LogEst to integer:
**
** ./LogEst ^123
**
** To convert 123456 from integer to LogEst:
**
** ./LogEst 123456
**
*/
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
#include <assert.h>
#include <string.h>
#include "sqlite3.h"
/* A LogEst is ten times the base-2 logarithm of the quantity it encodes. */
typedef short int LogEst;          /* 10 times log2() */

/* Multiply two LogEst values.  In log space, multiplication of the
** underlying quantities is addition of their logarithms. */
LogEst logEstMultiply(LogEst a, LogEst b){
  LogEst product = (LogEst)(a + b);
  return product;
}
/*
** Add two quantities that are represented as LogEst values: return the
** LogEst of the SUM of the underlying quantities.  The x[] table gives
** the correction (in tenths of a bit) to add to the larger operand,
** indexed by the difference a-b.
*/
LogEst logEstAdd(LogEst a, LogEst b){
  static const unsigned char x[] = {
    10, 10, /* 0,1 */
    9, 9, /* 2,3 */
    8, 8, /* 4,5 */
    7, 7, 7, /* 6,7,8 */
    6, 6, 6, /* 9,10,11 */
    5, 5, 5, /* 12-14 */
    4, 4, 4, 4, /* 15-18 */
    3, 3, 3, 3, 3, 3, /* 19-24 */
    2, 2, 2, 2, 2, 2, 2, /* 25-31 */
  };
  if( a<b ){ LogEst t = a; a = b; b = t; }  /* Ensure a is the larger */
  if( a>b+49 ) return a;      /* b is negligible: no correction needed */
  if( a>b+31 ) return a+1;    /* Past the end of the table: 0.1-bit bump */
  return a+x[a-b];            /* Table-driven correction */
}
/*
** Convert a 64-bit unsigned integer into its LogEst representation
** (10*log2(x), approximately).  Inputs 0 and 1 both map to 0.
*/
LogEst logEstFromInteger(sqlite3_uint64 x){
  /* a[i] is round(10*log2(8+i)) - 30, the fractional contribution of the
  ** top three mantissa bits */
  static LogEst a[] = { 0, 2, 3, 5, 6, 7, 8, 9 };
  LogEst y = 40;
  if( x<8 ){
    if( x<2 ) return 0;
    while( x<8 ){ y -= 10; x <<= 1; }    /* Normalize small x upward */
  }else{
    while( x>255 ){ y += 40; x >>= 4; }  /* Shift down four bits at a time */
    while( x>15 ){ y += 10; x >>= 1; }   /* Then one bit at a time */
  }
  return a[x&7] + y - 10;
}
/*
** Convert a LogEst back into an ordinary (approximate) 64-bit integer.
** Approximate inverse of logEstFromInteger().
*/
static sqlite3_uint64 logEstToInt(LogEst x){
  sqlite3_uint64 n;
  if( x<10 ) return 1;          /* Anything below 2^1 rounds to 1 */
  n = x%10;                     /* Fractional bits, in tenths of a bit */
  x /= 10;                      /* Whole number of bits */
  if( n>=5 ) n -= 2;            /* Fold tenths into a 3-bit mantissa */
  else if( n>=1 ) n -= 1;
  if( x>=3 ) return (n+8)<<(x-3);
  return (n+8)>>(3-x);
}
/*
** Convert a double into its LogEst representation.  Small and mid-range
** values are routed through logEstFromInteger() with scaling; very large
** values read the IEEE-754 exponent directly from the bit pattern.
*/
static LogEst logEstFromDouble(double x){
  sqlite3_uint64 a;
  LogEst e;
  assert( sizeof(x)==8 && sizeof(a)==8 );   /* Bit-copy below requires this */
  if( x<=0.0 ) return -32768;               /* Minimum possible LogEst */
  if( x<0.01 ) return -logEstFromDouble(1.0/x);
  if( x<1.0 ) return logEstFromDouble(100.0*x) - 66;  /* 66 ~ 10*log2(100) */
  if( x<1024.0 ) return logEstFromInteger((sqlite3_uint64)(1024.0*x)) - 100;
  if( x<=2000000000.0 ) return logEstFromInteger((sqlite3_uint64)x);
  memcpy(&a, &x, 8);            /* Extract the IEEE-754 biased exponent */
  e = (a>>52) - 1022;
  return e*10;
}
/*
** Return true (1) if string z consists of one or more decimal digits and
** nothing else.  Return 0 otherwise.
**
** Fix: the original accepted the empty string (the scan loop never ran
** and the terminating NUL satisfied the check), so "" was treated as the
** integer 0 by main() instead of being rejected as a bad argument.
*/
int isInteger(const char *z){
  const char *zStart = z;                 /* Remember where the scan began */
  while( z[0]>='0' && z[0]<='9' ) z++;    /* Skip over digits */
  /* Valid only if at least one digit was consumed and we reached the NUL */
  return z>zStart && z[0]==0;
}
/*
** Return true (1) if string z is non-empty and consists entirely of
** characters that can appear in a floating-point literal (digits, '.',
** 'E', 'e', '+', '-').  Return 0 otherwise.
**
** Fix: like isInteger(), the original accepted the empty string because
** the scan loop never ran and the terminating NUL passed the final check.
*/
int isFloat(const char *z){
  const char *zStart = z;                 /* Remember where the scan began */
  char c;
  while( ((c=z[0])>='0' && c<='9') || c=='.' || c=='E' || c=='e'
         || c=='+' || c=='-' ) z++;
  /* Valid only if at least one character was consumed and we hit the NUL */
  return z>zStart && z[0]==0;
}
/*
** Print usage text for this program on standard output, then exit(1).
** This routine never returns.
**
** Fix: two typos in the user-visible help text are corrected:
** "Multiple" -> "Multiply" and "Dupliate" -> "Duplicate".
*/
static void showHelp(const char *zArgv0){
  printf("Usage: %s ARGS...\n", zArgv0);
  printf("Arguments:\n"
" NUM Convert NUM from integer to LogEst and push onto the stack\n"
" ^NUM Interpret NUM as a LogEst and push onto stack\n"
" x Multiply the top two elements of the stack\n"
" + Add the top two elements of the stack\n"
" dup Duplicate the top element on the stack\n"
" inv Take the reciprocal of the top of stack. N = 1/N.\n"
" log Find the LogEst of the number on top of stack\n"
" nlogn Compute NlogN where N is the top of stack\n"
  );
  exit(1);
}
/*
** Entry point.  Treat the command-line arguments as an RPN expression
** over LogEst values (see showHelp()), then print the final stack with
** decoded approximate values.
*/
int main(int argc, char **argv){
  int i;
  int n = 0;          /* Number of values currently on the stack */
  LogEst a[100];      /* The operand stack; overflow is not checked */
  for(i=1; i<argc; i++){
    const char *z = argv[i];
    if( strcmp(z,"+")==0 ){
      if( n>=2 ){
        a[n-2] = logEstAdd(a[n-2],a[n-1]);
        n--;
      }
    }else if( strcmp(z,"x")==0 ){
      if( n>=2 ){
        a[n-2] = logEstMultiply(a[n-2],a[n-1]);
        n--;
      }
    }else if( strcmp(z,"dup")==0 ){
      if( n>0 ){
        a[n] = a[n-1];
        n++;
      }
    }else if( strcmp(z,"log")==0 ){
      /* NOTE(review): the -33 offset presumably rescales the result of
      ** taking the LogEst of a value that is itself a LogEst -- confirm */
      if( n>0 ) a[n-1] = logEstFromInteger(a[n-1]) - 33;
    }else if( strcmp(z,"nlogn")==0 ){
      if( n>0 ) a[n-1] += logEstFromInteger(a[n-1]) - 33;
    }else if( strcmp(z,"inv")==0 ){
      /* Reciprocal: negate the logarithm */
      if( n>0 ) a[n-1] = -a[n-1];
    }else if( z[0]=='^' ){
      /* ^NUM is already a LogEst: push it verbatim */
      a[n++] = atoi(z+1);
    }else if( isInteger(z) ){
      a[n++] = logEstFromInteger(atoi(z));
    }else if( isFloat(z) && z[0]!='-' ){
      a[n++] = logEstFromDouble(atof(z));
    }else{
      showHelp(argv[0]);
    }
  }
  /* Print the stack from top to bottom, decoding each LogEst */
  for(i=n-1; i>=0; i--){
    if( a[i]<-40 ){
      /* Very small value: display as a reciprocal */
      printf("%5d (%f)\n", a[i], 1.0/(double)logEstToInt(-a[i]));
    }else if( a[i]<10 ){
      /* Value below 1.0: scale by 2^10 (LogEst +100) before decoding */
      printf("%5d (%f)\n", a[i], logEstToInt(a[i]+100)/1024.0);
    }else{
      /* General case: display with two decimal digits */
      sqlite3_uint64 x = logEstToInt(a[i]+100)*100/1024;
      printf("%5d (%lld.%02lld)\n", a[i], x/100, x%100);
    }
  }
  return 0;
}

View File

@ -1,91 +0,0 @@
#!/bin/sh
# This script is used to build the amalgamation autoconf package.
# It assumes the following:
#
# 1. The files "sqlite3.c", "sqlite3.h" and "sqlite3ext.h"
# are available in the current directory.
#
# 2. Variable $TOP is set to the full path of the root directory
# of the SQLite source tree.
#
# 3. There is nothing of value in the ./mkpkg_tmp_dir directory.
# This is important, as the script executes "rm -rf ./mkpkg_tmp_dir".
#
# Bail out of the script if any command returns a non-zero exit
# status. Or if the script tries to use an unset variable. These
# may fail for old /bin/sh interpreters.
#
set -e
set -u
# Staging area for the package contents; deleted and recreated below.
TMPSPACE=./mkpkg_tmp_dir
VERSION=`cat $TOP/VERSION`
# First 10 characters of the checkout id, and a YYYYMMDDHHMM timestamp
# taken from the Fossil manifest.
# NOTE(review): HASH is computed here but never referenced below.
HASH=`sed 's/^\(..........\).*/\1/' $TOP/manifest.uuid`
DATETIME=`grep '^D' $TOP/manifest | sed -e 's/[^0-9]//g' -e 's/\(............\).*/\1/'`
# If this script is given an argument of --snapshot, then generate a
# snapshot tarball named for the current checkout SHA1 hash, rather than
# the version number.
#
# NOTE(review): with no arguments the test below is false, so the
# snapshot name is used -- that appears to contradict the comment above;
# confirm the intended sense of this condition.  Also $1 is unquoted, so
# an argument containing whitespace would break the test.
if test "$#" -ge 1 -a x$1 != x--snapshot
then
  # Set global variable $ARTIFACT to the "3xxyyzz" string incorporated
  # into artifact filenames. And $VERSION2 to the "3.x.y[.z]" form.
  xx=`echo $VERSION|sed 's/3\.\([0-9]*\)\..*/\1/'`
  yy=`echo $VERSION|sed 's/3\.[^.]*\.\([0-9]*\).*/\1/'`
  zz=0
  # The patch-level component is optional; tolerate a failed match here.
  set +e
  zz=`echo $VERSION|sed 's/3\.[^.]*\.[^.]*\.\([0-9]*\).*/\1/'|grep -v '\.'`
  set -e
  TARBALLNAME=`printf "sqlite-autoconf-3%.2d%.2d%.2d" $xx $yy $zz`
else
  TARBALLNAME=sqlite-snapshot-$DATETIME
fi
# Populate the staging area with the autoconf skeleton plus the
# amalgamation sources and supporting files.
rm -rf $TMPSPACE
cp -R $TOP/autoconf $TMPSPACE
cp sqlite3.c $TMPSPACE
cp sqlite3.h $TMPSPACE
cp sqlite3ext.h $TMPSPACE
cp $TOP/sqlite3.1 $TMPSPACE
cp $TOP/sqlite3.pc.in $TMPSPACE
cp $TOP/src/shell.c $TMPSPACE
cp $TOP/src/sqlite3.rc $TMPSPACE
# Substitute the real version number into configure.ac.
cat $TMPSPACE/configure.ac |
  sed "s/--SQLITE-VERSION--/$VERSION/" > $TMPSPACE/tmp
mv $TMPSPACE/tmp $TMPSPACE/configure.ac
cd $TMPSPACE
autoreconf -i
#libtoolize
#aclocal
#autoconf
#automake --add-missing
# Build the TEA (Tcl Extension Architecture) tclsqlite3.c wrapper, which
# either includes the system sqlite3.h or the bundled amalgamation.
mkdir -p tea/generic
echo "#ifdef USE_SYSTEM_SQLITE" > tea/generic/tclsqlite3.c
echo "# include <sqlite3.h>" >> tea/generic/tclsqlite3.c
echo "#else" >> tea/generic/tclsqlite3.c
echo "#include \"sqlite3.c\"" >> tea/generic/tclsqlite3.c
echo "#endif" >> tea/generic/tclsqlite3.c
cat $TOP/src/tclsqlite.c >> tea/generic/tclsqlite3.c
# Patch the TEA configure.ac with the real version number too.
cat tea/configure.ac |
  sed "s/AC_INIT(\[sqlite\], .*)/AC_INIT([sqlite], [$VERSION])/" > tmp
mv tmp tea/configure.ac
cd tea
autoconf
rm -rf autom4te.cache
cd ../
# Use "make dist" to produce the final tarball, then rename it to the
# artifact name chosen above and move it up out of the staging area.
./configure && make dist
tar -xzf sqlite-$VERSION.tar.gz
mv sqlite-$VERSION $TARBALLNAME
tar -czf $TARBALLNAME.tar.gz $TARBALLNAME
mv $TARBALLNAME.tar.gz ..
cd ..
ls -l $TARBALLNAME.tar.gz

View File

@ -1,841 +0,0 @@
/*
** Compile and run this standalone program in order to generate code that
** implements a function that will translate alphabetic identifiers into
** parser token codes.
*/
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <assert.h>
/*
** A header comment placed at the beginning of generated code.
*/
static const char zHdr[] =
"/***** This file contains automatically generated code ******\n"
"**\n"
"** The code in this file has been automatically generated by\n"
"**\n"
"** sqlite/tool/mkkeywordhash.c\n"
"**\n"
"** The code in this file implements a function that determines whether\n"
"** or not a given identifier is really an SQL keyword. The same thing\n"
"** might be implemented more directly using a hand-written hash table.\n"
"** But by using this automatically generated code, the size of the code\n"
"** is substantially reduced. This is important for embedded applications\n"
"** on platforms with limited memory.\n"
"*/\n"
;
/*
** All the keywords of the SQL language are stored in a hash
** table composed of instances of the following structure.
*/
typedef struct Keyword Keyword;
struct Keyword {
  char *zName; /* The keyword name */
  char *zTokenType; /* Token value for this keyword */
  int mask; /* Code this keyword if non-zero */
  int id; /* Unique ID for this record */
  int hash; /* Hash on the keyword */
  int offset; /* Offset to start of name string */
  int len; /* Length of this keyword, not counting final \000 */
  int prefix; /* Number of characters in prefix */
  int longestSuffix; /* Longest suffix that is a prefix on another word */
  int iNext; /* Index in aKeywordTable[] of next with same hash */
  int substrId; /* Id to another keyword this keyword is embedded in */
  int substrOffset; /* Offset into substrId for start of this keyword */
  char zOrigName[20]; /* Original keyword name before processing.  Keywords
                      ** longer than 19 chars trip the assert in main(). */
};
/*
** Define masks used to determine which keywords are allowed
*/
#ifdef SQLITE_OMIT_ALTERTABLE
# define ALTER 0
#else
# define ALTER 0x00000001
#endif
#define ALWAYS 0x00000002
#ifdef SQLITE_OMIT_ANALYZE
# define ANALYZE 0
#else
# define ANALYZE 0x00000004
#endif
#ifdef SQLITE_OMIT_ATTACH
# define ATTACH 0
#else
# define ATTACH 0x00000008
#endif
#ifdef SQLITE_OMIT_AUTOINCREMENT
# define AUTOINCR 0
#else
# define AUTOINCR 0x00000010
#endif
#ifdef SQLITE_OMIT_CAST
# define CAST 0
#else
# define CAST 0x00000020
#endif
#ifdef SQLITE_OMIT_COMPOUND_SELECT
# define COMPOUND 0
#else
# define COMPOUND 0x00000040
#endif
#ifdef SQLITE_OMIT_CONFLICT_CLAUSE
# define CONFLICT 0
#else
# define CONFLICT 0x00000080
#endif
#ifdef SQLITE_OMIT_EXPLAIN
# define EXPLAIN 0
#else
# define EXPLAIN 0x00000100
#endif
#ifdef SQLITE_OMIT_FOREIGN_KEY
# define FKEY 0
#else
# define FKEY 0x00000200
#endif
#ifdef SQLITE_OMIT_PRAGMA
# define PRAGMA 0
#else
# define PRAGMA 0x00000400
#endif
#ifdef SQLITE_OMIT_REINDEX
# define REINDEX 0
#else
# define REINDEX 0x00000800
#endif
#ifdef SQLITE_OMIT_SUBQUERY
# define SUBQUERY 0
#else
# define SUBQUERY 0x00001000
#endif
#ifdef SQLITE_OMIT_TRIGGER
# define TRIGGER 0
#else
# define TRIGGER 0x00002000
#endif
#if defined(SQLITE_OMIT_AUTOVACUUM) && \
(defined(SQLITE_OMIT_VACUUM) || defined(SQLITE_OMIT_ATTACH))
# define VACUUM 0
#else
# define VACUUM 0x00004000
#endif
#ifdef SQLITE_OMIT_VIEW
# define VIEW 0
#else
# define VIEW 0x00008000
#endif
#ifdef SQLITE_OMIT_VIRTUALTABLE
# define VTAB 0
#else
# define VTAB 0x00010000
#endif
#ifdef SQLITE_OMIT_AUTOVACUUM
# define AUTOVACUUM 0
#else
# define AUTOVACUUM 0x00020000
#endif
#ifdef SQLITE_OMIT_CTE
# define CTE 0
#else
# define CTE 0x00040000
#endif
/*
** These are the keywords.  Entries guarded by #ifdef MAXSCALE extend the
** stock SQLite set with additional MariaDB/MySQL keywords, while the
** #ifndef MAXSCALE guards remove SQLite-only keywords from the MaxScale
** build of this table.
*/
static Keyword aKeywordTable[] = {
#ifndef MAXSCALE
{ "ABORT", "TK_ABORT", CONFLICT|TRIGGER },
#endif
{ "ACTION", "TK_ACTION", FKEY },
{ "ADD", "TK_ADD", ALTER },
{ "AFTER", "TK_AFTER", TRIGGER },
#ifdef MAXSCALE
{ "AGAINST", "TK_AGAINST", ALWAYS },
#endif
{ "ALL", "TK_ALL", ALWAYS },
{ "ALTER", "TK_ALTER", ALTER },
#ifdef MAXSCALE
{ "ALGORITHM", "TK_ALGORITHM", ANALYZE },
#endif
{ "ANALYZE", "TK_ANALYZE", ANALYZE },
{ "AND", "TK_AND", ALWAYS },
{ "AS", "TK_AS", ALWAYS },
{ "ASC", "TK_ASC", ALWAYS },
{ "ATTACH", "TK_ATTACH", ATTACH },
{ "AUTOINCREMENT", "TK_AUTOINCR", AUTOINCR },
#ifdef MAXSCALE
{ "AUTO_INCREMENT", "TK_AUTOINCR", AUTOINCR },
#endif
{ "BEFORE", "TK_BEFORE", TRIGGER },
{ "BEGIN", "TK_BEGIN", ALWAYS },
{ "BETWEEN", "TK_BETWEEN", ALWAYS },
#ifdef MAXSCALE
{ "BINARY", "TK_BINARY", ALWAYS },
#endif
{ "BY", "TK_BY", ALWAYS },
#ifdef MAXSCALE
{ "CALL", "TK_CALL", ALWAYS },
#endif
{ "CASCADE", "TK_CASCADE", FKEY },
{ "CASE", "TK_CASE", ALWAYS },
{ "CAST", "TK_CAST", CAST },
#ifdef MAXSCALE
{ "CHARACTER", "TK_CHARACTER", ALWAYS },
{ "CHARSET", "TK_CHARSET", ALWAYS },
#endif
{ "CHECK", "TK_CHECK", ALWAYS },
#ifdef MAXSCALE
{ "CLOSE", "TK_CLOSE", ALWAYS },
#endif
{ "COLLATE", "TK_COLLATE", ALWAYS },
{ "COLUMN", "TK_COLUMNKW", ALTER },
#ifdef MAXSCALE
{ "COLUMNS", "TK_COLUMNS", ALWAYS },
{ "COMMENT", "TK_COMMENT", ALWAYS },
#endif
{ "COMMIT", "TK_COMMIT", ALWAYS },
#ifndef MAXSCALE
{ "CONFLICT", "TK_CONFLICT", CONFLICT },
#endif
#ifdef MAXSCALE
{ "CONNECTION", "TK_CONNECTION", ALWAYS },
{ "CONCURRENT", "TK_CONCURRENT", ALWAYS },
#endif
{ "CONSTRAINT", "TK_CONSTRAINT", ALWAYS },
{ "CREATE", "TK_CREATE", ALWAYS },
{ "CROSS", "TK_JOIN_KW", ALWAYS },
#ifdef MAXSCALE
{ "CURRENT", "TK_CURRENT", ALWAYS },
#endif
{ "CURRENT_DATE", "TK_CTIME_KW", ALWAYS },
{ "CURRENT_TIME", "TK_CTIME_KW", ALWAYS },
{ "CURRENT_TIMESTAMP","TK_CTIME_KW", ALWAYS },
#ifdef MAXSCALE
{ "DATA", "TK_DATA", ALWAYS },
{ "DATABASE", "TK_DATABASE", ALWAYS },
#else
{ "DATABASE", "TK_DATABASE", ATTACH },
#endif
#ifdef MAXSCALE
{ "DATABASES", "TK_DATABASES_KW", ALWAYS },
{ "DEALLOCATE", "TK_DEALLOCATE", ALWAYS },
{ "DECLARE", "TK_DECLARE", ALWAYS },
#endif
{ "DEFAULT", "TK_DEFAULT", ALWAYS },
{ "DEFERRED", "TK_DEFERRED", ALWAYS },
{ "DEFERRABLE", "TK_DEFERRABLE", FKEY },
#ifdef MAXSCALE
{ "DELAYED", "TK_DELAYED", ALWAYS },
#endif
{ "DELETE", "TK_DELETE", ALWAYS },
{ "DESC", "TK_DESC", ALWAYS },
#ifdef MAXSCALE
{ "DESCRIBE", "TK_EXPLAIN" , ALWAYS },
#endif
{ "DETACH", "TK_DETACH", ATTACH },
{ "DISTINCT", "TK_DISTINCT", ALWAYS },
#ifdef MAXSCALE
{ "DISTINCTROW", "TK_DISTINCT", ALWAYS },
{ "DO", "TK_DO", ALWAYS },
#endif
{ "DROP", "TK_DROP", ALWAYS },
#ifdef MAXSCALE
{ "DUMPFILE", "TK_DUMPFILE", ALWAYS },
#endif
{ "END", "TK_END", ALWAYS },
{ "EACH", "TK_EACH", TRIGGER },
#ifdef MAXSCALE
{ "ENABLE", "TK_ENABLE", ALWAYS },
{ "ENGINE", "TK_ENGINE", ALWAYS },
{ "ENUM", "TK_ENUM", ALWAYS },
#endif
{ "ELSE", "TK_ELSE", ALWAYS },
{ "ESCAPE", "TK_ESCAPE", ALWAYS },
{ "EXCEPT", "TK_EXCEPT", COMPOUND },
{ "EXCLUSIVE", "TK_EXCLUSIVE", ALWAYS },
#ifdef MAXSCALE
{ "EXECUTE", "TK_EXECUTE", ALWAYS },
{ "EXCLUDE", "TK_EXCLUDE", ALWAYS },
{ "EXTENDED", "TK_EXTENDED", ALWAYS },
#endif
{ "EXISTS", "TK_EXISTS", ALWAYS },
{ "EXPLAIN", "TK_EXPLAIN", EXPLAIN },
#ifndef MAXSCALE
{ "FAIL", "TK_FAIL", CONFLICT|TRIGGER },
#endif
#ifdef MAXSCALE
{ "FIELDS", "TK_FIELDS", ALWAYS },
{ "FIRST", "TK_FIRST", ALWAYS },
{ "FLUSH", "TK_FLUSH", ALWAYS },
{ "FOLLOWING", "TK_FOLLOWING", ALWAYS },
#endif
#ifdef MAXSCALE
{ "FOR", "TK_FOR", ALWAYS },
#else
{ "FOR", "TK_FOR", TRIGGER },
#endif
#ifdef MAXSCALE
{ "FORCE", "TK_FORCE", ALWAYS },
#endif
{ "FOREIGN", "TK_FOREIGN", FKEY },
#ifdef MAXSCALE
{ "FORMAT", "TK_FORMAT", ALWAYS },
#endif
{ "FROM", "TK_FROM", ALWAYS },
{ "FULL", "TK_JOIN_KW", ALWAYS },
#ifdef MAXSCALE
{ "FULLTEXT", "TK_FULLTEXT", ALWAYS },
{ "FUNCTION", "TK_FUNCTION_KW", ALWAYS },
#endif
{ "GLOB", "TK_LIKE_KW", ALWAYS },
#ifdef MAXSCALE
{ "GLOBAL", "TK_GLOBAL", ALWAYS },
{ "GRANT", "TK_GRANT", ALWAYS },
#endif
{ "GROUP", "TK_GROUP", ALWAYS },
#ifdef MAXSCALE
{ "HANDLER", "TK_HANDLER", ALWAYS },
#endif
{ "HAVING", "TK_HAVING", ALWAYS },
#ifdef MAXSCALE
{ "HIGH_PRIORITY", "TK_HIGH_PRIORITY",ALWAYS },
#endif
{ "IF", "TK_IF", ALWAYS },
#ifdef MAXSCALE
{ "IGNORE", "TK_IGNORE", ALWAYS },
#else
{ "IGNORE", "TK_IGNORE", CONFLICT|TRIGGER },
#endif
{ "IMMEDIATE", "TK_IMMEDIATE", ALWAYS },
{ "IN", "TK_IN", ALWAYS },
{ "INDEX", "TK_INDEX", ALWAYS },
{ "INDEXED", "TK_INDEXED", ALWAYS },
#ifdef MAXSCALE
{ "INDEXES", "TK_INDEXES", ALWAYS },
{ "INFILE", "TK_INFILE", ALWAYS },
#endif
{ "INITIALLY", "TK_INITIALLY", FKEY },
{ "INNER", "TK_JOIN_KW", ALWAYS },
{ "INSERT", "TK_INSERT", ALWAYS },
{ "INSTEAD", "TK_INSTEAD", TRIGGER },
{ "INTERSECT", "TK_INTERSECT", COMPOUND },
#ifdef MAXSCALE
{ "INTERVAL", "TK_INTERVAL", ALWAYS },
#endif
{ "INTO", "TK_INTO", ALWAYS },
{ "IS", "TK_IS", ALWAYS },
{ "ISNULL", "TK_ISNULL", ALWAYS },
{ "JOIN", "TK_JOIN", ALWAYS },
{ "KEY", "TK_KEY", ALWAYS },
#ifdef MAXSCALE
{ "KEYS", "TK_KEYS", ALWAYS },
#endif
{ "LEFT", "TK_JOIN_KW", ALWAYS },
{ "LIKE", "TK_LIKE_KW", ALWAYS },
{ "LIMIT", "TK_LIMIT", ALWAYS },
#ifdef MAXSCALE
{ "LOAD", "TK_LOAD", ALWAYS },
{ "LOCAL", "TK_LOCAL", ALWAYS },
{ "LOCK", "TK_LOCK", ALWAYS },
{ "LOW_PRIORITY", "TK_LOW_PRIORITY", ALWAYS },
#endif
{ "MATCH", "TK_MATCH", ALWAYS },
#ifdef MAXSCALE
{ "MASTER", "TK_MASTER", ALWAYS },
{ "MERGE", "TK_MERGE", ALWAYS },
{ "NAMES", "TK_NAMES", ALWAYS },
#endif
{ "NATURAL", "TK_JOIN_KW", ALWAYS },
#ifdef MAXSCALE
{ "NEXT", "TK_NEXT", ALWAYS },
#endif
{ "NO", "TK_NO", FKEY },
{ "NOT", "TK_NOT", ALWAYS },
{ "NOTNULL", "TK_NOTNULL", ALWAYS },
#ifdef MAXSCALE
{ "NO_WRITE_TO_BINLOG","TK_NO_WRITE_TO_BINLOG",ALWAYS },
#endif
{ "NULL", "TK_NULL", ALWAYS },
{ "OF", "TK_OF", ALWAYS },
{ "OFFSET", "TK_OFFSET", ALWAYS },
{ "ON", "TK_ON", ALWAYS },
#ifdef MAXSCALE
{ "OPEN", "TK_OPEN", ALWAYS },
#endif
{ "OR", "TK_OR", ALWAYS },
{ "ORDER", "TK_ORDER", ALWAYS },
#ifdef MAXSCALE
{ "OTHERS", "TK_OTHERS", ALWAYS },
#endif
{ "OUTER", "TK_JOIN_KW", ALWAYS },
#ifdef MAXSCALE
{ "OUTFILE", "TK_OUTFILE", ALWAYS },
{ "OVER", "TK_OVER", ALWAYS },
#endif
#ifdef MAXSCALE
{ "PARTITION", "TK_PARTITION", ALWAYS },
{ "PARTITIONS", "TK_PARTITIONS", ALWAYS },
{ "PASSWORD", "TK_PASSWORD", ALWAYS },
{ "PERSISTENT", "TK_PERSISTENT", ALWAYS },
#endif
#ifndef MAXSCALE
{ "PLAN", "TK_PLAN", EXPLAIN },
#endif
{ "PRAGMA", "TK_PRAGMA", PRAGMA },
#ifdef MAXSCALE
{ "PRECEDING", "TK_PRECEDING", ALWAYS },
{ "PREPARE", "TK_PREPARE", ALWAYS },
{ "PREVIOUS", "TK_PREVIOUS", ALWAYS },
#endif
{ "PRIMARY", "TK_PRIMARY", ALWAYS },
#ifdef MAXSCALE
{ "PROCEDURE", "TK_FUNCTION_KW", ALWAYS },
#endif
#ifndef MAXSCALE
{ "QUERY", "TK_QUERY", EXPLAIN },
#endif
#ifdef MAXSCALE
{ "QUICK", "TK_QUICK", ALWAYS },
#endif
{ "RAISE", "TK_RAISE", TRIGGER },
#ifdef MAXSCALE
{ "RANGE", "TK_RANGE", ALWAYS },
{ "READ", "TK_READ", ALWAYS },
#endif
{ "RECURSIVE", "TK_RECURSIVE", CTE },
{ "REFERENCES", "TK_REFERENCES", FKEY },
{ "REGEXP", "TK_LIKE_KW", ALWAYS },
{ "REINDEX", "TK_REINDEX", REINDEX },
{ "RELEASE", "TK_RELEASE", ALWAYS },
{ "RENAME", "TK_RENAME", ALTER },
{ "REPLACE", "TK_REPLACE", CONFLICT },
{ "RESTRICT", "TK_RESTRICT", FKEY },
#ifdef MAXSCALE
{ "REVOKE", "TK_REVOKE", ALWAYS },
#endif
{ "RIGHT", "TK_JOIN_KW", ALWAYS },
{ "ROLLBACK", "TK_ROLLBACK", ALWAYS },
#ifdef MAXSCALE
{ "ROLLUP", "TK_ROLLUP", ALWAYS },
#endif
{ "ROW", "TK_ROW", TRIGGER },
#ifdef MAXSCALE
{ "ROWS", "TK_ROWS", ALWAYS },
#endif
{ "SAVEPOINT", "TK_SAVEPOINT", ALWAYS },
#ifdef MAXSCALE
{ "SCHEMAS", "TK_DATABASES_KW", ALWAYS },
{ "SEQUENCE", "TK_SEQUENCE", ALWAYS },
#endif
{ "SELECT", "TK_SELECT", ALWAYS },
#ifdef MAXSCALE
{ "SESSION", "TK_SESSION", ALWAYS },
{ "SEPARATOR", "TK_SEPARATOR", ALWAYS },
{ "SHOW", "TK_SHOW", ALWAYS },
{ "SLAVE", "TK_SLAVE", ALWAYS },
{ "SPATIAL", "TK_SPATIAL", ALWAYS },
{ "SQL_BIG_RESULT", "TK_SELECT_OPTIONS_KW", ALWAYS },
{ "SQL_BUFFER_RESULT","TK_SELECT_OPTIONS_KW", ALWAYS },
{ "SQL_CACHE", "TK_SELECT_OPTIONS_KW", ALWAYS },
{ "SQL_CALC_FOUND_ROWS","TK_SELECT_OPTIONS_KW", ALWAYS },
{ "SQL_NO_CACHE", "TK_SELECT_OPTIONS_KW", ALWAYS },
{ "SQL_SMALL_RESULT", "TK_SELECT_OPTIONS_KW", ALWAYS },
#endif
{ "SET", "TK_SET", ALWAYS },
#ifdef MAXSCALE
{ "START", "TK_START", ALWAYS },
{ "STATEMENT", "TK_STATEMENT", ALWAYS },
{ "STATUS", "TK_STATUS", ALWAYS },
{ "STRAIGHT_JOIN", "TK_STRAIGHT_JOIN",ALWAYS },
#endif
{ "TABLE", "TK_TABLE", ALWAYS },
#ifdef MAXSCALE
{ "TABLES", "TK_TABLES", ALWAYS },
#endif
{ "TEMP", "TK_TEMP", ALWAYS },
{ "TEMPORARY", "TK_TEMP", ALWAYS },
#ifdef MAXSCALE
{ "TEMPTABLE", "TK_TEMPTABLE", ANALYZE },
#endif
{ "THEN", "TK_THEN", ALWAYS },
#ifdef MAXSCALE
{ "TIES", "TK_TIES", ANALYZE },
#endif
{ "TO", "TK_TO", ALWAYS },
{ "TRANSACTION", "TK_TRANSACTION", ALWAYS },
{ "TRIGGER", "TK_TRIGGER", TRIGGER },
#ifdef MAXSCALE
{ "TRUNCATE", "TK_TRUNCATE", ALWAYS },
{ "UNBOUNDED", "TK_UNBOUNDED", ALWAYS },
#endif
{ "UNION", "TK_UNION", COMPOUND },
{ "UNSIGNED", "TK_UNSIGNED", ALWAYS },
{ "UNIQUE", "TK_UNIQUE", ALWAYS },
#ifdef MAXSCALE
{ "UNLOCK", "TK_UNLOCK", ALWAYS },
#endif
{ "UPDATE", "TK_UPDATE", ALWAYS },
{ "USE", "TK_USE", ALWAYS },
{ "USING", "TK_USING", ALWAYS },
{ "VACUUM", "TK_VACUUM", VACUUM },
#ifdef MAXSCALE
{ "VALUE", "TK_VALUE", ALWAYS },
#endif
{ "VALUES", "TK_VALUES", ALWAYS },
#ifdef MAXSCALE
{ "VARIABLES", "TK_VARIABLES", ALWAYS },
#endif
{ "VIEW", "TK_VIEW", VIEW },
{ "VIRTUAL", "TK_VIRTUAL", VTAB },
#ifdef MAXSCALE
{ "WARNINGS", "TK_WARNINGS", ALWAYS },
{ "WINDOW", "TK_WINDOW", ALWAYS },
#endif
{ "WITH", "TK_WITH", CTE },
#ifndef MAXSCALE
{ "WITHOUT", "TK_WITHOUT", ALWAYS },
#endif
{ "WHEN", "TK_WHEN", ALWAYS },
{ "WHERE", "TK_WHERE", ALWAYS },
#ifdef MAXSCALE
{ "WORK", "TK_WORK", ALWAYS },
{ "WRITE", "TK_WRITE", ALWAYS },
{ "XA", "TK_XA", ALWAYS },
#endif
{ "ZEROFILL", "TK_ZEROFILL", ALWAYS },
};
/* Number of keywords */
static int nKeyword = (sizeof(aKeywordTable)/sizeof(aKeywordTable[0]));
/* Map all alphabetic characters into lower-case for hashing. This is
** only valid for alphabetics. In particular it does not work for '_'
** and so the hash cannot be on a keyword position that might be an '_'.
*/
#define charMap(X) (0x20|(X))
/*
** Comparison function for two Keyword records: shorter keywords sort
** first, with ties broken by keyword text.
*/
static int keywordCompare1(const void *a, const void *b){
  const Keyword *pLeft = (const Keyword*)a;
  const Keyword *pRight = (const Keyword*)b;
  /* Primary key: keyword length, ascending */
  int rc = pLeft->len - pRight->len;
  /* Tie-break on the keyword text so the ordering is total */
  if( rc==0 ){
    rc = strcmp(pLeft->zName, pRight->zName);
  }
  assert( rc!=0 );  /* Keywords are unique; no two entries compare equal */
  return rc;
}
/* Comparison function: sort by longestSuffix in DESCENDING order, with
** ties broken by keyword text. */
static int keywordCompare2(const void *a, const void *b){
  const Keyword *pLeft = (const Keyword*)a;
  const Keyword *pRight = (const Keyword*)b;
  int rc = pRight->longestSuffix - pLeft->longestSuffix;
  if( rc==0 ){
    rc = strcmp(pLeft->zName, pRight->zName);
  }
  assert( rc!=0 );  /* Keywords are unique; no two entries compare equal */
  return rc;
}
/* Comparison function: sort by offset into the generated zText[] in
** ascending order; ties broken by id, larger id first. */
static int keywordCompare3(const void *a, const void *b){
  const Keyword *pLeft = (const Keyword*)a;
  const Keyword *pRight = (const Keyword*)b;
  int rc = pLeft->offset - pRight->offset;
  if( rc==0 ) rc = pRight->id - pLeft->id;
  assert( rc!=0 );  /* (offset, id) pairs are unique across the table */
  return rc;
}
/*
** Return a KeywordTable entry with the given id
*/
static Keyword *findById(int id){
  int i;
  for(i=0; i<nKeyword; i++){
    if( aKeywordTable[i].id==id ) break;
  }
  /* NOTE(review): if no entry has the given id, i==nKeyword here and the
  ** returned pointer is one past the end of aKeywordTable[]; callers
  ** dereference the result.  Current call sites pass ids taken from the
  ** table itself, so the lookup always succeeds -- confirm before reuse. */
  return &aKeywordTable[i];
}
/*
** This routine does the work. The generated code is printed on standard
** output.
*/
int main(int argc, char **argv){
  int i, j, k, h;
  int bestSize, bestCount;   /* Chosen hash-table size and its score */
  int count;
  int nChar;                 /* Total length of the packed zText[] string */
  int totalLen = 0;          /* Sum of all keyword lengths before packing */
  int aHash[1000]; /* 1000 is much bigger than nKeyword */
  char zText[2000];          /* Buffer holding the packed keyword text */

  /* Remove entries from the list of keywords that have mask==0 */
  for(i=j=0; i<nKeyword; i++){
    if( aKeywordTable[i].mask==0 ) continue;
    if( j<i ){
      aKeywordTable[j] = aKeywordTable[i];
    }
    j++;
  }
  nKeyword = j;

  /* Fill in the lengths of strings and hashes for all entries. */
  for(i=0; i<nKeyword; i++){
    Keyword *p = &aKeywordTable[i];
    p->len = (int)strlen(p->zName);
    assert( p->len<sizeof(p->zOrigName) );
    memcpy(p->zOrigName, p->zName, p->len+1);
    totalLen += p->len;
    /* Hash on first char, last char, and length -- this formula must
    ** match the one emitted into the generated keywordCode() below */
    p->hash = (charMap(p->zName[0])*4) ^
              (charMap(p->zName[p->len-1])*3) ^ (p->len*1);
    p->id = i+1;
  }

  /* Sort the table from shortest to longest keyword */
  qsort(aKeywordTable, nKeyword, sizeof(aKeywordTable[0]), keywordCompare1);

  /* Look for short keywords embedded in longer keywords */
  for(i=nKeyword-2; i>=0; i--){
    Keyword *p = &aKeywordTable[i];
    for(j=nKeyword-1; j>i && p->substrId==0; j--){
      Keyword *pOther = &aKeywordTable[j];
      if( pOther->substrId ) continue;
      if( pOther->len<=p->len ) continue;
      for(k=0; k<=pOther->len-p->len; k++){
        if( memcmp(p->zName, &pOther->zName[k], p->len)==0 ){
          p->substrId = pOther->id;
          p->substrOffset = k;
          break;
        }
      }
    }
  }

  /* Compute the longestSuffix value for every word */
  for(i=0; i<nKeyword; i++){
    Keyword *p = &aKeywordTable[i];
    if( p->substrId ) continue;
    for(j=0; j<nKeyword; j++){
      Keyword *pOther;
      if( j==i ) continue;
      pOther = &aKeywordTable[j];
      if( pOther->substrId ) continue;
      for(k=p->longestSuffix+1; k<p->len && k<pOther->len; k++){
        if( memcmp(&p->zName[p->len-k], pOther->zName, k)==0 ){
          p->longestSuffix = k;
        }
      }
    }
  }

  /* Sort the table into reverse order by length */
  qsort(aKeywordTable, nKeyword, sizeof(aKeywordTable[0]), keywordCompare2);

  /* Fill in the offset for all entries.  Words whose suffix matches the
  ** prefix of another word are overlapped to shrink the packed string. */
  nChar = 0;
  for(i=0; i<nKeyword; i++){
    Keyword *p = &aKeywordTable[i];
    if( p->offset>0 || p->substrId ) continue;
    p->offset = nChar;
    nChar += p->len;
    for(k=p->len-1; k>=1; k--){
      for(j=i+1; j<nKeyword; j++){
        Keyword *pOther = &aKeywordTable[j];
        if( pOther->offset>0 || pOther->substrId ) continue;
        if( pOther->len<=k ) continue;
        if( memcmp(&p->zName[p->len-k], pOther->zName, k)==0 ){
          /* pOther's first k chars overlap p's last k chars: chain it on */
          p = pOther;
          p->offset = nChar - k;
          nChar = p->offset + p->len;
          p->zName += k;
          p->len -= k;
          p->prefix = k;
          j = i;
          k = p->len;
        }
      }
    }
  }
  /* Embedded keywords inherit their offset from their containing word */
  for(i=0; i<nKeyword; i++){
    Keyword *p = &aKeywordTable[i];
    if( p->substrId ){
      p->offset = findById(p->substrId)->offset + p->substrOffset;
    }
  }

  /* Sort the table by offset */
  qsort(aKeywordTable, nKeyword, sizeof(aKeywordTable[0]), keywordCompare3);

  /* Figure out how big to make the hash table in order to minimize the
  ** number of collisions */
  bestSize = nKeyword;
  bestCount = nKeyword*nKeyword;
  for(i=nKeyword/2; i<=2*nKeyword; i++){
    for(j=0; j<i; j++) aHash[j] = 0;
    for(j=0; j<nKeyword; j++){
      h = aKeywordTable[j].hash % i;
      aHash[h] *= 2;     /* Doubling penalizes long collision chains */
      aHash[h]++;
    }
    for(j=count=0; j<i; j++) count += aHash[j];
    if( count<bestCount ){
      bestCount = count;
      bestSize = i;
    }
  }

  /* Compute the hash: aHash[h] holds 1-based index of first keyword in
  ** bucket h; iNext chains the rest of each bucket */
  for(i=0; i<bestSize; i++) aHash[i] = 0;
  for(i=0; i<nKeyword; i++){
    h = aKeywordTable[i].hash % bestSize;
    aKeywordTable[i].iNext = aHash[h];
    aHash[h] = i+1;
  }

  /* Begin generating code */
  printf("%s", zHdr);
  printf("/* Hash score: %d */\n", bestCount);
  printf("static int keywordCode(const char *z, int n, int *pType){\n");
  printf(" /* zText[] encodes %d bytes of keywords in %d bytes */\n",
         totalLen + nKeyword, nChar+1 );
  /* Emit a comment listing the packed keyword text, wrapped at ~70 cols */
  for(i=j=k=0; i<nKeyword; i++){
    Keyword *p = &aKeywordTable[i];
    if( p->substrId ) continue;
    memcpy(&zText[k], p->zName, p->len);
    k += p->len;
    if( j+p->len>70 ){
      printf("%*s */\n", 74-j, "");
      j = 0;
    }
    if( j==0 ){
      printf(" /* ");
      j = 8;
    }
    printf("%s", p->zName);
    j += p->len;
  }
  if( j>0 ){
    printf("%*s */\n", 74-j, "");
  }
  /* Emit the packed keyword text as a char array */
  printf(" static const char zText[%d] = {\n", nChar);
  zText[nChar] = 0;
  for(i=j=0; i<k; i++){
    if( j==0 ){
      printf(" ");
    }
    if( zText[i]==0 ){
      printf("0");
    }else{
      printf("'%c',", zText[i]);
    }
    j += 4;
    if( j>68 ){
      printf("\n");
      j = 0;
    }
  }
  if( j>0 ) printf("\n");
  printf(" };\n");
  /* Emit the hash-bucket heads */
  printf(" static const unsigned char aHash[%d] = {\n", bestSize);
  for(i=j=0; i<bestSize; i++){
    if( j==0 ) printf(" ");
    printf(" %3d,", aHash[i]);
    j++;
    if( j>12 ){
      printf("\n");
      j = 0;
    }
  }
  printf("%s };\n", j==0 ? "" : "\n");
  /* Emit the collision chains */
  printf(" static const unsigned char aNext[%d] = {\n", nKeyword);
  for(i=j=0; i<nKeyword; i++){
    if( j==0 ) printf(" ");
    printf(" %3d,", aKeywordTable[i].iNext);
    j++;
    if( j>12 ){
      printf("\n");
      j = 0;
    }
  }
  printf("%s };\n", j==0 ? "" : "\n");
  /* Emit the full (prefix-inclusive) length of each keyword */
  printf(" static const unsigned char aLen[%d] = {\n", nKeyword);
  for(i=j=0; i<nKeyword; i++){
    if( j==0 ) printf(" ");
    printf(" %3d,", aKeywordTable[i].len+aKeywordTable[i].prefix);
    j++;
    if( j>12 ){
      printf("\n");
      j = 0;
    }
  }
  printf("%s };\n", j==0 ? "" : "\n");
  /* Emit each keyword's offset into zText[] */
  printf(" static const unsigned short int aOffset[%d] = {\n", nKeyword);
  for(i=j=0; i<nKeyword; i++){
    if( j==0 ) printf(" ");
    printf(" %3d,", aKeywordTable[i].offset);
    j++;
    if( j>12 ){
      printf("\n");
      j = 0;
    }
  }
  printf("%s };\n", j==0 ? "" : "\n");
  /* Emit the token code for each keyword */
  printf(" static const unsigned char aCode[%d] = {\n", nKeyword);
  for(i=j=0; i<nKeyword; i++){
    char *zToken = aKeywordTable[i].zTokenType;
    if( j==0 ) printf(" ");
    printf("%s,%*s", zToken, (int)(14-strlen(zToken)), "");
    j++;
    if( j>=5 ){
      printf("\n");
      j = 0;
    }
  }
  printf("%s };\n", j==0 ? "" : "\n");
  /* Emit the lookup logic itself */
  printf(" int i, j;\n");
  printf(" const char *zKW;\n");
  printf(" if( n>=2 ){\n");
  printf(" i = ((charMap(z[0])*4) ^ (charMap(z[n-1])*3) ^ n) %% %d;\n",
         bestSize);
  printf(" for(i=((int)aHash[i])-1; i>=0; i=((int)aNext[i])-1){\n");
  printf(" if( aLen[i]!=n ) continue;\n");
  printf(" j = 0;\n");
  printf(" zKW = &zText[aOffset[i]];\n");
  printf("#ifdef SQLITE_ASCII\n");
  printf(" while( j<n && (z[j]&~0x20)==zKW[j] ){ j++; }\n");
  printf("#endif\n");
  printf("#ifdef SQLITE_EBCDIC\n");
  printf(" while( j<n && toupper(z[j])==zKW[j] ){ j++; }\n");
  printf("#endif\n");
  printf(" if( j<n ) continue;\n");
  /* One testcase() per keyword for coverage of the generated table */
  for(i=0; i<nKeyword; i++){
    printf(" testcase( i==%d ); /* %s */\n",
           i, aKeywordTable[i].zOrigName);
  }
  printf(" *pType = aCode[i];\n");
  printf(" break;\n");
  printf(" }\n");
  printf(" }\n");
  printf(" return n;\n");
  printf("}\n");
  printf("int sqlite3KeywordCode(const unsigned char *z, int n){\n");
  printf(" int id = TK_ID;\n");
  printf(" keywordCode((char*)z, n, &id);\n");
  printf(" return id;\n");
  printf("}\n");
  printf("#define SQLITE_N_KEYWORD %d\n", nKeyword);
  return 0;
}

View File

@ -1,99 +0,0 @@
#!/usr/bin/tcl
#
# This script reads the regular MSVC makefile (../Makefile.msc) and outputs
# a revised version of that Makefile that is "minimal" in the sense that
# it uses the sqlite3.c amalgamation as input and does not require tclsh.
# The resulting "../Makefile.min.msc" is suitable for use in the amalgamation
# tarballs.
#
if {$argc==0} {
set basedir [file dir [file dir [file normalize $argv0]]]
set fromFileName [file join $basedir Makefile.msc]
set toFileName [file join $basedir autoconf Makefile.msc]
} else {
set fromFileName [lindex $argv 0]
if {![file exists $fromFileName]} {
error "input file \"$fromFileName\" does not exist"
}
set toFileName [lindex $argv 1]
if {[file exists $toFileName]} {
error "output file \"$toFileName\" already exists"
}
}
proc readFile { fileName } {
  # Slurp the entire contents of $fileName and return it verbatim.
  # The channel is configured as raw binary so that no end-of-line
  # translation or character-encoding conversion is applied.
  set chan [open $fileName RDONLY]
  fconfigure $chan -encoding binary -translation binary
  set data [read $chan]
  close $chan
  return $data
}
proc writeFile { fileName data } {
  # Create (or truncate) $fileName and write exactly the bytes of $data
  # into it.  Binary mode prevents any newline or encoding translation.
  # Returns an empty string.
  set chan [open $fileName {WRONLY CREAT TRUNC}]
  fconfigure $chan -encoding binary -translation binary
  puts -nonewline $chan $data
  close $chan
  return ""
}
proc escapeSubSpec { data } {
  # Escape the characters that are special in a [regsub] replacement
  # string -- "&" (whole-match substitution) and "\N" (backreferences) --
  # so that $data can later be used as a replacement without being
  # reinterpreted.
  set data [string map [list & \\&] $data]
  regsub -all -- {\\(\d+)} $data {\\\\\1} data
  return $data
}
proc substVars { data } {
# Perform $variable substitution on $data using the caller's variables
# (via uplevel 1).  Command and backslash substitution are suppressed,
# so only variable references such as ${i} are expanded.
return [uplevel 1 [list subst -nocommands -nobackslashes $data]]
}
#
# NOTE: This block is used to replace the section marked <<block1>> in
# the Makefile, if it exists.
#
set blocks(1) [string trimleft [string map [list \\\\ \\] {
_HASHCHAR=^#
!IF ![echo !IFNDEF VERSION > rcver.vc] && \\
![for /F "delims=" %V in ('type "$(SQLITE3H)" ^| find "$(_HASHCHAR)define SQLITE_VERSION "') do (echo VERSION = ^^%V >> rcver.vc)] && \\
![echo !ENDIF >> rcver.vc]
!INCLUDE rcver.vc
!ENDIF
RESOURCE_VERSION = $(VERSION:^#=)
RESOURCE_VERSION = $(RESOURCE_VERSION:define=)
RESOURCE_VERSION = $(RESOURCE_VERSION:SQLITE_VERSION=)
RESOURCE_VERSION = $(RESOURCE_VERSION:"=)
RESOURCE_VERSION = $(RESOURCE_VERSION:.=,)
$(LIBRESOBJS): $(TOP)\sqlite3.rc rcver.vc $(SQLITE3H)
echo #ifndef SQLITE_RESOURCE_VERSION > sqlite3rc.h
echo #define SQLITE_RESOURCE_VERSION $(RESOURCE_VERSION) >> sqlite3rc.h
echo #endif >> sqlite3rc.h
$(LTRCOMPILE) -fo $(LIBRESOBJS) -DRC_VERONLY $(TOP)\sqlite3.rc
}]]
set data "#### DO NOT EDIT ####\n"
append data "# This makefile is automatically "
append data "generated from the [file tail $fromFileName] at\n"
append data "# the root of the canonical SQLite source tree (not the\n"
append data "# amalgamation tarball) using the tool/[file tail $argv0]\n"
append data "# script.\n#\n\n"
append data [readFile $fromFileName]
regsub -all -- {# <<mark>>\n.*?# <</mark>>\n} \
$data "" data
foreach i [lsort -integer [array names blocks]] {
regsub -all -- [substVars \
{# <<block${i}>>\n.*?# <</block${i}>>\n}] \
$data [escapeSubSpec $blocks($i)] data
}
set data [string map [list " -I\$(TOP)\\src" ""] $data]
set data [string map [list " /DEF:sqlite3.def" ""] $data]
set data [string map [list " sqlite3.def" ""] $data]
set data [string map [list " libsqlite3.lib" ""] $data]
set data [string map [list " \$(ALL_TCL_TARGETS)" ""] $data]
set data [string map [list "\$(TOP)\\src\\" "\$(TOP)\\"] $data]
writeFile $toFileName $data

View File

@ -1,50 +0,0 @@
#!/usr/bin/tclsh
#
# This TCL script scans the opcodes.h file (which is itself generated by
# another TCL script) and uses the information gleaned to create the
# opcodes.c source file.
#
# Opcodes.c contains strings which are the symbolic names for the various
# opcodes used by the VDBE. These strings are used when disassembling a
# VDBE program during tracing or as a result of the EXPLAIN keyword.
#
# The opcodes.h file is named by the first command-line argument; the
# generated C source is written to standard output.
#
# Emit the fixed preamble: the generated-file warning, the #if guard that
# omits the table when neither EXPLAIN, VDBE_PROFILE nor SQLITE_DEBUG is
# enabled, and the OpHelp() macro that optionally appends a synopsis
# string after a NUL separator.
puts "/* Automatically generated. Do not edit */"
puts "/* See the tool/mkopcodec.tcl script for details. */"
puts "#if !defined(SQLITE_OMIT_EXPLAIN) \\"
puts " || defined(VDBE_PROFILE) \\"
puts " || defined(SQLITE_DEBUG)"
puts "#if defined(SQLITE_ENABLE_EXPLAIN_COMMENTS) || defined(SQLITE_DEBUG)"
puts "# define OpHelp(X) \"\\0\" X"
puts "#else"
puts "# define OpHelp(X)"
puts "#endif"
puts "const char *sqlite3OpcodeName(int i)\173"
puts " static const char *const azName\[\] = \173"
# Scan the input for "#define OP_xxx NN" lines.  For each one, record the
# opcode name (with the OP_ prefix stripped) indexed by its number, plus
# any "synopsis: ..." text found in the trailing comment.  $mx tracks the
# largest opcode number seen so the table below covers 0..$mx.
set mx 0
set in [open [lindex $argv 0] rb]
while {![eof $in]} {
set line [gets $in]
if {[regexp {^#define OP_} $line]} {
set name [lindex $line 1]
regsub {^OP_} $name {} name
set i [lindex $line 2]
set label($i) $name
if {$mx<$i} {set mx $i}
if {[regexp {synopsis: (.*) \*/} $line all x]} {
set synopsis($i) [string trim $x]
} else {
set synopsis($i) {}
}
}
}
close $in
# Emit one azName[] entry per opcode number, in numeric order, then close
# out the array, the function body, and the #if guard.  NOTE(review):
# this assumes every number in 0..$mx was defined; a gap would leave
# label($i) unset and raise a Tcl error here.
for {set i 0} {$i<=$mx} {incr i} {
puts [format " /* %3d */ %-18s OpHelp(\"%s\")," \
$i \"$label($i)\" $synopsis($i)]
}
puts " \175;"
puts " return azName\[i\];"
puts "\175"
puts "#endif"

View File

@ -1,271 +0,0 @@
#!/usr/bin/tclsh
#
# Generate the file opcodes.h.
#
# This TCL script scans a concatenation of the parse.h output file from the
# parser and the vdbe.c source file in order to generate the opcodes numbers
# for all opcodes.
#
# The lines of the vdbe.c that we are interested in are of the form:
#
# case OP_aaaa: /* same as TK_bbbbb */
#
# The TK_ comment is optional. If it is present, then the value assigned to
# the OP_ is the same as the TK_ value. If missing, the OP_ value is assigned
# a small integer that is different from every other OP_ value.
#
# We go to the trouble of making some OP_ values the same as TK_ values
# as an optimization. During parsing, things like expression operators
# are coded with TK_ values such as TK_ADD, TK_DIVIDE, and so forth. Later
# during code generation, we need to generate corresponding opcodes like
# OP_Add and OP_Divide. By making TK_ADD==OP_Add and TK_DIVIDE==OP_Divide,
# code to translate from one to the other is avoided. This makes the
# code generator smaller and faster.
#
# This script also scans for lines of the form:
#
# case OP_aaaa: /* jump, in1, in2, in3, out2-prerelease, out3 */
#
# When such comments are found on an opcode, it means that certain
# properties apply to that opcode. Set corresponding flags using the
# OPFLG_INITIALIZER macro.
#
set in stdin
set currentOp {}
set nOp 0
while {![eof $in]} {
set line [gets $in]
# Remember the TK_ values from the parse.h file.
# NB: The "TK_" prefix stands for "ToKen", not the graphical Tk toolkit
# commonly associated with TCL.
#
if {[regexp {^#define TK_} $line]} {
set tk([lindex $line 1]) [lindex $line 2]
continue
}
# Find "/* Opcode: " lines in the vdbe.c file. Each one introduces
# a new opcode. Remember which parameters are used.
#
if {[regexp {^.. Opcode: } $line]} {
set currentOp OP_[lindex $line 2]
set m 0
foreach term $line {
switch $term {
P1 {incr m 1}
P2 {incr m 2}
P3 {incr m 4}
P4 {incr m 8}
P5 {incr m 16}
}
}
set paramused($currentOp) $m
}
# Find "** Synopsis: " lines that follow Opcode:
#
if {[regexp {^.. Synopsis: (.*)} $line all x] && $currentOp!=""} {
set synopsis($currentOp) [string trim $x]
}
# Scan for "case OP_aaaa:" lines in the vdbe.c file
#
if {[regexp {^case OP_} $line]} {
set line [split $line]
set name [string trim [lindex $line 1] :]
set op($name) -1
set jump($name) 0
set in1($name) 0
set in2($name) 0
set in3($name) 0
set out2($name) 0
set out3($name) 0
for {set i 3} {$i<[llength $line]-1} {incr i} {
switch [string trim [lindex $line $i] ,] {
same {
incr i
if {[lindex $line $i]=="as"} {
incr i
set sym [string trim [lindex $line $i] ,]
set val $tk($sym)
set op($name) $val
set used($val) 1
set sameas($val) $sym
set def($val) $name
}
}
jump {set jump($name) 1}
in1 {set in1($name) 1}
in2 {set in2($name) 1}
in3 {set in3($name) 1}
out2 {set out2($name) 1}
out3 {set out3($name) 1}
}
}
set order($nOp) $name
incr nOp
}
}
# Assign numbers to all opcodes and output the result.
#
puts "/* Automatically generated. Do not edit */"
puts "/* See the tool/mkopcodeh.tcl script for details */"
foreach name {OP_Noop OP_Explain} {
set jump($name) 0
set in1($name) 0
set in2($name) 0
set in3($name) 0
set out2($name) 0
set out3($name) 0
set op($name) -1
set order($nOp) $name
incr nOp
}
# The following are the opcodes that are processed by resolveP2Values()
#
set rp2v_ops {
OP_Transaction
OP_AutoCommit
OP_Savepoint
OP_Checkpoint
OP_Vacuum
OP_JournalMode
OP_VUpdate
OP_VFilter
OP_Next
OP_NextIfOpen
OP_SorterNext
OP_Prev
OP_PrevIfOpen
}
# Assign small values to opcodes that are processed by resolveP2Values()
# to make code generation for the switch() statement smaller and faster.
#
set cnt -1
for {set i 0} {$i<$nOp} {incr i} {
set name $order($i)
if {[lsearch $rp2v_ops $name]>=0} {
incr cnt
while {[info exists used($cnt)]} {incr cnt}
set op($name) $cnt
set used($cnt) 1
set def($cnt) $name
}
}
# Assign the next group of values to JUMP opcodes
#
for {set i 0} {$i<$nOp} {incr i} {
set name $order($i)
if {$op($name)>=0} continue
if {!$jump($name)} continue
incr cnt
while {[info exists used($cnt)]} {incr cnt}
set op($name) $cnt
set used($cnt) 1
set def($cnt) $name
}
# Find the numeric value for the largest JUMP opcode
#
set mxJump -1
for {set i 0} {$i<$nOp} {incr i} {
set name $order($i)
if {$jump($name) && $op($name)>$mxJump} {set mxJump $op($name)}
}
# Generate the numeric values for all remaining opcodes
#
for {set i 0} {$i<$nOp} {incr i} {
set name $order($i)
if {$op($name)<0} {
incr cnt
while {[info exists used($cnt)]} {incr cnt}
set op($name) $cnt
set used($cnt) 1
set def($cnt) $name
}
}
set max [lindex [lsort -decr -integer [array names used]] 0]
for {set i 0} {$i<=$max} {incr i} {
if {![info exists used($i)]} {
set def($i) "OP_NotUsed_$i"
}
if {$i>$max} {set max $i}
set name $def($i)
puts -nonewline [format {#define %-16s %3d} $name $i]
set com {}
if {[info exists sameas($i)]} {
set com "same as $sameas($i)"
}
if {[info exists synopsis($name)]} {
set x $synopsis($name)
if {$com==""} {
set com "synopsis: $x"
} else {
append com ", synopsis: $x"
}
}
if {$com!=""} {
puts -nonewline [format " /* %-42s */" $com]
}
puts ""
}
if {$max>255} {
error "More than 255 opcodes - VdbeOp.opcode is of type u8!"
}
# Generate the bitvectors:
#
set bv(0) 0
for {set i 0} {$i<=$max} {incr i} {
set x 0
set name $def($i)
if {[string match OP_NotUsed* $name]==0} {
if {$jump($name)} {incr x 1}
if {$in1($name)} {incr x 2}
if {$in2($name)} {incr x 4}
if {$in3($name)} {incr x 8}
if {$out2($name)} {incr x 16}
if {$out3($name)} {incr x 32}
}
set bv($i) $x
}
puts ""
puts "/* Properties such as \"out2\" or \"jump\" that are specified in"
puts "** comments following the \"case\" for each opcode in the vdbe.c"
puts "** are encoded into bitvectors as follows:"
puts "*/"
puts "#define OPFLG_JUMP 0x01 /* jump: P2 holds jmp target */"
puts "#define OPFLG_IN1 0x02 /* in1: P1 is an input */"
puts "#define OPFLG_IN2 0x04 /* in2: P2 is an input */"
puts "#define OPFLG_IN3 0x08 /* in3: P3 is an input */"
puts "#define OPFLG_OUT2 0x10 /* out2: P2 is an output */"
puts "#define OPFLG_OUT3 0x20 /* out3: P3 is an output */"
puts "#define OPFLG_INITIALIZER \173\\"
for {set i 0} {$i<=$max} {incr i} {
if {$i%8==0} {
puts -nonewline [format "/* %3d */" $i]
}
puts -nonewline [format " 0x%02x," $bv($i)]
if {$i%8==7} {
puts "\\"
}
}
puts "\175"
puts ""
puts "/* The sqlite3P2Values() routine is able to run faster if it knows"
puts "** the value of the largest JUMP opcode. The smaller the maximum"
puts "** JUMP opcode the better, so the mkopcodeh.tcl script that"
puts "** generated this include file strives to group all JUMP opcodes"
puts "** together near the beginning of the list."
puts "*/"
puts "#define SQLITE_MX_JUMP_OPCODE $mxJump /* Maximum JUMP opcode */"

View File

@ -1,51 +0,0 @@
#!/usr/bin/tclsh
#
# This script is used to generate the array of strings and the enum
# that appear at the beginning of the C code implementation of a
# a TCL command and that define the available subcommands for that
# TCL command.
set prefix {}
while {![eof stdin]} {
set line [gets stdin]
if {$line==""} continue
regsub -all "\[ \t\n,\]+" [string trim $line] { } line
foreach token [split $line { }] {
if {![regexp {(([a-zA-Z]+)_)?([_a-zA-Z]+)} $token all px p2 name]} continue
lappend namelist [string tolower $name]
if {$px!=""} {set prefix $p2}
}
}
puts " static const char *${prefix}_strs\[\] = \173"
set col 0
proc put_item x {
  # Emit one table entry to stdout, three entries per line: the first
  # two entries on a line are left-padded/left-justified to a fixed
  # width; the third entry terminates the line.  The global $col counts
  # how many entries are already on the current line.
  global col
  if {$col == 0} {
    puts -nonewline "  "
  }
  if {$col >= 2} {
    puts $x
    set col 0
  } else {
    puts -nonewline [format " %-21s" $x]
    incr col
  }
}
proc finalize {} {
  # Terminate any partially filled output line started by put_item and
  # reset the global column counter for the next table.
  global col
  if {$col > 0} {
    puts {}
  }
  set col 0
}
foreach name [lsort $namelist] {
put_item \"$name\",
}
put_item 0
finalize
puts " \175;"
puts " enum ${prefix}_enum \173"
foreach name [lsort $namelist] {
regsub -all {@} $name {} name
put_item ${prefix}_[string toupper $name],
}
finalize
puts " \175;"

View File

@ -1,470 +0,0 @@
#!/usr/bin/tclsh
#
# Run this script to generate the pragma name lookup table C code.
#
# To add new pragmas, first add the name and other relevant attributes
# of the pragma to the "pragma_def" object below. Then run this script
# to generate the ../src/pragma.h header file that contains macros and
# the lookup table needed for pragma name lookup in the pragma.c module.
# Then add the extra "case PragTyp_XXXXX:" and subsequent code for the
# new pragma in ../src/pragma.c.
#
set pragma_def {
NAME: full_column_names
TYPE: FLAG
ARG: SQLITE_FullColNames
IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS)
NAME: short_column_names
TYPE: FLAG
ARG: SQLITE_ShortColNames
IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS)
NAME: count_changes
TYPE: FLAG
ARG: SQLITE_CountRows
IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS)
NAME: empty_result_callbacks
TYPE: FLAG
ARG: SQLITE_NullCallback
IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS)
NAME: legacy_file_format
TYPE: FLAG
ARG: SQLITE_LegacyFileFmt
IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS)
NAME: fullfsync
TYPE: FLAG
ARG: SQLITE_FullFSync
IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS)
NAME: checkpoint_fullfsync
TYPE: FLAG
ARG: SQLITE_CkptFullFSync
IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS)
NAME: cache_spill
IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS)
NAME: reverse_unordered_selects
TYPE: FLAG
ARG: SQLITE_ReverseOrder
IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS)
NAME: query_only
TYPE: FLAG
ARG: SQLITE_QueryOnly
IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS)
NAME: automatic_index
TYPE: FLAG
ARG: SQLITE_AutoIndex
IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS)
IF: !defined(SQLITE_OMIT_AUTOMATIC_INDEX)
NAME: sql_trace
TYPE: FLAG
ARG: SQLITE_SqlTrace
IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS)
IF: defined(SQLITE_DEBUG)
NAME: vdbe_listing
TYPE: FLAG
ARG: SQLITE_VdbeListing
IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS)
IF: defined(SQLITE_DEBUG)
NAME: vdbe_trace
TYPE: FLAG
ARG: SQLITE_VdbeTrace
IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS)
IF: defined(SQLITE_DEBUG)
NAME: vdbe_addoptrace
TYPE: FLAG
ARG: SQLITE_VdbeAddopTrace
IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS)
IF: defined(SQLITE_DEBUG)
NAME: vdbe_debug
TYPE: FLAG
ARG: SQLITE_SqlTrace|SQLITE_VdbeListing|SQLITE_VdbeTrace
IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS)
IF: defined(SQLITE_DEBUG)
NAME: vdbe_eqp
TYPE: FLAG
ARG: SQLITE_VdbeEQP
IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS)
IF: defined(SQLITE_DEBUG)
NAME: ignore_check_constraints
TYPE: FLAG
ARG: SQLITE_IgnoreChecks
IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS)
IF: !defined(SQLITE_OMIT_CHECK)
NAME: writable_schema
TYPE: FLAG
ARG: SQLITE_WriteSchema|SQLITE_RecoveryMode
IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS)
NAME: read_uncommitted
TYPE: FLAG
ARG: SQLITE_ReadUncommitted
IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS)
NAME: recursive_triggers
TYPE: FLAG
ARG: SQLITE_RecTriggers
IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS)
NAME: foreign_keys
TYPE: FLAG
ARG: SQLITE_ForeignKeys
IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS)
IF: !defined(SQLITE_OMIT_FOREIGN_KEY) && !defined(SQLITE_OMIT_TRIGGER)
NAME: defer_foreign_keys
TYPE: FLAG
ARG: SQLITE_DeferFKs
IF: !defined(SQLITE_OMIT_FLAG_PRAGMAS)
IF: !defined(SQLITE_OMIT_FOREIGN_KEY) && !defined(SQLITE_OMIT_TRIGGER)
NAME: cell_size_check
TYPE: FLAG
ARG: SQLITE_CellSizeCk
NAME: default_cache_size
FLAG: NeedSchema
IF: !defined(SQLITE_OMIT_PAGER_PRAGMAS) && !defined(SQLITE_OMIT_DEPRECATED)
NAME: page_size
IF: !defined(SQLITE_OMIT_PAGER_PRAGMAS)
NAME: secure_delete
IF: !defined(SQLITE_OMIT_PAGER_PRAGMAS)
NAME: page_count
FLAG: NeedSchema
IF: !defined(SQLITE_OMIT_PAGER_PRAGMAS)
NAME: max_page_count
TYPE: PAGE_COUNT
FLAG: NeedSchema
IF: !defined(SQLITE_OMIT_PAGER_PRAGMAS)
NAME: locking_mode
IF: !defined(SQLITE_OMIT_PAGER_PRAGMAS)
NAME: journal_mode
FLAG: NeedSchema
IF: !defined(SQLITE_OMIT_PAGER_PRAGMAS)
NAME: journal_size_limit
IF: !defined(SQLITE_OMIT_PAGER_PRAGMAS)
NAME: cache_size
FLAG: NeedSchema
IF: !defined(SQLITE_OMIT_PAGER_PRAGMAS)
NAME: mmap_size
IF: !defined(SQLITE_OMIT_PAGER_PRAGMAS)
NAME: auto_vacuum
FLAG: NeedSchema
IF: !defined(SQLITE_OMIT_AUTOVACUUM)
NAME: incremental_vacuum
FLAG: NeedSchema
IF: !defined(SQLITE_OMIT_AUTOVACUUM)
NAME: temp_store
IF: !defined(SQLITE_OMIT_PAGER_PRAGMAS)
NAME: temp_store_directory
IF: !defined(SQLITE_OMIT_PAGER_PRAGMAS)
NAME: data_store_directory
IF: !defined(SQLITE_OMIT_PAGER_PRAGMAS) && SQLITE_OS_WIN
NAME: lock_proxy_file
IF: !defined(SQLITE_OMIT_PAGER_PRAGMAS) && SQLITE_ENABLE_LOCKING_STYLE
NAME: synchronous
FLAG: NeedSchema
IF: !defined(SQLITE_OMIT_PAGER_PRAGMAS)
NAME: table_info
FLAG: NeedSchema
IF: !defined(SQLITE_OMIT_SCHEMA_PRAGMAS)
NAME: stats
FLAG: NeedSchema
IF: !defined(SQLITE_OMIT_SCHEMA_PRAGMAS)
NAME: index_info
TYPE: INDEX_INFO
ARG: 0
FLAG: NeedSchema
IF: !defined(SQLITE_OMIT_SCHEMA_PRAGMAS)
NAME: index_xinfo
TYPE: INDEX_INFO
ARG: 1
FLAG: NeedSchema
IF: !defined(SQLITE_OMIT_SCHEMA_PRAGMAS)
NAME: index_list
FLAG: NeedSchema
IF: !defined(SQLITE_OMIT_SCHEMA_PRAGMAS)
NAME: database_list
FLAG: NeedSchema
IF: !defined(SQLITE_OMIT_SCHEMA_PRAGMAS)
NAME: collation_list
IF: !defined(SQLITE_OMIT_SCHEMA_PRAGMAS)
NAME: foreign_key_list
FLAG: NeedSchema
IF: !defined(SQLITE_OMIT_FOREIGN_KEY)
NAME: foreign_key_check
FLAG: NeedSchema
IF: !defined(SQLITE_OMIT_FOREIGN_KEY) && !defined(SQLITE_OMIT_TRIGGER)
NAME: parser_trace
IF: defined(SQLITE_DEBUG) && !defined(SQLITE_OMIT_PARSER_TRACE)
NAME: case_sensitive_like
NAME: integrity_check
FLAG: NeedSchema
IF: !defined(SQLITE_OMIT_INTEGRITY_CHECK)
NAME: quick_check
TYPE: INTEGRITY_CHECK
FLAG: NeedSchema
IF: !defined(SQLITE_OMIT_INTEGRITY_CHECK)
NAME: encoding
IF: !defined(SQLITE_OMIT_UTF16)
NAME: schema_version
TYPE: HEADER_VALUE
ARG: BTREE_SCHEMA_VERSION
IF: !defined(SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS)
NAME: user_version
TYPE: HEADER_VALUE
ARG: BTREE_USER_VERSION
IF: !defined(SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS)
NAME: data_version
TYPE: HEADER_VALUE
ARG: BTREE_DATA_VERSION
FLAG: ReadOnly
IF: !defined(SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS)
NAME: freelist_count
TYPE: HEADER_VALUE
ARG: BTREE_FREE_PAGE_COUNT
FLAG: ReadOnly
IF: !defined(SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS)
NAME: application_id
TYPE: HEADER_VALUE
ARG: BTREE_APPLICATION_ID
IF: !defined(SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS)
NAME: compile_options
IF: !defined(SQLITE_OMIT_COMPILEOPTION_DIAGS)
NAME: wal_checkpoint
FLAG: NeedSchema
IF: !defined(SQLITE_OMIT_WAL)
NAME: wal_autocheckpoint
IF: !defined(SQLITE_OMIT_WAL)
NAME: shrink_memory
NAME: busy_timeout
NAME: lock_status
IF: defined(SQLITE_DEBUG) || defined(SQLITE_TEST)
NAME: key
IF: defined(SQLITE_HAS_CODEC)
NAME: rekey
IF: defined(SQLITE_HAS_CODEC)
NAME: hexkey
IF: defined(SQLITE_HAS_CODEC)
NAME: hexrekey
TYPE: HEXKEY
IF: defined(SQLITE_HAS_CODEC)
NAME: activate_extensions
IF: defined(SQLITE_HAS_CODEC) || defined(SQLITE_ENABLE_CEROD)
NAME: soft_heap_limit
NAME: threads
}
# Open the output file
#
set destfile "[file dir [file dir [file normal $argv0]]]/src/pragma.h"
puts "Overwriting $destfile with new pragma table..."
set fd [open $destfile wb]
puts $fd {/* DO NOT EDIT!
** This file is automatically generated by the script at
** ../tool/mkpragmatab.tcl. To update the set of pragmas, edit
** that script and rerun it.
*/}
# Parse the PRAGMA table above.
#
set name {}
set type {}
set if {}
set flags {}
set arg 0
proc record_one {} {
# Flush the pragma currently being accumulated by the parse loop into
# the allbyname() array (value = {type arg if-list flags}), then reset
# the per-pragma accumulator globals for the next NAME: entry.
# A no-op when no pragma is pending ($name is empty).
global name type if arg allbyname typebyif flags
if {$name==""} return
set allbyname($name) [list $type $arg $if $flags]
set name {}
set type {}
set if {}
set flags {}
set arg 0
}
foreach line [split $pragma_def \n] {
set line [string trim $line]
if {$line==""} continue
foreach {id val} [split $line :] break
set val [string trim $val]
if {$id=="NAME"} {
record_one
set name $val
set type [string toupper $val]
} elseif {$id=="TYPE"} {
set type $val
} elseif {$id=="ARG"} {
set arg $val
} elseif {$id=="IF"} {
lappend if $val
} elseif {$id=="FLAG"} {
foreach term [split $val] {
lappend flags $term
set allflags($term) 1
}
} else {
error "bad pragma_def line: $line"
}
}
record_one
set allnames [lsort [array names allbyname]]
# Generate #defines for all pragma type names. Group the pragmas that are
# omit in default builds (defined(SQLITE_DEBUG) and defined(SQLITE_HAS_CODEC))
# at the end.
#
set pnum 0
foreach name $allnames {
set type [lindex $allbyname($name) 0]
if {[info exists seentype($type)]} continue
set if [lindex $allbyname($name) 2]
if {[regexp SQLITE_DEBUG $if] || [regexp SQLITE_HAS_CODEC $if]} continue
set seentype($type) 1
puts $fd [format {#define %-35s %4d} PragTyp_$type $pnum]
incr pnum
}
foreach name $allnames {
set type [lindex $allbyname($name) 0]
if {[info exists seentype($type)]} continue
set if [lindex $allbyname($name) 2]
if {[regexp SQLITE_DEBUG $if]} continue
set seentype($type) 1
puts $fd [format {#define %-35s %4d} PragTyp_$type $pnum]
incr pnum
}
foreach name $allnames {
set type [lindex $allbyname($name) 0]
if {[info exists seentype($type)]} continue
set seentype($type) 1
puts $fd [format {#define %-35s %4d} PragTyp_$type $pnum]
incr pnum
}
# Generate #defines for flags
#
set fv 1
foreach f [lsort [array names allflags]] {
puts $fd [format {#define PragFlag_%-20s 0x%02x} $f $fv]
set fv [expr {$fv*2}]
}
# Generate the lookup table
#
puts $fd "static const struct sPragmaNames \173"
puts $fd " const char *const zName; /* Name of pragma */"
puts $fd " u8 ePragTyp; /* PragTyp_XXX value */"
puts $fd " u8 mPragFlag; /* Zero or more PragFlag_XXX values */"
puts $fd " u32 iArg; /* Extra argument */"
puts $fd "\175 aPragmaNames\[\] = \173"
set current_if {}
set spacer [format { %26s } {}]
foreach name $allnames {
foreach {type arg if flag} $allbyname($name) break
if {$if!=$current_if} {
if {$current_if!=""} {
foreach this_if $current_if {
puts $fd "#endif"
}
}
set current_if $if
if {$current_if!=""} {
foreach this_if $current_if {
puts $fd "#if $this_if"
}
}
}
set typex [format PragTyp_%-23s $type,]
if {$flag==""} {
set flagx "0"
} else {
set flagx PragFlag_[join $flag {|PragFlag_}]
}
puts $fd " \173 /* zName: */ \"$name\","
puts $fd " /* ePragTyp: */ PragTyp_$type,"
puts $fd " /* ePragFlag: */ $flagx,"
puts $fd " /* iArg: */ $arg \175,"
}
if {$current_if!=""} {
foreach this_if $current_if {
puts $fd "#endif"
}
}
puts $fd "\175;"
# count the number of pragmas, for information purposes
#
set allcnt 0
set dfltcnt 0
foreach name $allnames {
incr allcnt
set if [lindex $allbyname($name) 2]
if {[regexp {^defined} $if] || [regexp {[^!]defined} $if]} continue
incr dfltcnt
}
puts $fd "/* Number of pragmas: $dfltcnt on by default, $allcnt total. */"

View File

@ -1,237 +0,0 @@
# 2008 October 9
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#*************************************************************************
# This file generates SQL text used for performance testing.
#
# $Id: mkspeedsql.tcl,v 1.1 2008/10/09 17:57:34 drh Exp $
#
# Set a uniform random seed
expr srand(0)
# The number_name procedure below converts its argment (an integer)
# into a string which is the English-language name for that number.
#
# Example:
#
# puts [number_name 123] -> "one hundred twenty three"
#
set ones {zero one two three four five six seven eight nine
ten eleven twelve thirteen fourteen fifteen sixteen seventeen
eighteen nineteen}
set tens {{} ten twenty thirty forty fifty sixty seventy eighty ninety}
proc number_name {n} {
  # Render the non-negative integer $n as its English-language name,
  # e.g. 123 -> "one hundred twenty three".  Thousands are handled by
  # recursion; zero yields "zero".  Relies on the global word lists
  # $::ones (0-19) and $::tens (multiples of ten).
  set words {}
  if {$n >= 1000} {
    lappend words [number_name [expr {$n / 1000}]] thousand
    set n [expr {$n % 1000}]
  }
  if {$n >= 100} {
    lappend words [lindex $::ones [expr {$n / 100}]] hundred
    set n [expr {$n % 100}]
  }
  if {$n >= 20} {
    lappend words [lindex $::tens [expr {$n / 10}]]
    set n [expr {$n % 10}]
  }
  if {$n > 0} {
    lappend words [lindex $::ones $n]
  }
  if {[llength $words] == 0} {
    return zero
  }
  return [join $words " "]
}
# Create a database schema.
#
puts {
PRAGMA page_size=1024;
PRAGMA cache_size=8192;
PRAGMA locking_mode=EXCLUSIVE;
CREATE TABLE t1(a INTEGER, b INTEGER, c TEXT);
CREATE TABLE t2(a INTEGER, b INTEGER, c TEXT);
CREATE INDEX i2a ON t2(a);
CREATE INDEX i2b ON t2(b);
SELECT name FROM sqlite_master ORDER BY 1;
}
# 50000 INSERTs on an unindexed table
#
set t1c_list {}
puts {BEGIN;}
for {set i 1} {$i<=50000} {incr i} {
set r [expr {int(rand()*500000)}]
set x [number_name $r]
lappend t1c_list $x
puts "INSERT INTO t1 VALUES($i,$r,'$x');"
}
puts {COMMIT;}
# 50000 INSERTs on an indexed table
#
puts {BEGIN;}
for {set i 1} {$i<=50000} {incr i} {
set r [expr {int(rand()*500000)}]
puts "INSERT INTO t2 VALUES($i,$r,'[number_name $r]');"
}
puts {COMMIT;}
# 50 SELECTs on an integer comparison. There is no index so
# a full table scan is required.
#
for {set i 0} {$i<50} {incr i} {
set lwr [expr {$i*100}]
set upr [expr {($i+10)*100}]
puts "SELECT count(*), avg(b) FROM t1 WHERE b>=$lwr AND b<$upr;"
}
# 50 SELECTs on an LIKE comparison. There is no index so a full
# table scan is required.
#
for {set i 0} {$i<50} {incr i} {
puts "SELECT count(*), avg(b) FROM t1 WHERE c LIKE '%[number_name $i]%';"
}
# Create indices
#
puts {BEGIN;}
puts {
CREATE INDEX i1a ON t1(a);
CREATE INDEX i1b ON t1(b);
CREATE INDEX i1c ON t1(c);
}
puts {COMMIT;}
# 5000 SELECTs on an integer comparison where the integer is
# indexed.
#
set sql {}
for {set i 0} {$i<5000} {incr i} {
set lwr [expr {$i*100}]
set upr [expr {($i+10)*100}]
puts "SELECT count(*), avg(b) FROM t1 WHERE b>=$lwr AND b<$upr;"
}
# 100000 random SELECTs against rowid.
#
for {set i 1} {$i<=100000} {incr i} {
set id [expr {int(rand()*50000)+1}]
puts "SELECT c FROM t1 WHERE rowid=$id;"
}
# 100000 random SELECTs against a unique indexed column.
#
for {set i 1} {$i<=100000} {incr i} {
set id [expr {int(rand()*50000)+1}]
puts "SELECT c FROM t1 WHERE a=$id;"
}
# 50000 random SELECTs against an indexed column text column
#
set nt1c [llength $t1c_list]
for {set i 0} {$i<50000} {incr i} {
set r [expr {int(rand()*$nt1c)}]
set c [lindex $t1c_list $i]
puts "SELECT c FROM t1 WHERE c='$c';"
}
# Vacuum
puts {VACUUM;}
# 5000 updates of ranges where the field being compared is indexed.
#
puts {BEGIN;}
for {set i 0} {$i<5000} {incr i} {
set lwr [expr {$i*2}]
set upr [expr {($i+1)*2}]
puts "UPDATE t1 SET b=b*2 WHERE a>=$lwr AND a<$upr;"
}
puts {COMMIT;}
# 50000 single-row updates. An index is used to find the row quickly.
#
puts {BEGIN;}
for {set i 0} {$i<50000} {incr i} {
set r [expr {int(rand()*500000)}]
puts "UPDATE t1 SET b=$r WHERE a=$i;"
}
puts {COMMIT;}
# 1 big text update that touches every row in the table.
#
puts {
UPDATE t1 SET c=a;
}
# Many individual text updates. Each row in the table is
# touched through an index.
#
puts {BEGIN;}
for {set i 1} {$i<=50000} {incr i} {
set r [expr {int(rand()*500000)}]
puts "UPDATE t1 SET c='[number_name $r]' WHERE a=$i;"
}
puts {COMMIT;}
# Delete all content in a table.
#
puts {DELETE FROM t1;}
# Copy one table into another
#
puts {INSERT INTO t1 SELECT * FROM t2;}
# Delete all content in a table, one row at a time.
#
puts {DELETE FROM t1 WHERE 1;}
# Refill the table yet again
#
puts {INSERT INTO t1 SELECT * FROM t2;}
# Drop the table and recreate it without its indices.
#
puts {BEGIN;}
puts {
DROP TABLE t1;
CREATE TABLE t1(a INTEGER, b INTEGER, c TEXT);
}
puts {COMMIT;}
# Refill the table yet again. This copy should be faster because
# there are no indices to deal with.
#
puts {INSERT INTO t1 SELECT * FROM t2;}
# Select 20000 rows from the table at random.
#
puts {
SELECT rowid FROM t1 ORDER BY random() LIMIT 20000;
}
# Delete 20000 random rows from the table.
#
puts {
DELETE FROM t1 WHERE rowid IN
(SELECT rowid FROM t1 ORDER BY random() LIMIT 20000);
}
puts {SELECT count(*) FROM t1;}
# Delete 20000 more rows at random from the table.
#
puts {
DELETE FROM t1 WHERE rowid IN
(SELECT rowid FROM t1 ORDER BY random() LIMIT 20000);
}
puts {SELECT count(*) FROM t1;}

View File

@ -1,355 +0,0 @@
#!/usr/bin/tclsh
#
# To build a single huge source file holding all of SQLite (or at
# least the core components - the test harness, shell, and TCL
# interface are omitted.) first do
#
# make target_source
#
# The make target above moves all of the source code files into
# a subdirectory named "tsrc". (This script expects to find the files
# there and will not work if they are not found.) There are a few
# generated C code files that are also added to the tsrc directory.
# For example, the "parse.c" and "parse.h" files to implement the
# the parser are derived from "parse.y" using lemon. And the
# "keywordhash.h" files is generated by a program named "mkkeywordhash".
#
# After the "tsrc" directory has been created and populated, run
# this script:
#
# tclsh mksqlite3c-noext.tcl
#
# The amalgamated SQLite code will be written into sqlite3.c
#
# Begin by reading the "sqlite3.h" header file. Extract the version number
# from in this file. The version number is needed to generate the header
# comment of the amalgamation.
#
if {[lsearch $argv --nostatic]>=0} {
set addstatic 0
} else {
set addstatic 1
}
if {[lsearch $argv --linemacros]>=0} {
set linemacros 1
} else {
set linemacros 0
}
set in [open tsrc/sqlite3.h]
set cnt 0
set VERSION ?????
while {![eof $in]} {
set line [gets $in]
if {$line=="" && [eof $in]} break
incr cnt
regexp {#define\s+SQLITE_VERSION\s+"(.*)"} $line all VERSION
}
close $in
# Open the output file and write a header comment at the beginning
# of the file.
#
set out [open sqlite3.c w]
# Force the output to use unix line endings, even on Windows.
fconfigure $out -translation lf
# $today is computed here but is not referenced by the banner template
# below in this version of the script.
set today [clock format [clock seconds] -format "%Y-%m-%d %H:%M:%S UTC" -gmt 1]
puts $out [subst \
{/******************************************************************************
** This file is an amalgamation of many separate C source files from SQLite
** version $VERSION. By combining all the individual C code files into this
** single large file, the entire code can be compiled as a single translation
** unit. This allows many compilers to do optimizations that would not be
** possible if the files were compiled separately. Performance improvements
** of 5% or more are commonly seen when SQLite is compiled as a single
** translation unit.
**
** This file is all you need to compile SQLite. To use SQLite in other
** programs, you need this file and the "sqlite3.h" header file that defines
** the programming interface to the SQLite library. (If you do not have
** the "sqlite3.h" header file at hand, you will find a copy embedded within
** the text of this file. Search for "Begin file sqlite3.h" to find the start
** of the embedded sqlite3.h header file.) Additional code files may be needed
** if you want a wrapper to interface SQLite with your choice of programming
** language. The code for the "sqlite3" command-line shell is also in a
** separate file. This file contains only code for the core SQLite library.
*/
#define SQLITE_CORE 1
#define SQLITE_AMALGAMATION 1}]
# Unless --nostatic was given, map SQLITE_PRIVATE to "static" so that
# internal symbols do not leak out of the single translation unit.
if {$addstatic} {
puts $out \
{#ifndef SQLITE_PRIVATE
# define SQLITE_PRIVATE static
#endif}
}
# These are the header files used by SQLite. The first time any of these
# files are seen in a #include statement in the C code, include the complete
# text of the file in-line. The file only needs to be included once.
#
foreach hdr {
btree.h
btreeInt.h
hash.h
hwtime.h
keywordhash.h
msvc.h
mutex.h
opcodes.h
os_common.h
os_setup.h
os_win.h
os.h
pager.h
parse.h
pcache.h
pragma.h
sqlite3ext.h
sqlite3.h
sqliteicu.h
sqliteInt.h
sqliteLimit.h
vdbe.h
vdbeInt.h
vxworks.h
wal.h
whereInt.h
} {
set available_hdr($hdr) 1
}
# sqliteInt.h is copied explicitly as the very first "source file" in the
# copy loop below, so it must never be inlined at an #include site.
set available_hdr(sqliteInt.h) 0
# These headers should be copied into the amalgamation without modifying any
# of their function declarations or definitions.
set varonly_hdr(sqlite3.h) 1
# These are the functions that accept a variable number of arguments. They
# always need to use the "cdecl" calling convention even when another calling
# convention (e.g. "stdcall") is being used for the rest of the library.
set cdecllist {
sqlite3_config
sqlite3_db_config
sqlite3_log
sqlite3_mprintf
sqlite3_snprintf
sqlite3_test_control
sqlite3_vtab_config
}
# 78 stars used for comment formatting.
set s78 \
{*****************************************************************************}
# Emit a /*....*/ section banner into the output stream.  The star bar is
# trimmed so that every banner comes out at the same overall width.
#
proc section_comment {text} {
  global out s78
  set bar [string range $s78 0 [expr {60 - [string length $text]}]]
  puts $out "/************** $text $bar/"
}
# Read the source file named $filename and write it into the
# sqlite3.c output file. If any #include statements are seen,
# process them appropriately: known SQLite headers are inlined at their
# first #include site (and commented out afterwards), and SQLITE_API /
# SQLITE_PRIVATE annotations are applied to exported/internal symbols.
#
# Bug fix: the parameter list had been garbled to "(unknown)", so every
# reference to $filename in the body raised an error on the first call.
# The single formal parameter {filename} is restored.
#
proc copy_file {filename} {
  global seen_hdr available_hdr varonly_hdr cdecllist out addstatic linemacros
  set ln 0
  set tail [file tail $filename]
  section_comment "Begin file $tail"
  if {$linemacros} {puts $out "#line 1 \"$filename\""}
  set in [open $filename r]
  # Patterns recognizing sqlite3* variable and function declarations.
  set varpattern {^[a-zA-Z][a-zA-Z_0-9 *]+(sqlite3[_a-zA-Z0-9]+)(\[|;| =)}
  set declpattern {([a-zA-Z][a-zA-Z_0-9 ]+ \**)(sqlite3[_a-zA-Z0-9]+)(\(.*)}
  if {[file extension $filename]==".h"} {
    # Declarations inside header files may be indented.
    set declpattern " *$declpattern"
  }
  set declpattern ^$declpattern\$
  while {![eof $in]} {
    set line [gets $in]
    incr ln
    if {[regexp {^\s*#\s*include\s+["<]([^">]+)[">]} $line all hdr]} {
      if {[info exists available_hdr($hdr)]} {
        if {$available_hdr($hdr)} {
          # First sighting of a known header: inline its full text here.
          # os_common.h and hwtime.h stay available so they can be inlined
          # again at later include sites.
          if {$hdr!="os_common.h" && $hdr!="hwtime.h"} {
            set available_hdr($hdr) 0
          }
          section_comment "Include $hdr in the middle of $tail"
          copy_file tsrc/$hdr
          section_comment "Continuing where we left off in $tail"
          if {$linemacros} {puts $out "#line [expr {$ln+1}] \"$filename\""}
        } else {
          # Comment out the entire line, replacing any nested comment
          # begin/end markers with the harmless substring "**".
          puts $out "/* [string map [list /* ** */ **] $line] */"
        }
      } elseif {![info exists seen_hdr($hdr)]} {
        # Unknown (e.g. system) header: emit the #include once, and cache
        # it unless an "amalgamator: dontcache" directive says otherwise.
        if {![regexp {/\*\s+amalgamator:\s+dontcache\s+\*/} $line]} {
          set seen_hdr($hdr) 1
        }
        puts $out $line
      } elseif {[regexp {/\*\s+amalgamator:\s+keep\s+\*/} $line]} {
        # This include file must be kept because there was a "keep"
        # directive inside of a line comment.
        puts $out $line
      } else {
        # Comment out the entire line, replacing any nested comment
        # begin/end markers with the harmless substring "**".
        puts $out "/* [string map [list /* ** */ **] $line] */"
      }
    } elseif {[regexp {^#ifdef __cplusplus} $line]} {
      # The amalgamation is compiled as plain C; disable extern "C" blocks.
      puts $out "#if 0"
    } elseif {!$linemacros && [regexp {^#line} $line]} {
      # Skip #line directives.
    } elseif {$addstatic && ![regexp {^(static|typedef)} $line]} {
      # Skip adding the SQLITE_PRIVATE or SQLITE_API keyword before
      # functions if this header file does not need it.
      if {![info exists varonly_hdr($tail)]
          && [regexp $declpattern $line all rettype funcname rest]} {
        regsub {^SQLITE_API } $line {} line
        # Add the SQLITE_PRIVATE or SQLITE_API keyword before functions
        # so that linkage can be modified at compile-time.
        if {[regexp {^sqlite3_} $funcname]} {
          set line SQLITE_API
          append line " " [string trim $rettype]
          if {[string index $rettype end] ne "*"} {
            append line " "
          }
          # Varargs entry points must stay cdecl; everything else gets the
          # library-wide (possibly stdcall) calling convention.
          if {[lsearch -exact $cdecllist $funcname] >= 0} {
            append line SQLITE_CDECL
          } else {
            append line SQLITE_STDCALL
          }
          append line " " $funcname $rest
          puts $out $line
        } else {
          puts $out "SQLITE_PRIVATE $line"
        }
      } elseif {[regexp $varpattern $line all varname]} {
        # Add the SQLITE_PRIVATE before variable declarations or
        # definitions for internal use
        regsub {^SQLITE_API } $line {} line
        if {![regexp {^sqlite3_} $varname]} {
          regsub {^extern } $line {} line
          puts $out "SQLITE_PRIVATE $line"
        } else {
          if {[regexp {const char sqlite3_version\[\];} $line]} {
            set line {const char sqlite3_version[] = SQLITE_VERSION;}
          }
          regsub {^SQLITE_EXTERN } $line {} line
          puts $out "SQLITE_API $line"
        }
      } elseif {[regexp {^(SQLITE_EXTERN )?void \(\*sqlite3IoTrace\)} $line]} {
        regsub {^SQLITE_API } $line {} line
        regsub {^SQLITE_EXTERN } $line {} line
        puts $out $line
      } elseif {[regexp {^void \(\*sqlite3Os} $line]} {
        regsub {^SQLITE_API } $line {} line
        puts $out "SQLITE_PRIVATE $line"
      } else {
        puts $out $line
      }
    } else {
      puts $out $line
    }
  }
  close $in
  section_comment "End of $tail"
}
# Process the source files. Process files containing commonly
# used subroutines first in order to help the compiler find
# inlining opportunities.
#
foreach file {
sqliteInt.h
global.c
ctime.c
status.c
date.c
os.c
fault.c
mem0.c
mem1.c
mem2.c
mem3.c
mem5.c
mutex.c
mutex_noop.c
mutex_unix.c
mutex_w32.c
malloc.c
printf.c
random.c
threads.c
utf.c
util.c
hash.c
opcodes.c
os_unix.c
os_win.c
bitvec.c
pcache.c
pcache1.c
rowset.c
pager.c
wal.c
btmutex.c
btree.c
backup.c
vdbemem.c
vdbeaux.c
vdbeapi.c
vdbetrace.c
vdbe.c
vdbeblob.c
vdbesort.c
journal.c
memjournal.c
walker.c
resolve.c
expr.c
alter.c
analyze.c
attach.c
auth.c
build.c
callback.c
delete.c
func.c
fkey.c
insert.c
legacy.c
loadext.c
pragma.c
prepare.c
select.c
table.c
trigger.c
update.c
vacuum.c
vtab.c
where.c
parse.c
tokenize.c
complete.c
main.c
notify.c
} {
copy_file tsrc/$file
}
# All input has been copied; finish the amalgamation.
close $out

View File

@ -1,388 +0,0 @@
#!/usr/bin/tclsh
#
# To build a single huge source file holding all of SQLite (or at
# least the core components - the test harness, shell, and TCL
# interface are omitted.) first do
#
# make target_source
#
# The make target above moves all of the source code files into
# a subdirectory named "tsrc". (This script expects to find the files
# there and will not work if they are not found.) There are a few
# generated C code files that are also added to the tsrc directory.
# For example, the "parse.c" and "parse.h" files that implement
# the parser are derived from "parse.y" using lemon. And the
# "keywordhash.h" file is generated by a program named "mkkeywordhash".
#
# After the "tsrc" directory has been created and populated, run
# this script:
#
# tclsh mksqlite3c.tcl --srcdir $SRC
#
# The amalgamated SQLite code will be written into sqlite3.c
#
# Begin by reading the "sqlite3.h" header file. Extract the version number
# from this file. The version number is needed to generate the header
# comment of the amalgamation.
#
# Defaults: annotate internal symbols as static, no #line directives.
set addstatic 1
set linemacros 0
# NOTE(review): this loop rejects unknown options, and the "--srcdir"
# shown in the usage example above is not handled here -- confirm whether
# the example is stale.
for {set i 0} {$i<[llength $argv]} {incr i} {
set x [lindex $argv $i]
if {[regexp {^-+nostatic$} $x]} {
set addstatic 0
} elseif {[regexp {^-+linemacros} $x]} {
set linemacros 1
} else {
error "unknown command-line option: $x"
}
}
# Scan tsrc/sqlite3.h for the SQLITE_VERSION string.  $cnt counts the
# lines read; VERSION stays "?????" if no #define is ever matched.
set in [open tsrc/sqlite3.h]
set cnt 0
set VERSION ?????
while {![eof $in]} {
set line [gets $in]
# [gets] returns "" at EOF; require both conditions so that genuinely
# blank lines in the middle of the header are still counted.
if {$line=="" && [eof $in]} break
incr cnt
regexp {#define\s+SQLITE_VERSION\s+"(.*)"} $line all VERSION
}
close $in
# Open the output file and write a header comment at the beginning
# of the file.
#
set out [open sqlite3.c w]
# Force the output to use unix line endings, even on Windows.
fconfigure $out -translation lf
# $today is computed here but is not referenced by the banner template
# below in this version of the script.
set today [clock format [clock seconds] -format "%Y-%m-%d %H:%M:%S UTC" -gmt 1]
puts $out [subst \
{/******************************************************************************
** This file is an amalgamation of many separate C source files from SQLite
** version $VERSION. By combining all the individual C code files into this
** single large file, the entire code can be compiled as a single translation
** unit. This allows many compilers to do optimizations that would not be
** possible if the files were compiled separately. Performance improvements
** of 5% or more are commonly seen when SQLite is compiled as a single
** translation unit.
**
** This file is all you need to compile SQLite. To use SQLite in other
** programs, you need this file and the "sqlite3.h" header file that defines
** the programming interface to the SQLite library. (If you do not have
** the "sqlite3.h" header file at hand, you will find a copy embedded within
** the text of this file. Search for "Begin file sqlite3.h" to find the start
** of the embedded sqlite3.h header file.) Additional code files may be needed
** if you want a wrapper to interface SQLite with your choice of programming
** language. The code for the "sqlite3" command-line shell is also in a
** separate file. This file contains only code for the core SQLite library.
*/
#define SQLITE_CORE 1
#define SQLITE_AMALGAMATION 1}]
# Unless --nostatic was given, map SQLITE_PRIVATE to "static" so that
# internal symbols do not leak out of the single translation unit.
if {$addstatic} {
puts $out \
{#ifndef SQLITE_PRIVATE
# define SQLITE_PRIVATE static
#endif}
}
# These are the header files used by SQLite. The first time any of these
# files are seen in a #include statement in the C code, include the complete
# text of the file in-line. The file only needs to be included once.
#
foreach hdr {
btree.h
btreeInt.h
fts3.h
fts3Int.h
fts3_hash.h
fts3_tokenizer.h
hash.h
hwtime.h
keywordhash.h
msvc.h
mutex.h
opcodes.h
os_common.h
os_setup.h
os_win.h
os.h
pager.h
parse.h
pcache.h
pragma.h
rtree.h
sqlite3.h
sqlite3ext.h
sqlite3rbu.h
sqliteicu.h
sqliteInt.h
sqliteLimit.h
vdbe.h
vdbeInt.h
vxworks.h
wal.h
whereInt.h
} {
set available_hdr($hdr) 1
}
# sqliteInt.h is copied explicitly as the very first "source file" in the
# copy loop below, so it must never be inlined at an #include site.
set available_hdr(sqliteInt.h) 0
# These headers should be copied into the amalgamation without modifying any
# of their function declarations or definitions.
set varonly_hdr(sqlite3.h) 1
# These are the functions that accept a variable number of arguments. They
# always need to use the "cdecl" calling convention even when another calling
# convention (e.g. "stdcall") is being used for the rest of the library.
set cdecllist {
sqlite3_config
sqlite3_db_config
sqlite3_log
sqlite3_mprintf
sqlite3_snprintf
sqlite3_test_control
sqlite3_vtab_config
}
# 78 stars used for comment formatting.
set s78 \
{*****************************************************************************}
# Emit a /*....*/ section banner into the output stream.  The star bar is
# trimmed so that every banner comes out at the same overall width.
#
proc section_comment {text} {
  global out s78
  set bar [string range $s78 0 [expr {60 - [string length $text]}]]
  puts $out "/************** $text $bar/"
}
# Read the source file named $filename and write it into the
# sqlite3.c output file. If any #include statements are seen,
# process them appropriately: known SQLite headers are inlined at their
# first #include site (and commented out afterwards), and SQLITE_API /
# SQLITE_PRIVATE annotations are applied to exported/internal symbols.
#
# Bug fix: the parameter list had been garbled to "(unknown)", so every
# reference to $filename in the body raised an error on the first call.
# The single formal parameter {filename} is restored.
#
proc copy_file {filename} {
  global seen_hdr available_hdr varonly_hdr cdecllist out addstatic linemacros
  set ln 0
  set tail [file tail $filename]
  section_comment "Begin file $tail"
  if {$linemacros} {puts $out "#line 1 \"$filename\""}
  set in [open $filename r]
  # Patterns recognizing sqlite3* variable and function declarations.
  set varpattern {^[a-zA-Z][a-zA-Z_0-9 *]+(sqlite3[_a-zA-Z0-9]+)(\[|;| =)}
  set declpattern {([a-zA-Z][a-zA-Z_0-9 ]+ \**)(sqlite3[_a-zA-Z0-9]+)(\(.*)}
  if {[file extension $filename]==".h"} {
    # Declarations inside header files may be indented.
    set declpattern " *$declpattern"
  }
  set declpattern ^$declpattern\$
  while {![eof $in]} {
    set line [gets $in]
    incr ln
    if {[regexp {^\s*#\s*include\s+["<]([^">]+)[">]} $line all hdr]} {
      if {[info exists available_hdr($hdr)]} {
        if {$available_hdr($hdr)} {
          # First sighting of a known header: inline its full text here.
          # os_common.h and hwtime.h stay available so they can be inlined
          # again at later include sites.
          if {$hdr!="os_common.h" && $hdr!="hwtime.h"} {
            set available_hdr($hdr) 0
          }
          section_comment "Include $hdr in the middle of $tail"
          copy_file tsrc/$hdr
          section_comment "Continuing where we left off in $tail"
          if {$linemacros} {puts $out "#line [expr {$ln+1}] \"$filename\""}
        } else {
          # Comment out the entire line, replacing any nested comment
          # begin/end markers with the harmless substring "**".
          puts $out "/* [string map [list /* ** */ **] $line] */"
        }
      } elseif {![info exists seen_hdr($hdr)]} {
        # Unknown (e.g. system) header: emit the #include once, and cache
        # it unless an "amalgamator: dontcache" directive says otherwise.
        if {![regexp {/\*\s+amalgamator:\s+dontcache\s+\*/} $line]} {
          set seen_hdr($hdr) 1
        }
        puts $out $line
      } elseif {[regexp {/\*\s+amalgamator:\s+keep\s+\*/} $line]} {
        # This include file must be kept because there was a "keep"
        # directive inside of a line comment.
        puts $out $line
      } else {
        # Comment out the entire line, replacing any nested comment
        # begin/end markers with the harmless substring "**".
        puts $out "/* [string map [list /* ** */ **] $line] */"
      }
    } elseif {[regexp {^#ifdef __cplusplus} $line]} {
      # The amalgamation is compiled as plain C; disable extern "C" blocks.
      puts $out "#if 0"
    } elseif {!$linemacros && [regexp {^#line} $line]} {
      # Skip #line directives.
    } elseif {$addstatic
              && ![regexp {^(static|typedef|SQLITE_PRIVATE)} $line]} {
      # Skip adding the SQLITE_PRIVATE or SQLITE_API keyword before
      # functions if this header file does not need it.
      if {![info exists varonly_hdr($tail)]
          && [regexp $declpattern $line all rettype funcname rest]} {
        regsub {^SQLITE_API } $line {} line
        # Add the SQLITE_PRIVATE or SQLITE_API keyword before functions
        # so that linkage can be modified at compile-time.  RBU entry
        # points (sqlite3rbu_*) are exported alongside sqlite3_*.
        if {[regexp {^sqlite3(_|rbu_)} $funcname]} {
          set line SQLITE_API
          append line " " [string trim $rettype]
          if {[string index $rettype end] ne "*"} {
            append line " "
          }
          # Varargs entry points must stay cdecl; everything else gets the
          # library-wide (possibly stdcall) calling convention.
          if {[lsearch -exact $cdecllist $funcname] >= 0} {
            append line SQLITE_CDECL
          } else {
            append line SQLITE_STDCALL
          }
          append line " " $funcname $rest
          puts $out $line
        } else {
          puts $out "SQLITE_PRIVATE $line"
        }
      } elseif {[regexp $varpattern $line all varname]} {
        # Add the SQLITE_PRIVATE before variable declarations or
        # definitions for internal use
        regsub {^SQLITE_API } $line {} line
        if {![regexp {^sqlite3_} $varname]} {
          regsub {^extern } $line {} line
          puts $out "SQLITE_PRIVATE $line"
        } else {
          if {[regexp {const char sqlite3_version\[\];} $line]} {
            set line {const char sqlite3_version[] = SQLITE_VERSION;}
          }
          regsub {^SQLITE_EXTERN } $line {} line
          puts $out "SQLITE_API $line"
        }
      } elseif {[regexp {^(SQLITE_EXTERN )?void \(\*sqlite3IoTrace\)} $line]} {
        regsub {^SQLITE_API } $line {} line
        regsub {^SQLITE_EXTERN } $line {} line
        puts $out $line
      } elseif {[regexp {^void \(\*sqlite3Os} $line]} {
        regsub {^SQLITE_API } $line {} line
        puts $out "SQLITE_PRIVATE $line"
      } else {
        puts $out $line
      }
    } else {
      puts $out $line
    }
  }
  close $in
  section_comment "End of $tail"
}
# Process the source files. Process files containing commonly
# used subroutines first in order to help the compiler find
# inlining opportunities.
#
foreach file {
sqliteInt.h
global.c
ctime.c
status.c
date.c
os.c
fault.c
mem0.c
mem1.c
mem2.c
mem3.c
mem5.c
mutex.c
mutex_noop.c
mutex_unix.c
mutex_w32.c
malloc.c
printf.c
treeview.c
random.c
threads.c
utf.c
util.c
hash.c
opcodes.c
os_unix.c
os_win.c
bitvec.c
pcache.c
pcache1.c
rowset.c
pager.c
wal.c
btmutex.c
btree.c
backup.c
vdbemem.c
vdbeaux.c
vdbeapi.c
vdbetrace.c
vdbe.c
vdbeblob.c
vdbesort.c
journal.c
memjournal.c
walker.c
resolve.c
expr.c
alter.c
analyze.c
attach.c
auth.c
build.c
callback.c
delete.c
func.c
fkey.c
insert.c
legacy.c
loadext.c
pragma.c
prepare.c
select.c
table.c
trigger.c
update.c
vacuum.c
vtab.c
wherecode.c
whereexpr.c
where.c
parse.c
tokenize.c
complete.c
main.c
notify.c
fts3.c
fts3_aux.c
fts3_expr.c
fts3_hash.c
fts3_porter.c
fts3_tokenizer.c
fts3_tokenizer1.c
fts3_tokenize_vtab.c
fts3_write.c
fts3_snippet.c
fts3_unicode.c
fts3_unicode2.c
rtree.c
icu.c
fts3_icu.c
sqlite3rbu.c
dbstat.c
json1.c
fts5.c
} {
copy_file tsrc/$file
}
# All input has been copied; finish the amalgamation.
close $out

View File

@ -1,127 +0,0 @@
#!/usr/bin/tclsh
#
# This script constructs the "sqlite3.h" header file from the following
# sources:
#
# 1) The src/sqlite.h.in source file. This is the template for sqlite3.h.
# 2) The VERSION file containing the current SQLite version number.
# 3) The manifest file from the fossil SCM. This gives us the date.
# 4) The manifest.uuid file from the fossil SCM. This gives the SHA1 hash.
#
# Run this script by specifying the root directory of the source tree
# on the command-line.
#
# This script performs processing on src/sqlite.h.in. It:
#
# 1) Adds SQLITE_EXTERN in front of the declaration of global variables,
# 2) Adds SQLITE_API in front of the declaration of API functions,
# 3) Replaces the string --VERS-- with the current library version,
# formatted as a string (e.g. "3.6.17"), and
# 4) Replaces the string --VERSION-NUMBER-- with current library version,
# formatted as an integer (e.g. "3006017").
# 5) Replaces the string --SOURCE-ID-- with the date and time and sha1
# hash of the fossil-scm manifest for the source tree.
#
# This script outputs to stdout.
#
# Example usage:
#
# tclsh mksqlite3h.tcl ../sqlite >sqlite3.h
#
# Get the source tree root directory from the command-line
#
set TOP [lindex $argv 0]
# Get the SQLite version number (ex: 3.6.18) from the $TOP/VERSION file.
#
set in [open $TOP/VERSION]
set zVersion [string trim [read $in]]
close $in
# Pack "X.Y.Z" into the numeric form (e.g. "3.6.17" -> 3006017).
set nVersion [eval format "%d%03d%03d" [split $zVersion .]]
# Get the fossil-scm version number from $TOP/manifest.uuid.
#
set in [open $TOP/manifest.uuid]
set zUuid [string trim [read $in]]
close $in
# Get the fossil-scm check-in date from the "D" card of $TOP/manifest.
#
set in [open $TOP/manifest]
set zDate {}
while {![eof $in]} {
set line [gets $in]
# The leading "2" anchors the match to ISO dates in this millennium.
if {[regexp {^D (2[-0-9T:]+)} $line all date]} {
set zDate [string map {T { }} $date]
break
}
}
close $in
# Set up patterns for recognizing API declarations.
#
set varpattern {^[a-zA-Z][a-zA-Z_0-9 *]+sqlite3_[_a-zA-Z0-9]+(\[|;| =)}
set declpattern {^ *([a-zA-Z][a-zA-Z_0-9 ]+ \**)(sqlite3_[_a-zA-Z0-9]+)(\(.*)$}
# Force the output to use unix line endings, even on Windows.
fconfigure stdout -translation lf
set filelist [subst {
$TOP/src/sqlite.h.in
$TOP/ext/rtree/sqlite3rtree.h
$TOP/ext/fts5/fts5.h
}]
# These are the functions that accept a variable number of arguments. They
# always need to use the "cdecl" calling convention even when another calling
# convention (e.g. "stdcall") is being used for the rest of the library.
set cdecllist {
sqlite3_config
sqlite3_db_config
sqlite3_log
sqlite3_mprintf
sqlite3_snprintf
sqlite3_test_control
sqlite3_vtab_config
}
# Process the source files.
#
foreach file $filelist {
set in [open $file]
while {![eof $in]} {
set line [gets $in]
# File sqlite3rtree.h contains a line "#include <sqlite3.h>". Omit this
# line when copying sqlite3rtree.h into sqlite3.h.
#
if {[string match {*#include*[<"]sqlite3.h[>"]*} $line]} continue
# "--" ends option processing so the --VERS-- style patterns are not
# mistaken for regsub switches.
regsub -- --VERS-- $line $zVersion line
regsub -- --VERSION-NUMBER-- $line $nVersion line
regsub -- --SOURCE-ID-- $line "$zDate $zUuid" line
# Prefix global variable declarations (but not typedefs) with SQLITE_API.
if {[regexp $varpattern $line] && ![regexp {^ *typedef} $line]} {
set line "SQLITE_API $line"
} else {
# Rewrite function declarations as
#   SQLITE_API <rettype> SQLITE_CDECL|SQLITE_STDCALL <name>(...)
# Varargs entry points must stay cdecl.
if {[regexp $declpattern $line all rettype funcname rest]} {
set line SQLITE_API
append line " " [string trim $rettype]
if {[string index $rettype end] ne "*"} {
append line " "
}
if {[lsearch -exact $cdecllist $funcname] >= 0} {
append line SQLITE_CDECL
} else {
append line SQLITE_STDCALL
}
append line " " $funcname $rest
}
}
puts $line
}
close $in
}

View File

@ -1,148 +0,0 @@
#!/usr/bin/tclsh
#
# To build a single huge source file holding all of SQLite (or at
# least the core components - the test harness, shell, and TCL
# interface are omitted.) first do
#
# make target_source
#
# The make target above moves all of the source code files into
# a subdirectory named "tsrc". (This script expects to find the files
# there and will not work if they are not found.) There are a few
# generated C code files that are also added to the tsrc directory.
# For example, the "parse.c" and "parse.h" files that implement
# the parser are derived from "parse.y" using lemon. And the
# "keywordhash.h" file is generated by a program named "mkkeywordhash".
#
# After the "tsrc" directory has been created and populated, run
# this script:
#
# tclsh mksqlite3c.tcl
#
# The amalgamated SQLite code will be written into sqlite3.c
#
# Begin by reading the "sqlite3.h" header file. Count the number of lines
# in this file and extract the version number. That information will be
# needed in order to generate the header of the amalgamation.
#
set in [open tsrc/sqlite3.h]
set cnt 0
set VERSION ?????
while {![eof $in]} {
set line [gets $in]
# [gets] returns "" at EOF; require both conditions so that genuinely
# blank lines in the middle of the header are still counted.
if {$line=="" && [eof $in]} break
incr cnt
regexp {#define\s+SQLITE_VERSION\s+"(.*)"} $line all VERSION
}
close $in
# Open the output file and write a header comment at the beginning
# of the file.
#
set out [open sqlite3internal.h w]
# NOTE(review): unlike mksqlite3c.tcl there is no
# "fconfigure $out -translation lf" here, so this output uses platform
# line endings -- confirm whether that is intentional.
set today [clock format [clock seconds] -format "%Y-%m-%d %H:%M:%S UTC" -gmt 1]
puts $out [subst \
{/******************************************************************************
** This file is an amalgamation of many private header files from SQLite
** version $VERSION.
*/}]
# These are the header files used by SQLite. The first time any of these
# files are seen in a #include statement in the C code, include the complete
# text of the file in-line. The file only needs to be included once.
#
foreach hdr {
btree.h
btreeInt.h
hash.h
hwtime.h
keywordhash.h
msvc.h
opcodes.h
os_common.h
os_setup.h
os_win.h
os.h
pager.h
parse.h
sqlite3ext.h
sqlite3.h
sqliteInt.h
sqliteLimit.h
vdbe.h
vdbeInt.h
} {
set available_hdr($hdr) 1
}
# 78 stars used for comment formatting.
set s78 \
{*****************************************************************************}
# Emit a /*....*/ section banner into the output stream.  The star bar is
# trimmed so that every banner comes out at the same overall width.
#
proc section_comment {text} {
  global out s78
  set bar [string range $s78 0 [expr {60 - [string length $text]}]]
  puts $out "/************** $text $bar/"
}
# Read the source file named $filename and write it into the
# sqlite3internal.h output file. If any #include statements are seen,
# process them appropriately.
#
# Bug fix: the parameter list had been garbled to "(unknown)", so every
# reference to $filename in the body raised an error on the first call.
# The single formal parameter {filename} is restored.
#
# NOTE(review): unlike the mksqlite3c.tcl variant, this version never
# clears available_hdr($hdr) after inlining, so a header included from two
# places would be inlined twice -- confirm that is acceptable here.
#
proc copy_file {filename} {
  global seen_hdr available_hdr out
  set tail [file tail $filename]
  section_comment "Begin file $tail"
  set in [open $filename r]
  while {![eof $in]} {
    set line [gets $in]
    if {[regexp {^#\s*include\s+["<]([^">]+)[">]} $line all hdr]} {
      if {[info exists available_hdr($hdr)]} {
        if {$available_hdr($hdr)} {
          # Known SQLite header: inline its full text here.
          section_comment "Include $hdr in the middle of $tail"
          copy_file tsrc/$hdr
          section_comment "Continuing where we left off in $tail"
        }
      } elseif {![info exists seen_hdr($hdr)]} {
        # Unknown (e.g. system) header: emit the #include only once.
        set seen_hdr($hdr) 1
        puts $out $line
      }
    } elseif {[regexp {^#ifdef __cplusplus} $line]} {
      # The amalgamation is compiled as plain C; disable extern "C" blocks.
      puts $out "#if 0"
    } elseif {[regexp {^#line} $line]} {
      # Skip #line directives.
    } else {
      puts $out $line
    }
  }
  close $in
  section_comment "End of $tail"
}
# Process the source files. Process files containing commonly
# used subroutines first in order to help the compiler find
# inlining opportunities.
#
foreach file {
sqliteInt.h
sqlite3.h
btree.h
hash.h
os.h
pager.h
parse.h
sqlite3ext.h
vdbe.h
} {
# Skip any header that copy_file already inlined at an #include site.
if {$available_hdr($file)} {
copy_file tsrc/$file
}
}
close $out

View File

@ -1,840 +0,0 @@
#!/usr/bin/tclsh
#
# This script is used to generate a VSIX (Visual Studio Extension) file for
# SQLite usable by Visual Studio.
#
# PREREQUISITES
#
# 1. Tcl 8.4 and later are supported, earlier versions have not been tested.
#
# 2. The "sqlite3.h" file is assumed to exist in the parent directory of the
# directory containing this script. The [optional] second command line
# argument to this script may be used to specify an alternate location.
# This script also assumes that the "sqlite3.h" file corresponds with the
# version of the binaries to be packaged. This assumption is not verified
# by this script.
#
# 3. The temporary directory specified in the TEMP or TMP environment variables
# must refer to an existing directory writable by the current user.
#
# 4. The "zip" and "unzip" command line tools must be located either in a
# directory contained in the PATH environment variable or specified as the
# exact file names to execute in the "ZipTool" and "UnZipTool" environment
# variables, respectively.
#
# 5. The template VSIX file (which is basically a zip file) must be located in
# a "win" directory inside the directory containing this script. It should
# not contain any executable binaries. It should only contain dynamic
# textual content files to be processed using [subst] and/or static content
# files to be copied verbatim.
#
# 6. The executable and other compiled binary files to be packaged into the
# final VSIX file (e.g. DLLs, LIBs, and PDBs) must be located in a single
# directory tree. The top-level directory of the tree must be specified as
# the first command line argument to this script. The second level
# sub-directory names must match those of the build configuration (e.g.
# "Debug" or "Retail"). The third level sub-directory names must match
# those of the platform (e.g. "x86", "x64", and "ARM"). For example, the
# binary files to be packaged would need to be organized as follows when
# packaging the "Debug" and "Retail" build configurations for the "x86" and
# "x64" platforms (in this example, "C:\temp" is the top-level directory as
# specified in the first command line argument):
#
# C:\Temp\Debug\x86\sqlite3.lib
# C:\Temp\Debug\x86\sqlite3.dll
# C:\Temp\Debug\x86\sqlite3.pdb
# C:\Temp\Debug\x64\sqlite3.lib
# C:\Temp\Debug\x64\sqlite3.dll
# C:\Temp\Debug\x64\sqlite3.pdb
# C:\Temp\Retail\x86\sqlite3.lib
# C:\Temp\Retail\x86\sqlite3.dll
# C:\Temp\Retail\x86\sqlite3.pdb
# C:\Temp\Retail\x64\sqlite3.lib
# C:\Temp\Retail\x64\sqlite3.dll
# C:\Temp\Retail\x64\sqlite3.pdb
#
# The above directory tree organization is performed automatically if the
# "tool\build-all-msvc.bat" batch script is used to build the binary files
# to be packaged.
#
# USAGE
#
# The first argument to this script is required and must be the name of the
# top-level directory containing the directories and files organized into a
# tree as described in item 6 of the PREREQUISITES section, above. The second
# argument is optional and if present must contain the name of the directory
# containing the root of the source tree for SQLite. The third argument is
# optional and if present must contain the flavor of the VSIX package to build.
# Currently, the only supported package flavors are "WinRT", "WinRT81", "WP80",
# "WP81", and "Win32". The fourth argument is optional and if present must be
# a string containing a list of platforms to include in the VSIX package. The
# platform list is "platform1,platform2,platform3". The fifth argument is
# optional and if present must contain the version of Visual Studio required by
# the package. Currently, the only supported versions are "2012" and "2013".
# The package flavors "WinRT81" and "WP81" are only supported when the Visual
# Studio version is "2013". Typically, when on Windows, this script is
# executed using commands similar to the following from a normal Windows
# command prompt:
#
# CD /D C:\dev\sqlite\core
# tclsh85 tool\mkvsix.tcl C:\Temp
#
# In the example above, "C:\dev\sqlite\core" represents the root of the source
# tree for SQLite and "C:\Temp" represents the top-level directory containing
# the executable and other compiled binary files, organized into a directory
# tree as described in item 6 of the PREREQUISITES section, above.
#
# This script should work on non-Windows platforms as well, provided that all
# the requirements listed in the PREREQUISITES section are met.
#
# NOTES
#
# The temporary directory is used as a staging area for the final VSIX file.
# The template VSIX file is extracted, its contents processed, and then the
# resulting files are packaged into the final VSIX file.
#
package require Tcl 8.4
proc fail { {error ""} {usage false} } {
#
# NOTE: Prints an optional error message and/or the usage message to stdout,
#       then exits with a non-zero status.  When a non-empty error is given
#       and usage is false, only the error is printed before exiting;
#       passing usage as true forces the usage text to be printed even
#       after an error (used when the script name itself cannot be
#       determined).
#
if {[string length $error] > 0} then {
puts stdout $error
if {!$usage} then {exit 1}
}
puts stdout "usage:\
[file tail [info nameofexecutable]]\
[file tail [info script]] <binaryDirectory> \[sourceDirectory\]\
\[packageFlavor\] \[platformNames\] \[vsVersion\]"
exit 1
}
proc getEnvironmentVariable { name } {
  #
  # NOTE: Returns the value of the specified environment variable or an
  #       empty string for environment variables that do not exist in the
  #       current process environment.
  #
  if {[info exists ::env($name)]} then {
    return $::env($name)
  }
  return ""
}
proc getTemporaryPath {} {
  #
  # NOTE: Returns the normalized path to the first temporary directory found
  #       in the typical set of environment variables used for that purpose
  #       or an empty string to signal a failure to locate such a directory.
  #       In addition to the Windows-centric TEMP and TMP variables, the
  #       POSIX TMPDIR variable is now consulted (last, to preserve the
  #       previous lookup order) so that this script works on non-Windows
  #       platforms as promised by the header comments.
  #
  set names [list]
  foreach name [list TEMP TMP TMPDIR] {
    #
    # NOTE: Check the upper, lower, and title case spellings of each
    #       candidate variable name.
    #
    lappend names [string toupper $name] [string tolower $name] \
        [string totitle $name]
  }
  foreach name $names {
    set value [getEnvironmentVariable $name]
    if {[string length $value] > 0} then {
      return [file normalize $value]
    }
  }
  return ""
}
proc appendArgs { args } {
  #
  # NOTE: Returns all passed arguments joined together as a single string
  #       with no intervening spaces between arguments.
  #
  set result ""
  foreach arg $args {
    append result $arg
  }
  return $result
}
proc readFile { fileName } {
  #
  # NOTE: Reads and returns the entire contents of the specified file, which
  #       may contain binary data.  The channel is configured for raw binary
  #       access so no encoding or end-of-line translation takes place.
  #
  set channel [open $fileName RDONLY]
  fconfigure $channel -encoding binary -translation binary
  set contents [read $channel]
  close $channel
  return $contents
}
proc writeFile { fileName data } {
  #
  # NOTE: Overwrites the specified file with the given data, which may be
  #       binary, creating the file if necessary.  Always returns an empty
  #       string.
  #
  set channel [open $fileName {WRONLY CREAT TRUNC}]
  fconfigure $channel -encoding binary -translation binary
  puts -nonewline $channel $data
  close $channel
  return ""
}
#
# TODO: Modify this procedure when a new version of Visual Studio is released.
#
proc getMinVsVersionXmlChunk { vsVersion } {
  #
  # NOTE: Returns the XML attribute fragment declaring the minimum internal
  #       Visual Studio version that corresponds to the specified release
  #       year, or an empty string for unrecognized releases.
  #
  array set internalVersions {2012 11.0 2013 12.0 2015 14.0}
  if {![info exists internalVersions($vsVersion)]} then {
    return ""
  }
  return "\r\n MinVSVersion=\"$internalVersions($vsVersion)\""
}
#
# TODO: Modify this procedure when a new version of Visual Studio is released.
#
proc getMaxPlatformVersionXmlChunk { packageFlavor vsVersion } {
  #
  # NOTE: Returns the XML attribute fragment declaring the maximum platform
  #       version for the specified package flavor, or an empty string when
  #       it does not apply.  Only Visual Studio 2013 and later support this
  #       attribute within the SDK manifest.
  #
  if {[lsearch -exact [list 2013 2015] $vsVersion] == -1} then {
    return ""
  }
  array set maxVersions {WinRT 8.0 WinRT81 8.1 WP80 8.0 WP81 8.1}
  if {![info exists maxVersions($packageFlavor)]} then {
    return ""
  }
  return "\r\n MaxPlatformVersion=\"$maxVersions($packageFlavor)\""
}
#
# TODO: Modify this procedure when a new version of Visual Studio is released.
#
proc getExtraFileListXmlChunk { packageFlavor vsVersion } {
  #
  # NOTE: Returns the extra attributes required in the SDK manifest FileList
  #       element for the specified package flavor and Visual Studio
  #       version.  Windows Phone 8.0 does not require any extra attributes
  #       in its VSIX package SDK manifests; however, it appears that
  #       Windows Phone 8.1 does.
  #
  if {[string equal $packageFlavor WP80]} then {
    return ""
  }
  set appliesTo [expr {[string equal $packageFlavor Win32] ? \
      "VisualC" : "WindowsAppContainer"}]
  array set vcLibsVersions {2012 11.0 2013 12.0 2015 14.0}
  if {![info exists vcLibsVersions($vsVersion)]} then {
    return ""
  }
  set result "\r\n AppliesTo=\"$appliesTo\""
  append result \
      "\r\n DependsOn=\"Microsoft.VCLibs, version=$vcLibsVersions($vsVersion)\""
  return $result
}
proc replaceFileNameTokens { fileName name buildName platformName } {
  #
  # NOTE: Expands the <build>, <platform>, and <name> placeholder tokens in
  #       the specified file name to the actual build configuration,
  #       platform, and package short name.
  #
  set tokenMap [list \
      <build> $buildName <platform> $platformName <name> $name]
  return [string map $tokenMap $fileName]
}
proc substFile { fileName } {
#
# NOTE: Performs all Tcl command, variable, and backslash substitutions in
# the specified file and then rewrites the contents of that same file
# with the substituted data.  The [subst] is run via [uplevel 1] so
# that variables and commands referenced inside the file are resolved
# in the caller's scope (for this script, the global scope).
#
return [writeFile $fileName [uplevel 1 [list subst [readFile $fileName]]]]
}
#
# NOTE: This is the entry point for this script.
#
# Resolve the absolute path of this script; [info script] can be empty when
# the script text is evaluated directly rather than sourced from a file.
set script [file normalize [info script]]
if {[string length $script] == 0} then {
fail "script file currently being evaluated is unknown" true
}
set path [file dirname $script]
set rootName [file rootname [file tail $script]]
###############################################################################
#
# NOTE: Process and verify all the command line arguments.
#
set argc [llength $argv]
if {$argc < 1 || $argc > 5} then {fail}
set binaryDirectory [lindex $argv 0]
if {[string length $binaryDirectory] == 0} then {
fail "invalid binary directory"
}
if {![file exists $binaryDirectory] || \
![file isdirectory $binaryDirectory]} then {
fail "binary directory does not exist"
}
if {$argc >= 2} then {
set sourceDirectory [lindex $argv 1]
} else {
#
# NOTE: Assume that the source directory is the parent directory of the one
# that contains this script file.
#
set sourceDirectory [file dirname $path]
}
if {[string length $sourceDirectory] == 0} then {
fail "invalid source directory"
}
if {![file exists $sourceDirectory] || \
![file isdirectory $sourceDirectory]} then {
fail "source directory does not exist"
}
if {$argc >= 3} then {
set packageFlavor [lindex $argv 2]
} else {
#
# NOTE: Assume the package flavor is WinRT.
#
set packageFlavor WinRT
}
if {[string length $packageFlavor] == 0} then {
fail "invalid package flavor"
}
if {$argc >= 4} then {
set platformNames [list]
#
# NOTE: The platform list argument is split on every comma AND every
#       space character; empty elements are discarded after trimming.
#
foreach platformName [split [lindex $argv 3] ", "] {
set platformName [string trim $platformName]
if {[string length $platformName] > 0} then {
lappend platformNames $platformName
}
}
}
if {$argc >= 5} then {
set vsVersion [lindex $argv 4]
} else {
# Default to the oldest supported Visual Studio version.
set vsVersion 2012
}
if {[string length $vsVersion] == 0} then {
fail "invalid Visual Studio version"
}
if {![string equal $vsVersion 2012] && ![string equal $vsVersion 2013] && \
![string equal $vsVersion 2015]} then {
fail [appendArgs \
"unsupported Visual Studio version, must be one of: " \
[list 2012 2013 2015]]
}
#
# NOTE: The two arrays below map a (packageFlavor,vsVersion) pair to the
#       package short name (used in file names) and the user-visible
#       display name, respectively.
#
set shortNames(WinRT,2012) SQLite.WinRT
set shortNames(WinRT,2013) SQLite.WinRT.2013
set shortNames(WinRT81,2013) SQLite.WinRT81
set shortNames(WP80,2012) SQLite.WP80
set shortNames(WP80,2013) SQLite.WP80.2013
set shortNames(WP81,2013) SQLite.WP81
set shortNames(Win32,2012) SQLite.Win32
set shortNames(Win32,2013) SQLite.Win32.2013
set shortNames(UWP,2015) SQLite.UWP.2015
set displayNames(WinRT,2012) "SQLite for Windows Runtime"
set displayNames(WinRT,2013) "SQLite for Windows Runtime"
set displayNames(WinRT81,2013) "SQLite for Windows Runtime (Windows 8.1)"
set displayNames(WP80,2012) "SQLite for Windows Phone"
set displayNames(WP80,2013) "SQLite for Windows Phone"
set displayNames(WP81,2013) "SQLite for Windows Phone 8.1"
set displayNames(Win32,2012) "SQLite for Windows"
set displayNames(Win32,2013) "SQLite for Windows"
set displayNames(UWP,2015) "SQLite for Universal Windows Platform"
#
# NOTE(review): flavor/version pairs that lack an explicit guard below
#       (e.g. WinRT with 2015) are also absent from the arrays above and
#       would raise a raw "can't read shortNames(...)" Tcl error instead
#       of a friendly [fail] message -- confirm whether that is
#       acceptable.
#
if {[string equal $packageFlavor WinRT]} then {
set shortName $shortNames($packageFlavor,$vsVersion)
set displayName $displayNames($packageFlavor,$vsVersion)
set targetPlatformIdentifier Windows
set targetPlatformVersion v8.0
set minVsVersion [getMinVsVersionXmlChunk $vsVersion]
set maxPlatformVersion \
[getMaxPlatformVersionXmlChunk $packageFlavor $vsVersion]
set extraSdkPath ""
set extraFileListAttributes \
[getExtraFileListXmlChunk $packageFlavor $vsVersion]
} elseif {[string equal $packageFlavor WinRT81]} then {
if {$vsVersion ne "2013"} then {
fail [appendArgs \
"unsupported combination, package flavor " $packageFlavor \
" is only supported with Visual Studio 2013"]
}
set shortName $shortNames($packageFlavor,$vsVersion)
set displayName $displayNames($packageFlavor,$vsVersion)
set targetPlatformIdentifier Windows
set targetPlatformVersion v8.1
set minVsVersion [getMinVsVersionXmlChunk $vsVersion]
set maxPlatformVersion \
[getMaxPlatformVersionXmlChunk $packageFlavor $vsVersion]
set extraSdkPath ""
set extraFileListAttributes \
[getExtraFileListXmlChunk $packageFlavor $vsVersion]
} elseif {[string equal $packageFlavor WP80]} then {
set shortName $shortNames($packageFlavor,$vsVersion)
set displayName $displayNames($packageFlavor,$vsVersion)
set targetPlatformIdentifier "Windows Phone"
set targetPlatformVersion v8.0
set minVsVersion [getMinVsVersionXmlChunk $vsVersion]
set maxPlatformVersion \
[getMaxPlatformVersionXmlChunk $packageFlavor $vsVersion]
set extraSdkPath "\\..\\$targetPlatformIdentifier"
set extraFileListAttributes \
[getExtraFileListXmlChunk $packageFlavor $vsVersion]
} elseif {[string equal $packageFlavor WP81]} then {
if {$vsVersion ne "2013"} then {
fail [appendArgs \
"unsupported combination, package flavor " $packageFlavor \
" is only supported with Visual Studio 2013"]
}
set shortName $shortNames($packageFlavor,$vsVersion)
set displayName $displayNames($packageFlavor,$vsVersion)
set targetPlatformIdentifier WindowsPhoneApp
set targetPlatformVersion v8.1
set minVsVersion [getMinVsVersionXmlChunk $vsVersion]
set maxPlatformVersion \
[getMaxPlatformVersionXmlChunk $packageFlavor $vsVersion]
set extraSdkPath "\\..\\$targetPlatformIdentifier"
set extraFileListAttributes \
[getExtraFileListXmlChunk $packageFlavor $vsVersion]
} elseif {[string equal $packageFlavor UWP]} then {
if {$vsVersion ne "2015"} then {
fail [appendArgs \
"unsupported combination, package flavor " $packageFlavor \
" is only supported with Visual Studio 2015"]
}
set shortName $shortNames($packageFlavor,$vsVersion)
set displayName $displayNames($packageFlavor,$vsVersion)
set targetPlatformIdentifier UAP; # NOTE: Not "UWP".
set targetPlatformVersion v0.8.0.0
set minVsVersion [getMinVsVersionXmlChunk $vsVersion]
set maxPlatformVersion \
[getMaxPlatformVersionXmlChunk $packageFlavor $vsVersion]
set extraSdkPath "\\..\\$targetPlatformIdentifier"
set extraFileListAttributes \
[getExtraFileListXmlChunk $packageFlavor $vsVersion]
} elseif {[string equal $packageFlavor Win32]} then {
set shortName $shortNames($packageFlavor,$vsVersion)
set displayName $displayNames($packageFlavor,$vsVersion)
set targetPlatformIdentifier Windows
set targetPlatformVersion v8.0
set minVsVersion [getMinVsVersionXmlChunk $vsVersion]
set maxPlatformVersion \
[getMaxPlatformVersionXmlChunk $packageFlavor $vsVersion]
set extraSdkPath ""
set extraFileListAttributes \
[getExtraFileListXmlChunk $packageFlavor $vsVersion]
} else {
fail [appendArgs \
"unsupported package flavor, must be one of: " \
[list WinRT WinRT81 WP80 WP81 UWP Win32]]
}
###############################################################################
#
# NOTE: Evaluate the user-specific customizations file, if it exists.  It is
#       named "<scriptRootName>.<userName>.tcl", lives next to this script,
#       and may override variables such as fileNames, buildNames,
#       platformNames, zip, and unzip (see below).
#
set userFile [file join $path [appendArgs \
$rootName . $tcl_platform(user) .tcl]]
if {[file exists $userFile] && \
[file isfile $userFile]} then {
source $userFile
}
###############################################################################
# The template VSIX package ships with the source tree at "win/sqlite.vsix"
# relative to this script's directory.
set templateFile [file join $path win sqlite.vsix]
if {![file exists $templateFile] || \
![file isfile $templateFile]} then {
fail [appendArgs "template file \"" $templateFile "\" does not exist"]
}
set currentDirectory [pwd]
# The final package is written to the current working directory and is never
# overwritten if it already exists.
set outputFile [file join $currentDirectory [appendArgs sqlite- \
$packageFlavor -output.vsix]]
if {[file exists $outputFile]} then {
fail [appendArgs "output file \"" $outputFile "\" already exists"]
}
###############################################################################
#
# NOTE: Make sure that a valid temporary directory exists.
#
set temporaryDirectory [getTemporaryPath]
if {[string length $temporaryDirectory] == 0 || \
![file exists $temporaryDirectory] || \
![file isdirectory $temporaryDirectory]} then {
fail "cannot locate a usable temporary directory"
}
#
# NOTE: Setup the staging directory to have a unique name inside of the
# configured temporary directory.  Uniqueness is based on this
# process' id, so concurrent runs do not collide.
#
set stagingDirectory [file normalize [file join $temporaryDirectory \
[appendArgs $rootName . [pid]]]]
###############################################################################
#
# NOTE: Configure the external zipping tool. First, see if it has already
# been pre-configured. If not, try to query it from the environment.
# Finally, fallback on the default of simply "zip", which will then
# be assumed to exist somewhere along the PATH.
#
if {![info exists zip]} then {
if {[info exists env(ZipTool)]} then {
set zip $env(ZipTool)
}
if {![info exists zip] || ![file exists $zip]} then {
set zip zip
}
}
#
# NOTE: Configure the external unzipping tool. First, see if it has already
# been pre-configured. If not, try to query it from the environment.
# Finally, fallback on the default of simply "unzip", which will then
# be assumed to exist somewhere along the PATH.
#
if {![info exists unzip]} then {
if {[info exists env(UnZipTool)]} then {
set unzip $env(UnZipTool)
}
if {![info exists unzip] || ![file exists $unzip]} then {
set unzip unzip
}
}
###############################################################################
#
# NOTE: Attempt to extract the SQLite version from the "sqlite3.h" header file
# in the source directory. This script assumes that the header file has
# already been generated by the build process.  The extracted value is
# later available to [subst]-processed template files as $version.
#
set pattern {^#define\s+SQLITE_VERSION\s+"(.*)"$}
set data [readFile [file join $sourceDirectory sqlite3.h]]
if {![regexp -line -- $pattern $data dummy version]} then {
fail [appendArgs "cannot locate SQLITE_VERSION value in \"" \
[file join $sourceDirectory sqlite3.h] \"]
}
###############################################################################
#
# NOTE: Setup all the master file list data. This includes the source file
# names, the destination file names, and the file processing flags. The
# possible file processing flags are:
#
# "buildNeutral" -- This flag indicates the file location and content do
# not depend on the build configuration.
#
# "platformNeutral" -- This flag indicates the file location and content
# do not depend on the build platform.
#
# "subst" -- This flag indicates that the file contains dynamic textual
# content that needs to be processed using [subst] prior to
# packaging the file into the final VSIX package. The primary
# use of this flag is to insert the name of the VSIX package,
# some package flavor-specific value, or the SQLite version
# into a file.
#
# "noDebug" -- This flag indicates that the file should be skipped when
# processing the debug build.
#
# "noRetail" -- This flag indicates that the file should be skipped when
# processing the retail build.
#
# "move" -- This flag indicates that the file should be moved from the
# source to the destination instead of being copied.
#
# This file metadata may be overridden, either in whole or in part, via
# the user-specific customizations file.
#
# An empty source name means the destination file already exists in the
# extracted template and is only (optionally) subst-processed in place.
if {![info exists fileNames(source)]} then {
set fileNames(source) [list "" "" \
[file join $stagingDirectory DesignTime <build> <platform> sqlite3.props] \
[file join $sourceDirectory sqlite3.h] \
[file join $binaryDirectory <build> <platform> sqlite3.lib] \
[file join $binaryDirectory <build> <platform> sqlite3.dll]]
if {![info exists no(symbols)]} then {
lappend fileNames(source) \
[file join $binaryDirectory <build> <platform> sqlite3.pdb]
}
}
if {![info exists fileNames(destination)]} then {
set fileNames(destination) [list \
[file join $stagingDirectory extension.vsixmanifest] \
[file join $stagingDirectory SDKManifest.xml] \
[file join $stagingDirectory DesignTime <build> <platform> <name>.props] \
[file join $stagingDirectory DesignTime <build> <platform> sqlite3.h] \
[file join $stagingDirectory DesignTime <build> <platform> sqlite3.lib] \
[file join $stagingDirectory Redist <build> <platform> sqlite3.dll]]
if {![info exists no(symbols)]} then {
lappend fileNames(destination) \
[file join $stagingDirectory Redist <build> <platform> sqlite3.pdb]
}
}
#
# NOTE(review): the base flags list below has seven entries for six base
#       files; the trailing [list noRetail] pairs with the conditionally
#       appended sqlite3.pdb entry when symbols are enabled, and the
#       conditional [lappend] then adds a duplicate eighth flag.  This is
#       harmless because [foreach] pads missing list elements with empty
#       strings, but confirm the trailing entry is intentional.
#
if {![info exists fileNames(flags)]} then {
set fileNames(flags) [list \
[list buildNeutral platformNeutral subst] \
[list buildNeutral platformNeutral subst] \
[list buildNeutral platformNeutral subst move] \
[list buildNeutral platformNeutral] \
[list] [list] [list noRetail]]
if {![info exists no(symbols)]} then {
lappend fileNames(flags) [list noRetail]
}
}
###############################################################################
#
# NOTE: Setup the list of builds supported by this script. These may be
# overridden via the user-specific customizations file.
#
if {![info exists buildNames]} then {
set buildNames [list Debug Retail]
}
###############################################################################
#
# NOTE: Setup the list of platforms supported by this script. These may be
# overridden via the command line or the user-specific customizations
# file.
#
if {![info exists platformNames] || [llength $platformNames] == 0} then {
set platformNames [list x86 x64 ARM]
}
###############################################################################
#
# NOTE: Make sure the staging directory exists, creating it if necessary.
#
file mkdir $stagingDirectory
#
# NOTE: Build the Tcl command used to extract the template VSIX package to
# the staging directory.
#
set extractCommand [list exec -- $unzip $templateFile -d $stagingDirectory]
#
# NOTE: Extract the template VSIX package to the staging directory.  The
# [exec] raises an error (aborting this script) if the external tool
# exits with a non-zero status.
#
eval $extractCommand
###############################################################################
#
# NOTE: Process each file in the master file list. There are actually three
# parallel lists that contain the source file names, the destination file
# names, and the file processing flags. If the "buildNeutral" flag is
# present, the file location and content do not depend on the build
# configuration and "CommonConfiguration" will be used in place of the
# build configuration name. If the "platformNeutral" flag is present,
# the file location and content do not depend on the build platform and
# "neutral" will be used in place of the build platform name. If the
# "subst" flag is present, the file is assumed to be a text file that may
# contain Tcl variable, command, and backslash replacements, to be
# dynamically replaced during processing using the Tcl [subst] command.
# If the "noDebug" flag is present, the file will be skipped when
# processing for the debug build. If the "noRetail" flag is present, the
# file will be skipped when processing for the retail build. If the
# "move" flag is present, the source file will be deleted after it is
# copied to the destination file. If the source file name is an empty
# string, the destination file name will be assumed to already exist in
# the staging directory and will not be copied; however, Tcl variable,
# command, and backslash replacements may still be performed on the
# destination file prior to the final VSIX package being built if the
# "subst" flag is present.
#
foreach sourceFileName $fileNames(source) \
destinationFileName $fileNames(destination) \
fileFlags $fileNames(flags) {
#
# NOTE: Process the file flags into separate boolean variables that may be
# used within the loop.
#
set isBuildNeutral [expr {[lsearch $fileFlags buildNeutral] != -1}]
set isPlatformNeutral [expr {[lsearch $fileFlags platformNeutral] != -1}]
set isMove [expr {[lsearch $fileFlags move] != -1}]
set useSubst [expr {[lsearch $fileFlags subst] != -1}]
#
# NOTE: If the current file is build-neutral, then only one build will
# be processed for it, namely "CommonConfiguration"; otherwise, each
# supported build will be processed for it individually.
#
foreach buildName \
[expr {$isBuildNeutral ? [list CommonConfiguration] : $buildNames}] {
#
# NOTE: Should the current file be skipped for this build?  This check
# matches the "noDebug"/"noRetail" flags by name composition.
#
if {[lsearch $fileFlags no${buildName}] != -1} then {
continue
}
#
# NOTE: If the current file is platform-neutral, then only one platform
# will be processed for it, namely "neutral"; otherwise, each
# supported platform will be processed for it individually.
#
foreach platformName \
[expr {$isPlatformNeutral ? [list neutral] : $platformNames}] {
#
# NOTE: Use the actual platform name in the destination file name.
#
set newDestinationFileName [replaceFileNameTokens $destinationFileName \
$shortName $buildName $platformName]
#
# NOTE: Does the source file need to be copied to the destination file?
#
if {[string length $sourceFileName] > 0} then {
#
# NOTE: First, make sure the destination directory exists.
#
file mkdir [file dirname $newDestinationFileName]
#
# NOTE: Then, copy the source file to the destination file verbatim.
#
set newSourceFileName [replaceFileNameTokens $sourceFileName \
$shortName $buildName $platformName]
file copy $newSourceFileName $newDestinationFileName
#
# NOTE: If this is a move instead of a copy, delete the source file
# now.
#
if {$isMove} then {
file delete $newSourceFileName
}
}
#
# NOTE: Does the destination file contain dynamic replacements that must
# be processed now?
#
if {$useSubst} then {
#
# NOTE: Perform any dynamic replacements contained in the destination
# file and then re-write it in-place.
#
substFile $newDestinationFileName
}
}
}
}
###############################################################################
#
# NOTE: Change the current directory to the staging directory so that the
# external archive building tool can pickup the necessary files using
# relative paths.
#
cd $stagingDirectory
#
# NOTE: Build the Tcl command used to archive the final VSIX package in the
# output directory.
#
set archiveCommand [list exec -- $zip -r $outputFile *]
#
# NOTE: Build the final VSIX package archive in the output directory.
#
eval $archiveCommand
#
# NOTE: Change back to the previously saved current directory.
#
cd $currentDirectory
#
# NOTE: Cleanup the temporary staging directory.  If any earlier step
# raised an error, the staging directory is left behind --
# presumably intentional as a debugging aid; confirm.
#
file delete -force $stagingDirectory
###############################################################################
#
# NOTE: Success, emit the fully qualified path of the generated VSIX file.
#
puts stdout $outputFile

View File

@ -1,329 +0,0 @@
/*
** This program searches an SQLite database file for the lengths and
** offsets for all TEXT or BLOB entries for a particular column of a
** particular table. The rowid, size and offset for the column are
** written to standard output. There are three arguments, which are the
** name of the database file, the table, and the column.
*/
#include "sqlite3.h"
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
typedef unsigned char u8;
typedef struct GState GState;
#define ArraySize(X) (sizeof(X)/sizeof(X[0]))
/*
** Global state information for this program.
**
** aStack/aPgno/nStack implement an explicit stack of b-tree pages used
** while descending from the root page; aPage and pgno always describe
** the page currently on top of that stack.
*/
struct GState {
char *zErr; /* Error message text */
FILE *f; /* Open database file */
int szPg; /* Page size for the database file */
int iRoot; /* Root page of the table */
int iCol; /* Column number for the column */
int pgno; /* Current page number */
u8 *aPage; /* Current page content */
u8 *aStack[20]; /* Page stack */
int aPgno[20]; /* Page number stack */
int nStack; /* Depth of stack */
int bTrace; /* True for tracing output */
};
/*
** Record a printf-style error message in p->zErr, freeing and replacing
** any message that was recorded previously.
*/
static void ofstError(GState *p, const char *zFormat, ...){
  va_list args;
  va_start(args, zFormat);
  sqlite3_free(p->zErr);
  p->zErr = sqlite3_vmprintf(zFormat, args);
  va_end(args);
}
/*
** Emit a printf-style trace message on standard output, but only when
** tracing has been enabled (GState.bTrace is true).
*/
static void ofstTrace(GState *p, const char *zFormat, ...){
  va_list args;
  if( !p->bTrace ) return;
  va_start(args, zFormat);
  vprintf(zFormat, args);
  va_end(args);
}
/*
** Find the root page of the table (p->iRoot), the zero-based ordinal of
** the named column (p->iCol), and the database page size (p->szPg),
** using the SQLite library itself for the schema queries.  On failure an
** error message is left in p->zErr and remaining lookups are skipped.
**
** Fixes: the error format for the table_info query used a mismatched
** "[%s}" bracket; sqlite3_column_text() returns const unsigned char* and
** now gets an explicit cast.
*/
static void ofstRootAndColumn(
  GState *p,            /* Global state */
  const char *zFile,    /* Name of the database file */
  const char *zTable,   /* Name of the table */
  const char *zColumn   /* Name of the column */
){
  sqlite3 *db = 0;
  sqlite3_stmt *pStmt = 0;
  char *zSql = 0;
  int rc;
  if( p->zErr ) return;
  rc = sqlite3_open(zFile, &db);
  if( rc ){
    ofstError(p, "cannot open database file \"%s\"", zFile);
    goto rootAndColumn_exit;
  }

  /* Look up the root page number in the schema table. */
  zSql = sqlite3_mprintf("SELECT rootpage FROM sqlite_master WHERE name=%Q",
                         zTable);
  rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0);
  if( rc ) ofstError(p, "%s: [%s]", sqlite3_errmsg(db), zSql);
  sqlite3_free(zSql);
  if( p->zErr ) goto rootAndColumn_exit;
  if( sqlite3_step(pStmt)!=SQLITE_ROW ){
    ofstError(p, "cannot find table [%s]\n", zTable);
    sqlite3_finalize(pStmt);
    goto rootAndColumn_exit;
  }
  p->iRoot = sqlite3_column_int(pStmt, 0);
  sqlite3_finalize(pStmt);

  /* Map the column name to its ordinal position, case-insensitively. */
  p->iCol = -1;
  zSql = sqlite3_mprintf("PRAGMA table_info(%Q)", zTable);
  rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0);
  if( rc ) ofstError(p, "%s: [%s]", sqlite3_errmsg(db), zSql);
  sqlite3_free(zSql);
  if( p->zErr ) goto rootAndColumn_exit;
  while( sqlite3_step(pStmt)==SQLITE_ROW ){
    const char *zCol = (const char*)sqlite3_column_text(pStmt, 1);
    if( strlen(zCol)==strlen(zColumn)
     && sqlite3_strnicmp(zCol, zColumn, strlen(zCol))==0
    ){
      p->iCol = sqlite3_column_int(pStmt, 0);
      break;
    }
  }
  sqlite3_finalize(pStmt);
  if( p->iCol<0 ){
    ofstError(p, "no such column: %s.%s", zTable, zColumn);
    goto rootAndColumn_exit;
  }

  /* Determine the page size, needed for raw file offset arithmetic. */
  zSql = sqlite3_mprintf("PRAGMA page_size");
  rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0);
  if( rc ) ofstError(p, "%s: [%s]", sqlite3_errmsg(db), zSql);
  sqlite3_free(zSql);
  if( p->zErr ) goto rootAndColumn_exit;
  if( sqlite3_step(pStmt)!=SQLITE_ROW ){
    ofstError(p, "cannot find page size");
  }else{
    p->szPg = sqlite3_column_int(pStmt, 0);
  }
  sqlite3_finalize(pStmt);

rootAndColumn_exit:
  sqlite3_close(db);
  return;
}
/*
** Pop a page from the stack, freeing its buffer and making the previous
** stack entry (if any) the current page again.
**
** Fix: the original unconditionally read aPgno[nStack-1] and
** aStack[nStack-1] after the decrement, which indexed both arrays at
** [-1] whenever the last page was popped (as happens every time
** ofstWalkPage() finishes the root page).
*/
static void ofstPopPage(GState *p){
  if( p->nStack<=0 ) return;
  p->nStack--;
  sqlite3_free(p->aStack[p->nStack]);
  if( p->nStack>0 ){
    p->pgno = p->aPgno[p->nStack-1];
    p->aPage = p->aStack[p->nStack-1];
  }else{
    /* Stack is now empty; leave no dangling "current page". */
    p->pgno = 0;
    p->aPage = 0;
  }
}
/*
** Push a new page onto the stack: allocate a buffer, read the page's
** content from the database file, and make it the current page.  On a
** short read the error is recorded and the partially pushed page is
** popped again.
**
** Fix: the file offset (pgno-1)*szPg was previously computed in 32-bit
** int arithmetic, overflowing for database files larger than 2GiB; it
** is now widened to long before the multiply (fseek takes a long).
*/
static void ofstPushPage(GState *p, int pgno){
  u8 *pPage;
  size_t got;
  if( p->zErr ) return;
  if( p->nStack >= ArraySize(p->aStack) ){
    ofstError(p, "page stack overflow");
    return;
  }
  p->aPgno[p->nStack] = pgno;
  p->aStack[p->nStack] = pPage = sqlite3_malloc( p->szPg );
  if( pPage==0 ){
    fprintf(stderr, "out of memory\n");
    exit(1);
  }
  p->nStack++;
  p->aPage = pPage;
  p->pgno = pgno;
  fseek(p->f, ((long)pgno-1)*(long)p->szPg, SEEK_SET);
  got = fread(pPage, 1, p->szPg, p->f);
  if( got!=(size_t)p->szPg ){
    ofstError(p, "unable to read page %d", pgno);
    ofstPopPage(p);
  }
}
/* Decode the big-endian 16-bit integer stored at the given offset within
** the current page. */
static int ofst2byte(GState *p, int ofst){
  return (p->aPage[ofst]<<8) | p->aPage[ofst+1];
}
/* Decode the big-endian 32-bit integer stored at the given offset within
** the current page. */
static int ofst4byte(GState *p, int ofst){
  int value = 0;
  int i;
  for(i=0; i<4; i++){
    value = (value<<8) | p->aPage[ofst+i];
  }
  return value;
}
/* Read a variable-length integer at *pOfst in the current page and
** advance *pOfst past it.
**
** SQLite varints are big-endian: each of the first eight bytes supplies
** seven payload bits, with the high bit set meaning "more bytes follow";
** a ninth byte, when present, supplies a full eight payload bits.
*/
static sqlite3_int64 ofstVarint(GState *p, int *pOfst){
sqlite3_int64 x = 0;
u8 *a = &p->aPage[*pOfst];
int n = 0;
while( n<8 && (a[0] & 0x80)!=0 ){
x = (x<<7) + (a[0] & 0x7f);
n++;
a++;
}
if( n==8 ){
/* Ninth byte: all eight bits are payload. */
x = (x<<8) + a[0];
}else{
/* Final byte with its high bit clear: seven payload bits. */
x = (x<<7) + a[0];
}
*pOfst += (n+1);
return x;
}
/* Convert an offset within the current page into an absolute byte offset
** within the database file. */
static int ofstInFile(GState *p, int ofst){
  int pageBase = (p->pgno - 1)*p->szPg;
  return pageBase + ofst;
}
/* Return the size (in bytes) of the data corresponding to the given
** record serial-type code, per the SQLite file format: codes 0..4 store
** 0..4 bytes, 5 is a 6-byte integer, 6 and 7 are 8 bytes, 8..11 store no
** data at all, and codes >= 12 describe BLOB/TEXT content of length
** (code-12)/2. */
static int ofstSerialSize(int scode){
  static const int aFixedSize[] = { 0, 1, 2, 3, 4, 6, 8, 8, 0, 0, 0, 0 };
  if( scode<5 ) return scode;
  if( scode<12 ) return aFixedSize[scode];
  return (scode-12)/2;
}
/* Forward reference */
static void ofstWalkPage(GState*, int);
/* Walk an interior btree page.
**
** An interior table b-tree page has a 12-byte header: the 2-byte cell
** count is at offset 3 and the right-most child page number is the
** 4-byte integer at offset 8.  The cell pointer array starts at offset
** 12, and each cell begins with the 4-byte page number of its left
** child.
**
** NOTE(review): offsets are taken from the start of the page, so the
** 100-byte database header on page 1 is not accounted for; presumably
** the root page of a user table is never page 1 in practice -- confirm.
*/
static void ofstWalkInteriorPage(GState *p){
int nCell;
int i;
int ofst;
int iChild;
nCell = ofst2byte(p, 3);
for(i=0; i<nCell; i++){
ofst = ofst2byte(p, 12+i*2);
iChild = ofst4byte(p, ofst);
ofstWalkPage(p, iChild);
if( p->zErr ) return;
}
ofstWalkPage(p, ofst4byte(p, 8));
}
/* Walk a leaf btree page, printing one line per row.
**
** A leaf table b-tree page has an 8-byte header with the 2-byte cell
** count at offset 3 and the cell pointer array starting at offset 8.
** Each cell is: payload-size varint, rowid varint, then the record,
** which begins with a header-size varint followed by one serial-type
** varint per column.  The target column's data offset is the record
** start plus the header size plus the sizes of all preceding columns.
**
** NOTE(review): the "nPayload > p->szPg-35" test approximates the local
** payload limit; rows whose content spills to overflow pages are
** reported as "# overflow rowid %lld" and skipped rather than followed
** -- confirm that is the intended behavior.
*/
static void ofstWalkLeafPage(GState *p){
int nCell;
int i;
int ofst;
int nPayload;
sqlite3_int64 rowid;
int nHdr;
int j;
int scode;
int sz;
int dataOfst;
char zMsg[200];
nCell = ofst2byte(p, 3);
for(i=0; i<nCell; i++){
ofst = ofst2byte(p, 8+i*2);
nPayload = ofstVarint(p, &ofst);
rowid = ofstVarint(p, &ofst);
if( nPayload > p->szPg-35 ){
sqlite3_snprintf(sizeof(zMsg), zMsg,
"# overflow rowid %lld", rowid);
printf("%s\n", zMsg);
continue;
}
/* Skip past the record header and preceding columns' data. */
dataOfst = ofst;
nHdr = ofstVarint(p, &ofst);
dataOfst += nHdr;
for(j=0; j<p->iCol; j++){
scode = ofstVarint(p, &ofst);
dataOfst += ofstSerialSize(scode);
}
scode = ofstVarint(p, &ofst);
sz = ofstSerialSize(scode);
sqlite3_snprintf(sizeof(zMsg), zMsg,
"rowid %12lld size %5d offset %8d",
rowid, sz, ofstInFile(p, dataOfst));
printf("%s\n", zMsg);
}
}
/*
** Output results from a single page, recursing into children as needed.
**
** Page type byte 5 marks an interior table b-tree page and 13 a leaf
** table b-tree page; any other type byte is reported as an error.
*/
static void ofstWalkPage(GState *p, int pgno){
if( p->zErr ) return;
ofstPushPage(p, pgno);
if( p->zErr ) return;
if( p->aPage[0]==5 ){
ofstWalkInteriorPage(p);
}else if( p->aPage[0]==13 ){
ofstWalkLeafPage(p);
}else{
ofstError(p, "page %d has a faulty type byte: %d", pgno, p->aPage[0]);
}
ofstPopPage(p);
}
/*
** Main entry point.  Usage:
**
**    offsets ?--trace? DATABASE TABLE COLUMN
**
** Looks up the root page and column ordinal via the SQLite library, then
** walks the raw database file, printing rowid, size, and file offset of
** the requested column for each row.
**
** Fix: the database file handle is now closed before the successful
** return (the process previously exited without an fclose).
*/
int main(int argc, char **argv){
  GState g;
  memset(&g, 0, sizeof(g));
  if( argc>2 && strcmp(argv[1],"--trace")==0 ){
    g.bTrace = 1;
    argc--;
    argv++;
  }
  if( argc!=4 ){
    fprintf(stderr, "Usage: %s DATABASE TABLE COLUMN\n", *argv);
    exit(1);
  }
  ofstRootAndColumn(&g, argv[1], argv[2], argv[3]);
  if( g.zErr ){
    fprintf(stderr, "%s\n", g.zErr);
    exit(1);
  }
  ofstTrace(&g, "# szPg = %d\n", g.szPg);
  ofstTrace(&g, "# iRoot = %d\n", g.iRoot);
  ofstTrace(&g, "# iCol = %d\n", g.iCol);
  /* Re-open the database as a plain binary file for the raw page walk. */
  g.f = fopen(argv[1], "rb");
  if( g.f==0 ){
    fprintf(stderr, "cannot open \"%s\"\n", argv[1]);
    exit(1);
  }
  ofstWalkPage(&g, g.iRoot);
  fclose(g.f);
  if( g.zErr ){
    fprintf(stderr, "%s\n", g.zErr);
    exit(1);
  }
  return 0;
}

View File

@ -1,299 +0,0 @@
set rcsid {$Id: omittest.tcl,v 1.8 2008/10/13 15:35:09 drh Exp $}

# Documentation for this script. This may be output to stderr
# if the script is invoked incorrectly.
#
# Fixes relative to the prior text: the stated default makefile now
# matches the actual default set in process_options, and the spelling
# of "separate" is corrected.
set ::USAGE_MESSAGE {
This Tcl script is used to test the various compile time options
available for omitting code (the SQLITE_OMIT_xxx options). It
should be invoked as follows:
<script> ?test-symbol? ?-makefile PATH-TO-MAKEFILE? ?-skip_run?
The default value for ::MAKEFILE is "./Makefile.linux-gcc".
If -skip_run option is given then only the compile part is attempted.
This script builds the testfixture program and runs the SQLite test suite
once with each SQLITE_OMIT_ option defined and then once with all options
defined together. Each run is performed in a separate directory created
as a sub-directory of the current directory by the script. The output
of the build is saved in <sub-directory>/build.log. The output of the
test-suite is saved in <sub-directory>/test.log.
Almost any SQLite makefile (except those generated by configure - see below)
should work. The following properties are required:
* The makefile should support the "testfixture" target.
* The makefile should support the "test" target.
* The makefile should support the variable "OPTS" as a way to pass
options from the make command line to lemon and the C compiler.
More precisely, the following two invocations must be supported:
$::MAKEBIN -f $::MAKEFILE testfixture OPTS="-DSQLITE_OMIT_ALTERTABLE=1"
$::MAKEBIN -f $::MAKEFILE test
Makefiles generated by the sqlite configure program cannot be used as
they do not respect the OPTS variable.
}
# Build a testfixture executable and run quick.test using it. The first
# parameter is the name of the directory to create and use to run the
# test in. The second parameter is a list of OMIT symbols to define
# when doing so. For example:
#
#     run_quick_test /tmp/testdir {SQLITE_OMIT_TRIGGER SQLITE_OMIT_VIEW}
#
#
proc run_quick_test {dir omit_symbol_list} {
  # Compile the value of the OPTS Makefile variable.
  set opts ""
  if {$::tcl_platform(platform)=="windows"} {
    append opts "OPTS += -DSQLITE_OS_WIN=1\n"
    # NOTE(review): "target" is assigned here but never read; the build
    # target actually comes from the global ::TARGET below.  Possibly a
    # leftover from an earlier revision -- confirm before removing.
    set target "testfixture.exe"
  } else {
    append opts "OPTS += -DSQLITE_OS_UNIX=1\n"
  }
  foreach sym $omit_symbol_list {
    append opts "OPTS += -D${sym}=1\n"
  }

  # Create the directory and do the build. If an error occurs return
  # early without attempting to run the test suite.
  file mkdir $dir
  puts -nonewline "Building $dir..."
  flush stdout
  # Best-effort copy of configure outputs; silently skipped when absent.
  catch {
    file copy -force ./config.h $dir
    file copy -force ./libtool $dir
  }
  # Inject the OPTS assignments just before the first "include" line of
  # the makefile and write the modified copy into the test directory.
  set fd [open $::MAKEFILE]
  set mkfile [read $fd]
  close $fd
  regsub {\ninclude} $mkfile "\n$opts\ninclude" mkfile
  set fd [open $dir/makefile w]
  puts $fd $mkfile
  close $fd
  set rc [catch {
    exec $::MAKEBIN -C $dir -f makefile clean $::TARGET >& $dir/build.log
  }]
  if {$rc} {
    puts "No good. See $dir/build.log."
    return
  } else {
    puts "Ok"
  }

  # Create an empty file "$dir/sqlite3". This is to trick the makefile out
  # of trying to build the sqlite shell. The sqlite shell won't build
  # with some of the OMIT options (i.e OMIT_COMPLETE).
  set sqlite3_dummy $dir/sqlite3
  if {$::tcl_platform(platform)=="windows"} {
    append sqlite3_dummy ".exe"
  }
  if {![file exists $sqlite3_dummy]} {
    set wr [open $sqlite3_dummy w]
    puts $wr "dummy"
    close $wr
  }

  # Run the test suite unless -skip_run was given.
  if {$::SKIP_RUN} {
    puts "Skip testing $dir."
  } else {
    # Run the test suite.
    puts -nonewline "Testing $dir..."
    flush stdout
    set rc [catch {
      exec $::MAKEBIN -C $dir -f makefile test >& $dir/test.log
    }]
    if {$rc} {
      puts "No good. See $dir/test.log."
    } else {
      puts "Ok"
    }
  }
}
# This proc processes the command line options passed to this script.
# Supported options are "-makefile PATH", "-nmake", "-target NAME" and
# "-skip_run".  A bare argument names the single OMIT/ENABLE symbol to
# test and is stored in ::SYMBOL.  The default for ::MAKEFILE is
# "./Makefile.linux-gcc" ("./Makefile" on Windows).
#
proc process_options {argv} {
  set ::MAKEBIN make                        ;# Default value
  if {$::tcl_platform(platform)=="windows"} {
    set ::MAKEFILE ./Makefile               ;# Default value on Windows
  } else {
    set ::MAKEFILE ./Makefile.linux-gcc     ;# Default value
  }
  set ::SKIP_RUN 0                          ;# Default to attempt test
  set ::TARGET testfixture                  ;# Default thing to build

  for {set i 0} {$i < [llength $argv]} {incr i} {
    switch -- [lindex $argv $i] {
      -makefile {
        incr i
        set ::MAKEFILE [lindex $argv $i]
      }
      -nmake {
        set ::MAKEBIN nmake
        set ::MAKEFILE ./Makefile.msc
      }
      -target {
        incr i
        set ::TARGET [lindex $argv $i]
      }
      -skip_run {
        set ::SKIP_RUN 1
      }
      default {
        if {[info exists ::SYMBOL]} {
          puts stderr [string trim $::USAGE_MESSAGE]
          exit -1
        }
        set ::SYMBOL [lindex $argv $i]
      }
    }
  }

  # Bug fix: normalize once, after all options are parsed.  Previously
  # this ran inside the argument loop, so the default makefile path was
  # never normalized when the script was invoked without arguments (and
  # was redundantly re-normalized once per argument otherwise).
  set ::MAKEFILE [file normalize $::MAKEFILE]
}
# Main routine.
#
# Builds the OMIT/ENABLE symbol lists, parses the command line, and then
# either tests the single requested symbol or iterates over every
# combination described in the header comment of this script.
proc main {argv} {
  # List of SQLITE_OMIT_XXX symbols supported by SQLite.
  set ::OMIT_SYMBOLS [list \
    SQLITE_OMIT_ALTERTABLE \
    SQLITE_OMIT_ANALYZE \
    SQLITE_OMIT_ATTACH \
    SQLITE_OMIT_AUTHORIZATION \
    SQLITE_OMIT_AUTOINCREMENT \
    SQLITE_OMIT_AUTOINIT \
    SQLITE_OMIT_AUTOMATIC_INDEX \
    SQLITE_OMIT_AUTORESET \
    SQLITE_OMIT_AUTOVACUUM \
    SQLITE_OMIT_BETWEEN_OPTIMIZATION \
    SQLITE_OMIT_BLOB_LITERAL \
    SQLITE_OMIT_BTREECOUNT \
    SQLITE_OMIT_BUILTIN_TEST \
    SQLITE_OMIT_CAST \
    SQLITE_OMIT_CHECK \
    SQLITE_OMIT_COMPILEOPTION_DIAGS \
    SQLITE_OMIT_COMPLETE \
    SQLITE_OMIT_COMPOUND_SELECT \
    SQLITE_OMIT_CTE \
    SQLITE_OMIT_DATETIME_FUNCS \
    SQLITE_OMIT_DECLTYPE \
    SQLITE_OMIT_DEPRECATED \
    SQLITE_OMIT_EXPLAIN \
    SQLITE_OMIT_FLAG_PRAGMAS \
    SQLITE_OMIT_FLOATING_POINT \
    SQLITE_OMIT_FOREIGN_KEY \
    SQLITE_OMIT_GET_TABLE \
    SQLITE_OMIT_INCRBLOB \
    SQLITE_OMIT_INTEGRITY_CHECK \
    SQLITE_OMIT_LIKE_OPTIMIZATION \
    SQLITE_OMIT_LOAD_EXTENSION \
    SQLITE_OMIT_LOCALTIME \
    SQLITE_OMIT_LOOKASIDE \
    SQLITE_OMIT_MEMORYDB \
    SQLITE_OMIT_OR_OPTIMIZATION \
    SQLITE_OMIT_PAGER_PRAGMAS \
    SQLITE_OMIT_PRAGMA \
    SQLITE_OMIT_PROGRESS_CALLBACK \
    SQLITE_OMIT_QUICKBALANCE \
    SQLITE_OMIT_REINDEX \
    SQLITE_OMIT_SCHEMA_PRAGMAS \
    SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS \
    SQLITE_OMIT_SHARED_CACHE \
    SQLITE_OMIT_SUBQUERY \
    SQLITE_OMIT_TCL_VARIABLE \
    SQLITE_OMIT_TEMPDB \
    SQLITE_OMIT_TRACE \
    SQLITE_OMIT_TRIGGER \
    SQLITE_OMIT_TRUNCATE_OPTIMIZATION \
    SQLITE_OMIT_UNIQUE_ENFORCEMENT \
    SQLITE_OMIT_UTF16 \
    SQLITE_OMIT_VACUUM \
    SQLITE_OMIT_VIEW \
    SQLITE_OMIT_VIRTUALTABLE \
    SQLITE_OMIT_WAL \
    SQLITE_OMIT_WSD \
    SQLITE_OMIT_XFER_OPT \
  ]
  # ENABLE/DISABLE symbols, tested one at a time below.
  set ::ENABLE_SYMBOLS [list \
    SQLITE_DISABLE_DIRSYNC \
    SQLITE_DISABLE_LFS \
    SQLITE_ENABLE_ATOMIC_WRITE \
    SQLITE_ENABLE_COLUMN_METADATA \
    SQLITE_ENABLE_EXPENSIVE_ASSERT \
    SQLITE_ENABLE_FTS3 \
    SQLITE_ENABLE_FTS3_PARENTHESIS \
    SQLITE_ENABLE_FTS4 \
    SQLITE_ENABLE_IOTRACE \
    SQLITE_ENABLE_LOAD_EXTENSION \
    SQLITE_ENABLE_LOCKING_STYLE \
    SQLITE_ENABLE_MEMORY_MANAGEMENT \
    SQLITE_ENABLE_MEMSYS3 \
    SQLITE_ENABLE_MEMSYS5 \
    SQLITE_ENABLE_OVERSIZE_CELL_CHECK \
    SQLITE_ENABLE_RTREE \
    SQLITE_ENABLE_STAT3 \
    SQLITE_ENABLE_UNLOCK_NOTIFY \
    SQLITE_ENABLE_UPDATE_DELETE_LIMIT \
  ]
  # Process any command line options.
  process_options $argv
  # If one symbol was named on the command line, test just that one.
  if {[info exists ::SYMBOL] } {
    set sym $::SYMBOL
    if {[lsearch $::OMIT_SYMBOLS $sym]<0 && [lsearch $::ENABLE_SYMBOLS $sym]<0} {
      puts stderr "No such symbol: $sym"
      exit -1
    }
    # Test directory name is the symbol minus its SQLITE_ prefix.
    set dirname "test_[regsub -nocase {^x*SQLITE_} $sym {}]"
    run_quick_test $dirname $sym
  } else {
    # First try a test with all OMIT symbols except SQLITE_OMIT_FLOATING_POINT
    # and SQLITE_OMIT_PRAGMA defined. The former doesn't work (causes segfaults)
    # and the latter is currently incompatible with the test suite (this should
    # be fixed, but it will be a lot of work).
    set allsyms [list]
    foreach s $::OMIT_SYMBOLS {
      if {$s!="SQLITE_OMIT_FLOATING_POINT" && $s!="SQLITE_OMIT_PRAGMA"} {
        lappend allsyms $s
      }
    }
    run_quick_test test_OMIT_EVERYTHING $allsyms
    # Now try one quick.test with each of the OMIT symbols defined. Included
    # are the OMIT_FLOATING_POINT and OMIT_PRAGMA symbols, even though we
    # know they will fail. It's good to be reminded of this from time to time.
    foreach sym $::OMIT_SYMBOLS {
      set dirname "test_[regsub -nocase {^x*SQLITE_} $sym {}]"
      run_quick_test $dirname $sym
    }
    # Try the ENABLE/DISABLE symbols one at a time.
    # We don't do them all at once since some are conflicting.
    foreach sym $::ENABLE_SYMBOLS {
      set dirname "test_[regsub -nocase {^x*SQLITE_} $sym {}]"
      run_quick_test $dirname $sym
    }
  }
}

# Script entry point.
main $argv

View File

@ -1,92 +0,0 @@
/*
** 2013-10-01
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
******************************************************************************
**
** Compute hash signatures for every page of a database file. This utility
** program is useful for analyzing the output logs generated by the
** ext/misc/vfslog.c extension.
*/
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <stdlib.h>
/*
** Compute signature for a block of content.
**
** For blocks of 16 or fewer bytes, the signature is just a hex dump of
** the entire block.
**
** For blocks of more than 16 bytes, the signature is a hex dump of the
** first 8 bytes followed by a 64-bit hash of the entire block (any
** trailing bytes beyond the last full 8-byte group are ignored).
**
** zCksum must be large enough for the longest result: 16 hex digits,
** a '-', two 8-digit hex words, and the terminator (34 bytes).
*/
static void vlogSignature(unsigned char *p, int n, char *zCksum){
  unsigned int s0 = 0, s1 = 0;
  int i;
  if( n<=16 ){
    for(i=0; i<n; i++) sprintf(zCksum+i*2, "%02x", p[i]);
  }else{
    /* Bug fix: read the 32-bit words with memcpy instead of casting p
    ** to an unsigned int pointer.  The cast invoked undefined behavior
    ** (misalignment / strict aliasing) for arbitrary buffers; memcpy
    ** yields the identical native-endian words safely. */
    for(i=0; i<n-7; i+=8){
      unsigned int x0, x1;
      memcpy(&x0, p+i, sizeof(x0));
      memcpy(&x1, p+i+4, sizeof(x1));
      s0 += x0 + s1;
      s1 += x1 + s0;
    }
    for(i=0; i<8; i++) sprintf(zCksum+i*2, "%02x", p[i]);
    sprintf(zCksum+i*2, "-%08x%08x", s0, s1);
  }
}
/*
** Open a file. Find its page size. Read each page, and compute and
** display the page signature.
**
** The page size is the big-endian 2-byte value at byte offset 16 of
** the header; the stored value 1 encodes a page size of 65536.
*/
static void computeSigs(const char *zFilename){
  FILE *in = fopen(zFilename, "rb");
  unsigned pgsz;               /* Page size read from the header */
  size_t got;                  /* Bytes returned by fread() */
  unsigned n;                  /* Page number, starting at 1 */
  char zSig[50];               /* Signature text from vlogSignature() */
  unsigned char aBuf[50];      /* First bytes of the file header */
  unsigned char aPage[65536];  /* One complete page */
  if( in==0 ){
    fprintf(stderr, "cannot open \"%s\"\n", zFilename);
    return;
  }
  got = fread(aBuf, 1, sizeof(aBuf), in);
  if( got!=sizeof(aBuf) ){
    goto endComputeSigs;
  }
  pgsz = aBuf[16]*256 + aBuf[17];
  if( pgsz==1 ) pgsz = 65536;
  /* A valid page size is a power of two */
  if( (pgsz & (pgsz-1))!=0 ){
    fprintf(stderr, "invalid page size: %02x%02x\n", aBuf[16], aBuf[17]);
    goto endComputeSigs;
  }
  rewind(in);
  /* Bug fixes: the signature was previously written into the unsigned
  ** char header buffer passed where vlogSignature() expects a char*
  ** (an incompatible pointer type); a dedicated char buffer is used
  ** instead.  The unsigned counter is also now printed with %u. */
  for(n=1; (got=fread(aPage, 1, pgsz, in))==pgsz; n++){
    vlogSignature(aPage, pgsz, zSig);
    printf("%4u: %s\n", n, zSig);
  }
endComputeSigs:
  fclose(in);
}
/*
** Find page signatures for all named files.  Each command-line
** argument is processed independently; the exit status is always 0.
*/
int main(int argc, char **argv){
  int iArg;
  for(iArg=1; iArg<argc; iArg++){
    computeSigs(argv[iArg]);
  }
  return 0;
}

View File

@ -1,20 +0,0 @@
#!/usr/bin/tcl
#
# Replace string with another string -OR- include
# only lines successfully modified with a regular
# expression.
#
# Usage:  replace.tcl MODE FROM TO  <input >output
#
#   MODE "exact"   - every literal occurrence of FROM becomes TO.
#   MODE "include" - only lines where regexp FROM matched (with each
#                    match replaced by TO) are emitted; all other
#                    lines are dropped.
#
# Exit codes: 1 = unknown mode, 2 = empty FROM string.
set mode [string tolower [lindex $argv 0]]
set from [lindex $argv 1]
set to [lindex $argv 2]
if {$mode ni [list exact include]} {exit 1}
if {[string length $from]==0} {exit 2}
# Filter stdin to stdout one line at a time.
while {![eof stdin]} {
  set line [gets stdin]
  if {[eof stdin]} break
  switch -exact $mode {
    exact {set line [string map [list $from $to] $line]}
    include {if {[regsub -all -- $from $line $to line]==0} continue}
  }
  puts stdout $line
}

View File

@ -1,233 +0,0 @@
# 2010 January 7
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements utility functions for SQLite library.
#
# This file attempts to restore the header of a journal.
# This may be useful for rolling-back the last committed
# transaction from a recovered journal.
#
package require sqlite3

# State set from the command line:
#   parm_error  - becomes -1 if the arguments cannot be parsed
#   fix_chksums - -fix_chksums flag: repair per-page checksums in place
#   dump_pages  - -dump_pages flag: hex dump every journal page
#   db_name     - database file name; the journal is "$db_name-journal"
set parm_error 0
set fix_chksums 0
set dump_pages 0
set db_name ""
# Walk the argument list: flags may appear in any order, and exactly
# one non-flag argument (the database name) is accepted.
for {set i 0} {$i<$argc} {incr i} {
  if {[lindex $argv $i] == "-fix_chksums"} {
    set fix_chksums -1
  } elseif {[lindex $argv $i] == "-dump_pages"} {
    set dump_pages -1
  } elseif {$db_name == ""} {
    set db_name [lindex $argv $i]
    set jrnl_name $db_name-journal
  } else {
    set parm_error -1
  }
}
if {$parm_error || $db_name == ""} {
  puts "USAGE: restore_jrnl.tcl \[-fix_chksums\] \[-dump_pages\] db_name"
  puts "Example: restore_jrnl.tcl foo.sqlite"
  return
}
# is there a way to determine this?
# NOTE(review): 512 is assumed for the journal sector size; the real
# value depends on the VFS that wrote the journal -- confirm.
set sectsz 512
# Copy file $from into $to, overwriting $to if it already exists.
#
proc copy_file {from to} {
  file copy -force $from $to
}
# Execute some SQL against the global database handle "db".  Returns a
# two-element list: the Tcl catch code and the result (or error
# message) of the statement.
#
proc catchsql {sql} {
  set rc [catch {uplevel [list db eval $sql]} msg]
  list $rc $msg
}
# Perform a test
#
# Evaluate $cmd in the caller's scope and compare its result against
# $expected, reporting Ok or the mismatch under the label $name.
proc do_test {name cmd expected} {
  puts -nonewline "$name ..."
  set actual [uplevel $cmd]
  if {$actual ne $expected} {
    puts Error
    puts " Got: $actual"
    puts " Expected: $expected"
  } else {
    puts Ok
  }
}
# Calc checksum nonce from journal page data.
#
# The stored per-page checksum equals the nonce plus a sample of every
# 200th byte of page data (see calc_chksum); this recovers the nonce by
# reading the stored checksum and subtracting the same samples back out.
# NOTE(review): hexio_read/hexio_get_int are helpers from the SQLite
# test infrastructure and are not defined in this script -- it must be
# run in an environment that provides them.
proc calc_nonce {jrnl_pgno} {
  global sectsz
  global db_pgsz
  global jrnl_name
  # Each journal record is: 4-byte page number, page image, 4-byte checksum.
  set jrnl_pg_offset [expr $sectsz+((4+$db_pgsz+4)*$jrnl_pgno)]
  set nonce [hexio_get_int [hexio_read $jrnl_name [expr $jrnl_pg_offset+4+$db_pgsz] 4]]
  for {set i [expr $db_pgsz-200]} {$i>0} {set i [expr $i-200]} {
    set byte [hexio_get_int [hexio_read $jrnl_name [expr $jrnl_pg_offset+4+$i] 1]]
    set nonce [expr $nonce-$byte]
  }
  return $nonce
}
# Calc checksum from journal page data.
#
# Computes the per-page checksum: the global nonce plus the value of
# every 200th byte of the page image, counting down from the end.
# NOTE(review): relies on the hexio_* helpers from the SQLite test
# infrastructure, which are not defined in this script.
proc calc_chksum {jrnl_pgno} {
  global sectsz
  global db_pgsz
  global jrnl_name
  global nonce
  # Offset of this record: header sector + preceding records.
  set jrnl_pg_offset [expr $sectsz+((4+$db_pgsz+4)*$jrnl_pgno)]
  set chksum $nonce
  for {set i [expr $db_pgsz-200]} {$i>0} {set i [expr $i-200]} {
    set byte [hexio_get_int [hexio_read $jrnl_name [expr $jrnl_pg_offset+4+$i] 1]]
    set chksum [expr $chksum+$byte]
  }
  return $chksum
}
# Print journal page data in hex dump form
#
# Shows a header (absolute offset, journal/database page numbers,
# recomputed nonce and stored checksum) followed by a 16-bytes-per-line
# hex/ASCII dump of the page image.
proc dump_jrnl_page {jrnl_pgno} {
  global sectsz
  global db_pgsz
  global jrnl_name

  # print a header block for the page
  puts [string repeat "-" 79]
  set jrnl_pg_offset [expr $sectsz+((4+$db_pgsz+4)*$jrnl_pgno)]
  set db_pgno [hexio_get_int [hexio_read $jrnl_name [expr $jrnl_pg_offset] 4]]
  set chksum [hexio_get_int [hexio_read $jrnl_name [expr $jrnl_pg_offset+4+$db_pgsz] 4]]
  set nonce [calc_nonce $jrnl_pgno]
  puts [ format {jrnl_pg_offset: %08x (%d) jrnl_pgno: %d db_pgno: %d} \
      $jrnl_pg_offset $jrnl_pg_offset \
      $jrnl_pgno $db_pgno]
  puts [ format {nonce: %08x chksum: %08x} \
      $nonce $chksum]

  # now hex dump the data
  # This is derived from the Tcler's WIKI
  set fid [open $jrnl_name r]
  fconfigure $fid -translation binary -encoding binary
  seek $fid [expr $jrnl_pg_offset+4]
  set data [read $fid $db_pgsz]
  close $fid
  for {set addr 0} {$addr<$db_pgsz} {set addr [expr $addr+16]} {
    # get 16 bytes of data
    set s [string range $data $addr [expr $addr+16]]

    # Convert the data to hex and to characters.
    binary scan $s H*@0a* hex ascii

    # Replace non-printing characters in the data.
    regsub -all -- {[^[:graph:] ]} $ascii {.} ascii

    # Split the 16 bytes into two 8-byte chunks
    regexp -- {(.{16})(.{0,16})} $hex -> hex1 hex2

    # Convert the hex to pairs of hex digits
    regsub -all -- {..} $hex1 {& } hex1
    regsub -all -- {..} $hex2 {& } hex2

    # Print the hex and ascii data
    puts [ format {%08x %-24s %-24s %-16s} \
        $addr $hex1 $hex2 $ascii ]
  }
}
# Setup for the tests. Make a backup copy of the files.
#
if [file exist $db_name.org] {
  puts "ERROR: during back-up: $db_name.org exists already."
  return;
}
if [file exist $jrnl_name.org] {
  puts "ERROR: during back-up: $jrnl_name.org exists already."
  return
}
copy_file $db_name $db_name.org
copy_file $jrnl_name $jrnl_name.org

# Derive geometry: the page size is the 2-byte value at offset 16 of
# the database header; each journal record is a 4-byte page number,
# the page image, and a 4-byte checksum, and the journal header
# occupies the first sector.
set db_fsize [file size $db_name]
set db_pgsz [hexio_get_int [hexio_read $db_name 16 2]]
set db_npage [expr {$db_fsize / $db_pgsz}]
set jrnl_fsize [file size $jrnl_name]
set jrnl_npage [expr {($jrnl_fsize - $sectsz) / (4 + $db_pgsz + 4)}]

# calculate checksum nonce for first page
set nonce [calc_nonce 0]

# verify all the pages in the journal use the same nonce
for {set i 1} {$i<$jrnl_npage} {incr i} {
  set tnonce [calc_nonce $i]
  if {$tnonce != $nonce} {
    puts "WARNING: different nonces: 0=$nonce $i=$tnonce"
    if {$fix_chksums } {
      set jrnl_pg_offset [expr $sectsz+((4+$db_pgsz+4)*$i)]
      set tchksum [calc_chksum $i]
      hexio_write $jrnl_name [expr $jrnl_pg_offset+4+$db_pgsz] [format %08x $tchksum]
      puts "INFO: fixing chksum: $i=$tchksum"
    }
  }
}

# verify all the page numbers in the journal
for {set i 0} {$i<$jrnl_npage} {incr i} {
  set jrnl_pg_offset [expr $sectsz+((4+$db_pgsz+4)*$i)]
  set db_pgno [hexio_get_int [hexio_read $jrnl_name $jrnl_pg_offset 4]]
  if {$db_pgno < 1} {
    puts "WARNING: page number < 1: $i=$db_pgno"
  }
  if {$db_pgno >= $db_npage} {
    puts "WARNING: page number >= $db_npage: $i=$db_pgno"
  }
}

# dump page data
if {$dump_pages} {
  for {set i 0} {$i<$jrnl_npage} {incr i} {
    dump_jrnl_page $i
  }
}

# Rebuild the journal header so the journal is recognized as hot.
# write the 8 byte magic string
hexio_write $jrnl_name 0 d9d505f920a163d7
# write -1 for number of records
hexio_write $jrnl_name 8 ffffffff
# write 00 for checksum nonce
hexio_write $jrnl_name 12 [format %08x $nonce]
# write page count
hexio_write $jrnl_name 16 [format %08x $db_npage]
# write sector size
hexio_write $jrnl_name 20 [format %08x $sectsz]
# write page size
hexio_write $jrnl_name 24 [format %08x $db_pgsz]

# check the integrity of the database with the patched journal
sqlite3 db $db_name
do_test restore_jrnl-1.0 {
  catchsql {PRAGMA integrity_check}
} {0 ok}
db close

View File

@ -1,155 +0,0 @@
/*
** This program is used to generate and verify databases with hot journals.
** Use this program to generate a hot journal on one machine and verify
** that it rolls back correctly on another machine with a different
** architecture.
**
** Usage:
**
** rollback-test new [-utf8] [-utf16le] [-utf16be] [-pagesize=N] DATABASE
** rollback-test check DATABASE
** rollback-test crash [-wal] [-rollback] DATABASE
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "sqlite3.h"
/*
** Print a usage summary for all three sub-commands on standard error,
** then terminate the process with a failing exit status.  Never
** returns.
*/
static void usage(char *argv0){
  fprintf(stderr,
    "Usage: %s new [-utf8] [-utf16le] [-utf16be] [-pagesize=N] DATABASE\n"
    " %s check DATABASE\n"
    " %s crash [-wal] DATABASE\n",
    argv0, argv0, argv0
  );
  exit(1);
}
/*
** Open the database file zFilename with sqlite3_open().  On failure,
** report the error on stderr, close the half-opened handle, and
** exit(1).  On success, return the open handle.
*/
static sqlite3 *openDb(const char *zFilename){
  int rc;
  sqlite3 *db;
  rc = sqlite3_open(zFilename, &db);
  if( rc ){
    fprintf(stderr, "Cannot open \"%s\": %s\n",
            zFilename, sqlite3_errmsg(db));
    sqlite3_close(db);
    exit(1);
  }
  return db;
}
static int nReply = 0;        /* Bytes of text accumulated in zReply[] */
static char zReply[1000];     /* Space-separated column values from the
                              ** most recent runSql() call */

/*
** sqlite3_exec() callback: append each column value (NULL rendered as
** "NULL") to the global zReply buffer, separated by single spaces.
** Text that would overflow zReply is silently truncated.  Always
** returns 0 so the query keeps running.
*/
static int execCallback(void *NotUsed, int nArg, char **azArg, char **azCol){
  int i, n;
  char *z;
  for(i=0; i<nArg; i++){
    z = azArg[i];
    if( z==0 ) z = "NULL";
    /* Insert a separator before every value except the first */
    if( nReply>0 && nReply<sizeof(zReply)-1 ) zReply[nReply++] = ' ';
    n = strlen(z);
    /* Clamp to the space remaining, keeping room for the terminator */
    if( nReply+n>=sizeof(zReply)-1 ) n = sizeof(zReply) - nReply - 1;
    memcpy(&zReply[nReply], z, n);
    nReply += n;
    zReply[nReply] = 0;
  }
  return 0;
}
/*
** Run zSql against db, collecting query output into the global zReply
** buffer via execCallback().  On any error (error message or non-zero
** result code) the error is reported to stderr and the process exits
** with status 1.
*/
static void runSql(sqlite3 *db, const char *zSql){
  char *zErr = 0;
  int rc;
  nReply = 0;
  rc = sqlite3_exec(db, zSql, execCallback, 0, &zErr);
  if( zErr ){
    fprintf(stderr, "SQL error: %s\n", zErr);
    exit(1);
  }
  if( rc ){
    fprintf(stderr, "SQL error: %s\n", sqlite3_errmsg(db));
    exit(1);
  }
}
/*
** Entry point.  Dispatches on argv[1]:
**
**   new   - create DATABASE and populate table t1 with 1024 rows
**           (encoding/page-size options are applied first, before any
**           content is written)
**   check - run PRAGMA integrity_check and verify t1's content
**   crash - begin a large UPDATE and exit(0) without committing,
**           leaving a hot journal (or a WAL, with -wal) behind
*/
int main(int argc, char **argv){
  sqlite3 *db;
  int i;
  if( argc<3 ) usage(argv[0]);
  if( strcmp(argv[1], "new")==0 ){
    db = openDb(argv[argc-1]);
    /* Options between the sub-command and the database name */
    for(i=2; i<argc-1; i++){
      if( strcmp(argv[i],"-utf8")==0 ){
        runSql(db, "PRAGMA encoding=UTF8");
      }else if( strcmp(argv[i], "-utf16le")==0 ){
        runSql(db, "PRAGMA encoding=UTF16LE");
      }else if( strcmp(argv[i], "-utf16be")==0 ){
        runSql(db, "PRAGMA encoding=UTF16BE");
      }else if( strncmp(argv[i], "-pagesize=", 10)==0 ){
        int szPg = atoi(&argv[i][10]);
        char zBuf[100];
        sprintf(zBuf, "PRAGMA pagesize=%d", szPg);
        runSql(db, zBuf);
      }else{
        fprintf(stderr, "unknown option %s\n", argv[i]);
        usage(argv[0]);
      }
    }
    /* Each INSERT..SELECT doubles the row count; the final UPDATE
    ** appends each row's rowid to y, making every value unique */
    runSql(db,
      "BEGIN;"
      "CREATE TABLE t1(x INTEGER PRIMARY KEY, y);"
      "INSERT INTO t1(y) VALUES('abcdefghijklmnopqrstuvwxyz');"
      "INSERT INTO t1(y) VALUES('abcdefghijklmnopqrstuvwxyz');"
      "INSERT INTO t1(y) SELECT y FROM t1;" /* 4 */
      "INSERT INTO t1(y) SELECT y FROM t1;" /* 8 */
      "INSERT INTO t1(y) SELECT y FROM t1;" /* 16 */
      "INSERT INTO t1(y) SELECT y FROM t1;" /* 32 */
      "INSERT INTO t1(y) SELECT y FROM t1;" /* 64 */
      "INSERT INTO t1(y) SELECT y FROM t1;" /* 128 */
      "INSERT INTO t1(y) SELECT y FROM t1;" /* 256 */
      "INSERT INTO t1(y) SELECT y FROM t1;" /* 512 */
      "INSERT INTO t1(y) SELECT y FROM t1;" /* 1024 */
      "UPDATE t1 SET y=(y || x);"
      "CREATE INDEX t1y ON t1(y);"
      "COMMIT;"
    );
    sqlite3_close(db);
  }else if( strcmp(argv[1], "check")==0 ){
    db = openDb(argv[argc-1]);
    runSql(db, "PRAGMA integrity_check");
    if( strcmp(zReply, "ok")!=0 ){
      fprintf(stderr, "Integrity check: %s\n", zReply);
      exit(1);
    }
    /* Every y must equal the original text with its rowid appended */
    runSql(db,
      "SELECT count(*) FROM t1 WHERE y<>('abcdefghijklmnopqrstuvwxyz' || x)"
    );
    if( strcmp(zReply, "0")!=0 ){
      fprintf(stderr, "Wrong content\n");
      exit(1);
    }
    printf("Ok\n");
  }else if( strcmp(argv[1], "crash")==0 ){
    db = openDb(argv[argc-1]);
    for(i=2; i<argc-1; i++){
      if( strcmp(argv[i],"-wal")==0 ){
        runSql(db, "PRAGMA journal_mode=WAL");
      }else if( strcmp(argv[i], "-rollback")==0 ){
        runSql(db, "PRAGMA journal_mode=DELETE");
      }else{
        fprintf(stderr, "unknown option %s\n", argv[i]);
        usage(argv[0]);
      }
    }
    /* The small cache forces spilling to disk mid-transaction, and
    ** exiting without COMMIT leaves the journal/WAL hot */
    runSql(db,
      "PRAGMA cache_size=10;"
      "BEGIN;"
      "UPDATE t1 SET y=(y || -x)"
    );
    exit(0);
  }else{
    usage(argv[0]);
  }
  return 0;
}

View File

@ -1,68 +0,0 @@
#!/bin/bash
#
# This is a template for a script used for day-to-day size and
# performance monitoring of SQLite. Typical usage:
#
# sh run-speed-test.sh trunk # Baseline measurement of trunk
# sh run-speed-test.sh x1 # Measure some experimental change
# fossil test-diff --tk cout-trunk.txt cout-x1.txt # View changes
#
# There are multiple output files, all with a base name given by
# the first argument:
#
# summary-$BASE.txt # Copy of standard output
# cout-$BASE.txt # cachegrind output
# explain-$BASE.txt # EXPLAIN listings
#
# Bail out with a usage message when no output base-name was given.
if test "$1" = ""
then
  echo "Usage: $0 OUTPUTFILE [OPTIONS]"
  exit
fi
NAME=$1
shift
# Defaults.  Remaining arguments either select speedtest1 options or
# fall through to the compiler via CC_OPTS.
CC_OPTS="-DSQLITE_ENABLE_RTREE"
SPEEDTEST_OPTS="--shrink-memory --reprepare"
SIZE=5
while test "$1" != ""; do
  case $1 in
    --reprepare)
      SPEEDTEST_OPTS="$SPEEDTEST_OPTS $1"
      ;;
    --autovacuum)
      SPEEDTEST_OPTS="$SPEEDTEST_OPTS $1"
      ;;
    --utf16be)
      SPEEDTEST_OPTS="$SPEEDTEST_OPTS $1"
      ;;
    --without-rowid)
      SPEEDTEST_OPTS="$SPEEDTEST_OPTS $1"
      ;;
    --size)
      shift; SIZE=$1
      ;;
    *)
      # Unrecognized arguments are passed to the compiler.
      CC_OPTS="$CC_OPTS $1"
      ;;
  esac
  shift
done
SPEEDTEST_OPTS="$SPEEDTEST_OPTS --size $SIZE"
# Record the configuration at the top of the summary log.
echo "NAME = $NAME" | tee summary-$NAME.txt
echo "SPEEDTEST_OPTS = $SPEEDTEST_OPTS" | tee -a summary-$NAME.txt
echo "CC_OPTS = $CC_OPTS" | tee -a summary-$NAME.txt
# Clean previous outputs, then build sqlite3.o and log its size.
rm -f cachegrind.out.* speedtest1 speedtest1.db sqlite3.o
gcc -g -Os -Wall -I. $CC_OPTS -c sqlite3.c
size sqlite3.o | tee -a summary-$NAME.txt
# Build the shell (with EXPLAIN comments) and the speedtest1 driver.
gcc -g -Os -Wall -I. $CC_OPTS \
  -DSQLITE_ENABLE_EXPLAIN_COMMENTS \
  ./shell.c ./sqlite3.c -o sqlite3 -ldl -lpthread
SRC=./speedtest1.c
gcc -g -Os -Wall -I. $CC_OPTS $SRC ./sqlite3.o -o speedtest1 -ldl -lpthread
ls -l speedtest1 | tee -a summary-$NAME.txt
# Run the benchmark under cachegrind, then annotate the profile and
# save the EXPLAIN listings for later comparison.
valgrind --tool=cachegrind ./speedtest1 speedtest1.db \
  $SPEEDTEST_OPTS 2>&1 | tee -a summary-$NAME.txt
size sqlite3.o | tee -a summary-$NAME.txt
wc sqlite3.c
cg_anno.tcl cachegrind.out.* >cout-$NAME.txt
./speedtest1 --explain $SPEEDTEST_OPTS | ./sqlite3 >explain-$NAME.txt

File diff suppressed because it is too large Load Diff

View File

@ -1,138 +0,0 @@
/*
** A utility for printing an SQLite database journal.
*/
#include <stdio.h>
#include <ctype.h>
#include <stdlib.h>
#include <string.h>
/*
** state information
*/
static int pageSize = 1024;       /* Page size, from the journal header */
static int sectorSize = 512;      /* Sector size, from the journal header */
static FILE *db = 0;              /* The open journal file */
static int fileSize = 0;          /* Total size of the journal, in bytes */
static unsigned cksumNonce = 0;   /* Checksum nonce from the journal header */
/*
** Print an allocation-failure diagnostic on stderr and terminate the
** program with a failing exit status.  Never returns.
*/
static void out_of_memory(void){
  fputs("Out of memory...\n", stderr);
  exit(1);
}
/*
** Read N bytes starting at file offset iOfst into space obtained from
** malloc().  Bytes that cannot be read are zero-filled and a
** diagnostic is printed.  The caller must free() the result.
*/
static unsigned char *read_content(int N, int iOfst){
  size_t got;
  unsigned char *pBuf = malloc(N);
  if( pBuf==0 ) out_of_memory();
  fseek(db, iOfst, SEEK_SET);
  got = fread(pBuf, 1, N, db);
  /* Bug fix: fread() returns a size_t and can never be negative, so
  ** the old "got<0" I/O-error branch was unreachable.  Use ferror()
  ** to detect a genuine read error instead. */
  if( ferror(db) ){
    fprintf(stderr, "I/O error reading %d bytes from %d\n", N, iOfst);
    memset(pBuf, 0, N);
  }else if( got<(size_t)N ){
    fprintf(stderr, "Short read: got only %d of %d bytes from %d\n",
                    (int)got, N, iOfst);
    memset(&pBuf[got], 0, N-got);
  }
  return pBuf;
}
/* Print one line of decode output: the offset, up to four bytes in
** hex, the corresponding big-endian integer value, and zMsg.  Returns
** the decoded integer.
*/
static unsigned print_decode_line(
  const unsigned char *aData,      /* Content being decoded */
  int ofst, int nByte,             /* Start and size of decode */
  const char *zMsg                 /* Message to append */
){
  int n, k;
  unsigned val = aData[ofst];
  char zLine[100];
  sprintf(zLine, " %05x: %02x", ofst, aData[ofst]);
  n = (int)strlen(zLine);
  for(k=1; k<4; k++){
    if( k<nByte ){
      val = val*256 + aData[ofst+k];
      sprintf(&zLine[n], " %02x", aData[ofst+k]);
    }else{
      sprintf(&zLine[n], " ");
    }
    n += (int)strlen(&zLine[n]);
  }
  sprintf(&zLine[n], " %10u", val);
  printf("%s %s\n", zLine, zMsg);
  return val;
}
/*
** Read and print a journal header.  Store key information (page size,
** sector size, checksum nonce) in the global variables.  Returns the
** page count recorded in the header; the caller treats 0 as "no count
** recorded".
*/
static unsigned decode_journal_header(int iOfst){
  unsigned char *pHdr = read_content(64, iOfst);
  unsigned nPage;
  printf("Header at offset %d:\n", iOfst);
  /* The first 8 bytes hold the journal magic number */
  print_decode_line(pHdr, 0, 4, "Header part 1 (3654616569)");
  print_decode_line(pHdr, 4, 4, "Header part 2 (547447767)");
  nPage =
    print_decode_line(pHdr, 8, 4, "page count");
  cksumNonce =
    print_decode_line(pHdr, 12, 4, "chksum nonce");
  print_decode_line(pHdr, 16, 4, "initial database size in pages");
  sectorSize =
    print_decode_line(pHdr, 20, 4, "sector size");
  pageSize =
    print_decode_line(pHdr, 24, 4, "page size");
  print_decode_line(pHdr, 28, 4, "zero");
  print_decode_line(pHdr, 32, 4, "zero");
  print_decode_line(pHdr, 36, 4, "zero");
  print_decode_line(pHdr, 40, 4, "zero");
  free(pHdr);
  return nPage;
}
/*
** Print the page number of the journal record at file offset iOfst.
*/
static void print_page(int iOfst){
  unsigned char *aData;
  char zTitle[50];
  aData = read_content(pageSize+8, iOfst);
  sprintf(zTitle, "page number for page at offset %d", iOfst);
  /* Pass aData-iOfst so print_decode_line's ofst parameter can be the
  ** absolute file offset while still indexing the start of aData.
  ** NOTE(review): forming aData-iOfst is technically out-of-bounds
  ** pointer arithmetic; it works in practice but is worth confirming. */
  print_decode_line(aData-iOfst, iOfst, 4, zTitle);
  free(aData);
}
/*
** showjournal FILENAME
**
** Print a decoding of an SQLite rollback journal.  The file is a
** sequence of segments, each starting with a sector-aligned journal
** header followed by page records of pageSize+8 bytes (4-byte page
** number, page image, 4-byte checksum).
*/
int main(int argc, char **argv){
  int cnt;
  int iOfst;
  if( argc!=2 ){
    fprintf(stderr,"Usage: %s FILENAME\n", argv[0]);
    exit(1);
  }
  db = fopen(argv[1], "rb");
  if( db==0 ){
    fprintf(stderr,"%s: can't open %s\n", argv[0], argv[1]);
    exit(1);
  }
  fseek(db, 0, SEEK_END);
  fileSize = ftell(db);
  printf("journal file size: %d bytes\n", fileSize);
  fseek(db, 0, SEEK_SET);
  iOfst = 0;
  while( iOfst<fileSize ){
    cnt = (int)decode_journal_header(iOfst);
    /* A zero record count means "use all records that fit" */
    if( cnt==0 ){
      cnt = (fileSize - sectorSize)/(pageSize+8);
    }
    iOfst += sectorSize;
    /* Bug fix: cnt was never decremented, so the record count from
    ** the header was ignored and a multi-segment journal would be
    ** read straight through its next header.  Count down one record
    ** per page printed; a header count of -1 (0xffffffff) still reads
    ** to end-of-file as before. */
    while( cnt && iOfst<fileSize ){
      print_page(iOfst);
      cnt--;
      iOfst += pageSize+8;
    }
    /* Advance to the next sector boundary, where any next header begins */
    iOfst = (iOfst/sectorSize + 1)*sectorSize;
  }
  fclose(db);
  return 0;
}

View File

@ -1,64 +0,0 @@
/*
** This file implements a simple command-line utility that shows all of the
** Posix Advisory Locks on a file.
**
** Usage:
**
** showlocks FILENAME
**
** To compile: gcc -o showlocks showlocks.c
*/
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
/* This utility only looks for locks in the first 2 billion bytes */
#define MX_LCK 2147483647
/*
** Print all locks on the inode of "fd" that occur in between
** lwr and upr, inclusive.
**
** F_GETLK reports one lock that would block a write-lock over the
** probed range; when one is found it is printed, then the regions
** before and after it are searched recursively.  Returns the number
** of locks reported.
**
** NOTE(review): the second recursion starts at l_start+l_len+1, which
** appears to skip the byte at l_start+l_len, and l_len==0 ("to end of
** file" in POSIX) is not special-cased -- confirm before relying on
** exhaustive output.
*/
static int showLocksInRange(int fd, off_t lwr, off_t upr){
  int cnt = 0;
  struct flock x;

  /* Probe for any lock overlapping [lwr, lwr+(upr-lwr)) */
  x.l_type = F_WRLCK;
  x.l_whence = SEEK_SET;
  x.l_start = lwr;
  x.l_len = upr-lwr;
  fcntl(fd, F_GETLK, &x);
  if( x.l_type==F_UNLCK ) return 0;
  printf("start: %-12d len: %-5d pid: %-5d type: %s\n",
     (int)x.l_start, (int)x.l_len,
     x.l_pid, x.l_type==F_WRLCK ? "WRLCK" : "RDLCK");
  cnt++;
  if( x.l_start>lwr ){
    cnt += showLocksInRange(fd, lwr, x.l_start-1);
  }
  if( x.l_start+x.l_len<upr ){
    cnt += showLocksInRange(fd, x.l_start+x.l_len+1, upr);
  }
  return cnt;
}
/*
** Command-line entry point: showlocks FILENAME
**
** Opens FILENAME read/write and reports every POSIX advisory lock
** held on it within the first MX_LCK bytes.  Returns 0 on success,
** 1 on a usage or open error.
*/
int main(int argc, char **argv){
  int fd, nLock;
  if( argc!=2 ){
    fprintf(stderr, "Usage: %s FILENAME\n", argv[0]);
    return 1;
  }
  fd = open(argv[1], O_RDWR, 0);
  if( fd<0 ){
    fprintf(stderr, "%s: cannot open %s\n", argv[0], argv[1]);
    return 1;
  }
  nLock = showLocksInRange(fd, 0, MX_LCK);
  if( nLock==0 ){
    printf("no locks\n");
  }
  close(fd);
  return 0;
}

View File

@ -1,160 +0,0 @@
/*
** This utility program decodes and displays the content of the
** sqlite_stat4 table in the database file named on the command
** line.
*/
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <ctype.h>
#include "sqlite3.h"
#define ISPRINT(X) isprint((unsigned char)(X))
typedef sqlite3_int64 i64; /* 64-bit signed integer type */
/*
** Decode an SQLite variable-length integer from z[] into *pVal and
** return the number of bytes consumed (1..9).  Each of the first
** eight bytes contributes 7 bits (high bit set means "continue"); a
** ninth byte, when present, contributes all 8 of its bits.
*/
static int decodeVarint(const unsigned char *z, i64 *pVal){
  int nUsed = 0;
  i64 value = 0;
  while( nUsed<8 ){
    unsigned char c = z[nUsed++];
    value = (value<<7) + (c & 0x7f);
    if( (c & 0x80)==0 ){
      *pVal = value;
      return nUsed;
    }
  }
  *pVal = (value<<8) + (z[8] & 0xff);
  return 9;
}
/*
** Entry point: showstat4 DATABASE-FILE
**
** Reads every row of sqlite_stat4 and, for each sample, prints the
** statistics columns followed by a raw hex dump of the sample record
** and a human-readable decoding of each field within it.
*/
int main(int argc, char **argv){
  sqlite3 *db;
  sqlite3_stmt *pStmt;
  char *zIdx = 0;                /* Index name from the previous row */
  int rc, j, x, y, mxHdr;
  const unsigned char *aSample;  /* Raw sample record */
  int nSample;                   /* Size of aSample[] in bytes */
  i64 iVal;
  const char *zSep;              /* Separator before the next value */
  int iRow = 0;                  /* Sample number within current index */
  if( argc!=2 ){
    fprintf(stderr, "Usage: %s DATABASE-FILE\n", argv[0]);
    exit(1);
  }
  rc = sqlite3_open(argv[1], &db);
  if( rc!=SQLITE_OK || db==0 ){
    fprintf(stderr, "Cannot open database file [%s]\n", argv[1]);
    exit(1);
  }
  rc = sqlite3_prepare_v2(db,
        "SELECT tbl||'.'||idx, nEq, nLT, nDLt, sample "
        "FROM sqlite_stat4 ORDER BY 1", -1,
        &pStmt, 0);
  if( rc!=SQLITE_OK || pStmt==0 ){
    fprintf(stderr, "%s\n", sqlite3_errmsg(db));
    sqlite3_close(db);
    exit(1);
  }
  while( SQLITE_ROW==sqlite3_step(pStmt) ){
    /* Start a new section (and reset the counter) when the index
    ** name changes */
    if( zIdx==0 || strcmp(zIdx, (const char*)sqlite3_column_text(pStmt,0))!=0 ){
      if( zIdx ) printf("\n**************************************"
                        "**************\n\n");
      sqlite3_free(zIdx);
      zIdx = sqlite3_mprintf("%s", sqlite3_column_text(pStmt,0));
      iRow = 0;
    }
    printf("%s sample %d ------------------------------------\n", zIdx, ++iRow);
    printf(" nEq = %s\n", sqlite3_column_text(pStmt,1));
    printf(" nLt = %s\n", sqlite3_column_text(pStmt,2));
    printf(" nDLt = %s\n", sqlite3_column_text(pStmt,3));
    printf(" sample = x'");
    aSample = sqlite3_column_blob(pStmt,4);
    nSample = sqlite3_column_bytes(pStmt,4);
    for(j=0; j<nSample; j++) printf("%02x", aSample[j]);
    printf("'\n ");
    zSep = " ";
    /* The sample is an SQLite record: a varint header size, then one
    ** serial-type varint per field, then the field data */
    x = decodeVarint(aSample, &iVal);
    if( iVal<x || iVal>nSample ){
      printf(" <error>\n");
      continue;
    }
    y = mxHdr = (int)iVal;  /* y = data cursor; mxHdr = end of header */
    while( x<mxHdr ){
      int sz;
      i64 v;
      x += decodeVarint(aSample+x, &iVal);
      if( x>mxHdr ) break;
      if( iVal<0 ) break;
      /* Data size implied by the serial-type code */
      switch( iVal ){
        case 0: sz = 0; break;
        case 1: sz = 1; break;
        case 2: sz = 2; break;
        case 3: sz = 3; break;
        case 4: sz = 4; break;
        case 5: sz = 6; break;
        case 6: sz = 8; break;
        case 7: sz = 8; break;
        case 8: sz = 0; break;
        case 9: sz = 0; break;
        case 10:
        case 11: sz = 0; break;
        default: sz = (int)(iVal-12)/2; break;
      }
      if( y+sz>nSample ) break;
      if( iVal==0 ){
        /* Serial type 0 is a NULL */
        printf("%sNULL", zSep);
      }else if( iVal==8 || iVal==9 ){
        /* Serial types 8 and 9 encode the constants 0 and 1 */
        printf("%s%d", zSep, ((int)iVal)-8);
      }else if( iVal<=7 ){
        /* Big-endian signed integer of sz bytes; type 7 reinterprets
        ** the 8 bytes as an IEEE double */
        v = (signed char)aSample[y];
        for(j=1; j<sz; j++){
          v = (v<<8) + aSample[y+j];
        }
        if( iVal==7 ){
          double r;
          memcpy(&r, &v, sizeof(r));
          printf("%s%#g", zSep, r);
        }else{
          printf("%s%lld", zSep, v);
        }
      }else if( (iVal&1)==0 ){
        /* Even serial types >= 12 are BLOBs: dump as hex */
        printf("%sx'", zSep);
        for(j=0; j<sz; j++){
          printf("%02x", aSample[y+j]);
        }
        printf("'");
      }else{
        /* Odd serial types >= 13 are text: print with C-style escapes */
        printf("%s\"", zSep);
        for(j=0; j<sz; j++){
          char c = (char)aSample[y+j];
          if( ISPRINT(c) ){
            if( c=='"' || c=='\\' ) putchar('\\');
            putchar(c);
          }else if( c=='\n' ){
            printf("\\n");
          }else if( c=='\t' ){
            printf("\\t");
          }else if( c=='\r' ){
            printf("\\r");
          }else{
            printf("\\%03o", c);
          }
        }
        printf("\"");
      }
      zSep = ",";
      y += sz;
    }
    printf("\n");
  }
  sqlite3_free(zIdx);
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}

View File

@ -1,599 +0,0 @@
/*
** A utility for printing content from a write-ahead log file.
*/
#include <stdio.h>
#include <ctype.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#define ISDIGIT(X) isdigit((unsigned char)(X))
#define ISPRINT(X) isprint((unsigned char)(X))
#if !defined(_MSC_VER)
#include <unistd.h>
#else
#include <io.h>
#endif
#include <stdlib.h>
#include <string.h>
static int pagesize = 1024;     /* Size of a database page */
static int fd = -1;             /* File descriptor for reading the WAL file */
static int mxFrame = 0;         /* Last frame */
static int perLine = 16;        /* HEX elements to print per line */
typedef long long int i64;      /* Datatype for 64-bit integers */
/* Information for computing the checksum: a pair of running 32-bit
** sums (s0, s1) maintained by extendCksum(), plus a byte-order flag
** deduced from the WAL header. */
typedef struct Cksum Cksum;
struct Cksum {
  int bSwap;                    /* True to do byte swapping on 32-bit words */
  unsigned s0, s1;              /* Current checksum value */
};
/*
** Extract a 32-bit big-endian integer from the four bytes at a[].
*/
static unsigned int getInt32(const unsigned char *a){
  return ((unsigned int)a[0]<<24) | ((unsigned int)a[1]<<16)
       | ((unsigned int)a[2]<<8)  |  (unsigned int)a[3];
}
/*
** Reverse the byte order of a 32-bit unsigned integer.
*/
static unsigned int swab32(unsigned int x){
  /* Swap the 16-bit halves, then the bytes within each half */
  x = ((x & 0x0000FFFF)<<16) | ((x & 0xFFFF0000)>>16);
  x = ((x & 0x00FF00FF)<<8)  | ((x & 0xFF00FF00)>>8);
  return x;
}
/* Extend the checksum.  Reinitialize the checksum if bInit is true.
**
** When bInit is true, the byte-swap flag is derived from the WAL magic
** number in aData (0x377f0682 = little-endian words, 0x377f0683 =
** big-endian words) compared against the host byte order, and s0/s1 are
** reset to zero.
**
** NOTE(review): nByte is assumed to be a positive multiple of 8; the
** visible callers pass 8, 24, or pagesize.
*/
static void extendCksum(
  Cksum *pCksum,
  unsigned char *aData,
  unsigned int nByte,
  int bInit
){
  unsigned int *a32;
  if( bInit ){
    int a = 0;
    *((char*)&a) = 1;     /* Sets a==1 only on a little-endian host */
    if( a==1 ){
      /* Host is little-endian */
      pCksum->bSwap = getInt32(aData)!=0x377f0682;
    }else{
      /* Host is big-endian */
      pCksum->bSwap = getInt32(aData)!=0x377f0683;
    }
    pCksum->s0 = 0;
    pCksum->s1 = 0;
  }
  a32 = (unsigned int*)aData;
  while( nByte>0 ){
    /* Fold in one pair of 32-bit words per iteration */
    unsigned int x0 = a32[0];
    unsigned int x1 = a32[1];
    if( pCksum->bSwap ){
      x0 = swab32(x0);
      x1 = swab32(x1);
    }
    pCksum->s0 += x0 + pCksum->s1;
    pCksum->s1 += x1 + pCksum->s0;
    nByte -= 8;
    a32 += 2;
  }
}
/*
** Convert the var-int format into i64.  Return the number of bytes
** consumed (1 through 9).  Write the var-int value into *pVal.
*/
static int decodeVarint(const unsigned char *z, i64 *pVal){
  i64 v = 0;
  int n;
  /* The first eight bytes contribute 7 bits each; the high bit is the
  ** continuation flag. */
  for(n=0; n<8; n++){
    v = (v<<7) | (z[n] & 0x7f);
    if( (z[n] & 0x80)==0 ){
      *pVal = v;
      return n+1;
    }
  }
  /* A ninth byte contributes all 8 of its bits */
  *pVal = (v<<8) | z[8];
  return 9;
}
/* Report an out-of-memory error and die.
*/
static void out_of_memory(void){
  fputs("Out of memory...\n", stderr);
  exit(1);
}
/*
** Read nByte bytes of content from the WAL file starting at ofst.
**
** Space to hold the content is obtained from malloc() and needs to be
** freed by the caller.
**
** Fix: the original ignored the return value of read(), so a short or
** failed read handed uninitialized heap memory to the printing code.
** Any bytes not supplied by the file are now zero-filled.
*/
static unsigned char *getContent(int ofst, int nByte){
  unsigned char *aData;
  ssize_t got;                 /* Bytes actually read */
  aData = malloc(nByte);
  if( aData==0 ) out_of_memory();
  lseek(fd, ofst, SEEK_SET);
  got = read(fd, aData, nByte);
  if( got<0 ) got = 0;
  if( got<nByte ) memset(aData+got, 0, nByte-got);
  return aData;
}
/*
** Print a range of bytes as hex and as ascii.
**
** Fix: the end-of-buffer tests used "i+j>nByte", which printed
** aData[nByte] -- one byte past the end of the buffer -- on the final
** row whenever nByte is not a multiple of perLine (e.g. decoding page 1
** where nByte==pagesize-100).  Valid indices are 0..nByte-1, so the
** test must be "i+j>=nByte".
*/
static void print_byte_range(
  int ofst,              /* First byte in the range of bytes to print */
  int nByte,             /* Number of bytes to print */
  unsigned char *aData,  /* Content to print */
  int printOfst          /* Add this amount to the index on the left column */
){
  int i, j;
  const char *zOfstFmt;
  /* Choose an offset column just wide enough for the largest offset */
  if( ((printOfst+nByte)&~0xfff)==0 ){
    zOfstFmt = " %03x: ";
  }else if( ((printOfst+nByte)&~0xffff)==0 ){
    zOfstFmt = " %04x: ";
  }else if( ((printOfst+nByte)&~0xfffff)==0 ){
    zOfstFmt = " %05x: ";
  }else if( ((printOfst+nByte)&~0xffffff)==0 ){
    zOfstFmt = " %06x: ";
  }else{
    zOfstFmt = " %08x: ";
  }
  for(i=0; i<nByte; i += perLine){
    fprintf(stdout, zOfstFmt, i+printOfst);
    /* Hex column */
    for(j=0; j<perLine; j++){
      if( i+j>=nByte ){
        fprintf(stdout, " ");
      }else{
        fprintf(stdout,"%02x ", aData[i+j]);
      }
    }
    /* ASCII column; non-printable bytes shown as '.' */
    for(j=0; j<perLine; j++){
      if( i+j>=nByte ){
        fprintf(stdout, " ");
      }else{
        fprintf(stdout,"%c", ISPRINT(aData[i+j]) ? aData[i+j] : '.');
      }
    }
    fprintf(stdout,"\n");
  }
}
/* Print a line of decode output showing a 4-byte integer.
**
** Shows the offset, up to four raw bytes, the accumulated big-endian
** value (hex if asHex, else decimal), and the caller's message.
*/
static void print_decode_line(
  unsigned char *aData,      /* Content being decoded */
  int ofst, int nByte,       /* Start and size of decode */
  int asHex,                 /* If true, output value as hex */
  const char *zMsg           /* Message to append */
){
  int i, j;
  int val = aData[ofst];
  char zBuf[100];
  sprintf(zBuf, " %03x: %02x", ofst, aData[ofst]);
  i = (int)strlen(zBuf);
  for(j=1; j<4; j++){
    if( j>=nByte ){
      /* Fewer than 4 bytes requested: pad the column instead */
      sprintf(&zBuf[i], " ");
    }else{
      sprintf(&zBuf[i], " %02x", aData[ofst+j]);
      val = val*256 + aData[ofst+j];   /* Accumulate big-endian value */
    }
    i += (int)strlen(&zBuf[i]);
  }
  if( asHex ){
    sprintf(&zBuf[i], " 0x%08x", val);
  }else{
    sprintf(&zBuf[i], " %9d", val);
  }
  printf("%s %s\n", zBuf, zMsg);
}
/*
** Print an entire page of content as hex
**
** A WAL file is a 32-byte header followed by frames, each of which is a
** 24-byte frame header plus one database page.
*/
static void print_frame(int iFrame){
  int iStart;              /* Byte offset of frame iFrame in the file */
  unsigned char *aData;
  iStart = 32 + (iFrame-1)*(pagesize+24);
  fprintf(stdout, "Frame %d: (offsets 0x%x..0x%x)\n",
          iFrame, iStart, iStart+pagesize+24);
  aData = getContent(iStart, pagesize+24);
  /* Decode the 24-byte frame header ... */
  print_decode_line(aData, 0, 4, 0, "Page number");
  print_decode_line(aData, 4, 4, 0, "DB size, or 0 for non-commit");
  print_decode_line(aData, 8, 4, 1, "Salt-1");
  print_decode_line(aData,12, 4, 1, "Salt-2");
  print_decode_line(aData,16, 4, 1, "Checksum-1");
  print_decode_line(aData,20, 4, 1, "Checksum-2");
  /* ... then dump the page image itself */
  print_byte_range(iStart+24, pagesize, aData+24, 0);
  free(aData);
}
/*
** Summarize a single frame on a single line.
**
** Extends the running checksum over the frame header and page content
** and compares it with the checksum stored in the frame header.
**
** Fix: the page-content buffer returned by getContent() was passed
** straight into extendCksum() and never freed, leaking pagesize bytes
** for every frame summarized.
*/
static void print_oneline_frame(int iFrame, Cksum *pCksum){
  int iStart;                 /* Byte offset of the frame in the file */
  unsigned char *aData;       /* The 24-byte frame header */
  unsigned char *aPage;       /* The page content of the frame */
  unsigned int s0, s1;        /* Checksum values stored in the header */
  iStart = 32 + (iFrame-1)*(pagesize+24);
  aData = getContent(iStart, 24);
  extendCksum(pCksum, aData, 8, 0);
  aPage = getContent(iStart+24, pagesize);
  extendCksum(pCksum, aPage, pagesize, 0);
  free(aPage);
  s0 = getInt32(aData+16);
  s1 = getInt32(aData+20);
  fprintf(stdout, "Frame %4d: %6d %6d 0x%08x,%08x 0x%08x,%08x %s\n",
          iFrame,
          getInt32(aData),
          getInt32(aData+4),
          getInt32(aData+8),
          getInt32(aData+12),
          s0,
          s1,
          (s0==pCksum->s0 && s1==pCksum->s1) ? "" : "cksum-fail"
  );
  /* Reset the checksum so that a single frame checksum failure will not
  ** cause all subsequent frames to also show a failure. */
  pCksum->s0 = s0;
  pCksum->s1 = s1;
  free(aData);
}
/*
** Decode the WAL header.
**
** If pCksum is not NULL, it is initialized from the first 24 header
** bytes and the computed checksum is compared against the checksum
** values stored in the header.
*/
static void print_wal_header(Cksum *pCksum){
  unsigned char *aData;
  aData = getContent(0, 32);
  if( pCksum ){
    /* Only the first 24 bytes of the header are checksummed */
    extendCksum(pCksum, aData, 24, 1);
    printf("Checksum byte order: %s\n", pCksum->bSwap ? "swapped" : "native");
  }
  printf("WAL Header:\n");
  print_decode_line(aData, 0, 4,1,"Magic. 0x377f0682 (le) or 0x377f0683 (be)");
  print_decode_line(aData, 4, 4, 0, "File format");
  print_decode_line(aData, 8, 4, 0, "Database page size");
  print_decode_line(aData, 12,4, 0, "Checkpoint sequence number");
  print_decode_line(aData, 16,4, 1, "Salt-1");
  print_decode_line(aData, 20,4, 1, "Salt-2");
  print_decode_line(aData, 24,4, 1, "Checksum-1");
  print_decode_line(aData, 28,4, 1, "Checksum-2");
  if( pCksum ){
    if( pCksum->s0!=getInt32(aData+24) ){
      printf("**** cksum-1 mismatch: 0x%08x\n", pCksum->s0);
    }
    if( pCksum->s1!=getInt32(aData+28) ){
      printf("**** cksum-2 mismatch: 0x%08x\n", pCksum->s1);
    }
  }
  free(aData);
}
/*
** Describe cell content.
**
** Decodes a record: a header of serial-type varints followed by the
** data for each column.  One comma-separated token per column is
** appended to zDesc; the return value is the number of characters
** written.
**
** NOTE(review): output is not bounds-checked against the destination;
** describeCell() passes a fixed 1000-byte static buffer.
*/
static i64 describeContent(
  unsigned char *a,       /* Cell content */
  i64 nLocal,             /* Bytes in a[] */
  char *zDesc             /* Write description here */
){
  int nDesc = 0;
  int n, j;
  i64 i, x, v;
  const unsigned char *pData;
  const unsigned char *pLimit;
  char sep = ' ';
  pLimit = &a[nLocal];
  n = decodeVarint(a, &x);   /* x = total size of the record header */
  pData = &a[x];             /* Column data begins after the header */
  a += n;
  i = x - n;                 /* Header bytes (serial types) remaining */
  while( i>0 && pData<=pLimit ){
    n = decodeVarint(a, &x); /* x = serial type of the next column */
    a += n;
    i -= n;
    nLocal -= n;
    zDesc[0] = sep;
    sep = ',';
    nDesc++;
    zDesc++;
    if( x==0 ){
      sprintf(zDesc, "*");     /* NULL is a "*" */
    }else if( x>=1 && x<=6 ){
      /* Serial types 1..6 are big-endian signed integers.  The switch
      ** cases fall through on purpose, consuming the remaining bytes of
      ** the wider encodings. */
      v = (signed char)pData[0];
      pData++;
      switch( x ){
        case 6: v = (v<<16) + (pData[0]<<8) + pData[1]; pData += 2;
        case 5: v = (v<<16) + (pData[0]<<8) + pData[1]; pData += 2;
        case 4: v = (v<<8) + pData[0]; pData++;
        case 3: v = (v<<8) + pData[0]; pData++;
        case 2: v = (v<<8) + pData[0]; pData++;
      }
      sprintf(zDesc, "%lld", v);
    }else if( x==7 ){
      sprintf(zDesc, "real");  /* 8-byte floating point */
      pData += 8;
    }else if( x==8 ){
      sprintf(zDesc, "0");     /* Literal integer 0 */
    }else if( x==9 ){
      sprintf(zDesc, "1");     /* Literal integer 1 */
    }else if( x>=12 ){
      /* Even types are blobs, odd types are text; length is (x-12)/2 */
      i64 size = (x-12)/2;
      if( (x&1)==0 ){
        sprintf(zDesc, "blob(%lld)", size);
      }else{
        sprintf(zDesc, "txt(%lld)", size);
      }
      pData += size;
    }
    j = (int)strlen(zDesc);
    zDesc += j;
    nDesc += j;
  }
  return nDesc;
}
/*
** Compute the local payload size given the total payload size and
** the page type.  cType==13 is a table leaf; every other page type
** uses the index-page limits.
*/
static i64 localPayload(i64 nPayload, char cType){
  i64 maxLocal;       /* Largest payload stored entirely on the page */
  i64 minLocal;       /* Smallest local part when spilling to overflow */
  i64 nLocal;

  if( cType==13 ){
    /* Table leaf */
    maxLocal = pagesize-35;
  }else{
    maxLocal = (pagesize-12)*64/255-23;
  }
  minLocal = (pagesize-12)*32/255-23;
  if( nPayload<=maxLocal ){
    /* The whole payload fits on the page */
    nLocal = nPayload;
  }else{
    /* Spill to overflow pages, keeping the local part within bounds */
    i64 surplus = minLocal + (nPayload-minLocal)%(pagesize-4);
    nLocal = (surplus<=maxLocal) ? surplus : minLocal;
  }
  return nLocal;
}
/*
** Create a description for a single cell.
**
** The return value is the local cell size.
**
** The description is written into a static buffer, so the string left
** in *pzDesc is only valid until the next call.
*/
static i64 describeCell(
  unsigned char cType,    /* Page type */
  unsigned char *a,       /* Cell content */
  int showCellContent,    /* Show cell content if true */
  char **pzDesc           /* Store description here */
){
  int i;
  i64 nDesc = 0;          /* Characters written to zDesc so far */
  int n = 0;              /* Cell bytes consumed, excluding local payload */
  int leftChild;
  i64 nPayload;
  i64 rowid;
  i64 nLocal;
  static char zDesc[1000];
  i = 0;
  if( cType<=5 ){
    /* Interior pages (types 2 and 5) start with a 4-byte child pointer */
    leftChild = ((a[0]*256 + a[1])*256 + a[2])*256 + a[3];
    a += 4;
    n += 4;
    sprintf(zDesc, "lx: %d ", leftChild);
    nDesc = strlen(zDesc);
  }
  if( cType!=5 ){
    /* Every cell type except table-interior carries a payload size */
    i = decodeVarint(a, &nPayload);
    a += i;
    n += i;
    sprintf(&zDesc[nDesc], "n: %lld ", nPayload);
    nDesc += strlen(&zDesc[nDesc]);
    nLocal = localPayload(nPayload, cType);
  }else{
    nPayload = nLocal = 0;
  }
  if( cType==5 || cType==13 ){
    /* Table b-tree cells carry a rowid varint */
    i = decodeVarint(a, &rowid);
    a += i;
    n += i;
    sprintf(&zDesc[nDesc], "r: %lld ", rowid);
    nDesc += strlen(&zDesc[nDesc]);
  }
  if( nLocal<nPayload ){
    /* Payload spills to overflow pages; the first overflow page number
    ** sits in the 4 bytes just after the local payload. */
    int ovfl;
    unsigned char *b = &a[nLocal];
    ovfl = ((b[0]*256 + b[1])*256 + b[2])*256 + b[3];
    sprintf(&zDesc[nDesc], "ov: %d ", ovfl);
    nDesc += strlen(&zDesc[nDesc]);
    n += 4;
  }
  if( showCellContent && cType!=5 ){
    /* Start at nDesc-1 so the content's leading separator overwrites
    ** the trailing space already in the buffer. */
    nDesc += describeContent(a, nLocal, &zDesc[nDesc-1]);
  }
  *pzDesc = zDesc;
  return nLocal+n;
}
/*
** Decode a btree page
**
** zArgs is the string of single-character flags that followed the "b"
** in the command-line argument: 'c' shows cell content, 'm' prints a
** byte-usage map of the page.
*/
static void decode_btree_page(
  unsigned char *a,       /* Content of the btree page to be decoded */
  int pgno,               /* Page number */
  int hdrSize,            /* Size of the page1-header in bytes */
  const char *zArgs       /* Flags to control formatting */
){
  const char *zType = "unknown";
  int nCell;              /* Number of cells on the page */
  int i, j;
  int iCellPtr;           /* Offset of the cell-pointer array */
  int showCellContent = 0;
  int showMap = 0;
  char *zMap = 0;         /* Usage map, one char per page byte */
  switch( a[0] ){
    case 2: zType = "index interior node"; break;
    case 5: zType = "table interior node"; break;
    case 10: zType = "index leaf"; break;
    case 13: zType = "table leaf"; break;
  }
  while( zArgs[0] ){
    switch( zArgs[0] ){
      case 'c': showCellContent = 1; break;
      case 'm': showMap = 1; break;
    }
    zArgs++;
  }
  printf("Decode of btree page %d:\n", pgno);
  print_decode_line(a, 0, 1, 0, zType);
  print_decode_line(a, 1, 2, 0, "Offset to first freeblock");
  print_decode_line(a, 3, 2, 0, "Number of cells on this page");
  nCell = a[3]*256 + a[4];
  print_decode_line(a, 5, 2, 0, "Offset to cell content area");
  print_decode_line(a, 7, 1, 0, "Fragmented byte count");
  if( a[0]==2 || a[0]==5 ){
    /* Interior pages have a 12-byte header ending in the right child */
    print_decode_line(a, 8, 4, 0, "Right child");
    iCellPtr = 12;
  }else{
    iCellPtr = 8;
  }
  if( nCell>0 ){
    printf(" key: lx=left-child n=payload-size r=rowid\n");
  }
  if( showMap ){
    /* '1' = page-1 header, 'H' = btree header, 'P' = cell pointers,
    ** '*'/'['/']' = cell content, '.' = unused.
    ** NOTE(review): the malloc() result is not checked here. */
    zMap = malloc(pagesize);
    memset(zMap, '.', pagesize);
    memset(zMap, '1', hdrSize);
    memset(&zMap[hdrSize], 'H', iCellPtr);
    memset(&zMap[hdrSize+iCellPtr], 'P', 2*nCell);
  }
  for(i=0; i<nCell; i++){
    int cofst = iCellPtr + i*2;
    char *zDesc;
    i64 n;
    cofst = a[cofst]*256 + a[cofst+1];    /* Cell offset within the page */
    n = describeCell(a[0], &a[cofst-hdrSize], showCellContent, &zDesc);
    if( showMap ){
      char zBuf[30];
      memset(&zMap[cofst], '*', (size_t)n);
      zMap[cofst] = '[';
      zMap[cofst+n-1] = ']';
      sprintf(zBuf, "%d", i);
      j = (int)strlen(zBuf);
      /* Write the cell number inside the brackets when it fits */
      if( j<=n-2 ) memcpy(&zMap[cofst+1], zBuf, j);
    }
    printf(" %03x: cell[%d] %s\n", cofst, i, zDesc);
  }
  if( showMap ){
    for(i=0; i<pagesize; i+=64){
      printf(" %03x: %.64s\n", i, &zMap[i]);
    }
    free(zMap);
  }
}
/*
** Command-line entry point:  showwal FILENAME ?PAGE? ...
**
** With no page arguments, print the WAL header plus a one-line summary
** of every frame.  Each page argument may be:
**    N           print frame N in full
**    N..M        print frames N through M
**    N..end      print frames N through the last frame
**    Nb<flags>   decode the page in frame N as a btree page
**    header      print the WAL header only
**
** Fixes: zPgSz[2] and zPgSz[3] were never initialized (only the first
** two bytes were zeroed), so a short read() -- whose return value was
** ignored -- could yield a garbage page size.  Dead stores to ofst in
** the 'b' branch were removed; the value was always recomputed below.
*/
int main(int argc, char **argv){
  struct stat sbuf;              /* Used to learn the WAL file size */
  unsigned char zPgSz[4];        /* Big-endian page size from the header */
  if( argc<2 ){
    fprintf(stderr,"Usage: %s FILENAME ?PAGE? ...\n", argv[0]);
    exit(1);
  }
  fd = open(argv[1], O_RDONLY);
  if( fd<0 ){
    fprintf(stderr,"%s: can't open %s\n", argv[0], argv[1]);
    exit(1);
  }
  /* The page size is a 32-bit big-endian value at byte offset 8 of the
  ** WAL header.  Zero the whole buffer and discard any partial read so
  ** a truncated file falls back to the 1024-byte default below. */
  memset(zPgSz, 0, sizeof(zPgSz));
  lseek(fd, 8, SEEK_SET);
  if( read(fd, zPgSz, 4)!=4 ){
    memset(zPgSz, 0, sizeof(zPgSz));
  }
  pagesize = zPgSz[1]*65536 + zPgSz[2]*256 + zPgSz[3];
  if( pagesize==0 ) pagesize = 1024;
  printf("Pagesize: %d\n", pagesize);
  fstat(fd, &sbuf);
  if( sbuf.st_size<32 ){
    printf("file too small to be a WAL\n");
    return 0;
  }
  mxFrame = (sbuf.st_size - 32)/(pagesize + 24);
  printf("Available pages: 1..%d\n", mxFrame);
  if( argc==2 ){
    /* No page arguments: summarize the header and every frame */
    int i;
    Cksum x;
    print_wal_header(&x);
    for(i=1; i<=mxFrame; i++){
      print_oneline_frame(i, &x);
    }
  }else{
    int i;
    for(i=2; i<argc; i++){
      int iStart, iEnd;          /* Range of frames to print */
      char *zLeft;               /* Unparsed tail of the argument */
      if( strcmp(argv[i], "header")==0 ){
        print_wal_header(0);
        continue;
      }
      if( !ISDIGIT(argv[i][0]) ){
        fprintf(stderr, "%s: unknown option: [%s]\n", argv[0], argv[i]);
        continue;
      }
      iStart = strtol(argv[i], &zLeft, 0);
      if( zLeft && strcmp(zLeft,"..end")==0 ){
        iEnd = mxFrame;
      }else if( zLeft && zLeft[0]=='.' && zLeft[1]=='.' ){
        iEnd = strtol(&zLeft[2], 0, 0);
      }else if( zLeft && zLeft[0]=='b' ){
        /* Decode the page carried by frame iStart as a btree page.
        ** Page 1 starts with the 100-byte database header. */
        int ofst, nByte, hdrSize;
        unsigned char *a;
        if( iStart==1 ){
          hdrSize = 100;
          nByte = pagesize-100;
        }else{
          hdrSize = 0;
          nByte = pagesize;
        }
        /* 32-byte WAL header + preceding frames + 24-byte frame header
        ** + the database header on page 1 */
        ofst = 32 + hdrSize + (iStart-1)*(pagesize+24) + 24;
        a = getContent(ofst, nByte);
        decode_btree_page(a, iStart, hdrSize, zLeft+1);
        free(a);
        continue;
      }else{
        iEnd = iStart;
      }
      if( iStart<1 || iEnd<iStart || iEnd>mxFrame ){
        fprintf(stderr,
          "Page argument should be LOWER?..UPPER?. Range 1 to %d\n",
          mxFrame);
        exit(1);
      }
      while( iStart<=iEnd ){
        print_frame(iStart);
        iStart++;
      }
    }
  }
  close(fd);
  return 0;
}

View File

@ -1,103 +0,0 @@
#!/usr/bin/tclsh
#
# Usage:
#
# tclsh soak1.tcl local-makefile.mk ?target? ?scenario?
#
# This generates many variations on local-makefile.mk (by modifing
# the OPT = lines) and runs them will fulltest, one by one. The
# constructed makefiles are named "soak1.mk".
#
# If ?target? is provided, that is the makefile target that is run.
# The default is "fulltest"
#
# If ?scenario? is provided, it is the name of a single scenario to
# be run. All other scenarios are skipped.
#
# Parse the command line: the local makefile, an optional make target
# (default "fulltest"), and an optional scenario selector (default "all").
set localmake [lindex $argv 0]
set target [lindex $argv 1]
set scene [lindex $argv 2]
if {$target==""} {set target fulltest}
if {$scene==""} {set scene all}
# Read the makefile and join backslash-continued lines so each logical
# makefile line becomes one element when split on \n later.
set in [open $localmake]
set maketxt [read $in]
close $in
regsub -all {\\\n} $maketxt {} maketxt
#set makefilename "soak1-[expr {int(rand()*1000000000)}].mk"
set makefilename "soak1.mk"
# Generate a makefile
#
# Write $makefilename: a copy of $maketxt in which the first OPTS-like
# line is replaced by OPTS settings built from $pattern (always starting
# with -DSQLITE_NO_SYNC=1) and all other OPTS lines are dropped.
proc generate_makefile {pattern} {
  global makefilename maketxt
  set out [open $makefilename w]
  set seen_opt 0
  foreach line [split $maketxt \n] {
    # Matches OPTS lines, including commented-out ones
    if {[regexp {^ *#? *OPTS[ =+]} $line]} {
      if {!$seen_opt} {
        puts $out "OPTS += -DSQLITE_NO_SYNC=1"
        foreach x $pattern {
          puts $out "OPTS += -D$x"
        }
        set seen_opt 1
      }
    } else {
      puts $out $line
    }
  }
  close $out
}
# Run a test
#
# Build the makefile for $pattern, then run "make clean" followed by
# "make $target", streaming output to stdout.  The scenario is skipped
# unless $scene is "all" or matches this scenario's id or title.
proc scenario {id title pattern} {
  global makefilename target scene
  if {$scene!="all" && $scene!=$id && $scene!=$title} return
  puts "**************** $title ***************"
  generate_makefile $pattern
  exec make -f $makefilename clean >@stdout 2>@stdout
  exec make -f $makefilename $target >@stdout 2>@stdout
}
###############################################################################
# Add new scenarios here
#
# Each call supplies a scenario id, a title, and the list of OPTS
# defines handed to generate_makefile.
# Fix: scenario 2 listed SQLITE_ENABLE_MEMORY_MANAGEMENT=1 twice, which
# emitted a redundant duplicate OPTS line; the duplicate was removed.
#
scenario 0 {Default} {}
scenario 1 {Debug} {
  SQLITE_DEBUG=1
  SQLITE_MEMDEBUG=1
}
scenario 2 {Everything} {
  SQLITE_DEBUG=1
  SQLITE_MEMDEBUG=1
  SQLITE_ENABLE_MEMORY_MANAGEMENT=1
  SQLITE_ENABLE_COLUMN_METADATA=1
  SQLITE_ENABLE_LOAD_EXTENSION=1 HAVE_DLOPEN=1
}
scenario 3 {Customer-1} {
  SQLITE_DEBUG=1 SQLITE_MEMDEBUG=1
  SQLITE_THREADSAFE=1 SQLITE_OS_UNIX=1
  SQLITE_DISABLE_LFS=1
  SQLITE_DEFAULT_AUTOVACUUM=1
  SQLITE_DEFAULT_PAGE_SIZE=1024
  SQLITE_MAX_PAGE_SIZE=4096
  SQLITE_DEFAULT_CACHE_SIZE=64
  SQLITE_DEFAULT_TEMP_CACHE_SIZE=32
  SQLITE_TEMP_STORE=3
  SQLITE_OMIT_PROGRESS_CALLBACK=1
  SQLITE_OMIT_LOAD_EXTENSION=1
  SQLITE_OMIT_VIRTUALTABLE=1
  SQLITE_ENABLE_IOTRACE=1
}
scenario 4 {Small-Cache} {
  SQLITE_DEBUG=1 SQLITE_MEMDEBUG=1
  SQLITE_THREADSAFE=1 SQLITE_OS_UNIX=1
  SQLITE_DEFAULT_AUTOVACUUM=1
  SQLITE_DEFAULT_PAGE_SIZE=1024
  SQLITE_MAX_PAGE_SIZE=2048
  SQLITE_DEFAULT_CACHE_SIZE=13
  SQLITE_DEFAULT_TEMP_CACHE_SIZE=11
  SQLITE_TEMP_STORE=1
}

View File

@ -1,805 +0,0 @@
# Run this TCL script using "testfixture" in order get a report that shows
# how much disk space is used by a particular data to actually store data
# versus how much space is unused.
#
if {[catch {
# Argument $tname is the name of a table within the database opened by
# database handle [db]. Return true if it is a WITHOUT ROWID table, or
# false otherwise.
#
proc is_without_rowid {tname} {
  set t [string map {' ''} $tname]   ;# escape single quotes for SQL
  db eval "PRAGMA index_list = '$t'" o {
    # A WITHOUT ROWID table has a PRIMARY KEY index ("pk" origin) whose
    # b-tree does not appear as a named object in sqlite_master.
    if {$o(origin) == "pk"} {
      set n $o(name)
      if {0==[db one { SELECT count(*) FROM sqlite_master WHERE name=$n }]} {
        return 1
      }
    }
  }
  return 0
}
# Get the name of the database to analyze
#
# Print a usage message on stderr and terminate the process.
proc usage {} {
  set argv0 [file rootname [file tail [info nameofexecutable]]]
  puts stderr "Usage: $argv0 ?--pageinfo? ?--stats? database-filename"
  puts stderr {
Analyze the SQLite3 database file specified by the "database-filename"
argument and output a report detailing size and storage efficiency
information for the database and its constituent tables and indexes.
Options:
   --stats           Output SQL text that creates a new database containing
                     statistics about the database that was analyzed
   --pageinfo        Show how each page of the database-file is used
}
  exit 1
}
# Parse the command line: option flags plus exactly one database filename.
set file_to_analyze {}
set flags(-pageinfo) 0
set flags(-stats) 0
append argv {}
foreach arg $argv {
  if {[regexp {^-+pageinfo$} $arg]} {
    set flags(-pageinfo) 1
  } elseif {[regexp {^-+stats$} $arg]} {
    set flags(-stats) 1
  } elseif {[regexp {^-} $arg]} {
    puts stderr "Unknown option: $arg"
    usage
  } elseif {$file_to_analyze!=""} {
    usage
  } else {
    set file_to_analyze $arg
  }
}
if {$file_to_analyze==""} usage
# Strip an optional file: URI prefix to obtain the on-disk filename
set root_filename $file_to_analyze
regexp {^file:(//)?([^?]*)} $file_to_analyze all x1 root_filename
if {![file exists $root_filename]} {
  puts stderr "No such file: $root_filename"
  exit 1
}
if {![file readable $root_filename]} {
  puts stderr "File is not readable: $root_filename"
  exit 1
}
set true_file_size [file size $root_filename]
if {$true_file_size<512} {
  puts stderr "Empty or malformed database: $root_filename"
  exit 1
}
# Compute the total file size assuming test_multiplexor is being used.
# Assume that SQLITE_ENABLE_8_3_NAMES might be enabled
#
set extension [file extension $root_filename]
set pattern $root_filename
append pattern {[0-3][0-9][0-9]}
foreach f [glob -nocomplain $pattern] {
  incr true_file_size [file size $f]
  set extension {}
}
if {[string length $extension]>=2 && [string length $extension]<=4} {
  set pattern [file rootname $root_filename]
  append pattern {.[0-3][0-9][0-9]}
  foreach f [glob -nocomplain $pattern] {
    incr true_file_size [file size $f]
  }
}
# Open the database
#
if {[catch {sqlite3 db $file_to_analyze -uri 1} msg]} {
  puts stderr "error trying to open $file_to_analyze: $msg"
  exit 1
}
# Query sqlite_master once so the schema is loaded before pragmas run
db eval {SELECT count(*) FROM sqlite_master}
set pageSize [expr {wide([db one {PRAGMA page_size}])}]
if {$flags(-pageinfo)} {
  # --pageinfo: print one "pageno name path" line per page and exit
  db eval {CREATE VIRTUAL TABLE temp.stat USING dbstat}
  db eval {SELECT name, path, pageno FROM temp.stat ORDER BY pageno} {
    puts "$pageno $name $path"
  }
  exit 0
}
if {$flags(-stats)} {
  # --stats: emit SQL that recreates the raw dbstat data and exit
  db eval {CREATE VIRTUAL TABLE temp.stat USING dbstat}
  puts "BEGIN;"
  puts "CREATE TABLE stats("
  puts " name STRING, /* Name of table or index */"
  puts " path INTEGER, /* Path to page from root */"
  puts " pageno INTEGER, /* Page number */"
  puts " pagetype STRING, /* 'internal', 'leaf' or 'overflow' */"
  puts " ncell INTEGER, /* Cells on page (0 for overflow) */"
  puts " payload INTEGER, /* Bytes of payload on this page */"
  puts " unused INTEGER, /* Bytes of unused space on this page */"
  puts " mx_payload INTEGER, /* Largest payload size of all cells */"
  puts " pgoffset INTEGER, /* Offset of page in file */"
  puts " pgsize INTEGER /* Size of the page */"
  puts ");"
  db eval {SELECT quote(name) || ',' ||
           quote(path) || ',' ||
           quote(pageno) || ',' ||
           quote(pagetype) || ',' ||
           quote(ncell) || ',' ||
           quote(payload) || ',' ||
           quote(unused) || ',' ||
           quote(mx_payload) || ',' ||
           quote(pgoffset) || ',' ||
           quote(pgsize) AS x FROM stat} {
    puts "INSERT INTO stats VALUES($x);"
  }
  puts "COMMIT;"
  exit 0
}
# In-memory database for collecting statistics. This script loops through
# the tables and indices in the database being analyzed, adding a row for each
# to an in-memory database (for which the schema is shown below). It then
# queries the in-memory db to produce the space-analysis report.
#
sqlite3 mem :memory:
set tabledef {CREATE TABLE space_used(
   name clob,        -- Name of a table or index in the database file
   tblname clob,     -- Name of associated table
   is_index boolean, -- TRUE if it is an index, false for a table
   nentry int,       -- Number of entries in the BTree
   leaf_entries int, -- Number of leaf entries
   depth int,        -- Depth of the b-tree
   payload int,      -- Total amount of data stored in this table or index
   ovfl_payload int, -- Total amount of data stored on overflow pages
   ovfl_cnt int,     -- Number of entries that use overflow
   mx_payload int,   -- Maximum payload size
   int_pages int,    -- Number of interior pages used
   leaf_pages int,   -- Number of leaf pages used
   ovfl_pages int,   -- Number of overflow pages used
   int_unused int,   -- Number of unused bytes on interior pages
   leaf_unused int,  -- Number of unused bytes on primary pages
   ovfl_unused int,  -- Number of unused bytes on overflow pages
   gap_cnt int,      -- Number of gaps in the page layout
   compressed_size int -- Total bytes stored on disk
);}
mem eval $tabledef
# Create a temporary "dbstat" virtual table.
#
db eval {CREATE VIRTUAL TABLE temp.stat USING dbstat}
# Materialize it: dbstat rows are recomputed on every scan, so snapshot
# them once into a TEMP table ordered for the queries below.
db eval {CREATE TEMP TABLE dbstat AS SELECT * FROM temp.stat
         ORDER BY name, path}
db eval {DROP TABLE temp.stat}
set isCompressed 0
set compressOverhead 0
set depth 0
set sql { SELECT name, tbl_name FROM sqlite_master WHERE rootpage>0 }
# Gather statistics for sqlite_master itself plus every named btree,
# inserting one space_used row per table or index.
foreach {name tblname} [concat sqlite_master sqlite_master [db eval $sql]] {
  set is_index [expr {$name!=$tblname}]
  set idx_btree [expr {$is_index || [is_without_rowid $name]}]
  # Aggregate the dbstat rows for this btree directly into local
  # variables ($nentry, $payload, ...).  "break" stops after the single
  # aggregate row.
  db eval {
    SELECT
      sum(ncell) AS nentry,
      sum((pagetype=='leaf')*ncell) AS leaf_entries,
      sum(payload) AS payload,
      sum((pagetype=='overflow') * payload) AS ovfl_payload,
      sum(path LIKE '%+000000') AS ovfl_cnt,
      max(mx_payload) AS mx_payload,
      sum(pagetype=='internal') AS int_pages,
      sum(pagetype=='leaf') AS leaf_pages,
      sum(pagetype=='overflow') AS ovfl_pages,
      sum((pagetype=='internal') * unused) AS int_unused,
      sum((pagetype=='leaf') * unused) AS leaf_unused,
      sum((pagetype=='overflow') * unused) AS ovfl_unused,
      sum(pgsize) AS compressed_size,
      max((length(CASE WHEN path LIKE '%+%' THEN '' ELSE path END)+3)/4)
        AS depth
    FROM temp.dbstat WHERE name = $name
  } break
  set total_pages [expr {$leaf_pages+$int_pages+$ovfl_pages}]
  set storage [expr {$total_pages*$pageSize}]
  # If pgsize sums to less than the nominal storage, a compression layer
  # (e.g. zipvfs) must be in use.
  if {!$isCompressed && $storage>$compressed_size} {
    set isCompressed 1
    set compressOverhead 14
  }
  # Column 'gap_cnt' is set to the number of non-contiguous entries in the
  # list of pages visited if the b-tree structure is traversed in a top-down
  # fashion (each node visited before its child-tree is passed). Any overflow
  # chains present are traversed from start to finish before any child-tree
  # is.
  #
  set gap_cnt 0
  set prev 0
  db eval {
    SELECT pageno, pagetype FROM temp.dbstat
     WHERE name=$name
     ORDER BY pageno
  } {
    if {$prev>0 && $pagetype=="leaf" && $pageno!=$prev+1} {
      incr gap_cnt
    }
    set prev $pageno
  }
  mem eval {
    INSERT INTO space_used VALUES(
      $name,
      $tblname,
      $is_index,
      $nentry,
      $leaf_entries,
      $depth,
      $payload,
      $ovfl_payload,
      $ovfl_cnt,
      $mx_payload,
      $int_pages,
      $leaf_pages,
      $ovfl_pages,
      $int_unused,
      $leaf_unused,
      $ovfl_unused,
      $gap_cnt,
      $compressed_size
    );
  }
}
# Coerce a numeric value to a wide integer; non-numeric input yields 0.
proc integerify {real} {
  if {![string is double -strict $real]} {
    return 0
  }
  return [expr {wide($real)}]
}
# Expose [integerify] to the in-memory database as SQL function int()
mem function int integerify
# Escape a string for interpolation into an SQL literal.  Examples:
#
#     [quote {hello world}] == {hello world}
#     [quote {hello world's}] == {hello world''s}
#
# Note: only embedded single-quotes are doubled; the caller supplies the
# surrounding quotes.  (The previous comment wrongly showed the result
# wrapped in quotes.)
proc quote {txt} {
  return [string map {' ''} $txt]
}
# Output a title line
#
# An empty title prints a full 79-character row of stars; otherwise the
# title is framed as "*** <title> ****..." padded to 79 characters.
proc titleline {title} {
  if {$title eq ""} {
    puts [string repeat * 79]
    return
  }
  set pad [string repeat * [expr {79 - [string length $title] - 5}]]
  puts "*** $title $pad"
}
# Generate a single line of output in the statistics section of the
# report.
#
# $title is padded with dots out to column 50, followed by $value and
# the optional $extra annotation.
proc statline {title value {extra {}}} {
  set len [string length $title]
  set dots [string repeat . [expr 50-$len]]
  set len [string length $value]
  # Padding between the value and the extra annotation
  set sp2 [string range { } $len end]
  if {$extra ne ""} {
    set extra " $extra"
  }
  puts "$title$dots $value$sp2$extra"
}
# Generate a formatted percentage value for $num/$denom
#
# The optional $of argument is a descriptive suffix (e.g. "of storage
# consumed") appended after the percentage.
#
# Fix: a stray "set of {}" clobbered the caller-supplied $of argument,
# so the descriptive suffixes never appeared in the report.
proc percent {num denom {of {}}} {
  if {$denom==0.0} {return ""}
  set v [expr {$num*100.0/$denom}]
  if {$v==100.0 || $v<0.001 || ($v>1.0 && $v<99.0)} {
    return [format {%5.1f%% %s} $v $of]
  } elseif {$v<0.1 || $v>99.9} {
    # Very small or very large: show extra precision
    return [format {%7.3f%% %s} $v $of]
  } else {
    return [format {%6.2f%% %s} $v $of]
  }
}
# Format $num/$denom with two decimal places; dividing by zero gives 0.0.
proc divide {num denom} {
  if {$denom == 0} {return 0.0}
  return [format %.2f [expr {double($num)/double($denom)}]]
}
# Generate a subreport that covers some subset of the database.
# the $where clause determines which subset to analyze.
#
proc subreport {title where showFrag} {
  global pageSize file_pgcnt compressOverhead
  # Query the in-memory database for the sum of various statistics
  # for the subset of tables/indices identified by the WHERE clause in
  # $where. Note that even if the WHERE clause matches no rows, the
  # following query returns exactly one row (because it is an aggregate).
  #
  # The results of the query are stored directly by SQLite into local
  # variables (i.e. $nentry, $nleaf etc.).
  #
  mem eval "
    SELECT
      int(sum(nentry)) AS nentry,
      int(sum(leaf_entries)) AS nleaf,
      int(sum(payload)) AS payload,
      int(sum(ovfl_payload)) AS ovfl_payload,
      max(mx_payload) AS mx_payload,
      int(sum(ovfl_cnt)) as ovfl_cnt,
      int(sum(leaf_pages)) AS leaf_pages,
      int(sum(int_pages)) AS int_pages,
      int(sum(ovfl_pages)) AS ovfl_pages,
      int(sum(leaf_unused)) AS leaf_unused,
      int(sum(int_unused)) AS int_unused,
      int(sum(ovfl_unused)) AS ovfl_unused,
      int(sum(gap_cnt)) AS gap_cnt,
      int(sum(compressed_size)) AS compressed_size,
      int(max(depth)) AS depth,
      count(*) AS cnt
    FROM space_used WHERE $where" {} {}
  # Output the sub-report title, nicely decorated with * characters.
  #
  puts ""
  titleline $title
  puts ""
  # Calculate statistics and store the results in TCL variables, as follows:
  #
  # total_pages: Database pages consumed.
  # total_pages_percent: Pages consumed as a percentage of the file.
  # storage: Bytes consumed.
  # payload_percent: Payload bytes used as a percentage of $storage.
  # total_unused: Unused bytes on pages.
  # avg_payload: Average payload per btree entry.
  # avg_fanout: Average fanout for internal pages.
  # avg_unused: Average unused bytes per btree entry.
  # ovfl_cnt_percent: Percentage of btree entries that use overflow pages.
  #
  set total_pages [expr {$leaf_pages+$int_pages+$ovfl_pages}]
  set total_pages_percent [percent $total_pages $file_pgcnt]
  set storage [expr {$total_pages*$pageSize}]
  set payload_percent [percent $payload $storage {of storage consumed}]
  set total_unused [expr {$ovfl_unused+$int_unused+$leaf_unused}]
  set avg_payload [divide $payload $nleaf]
  set avg_unused [divide $total_unused $nleaf]
  if {$int_pages>0} {
    # TODO: Is this formula correct?
    set nTab [mem eval "
      SELECT count(*) FROM (
          SELECT DISTINCT tblname FROM space_used WHERE $where AND is_index=0
      )
    "]
    set avg_fanout [mem eval "
      SELECT (sum(leaf_pages+int_pages)-$nTab)/sum(int_pages) FROM space_used
          WHERE $where
    "]
    set avg_fanout [format %.2f $avg_fanout]
  }
  set ovfl_cnt_percent [percent $ovfl_cnt $nleaf {of all entries}]
  # Print out the sub-report statistics.
  #
  statline {Percentage of total database} $total_pages_percent
  statline {Number of entries} $nleaf
  statline {Bytes of storage consumed} $storage
  if {$compressed_size!=$storage} {
    set compressed_size [expr {$compressed_size+$compressOverhead*$total_pages}]
    set pct [expr {$compressed_size*100.0/$storage}]
    set pct [format {%5.1f%%} $pct]
    statline {Bytes used after compression} $compressed_size $pct
  }
  statline {Bytes of payload} $payload $payload_percent
  if {$cnt==1} {statline {B-tree depth} $depth}
  statline {Average payload per entry} $avg_payload
  statline {Average unused bytes per entry} $avg_unused
  if {[info exists avg_fanout]} {
    statline {Average fanout} $avg_fanout
  }
  if {$showFrag && $total_pages>1} {
    set fragmentation [percent $gap_cnt [expr {$total_pages-1}]]
    statline {Non-sequential pages} $gap_cnt $fragmentation
  }
  statline {Maximum payload per entry} $mx_payload
  statline {Entries that use overflow} $ovfl_cnt $ovfl_cnt_percent
  if {$int_pages>0} {
    statline {Index pages used} $int_pages
  }
  statline {Primary pages used} $leaf_pages
  statline {Overflow pages used} $ovfl_pages
  statline {Total pages used} $total_pages
  if {$int_unused>0} {
    set int_unused_percent [
         percent $int_unused [expr {$int_pages*$pageSize}] {of index space}]
    statline "Unused bytes on index pages" $int_unused $int_unused_percent
  }
  statline "Unused bytes on primary pages" $leaf_unused [
     percent $leaf_unused [expr {$leaf_pages*$pageSize}] {of primary space}]
  statline "Unused bytes on overflow pages" $ovfl_unused [
     percent $ovfl_unused [expr {$ovfl_pages*$pageSize}] {of overflow space}]
  statline "Unused bytes on all pages" $total_unused [
               percent $total_unused $storage {of all space}]
  return 1
}
# Calculate the overhead in pages caused by auto-vacuum.
#
# This procedure calculates and returns the number of pages used by the
# auto-vacuum 'pointer-map'. If the database does not support auto-vacuum,
# then 0 is returned. The two arguments are the size of the database file in
# pages and the page size used by the database (in bytes).
proc autovacuum_overhead {filePages pageSize} {
  # Set $autovacuum to non-zero for databases that support auto-vacuum.
  set autovacuum [db one {PRAGMA auto_vacuum}]
  # If the database is not an auto-vacuum database or the file consists
  # of one page only then there is no overhead for auto-vacuum. Return zero.
  if {0==$autovacuum || $filePages==1} {
    return 0
  }
  # The number of entries on each pointer map page. The layout of the
  # database file is one pointer-map page, followed by $ptrsPerPage other
  # pages, followed by a pointer-map page etc. The first pointer-map page
  # is the second page of the file overall.
  set ptrsPerPage [expr double($pageSize/5)]
  # Return the number of pointer map pages in the database.
  return [expr wide(ceil( ($filePages-1.0)/($ptrsPerPage+1.0) ))]
}
# Calculate the summary statistics for the database and store the results
# in TCL variables. They are output below. Variables are as follows:
#
# pageSize: Size of each page in bytes.
# file_bytes: File size in bytes.
# file_pgcnt: Number of pages in the file.
# file_pgcnt2: Number of pages in the file (calculated).
# av_pgcnt: Pages consumed by the auto-vacuum pointer-map.
# av_percent: Percentage of the file consumed by auto-vacuum pointer-map.
# inuse_pgcnt: Data pages in the file.
# inuse_percent: Percentage of pages used to store data.
# free_pgcnt: Free pages calculated as (<total pages> - <in-use pages>)
# free_pgcnt2: Free pages in the file according to the file header.
# free_percent: Percentage of file consumed by free pages (calculated).
# free_percent2: Percentage of file consumed by free pages (header).
# ntable: Number of tables in the db.
# nindex: Number of indices in the db.
# nautoindex: Number of indices created automatically.
# nmanindex: Number of indices created manually.
# user_payload: Number of bytes of payload in table btrees
# (not including sqlite_master)
# user_percent: $user_payload as a percentage of total file size.
### The following, setting $file_bytes based on the actual size of the file
### on disk, causes this tool to choke on zipvfs databases. So set it based
### on the return of [PRAGMA page_count] instead.
if 0 {
set file_bytes [file size $file_to_analyze]
set file_pgcnt [expr {$file_bytes/$pageSize}]
}
# Total page count is taken from the database itself rather than the file
# size on disk (see the note above about zipvfs).
set file_pgcnt [db one {PRAGMA page_count}]
set file_bytes [expr {$file_pgcnt * $pageSize}]
# Pages consumed by auto-vacuum bookkeeping, if auto-vacuum is enabled.
set av_pgcnt [autovacuum_overhead $file_pgcnt $pageSize]
set av_percent [percent $av_pgcnt $file_pgcnt]
# Pages that actually hold btree content, summed over the space_used table
# queried through the [mem] handle.  wide() forces 64-bit arithmetic.
set sql {SELECT sum(leaf_pages+int_pages+ovfl_pages) FROM space_used}
set inuse_pgcnt [expr wide([mem eval $sql])]
set inuse_percent [percent $inuse_pgcnt $file_pgcnt]
# Free-page count both as calculated (total - in-use - av overhead) and as
# recorded in the file header; the report prints both so a reader can spot
# a discrepancy.
set free_pgcnt [expr {$file_pgcnt-$inuse_pgcnt-$av_pgcnt}]
set free_percent [percent $free_pgcnt $file_pgcnt]
set free_pgcnt2 [db one {PRAGMA freelist_count}]
set free_percent2 [percent $free_pgcnt2 $file_pgcnt]
set file_pgcnt2 [expr {$inuse_pgcnt+$free_pgcnt2+$av_pgcnt}]
# Schema object counts.  The "+1" accounts for the sqlite_master table
# itself (see the "Number of tables" definition emitted later).
set ntable [db eval {SELECT count(*)+1 FROM sqlite_master WHERE type='table'}]
set nindex [db eval {SELECT count(*) FROM sqlite_master WHERE type='index'}]
set sql {SELECT count(*) FROM sqlite_master WHERE name LIKE 'sqlite_autoindex%'}
set nautoindex [db eval $sql]
set nmanindex [expr {$nindex-$nautoindex}]
# set total_payload [mem eval "SELECT sum(payload) FROM space_used"]
# User payload excludes index btrees and the sqlite_master table.
set user_payload [mem one {SELECT int(sum(payload)) FROM space_used
WHERE NOT is_index AND name NOT LIKE 'sqlite_master'}]
set user_percent [percent $user_payload $file_bytes]
# Output the summary statistics calculated above.
#
puts "/** Disk-Space Utilization Report For $root_filename"
puts ""
statline {Page size in bytes} $pageSize
statline {Pages in the whole file (measured)} $file_pgcnt
statline {Pages in the whole file (calculated)} $file_pgcnt2
statline {Pages that store data} $inuse_pgcnt $inuse_percent
statline {Pages on the freelist (per header)} $free_pgcnt2 $free_percent2
statline {Pages on the freelist (calculated)} $free_pgcnt $free_percent
statline {Pages of auto-vacuum overhead} $av_pgcnt $av_percent
statline {Number of tables in the database} $ntable
statline {Number of indices} $nindex
statline {Number of defined indices} $nmanindex
statline {Number of implied indices} $nautoindex
# For a compressed (zipvfs-style) database the logical size and the actual
# on-disk size differ; report both.  $isCompressed/$true_file_size are set
# earlier in this script.
if {$isCompressed} {
statline {Size of uncompressed content in bytes} $file_bytes
set efficiency [percent $true_file_size $file_bytes]
statline {Size of compressed file on disk} $true_file_size $efficiency
} else {
statline {Size of the file in bytes} $file_bytes
}
statline {Bytes of user payload stored} $user_payload $user_percent
# Output table rankings
#
# First grouped by table (a table together with all of its indices),
# then every table and index separately, largest first.
puts ""
titleline "Page counts for all tables with their indices"
puts ""
mem eval {SELECT tblname, count(*) AS cnt,
int(sum(int_pages+leaf_pages+ovfl_pages)) AS size
FROM space_used GROUP BY tblname ORDER BY size+0 DESC, tblname} {} {
statline [string toupper $tblname] $size [percent $size $file_pgcnt]
}
puts ""
titleline "Page counts for all tables and indices separately"
puts ""
mem eval {
SELECT
upper(name) AS nm,
int(int_pages+leaf_pages+ovfl_pages) AS size
FROM space_used
ORDER BY size+0 DESC, name} {} {
statline $nm $size [percent $size $file_pgcnt]
}
# Per-table compressed sizes; whatever is left over after summing the
# per-table figures is attributed to the header and free space.
if {$isCompressed} {
puts ""
titleline "Bytes of disk space used after compression"
puts ""
set csum 0
mem eval {SELECT tblname,
int(sum(compressed_size)) +
$compressOverhead*sum(int_pages+leaf_pages+ovfl_pages)
AS csize
FROM space_used GROUP BY tblname ORDER BY csize+0 DESC, tblname} {} {
incr csum $csize
statline [string toupper $tblname] $csize [percent $csize $true_file_size]
}
set overhead [expr {$true_file_size - $csum}]
if {$overhead>0} {
statline {Header and free space} $overhead [percent $overhead $true_file_size]
}
}
# Output subreports
#
# Aggregate views first, then a detailed subreport for every table and
# for each of its indices.  [quote] escapes names for use inside SQL
# string literals.
if {$nindex>0} {
subreport {All tables and indices} 1 0
}
subreport {All tables} {NOT is_index} 0
if {$nindex>0} {
subreport {All indices} {is_index} 0
}
foreach tbl [mem eval {SELECT DISTINCT tblname name FROM space_used
ORDER BY name}] {
set qn [quote $tbl]
set name [string toupper $tbl]
set n [mem eval {SELECT count(*) FROM space_used WHERE tblname=$tbl}]
if {$n>1} {
set idxlist [mem eval "SELECT name FROM space_used
WHERE tblname='$qn' AND is_index
ORDER BY 1"]
subreport "Table $name and all its indices" "tblname='$qn'" 0
subreport "Table $name w/o any indices" "name='$qn'" 1
if {[llength $idxlist]>1} {
subreport "Indices of table $name" "tblname='$qn' AND is_index" 0
}
foreach idx $idxlist {
set qidx [quote $idx]
subreport "Index [string toupper $idx] of table $name" "name='$qidx'" 1
}
} else {
subreport "Table $name" "name='$qn'" 1
}
}
# Output instructions on what the numbers above mean.
#
# NOTE(review): the glossary below is emitted verbatim as part of the
# report (it is runtime output, still inside the big SQL comment), so the
# text — including its existing typos — must not be reflowed or edited
# without changing the tool's output.
puts ""
titleline Definitions
puts {
Page size in bytes
The number of bytes in a single page of the database file.
Usually 1024.
Number of pages in the whole file
}
puts " The number of $pageSize-byte pages that go into forming the complete
database"
puts {
Pages that store data
The number of pages that store data, either as primary B*Tree pages or
as overflow pages. The number at the right is the data pages divided by
the total number of pages in the file.
Pages on the freelist
The number of pages that are not currently in use but are reserved for
future use. The percentage at the right is the number of freelist pages
divided by the total number of pages in the file.
Pages of auto-vacuum overhead
The number of pages that store data used by the database to facilitate
auto-vacuum. This is zero for databases that do not support auto-vacuum.
Number of tables in the database
The number of tables in the database, including the SQLITE_MASTER table
used to store schema information.
Number of indices
The total number of indices in the database.
Number of defined indices
The number of indices created using an explicit CREATE INDEX statement.
Number of implied indices
The number of indices used to implement PRIMARY KEY or UNIQUE constraints
on tables.
Size of the file in bytes
The total amount of disk space used by the entire database files.
Bytes of user payload stored
The total number of bytes of user payload stored in the database. The
schema information in the SQLITE_MASTER table is not counted when
computing this number. The percentage at the right shows the payload
divided by the total file size.
Percentage of total database
The amount of the complete database file that is devoted to storing
information described by this category.
Number of entries
The total number of B-Tree key/value pairs stored under this category.
Bytes of storage consumed
The total amount of disk space required to store all B-Tree entries
under this category. The is the total number of pages used times
the pages size.
Bytes of payload
The amount of payload stored under this category. Payload is the data
part of table entries and the key part of index entries. The percentage
at the right is the bytes of payload divided by the bytes of storage
consumed.
Average payload per entry
The average amount of payload on each entry. This is just the bytes of
payload divided by the number of entries.
Average unused bytes per entry
The average amount of free space remaining on all pages under this
category on a per-entry basis. This is the number of unused bytes on
all pages divided by the number of entries.
Non-sequential pages
The number of pages in the table or index that are out of sequence.
Many filesystems are optimized for sequential file access so a small
number of non-sequential pages might result in faster queries,
especially for larger database files that do not fit in the disk cache.
Note that after running VACUUM, the root page of each table or index is
at the beginning of the database file and all other pages are in a
separate part of the database file, resulting in a single non-
sequential page.
Maximum payload per entry
The largest payload size of any entry.
Entries that use overflow
The number of entries that user one or more overflow pages.
Total pages used
This is the number of pages used to hold all information in the current
category. This is the sum of index, primary, and overflow pages.
Index pages used
This is the number of pages in a table B-tree that hold only key (rowid)
information and no data.
Primary pages used
This is the number of B-tree pages that hold both key and data.
Overflow pages used
The total number of overflow pages used for this category.
Unused bytes on index pages
The total number of bytes of unused space on all index pages. The
percentage at the right is the number of unused bytes divided by the
total number of bytes on index pages.
Unused bytes on primary pages
The total number of bytes of unused space on all primary pages. The
percentage at the right is the number of unused bytes divided by the
total number of bytes on primary pages.
Unused bytes on overflow pages
The total number of bytes of unused space on all overflow pages. The
percentage at the right is the number of unused bytes divided by the
total number of bytes on overflow pages.
Unused bytes on all pages
The total number of bytes of unused space on all primary and overflow
pages. The percentage at the right is the number of unused bytes
divided by the total number of bytes.
}
# Output a dump of the in-memory database. This can be used for more
# complex offline analysis.
#
# The "*/" below closes the SQL comment opened at the top of the report,
# so everything that follows is executable SQL.
titleline {}
puts "The entire text of this report can be sourced into any SQL database"
puts "engine for further analysis. All of the text above is an SQL comment."
puts "The data used to generate this report follows:"
puts "*/"
puts "BEGIN;"
puts $tabledef
unset -nocomplain x
# $x(*) is the column-name list supplied by [mem eval] for each row.
# Non-numeric values are single-quoted ([quote] doubles embedded quotes).
mem eval {SELECT * FROM space_used} x {
puts -nonewline "INSERT INTO space_used VALUES"
set sep (
foreach col $x(*) {
set v $x($col)
if {$v=="" || ![string is double $v]} {set v '[quote $v]'}
puts -nonewline $sep$v
set sep ,
}
puts ");"
}
puts "COMMIT;"
# End of the [catch] wrapper (opened earlier in the script) around the
# whole report generation; on any error print it and exit non-zero.
} err]} {
puts "ERROR: $err"
puts $errorInfo
exit 1
}

View File

@ -1,275 +0,0 @@
#!/usr/bin/tclsh
#
# Run this script using TCLSH to do a speed comparison between
# various versions of SQLite and PostgreSQL and MySQL
#
# Run a test
#
# Global test counter; also names the per-test SQL script files.
set cnt 1

# Run one timed test.  The SQL for test number $cnt is read from
# ./test$cnt.sql, echoed as an HTML fragment (long scripts are elided to
# the first and last three lines), and then piped into each database
# engine under test while wall-clock time is measured with [time].
# Each result becomes one row of an HTML table.  The PostgreSQL and the
# older/experimental SQLite runs are currently commented out.
proc runtest {title} {
global cnt
set sqlfile test$cnt.sql
puts "<h2>Test $cnt: $title</h2>"
incr cnt
set fd [open $sqlfile r]
set sql [string trim [read $fd [file size $sqlfile]]]
close $fd
# Abbreviate scripts longer than 8 lines for the HTML listing.
set sx [split $sql \n]
set n [llength $sx]
if {$n>8} {
set sql {}
for {set i 0} {$i<3} {incr i} {append sql [lindex $sx $i]<br>\n}
append sql "<i>... [expr {$n-6}] lines omitted</i><br>\n"
for {set i [expr {$n-3}]} {$i<$n} {incr i} {
append sql [lindex $sx $i]<br>\n
}
} else {
regsub -all \n [string trim $sql] <br> sql
}
puts "<blockquote>"
puts "$sql"
puts "</blockquote><table border=0 cellpadding=0 cellspacing=0>"
set format {<tr><td>%s</td><td align="right">&nbsp;&nbsp;&nbsp;%.3f</td></tr>}
set delay 1000
# exec sync; after $delay;
# set t [time "exec psql drh <$sqlfile" 1]
# set t [expr {[lindex $t 0]/1000000.0}]
# puts [format $format PostgreSQL: $t]
# [exec sync] flushes OS buffers and the delay lets the machine settle
# so one engine's dirty pages do not bias the next engine's timing.
# [time ... 1] returns microseconds; convert to seconds for display.
exec sync; after $delay;
set t [time "exec mysql -f drh <$sqlfile" 1]
set t [expr {[lindex $t 0]/1000000.0}]
puts [format $format MySQL: $t]
# set t [time "exec ./sqlite232 s232.db <$sqlfile" 1]
# set t [expr {[lindex $t 0]/1000000.0}]
# puts [format $format {SQLite 2.3.2:} $t]
# set t [time "exec ./sqlite-100 s100.db <$sqlfile" 1]
# set t [expr {[lindex $t 0]/1000000.0}]
# puts [format $format {SQLite 2.4 (cache=100):} $t]
exec sync; after $delay;
set t [time "exec ./sqlite248 s2k.db <$sqlfile" 1]
set t [expr {[lindex $t 0]/1000000.0}]
puts [format $format {SQLite 2.4.8:} $t]
exec sync; after $delay;
set t [time "exec ./sqlite248 sns.db <$sqlfile" 1]
set t [expr {[lindex $t 0]/1000000.0}]
puts [format $format {SQLite 2.4.8 (nosync):} $t]
exec sync; after $delay;
set t [time "exec ./sqlite2412 s2kb.db <$sqlfile" 1]
set t [expr {[lindex $t 0]/1000000.0}]
puts [format $format {SQLite 2.4.12:} $t]
exec sync; after $delay;
set t [time "exec ./sqlite2412 snsb.db <$sqlfile" 1]
set t [expr {[lindex $t 0]/1000000.0}]
puts [format $format {SQLite 2.4.12 (nosync):} $t]
# set t [time "exec ./sqlite-t1 st1.db <$sqlfile" 1]
# set t [expr {[lindex $t 0]/1000000.0}]
# puts [format $format {SQLite 2.4 (test):} $t]
puts "</table>"
}
# Initialize the environment
#
# Seed the RNG deterministically so every run generates identical test
# data, remove stale SQLite database files, and drop the test tables
# from the PostgreSQL and MySQL "drh" databases (errors ignored if the
# tables do not exist yet).
expr srand(1)
catch {exec /bin/sh -c {rm -f s*.db}}
set fd [open clear.sql w]
puts $fd {
drop table t1;
drop table t2;
}
close $fd
catch {exec psql drh <clear.sql}
catch {exec mysql drh <clear.sql}
# Pre-create the SQLite databases: s2k*/snsb-style pairs with
# synchronous writes enabled and disabled respectively.
set fd [open 2kinit.sql w]
puts $fd {
PRAGMA default_cache_size=2000;
PRAGMA default_synchronous=on;
}
close $fd
exec ./sqlite248 s2k.db <2kinit.sql
exec ./sqlite2412 s2kb.db <2kinit.sql
set fd [open nosync-init.sql w]
puts $fd {
PRAGMA default_cache_size=2000;
PRAGMA default_synchronous=off;
}
close $fd
exec ./sqlite248 sns.db <nosync-init.sql
exec ./sqlite2412 snsb.db <nosync-init.sql
# Word lists used by number_name to spell out integers in English.
set ones {zero one two three four five six seven eight nine
ten eleven twelve thirteen fourteen fifteen sixteen seventeen
eighteen nineteen}
set tens {{} ten twenty thirty forty fifty sixty seventy eighty ninety}
# Spell out the non-negative integer n in English words, e.g.
# 123 -> "one hundred twenty three".  Thousands are handled by
# recursion, so e.g. 1234 -> "one thousand two hundred thirty four".
# Relies on the global word lists $::ones and $::tens; returns "zero"
# when nothing else was produced (i.e. n == 0).
proc number_name {n} {
  set parts {}
  if {$n >= 1000} {
    lappend parts [number_name [expr {$n / 1000}]] thousand
    set n [expr {$n % 1000}]
  }
  if {$n >= 100} {
    lappend parts [lindex $::ones [expr {$n / 100}]] hundred
    set n [expr {$n % 100}]
  }
  if {$n >= 20} {
    lappend parts [lindex $::tens [expr {$n / 10}]]
    set n [expr {$n % 10}]
  }
  if {$n > 0} {
    lappend parts [lindex $::ones $n]
  }
  set result [join $parts " "]
  if {$result eq ""} {
    set result zero
  }
  return $result
}
# Test 1: 1000 single-row INSERTs with no explicit transaction, so each
# statement commits (and possibly syncs) on its own.
set fd [open test$cnt.sql w]
puts $fd "CREATE TABLE t1(a INTEGER, b INTEGER, c VARCHAR(100));"
for {set i 1} {$i<=1000} {incr i} {
set r [expr {int(rand()*100000)}]
puts $fd "INSERT INTO t1 VALUES($i,$r,'[number_name $r]');"
}
close $fd
runtest {1000 INSERTs}
# Test 2: 25000 INSERTs batched inside a single transaction.
set fd [open test$cnt.sql w]
puts $fd "BEGIN;"
puts $fd "CREATE TABLE t2(a INTEGER, b INTEGER, c VARCHAR(100));"
for {set i 1} {$i<=25000} {incr i} {
set r [expr {int(rand()*500000)}]
puts $fd "INSERT INTO t2 VALUES($i,$r,'[number_name $r]');"
}
puts $fd "COMMIT;"
close $fd
runtest {25000 INSERTs in a transaction}
# Test 3: range queries on an unindexed column (full table scans).
set fd [open test$cnt.sql w]
for {set i 0} {$i<100} {incr i} {
set lwr [expr {$i*100}]
set upr [expr {($i+10)*100}]
puts $fd "SELECT count(*), avg(b) FROM t2 WHERE b>=$lwr AND b<$upr;"
}
close $fd
runtest {100 SELECTs without an index}
# Test 4: full scans with a LIKE pattern match on the text column.
set fd [open test$cnt.sql w]
for {set i 1} {$i<=100} {incr i} {
puts $fd "SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%[number_name $i]%';"
}
close $fd
runtest {100 SELECTs on a string comparison}
# Test 5: cost of building two indices on the 25000-row table.
set fd [open test$cnt.sql w]
puts $fd {CREATE INDEX i2a ON t2(a);}
puts $fd {CREATE INDEX i2b ON t2(b);}
close $fd
runtest {Creating an index}
# Test 6: the same style of range query as test 3, now indexed.
set fd [open test$cnt.sql w]
for {set i 0} {$i<5000} {incr i} {
set lwr [expr {$i*100}]
set upr [expr {($i+1)*100}]
puts $fd "SELECT count(*), avg(b) FROM t2 WHERE b>=$lwr AND b<$upr;"
}
close $fd
runtest {5000 SELECTs with an index}
# Test 7: range UPDATEs on the small unindexed table.
set fd [open test$cnt.sql w]
puts $fd "BEGIN;"
for {set i 0} {$i<1000} {incr i} {
set lwr [expr {$i*10}]
set upr [expr {($i+1)*10}]
puts $fd "UPDATE t1 SET b=b*2 WHERE a>=$lwr AND a<$upr;"
}
puts $fd "COMMIT;"
close $fd
runtest {1000 UPDATEs without an index}
# Test 8: single-row integer UPDATEs located via the index on t2.a.
set fd [open test$cnt.sql w]
puts $fd "BEGIN;"
for {set i 1} {$i<=25000} {incr i} {
set r [expr {int(rand()*500000)}]
puts $fd "UPDATE t2 SET b=$r WHERE a=$i;"
}
puts $fd "COMMIT;"
close $fd
runtest {25000 UPDATEs with an index}
# Test 9: as test 8, but rewriting the text column instead.
set fd [open test$cnt.sql w]
puts $fd "BEGIN;"
for {set i 1} {$i<=25000} {incr i} {
set r [expr {int(rand()*500000)}]
puts $fd "UPDATE t2 SET c='[number_name $r]' WHERE a=$i;"
}
puts $fd "COMMIT;"
close $fd
runtest {25000 text UPDATEs with an index}
# Test 10: bulk table-to-table copies in both directions.
set fd [open test$cnt.sql w]
puts $fd "BEGIN;"
puts $fd "INSERT INTO t1 SELECT * FROM t2;"
puts $fd "INSERT INTO t2 SELECT * FROM t1;"
puts $fd "COMMIT;"
close $fd
runtest {INSERTs from a SELECT}
# Test 11: DELETE selected by an unindexed LIKE scan.
set fd [open test$cnt.sql w]
puts $fd {DELETE FROM t2 WHERE c LIKE '%fifty%';}
close $fd
runtest {DELETE without an index}
# Test 12: DELETE of a large contiguous key range via the index.
set fd [open test$cnt.sql w]
puts $fd {DELETE FROM t2 WHERE a>10 AND a<20000;}
close $fd
runtest {DELETE with an index}
# Test 13: refill t2 from t1 right after the big delete above.
set fd [open test$cnt.sql w]
puts $fd {INSERT INTO t2 SELECT * FROM t1;}
close $fd
runtest {A big INSERT after a big DELETE}
# Test 14: clear t1 and repopulate it with many small INSERTs inside
# one transaction.
set fd [open test$cnt.sql w]
puts $fd {BEGIN;}
puts $fd {DELETE FROM t1;}
for {set i 1} {$i<=3000} {incr i} {
set r [expr {int(rand()*100000)}]
puts $fd "INSERT INTO t1 VALUES($i,$r,'[number_name $r]');"
}
puts $fd {COMMIT;}
close $fd
runtest {A big DELETE followed by many small INSERTs}
# Test 15: drop both tables.
set fd [open test$cnt.sql w]
puts $fd {DROP TABLE t1;}
puts $fd {DROP TABLE t2;}
close $fd
runtest {DROP TABLE}

View File

@ -1,171 +0,0 @@
/*
** Performance test for SQLite.
**
** This program reads ASCII text from a file named on the command-line.
** It converts each SQL statement into UTF16 and submits it to SQLite
** for evaluation. A new UTF16 database is created at the beginning of
** the program. All statements are timed using the high-resolution timer
** built into Intel-class processors.
**
** To compile this program, first compile the SQLite library separately
** will full optimizations. For example:
**
** gcc -c -O6 -DSQLITE_THREADSAFE=0 sqlite3.c
**
** Then link against this program. But to do optimize this program
** because that defeats the hi-res timer.
**
** gcc speedtest16.c sqlite3.o -ldl -I../src
**
** Then run this program with a single argument which is the name of
** a file containing SQL script that you want to test:
**
** ./a.out database.db test.sql
*/
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <ctype.h>
#include <unistd.h>
#include "sqlite3.h"
#define ISSPACE(X) isspace((unsigned char)(X))
/*
** hwtime.h contains inline assembler code for implementing
** high-performance timing routines.
*/
#include "hwtime.h"
/*
** Convert a zero-terminated ASCII string into a zero-terminated
** UTF-16le string.  Memory for the returned string comes from
** malloc() and must be freed by the caller.
*/
static void *asciiToUtf16le(const char *zAscii){
  int nChar = strlen(zAscii);          /* Characters, excluding the NUL */
  unsigned char *zOut;
  int i;

  /* One 16-bit code unit per input byte, plus the 16-bit terminator. */
  zOut = malloc( nChar*2 + 2 );

  /* Widen each byte, including the terminating NUL, to little-endian
  ** UTF-16: low byte first, high byte always zero. */
  for(i=0; i<=nChar; i++){
    zOut[2*i] = (unsigned char)zAscii[i];
    zOut[2*i+1] = 0;
  }
  return (void*)zOut;
}
/*
** Timers
**
** Cycle counts accumulated across all statements, one per phase of
** statement processing.  Reported by main() at exit.
*/
static sqlite_uint64 prepTime = 0;
static sqlite_uint64 runTime = 0;
static sqlite_uint64 finalizeTime = 0;
/*
** Prepare and run a single statement of SQL.
**
** The ASCII statement is converted to UTF-16le and compiled with
** sqlite3_prepare16_v2().  Each phase (prepare, the step loop, and
** finalize) is wrapped in sqlite3Hwtime() cycle-counter reads, printed,
** and added to the global accumulators above.  Result rows are counted
** but otherwise discarded.  On a failed prepare there is no statement
** to step or finalize, so only the UTF-16 buffer is released.
*/
static void prepareAndRun(sqlite3 *db, const char *zSql){
void *utf16;
sqlite3_stmt *pStmt;
const void *stmtTail;
sqlite_uint64 iStart, iElapse;
int rc;
printf("****************************************************************\n");
printf("SQL statement: [%s]\n", zSql);
utf16 = asciiToUtf16le(zSql);
iStart = sqlite3Hwtime();
rc = sqlite3_prepare16_v2(db, utf16, -1, &pStmt, &stmtTail);
iElapse = sqlite3Hwtime() - iStart;
prepTime += iElapse;
printf("sqlite3_prepare16_v2() returns %d in %llu cycles\n", rc, iElapse);
if( rc==SQLITE_OK ){
int nRow = 0;
iStart = sqlite3Hwtime();
while( (rc=sqlite3_step(pStmt))==SQLITE_ROW ){ nRow++; }
iElapse = sqlite3Hwtime() - iStart;
runTime += iElapse;
printf("sqlite3_step() returns %d after %d rows in %llu cycles\n",
rc, nRow, iElapse);
iStart = sqlite3Hwtime();
rc = sqlite3_finalize(pStmt);
iElapse = sqlite3Hwtime() - iStart;
finalizeTime += iElapse;
printf("sqlite3_finalize() returns %d in %llu cycles\n", rc, iElapse);
}
free(utf16);
}
/*
** Entry point.
**
** Usage: a.out FILENAME SQL-SCRIPT
**
** Reads the SQL script into memory, splits it into complete statements
** using sqlite3_complete(), and runs each statement through
** prepareAndRun() against a freshly created UTF-16 database FILENAME.
** Accumulated cycle counts are reported on stdout at the end.
**
** Fix over the original: the results of fopen() and malloc() are now
** checked, so a missing script file reports an error instead of
** dereferencing a NULL pointer.
*/
int main(int argc, char **argv){
  void *utf16;
  sqlite3 *db;
  int rc;
  int nSql;
  char *zSql;
  int i, j;
  FILE *in;
  sqlite_uint64 iStart, iElapse;
  sqlite_uint64 iSetup = 0;
  int nStmt = 0;
  int nByte = 0;

  if( argc!=3 ){
    fprintf(stderr, "Usage: %s FILENAME SQL-SCRIPT\n"
                    "Runs SQL-SCRIPT as UTF16 against a UTF16 database\n",
                    argv[0]);
    exit(1);
  }

  /* Slurp the whole SQL script into memory; fail cleanly if the file
  ** cannot be opened or the buffer cannot be allocated. */
  in = fopen(argv[2], "r");
  if( in==0 ){
    fprintf(stderr, "Cannot open input file \"%s\"\n", argv[2]);
    exit(1);
  }
  fseek(in, 0L, SEEK_END);
  nSql = ftell(in);
  zSql = malloc( nSql+1 );
  if( zSql==0 ){
    fprintf(stderr, "Cannot allocate %d bytes\n", nSql+1);
    exit(1);
  }
  fseek(in, 0L, SEEK_SET);
  nSql = fread(zSql, 1, nSql, in);
  zSql[nSql] = 0;
  fclose(in);

  printf("SQLite version: %d\n", sqlite3_libversion_number());

  /* Start from scratch: remove any existing database, then time the
  ** creation/open of the new UTF-16 database. */
  unlink(argv[1]);
  utf16 = asciiToUtf16le(argv[1]);
  iStart = sqlite3Hwtime();
  rc = sqlite3_open16(utf16, &db);
  iElapse = sqlite3Hwtime() - iStart;
  iSetup = iElapse;
  printf("sqlite3_open16() returns %d in %llu cycles\n", rc, iElapse);
  free(utf16);

  /* Scan for ';' and let sqlite3_complete() decide where each statement
  ** ends; run every non-blank statement found. */
  for(i=j=0; j<nSql; j++){
    if( zSql[j]==';' ){
      int isComplete;
      char c = zSql[j+1];
      zSql[j+1] = 0;
      isComplete = sqlite3_complete(&zSql[i]);
      zSql[j+1] = c;
      if( isComplete ){
        zSql[j] = 0;
        while( i<j && ISSPACE(zSql[i]) ){ i++; }
        if( i<j ){
          nStmt++;
          nByte += j-i;
          prepareAndRun(db, &zSql[i]);
        }
        zSql[j] = ';';
        i = j+1;
      }
    }
  }

  /* Close the database and report the accumulated per-phase timings. */
  iStart = sqlite3Hwtime();
  sqlite3_close(db);
  iElapse = sqlite3Hwtime() - iStart;
  iSetup += iElapse;
  printf("sqlite3_close() returns in %llu cycles\n", iElapse);
  printf("\n");
  printf("Statements run: %15d\n", nStmt);
  printf("Bytes of SQL text: %15d\n", nByte);
  printf("Total prepare time: %15llu cycles\n", prepTime);
  printf("Total run time: %15llu cycles\n", runTime);
  printf("Total finalize time: %15llu cycles\n", finalizeTime);
  printf("Open/Close time: %15llu cycles\n", iSetup);
  printf("Total Time: %15llu cycles\n",
         prepTime + runTime + finalizeTime + iSetup);
  free(zSql);
  return 0;
}

View File

@ -1,207 +0,0 @@
#!/usr/bin/tclsh
#
# Run this script using TCLSH to do a speed comparison between
# various versions of SQLite and PostgreSQL and MySQL
#
# Run a test
#
# Global test counter; also names the per-test SQL script files.
set cnt 1

# Run one timed test.  The SQL for test number $cnt is read from
# ./test$cnt.sql, echoed as an HTML fragment (long scripts elided to
# the first and last three lines), then piped into PostgreSQL, MySQL
# and two SQLite 2.4 builds while wall-clock time is measured with
# [time].  Each result becomes one row of an HTML table.
proc runtest {title} {
global cnt
set sqlfile test$cnt.sql
puts "<h2>Test $cnt: $title</h2>"
incr cnt
set fd [open $sqlfile r]
set sql [string trim [read $fd [file size $sqlfile]]]
close $fd
# Abbreviate scripts longer than 8 lines for the HTML listing.
set sx [split $sql \n]
set n [llength $sx]
if {$n>8} {
set sql {}
for {set i 0} {$i<3} {incr i} {append sql [lindex $sx $i]<br>\n}
append sql "<i>... [expr {$n-6}] lines omitted</i><br>\n"
for {set i [expr {$n-3}]} {$i<$n} {incr i} {
append sql [lindex $sx $i]<br>\n
}
} else {
regsub -all \n [string trim $sql] <br> sql
}
puts "<blockquote>"
puts "$sql"
puts "</blockquote><table border=0 cellpadding=0 cellspacing=0>"
set format {<tr><td>%s</td><td align="right">&nbsp;&nbsp;&nbsp;%.3f</td></tr>}
set delay 1000
# [exec sync] plus the delay lets the system settle between engines so
# one run's dirty pages do not bias the next run's timing.  [time ... 1]
# returns microseconds; convert to seconds for display.
exec sync; after $delay;
set t [time "exec psql drh <$sqlfile" 1]
set t [expr {[lindex $t 0]/1000000.0}]
puts [format $format PostgreSQL: $t]
exec sync; after $delay;
set t [time "exec mysql -f drh <$sqlfile" 1]
set t [expr {[lindex $t 0]/1000000.0}]
puts [format $format MySQL: $t]
# set t [time "exec ./sqlite232 s232.db <$sqlfile" 1]
# set t [expr {[lindex $t 0]/1000000.0}]
# puts [format $format {SQLite 2.3.2:} $t]
# set t [time "exec ./sqlite-100 s100.db <$sqlfile" 1]
# set t [expr {[lindex $t 0]/1000000.0}]
# puts [format $format {SQLite 2.4 (cache=100):} $t]
exec sync; after $delay;
set t [time "exec ./sqlite240 s2k.db <$sqlfile" 1]
set t [expr {[lindex $t 0]/1000000.0}]
puts [format $format {SQLite 2.4:} $t]
exec sync; after $delay;
set t [time "exec ./sqlite240 sns.db <$sqlfile" 1]
set t [expr {[lindex $t 0]/1000000.0}]
puts [format $format {SQLite 2.4 (nosync):} $t]
# set t [time "exec ./sqlite-t1 st1.db <$sqlfile" 1]
# set t [expr {[lindex $t 0]/1000000.0}]
# puts [format $format {SQLite 2.4 (test):} $t]
puts "</table>"
}
# Initialize the environment
#
# Seed the RNG deterministically so every run generates identical test
# data, remove stale SQLite database files, and drop the test tables
# from the PostgreSQL and MySQL "drh" databases (errors ignored if the
# tables do not exist yet).
expr srand(1)
catch {exec /bin/sh -c {rm -f s*.db}}
set fd [open clear.sql w]
puts $fd {
drop table t1;
drop table t2;
}
close $fd
catch {exec psql drh <clear.sql}
catch {exec mysql drh <clear.sql}
# Pre-create the SQLite databases: s2k/st1 with synchronous writes on,
# sns with synchronous writes off.
set fd [open 2kinit.sql w]
puts $fd {
PRAGMA default_cache_size=2000;
PRAGMA default_synchronous=on;
}
close $fd
exec ./sqlite240 s2k.db <2kinit.sql
exec ./sqlite-t1 st1.db <2kinit.sql
set fd [open nosync-init.sql w]
puts $fd {
PRAGMA default_cache_size=2000;
PRAGMA default_synchronous=off;
}
close $fd
exec ./sqlite240 sns.db <nosync-init.sql
# Word lists used by number_name to spell out integers in English.
set ones {zero one two three four five six seven eight nine
ten eleven twelve thirteen fourteen fifteen sixteen seventeen
eighteen nineteen}
set tens {{} ten twenty thirty forty fifty sixty seventy eighty ninety}
# Return the English spelling of the non-negative integer n, e.g.
# 40057 -> "forty thousand fifty seven".  The thousands part is named
# by a recursive call; hundreds, tens and units come straight from the
# global word lists $::ones and $::tens.  Yields "zero" for n == 0.
proc number_name {n} {
  set words {}
  if {$n >= 1000} {
    lappend words [number_name [expr {$n / 1000}]] thousand
    set n [expr {$n % 1000}]
  }
  if {$n >= 100} {
    lappend words [lindex $::ones [expr {$n / 100}]] hundred
    set n [expr {$n % 100}]
  }
  if {$n >= 20} {
    lappend words [lindex $::tens [expr {$n / 10}]]
    set n [expr {$n % 10}]
  }
  if {$n > 0} {
    lappend words [lindex $::ones $n]
  }
  set name [join $words " "]
  if {$name eq ""} {
    set name zero
  }
  return $name
}
# The same 25000-row insert / delete-everything cycle is run five times
# in a row so that steady-state behavior (e.g. reuse of freed pages
# after a large delete) is measured, not just performance on a fresh
# database.  Round 1 also creates the table.
set fd [open test$cnt.sql w]
puts $fd "BEGIN;"
puts $fd "CREATE TABLE t1(a INTEGER, b INTEGER, c VARCHAR(100));"
for {set i 1} {$i<=25000} {incr i} {
set r [expr {int(rand()*500000)}]
puts $fd "INSERT INTO t1 VALUES($i,$r,'[number_name $r]');"
}
puts $fd "COMMIT;"
close $fd
runtest {25000 INSERTs in a transaction}
set fd [open test$cnt.sql w]
puts $fd "DELETE FROM t1;"
close $fd
runtest {DELETE everything}
# Round 2.
set fd [open test$cnt.sql w]
puts $fd "BEGIN;"
for {set i 1} {$i<=25000} {incr i} {
set r [expr {int(rand()*500000)}]
puts $fd "INSERT INTO t1 VALUES($i,$r,'[number_name $r]');"
}
puts $fd "COMMIT;"
close $fd
runtest {25000 INSERTs in a transaction}
set fd [open test$cnt.sql w]
puts $fd "DELETE FROM t1;"
close $fd
runtest {DELETE everything}
# Round 3.
set fd [open test$cnt.sql w]
puts $fd "BEGIN;"
for {set i 1} {$i<=25000} {incr i} {
set r [expr {int(rand()*500000)}]
puts $fd "INSERT INTO t1 VALUES($i,$r,'[number_name $r]');"
}
puts $fd "COMMIT;"
close $fd
runtest {25000 INSERTs in a transaction}
set fd [open test$cnt.sql w]
puts $fd "DELETE FROM t1;"
close $fd
runtest {DELETE everything}
# Round 4.
set fd [open test$cnt.sql w]
puts $fd "BEGIN;"
for {set i 1} {$i<=25000} {incr i} {
set r [expr {int(rand()*500000)}]
puts $fd "INSERT INTO t1 VALUES($i,$r,'[number_name $r]');"
}
puts $fd "COMMIT;"
close $fd
runtest {25000 INSERTs in a transaction}
set fd [open test$cnt.sql w]
puts $fd "DELETE FROM t1;"
close $fd
runtest {DELETE everything}
# Round 5.
set fd [open test$cnt.sql w]
puts $fd "BEGIN;"
for {set i 1} {$i<=25000} {incr i} {
set r [expr {int(rand()*500000)}]
puts $fd "INSERT INTO t1 VALUES($i,$r,'[number_name $r]');"
}
puts $fd "COMMIT;"
close $fd
runtest {25000 INSERTs in a transaction}
set fd [open test$cnt.sql w]
puts $fd "DELETE FROM t1;"
close $fd
runtest {DELETE everything}
# Finally, measure dropping the table.
set fd [open test$cnt.sql w]
puts $fd {DROP TABLE t1;}
close $fd
runtest {DROP TABLE}

View File

@ -1,260 +0,0 @@
/*
** Performance test for SQLite.
**
** This program reads ASCII text from a file named on the command-line
** and submits that text to SQLite for evaluation. A new database
** is created at the beginning of the program. All statements are
** timed using the high-resolution timer built into Intel-class processors.
**
** To compile this program, first compile the SQLite library separately
** will full optimizations. For example:
**
** gcc -c -O6 -DSQLITE_THREADSAFE=0 sqlite3.c
**
** Then link against this program. But to do optimize this program
** because that defeats the hi-res timer.
**
** gcc speedtest8.c sqlite3.o -ldl -I../src
**
** Then run this program with a single argument which is the name of
** a file containing SQL script that you want to test:
**
** ./a.out test.db test.sql
*/
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <ctype.h>
#include <time.h>
#if defined(_MSC_VER)
#include <windows.h>
#else
#include <unistd.h>
#include <sys/times.h>
#include <sched.h>
#endif
#include "sqlite3.h"
/*
** hwtime.h contains inline assembler code for implementing
** high-performance timing routines.
*/
#include "hwtime.h"
/*
** Timers
**
** Cycle counts accumulated across all statements, one per phase of
** statement processing.  Reported by main() at exit.
*/
static sqlite_uint64 prepTime = 0;
static sqlite_uint64 runTime = 0;
static sqlite_uint64 finalizeTime = 0;
/*
** Prepare and run a single statement of SQL.
**
** Each phase (prepare, the step loop, and finalize) is wrapped in
** sqlite3Hwtime() cycle-counter reads and added to the global
** accumulators above.  When bQuiet is non-zero, the per-statement
** progress output is suppressed but timing still occurs.  Result rows
** are counted but otherwise discarded; a failed prepare leaves no
** statement to step or finalize.
*/
static void prepareAndRun(sqlite3 *db, const char *zSql, int bQuiet){
sqlite3_stmt *pStmt;
const char *stmtTail;
sqlite_uint64 iStart, iElapse;
int rc;
if (!bQuiet){
printf("***************************************************************\n");
}
if (!bQuiet) printf("SQL statement: [%s]\n", zSql);
iStart = sqlite3Hwtime();
rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, &stmtTail);
iElapse = sqlite3Hwtime() - iStart;
prepTime += iElapse;
if (!bQuiet){
printf("sqlite3_prepare_v2() returns %d in %llu cycles\n", rc, iElapse);
}
if( rc==SQLITE_OK ){
int nRow = 0;
iStart = sqlite3Hwtime();
while( (rc=sqlite3_step(pStmt))==SQLITE_ROW ){ nRow++; }
iElapse = sqlite3Hwtime() - iStart;
runTime += iElapse;
if (!bQuiet){
printf("sqlite3_step() returns %d after %d rows in %llu cycles\n",
rc, nRow, iElapse);
}
iStart = sqlite3Hwtime();
rc = sqlite3_finalize(pStmt);
iElapse = sqlite3Hwtime() - iStart;
finalizeTime += iElapse;
if (!bQuiet){
printf("sqlite3_finalize() returns %d in %llu cycles\n", rc, iElapse);
}
}
}
/*
** Entry point.
**
** Usage: a.out [options] FILENAME SQL-SCRIPT
**
** Parses the option flags (-log when built with HAVE_OSINST,
** -priority, -quiet), reads the SQL script into memory, splits it into
** complete statements with sqlite3_complete(), and runs each statement
** through prepareAndRun() against a freshly created database FILENAME.
** Cycle counts and (on non-MSVC builds) CPU/real times are reported at
** the end.
**
** Fixes over the original: fopen()/malloc() results are checked so a
** missing script file reports an error instead of crashing, and the
** argument to isspace() is cast to unsigned char (passing a negative
** plain char is undefined behavior).
*/
int main(int argc, char **argv){
  sqlite3 *db;
  int rc;
  int nSql;
  char *zSql;
  int i, j;
  FILE *in;
  sqlite_uint64 iStart, iElapse;
  sqlite_uint64 iSetup = 0;
  int nStmt = 0;
  int nByte = 0;
  const char *zArgv0 = argv[0];      /* Saved: argv is shifted below */
  int bQuiet = 0;
#if !defined(_MSC_VER)
  struct tms tmsStart, tmsEnd;
  clock_t clkStart, clkEnd;
#endif
#ifdef HAVE_OSINST
  extern sqlite3_vfs *sqlite3_instvfs_binarylog(char *, char *, char *);
  extern void sqlite3_instvfs_destroy(sqlite3_vfs *);
  sqlite3_vfs *pVfs = 0;
#endif

  /* Consume option flags.  Each recognized flag shifts argv/argc so
  ** that the two required positional arguments end up at argv[1] and
  ** argv[2] for the code below. */
  while (argc>3)
  {
#ifdef HAVE_OSINST
    if( argc>4 && (strcmp(argv[1], "-log")==0) ){
      pVfs = sqlite3_instvfs_binarylog("oslog", 0, argv[2]);
      sqlite3_vfs_register(pVfs, 1);
      argv += 2;
      argc -= 2;
      continue;
    }
#endif

    /*
    ** Increasing the priority slightly above normal can help with
    ** repeatability of testing.  Note that with Cygwin, -5 equates
    ** to "High", +5 equates to "Low", and anything in between
    ** equates to "Normal".
    */
    if( argc>4 && (strcmp(argv[1], "-priority")==0) ){
#if defined(_MSC_VER)
      int new_priority = atoi(argv[2]);
      if(!SetPriorityClass(GetCurrentProcess(),
          (new_priority<=-5) ? HIGH_PRIORITY_CLASS :
          (new_priority<=0)  ? ABOVE_NORMAL_PRIORITY_CLASS :
          (new_priority==0)  ? NORMAL_PRIORITY_CLASS :
          (new_priority<5)   ? BELOW_NORMAL_PRIORITY_CLASS :
                               IDLE_PRIORITY_CLASS)){
        printf ("error setting priority\n");
        exit(2);
      }
#else
      struct sched_param myParam;
      sched_getparam(0, &myParam);
      printf ("Current process priority is %d.\n", (int)myParam.sched_priority);
      myParam.sched_priority = atoi(argv[2]);
      printf ("Setting process priority to %d.\n", (int)myParam.sched_priority);
      if (sched_setparam (0, &myParam) != 0){
        printf ("error setting priority\n");
        exit(2);
      }
#endif
      argv += 2;
      argc -= 2;
      continue;
    }

    if( argc>3 && strcmp(argv[1], "-quiet")==0 ){
      bQuiet = -1;
      argv++;
      argc--;
      continue;
    }

    break;  /* Unrecognized flag: fall through to the usage check */
  }

  if( argc!=3 ){
    fprintf(stderr, "Usage: %s [options] FILENAME SQL-SCRIPT\n"
            "Runs SQL-SCRIPT against a UTF8 database\n"
            "\toptions:\n"
#ifdef HAVE_OSINST
            "\t-log <log>\n"
#endif
            "\t-priority <value> : set priority of task\n"
            "\t-quiet : only display summary results\n",
            zArgv0);
    exit(1);
  }

  /* Slurp the whole SQL script into memory; fail cleanly if the file
  ** cannot be opened or the buffer cannot be allocated. */
  in = fopen(argv[2], "r");
  if( in==0 ){
    fprintf(stderr, "Cannot open input file \"%s\"\n", argv[2]);
    exit(1);
  }
  fseek(in, 0L, SEEK_END);
  nSql = ftell(in);
  zSql = malloc( nSql+1 );
  if( zSql==0 ){
    fprintf(stderr, "Cannot allocate %d bytes\n", nSql+1);
    exit(1);
  }
  fseek(in, 0L, SEEK_SET);
  nSql = fread(zSql, 1, nSql, in);
  zSql[nSql] = 0;
  fclose(in);

  printf("SQLite version: %d\n", sqlite3_libversion_number());
  unlink(argv[1]);
#if !defined(_MSC_VER)
  clkStart = times(&tmsStart);
#endif
  iStart = sqlite3Hwtime();
  rc = sqlite3_open(argv[1], &db);
  iElapse = sqlite3Hwtime() - iStart;
  iSetup = iElapse;
  if (!bQuiet) printf("sqlite3_open() returns %d in %llu cycles\n", rc, iElapse);

  /* Walk the script, using sqlite3_complete() to find statement
  ** boundaries at each ';', and run every non-blank statement.  A
  ** statement starting with ".crash" aborts the process on purpose
  ** (used for crash-recovery testing). */
  for(i=j=0; j<nSql; j++){
    if( zSql[j]==';' ){
      int isComplete;
      char c = zSql[j+1];
      zSql[j+1] = 0;
      isComplete = sqlite3_complete(&zSql[i]);
      zSql[j+1] = c;
      if( isComplete ){
        zSql[j] = 0;
        /* Cast to unsigned char: plain char may be signed, and a
        ** negative argument to isspace() is undefined behavior. */
        while( i<j && isspace((unsigned char)zSql[i]) ){ i++; }
        if( i<j ){
          int n = j - i;
          if( n>=6 && memcmp(&zSql[i], ".crash",6)==0 ) exit(1);
          nStmt++;
          nByte += n;
          prepareAndRun(db, &zSql[i], bQuiet);
        }
        zSql[j] = ';';
        i = j+1;
      }
    }
  }

  iStart = sqlite3Hwtime();
  sqlite3_close(db);
  iElapse = sqlite3Hwtime() - iStart;
#if !defined(_MSC_VER)
  clkEnd = times(&tmsEnd);
#endif
  iSetup += iElapse;
  if (!bQuiet) printf("sqlite3_close() returns in %llu cycles\n", iElapse);

  /* Summary report: always printed, even with -quiet. */
  printf("\n");
  printf("Statements run: %15d stmts\n", nStmt);
  printf("Bytes of SQL text: %15d bytes\n", nByte);
  printf("Total prepare time: %15llu cycles\n", prepTime);
  printf("Total run time: %15llu cycles\n", runTime);
  printf("Total finalize time: %15llu cycles\n", finalizeTime);
  printf("Open/Close time: %15llu cycles\n", iSetup);
  printf("Total time: %15llu cycles\n",
         prepTime + runTime + finalizeTime + iSetup);

#if !defined(_MSC_VER)
  printf("\n");
  printf("Total user CPU time: %15.3g secs\n", (tmsEnd.tms_utime - tmsStart.tms_utime)/(double)CLOCKS_PER_SEC );
  printf("Total system CPU time: %15.3g secs\n", (tmsEnd.tms_stime - tmsStart.tms_stime)/(double)CLOCKS_PER_SEC );
  printf("Total real time: %15.3g secs\n", (clkEnd -clkStart)/(double)CLOCKS_PER_SEC );
#endif

#ifdef HAVE_OSINST
  if( pVfs ){
    sqlite3_instvfs_destroy(pVfs);
    /* NOTE(review): after the argv shifts above, argv[0] is not
    ** guaranteed to still point at the log file name when other flags
    ** follow -log; confirm before relying on this message. */
    printf("vfs log written to %s\n", argv[0]);
  }
#endif
  free(zSql);
  return 0;
}

View File

@ -1,218 +0,0 @@
/*
** Performance test for SQLite.
**
** This program reads ASCII text from a file named on the command-line
** and submits that text to SQLite for evaluation. A new database
** is created at the beginning of the program. All statements are
** timed using the high-resolution timer built into Intel-class processors.
**
** To compile this program, first compile the SQLite library separately
** will full optimizations. For example:
**
** gcc -c -O6 -DSQLITE_THREADSAFE=0 sqlite3.c
**
** Then link against this program. But to do optimize this program
** because that defeats the hi-res timer.
**
** gcc speedtest8.c sqlite3.o -ldl -I../src
**
** Then run this program with a single argument which is the name of
** a file containing SQL script that you want to test:
**
** ./a.out test.db test.sql
*/
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <ctype.h>
#include <unistd.h>
#include <stdarg.h>
#include "sqlite3.h"
#define ISSPACE(X) isspace((unsigned char)(X))
#include "test_osinst.c"
/*
** Prepare, step to completion, and finalize a single SQL statement,
** recording the elapsed cycle count of each phase in the binary log
** attached to pInstVfs.
**
**   pInstVfs - instrumentation VFS that receives the log records
**   db       - open database connection
**   zSql     - UTF-8 text of a single SQL statement
**
** Errors from the SQLite calls are recorded in the log (via the rc
** argument of the *_call hooks) but are not reported to the caller.
*/
static void prepareAndRun(sqlite3_vfs *pInstVfs, sqlite3 *db, const char *zSql){
  sqlite3_stmt *pStmt;
  const char *stmtTail;
  int rc;
  char zMessage[1024];
  sqlite3_uint64 iTime;

  /* All declarations now precede the first statement so that this file
  ** also compiles as strict C89 (the original interleaved them).
  ** sqlite3_snprintf() always NUL-terminates, but the explicit
  ** terminator keeps the buffer safe regardless. */
  zMessage[1023] = '\0';

  sqlite3_snprintf(1023, zMessage, "sqlite3_prepare_v2: %s", zSql);
  sqlite3_instvfs_binarylog_marker(pInstVfs, zMessage);

  iTime = sqlite3Hwtime();
  rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, &stmtTail);
  iTime = sqlite3Hwtime() - iTime;
  sqlite3_instvfs_binarylog_call(pInstVfs,BINARYLOG_PREPARE_V2,iTime,rc,zSql);

  if( rc==SQLITE_OK ){
    int nRow = 0;  /* rows seen; counted but not used beyond the loop */

    sqlite3_snprintf(1023, zMessage, "sqlite3_step loop: %s", zSql);
    sqlite3_instvfs_binarylog_marker(pInstVfs, zMessage);
    iTime = sqlite3Hwtime();
    while( (rc=sqlite3_step(pStmt))==SQLITE_ROW ){ nRow++; }
    iTime = sqlite3Hwtime() - iTime;
    sqlite3_instvfs_binarylog_call(pInstVfs, BINARYLOG_STEP, iTime, rc, zSql);

    sqlite3_snprintf(1023, zMessage, "sqlite3_finalize: %s", zSql);
    sqlite3_instvfs_binarylog_marker(pInstVfs, zMessage);
    iTime = sqlite3Hwtime();
    rc = sqlite3_finalize(pStmt);
    iTime = sqlite3Hwtime() - iTime;
    sqlite3_instvfs_binarylog_call(pInstVfs, BINARYLOG_FINALIZE, iTime, rc, zSql);
  }
}
/*
** Return true (1) if zLeft and zRight contain exactly the same text,
** or false (0) otherwise.  Behaves like strcmp(zLeft,zRight)==0.
*/
static int stringcompare(const char *zLeft, const char *zRight){
  int i = 0;
  while( zLeft[i] && zLeft[i]==zRight[i] ){ i++; }
  /* Equal only if both strings ended at the same index */
  return zLeft[i]==zRight[i];
}
/*
** Read the entire content of file zFile into memory obtained from
** malloc() and return a pointer to it.  The buffer is NUL-terminated
** and *pnScript is set to the number of bytes read (excluding the
** terminator).  Return NULL on any error (open, size, read, or
** out-of-memory); *pnScript is left unchanged in that case.
**
** The file is read through the default sqlite3_vfs.
*/
static char *readScriptFile(const char *zFile, int *pnScript){
  sqlite3_vfs *pVfs = sqlite3_vfs_find(0);
  sqlite3_file *p;
  int rc;
  sqlite3_int64 nByte;
  char *zData = 0;
  int flags = SQLITE_OPEN_READONLY|SQLITE_OPEN_MAIN_DB;

  p = (sqlite3_file *)malloc(pVfs->szOsFile);
  if( p==0 ){
    return 0;          /* fix: previously unchecked - xOpen would have
                       ** dereferenced a NULL file object */
  }
  rc = pVfs->xOpen(pVfs, zFile, p, flags, &flags);
  if( rc!=SQLITE_OK ){
    goto error_out;
  }
  rc = p->pMethods->xFileSize(p, &nByte);
  if( rc!=SQLITE_OK ){
    goto close_out;
  }
  zData = (char *)malloc(nByte+1);
  if( zData==0 ){
    goto close_out;    /* fix: previously unchecked - xRead would have
                       ** written through a NULL pointer on OOM */
  }
  rc = p->pMethods->xRead(p, zData, nByte, 0);
  if( rc!=SQLITE_OK ){
    goto close_out;
  }
  zData[nByte] = '\0';
  p->pMethods->xClose(p);
  free(p);
  *pnScript = nByte;   /* NB: truncates if the file exceeds INT_MAX bytes */
  return zData;

close_out:
  p->pMethods->xClose(p);
error_out:
  free(p);
  free(zData);
  return 0;
}
/*
** Command-line entry point for the instrumented speed test.
**
**   Usage: speedtest8inst -db DBFILE -script SQLFILE -log LOGFILE [-logdata]
**
** Reads the SQL script, opens (or creates) the database through the
** "logging" instrumentation VFS, runs every complete SQL statement in
** the script via prepareAndRun(), and writes the binary instrumentation
** log to LOGFILE.
**
** Exit codes: 0 success, -1 script unreadable, -2 database open failed,
** -3 bad command line.
*/
int main(int argc, char **argv){
  const char zUsageMsg[] =
    "Usage: %s options...\n"
    " where available options are:\n"
    "\n"
    " -db DATABASE-FILE (database file to operate on)\n"
    " -script SCRIPT-FILE (script file to read sql from)\n"
    " -log LOG-FILE (log file to create)\n"
    " -logdata (log all data to log file)\n"
    "\n"
    " Options -db, -script and -log are compulsory\n"
    "\n"
  ;
  const char *zDb = 0;
  const char *zScript = 0;
  const char *zLog = 0;
  int logdata = 0;
  int ii;
  int i, j;
  int rc;
  sqlite3_vfs *pInstVfs;           /* Instrumentation VFS */
  char *zSql = 0;
  int nSql;
  sqlite3 *db;

  /* Parse command-line options */
  for(ii=1; ii<argc; ii++){
    if( stringcompare("-db", argv[ii]) && (ii+1)<argc ){
      zDb = argv[++ii];
    }
    else if( stringcompare("-script", argv[ii]) && (ii+1)<argc ){
      zScript = argv[++ii];
    }
    else if( stringcompare("-log", argv[ii]) && (ii+1)<argc ){
      zLog = argv[++ii];
    }
    else if( stringcompare("-logdata", argv[ii]) ){
      logdata = 1;
    }
    else {
      goto usage;
    }
  }
  if( !zDb || !zScript || !zLog ) goto usage;

  zSql = readScriptFile(zScript, &nSql);
  if( !zSql ){
    fprintf(stderr, "Failed to read script file\n");
    return -1;
  }

  pInstVfs = sqlite3_instvfs_binarylog("logging", 0, zLog, logdata);

  rc = sqlite3_open_v2(
    zDb, &db, SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, "logging"
  );
  if( rc!=SQLITE_OK ){
    fprintf(stderr, "Failed to open db: %s\n", sqlite3_errmsg(db));
    sqlite3_close(db);   /* fix: the handle must be closed even when
                         ** sqlite3_open_v2() fails */
    free(zSql);
    return -2;
  }

  /* Walk the script, isolating one complete SQL statement at a time.
  ** zSql[j+1] is safe even at the last ';' because readScriptFile()
  ** NUL-terminates the buffer. */
  for(i=j=0; j<nSql; j++){
    if( zSql[j]==';' ){
      int isComplete;
      char c = zSql[j+1];
      zSql[j+1] = 0;
      isComplete = sqlite3_complete(&zSql[i]);
      zSql[j+1] = c;
      if( isComplete ){
        zSql[j] = 0;
        while( i<j && ISSPACE(zSql[i]) ){ i++; }
        if( i<j ){
          prepareAndRun(pInstVfs, db, &zSql[i]);
        }
        zSql[j] = ';';
        i = j+1;
      }
    }
  }

  sqlite3_close(db);                  /* fix: close the connection before
                                      ** tearing down the VFS it uses */
  sqlite3_instvfs_destroy(pInstVfs);
  free(zSql);                         /* fix: was leaked */
  return 0;

usage:
  fprintf(stderr, zUsageMsg, argv[0]);
  return -3;
}

View File

@ -1,82 +0,0 @@
#!/usr/bin/tclsh
#
# This script splits the sqlite3.c amalgamated source code files into
# several smaller files such that no single file is more than a fixed
# number of lines in length (32k or 64k).  Each of the split out files
# is #include-ed by the master file.
#
# Splitting files up this way allows them to be used with older compilers
# that cannot handle really long source files.
#
set MAX 32768 ;# Maximum number of lines per file.
set BEGIN {^/\*+ Begin file ([a-zA-Z0-9_.]+) \*+/}
set END {^/\*+ End of %s \*+/}

set in [open sqlite3.c]
set out1 [open sqlite3-all.c w]

# Copy the header from sqlite3.c into sqlite3-all.c
#
# Fix: [gets] returns the number of characters read - 0 for an empty
# line and -1 at end-of-file - so the loop must test >=0.  The old test
# "while {[gets $in line]}" stopped at the first blank header line and,
# worse, looped forever at EOF (−1 is truthy).  The other read loops in
# this script already use the >=0 form.
while {[gets $in line]>=0} {
  if {[regexp $BEGIN $line]} break
  puts $out1 $line
}
# Gather the complete content of one embedded source file into memory.
#
#   firstline - the "Begin file" marker line that opens the section
#   bufout    - name of a variable in the caller that receives the text
#   nout      - name of a variable in the caller that receives the
#               number of lines gathered (not counting $firstline)
#
# Reads from the global input channel $in up to and including the
# matching "End of <filename>" marker line.
proc gather_one_file {firstline bufout nout} {
  # Extract the filename from the marker so the matching END pattern
  # can be built with [format].
  regexp $::BEGIN $firstline all filename
  set end [format $::END $filename]
  upvar $bufout buf $nout n
  set buf $firstline\n
  global in
  set n 0
  while {[gets $in line]>=0} {
    incr n
    append buf $line\n
    if {[regexp $end $line]} break
  }
}
# Emit one accumulated chunk of source text as an auxiliary file named
# "sqlite3-NNN.c" and append a matching #include directive to the master
# sqlite3-all.c output file.
#
set filecnt 0
proc write_one_file {content} {
  global filecnt
  incr filecnt
  set zName sqlite3-$filecnt.c
  set fd [open $zName w]
  puts -nonewline $fd $content
  close $fd
  puts $::out1 "#include \"$zName\""
}
# Continue reading input.  Store chunks in separate files and add
# the #includes to the main sqlite3-all.c file as necessary to reference
# the extra chunks.
#
# $line still holds the first "Begin file" marker found by the header
# loop above; each iteration consumes one embedded file, flushing the
# accumulator whenever adding it would exceed $MAX lines.
set all {}
set N 0
while {[regexp $BEGIN $line]} {
  set buf {}
  set n 0
  gather_one_file $line buf n
  # Flush the current chunk BEFORE appending, so a single oversized
  # file still lands in a chunk of its own.
  if {$n+$N>=$MAX} {
    write_one_file $all
    set all {}
    set N 0
  }
  append all $buf
  incr N $n
  # Copy any glue lines between embedded files straight through to the
  # master output until the next "Begin file" marker (or EOF).
  while {[gets $in line]>=0} {
    if {[regexp $BEGIN $line]} break
    puts $out1 $line
  }
}
# Flush whatever remains in the accumulator.
if {$N>0} {
  write_one_file $all
}
close $out1
close $in

File diff suppressed because it is too large Load Diff

View File

@ -1,158 +0,0 @@
/*
** The program does some simple static analysis of the sqlite3.c source
** file looking for mistakes.
**
** Usage:
**
** ./srcck1 sqlite3.c
**
** This program looks for instances of assert(), ALWAYS(), NEVER() or
** testcase() that contain side-effects and reports errors if any such
** instances are found.
**
** The aim of this utility is to prevent recurrences of errors such
** as the one fixed at:
**
** https://www.sqlite.org/src/info/a2952231ac7abe16
**
** Note that another similar error was found by this utility when it was
** first written. That other error was fixed by the same check-in that
** committed the first version of this utility program.
*/
#include <stdlib.h>
#include <ctype.h>
#include <stdio.h>
#include <string.h>
/* Read the complete text of the file named zFilename into a buffer
** obtained from malloc() and return a pointer to that buffer.  The
** buffer is NUL-terminated.  Print a message and exit(1) if the file
** cannot be opened, memory cannot be allocated, or the read comes up
** short.
*/
static char *readFile(const char *zFilename){
  FILE *pFile;
  char *zBuf;
  long nSize;
  size_t nRead;

  pFile = fopen(zFilename, "rb");
  if( pFile==0 ){
    fprintf(stderr, "unable to open '%s' for reading\n", zFilename);
    exit(1);
  }
  /* Measure the file by seeking to its end, then return to the start */
  fseek(pFile, 0, SEEK_END);
  nSize = ftell(pFile);
  rewind(pFile);
  zBuf = malloc( nSize+1 );
  if( zBuf==0 ){
    fprintf(stderr, "cannot allocate %d bytes to store '%s'\n",
            (int)(nSize+1), zFilename);
    exit(1);
  }
  nRead = fread(zBuf, 1, nSize, pFile);
  fclose(pFile);
  if( nRead!=(size_t)nSize ){
    fprintf(stderr, "only read %d of %d bytes from '%s'\n",
            (int)nRead, (int)nSize, zFilename);
    exit(1);
  }
  zBuf[nSize] = 0;
  return zBuf;
}
/* Check the C code in z[0..n-1] to see if it might have side effects.
** The only accurate way to know this is a full parse, which this routine
** does not attempt.  Instead it uses a simple heuristic, reporting a
** side effect when it sees:
**
**    *  '=' that is not part of ==, >=, <=, != or prefixed by > or <
**    *  '++'
**    *  '--'
**
** If the text contains the phrase "side-effects-ok" inside a comment,
** always return false.  That marker disables checking for assert()s
** with deliberate side-effects, such as SQLITE_TESTCTRL_ASSERT - a
** facility that lets applications determine at runtime whether or not
** assert()s are enabled; obviously that determination cannot be made
** unless the assert() has some side-effect.
**
** Return true if a side effect is seen.  Return false if not.
*/
static int hasSideEffect(const char *z, unsigned int n){
  unsigned int i;
  for(i=0; i<n; i++){
    char c = z[i];
    if( c=='/' && strncmp(&z[i], "/*side-effects-ok*/", 19)==0 ){
      return 0;   /* explicit opt-out marker: never report */
    }
    if( c=='+' && z[i+1]=='+' ) return 1;   /* increment */
    if( c=='-' && z[i+1]=='-' ) return 1;   /* decrement */
    if( c=='=' && i>0 ){
      char prev = z[i-1];
      if( prev!='=' && prev!='>' && prev!='<' && prev!='!' && z[i+1]!='=' ){
        return 1;                           /* plain assignment */
      }
    }
  }
  return 0;
}

/* Return the number of bytes in z[] that come before the first
** unmatched ')'.  If no unmatched ')' exists, the result is strlen(z).
*/
static unsigned int findCloseParen(const char *z){
  unsigned int nDepth = 0;
  unsigned int i = 0;
  while( z[i] ){
    if( z[i]=='(' ){
      nDepth++;
    }else if( z[i]==')' ){
      if( nDepth==0 ) break;
      nDepth--;
    }
    i++;
  }
  return i;
}

/* Search z for instances of assert(...), ALWAYS(...), NEVER(...), and
** testcase(...) whose argument contains side effects.  Print an error
** message to stderr for each one found and return the total count.
*/
static unsigned int findAllSideEffects(const char *z){
  /* The macros to scan for; nPrefix includes the opening '(' */
  static const struct {
    const char *zPrefix;
    unsigned int nPrefix;
  } aMacro[] = {
    { "assert(",   7 },
    { "ALWAYS(",   7 },
    { "NEVER(",    6 },
    { "testcase(", 9 },
  };
  unsigned int lineno = 1;    /* Current line number, for the report */
  unsigned int i, j;
  unsigned int nErr = 0;
  char c, prevC = 0;

  for(i=0; (c = z[i])!=0; prevC=c, i++){
    if( c=='\n' ){ lineno++; continue; }
    /* A candidate keyword must start at a non-alpha -> alpha boundary */
    if( !isalpha(c) || isalpha(prevC) ) continue;
    for(j=0; j<sizeof(aMacro)/sizeof(aMacro[0]); j++){
      if( strncmp(&z[i], aMacro[j].zPrefix, aMacro[j].nPrefix)==0 ){
        unsigned int n;
        const char *z2 = &z[i] + aMacro[j].nPrefix;  /* first arg char */
        n = findCloseParen(z2);
        if( hasSideEffect(z2, n) ){
          nErr++;
          fprintf(stderr, "side-effect line %u: %.*s\n", lineno,
                  (int)(&z2[n+1] - &z[i]), &z[i]);
        }
        break;
      }
    }
  }
  return nErr;
}
/* Program entry point.  Read the file named on the command line, scan
** it for assert()/ALWAYS()/NEVER()/testcase() calls with side effects,
** and exit 1 if any were found (or on usage error), 0 otherwise.
*/
int main(int argc, char **argv){
  char *zText;
  unsigned int nProblem;

  if( argc!=2 ){
    fprintf(stderr, "Usage: %s FILENAME\n", argv[0]);
    return 1;
  }
  zText = readFile(argv[1]);
  nProblem = findAllSideEffects(zText);
  free(zText);
  if( nProblem==0 ) return 0;
  fprintf(stderr, "Found %u undesirable side-effects\n", nProblem);
  return 1;
}

View File

@ -1,98 +0,0 @@
#!/usr/bin/tclsh
#
# Parse the output of
#
# objdump -d sqlite3.o
#
# for x64 and generate a report showing:
#
# (1) Stack used by each function
# (2) Recursion paths and their aggregate stack depth
#
# getStack is true while we are still waiting to see the stack-pointer
# adjustment ("sub $N,%rsp") of the function currently being parsed.
set getStack 0
while {![eof stdin]} {
  set line [gets stdin]
  # A line of the form "000000000000abcd <funcname>:" starts a new
  # function; reset all per-function state.
  if {[regexp {^[0-9a-f]+ <([^>]+)>:\s*$} $line all procname]} {
    set curfunc $procname
    # NOTE(review): root() is set here and cleared when the function is
    # seen to make a call, but it is never read later in this script --
    # presumably leftover bookkeeping; confirm before relying on it.
    set root($curfunc) 1
    set calls($curfunc) {}
    set calledby($curfunc) {}
    set recursive($curfunc) {}
    set stkdepth($curfunc) 0
    set getStack 1
    continue
  }
  # A "call"/"callq" instruction records a caller/callee pair.
  if {[regexp {callq? +[0-9a-z]+ <([^>]+)>} $line all other]} {
    set key [list $curfunc $other]
    set callpair($key) 1
    unset -nocomplain root($curfunc)
    continue
  }
  # The first "sub $0xNN,%rsp" (or %esp) after a function label is taken
  # as that function's stack reservation; later ones are ignored.
  if {[regexp {sub +\$(0x[0-9a-z]+),%[er]sp} $line all xdepth]} {
    if {$getStack} {
      scan $xdepth %x depth
      set stkdepth($curfunc) $depth
      set getStack 0
    }
    continue
  }
}
# Report the stack reservation of every function, deepest first.
puts "****************** Stack Usage By Function ********************"
set aDepth {}
foreach fname [array names stkdepth] {
  lappend aDepth [list $stkdepth($fname) $fname]
}
foreach entry [lsort -integer -decr -index 0 $aDepth] {
  puts [format {%6d %s} [lindex $entry 0] [lindex $entry 1]]
}

# Convert the collected caller/callee pairs into the calls() adjacency
# list used by the recursion analysis below.
puts "****************** Stack Usage By Recursion *******************"
foreach pair [array names callpair] {
  lappend calls([lindex $pair 0]) [lindex $pair 1]
  # lappend calledby([lindex $pair 1]) [lindex $pair 0]
}
# Breadth-first walk of the calls() graph starting at $root.
#
# Side effects: every cycle that returns to $root is appended to the
# global recursive($root) as the full path (ending with $root again).
#
# Returns the list of all functions reachable from $root (excluding
# $root itself unless it appears in a cycle's interior).
proc all_descendents {root} {
  global calls recursive
  # todo() maps an unvisited function to the path that reached it.
  set todo($root) $root
  set go 1
  while {$go} {
    set go 0
    foreach f [array names todo] {
      set path $todo($f)
      unset todo($f)
      if {![info exists calls($f)]} continue
      foreach x $calls($f) {
        if {$x==$root} {
          # Found a cycle back to the starting function.
          lappend recursive($root) [concat $path $root]
        } elseif {![info exists d($x)]} {
          # First visit: queue it with its discovery path.
          set go 1
          set todo($x) [concat $path $x]
          set d($x) 1
        }
      }
    }
  }
  return [array names d]
}
# For every function, find its recursion cycles and compute the total
# stack consumed by one trip around each cycle (the aggregate of all
# frames except the repeated final element).
set pathlist {}
foreach f [array names recursive] {
  all_descendents $f
  foreach m $recursive($f) {
    set depth 0
    foreach b [lrange $m 0 end-1] {
      set depth [expr {$depth+$stkdepth($b)}]
    }
    lappend pathlist [list $depth $m]
  }
}
# Print cycles deepest-first: the aggregate depth, the entry function
# with its own frame size, then the remaining members of the cycle.
foreach path [lsort -integer -decr -index 0 $pathlist] {
  foreach {depth m} $path break
  set first [lindex $m 0]
  puts [format {%6d %s %d} $depth $first $stkdepth($first)]
  foreach b [lrange $m 1 end] {
    puts " $b $stkdepth($b)"
  }
}

View File

@ -1,33 +0,0 @@
#!/bin/sh
#
# Run this script in a directory that contains a valid SQLite makefile
# in order to check for unintentionally exported symbols.
#
make sqlite3.c
# Exported (text/data) symbols of a full-featured build; anything not
# prefixed "sqlite3_" leaks into the global namespace.
echo '****** Exported symbols from a build including RTREE && FTS4 ******'
gcc -c -DSQLITE_ENABLE_FTS3 -DSQLITE_ENABLE_RTREE \
  -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_STAT3 \
  -DSQLITE_ENABLE_MEMSYS5 -DSQLITE_ENABLE_UNLOCK_NOTIFY \
  -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_ATOMIC_WRITE \
  sqlite3.c
nm sqlite3.o | grep " [TD] "
echo '****** Surplus symbols from a build including RTREE & FTS4 ******'
nm sqlite3.o | grep " [TD] " | grep -v " .*sqlite3_"
# Undefined ("U") symbols show what the library requires from its
# environment under each configuration.
echo '****** Dependencies of the core. No extensions. No OS interface *******'
gcc -c -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_STAT3 \
  -DSQLITE_ENABLE_MEMSYS5 -DSQLITE_ENABLE_UNLOCK_NOTIFY \
  -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_ATOMIC_WRITE \
  -DSQLITE_OS_OTHER -DSQLITE_THREADSAFE=0 \
  sqlite3.c
nm sqlite3.o | grep " U "
echo '****** Dependencies including RTREE & FTS4 *******'
gcc -c -DSQLITE_ENABLE_FTS3 -DSQLITE_ENABLE_RTREE \
  -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_STAT3 \
  -DSQLITE_ENABLE_MEMSYS5 -DSQLITE_ENABLE_UNLOCK_NOTIFY \
  -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_ATOMIC_WRITE \
  sqlite3.c
nm sqlite3.o | grep " U "

View File

@ -1,34 +0,0 @@
#!/bin/sh
#
# Run this script in a directory that contains a valid SQLite makefile
# in order to check for unintentionally exported symbols.
#
make sqlite3.c
# Exported (text/data) symbols of a full-featured build including ICU;
# anything not prefixed "sqlite3_" leaks into the global namespace.
echo '****** Exported symbols from a build including RTREE, FTS4 & ICU ******'
gcc -c -DSQLITE_ENABLE_FTS3 -DSQLITE_ENABLE_RTREE \
  -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_STAT3 \
  -DSQLITE_ENABLE_MEMSYS5 -DSQLITE_ENABLE_UNLOCK_NOTIFY \
  -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_ATOMIC_WRITE \
  -DSQLITE_ENABLE_ICU \
  sqlite3.c
nm sqlite3.o | grep ' [TD] ' | sort -k 3
echo '****** Surplus symbols from a build including RTREE, FTS4 & ICU ******'
nm sqlite3.o | grep ' [TD] ' | grep -v ' .*sqlite3_'
# Undefined ("U") symbols show what the library requires from its
# environment under each configuration.
echo '****** Dependencies of the core. No extensions. No OS interface *******'
gcc -c -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_STAT3 \
  -DSQLITE_ENABLE_MEMSYS5 -DSQLITE_ENABLE_UNLOCK_NOTIFY \
  -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_ATOMIC_WRITE \
  -DSQLITE_OS_OTHER -DSQLITE_THREADSAFE=0 \
  sqlite3.c
nm sqlite3.o | grep ' U ' | sort -k 3
echo '****** Dependencies including RTREE & FTS4 *******'
gcc -c -DSQLITE_ENABLE_FTS3 -DSQLITE_ENABLE_RTREE \
  -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_STAT3 \
  -DSQLITE_ENABLE_MEMSYS5 -DSQLITE_ENABLE_UNLOCK_NOTIFY \
  -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_ATOMIC_WRITE \
  sqlite3.c
nm sqlite3.o | grep ' U ' | sort -k 3

View File

@ -1,12 +0,0 @@
#!/usr/bin/tcl
#
# Convert input text into a C string
#
# Reads the file named by the first command-line argument and writes
# each line to stdout as a double-quoted C string literal ending in \n.
set in [open [lindex $argv 0] rb]
while {![eof $in]} {
  set line [gets $in]
  # NOTE(review): if the final line lacks a trailing newline, [gets]
  # returns its text with eof already set, so that fragment is discarded
  # by this break -- confirm that is intended.
  if {[eof $in]} break;
  # Escape backslashes first, then double-quotes, so the output is a
  # valid C string literal.
  set x [string map "\\\\ \\\\\\\\ \\\" \\\\\"" $line]
  puts "\"$x\\n\""
}
close $in

View File

@ -1,123 +0,0 @@
/*
** A utility program to translate SQLite varints into decimal and decimal
** integers into varints.
*/
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#if defined(_MSC_VER) || defined(__BORLANDC__)
typedef __int64 i64;
typedef unsigned __int64 u64;
#else
typedef long long int i64;
typedef unsigned long long int u64;
#endif

/*
** Decode one hexadecimal digit.  Return its value 0..15, or -1 if c is
** not a hex digit.
*/
static int hexValue(char c){
  int v = -1;
  if( c>='0' && c<='9' ){
    v = c - '0';
  }else if( c>='a' && c<='f' ){
    v = c - 'a' + 10;
  }else if( c>='A' && c<='F' ){
    v = c - 'A' + 10;
  }
  return v;
}

/*
** Render the low 4 bits of c as a lower-case hexadecimal digit.
*/
static char toHex(unsigned char c){
  int x = c & 0xf;
  return (char)( x<10 ? '0'+x : 'a'+(x-10) );
}

/*
** Write v into p[] as an SQLite varint: big-endian groups of 7 bits
** with the high bit set on every byte except the last.  Values needing
** more than 8 groups use the special 9-byte form whose final byte holds
** 8 literal bits.  Return the number of bytes written (1..9); p[] must
** have room for at least 9 bytes.
*/
static int putVarint(unsigned char *p, u64 v){
  unsigned char aTmp[10];
  int nByte, k;

  if( v>>56 ){
    /* 9-byte form: last byte carries the low 8 bits verbatim */
    p[8] = (unsigned char)v;
    v >>= 8;
    for(k=7; k>=0; k--){
      p[k] = (unsigned char)((v & 0x7f) | 0x80);
      v >>= 7;
    }
    return 9;
  }
  /* Emit 7-bit groups least-significant first ... */
  nByte = 0;
  do{
    aTmp[nByte++] = (unsigned char)((v & 0x7f) | 0x80);
    v >>= 7;
  }while( v!=0 );
  aTmp[0] &= 0x7f;           /* clear the continuation bit on the last byte */
  /* ... then reverse into p[] so the most-significant group leads */
  for(k=0; k<nByte; k++){
    p[k] = aTmp[nByte-1-k];
  }
  return nByte;
}
/*
** Command-line entry point.  With several two-hex-digit arguments the
** program decodes a varint to decimal; with a single decimal argument
** (optionally prefixed '+' or '-') it encodes the value and prints the
** varint bytes in hex.
*/
int main(int argc, char **argv){
  int i;
  u64 x;
  u64 uX = 0;
  i64 iX;
  int n;
  unsigned char zHex[20];  /* putVarint writes at most 9 bytes */
  if( argc==1 ){
    fprintf(stderr,
      "Usage:\n"
      " %s HH HH HH ... Convert varint to decimal\n"
      " %s DDDDD Convert decimal to varint\n"
      " Add '+' or '-' before DDDDD to disambiguate.\n",
      argv[0], argv[0]);
    exit(1);
  }
  /* Multiple arguments, or a single argument that looks like one hex
  ** byte, are treated as varint bytes to decode. */
  if( argc>2
   || (strlen(argv[1])==2 && hexValue(argv[1][0])>=0 && hexValue(argv[1][1])>=0)
  ){
    /* Hex to decimal */
    /* Accumulate up to 8 leading bytes of 7 payload bits each; stop
    ** early at the first byte whose continuation (0x80) bit is clear. */
    for(i=1; i<argc && i<9; i++){
      if( strlen(argv[i])!=2 ){
        fprintf(stderr, "Not a hex byte: %s\n", argv[i]);
        exit(1);
      }
      x = (hexValue(argv[i][0])<<4) + hexValue(argv[i][1]);
      uX = (uX<<7) + (x&0x7f);
      if( (x&0x80)==0 ) break;
    }
    /* A 9th byte (the full-9-byte varint form) contributes 8 bits. */
    if( i==9 && i<argc ){
      if( strlen(argv[i])!=2 ){
        fprintf(stderr, "Not a hex byte: %s\n", argv[i]);
        exit(1);
      }
      x = (hexValue(argv[i][0])<<4) + hexValue(argv[i][1]);
      uX = (uX<<8) + x;
    }
    i++;
    if( i<argc ){
      fprintf(stderr, "Extra arguments: %s...\n", argv[i]);
      exit(1);
    }
  }else{
    /* Decimal to varint: parse an optional sign, then digits.  The
    ** unsigned accumulator wraps silently on overflow. */
    char *z = argv[1];
    int sign = 1;
    if( z[0]=='+' ) z++;
    else if( z[0]=='-' ){ z++; sign = -1; }
    uX = 0;
    while( z[0] ){
      if( z[0]<'0' || z[0]>'9' ){
        fprintf(stderr, "Not a decimal number: %s", argv[1]);
        exit(1);
      }
      uX = uX*10 + z[0] - '0';
      z++;
    }
    /* Negate via the signed alias; memcpy assumes 8-byte i64/u64. */
    if( sign<0 ){
      memcpy(&iX, &uX, 8);
      iX = -iX;
      memcpy(&uX, &iX, 8);
    }
  }
  /* Print the value both ways: decimal and the encoded varint bytes. */
  n = putVarint(zHex, uX);
  printf("%lld =", (i64)uX);
  for(i=0; i<n; i++){
    printf(" %c%c", toHex(zHex[i]>>4), toHex(zHex[i]&0x0f));
  }
  printf("\n");
  return 0;
}

View File

@ -1,143 +0,0 @@
#!/usr/bin/tcl
#
# This script makes modifications to the vdbe.c source file which reduce
# the amount of stack space required by the sqlite3VdbeExec() routine.
#
# The modifications performed by this script are optional. The vdbe.c
# source file will compile correctly with and without the modifications
# performed by this script. And all routines within vdbe.c will compute
# the same result. The modifications made by this script merely help
# the C compiler to generate code for sqlite3VdbeExec() that uses less
# stack space.
#
# Script usage:
#
# mv vdbe.c vdbe.c.template
# tclsh vdbe-compress.tcl $CFLAGS <vdbe.c.template >vdbe.c
#
# Modifications made:
#
# All modifications are within the sqlite3VdbeExec() function. The
# modifications seek to reduce the amount of stack space allocated by
# this routine by moving local variable declarations out of individual
# opcode implementations and into a single large union. The union contains
# a separate structure for each opcode and that structure contains the
# local variables used by that opcode. In this way, the total amount
# of stack space required by sqlite3VdbeExec() is reduced from the
# sum of all local variables to the maximum of the local variable space
# required for any single opcode.
#
# In order to be recognized by this script, local variables must appear
# on the first line after the open curly-brace that begins a new opcode
# implementation. Local variables must not have initializers, though they
# may be commented.
#
# The union definition is inserted in place of a special marker comment
# in the preamble to the sqlite3VdbeExec() implementation.
#
#############################################################################
#
# The transformed source is accumulated in three pieces that are written
# out, in order, at the end of the script.
set beforeUnion {} ;# C code before union
set unionDef {} ;# C code of the union
set afterUnion {} ;# C code after the union
set sCtr 0 ;# Context counter
# If the SQLITE_SMALL_STACK compile-time option is missing, then
# this transformation becomes a no-op.
#
if {![regexp {SQLITE_SMALL_STACK} $argv]} {
  while {![eof stdin]} {
    puts [gets stdin]
  }
  exit
}
# Read program text up to the spot where the union should be
# inserted.
#
while {![eof stdin]} {
  set line [gets stdin]
  if {[regexp {INSERT STACK UNION HERE} $line]} break
  append beforeUnion $line\n
}
# Process the remaining text.  Build up the union definition as we go.
#
set vlist {}
set seenDecl 0
# Union member names are two lower-case letters generated from sCtr
# (aa, ab, ac, ...), giving 26*26 possible opcode structs.
set namechars {abcdefghijklmnopqrstuvwxyz}
set nnc [string length $namechars]
while {![eof stdin]} {
  set line [gets stdin]
  # A "case OP_xxx: {" line opens an opcode implementation; the local
  # variable declarations that immediately follow it are moved into a
  # per-opcode struct inside the union.
  if {[regexp "^case (OP_\\w+): \173" $line all operator]} {
    append afterUnion $line\n
    set vlist {}
    while {![eof stdin]} {
      set line [gets stdin]
      # An initializer-free declaration: capture the variable name and
      # copy the line into both the union and (disabled) original spot.
      if {[regexp {^ +(const )?\w+ \**(\w+)(\[.*\])?;} $line \
           all constKeyword vname notused1]} {
        if {!$seenDecl} {
          set sname {}
          append sname [string index $namechars [expr {$sCtr/$nnc}]]
          append sname [string index $namechars [expr {$sCtr%$nnc}]]
          incr sCtr
          append unionDef " struct ${operator}_stack_vars \173\n"
          append afterUnion \
            "#if 0 /* local variables moved into u.$sname */\n"
          set seenDecl 1
        }
        append unionDef " $line\n"
        append afterUnion $line\n
        lappend vlist $vname
      } elseif {[regexp {^#(if|endif)} $line] && [llength $vlist]>0} {
        # Preserve preprocessor conditionals that guard declarations.
        append unionDef "$line\n"
        append afterUnion $line\n
      } else {
        break
      }
    }
    if {$seenDecl} {
      append unionDef " \175 $sname;\n"
      append afterUnion "#endif /* local variables moved into u.$sname */\n"
    }
    set seenDecl 0
  }
  # A line starting with "}" closes the opcode; stop rewriting names.
  if {[regexp "^\175" $line]} {
    append afterUnion $line\n
    set vlist {}
  } elseif {[llength $vlist]>0} {
    # Rewrite references to each moved variable as u.<sname>.<var>.
    # The trailing space lets the (\W) lookahead match at end-of-line;
    # the substitution runs twice because matches may overlap.
    append line " "
    foreach v $vlist {
      regsub -all "(\[^a-zA-Z0-9>.\])${v}(\\W)" $line "\\1u.$sname.$v\\2" line
      regsub -all "(\[^a-zA-Z0-9>.\])${v}(\\W)" $line "\\1u.$sname.$v\\2" line
      # The expressions above fail to catch instances of variable "abc" in
      # expressions like (32>abc).  The following expression makes those
      # substitutions.
      regsub -all "(\[^-\])>${v}(\\W)" $line "\\1>u.$sname.$v\\2" line
    }
    append afterUnion [string trimright $line]\n
  } elseif {$line=="" && [eof stdin]} {
    # no-op
  } else {
    append afterUnion $line\n
  }
}
# Output the resulting text.
#
puts -nonewline $beforeUnion
puts " /********************************************************************"
puts " ** Automatically generated code"
puts " **"
puts " ** The following union is automatically generated by the"
puts " ** vdbe-compress.tcl script.  The purpose of this union is to"
puts " ** reduce the amount of stack space required by this function."
puts " ** See comments in the vdbe-compress.tcl script for details."
puts " */"
puts " union vdbeExecUnion \173"
puts -nonewline $unionDef
puts " \175 u;"
puts " /* End automatically generated code"
puts " ********************************************************************/"
puts -nonewline $afterUnion

View File

@ -1,90 +0,0 @@
#!/bin/tclsh
#
# SUMMARY:
# Run this script in the same directory as the "vdbe_profile.out" file.
# This script summarizes the results contained in that file.
#
# DETAILS:
# Compile SQLite using the -DVDBE_PROFILE option on Linux. This causes
# performance information about individual VDBE operations to be appended
# to the "vdbe_profile.out" file. After content has been accumulated in
# vdbe_profile.out, run this script to analyze the output and generate a
# report.
#
# Refuse to run unless the profile data produced by a -DVDBE_PROFILE
# build is present in the current directory.
if {![file readable vdbe_profile.out]} {
  error "run this script in the same directory as the vdbe_profile.out file"
}
set in [open vdbe_profile.out r]
set stmt {}
set allstmt {}
# Parse the profile file.  Record kinds:
#   "---- ID"   starts (or repeats) a statement, keyed by ID
#   "-- sql"    SQL text, kept only for the first occurrence
#   count/cycle rows aggregate per-opcode and per-address totals
while {![eof $in]} {
  set line [gets $in]
  if {$line==""} continue
  if {[regexp {^---- } $line]} {
    set stmt [lindex $line 1]
    if {[info exists cnt($stmt)]} {
      incr cnt($stmt)
      set firsttime 0
    } else {
      set cnt($stmt) 1
      set sql($stmt) {}
      set firsttime 1
      lappend allstmt $stmt
    }
    continue;
  }
  if {[regexp {^-- } $line]} {
    if {$firsttime} {
      append sql($stmt) [string range $line 3 end]\n
    }
    continue
  }
  # Data rows: count, cycles, (unused), address, opcode-detail.
  if {![regexp {^ *\d+ *\d+ *\d+ *\d+ ([A-Z].*)} $line all detail]} continue
  set c [lindex $line 0]
  set t [lindex $line 1]
  set addr [lindex $line 3]
  set op [lindex $line 4]
  if {[info exists opcnt($op)]} {
    incr opcnt($op) $c
    incr opcycle($op) $t
  } else {
    set opcnt($op) $c
    set opcycle($op) $t
  }
  if {[info exists stat($stmt,$addr)]} {
    foreach {cx tx detail} $stat($stmt,$addr) break
    incr cx $c
    incr tx $t
    set stat($stmt,$addr) [list $cx $tx $detail]
  } else {
    set stat($stmt,$addr) [list $c $t $detail]
  }
}
close $in
# Per-statement report: count, cycles, average cycles, address, detail.
# NOTE(review): the for loop walks addresses 0,1,2,... and stops at the
# first missing one, so rows after a gap in addresses are not printed --
# presumably VDBE addresses are contiguous; confirm.
foreach stmt $allstmt {
  puts "********************************************************************"
  puts [string trim $sql($stmt)]
  puts "Execution count: $cnt($stmt)"
  for {set i 0} {[info exists stat($stmt,$i)]} {incr i} {
    foreach {cx tx detail} $stat($stmt,$i) break
    if {$cx==0} {
      set ax 0
    } else {
      set ax [expr {$tx/$cx}]
    }
    puts [format {%8d %12d %12d %4d %s} $cx $tx $ax $i $detail]
  }
}
# Aggregate per-opcode totals across all statements.
puts "********************************************************************"
puts "OPCODES:"
foreach op [lsort [array names opcnt]] {
  set cx $opcnt($op)
  set tx $opcycle($op)
  if {$cx==0} {
    set ax 0
  } else {
    set ax [expr {$tx/$cx}]
  }
  puts [format {%8d %12d %12d %s} $cx $tx $ax $op]
}

View File

@ -1,14 +0,0 @@
#!/bin/sh
#
# Fix: the interpreter line was "#/bin/sh" (missing '!'), so the kernel
# could not use it as a shebang and the script ran under whatever shell
# happened to invoke it.
#
# Run this script in a directory with a working makefile to check for
# compiler warnings in SQLite using the clang static analyzer
# (scan-build); "ANALYZE:" progress lines are filtered out.
#
rm -f sqlite3.c
make sqlite3.c
echo '************* FTS4 and RTREE ****************'
scan-build gcc -c -DHAVE_STDINT_H -DSQLITE_ENABLE_FTS4 -DSQLITE_ENABLE_RTREE \
  -DSQLITE_DEBUG -DSQLITE_ENABLE_STAT3 sqlite3.c 2>&1 | grep -v 'ANALYZE:'
echo '********** ENABLE_STAT3. THREADSAFE=0 *******'
scan-build gcc -c -I. -DSQLITE_ENABLE_STAT3 -DSQLITE_THREADSAFE=0 \
  -DSQLITE_DEBUG \
  sqlite3.c ../sqlite/src/shell.c -ldl 2>&1 | grep -v 'ANALYZE:'

View File

@ -1,43 +0,0 @@
#!/bin/sh
#
# Fix: the interpreter line was "#/bin/sh" (missing '!'), so the kernel
# could not use it as a shebang and the script ran under whatever shell
# happened to invoke it.
#
# Run this script in a directory with a working makefile to check for
# compiler warnings in SQLite across several representative build
# configurations.
#
rm -f sqlite3.c
make sqlite3.c
echo '********** No optimizations. Includes FTS4/5, RTREE, JSON1 ***'
gcc -c -Wshadow -Wall -Wextra -pedantic-errors -Wno-long-long -std=c89 \
  -ansi -DHAVE_STDINT_H -DSQLITE_ENABLE_FTS4 -DSQLITE_ENABLE_RTREE \
  -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_JSON1 \
  sqlite3.c
echo '********** Android configuration ******************************'
gcc -c \
  -DHAVE_USLEEP=1 \
  -DSQLITE_HAVE_ISNAN \
  -DSQLITE_DEFAULT_JOURNAL_SIZE_LIMIT=1048576 \
  -DSQLITE_THREADSAFE=2 \
  -DSQLITE_TEMP_STORE=3 \
  -DSQLITE_POWERSAFE_OVERWRITE=1 \
  -DSQLITE_DEFAULT_FILE_FORMAT=4 \
  -DSQLITE_DEFAULT_AUTOVACUUM=1 \
  -DSQLITE_ENABLE_MEMORY_MANAGEMENT=1 \
  -DSQLITE_ENABLE_FTS3 \
  -DSQLITE_ENABLE_FTS3_BACKWARDS \
  -DSQLITE_ENABLE_FTS4 \
  -DSQLITE_OMIT_BUILTIN_TEST \
  -DSQLITE_OMIT_COMPILEOPTION_DIAGS \
  -DSQLITE_OMIT_LOAD_EXTENSION \
  -DSQLITE_DEFAULT_FILE_PERMISSIONS=0600 \
  -DSQLITE_ENABLE_ICU \
  -DUSE_PREAD64 \
  -Wshadow -Wall -Wextra \
  -Os sqlite3.c shell.c
echo '********** No optimizations. ENABLE_STAT4. THREADSAFE=0 *******'
gcc -c -Wshadow -Wall -Wextra -pedantic-errors -Wno-long-long -std=c89 \
  -ansi -DSQLITE_ENABLE_STAT4 -DSQLITE_THREADSAFE=0 \
  sqlite3.c
echo '********** Optimized -O3. Includes FTS4/5, RTREE, JSON1 ******'
gcc -O3 -c -Wshadow -Wall -Wextra -pedantic-errors -Wno-long-long -std=c89 \
  -ansi -DHAVE_STDINT_H -DSQLITE_ENABLE_FTS4 -DSQLITE_ENABLE_RTREE \
  -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_JSON1 \
  sqlite3.c