
b709e29 Fix URL typo in release notes 01f203c Update release notes c49810a Update COPYRIGHT e327526 Add BSL version number to LICENSE.TXT 07e3a4e Remove superfluous COPURIGHT.md and LICENSE.md 54c3310 Replace Dynamic Data Routing Platform with Database Proxy 305d02f Remove *.cmake wildcard from .gitignore b0b5208 Cleanup of spaces aeca6d0 Extend maxscaled error messages 817d74c Document where the CDC users are stored 9a569db Update license ff8697a MXS-716: Fix table level privilege detection 2071a8c Only check replies of slaves that are in use f8dfa42 Fix possible hangs in CDC python scripts fa1d99e Removed "filestem option" from example 009b549 Removed "filestem option" from example 8d515c2 Add example Kafka producer script for Python 64e976b Fix sporadic SSL authentication failures 5a655dc MXS-814: Check service/monitor permissions on all servers 2a7f596 Add note about galeramon priority to Galera tutorials b90b5a6 Fixed SHOW SLAVE STATUS in binlog router e22fe39 Changed couln size for SHOW SLAVE STATUS ae97b18 Fix avrorouter build failure with older sqlite libraries 56ef8b7 Replace GPL license with BSL license in scripts and tests 552836f Initialize all fields when MySQL users are loaded from cache bf42947 Update all licensing related files b29db9d Remove optimize_wildcard parameter from use 5170844 Make readwritesplit diagnosting output more clear 262ffb1 Fix crash when a config parameter has no section 33ac9e6 Add note about LEAST_BEHIND_MASTER and server weights e13e860 Fix a memory leak when backend authentication fails 75d00c2 MXS-801: Set the default value of strip_db_esc to true bd5f2db MXS-800: Add a log message about the working directory 4b1dd8c Update MySQL Monitor documentation on detect_replication_lag 559bc99 Fix installation of license file b057587 Change LICENSE to LICENSE.TXT 223fa43 Remove null characters from log messages 36fd05b Fix fatal signal handler log message 053dc8a Fix typos in documentation 371dc87 Fix Galera text in Master-Slave 
tutorial 30b6265 Disable adding of new objects at runtime db92311 Update the documentation on configuration reloading 0923d40 Update Connector-C version c4738b5 Add define for avro-conversion.ini 196e6ac Update license from GPL to BSL. e612366 Correctly calculate the number of bytes read in dcb_read 93a2a03 Update rotate documentation in admin tutorial c5eb854 MXS-585: Fix authentication handling regression 6330070 Fix schemarouter memory leak aa5827e Fix CDC authentication memory leak a5af4ad Fix avro memory leaks 627d73f Fix Avro scripts 0ff7556 Add build instructions to avrorouter documentation 734a1c8 Fix doxygen mainpage e51ce09 Add licence text to avro scripts 4d27c14 Update Avro documentation and fix installation directories a58a330 Fix readconnroute error message about router_options 22b138c MXS-772: Fix postinstall script a9960b7 Fix function declaration in mysql_backend.c cbe1704 Add missing newline 09d76ee Fix avro documentation default values 1d3f8f3 Added refresh users on START SLAVE 880db34 Make router configuration errors fatal 3bad5ca Update documentation and add logging to avrorouter 10f3384 Disable SSLv3 ca8d902 Fix rwsplit error reporting when master is lost e816d65 Fix MaxScale Tutorial deca3e1 Update MaxScale man page f1735b7 Update release notes 9238714 qc: Change type of DEALLOCATE PREPARE 0b77c3b dbfwfilter: Do not require a trailing new line 1152ca9 Remove copyright message a038a85 Remove debug assertion on ERANGE error in galeramon 12ab235 Fix comparison error for connections limit. 
5de1a8f qc_sqlite: Correct string recognition b63d754 Fix links in documentation contents 05d457e CDC protocol link fix 50676ef Fix monitor code formatting 218ba09 Remove MaxScale-and-SSL.md 0d6845b Add images to Avro documentation and tutorial 8dd2c9b Update MaxScale-2.0.0-Release-Notes.md 6d4b593 Change avrorouter default transaction grouping 4c629de Add notes about monitor changes to upgrading and release notes 267d0dc Update Binlogrouter.md c624781 Update Replication-Proxy-Binlog-Router-Tutorial.md f3261bc CDC users 1368797 Format authenticator and module headers ab01749 Format filters 8b05d32 Format core and include files f3974e5 Add GPL LICENSE to qc_mysqlembedded bfec36a astyle rabbitmq_consumer/consumer.c 54b960a Check that the Avro directory is writable 3d4cd2e Fix cdc_users using the wrong path for service users cache 1e738dd Add CDC client program documentation f6809fd Remove superfluous rabbitmw_consumer/LICENSE 6b5e667 Update license text in files 9bfcb46 Change CDC protocol documentation formatting 607f25c REQUEST-DATA formatting 8175ab4 CDC protocol update d5ca272 CDC protocol update 6c91764 Only check wsrep_local_index if node is joined f12e2c2 Do not use SSL for monitors and services 6d2cd99 Fix TestAdminUsers f4ae50d Apply astyle to server/core/test/*.c 7cc2824 Update build instructions cf8e2b5 Update release notes 03c7a6c Remove wrong function prototypes 5a11eed Revert "Remove duplicate functions" 80ed488 Remove duplicate functions bb0de8d Add info on SSL and throttling to release notes for 2.0. 0934aee Update MaxAdmin reference guide 2a3fe9b Update source URL in release notes e575cf0 Merge branch 'MXS-177-develop' into develop cc8c88d Change header for BSL ecde266 Change header for BSL 890b208 Log a message when a script is executed 9c365df Added information on server side SSL to config guide. 
aa3e002 Remove obsolete heading 79dd73a Make dprintAllSessions use dprintSession 1fc0db9 Align output of "show services" 1b9d301 Make monitorShowAll use monitorShow 983615e Adjust output of 'show modules' 436badd qc_sqlite: The module is now beta a7cbbe5 Update Upgrade document 71ac13f Remove obsolete user/password from example eb20ff8 Fix and clean up Avrorouter diagnostics code 31d4052 Change MaxScale to MariaDB MaxScale e6e4858 Fix `source` parameter not working with `router_options` d8de99a Update module version numbers eb81add Merge remote-tracking branch 'origin/develop' into MXS-177-develop daba563 Merge remote-tracking branch 'origin/MXS-651-develop-merge' into develop 678f417 Changes in response to reviews. 410fb81 Changes in response to reviews. 60135e5 Add initial release notes about Avrorouter 7400ecc qc_sqlite: Remove uninitialized read 536962c Update version number 018f044 Fix debug assertion in client command processing 51f0804 Prevent 'show monitor' from crashing with failed monitor 559347e Fix "Too many connections" message; add comments. 01d3929 Add printf format checking to dcb_printf fbd49a6 dbfwfilter: Require complete parsing only when needed 1885863 Add information to release notes about readwritesplit changes 73b56a2 Update MaxScale section in release notes. 
0a2f56f MaxAdmin: Remove debug information from 'show users' 3cf3279 MaxAdmin: Report SSL information as well 29c2b66 Always use SSL if server configured with SSL 7d6b335 dprintAllServers should use dprintServer 02a5246 qc_sqlite: Correctly detect parsing context 469419b compare: Add strict mode 8c5b3d3 compare: Allow the comparison of a single statement 4691514 Add Updgrade to 2.0 document 38b3ecb Expand the checks done before a monitor is stopped 8e2cfb9 Add backend name to authentication error message 9600a07 Fix MaxInfo crash 91c58b0 Removed log message for duplicate entry while adding an user 40392fe Fixed log message priority 0ec35b8 maxadmin: Allow the last user to be removed 5a0ebed maxadmin: Change name of user file 87aa8f1 maxadmin: Always allow root to connect bf37751 Fix COM_QUIT packet detection 7c93ee4 Update avrorouter documentation and tutorial 95ce463 Fix wrong directory in avrorouter log message cfe54c7 Update ChangeLog d69562c Fix LOAD DATA LOCAL INFILE data size tracking 24e7cd6 MXS-584: added support for SET @@session.autocommit d6f6f76 Fixes, correct too many connections message efeb924 Update release notes for 2.0.0 8f71a87 qc_sqlite: Adjust error messages b967d60 Remove copy of enum enum_server_command 822b7e3 Update package license b58301a Update MaxScale License for overlooked files c09ee47 Update MaxScale License 49f46fa Tidy up. Comment out config items not yet supported. f5c3470 Updated and simplified the Building from Source document 98b98e2 Add note about master failure modes to documentation e036f2c Update Limitations document 62219a5 Merge remote-tracking branch 'origin/drain-writeq' into develop 5caf667 Invoke DCB_REASON_DRAINED more frequently. 
77b107b qc_sqlite: Add support for LOAD DATA INFILE 8e70f26 compare: Optionally print out the parse result ad750e6 Merge remote-tracking branch 'origin/MXS-651-develop-merge' into develop ef85779 Merge remote-tracking branch 'origin/develop' into MXS-651-develop-merge ea9fdda MXS-477: Add LONGBLOB support for readconnroute eae6d42 qc_sqlite: Remove superfluous columnname definition 8fe2b21 Add binlog source to avrorouter b25cc37 qc_sqlite: Add missing destructors 8a749e7 qc_sqlite: Reduce number of keywords 5f4bb8b compare: Output query immediately 2456e52 dbfwfilter: Reject queries that cannot be parsed 5f1fbbd qc_sqlite: Extend SET grammar b8d8418 dbfwfilter: Remove 'allow' from firewall filter rule 0bd2a44 MXS-741 When no events are read from binlog file, ... a07c491 Remove duplicated function (merge error, probably) b237008 Save conflict resolution, missed last time. a0c0b40 Merge remote-tracking branch 'origin/develop' into MXS-651-develop 385d47d Change SSL logic, fix large read problem. b93b5e0 Remove false debug assertion b953b1f Turn off SSL read ahead. e0d46a5 Fix error messages and remove commented code 49b4655 MXS-739: Fix invalid JSON in Maxinfo 0c30692 qc_sqlite: Handle GROUP_CONCAT arguments 54e48a1 qc_sqlite: Consider \ as an escape character in strings 713a5d6 qc_sqlite: Add test cases 20d1b51 qc_sqlite: Handle qualified names in CREATE VIEW 1019313 qc_sqlite: Make QUERY_TYPE_WRITE default for SHOW 059c14e qc_sqlite: Accept qualified function names in SELECT db34989 qc_sqlite: Accept qualified function names b93e2f1 qc_sqlite: Add limited support for GRAND and REVOKE 678672d qc_sqlite: Cleanup copying of database and table names 9b744b9 qc_sqlite: Update table and database names at the same time db75e61 qc: Support getting the qualified table names 1f867f4 qc: Add join.test 9c7e02a qc_sqlite: Accept "...from NATURAL straight_join..." 
93d1c31 qc_sqlite: Both " and ' can enclose a string literal 8055b21 qc_sqlite: Set more information based upon tokens 37e3663 qc_sqlite: Do not blindly add affected fields 50f1360 qc: Correctly collect affected fields 71c234e qc_sqlite: Recognize CREATE TABLE ... UNION 01803f1 qc_sqlite: Recognize {DEALLOCATE|DROP} PREPARE ... 6ecd4b3 qc_sqlite: Parse index hints 0bdab01 qc: Compare sets of tables b908c8f Fix double freeing of internal DCBs 8903556 qc_sqlite: Recognize LEFT, INSERT and REPLACE 266e6c0 qc: Log all problems by default (compare program) 7b54cac qc_sqlite: Fix logging bug 9566e9f qc_sqlite: Plug a leak b0a860d qc: Run compare a specified number of times 050d698 qc_sqlite: Simplified argument handling 97c56b8 qc: Allow arguments to be passed to the query classifier 09a46e0 qc_sqlite: Add argument log_unrecognized_statements fd98153 qc: Allow arguments to be provided to the query classifier 313aa7e Fix Problems SSL assertion; non SSL connect to SSL 1d721e6 Fix DEB packaging errors 96bdc39 Fix RPM packaging failures on CentOS 7 6ba900d qc_sqlite: Recognize more SHOW commands 2869d0b qc_sqlite: Exclude support for sqlite's PRAGMA 0be68a3 qc_sqlite: Enhance SELECT syntax 28f3e1a Merge branch 'develop' into MXS-729 e18bd41 qc: Expose the result of the parsing 5896085 Add BUILD_AVRO to the CMake cache daeb896 Remove changes to blr_master.c memory handling d523821 Add comments 4eb9a66 Empty admin users file is now handled 52b46c6 qc: Update create.test db09711 qc_sqlite: Ignore case when looking for test language keywords f042a1d qc_sqlite: Extend CREATE TABLE syntax 177d2de qc_sqlite: Extend CREATE TABLE syntax d3ca8e6 qc_sqlite: Add some support for HANDLER 86c6a96 qc_sqlite: Recognize RENAME TABLE 471594f qc_sqlite: Accept more table options at CREATE TABLE 3da6cde qc_sqlite: Remove unused keywords bd89662 Fix crash on corrupted passwd file b5d1764 MXS-733: Always print session states 043e2db Remove unused CMake variables 5604fe2 Restore missing line, 
fixes logic error. 66d15a5 Added log message warning for old users found 5be9fca Changes in response to review by Johan 899e0e2 Removed password parameter from admin_user_add and admin_remove_user a2d9302 Merge branch 'develop' into MXS-729 bcaf82f Code review update e61c716 Nagios plugin update with Maxadmin using UNIX socket only d7150a2 qc_sqlite: Extend column syntax 3b78df0 qc_sqlite: Accept VALUE in addition to VALUES 85a705b qc_sqlite: Accept CHARSET in addition to CHARACTER SET db9cec8 qc_sqlite: Accept qualified column names in CREATE TABLE a9cabb0 qc_sqlite: Extend SELECT syntax f5e9878 qc_sqlite: Add set type 675cb93 qc_sqlite: Allow BINARY to turn into an identifier b04a760 qc_sqlite: Accept DROP TABLES 1075d9c qc_sqlite: Allow qualified name with LIKE in CREATE 420ac56 qc_sqlite: Extend EXPLAIN grammar 727d626 Add missing error message to readwritesplit f4fd09e Change templates and testing configurations to use sockets 1ef2e06 Add configurable default admin user a723731 Remove wrong file 7c3b02b Maxadmin/maxscaled UNIX socket update eed78d4 qc_sqlite: Pick out more information from select when CREATEing 267f091 qc_sqlite: Recognise DROP TEMPORARY TABLE 54fc29f qc_sqlite: Accept $ as a valid character in identifiers afa2ec9 qc_sqlite: Allow keywords to be used in qualified name db0427d MXS-729 code review update a3b3000 Merge branch 'develop' into MXS-729 e73d66c qc_sqlite: Identify qualified identifiers 5bacade Trailing space fix 3bc9be3 MXS-729 socket=default support in maxscale.cnf 1a5c23c Code review update for MXS-729 d6665c7 qc_sqlite: Extend CREATE TABLE grammar 91725ce qc_sqlite: Dequote table and database names cd7a022 qc: Add create test 1aa4e6b qc: Update test files 762b0c2 qc_mysqlembedded: Do not return "*" as table name cd9968f qc_sqlite: Update delete.test f16703d qc_sqlite: Add support for CALL e3ca9b4 qc_mysqlembedded: Do not return an array of empty strings 5878a22 qc_sqlite: Support selects in DELETE 1cf0444 qc_sqlite: Fix bug in 
DELETE grammar 0bf39a1 qc_sqlite: Add support for CHECK TABLE 4a8feca qc_sqlite: Add helper for catenating SrcLists ab299b3 qc_sqlite: Extend DELETE syntax 5778856 qc_sqlite: Extract database name as well 99901f1 qc_sqlite: Extend DELETE syntax 63396f8 qc_sqlite: Match "autocommit" caseinsensitively e804dd3 qc_sqlite: Add support for LOCK/UNLOCK c23e442 qc_sqlite: Extend DELETE syntax 5460e31 qc: Add delete test ab392ad qc_sqlite: Free unused data 598c6f0 qc: Measure time of parsing 2fa3844 qc_sqlite: Put all changes being {%|#}ifdefs 1b43992 qc_sqlite: Modified update.test 1676ea4 qc_sqlite: LEFT|RIGHT are not required with JOIN 224ebd3 qc_sqlite: Extend UPDATE grammar dbecca9 qc_sqlite: Extend UPDATE grammar b6ca3b3 MaxAdmin security modification MXS-729 8fb47dd Remove copying of MariaDB embedded library files 22e1257 Normalize whitespace when canonicalizing queries 269cff2 MXS-697: Fix dbfwfilter logging for matched queries 6344f6f Ignore Apple .DS_Store files. d606977 Improve comments in response to code review. 619aa13 Add configuration check flag to MaxScale 27c860b Drain write queue improvements. 
33d4154 Read only one configuration file d104c19 Format more core files 83fdead Format part of core source code 311d5de Format gateway.c and config.c with Astyle 8cbb48e Don't build maxavro library if BUILD_AVRO is not defined 32bb77a Merge branch 'MXS-483' into develop db72c5d Format CDC/Avro related files 3c26e27 qc_sqlite: Use SrcList instead of Expr f96ad6a Merge branch 'develop' into MXS-729 0728734 Fix query canonical form tests e68262d Merge remote-tracking branch 'gpl-maxscale/master' into develop 65460dc Fix missing symbols from MySQLAuth 791c821 MaxAdmin listens on UNIX socket only and maxadmin can connect 89afed6 MXS-66: All configuration errors are fatal errors d613053 Add more details to galeramon documentation 22f4f57 qc: Add support for multi UPDATE 0dba25a Added default port to blr_make_registration 9d8248c qc_sqlit: Plug leaks and access errors 057551a qc_sqlite: Fix to small an allocation 1f73820 qc_sqlite: Free memory allocated during parsing 93fefb9 qc: Enable compare to run the same test repeatedly e52c578 qc_sqlite: Handle last_insert_id() 929e02a qc_sqlite: Extend UPDATE grammar de3b9f7 qc_sqlite: Defines provided when running cmake and make 4d5c3b2 qc_sqlite: Add support for multiple-table DELETE FROM 36a4296 qc_mysqlembedded: Handle SQLCOM_DELETE_MULTI 41f613a Fix DCB and SESSION removal from free object pools 00f2ddd Move some common code used in only one protocol into protocol. 
6fbd0b0 Format Go source with gofmt abfbbcb Fix build failures and internal test suite 31de74a Merge branch 'develop' into MXS-483 20d461d Remove uniqueness constrain on oneshot tasks 6c09288 Add missing error message to converter task 0c2c389 Merge branch 'develop' into MXS-483 fa0accc Set freed memory to NULL after authentication failure 63f24e4 Install cdc_schema.go 5123c21 Fix ALTER TABLE parsing 004acc2 Merge branch 'develop' into MXS-483 f69a671 Remove array foreach macro use a159cd9 qc_sqlite: Add support for SHOW DATABASES 31a2118 Make qc_mysqlembedded optional 27ef30e Changed the default query classifier 359010d Add -h flag as the alias for --host bebc086 Fix minor bugs c7ca253 qc_sqlite: Recognize START [TRANSACTION] 240f8bf qc_sqlite: Collect info from nested SELECTs 93ea393 qc_sqlite: Pass along the relative position of a token cc960af qc_sqlite: Fix incorrect assigment 22a6fef Fix `gtid` avro index table 4c38bef qc_sqlite: STATUS is not a registered word cace998 qc_sqlite: Include all fields of UPDATE 997b19c qc: Add update tests 7601b3f qc_sqlite: Parse "INSERT INTO t VALUES (), ();" correctly ca426f1 qc_sqlite: Handle CREATE TRIGGER f54f963 qc_sqlite: Allow INSERT without INTO e4a1b6d Remove foreign keys from Avro index e4501a2 Merge branch 'develop' into MXS-483 82b9585 Fix MMMon never assigning master status a45a709 qc_mysqlembedded: Find the leaf name 2f3ca8f qc_mysqlembedded: Recognize SQLCOM_REPLACE cc7ad83 qc_mysqlembedded: Pick up fields for INSERT SELECT as well 0e6b39e qc: Cleanup of select.test 9113f4f qc_sqlite: Pickup more fields from INSERT 4d58f98 Dummy query classifier dfe824f Document `query_classifier` option 4aa329b MXS-718: Collect fields of INSERT 53818f2 Modify packet number for SSL backend connection 346f973 qc_sqlite: Accept qualified column names 8a83616 Fix in-memory SQLite table structure 6f2c884 Further backend SSL development 4444e92 qc_sqlite: Extend INSERT grammar 2aebcab qc_sqlite: Add support for TRUNCATE 1a6742e 
qc_sqlite: Accept DEFAULT as value in INSERT 07dec05 qc_sqlite: Crude classification made based on seen keywords a90a579 Add missing function documentation 72bd0cf qc_sqlite: Change CREATE TABLE grammar 6e04bc8 qc: Add INSERT tests 3666bda qc_sqlite: Add SELECT test d27e173 Add server/mysql-test/t/select.test to query_classifier 562d004 qc_sqlite: Cleanup error logging. 819cacb Merge branch 'develop' into MXS-483 0d3a789 Add warnings and comments to Avro row event processing 2fab570 Added support for SET autocommit=1 1aa83cf Code review fix c999f0a Addition of SELECT USER() 8c723da Clean up avro_client.c and avro_file.c eb21ee8 Clean up avro.c 946a284 Added Avro schema to table metadata processing 72f90be qc_sqlite: Add support for CREATE {FUNCTION|PROCEDURE} ... 4a4ab49 qc: Update line number also when skipping a block ffddb2a qc_sqlite: Allow queries using INTERVAL b8b03bd qc_sqlite: Add support for SELECT * FROM tbl2 = tbl1; 77a261a qc_sqlite: Add support for GROUP BY ... WITH ROLLUP 0ead41e cdc_schema now generates lowercase JSON 66e327a Classifier has to be specified explicitly 9074b91 Updated Avrorouter documentation cf06c7a qc_sqlite: Some comments added. f579eff Added simple Go based Avro schema generator f448e90 MXS-419: Added ulimit calls to init scripts b4ad257 Added FindAvro.cmake 56cc9b9 Added the last transaction script to installations 2d52da2 Added temporary avro-alpha package name 6ada071 Fixed cdc_users script 61f0206 Renaming and minor fixes to CDC Python scripts 9d77c32 Moved GTID table tracking to an in-memory database 8ae7cb0 MXS-704: Fixed `which` usage in post-install scripts 195e118 Readwritesplit sessions aren't created if master is down 2be91da Added affected tables to avro diagnostics b185320 QUERY-LAST-TRANSACTION now returns proper table names 90860b5 Log stale master message only once 4859c60 Table name to GTID mapping f77bd23 First steps to backend SSL, not yet working. 
68b5bf0 qc_sqlite: Don't treat TRUE and FALSE as identifiers fca8e59 qc_sqlite: Collect database names as well 6b0e04d qc_sqlite: Add support for SHOW CREATE VIEW 77f4b34 qc_mysqlembedded: Report more, rather than less a73e033 qc_sqlite: Extend builtin functions 9d9650e qc_sqlite: SQL_BUFFER_RESULT must decay to an id 83fe99d qc_sqlite: Support INSERT IGNORE 9d1c9ca Added avrorouter limitations and tutorial 8dd094d qc_sqlite: Recognize builtin functions 2edc3d6 Moved write operations of the maxavro library to a different file 1364e54 Added more comments to the Avro RBR handling code f711588 Added warnign about unsupported field types df0d250 Added SQLite3 based indexing to avrorouter 0c55706 Added GTID event flag check in AVRO processing bfe28dd qc_sqlite: Accept SET GLOBAL|SESSION ... a8d2068 qc_mysqlembedded: Exlude code that won't compile on 5.5.42 16ea0b3 qc_sqlite: Add support for DROP FUNCTION 1c0f1fc qc: Report stats after comparison 02345b2 qc_sqlite: Recognize builtin readonly functions c7a5e75 qc_sqlite: Recognize := 0aa849d qc: Make compare undestand the delimiter command fb0a877 qc_mysqlembedded: Examine Item::SUBSELECT_ITEMs 045cf8d qc: Add missing mtl commands e5c6f45 qc_sqlite: Relax qc_get_type comparison ac3b2df qc_sqlite: Add support for SHOW STATUS 73a34fb qc_sqlite: Add initial support for FLUSH 4ffbe79 qc_sqlite: Extend CREATE TABLE syntax 009af81 qc_sqlite: Add support for SHOW WARNINGS 001de97 qc: Ignore mysqltest constructs 128307d Merge branch 'release-1.4.3' into gpl-master 5e8a06a SET NAMES XXX added 3ca12ba MXS-685: MMMon clears server state before setting it dc4d2b0 Further steps to connection limit, non-working. 
ef70257 MXS-636: Master failures are no longer fatal errors 99f4c64 Updated QUERY-LAST-TRANSACTION format d1ff157 Changed QUERY-LAST-TRANSACTION format to JSON 8b2f1ac Fixed formatting of the avrorouter 61543df Added QUERY-LAST-TRANSACTION command c10d10b qc_sqlite: Add support for SHOW CREATE TABLE 106a38f qc_sqlite: Add support for DROP INDEX 2a85421 qc_sqlite: Extend what can be stated about a table 794cd1c qc_sqlite: Add support for MATCH ... AGAINST dd7b747 qc_sqlite: Accept FULLTEXT and SPATIAL in CREATE TABLE a13d6ce qc_sqlite: Add support for PREPARE and EXECUTE 0c5d29f qc_sqlite: Add support for ANALYZE a6cd32b qc_sqlite: Extend SET syntax 5d47704 qc_sqlite: Pick out fields from UPDATE t SET i = ... 0e05735 qc: Understand --error in server test files 8535975 qc_sqlite: Extend CREATE VIEW syntax b35e638 qc: Igore read type bit if write bit is on 818a814 qc_sqlite: Add support for SHOW VARIABLES 1aa877b qc_sqlite: Add initial support for DO e92913a qc_sqlite: Add support for CREATE VIEW d53a46d qc_sqlite: Recognize bit field literals b'1010' 1fb7977 Added GTID event timestmap into struct gtid_pos 8f95b10 Added new fields in AVRO diagnostics cb4db54 Added tests with large SQL packets to modutil tests e4dbd6b MXS-621: More fixes to log messages at startup 4f1e9ee qc: compare tester can now read server MySQL tests cd8154b qc_sqlite: Allow CHARACTER SET to be specified for column 6f8d053 Added MariaDB 10.1 check for new flags in GTID event 71c471b qc_mysqlembedded: Fix type bits setting 26b00a7 qc_sqlite: Extend ALTER grammar ea6057c qc_sqlite: Handle also pInto when dupping a struct select 2271559 qc_sqlite: Add support for SHOW TABLE STATUS 9caaf27 qc_sqlite: Add support for CREATE ... 
LIKE oldtable cd19932 Merge tag '1.4.2' into master 9e9e4d8 Merge branch 'develop' of https://github.com/mariadb-corporation/maxscale-bsl into develop 267cb60 qc_mysqlembedded: Look into parenthesized comma expressions 77c6ca9 qc_sqlite: Recognize token "<=>" 5ca9a9f qc_sqlite: Allow comma expressions in where clause b08e910 qc_sqlite: Add SELECT options d11e581 qc_sqlite: Some recursion cleanup d53d063 Add but don't invoke connection queue functionality. 6818104 Fix logic error in connections limiter 3c61605 qc_sqlite: Find more affected fields 9af8dfd Allow the classifiers to be specified on the command line 5d4a134 Activate call to protocol for max connections error message. 16638e7 Fix another mistake 234f9e6 Fix mistake 843a6fc Fix mistake. 2c6e9ad Fix errors in config.c; enable call to protocol on connection limit. fd27849 Introduce configuration items for Maximum and Queued Service connections 60d198d Implement very simple connection limit. 84d8f0f Merge remote-tracking branch 'origin/develop' into MXS-177 8a58e63 Merge remote-tracking branch 'origin/develop' into develop 08487cd Add assertion to enforce number of free DCBs not being negative. f73af2f Added MariaDB 10.1 check for new flags in GTID event 23898ec Fix wrong sprintf specifier, trailing white space. 
ea6cfa3 readwritesplit: Cleaned up routeQuery 3858df0 Cleaned up select_connect_backend_servers c38ee13 Added more buffer tests 48816df Added more modutils tests 537eac2 Added tests for modutil_get_complete_packets 22a6095 MXS-669: modutil_get_complete_packets no longer makes the buffer contiguous 51af97e qc_sqlite: Add support for CREATE INDEX e50c573 qc_sqlite: Dig out fields for IN f58c6df qc_sqlite: Dequote table name 319422b qc_sqlite: Accept ENUM as type for a column 5d6a45c qc_sqlite: Allow UNSIGNED to fallback to an id 16a5f20 qc_sqlite: Extend CREATE TABLE syntax d6268da qc_sqlite: Accept RIGHT and FULL OUTER joins 2207415 qc_sqlite: Allow STRAIGHT_JOIN in SELECT 6fee546 qc_sqlite: Pick upp more table names 9de5f84 Remove trailing white space. 758f84d Improve comments and messages in dcb.c and session.c re recycle memory. 1c2de21 Merge remote-tracking branch 'origin/develop' into dcb-optimise 6614944 DCB code tidying. Fix missing spinlock release; remove redundant variables ecd5e5c Remove extra code introduced by merge. 877127a Merge commit '3c0d3e5ab6ddde59da764ec904b517759074a31e' into develop 4275bbe Updated the Connector-C version to 2.2.3 c71042b Some tentative list management code; provide for counting service clients. ad0c8a6 qc_sqlite: Allow empty insert statement 72e75e5 qc_sqlite: Add support for SELECT ... 
INTO cc553fa qc_sqlite: MAXSCALE define can now be used everywhere 3305c6e qc_sqlite: Handle CASE in SELECT 702f62e qc_sqlite: Extend CREATE TABLE grammar 941c212 qc_sqlite: Add support for SHOW [INDEX|INDEXES|KEYS] 6a79136 qc_sqlite: Extend grammar for SHOW TABLES and SHOW COLUMNS f175d2d qc_sqlite: Add SHOW COLUMNS support 6e47951 qc_sqlite: Add support for SHOW TABLES bcfa0e7 qc_mysqlembedded: Return the actual name and not as-name 3e19f2e Fixed qlafilter build failure 810b24e MXS-675: Standardized qlafilter output be92173 qc_sqlite: Exclude alias names from affected fields 9479280 qc_sqlite: Add support for explain EXTENDED 13b0e10 qc_sqlite: Add support for DELETE a6ccfea qc_mysqlembedded: Look at all conditional items b428041 qc_sqlite: Extend SELECT options 83f829f query_classifier: Correctly calculate the length of a GWBUF 2ddb24c query_classifier: Ensure that -- comments are handled fa7a746 qc_sqlite: Allow STRAIGHT_JOIN in SELECTS 6f47819 FindLibUUID update 5ed897b Added FindLibUUID cmake file 16e02bb Added FindLibUUID cmake file aff63e0 MXS-680: qc_mysqlembedded does not look into functions 8a0eeb4 query_classifier: Improve output of compare 6f08185 Query classifier can now convert enums to strings 124e2b9 MXS-679: Correctly collect fields of WHERE 353c97c transaction_safety default is off 896e37b qc_sqlite: Invert stop logic and be more verbose 7a44d4d qc_sqlite: Extend what is accepted in CREATE TABLE 4dbf499 qc_sqlite: Accept FIRST in ALTER TABLE 3f655c0 qc_sqlite: Update table and affected fields for INSERT 8e1e275 qc_sqlite: Make AS optional in CREATE statement 5f2084b qc_sqlite: Add support for ENGINE when creating a table 242f183 qc_sqlite: CREATE paramters handled in the correct place 8ed2e25 qc_sqlite: Trace only when needed 63d4531 qc_sqlite: Update affected fields also from functions 118cdc3 qc_sqlite: Allow multiple index names in USE|IGNORE INDEX 912da76 qc_sqlite: Add initial support for ...IGNORE INDEX... 
0aa7de6 qc_sqlite: Log detailed message on error 3e3bf1a qc_sqlite: Extend create syntax. c4a4572 qc_sqlite: Exclude quoted values 1621f49 Removed MYSQL_EMBEDDED_LIBRARIES d3e324c UUID generation now comes from libuuid e8fe678 qc_sqlite: Enable confitional compilation a9522ba qc_sqlite: Handle X.Y selects 9bc9770 qc_sqlite: Use same stream when outputting padding 366257a qc_sqlite: Add support for UNSIGNED and ZEROFILL d4d90ff qc_sqlite: Add support for DROP VIEW d0519bd qc_sqlite: Extend DROP TABLE syntax c1e4894 qc_sqlite: Add flag to compare for stopping at first error 9fd6344 MXS-674: Maxinfo generates invalid JSON 3c0d3e5 Fix stupid errors. 9d32b2d Include read queue in buffer provided by dcb_read; changes to match. b690797 Fix double spinlock release in random_jkiss. 6a4328f Fix problems of memory not being freed in some error cases. 2112e56 Change DCB and Session handling to recycle memory; fix bug in random_jkiss. 3912f72 MXS-631, MXS-632: Cleaned up default value CMake files 383ccb8 Fixed build failure on MariaDB 5.5 a60bca5 Merge branch '1.2.1-binlog_router_trx' into develop 3c2a062 Fix to crashes in embedded library with MariaDB 10.0 d3fe938 MXS-662: Service protocol check no longer ignores bind address c3da49b qc_sqlite: Update affected fields from everywhere 7a0fab8 qc_sqlite: Allow verbosity of compare test to be controlled 81d6822 qc_sqlite: Cleanup handling of select columns 13e5c59 qc_sqlite: Introduce custom allocation functions 026f27d qc_sqlite: Add support for "USE database" 99079df qc_sqlite: Ignore duplicates when comparing affected fields ca45cd6 qc_sqlite: Add initial support for qc_get_database_names 75970b6 qc_sqlite: Add support for DROP TABLE. b97e45d qc_sqlite: Move get_affected_fields() to other helpers cb0fa96 qc_sqlite: Collect table names of INSERT 3a7c513 qc_mysqlembedded: Only look for created name if CREATE 308b0a4 qc_sqlite: Add support for gc_get_created_table_name. 
0dc4af2 qc_sqlite: Add qc_has_clause() handling to update e9f2d1d qc_sqlite: Update now also provides table names c3192e4 qc_sqlite: Add initial support for get_table_names c51eafd qc_sqlite: Add support for qc_has_clause f318fb2 qc_mysqlembedded: Work around embedded lib bug 4ba2e11 qc_sqlite: Add initial support for qc_get_affected_fields 080dea5 qc_sqlite: Support is_read_query 3f94df1 Fixed compare.cc build failure 868a712 Updated freeing of buffer chains in readwritesplit 9bf7fca Formatted readwritesplit source code de4da2b Add assertion to spinlock release to detect release of already released spinlock. d30955a qc_sqlite: Handle the default case of affected fields. 5d02b3f qc_sqlite: Set operation when creating table 94a334d Add test for comparing qc-output aa6f5d6 Allow a specific query classifier to be loaded explicitly c799d37 Test both qc_mysqlembedded and qc_sqlite f8d9aa1 qc_sqlite: Enable "set @user_var=@@system_var" f190bdc qc_sqlite: Recognize /*!-comments b694b55 Fixed binary Avro format streaming c95fa86 qc_sqlite: Report correctly the type of set autocommit 9cb236c qc_sqlite: Add test case 77b4e62 Ensure classify test checks all types 962039e Change return type of qc_get_type ae00df8 qc_sqlite: Add initial support for the SET statement. 88253c5 qc_sqlite: Rename functions fa48043 Rework of MySQL backend protocol clean up to fix fault. 3851064 qc_sqlite: Correct recognition of system variables (@@xyz). 9d86f7f qc_sqlite: Detect user and system variables. a683297 qc_sqlite: Recognize and accept system variables (@@xyz). 
a4f64dd qc_sqlite: Add initial support for CREATE [TEMPORARY] TABLE f834b4f MXS-661: Only COM_QUERY packets are parsed 30077c1 CMake policies set only for correct versions a166f34 Suppress warning about unknown CMake target 1412730 Added more variables to launchable monitor scripts 358c194 MXS-656: Galera nodes with index 0 can be master again 842aec5 qc_sqlite: Add support for BEGIN, COMMIT, ROLLBACK b9cad6d Add initial support for UPDATE. 95741cb Add initial support for insert. 3796158 Re-install sqlite whenever parse.y has changed 5bcd8cf Ensure that the query is the one passed cf05533 Add support for obtaining the type of a query 400d8b4 Always log the outcome 45cf632 Fixed resource leaks and minor bugs fa9e970 Printout the query when there is a mismatch. 263cb07 All classify to be used with any query classifier ea381b9 Further cleanup of classify.c 23f6f30 Merge pull request #107 from godmodelabs/typo-dpkg 8c2a64e Fixed classify build failure 0c3070b Fixed binlog to Avro conversion bugs b827ba9 MXS-653: Silence maxpasswd 30d981c MXS-654: Add test for checking maxpasswd 984039b Rearrange classify.c 837e46d Add log initialization 1cc7a6e Reformat query_classifier/test/classify.c 065a4e5 Merge branch 'develop' into develop-MXS-544-b-merge ca27f13 Fixed binlog build failure fb81be2 fixed typo dpgk <-> dpkg 1e88d5d Added python based CDC user creation script 040bbdd MXS-633: Monitor permission checks moved to modules cde7595 Master-Slave clusters are now robust by default 158c776 Cleaned up core test suite 94c6e66 Fixed bugs resulting from merge a491e11 Merge remote-tracking branch 'origin/MXS-544-b' into develop-MXS-544-b-merge 30f9f25 Cleaned up avro.c 6286f64 Merge branch 'release-1.4.1' into develop 00206ac MXS-194: Added support for more query types to dbfwfilter 267832b Fixed diagnostic output a64b694 Fixed bugs in avrorouter 8faaba1 Fixed a bug in GTID seeking a5fafb7 Fixed typos in avrorouter documentation 8080379 Added avrorouter documentation fa07d8a 
Fixed dbfwfilter rule parser build failure 744ce0d Constraints are ignored in DDL statement processing 50808c6 Cleaned up avrorouter 47f6032 Merge branch '1.2.1-binlog_router_trx_lint' into develop caa0956 Added missing dependencies to maxscale-core 92df61a Remove parallel make from travis coverity builds fa2b2b4 Added more error logging to Avro record reading 9a98e8b Support for GTID requests and data bursts c2a787b Small diagnostic fix c4cee7e Added format output requested by client 50483c7 Cleaning up of Avro code d485379 Added support for binary Avro protocol c22cdbb Converted Avro GTID from string to integer representation 5795ca9 Added coverity notification email to .travis.yml a06e44d Added coverity_scan to Travis 6b94384 Fixed memory leak in avro_schema.c a11096c Support for db.table request for Avrorouter 4e5cbbf Fixed bugs in Avro record reading a99e427 Fixed minor bugs in avrorouter 01db8ae Fixed errors with CREATE TABLE statements f5f3d7a Diagnostic routine update 209324f Added missing include for log_manager.h e62f764 Added sending of schemas and file rotation 8c8fcbb Added missing log_manager.h include b13942d Changed printf calls in maxavro library to MXS_ERROR 1168962 More lint inspired changes, mainly in blr_master.c and blr_slave.c ced8f2f Fixed directory checks in avrorouter a8ae6be Minor fix to string processing fbd2d95 Fixed typo in dbfwfilter's CMakeLists.txt 29c3cf4 Merge pull request #106 from mariadb-corporation/willfong-patch-1 854d4e9 Add password column name to example 2f956df Moved server state change logging to a common function 007121f Fixed truncated string values 782892b Fix lint errors and warnings for blr_file.c 4f99fc5 Added Avro testing script 2820980 Small fix to help clear lint problems in blr.c 3afeda4 Fixed errors and warnings located by lint ecfff82 Fix most lint problems in blr.c 223689c Added ALTER TABLE support 80bc935 Fix final lint problems with mysql_common protocol functions. 
e068310 Added preliminary alter table parsing 8c723f1 Lint monitor modules fdb5620 Fix lint issues in authenticators. 84f0e04 Added function documentation and renamed files 365d9f5 Tidy up, mainly for lint 2ff3005 Added update rows event processing and event types to avro records 2ae0371 Fixed failing regex and improved data streaming f19206a Renamed avrorouter header aa7174b Moved relpacement and storage of ddl statements to a separate function 0c10be8 Improved client notification and added Avro block size managemet 91405a7 Cleaned up instance creation dd97485 Removed useless vars af64e9e Added CDC authentication with a db file b73a118 Streamline and lint MySQL backend protocol. 65034ce Merge branch 'release-1.4.0' into develop 28f7e4e Added callback for AVRO client async data transmission 628c27a Added MAXAVRO_FILE struct to AVRO_CLIENT 32b3645 Fixed slavelag build failure 7b15542 Added default authentication method for CDC protocol 5f8e20f Renamed maxavro types and formatted files that use them 882cf84 Added more function documentation to maxavro library 9532f0b Fixed CDC protocol build failure 35a1d3a Added support for offsets in client requests 94577ac Fixed, formatted and refactored CDC protocol da9bcad Use the maxavro library to read data from Avro files 3ececee Added low level error checking to maxavro library 01b0b8b Tidy and lint mysql_client.c 943f0a7 Added handling of Avro boolean data types to maxavro library 4c781f7 Cleaned up maxavro library and avrorouter 6b2e85d Renamed functions more consistently and cleaned up code e07158a Moved query event handling to its own function df7d4c0 Added avro_ prefix to rbr.c fcbfceb Added seeking to a position in an Avro file 068243a CDC auth decoding 3584d54 Add checks to simplify downstream logic. 
9b2c323 Removed useless fprintf bd5cd52 Added missing authfunc setup e4aff59 Added record value processing 5cc8615 Added value length functions 7921ecc Merge branch 'MXS-615' into MXS-483 4b09cca Added Travis status to readme.md cca3a48 Simplify interface to max admin authentication. 4739838 Authenticator API update 233505f Maxavrocheck now accepts multiple files 3fdd137 Improved the Avro file handling a6ba913 Merge from MXS-615 417d742 Added maxavrocheck 014f9cf Remove obsolete second parameter from authenticate function in authenticators. ece7ece MaxAdmin authentication converted to a module. Fix quirk in SSL setup. 7c8b37e Moved contents of avro_schema.h into mxs_avro.h d6660cf Improvements to type handling 71ed0cf Protocol API to have entry point for obtaining default authenticator name. 9d35de2 Fixed transaction tracking 5be02a2 Avrorouter internal state is now stored in the Avro file directory 9293464 Added new info to avro diagnostics 06e0e93 Protocol modules can still handle the authentication outside authenticator modules 6d7108b Added JSON output when Requesting an avro file 6188211 Added new CDC protocol state c8af27f CDC authentication uses its own authenticator 6590f94 Factor out qc_get_qtype_str b7880f1 Fix qc_sqlite CMakeLists.txt bd4ff43 Fixed connector-c being updated and built after every make invokation 0d9e57b Fixed non-MariaDB connectors being used in builds 3d3b779 FIX BUG IN CLIENT DCB SHUTDOWN THAT CAN CAUSE CRASHES e45ba33 Fixed Connector-C .cmake files c130189 Fixed connector-c being updated and built after every make invokation 7f3cdf3 Fixed errors on binlog rotation 9d3c83a Remove qc_sqlite 15e8ba5 CDC protocol is now compliant with new protocol structure 4460869 Merge branch 'release-1.4.0' into MXS-483 ea40812 Cleaned up the binlog processing loop cb646ca Add minimal select recognition to qc_sqlite ac1a9c5 Fixed binlogrouter test 85dd227 Re-route sqlite's sqlite3Select. 
7a2e6f3 Update CMakeLists.txt for qc_sqlite 7a751c3 Added timestamps to records and fixed minor bugs f73bdde Avrorouter state storage to disk fcf0488 Fixed Connector-C .cmake files 48b8e4e Merge branch 'MXS-615' into MXS-615-binlog-merge 7c8e19f Add missing dependencies for qc_sqlite bb9b667 Improvements to type handling and binlog position tracking dc66b74 Client UUID added f12fce4 AVRO registration is now handled by avro router 575b809 Add skeleton sqlite-based query classifier. d09d5fc Build sqlite 146d1f9 Fixed BLOB type handling and refined error messages 6e9e521 Added client user to diagnostics 4538bb8 Merge pull request #104 from rasmushoj/develop 7e18d95 Avro router diagnostics routine update 01e3f75 reverted changes in CMakeLists.txt 52f7c78 reverted changes in postinst.in eaed577 Added sqlite 3110100 a58cdda Travis configuration for MaxScale. ... 38b452d MIGRATE FREE CLIENT DATA TO AUTH MODULE; BUG FIXES; TIDY UP 6e64506 Fixed minor bugs aff2411 Enabled CDC protocol f669100 Fixed NULL Avro value being assigned to a field which cannot be NULL 8f6b16a Added row event processing to avrorouter 2939fe0 Updated Avro schema management to use actual column names 9e3b0cb Removed use of RBR related functions in binlogrouter d674903 Formatted avro files fe028d1 DEVELOPMENT OF AUTHENTICATION AS MODULE - WILL NOT WORK YET 977aded Added authenticator modules to the build a2b384f MOVE MYSQL AUTH CODE INTO AUTHENTICATOR MODULES DIRECTORY a5d7484 PRELIMINARY CHANGES TO CREATE AUTHENTICATORS AS MODULES 66cf802 Merge remote-tracking branch 'origin/develop' into MXS-615 bca0a7d MINOR CHANGES TO SATISFY LINT 5a9e397 Added Avrorouter binlog file walking fbc737f Fixed binlogrouter test 3c7c9d7 Added avrorouter main event handling loop 07ad81b Moved common binlogrouter code to a separate file 8c605ed Fixed avrorouter build failures aa1ba05 Moved binlog definitions to a separate header and fixed build failures eee7c55 Added create table statement detection e52b27e Added 
AVRO_INSTANCE and AVRO_CLIENT 0830caa Change test for client DCB to use role being DCB_ROLE_CLIENT_HANDLER. ... 997bbca Change protocols to continue looping if an accept fails; ... 522e42d Make use of dcb_accept and dcb_listen in httpd and telnetd protocols. 4e692b0 Generalise dcb_listen to tailor log messages to different protocols. ... 52c431d Remove support for passing default port number when handling ... afe5abc Fix bug in creation of SSL listener structure; fix bugs in ... 0bd6c77 Merge remote-tracking branch 'origin/MXS-544' into MXS-544-a ... 7598597 Add dcb_listen function to make a given DCB into a listener, ... a275d89 Maxbinlogcheck avro version can detect proper end of file 9bb55a5 Moved row event and table map event handling to a separate file b7d9e09 Add/improve comments, fix mistake with premature return. c598770 First attempt at extracting general code into dcb_accept, ... f20f28f Testing with maxbinlogcheck b3c60b7 Added mysql_binlog files 0ff9971 Added MariaDB/MySQL binary data processing functions 124560c Merge branch '1.2.1-binlog_router_trx' into MXS-483 4deccff New router fro cdc client 2c11434 Fixed test compiler errors c1f7d24 Obliged to merge remote-tracking branch 'origin/develop' ... 1775599 Merge remote-tracking branch 'origin/MXS-544' into Test-dev-544-merge c5317da Small modifications in comments 11c0666 Code cleanup 64a5e9a Merge branch 'release-1.3.0' into MXS-483 2c11e89 First Implementation of CDC
1888 lines
64 KiB
C
1888 lines
64 KiB
C
/*
|
|
** 2005-07-08
|
|
**
|
|
** The author disclaims copyright to this source code. In place of
|
|
** a legal notice, here is a blessing:
|
|
**
|
|
** May you do good and not evil.
|
|
** May you find forgiveness for yourself and forgive others.
|
|
** May you share freely, never taking more than you give.
|
|
**
|
|
*************************************************************************
|
|
** This file contains code associated with the ANALYZE command.
|
|
**
|
|
** The ANALYZE command gather statistics about the content of tables
|
|
** and indices. These statistics are made available to the query planner
|
|
** to help it make better decisions about how to perform queries.
|
|
**
|
|
** The following system tables are or have been supported:
|
|
**
|
|
** CREATE TABLE sqlite_stat1(tbl, idx, stat);
|
|
** CREATE TABLE sqlite_stat2(tbl, idx, sampleno, sample);
|
|
** CREATE TABLE sqlite_stat3(tbl, idx, nEq, nLt, nDLt, sample);
|
|
** CREATE TABLE sqlite_stat4(tbl, idx, nEq, nLt, nDLt, sample);
|
|
**
|
|
** Additional tables might be added in future releases of SQLite.
|
|
** The sqlite_stat2 table is not created or used unless the SQLite version
|
|
** is between 3.6.18 and 3.7.8, inclusive, and unless SQLite is compiled
|
|
** with SQLITE_ENABLE_STAT2. The sqlite_stat2 table is deprecated.
|
|
** The sqlite_stat2 table is superseded by sqlite_stat3, which is only
|
|
** created and used by SQLite versions 3.7.9 and later and with
|
|
** SQLITE_ENABLE_STAT3 defined. The functionality of sqlite_stat3
|
|
** is a superset of sqlite_stat2. The sqlite_stat4 is an enhanced
|
|
** version of sqlite_stat3 and is only available when compiled with
|
|
** SQLITE_ENABLE_STAT4 and in SQLite versions 3.8.1 and later. It is
|
|
** not possible to enable both STAT3 and STAT4 at the same time. If they
|
|
** are both enabled, then STAT4 takes precedence.
|
|
**
|
|
** For most applications, sqlite_stat1 provides all the statistics required
|
|
** for the query planner to make good choices.
|
|
**
|
|
** Format of sqlite_stat1:
|
|
**
|
|
** There is normally one row per index, with the index identified by the
|
|
** name in the idx column. The tbl column is the name of the table to
|
|
** which the index belongs. In each such row, the stat column will be
|
|
** a string consisting of a list of integers. The first integer in this
|
|
** list is the number of rows in the index. (This is the same as the
|
|
** number of rows in the table, except for partial indices.) The second
|
|
** integer is the average number of rows in the index that have the same
|
|
** value in the first column of the index. The third integer is the average
|
|
** number of rows in the index that have the same value for the first two
|
|
** columns. The N-th integer (for N>1) is the average number of rows in
|
|
** the index which have the same value for the first N-1 columns. For
|
|
** a K-column index, there will be K+1 integers in the stat column. If
|
|
** the index is unique, then the last integer will be 1.
|
|
**
|
|
** The list of integers in the stat column can optionally be followed
|
|
** by the keyword "unordered". The "unordered" keyword, if it is present,
|
|
** must be separated from the last integer by a single space. If the
|
|
** "unordered" keyword is present, then the query planner assumes that
|
|
** the index is unordered and will not use the index for a range query.
|
|
**
|
|
** If the sqlite_stat1.idx column is NULL, then the sqlite_stat1.stat
|
|
** column contains a single integer which is the (estimated) number of
|
|
** rows in the table identified by sqlite_stat1.tbl.
|
|
**
|
|
** Format of sqlite_stat2:
|
|
**
|
|
** The sqlite_stat2 is only created and is only used if SQLite is compiled
|
|
** with SQLITE_ENABLE_STAT2 and if the SQLite version number is between
|
|
** 3.6.18 and 3.7.8. The "stat2" table contains additional information
|
|
** about the distribution of keys within an index. The index is identified by
|
|
** the "idx" column and the "tbl" column is the name of the table to which
|
|
** the index belongs. There are usually 10 rows in the sqlite_stat2
|
|
** table for each index.
|
|
**
|
|
** The sqlite_stat2 entries for an index that have sampleno between 0 and 9
|
|
** inclusive are samples of the left-most key value in the index taken at
|
|
** evenly spaced points along the index. Let the number of samples be S
|
|
** (10 in the standard build) and let C be the number of rows in the index.
|
|
** Then the sampled rows are given by:
|
|
**
|
|
** rownumber = (i*C*2 + C)/(S*2)
|
|
**
|
|
** For i between 0 and S-1. Conceptually, the index space is divided into
|
|
** S uniform buckets and the samples are the middle row from each bucket.
|
|
**
|
|
** The format for sqlite_stat2 is recorded here for legacy reference. This
|
|
** version of SQLite does not support sqlite_stat2. It neither reads nor
|
|
** writes the sqlite_stat2 table. This version of SQLite only supports
|
|
** sqlite_stat3.
|
|
**
|
|
** Format for sqlite_stat3:
|
|
**
|
|
** The sqlite_stat3 format is a subset of sqlite_stat4. Hence, the
|
|
** sqlite_stat4 format will be described first. Further information
|
|
** about sqlite_stat3 follows the sqlite_stat4 description.
|
|
**
|
|
** Format for sqlite_stat4:
|
|
**
|
|
** As with sqlite_stat2, the sqlite_stat4 table contains histogram data
|
|
** to aid the query planner in choosing good indices based on the values
|
|
** that indexed columns are compared against in the WHERE clauses of
|
|
** queries.
|
|
**
|
|
** The sqlite_stat4 table contains multiple entries for each index.
|
|
** The idx column names the index and the tbl column is the table of the
|
|
** index. If the idx and tbl columns are the same, then the sample is
|
|
** of the INTEGER PRIMARY KEY. The sample column is a blob which is the
|
|
** binary encoding of a key from the index. The nEq column is a
|
|
** list of integers. The first integer is the approximate number
|
|
** of entries in the index whose left-most column exactly matches
|
|
** the left-most column of the sample. The second integer in nEq
|
|
** is the approximate number of entries in the index where the
|
|
** first two columns match the first two columns of the sample.
|
|
** And so forth. nLt is another list of integers that show the approximate
|
|
** number of entries that are strictly less than the sample. The first
|
|
** integer in nLt contains the number of entries in the index where the
|
|
** left-most column is less than the left-most column of the sample.
|
|
** The K-th integer in the nLt entry is the number of index entries
|
|
** where the first K columns are less than the first K columns of the
|
|
** sample. The nDLt column is like nLt except that it contains the
|
|
** number of distinct entries in the index that are less than the
|
|
** sample.
|
|
**
|
|
** There can be an arbitrary number of sqlite_stat4 entries per index.
|
|
** The ANALYZE command will typically generate sqlite_stat4 tables
|
|
** that contain between 10 and 40 samples which are distributed across
|
|
** the key space, though not uniformly, and which include samples with
|
|
** large nEq values.
|
|
**
|
|
** Format for sqlite_stat3 redux:
|
|
**
|
|
** The sqlite_stat3 table is like sqlite_stat4 except that it only
|
|
** looks at the left-most column of the index. The sqlite_stat3.sample
|
|
** column contains the actual value of the left-most column instead
|
|
** of a blob encoding of the complete index key as is found in
|
|
** sqlite_stat4.sample. The nEq, nLt, and nDLt entries of sqlite_stat3
|
|
** all contain just a single integer which is the same as the first
|
|
** integer in the equivalent columns in sqlite_stat4.
|
|
*/
|
|
#ifndef SQLITE_OMIT_ANALYZE
#include "sqliteInt.h"

/* Select which histogram format is active at compile time.  STAT4 takes
** precedence over STAT3 when both are requested; with neither enabled,
** the sample count collapses to 1 since no samples are ever stored. */
#if defined(SQLITE_ENABLE_STAT4)
# define IsStat4     1
# define IsStat3     0
#elif defined(SQLITE_ENABLE_STAT3)
# define IsStat4     0
# define IsStat3     1
#else
# define IsStat4     0
# define IsStat3     0
# undef SQLITE_STAT4_SAMPLES
# define SQLITE_STAT4_SAMPLES 1
#endif
#define IsStat34    (IsStat3+IsStat4)  /* 1 for STAT3 or STAT4. 0 otherwise */
|
|
|
|
/*
** This routine generates code that opens the sqlite_statN tables.
** The sqlite_stat1 table is always relevant.  sqlite_stat2 is now
** obsolete.  sqlite_stat3 and sqlite_stat4 are only opened when
** appropriate compile-time options are provided.
**
** If the sqlite_statN tables do not previously exist, it is created.
**
** Argument zWhere may be a pointer to a buffer containing a table name,
** or it may be a NULL pointer. If it is not NULL, then all entries in
** the sqlite_statN tables associated with the named table are deleted.
** If zWhere==0, then code is generated to delete all stat table entries.
*/
static void openStatTable(
  Parse *pParse,          /* Parsing context */
  int iDb,                /* The database we are looking in */
  int iStatCur,           /* Open the sqlite_stat1 table on this cursor */
  const char *zWhere,     /* Delete entries for this table or index */
  const char *zWhereType  /* Either "tbl" or "idx" */
){
  /* Table of stat tables to process.  A NULL zCols entry means the table
  ** is not used in this build and must never be created or opened. */
  static const struct {
    const char *zName;
    const char *zCols;
  } aTable[] = {
    { "sqlite_stat1", "tbl,idx,stat" },
#if defined(SQLITE_ENABLE_STAT4)
    { "sqlite_stat4", "tbl,idx,neq,nlt,ndlt,sample" },
    { "sqlite_stat3", 0 },
#elif defined(SQLITE_ENABLE_STAT3)
    { "sqlite_stat3", "tbl,idx,neq,nlt,ndlt,sample" },
    { "sqlite_stat4", 0 },
#else
    { "sqlite_stat3", 0 },
    { "sqlite_stat4", 0 },
#endif
  };
  int i;
  sqlite3 *db = pParse->db;
  Db *pDb;
  Vdbe *v = sqlite3GetVdbe(pParse);
  int aRoot[ArraySize(aTable)];       /* Root page (or root-page register) per table */
  u8 aCreateTbl[ArraySize(aTable)];   /* OPFLAG_P2ISREG if table newly created */

  if( v==0 ) return;
  assert( sqlite3BtreeHoldsAllMutexes(db) );
  assert( sqlite3VdbeDb(v)==db );
  pDb = &db->aDb[iDb];

  /* Create new statistic tables if they do not exist, or clear them
  ** if they do already exist.
  */
  for(i=0; i<ArraySize(aTable); i++){
    const char *zTab = aTable[i].zName;
    Table *pStat;
    if( (pStat = sqlite3FindTable(db, zTab, pDb->zName))==0 ){
      if( aTable[i].zCols ){
        /* The sqlite_statN table does not exist. Create it. Note that a
        ** side-effect of the CREATE TABLE statement is to leave the rootpage
        ** of the new table in register pParse->regRoot. This is important
        ** because the OpenWrite opcode below will be needing it. */
        sqlite3NestedParse(pParse,
            "CREATE TABLE %Q.%s(%s)", pDb->zName, zTab, aTable[i].zCols
        );
        aRoot[i] = pParse->regRoot;
        aCreateTbl[i] = OPFLAG_P2ISREG;
      }
      /* NOTE(review): when zCols==0 and the table is absent, aRoot[i] and
      ** aCreateTbl[i] stay unset; the open loop below stops at the first
      ** zCols==0 entry, so those slots are presumably never read — confirm. */
    }else{
      /* The table already exists. If zWhere is not NULL, delete all entries
      ** associated with the table zWhere. If zWhere is NULL, delete the
      ** entire contents of the table. */
      aRoot[i] = pStat->tnum;
      aCreateTbl[i] = 0;
      sqlite3TableLock(pParse, iDb, aRoot[i], 1, zTab);
      if( zWhere ){
        sqlite3NestedParse(pParse,
           "DELETE FROM %Q.%s WHERE %s=%Q",
           pDb->zName, zTab, zWhereType, zWhere
        );
      }else{
        /* The sqlite_stat[134] table already exists.  Delete all rows. */
        sqlite3VdbeAddOp2(v, OP_Clear, aRoot[i], iDb);
      }
    }
  }

  /* Open the sqlite_stat[134] tables for writing.  Only entries with a
  ** non-NULL zCols are opened; the cursor numbers are consecutive starting
  ** at iStatCur. */
  for(i=0; aTable[i].zCols; i++){
    assert( i<ArraySize(aTable) );
    sqlite3VdbeAddOp4Int(v, OP_OpenWrite, iStatCur+i, aRoot[i], iDb, 3);
    sqlite3VdbeChangeP5(v, aCreateTbl[i]);
    VdbeComment((v, aTable[i].zName));
  }
}
|
|
|
|
/*
** Recommended number of samples for sqlite_stat4.  May be overridden at
** compile time; forced to 1 above when neither STAT3 nor STAT4 is enabled.
*/
#ifndef SQLITE_STAT4_SAMPLES
# define SQLITE_STAT4_SAMPLES 24
#endif
|
|
|
|
/*
** Three SQL functions - stat_init(), stat_push(), and stat_get() -
** share an instance of the following structure to hold their state
** information.
*/
typedef struct Stat4Accum Stat4Accum;
typedef struct Stat4Sample Stat4Sample;

/* A single candidate or selected sample row for sqlite_stat[34].  In
** stat1-only builds the structure shrinks to just the two counter arrays. */
struct Stat4Sample {
  tRowcnt *anEq;                  /* sqlite_stat4.nEq */
  tRowcnt *anDLt;                 /* sqlite_stat4.nDLt */
#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
  tRowcnt *anLt;                  /* sqlite_stat4.nLt */
  union {
    i64 iRowid;                     /* Rowid in main table of the key */
    u8 *aRowid;                     /* Key for WITHOUT ROWID tables */
  } u;
  u32 nRowid;                     /* Sizeof aRowid[]; 0 when u.iRowid is active */
  u8 isPSample;                   /* True if a periodic sample */
  int iCol;                       /* If !isPSample, the reason for inclusion */
  u32 iHash;                      /* Tiebreaker hash */
#endif
};

/* Accumulator shared by the stat_init/stat_push/stat_get SQL functions.
** Allocated as one block by statInit(); freed by stat4Destructor(). */
struct Stat4Accum {
  tRowcnt nRow;             /* Number of rows in the entire table */
  tRowcnt nPSample;         /* How often to do a periodic sample */
  int nCol;                 /* Number of columns in index + pk/rowid */
  int nKeyCol;              /* Number of index columns w/o the pk/rowid */
  int mxSample;             /* Maximum number of samples to accumulate */
  Stat4Sample current;      /* Current row as a Stat4Sample */
  u32 iPrn;                 /* Pseudo-random number used for sampling */
  Stat4Sample *aBest;       /* Array of nCol best samples */
  int iMin;                 /* Index in a[] of entry with minimum score */
  int nSample;              /* Current number of samples */
  int iGet;                 /* Index of current sample accessed by stat_get() */
  Stat4Sample *a;           /* Array of mxSample Stat4Sample objects */
  sqlite3 *db;              /* Database connection, for malloc() */
};
|
|
|
|
/* Reclaim memory used by a Stat4Sample.
**
** Releases the heap-allocated rowid blob, if one is held.  The counter
** arrays are carved from the enclosing Stat4Accum allocation and are not
** freed here.
*/
#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
static void sampleClear(sqlite3 *db, Stat4Sample *p){
  assert( db!=0 );
  if( p->nRowid==0 ) return;    /* Nothing allocated: u.iRowid is in use */
  sqlite3DbFree(db, p->u.aRowid);
  p->nRowid = 0;
}
#endif
|
|
|
|
/* Initialize the BLOB value of a ROWID.
**
** Copies the n bytes at pData into a fresh allocation owned by sample p,
** discarding any previous rowid blob.  On OOM the sample is left with no
** rowid (nRowid==0).
*/
#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
static void sampleSetRowid(sqlite3 *db, Stat4Sample *p, int n, const u8 *pData){
  u8 *pCopy;
  assert( db!=0 );
  if( p->nRowid ) sqlite3DbFree(db, p->u.aRowid);
  pCopy = sqlite3DbMallocRawNN(db, n);
  p->u.aRowid = pCopy;
  if( pCopy==0 ){
    p->nRowid = 0;              /* OOM: record that no blob is held */
  }else{
    p->nRowid = n;
    memcpy(pCopy, pData, n);
  }
}
#endif
|
|
|
|
/* Initialize the INTEGER value of a ROWID.
**
** Stores iRowid in sample p, first releasing any rowid blob the sample
** previously owned.
*/
#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
static void sampleSetRowidInt64(sqlite3 *db, Stat4Sample *p, i64 iRowid){
  assert( db!=0 );
  if( p->nRowid!=0 ){
    sqlite3DbFree(db, p->u.aRowid);
    p->nRowid = 0;
  }
  p->u.iRowid = iRowid;
}
#endif
|
|
|
|
|
|
/*
** Copy the contents of object (*pFrom) into (*pTo).
**
** Scalar fields and the three counter arrays are copied directly; the
** rowid is duplicated via the sampleSetRowid*() helpers so that pTo owns
** its own blob copy when pFrom holds one.
*/
#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
static void sampleCopy(Stat4Accum *p, Stat4Sample *pTo, Stat4Sample *pFrom){
  size_t nByte = sizeof(tRowcnt)*p->nCol;   /* Size of each counter array */

  pTo->isPSample = pFrom->isPSample;
  pTo->iCol = pFrom->iCol;
  pTo->iHash = pFrom->iHash;
  memcpy(pTo->anEq, pFrom->anEq, nByte);
  memcpy(pTo->anLt, pFrom->anLt, nByte);
  memcpy(pTo->anDLt, pFrom->anDLt, nByte);
  if( pFrom->nRowid==0 ){
    sampleSetRowidInt64(p->db, pTo, pFrom->u.iRowid);
  }else{
    sampleSetRowid(p->db, pTo, pFrom->nRowid, pFrom->u.aRowid);
  }
}
#endif
|
|
|
|
/*
|
|
** Reclaim all memory of a Stat4Accum structure.
|
|
*/
|
|
static void stat4Destructor(void *pOld){
|
|
Stat4Accum *p = (Stat4Accum*)pOld;
|
|
#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
|
|
int i;
|
|
for(i=0; i<p->nCol; i++) sampleClear(p->db, p->aBest+i);
|
|
for(i=0; i<p->mxSample; i++) sampleClear(p->db, p->a+i);
|
|
sampleClear(p->db, &p->current);
|
|
#endif
|
|
sqlite3DbFree(p->db, p);
|
|
}
|
|
|
|
/*
** Implementation of the stat_init(N,K,C) SQL function. The three parameters
** are:
**     N:    The number of columns in the index including the rowid/pk (note 1)
**     K:    The number of columns in the index excluding the rowid/pk.
**     C:    The number of rows in the index (note 2)
**
** Note 1:  In the special case of the covering index that implements a
** WITHOUT ROWID table, N is the number of PRIMARY KEY columns, not the
** total number of columns in the table.
**
** Note 2:  C is only used for STAT3 and STAT4.
**
** For indexes on ordinary rowid tables, N==K+1.  But for indexes on
** WITHOUT ROWID tables, N=K+P where P is the number of columns in the
** PRIMARY KEY of the table.  The covering index that implements the
** original WITHOUT ROWID table as N==K as a special case.
**
** This routine allocates the Stat4Accum object in heap memory. The return
** value is a pointer to the Stat4Accum object.  The datatype of the
** return value is BLOB, but it is really just a pointer to the Stat4Accum
** object.
*/
static void statInit(
  sqlite3_context *context,
  int argc,
  sqlite3_value **argv
){
  Stat4Accum *p;
  int nCol;                    /* Number of columns in index being sampled */
  int nKeyCol;                 /* Number of key columns */
  int nColUp;                  /* nCol rounded up for alignment */
  int n;                       /* Bytes of space to allocate */
  sqlite3 *db;                 /* Database connection */
#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
  int mxSample = SQLITE_STAT4_SAMPLES;
#endif

  /* Decode the three function arguments */
  UNUSED_PARAMETER(argc);
  nCol = sqlite3_value_int(argv[0]);
  assert( nCol>0 );
  /* Round nCol up to an even value when tRowcnt is narrower than 8 bytes,
  ** so the tRowcnt arrays carved out below stay 8-byte aligned. */
  nColUp = sizeof(tRowcnt)<8 ? (nCol+1)&~1 : nCol;
  nKeyCol = sqlite3_value_int(argv[1]);
  assert( nKeyCol<=nCol );
  assert( nKeyCol>0 );

  /* Allocate the space required for the Stat4Accum object.  Everything —
  ** the accumulator, the current-row arrays, the a[]/aBest[] sample arrays
  ** and their per-sample counter arrays — lives in this one allocation. */
  n = sizeof(*p)
    + sizeof(tRowcnt)*nColUp                  /* Stat4Accum.anEq */
    + sizeof(tRowcnt)*nColUp                  /* Stat4Accum.anDLt */
#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
    + sizeof(tRowcnt)*nColUp                  /* Stat4Accum.anLt */
    + sizeof(Stat4Sample)*(nCol+mxSample)     /* Stat4Accum.aBest[], a[] */
    + sizeof(tRowcnt)*3*nColUp*(nCol+mxSample)
#endif
  ;
  db = sqlite3_context_db_handle(context);
  p = sqlite3DbMallocZero(db, n);
  if( p==0 ){
    sqlite3_result_error_nomem(context);
    return;
  }

  p->db = db;
  p->nRow = 0;
  p->nCol = nCol;
  p->nKeyCol = nKeyCol;
  /* Carve the current-row counter arrays from the space just past *p */
  p->current.anDLt = (tRowcnt*)&p[1];
  p->current.anEq = &p->current.anDLt[nColUp];

#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
  {
    u8 *pSpace;                     /* Allocated space not yet assigned */
    int i;                          /* Used to iterate through p->aSample[] */

    p->iGet = -1;
    p->mxSample = mxSample;
    p->nPSample = (tRowcnt)(sqlite3_value_int64(argv[2])/(mxSample/3+1) + 1);
    p->current.anLt = &p->current.anEq[nColUp];
    /* Seed the sampling tiebreaker PRNG from nCol and the row count */
    p->iPrn = 0x689e962d*(u32)nCol ^ 0xd0944565*(u32)sqlite3_value_int(argv[2]);

    /* Set up the Stat4Accum.a[] and aBest[] arrays */
    p->a = (struct Stat4Sample*)&p->current.anLt[nColUp];
    p->aBest = &p->a[mxSample];
    pSpace = (u8*)(&p->a[mxSample+nCol]);
    for(i=0; i<(mxSample+nCol); i++){
      p->a[i].anEq = (tRowcnt *)pSpace; pSpace += (sizeof(tRowcnt) * nColUp);
      p->a[i].anLt = (tRowcnt *)pSpace; pSpace += (sizeof(tRowcnt) * nColUp);
      p->a[i].anDLt = (tRowcnt *)pSpace; pSpace += (sizeof(tRowcnt) * nColUp);
    }
    /* Verify the carving consumed exactly the n bytes allocated above */
    assert( (pSpace - (u8*)p)==n );

    for(i=0; i<nCol; i++){
      p->aBest[i].iCol = i;
    }
  }
#endif

  /* Return a pointer to the allocated object to the caller.  Note that
  ** only the pointer (the 2nd parameter) matters.  The size of the object
  ** (given by the 3rd parameter) is never used and can be any positive
  ** value. */
  sqlite3_result_blob(context, p, sizeof(*p), stat4Destructor);
}
|
|
/* FuncDef for the stat_init() SQL function.  nArg is 2 for stat1-only
** builds and 3 when STAT3 or STAT4 is enabled (the extra argument is the
** row count C). */
static const FuncDef statInitFuncdef = {
  2+IsStat34,      /* nArg */
  SQLITE_UTF8,     /* funcFlags */
  0,               /* pUserData */
  0,               /* pNext */
  statInit,        /* xSFunc */
  0,               /* xFinalize */
  "stat_init",     /* zName */
  0,               /* pHash */
  0                /* pDestructor */
};
|
|
|
|
#ifdef SQLITE_ENABLE_STAT4
/*
** pNew and pOld are both candidate non-periodic samples selected for
** the same column (pNew->iCol==pOld->iCol). Ignoring this column and
** considering only any trailing columns and the sample hash value, this
** function returns true if sample pNew is to be preferred over pOld.
** In other words, if we assume that the cardinalities of the selected
** column for pNew and pOld are equal, is pNew to be preferred over pOld.
**
** This function assumes that for each argument sample, the contents of
** the anEq[] array from pSample->anEq[pSample->iCol+1] onwards are valid.
*/
static int sampleIsBetterPost(
  Stat4Accum *pAccum,
  Stat4Sample *pNew,
  Stat4Sample *pOld
){
  int nCol = pAccum->nCol;
  int i = pNew->iCol+1;

  assert( pNew->iCol==pOld->iCol );

  /* Compare the trailing anEq[] entries column by column; the first
  ** difference decides the winner. */
  while( i<nCol ){
    tRowcnt nEqNew = pNew->anEq[i];
    tRowcnt nEqOld = pOld->anEq[i];
    if( nEqNew!=nEqOld ) return nEqNew>nEqOld;
    i++;
  }

  /* All trailing columns equal: fall back to the tiebreaker hash. */
  return pNew->iHash>pOld->iHash;
}
#endif
|
|
|
|
#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
/*
** Return true if pNew is to be preferred over pOld.
**
** This function assumes that for each argument sample, the contents of
** the anEq[] array from pSample->anEq[pSample->iCol] onwards are valid.
*/
static int sampleIsBetter(
  Stat4Accum *pAccum,
  Stat4Sample *pNew,
  Stat4Sample *pOld
){
  tRowcnt nEqNew = pNew->anEq[pNew->iCol];
  tRowcnt nEqOld = pOld->anEq[pOld->iCol];

  assert( pOld->isPSample==0 && pNew->isPSample==0 );
  assert( IsStat4 || (pNew->iCol==0 && pOld->iCol==0) );

#ifdef SQLITE_ENABLE_STAT4
  /* Higher cardinality at the selected column wins outright. */
  if( nEqNew!=nEqOld ) return nEqNew>nEqOld;
  /* Equal cardinality: a sample for an earlier column is preferred. */
  if( pNew->iCol!=pOld->iCol ) return pNew->iCol<pOld->iCol;
  /* Same column too: decide on the trailing columns and hash. */
  return sampleIsBetterPost(pAccum, pNew, pOld);
#else
  if( nEqNew>nEqOld ) return 1;
  if( nEqNew<nEqOld ) return 0;
  /* Equal cardinality: fall back to the tiebreaker hash. */
  return pNew->iHash>pOld->iHash;
#endif
}
#endif
|
|
|
|
/*
** Copy the contents of sample *pNew into the p->a[] array. If necessary,
** remove the least desirable sample from p->a[] to make room.
**
** The first nEqZero entries of the inserted sample's anEq[] array are
** zeroed - they are filled in later by samplePushPrevious() once the
** true counts are known.
*/
static void sampleInsert(Stat4Accum *p, Stat4Sample *pNew, int nEqZero){
  Stat4Sample *pSample = 0;
  int i;

  assert( IsStat4 || nEqZero==0 );

#ifdef SQLITE_ENABLE_STAT4
  if( pNew->isPSample==0 ){
    Stat4Sample *pUpgrade = 0;
    assert( pNew->anEq[pNew->iCol]>0 );

    /* This sample is being added because the prefix that ends in column
    ** iCol occurs many times in the table. However, if we have already
    ** added a sample that shares this prefix, there is no need to add
    ** this one. Instead, upgrade the priority of the highest priority
    ** existing sample that shares this prefix. */
    for(i=p->nSample-1; i>=0; i--){
      Stat4Sample *pOld = &p->a[i];
      /* anEq[pNew->iCol]==0 means pOld was inserted with this column's
      ** count still pending, i.e. it shares the current prefix. */
      if( pOld->anEq[pNew->iCol]==0 ){
        if( pOld->isPSample ) return;
        assert( pOld->iCol>pNew->iCol );
        assert( sampleIsBetter(p, pNew, pOld) );
        if( pUpgrade==0 || sampleIsBetter(p, pOld, pUpgrade) ){
          pUpgrade = pOld;
        }
      }
    }
    if( pUpgrade ){
      pUpgrade->iCol = pNew->iCol;
      pUpgrade->anEq[pUpgrade->iCol] = pNew->anEq[pUpgrade->iCol];
      goto find_new_min;
    }
  }
#endif

  /* If necessary, remove sample iMin to make room for the new sample. */
  if( p->nSample>=p->mxSample ){
    Stat4Sample *pMin = &p->a[p->iMin];
    /* Save pMin's count arrays so their allocations can be recycled by
    ** the vacated slot at the end of the array. */
    tRowcnt *anEq = pMin->anEq;
    tRowcnt *anLt = pMin->anLt;
    tRowcnt *anDLt = pMin->anDLt;
    sampleClear(p->db, pMin);
    memmove(pMin, &pMin[1], sizeof(p->a[0])*(p->nSample-p->iMin-1));
    pSample = &p->a[p->nSample-1];
    pSample->nRowid = 0;
    pSample->anEq = anEq;
    pSample->anDLt = anDLt;
    pSample->anLt = anLt;
    p->nSample = p->mxSample-1;
  }

  /* The "rows less-than" for the rowid column must be greater than that
  ** for the last sample in the p->a[] array. Otherwise, the samples would
  ** be out of order. */
#ifdef SQLITE_ENABLE_STAT4
  assert( p->nSample==0
       || pNew->anLt[p->nCol-1] > p->a[p->nSample-1].anLt[p->nCol-1] );
#endif

  /* Insert the new sample */
  pSample = &p->a[p->nSample];
  sampleCopy(p, pSample, pNew);
  p->nSample++;

  /* Zero the first nEqZero entries in the anEq[] array. */
  memset(pSample->anEq, 0, sizeof(tRowcnt)*nEqZero);

#ifdef SQLITE_ENABLE_STAT4
 find_new_min:
#endif
  /* If the array is now full, recompute which (non-periodic) sample is
  ** the least desirable, so the next insertion knows what to evict. */
  if( p->nSample>=p->mxSample ){
    int iMin = -1;
    for(i=0; i<p->mxSample; i++){
      if( p->a[i].isPSample ) continue;
      if( iMin<0 || sampleIsBetter(p, &p->a[iMin], &p->a[i]) ){
        iMin = i;
      }
    }
    assert( iMin>=0 );
    p->iMin = iMin;
  }
}
|
|
#endif /* SQLITE_ENABLE_STAT3_OR_STAT4 */
|
|
|
|
/*
** Field iChng of the index being scanned has changed. So at this point
** p->current contains a sample that reflects the previous row of the
** index. The value of anEq[iChng] and subsequent anEq[] elements are
** correct at this point.
*/
static void samplePushPrevious(Stat4Accum *p, int iChng){
#ifdef SQLITE_ENABLE_STAT4
  int i;

  /* Check if any samples from the aBest[] array should be pushed
  ** into IndexSample.a[] at this point. */
  for(i=(p->nCol-2); i>=iChng; i--){
    Stat4Sample *pBest = &p->aBest[i];
    /* The count for column i was pending until now; copy it in. */
    pBest->anEq[i] = p->current.anEq[i];
    if( p->nSample<p->mxSample || sampleIsBetter(p, pBest, &p->a[p->iMin]) ){
      sampleInsert(p, pBest, i);
    }
  }

  /* Update the anEq[] fields of any samples already collected. */
  for(i=p->nSample-1; i>=0; i--){
    int j;
    for(j=iChng; j<p->nCol; j++){
      /* Zero entries were left pending by sampleInsert(); fill them in
      ** now that the true count for this prefix is known. */
      if( p->a[i].anEq[j]==0 ) p->a[i].anEq[j] = p->current.anEq[j];
    }
  }
#endif

#if defined(SQLITE_ENABLE_STAT3) && !defined(SQLITE_ENABLE_STAT4)
  if( iChng==0 ){
    tRowcnt nLt = p->current.anLt[0];
    tRowcnt nEq = p->current.anEq[0];

    /* Check if this is to be a periodic sample. If so, add it. */
    if( (nLt/p->nPSample)!=(nLt+nEq)/p->nPSample ){
      p->current.isPSample = 1;
      sampleInsert(p, &p->current, 0);
      p->current.isPSample = 0;
    }else

    /* Or if it is a non-periodic sample. Add it in this case too.
    ** (NB: this "if" is the else-branch of the test above.) */
    if( p->nSample<p->mxSample
     || sampleIsBetter(p, &p->current, &p->a[p->iMin])
    ){
      sampleInsert(p, &p->current, 0);
    }
  }
#endif

#ifndef SQLITE_ENABLE_STAT3_OR_STAT4
  UNUSED_PARAMETER( p );
  UNUSED_PARAMETER( iChng );
#endif
}
|
|
|
|
/*
** Implementation of the stat_push SQL function:  stat_push(P,C,R)
** Arguments:
**
**    P     Pointer to the Stat4Accum object created by stat_init()
**    C     Index of left-most column to differ from previous row
**    R     Rowid for the current row.  Might be a key record for
**          WITHOUT ROWID tables.
**
** This SQL function always returns NULL.  It's purpose it to accumulate
** statistical data and/or samples in the Stat4Accum object about the
** index being analyzed.  The stat_get() SQL function will later be used to
** extract relevant information for constructing the sqlite_statN tables.
**
** The R parameter is only used for STAT3 and STAT4
*/
static void statPush(
  sqlite3_context *context,
  int argc,
  sqlite3_value **argv
){
  int i;

  /* The three function arguments */
  Stat4Accum *p = (Stat4Accum*)sqlite3_value_blob(argv[0]);
  int iChng = sqlite3_value_int(argv[1]);

  UNUSED_PARAMETER( argc );
  UNUSED_PARAMETER( context );
  assert( p->nCol>0 );
  assert( iChng<p->nCol );

  if( p->nRow==0 ){
    /* This is the first call to this function. Do initialization. */
    for(i=0; i<p->nCol; i++) p->current.anEq[i] = 1;
  }else{
    /* Second and subsequent calls get processed here */
    samplePushPrevious(p, iChng);

    /* Update anDLt[], anLt[] and anEq[] to reflect the values that apply
    ** to the current row of the index. Columns left of iChng repeated,
    ** columns at or right of iChng changed. */
    for(i=0; i<iChng; i++){
      p->current.anEq[i]++;
    }
    for(i=iChng; i<p->nCol; i++){
      p->current.anDLt[i]++;
#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
      p->current.anLt[i] += p->current.anEq[i];
#endif
      p->current.anEq[i] = 1;
    }
  }
  p->nRow++;
#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
  /* Record the rowid (or WITHOUT ROWID key blob) of the current row. */
  if( sqlite3_value_type(argv[2])==SQLITE_INTEGER ){
    sampleSetRowidInt64(p->db, &p->current, sqlite3_value_int64(argv[2]));
  }else{
    sampleSetRowid(p->db, &p->current, sqlite3_value_bytes(argv[2]),
                                       sqlite3_value_blob(argv[2]));
  }
  /* Advance the pseudo-random hash used to break sample-selection ties
  ** (linear congruential generator). */
  p->current.iHash = p->iPrn = p->iPrn*1103515245 + 12345;
#endif

#ifdef SQLITE_ENABLE_STAT4
  {
    tRowcnt nLt = p->current.anLt[p->nCol-1];

    /* Check if this is to be a periodic sample. If so, add it. */
    if( (nLt/p->nPSample)!=(nLt+1)/p->nPSample ){
      p->current.isPSample = 1;
      p->current.iCol = 0;
      sampleInsert(p, &p->current, p->nCol-1);
      p->current.isPSample = 0;
    }

    /* Update the aBest[] array. */
    for(i=0; i<(p->nCol-1); i++){
      p->current.iCol = i;
      if( i>=iChng || sampleIsBetterPost(p, &p->current, &p->aBest[i]) ){
        sampleCopy(p, &p->aBest[i], &p->current);
      }
    }
  }
#endif
}
|
|
/*
** FuncDef for the stat_push() SQL function.  nArg is 2+IsStat34: the
** rowid argument (R) is only passed when STAT3/STAT4 is compiled in.
*/
static const FuncDef statPushFuncdef = {
  2+IsStat34,      /* nArg */
  SQLITE_UTF8,     /* funcFlags */
  0,               /* pUserData */
  0,               /* pNext */
  statPush,        /* xSFunc */
  0,               /* xFinalize */
  "stat_push",     /* zName */
  0,               /* pHash */
  0                /* pDestructor */
};
|
|
|
|
#define STAT_GET_STAT1 0 /* "stat" column of stat1 table */
|
|
#define STAT_GET_ROWID 1 /* "rowid" column of stat[34] entry */
|
|
#define STAT_GET_NEQ 2 /* "neq" column of stat[34] entry */
|
|
#define STAT_GET_NLT 3 /* "nlt" column of stat[34] entry */
|
|
#define STAT_GET_NDLT 4 /* "ndlt" column of stat[34] entry */
|
|
|
|
/*
** Implementation of the stat_get(P,J) SQL function.  This routine is
** used to query statistical information that has been gathered into
** the Stat4Accum object by prior calls to stat_push().  The P parameter
** has type BLOB but it is really just a pointer to the Stat4Accum object.
** The content to returned is determined by the parameter J
** which is one of the STAT_GET_xxxx values defined above.
**
** If neither STAT3 nor STAT4 are enabled, then J is always
** STAT_GET_STAT1 and is hence omitted and this routine becomes
** a one-parameter function, stat_get(P), that always returns the
** stat1 table entry information.
*/
static void statGet(
  sqlite3_context *context,
  int argc,
  sqlite3_value **argv
){
  Stat4Accum *p = (Stat4Accum*)sqlite3_value_blob(argv[0]);
#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
  /* STAT3 and STAT4 have a parameter on this routine. */
  int eCall = sqlite3_value_int(argv[1]);
  assert( argc==2 );
  assert( eCall==STAT_GET_STAT1 || eCall==STAT_GET_NEQ
       || eCall==STAT_GET_ROWID || eCall==STAT_GET_NLT
       || eCall==STAT_GET_NDLT
  );
  /* NB: the "{...}" block below is the body of this "if" when STAT3/4
  ** is enabled, and unconditional code otherwise. */
  if( eCall==STAT_GET_STAT1 )
#else
  assert( argc==1 );
#endif
  {
    /* Return the value to store in the "stat" column of the sqlite_stat1
    ** table for this index.
    **
    ** The value is a string composed of a list of integers describing
    ** the index. The first integer in the list is the total number of
    ** entries in the index. There is one additional integer in the list
    ** for each indexed column. This additional integer is an estimate of
    ** the number of rows matched by a stabbing query on the index using
    ** a key with the corresponding number of fields. In other words,
    ** if the index is on columns (a,b) and the sqlite_stat1 value is
    ** "100 10 2", then SQLite estimates that:
    **
    **   * the index contains 100 rows,
    **   * "WHERE a=?" matches 10 rows, and
    **   * "WHERE a=? AND b=?" matches 2 rows.
    **
    ** If D is the count of distinct values and K is the total number of
    ** rows, then each estimate is computed as:
    **
    **        I = (K+D-1)/D
    */
    char *z;
    int i;

    /* 24 chars per integer plus separator; +1 slot for the row count. */
    char *zRet = sqlite3MallocZero( (p->nKeyCol+1)*25 );
    if( zRet==0 ){
      sqlite3_result_error_nomem(context);
      return;
    }

    sqlite3_snprintf(24, zRet, "%llu", (u64)p->nRow);
    z = zRet + sqlite3Strlen30(zRet);
    for(i=0; i<p->nKeyCol; i++){
      u64 nDistinct = p->current.anDLt[i] + 1;
      u64 iVal = (p->nRow + nDistinct - 1) / nDistinct;
      sqlite3_snprintf(24, z, " %llu", iVal);
      z += sqlite3Strlen30(z);
      assert( p->current.anEq[i] );
    }
    assert( z[0]=='\0' && z>zRet );

    /* sqlite3_free() takes ownership of zRet */
    sqlite3_result_text(context, zRet, -1, sqlite3_free);
  }
#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
  else if( eCall==STAT_GET_ROWID ){
    if( p->iGet<0 ){
      /* First ROWID request: flush any pending samples, then start the
      ** iteration over p->a[] from the beginning. */
      samplePushPrevious(p, 0);
      p->iGet = 0;
    }
    if( p->iGet<p->nSample ){
      Stat4Sample *pS = p->a + p->iGet;
      if( pS->nRowid==0 ){
        sqlite3_result_int64(context, pS->u.iRowid);
      }else{
        sqlite3_result_blob(context, pS->u.aRowid, pS->nRowid,
                            SQLITE_TRANSIENT);
      }
    }
    /* If p->iGet>=p->nSample the result is left as NULL, which the
    ** generated VDBE code uses to detect end-of-samples. */
  }else{
    tRowcnt *aCnt = 0;

    assert( p->iGet<p->nSample );
    switch( eCall ){
      case STAT_GET_NEQ:  aCnt = p->a[p->iGet].anEq; break;
      case STAT_GET_NLT:  aCnt = p->a[p->iGet].anLt; break;
      default: {
        /* STAT_GET_NDLT is requested last for each sample, so advance
        ** the iterator here. */
        aCnt = p->a[p->iGet].anDLt;
        p->iGet++;
        break;
      }
    }

    if( IsStat3 ){
      sqlite3_result_int64(context, (i64)aCnt[0]);
    }else{
      char *zRet = sqlite3MallocZero(p->nCol * 25);
      if( zRet==0 ){
        sqlite3_result_error_nomem(context);
      }else{
        int i;
        char *z = zRet;
        for(i=0; i<p->nCol; i++){
          sqlite3_snprintf(24, z, "%llu ", (u64)aCnt[i]);
          z += sqlite3Strlen30(z);
        }
        assert( z[0]=='\0' && z>zRet );
        /* Trim the trailing space left by the "%llu " format */
        z[-1] = '\0';
        sqlite3_result_text(context, zRet, -1, sqlite3_free);
      }
    }
  }
#endif /* SQLITE_ENABLE_STAT3_OR_STAT4 */
#ifndef SQLITE_DEBUG
  UNUSED_PARAMETER( argc );
#endif
}
|
|
/*
** FuncDef for the stat_get() SQL function.  nArg is 1+IsStat34: the
** STAT_GET_xxx selector (J) is only passed when STAT3/STAT4 is
** compiled in.
*/
static const FuncDef statGetFuncdef = {
  1+IsStat34,      /* nArg */
  SQLITE_UTF8,     /* funcFlags */
  0,               /* pUserData */
  0,               /* pNext */
  statGet,         /* xSFunc */
  0,               /* xFinalize */
  "stat_get",      /* zName */
  0,               /* pHash */
  0                /* pDestructor */
};
|
|
|
|
/*
** Generate VDBE code that invokes stat_get(P,J), leaving the result in
** register regOut.  regStat4 holds the Stat4Accum pointer (P); iParam
** is the STAT_GET_xxx selector (J), which is only passed as a second
** argument when STAT3/STAT4 support is compiled in.
*/
static void callStatGet(Vdbe *v, int regStat4, int iParam, int regOut){
  /* regOut must not alias the argument registers regStat4..regStat4+1 */
  assert( regOut!=regStat4 && regOut!=regStat4+1 );
#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
  sqlite3VdbeAddOp2(v, OP_Integer, iParam, regStat4+1);
#elif SQLITE_DEBUG
  assert( iParam==STAT_GET_STAT1 );
#else
  UNUSED_PARAMETER( iParam );
#endif
  sqlite3VdbeAddOp4(v, OP_Function0, 0, regStat4, regOut,
                    (char*)&statGetFuncdef, P4_FUNCDEF);
  sqlite3VdbeChangeP5(v, 1 + IsStat34);
}
|
|
|
|
/*
** Generate code to do an analysis of all indices associated with
** a single table.
*/
static void analyzeOneTable(
  Parse *pParse,   /* Parser context */
  Table *pTab,     /* Table whose indices are to be analyzed */
  Index *pOnlyIdx, /* If not NULL, only analyze this one index */
  int iStatCur,    /* Index of VdbeCursor that writes the sqlite_stat1 table */
  int iMem,        /* Available memory locations begin here */
  int iTab         /* Next available cursor */
){
  sqlite3 *db = pParse->db;    /* Database handle */
  Index *pIdx;                 /* An index to being analyzed */
  int iIdxCur;                 /* Cursor open on index being analyzed */
  int iTabCur;                 /* Table cursor */
  Vdbe *v;                     /* The virtual machine being built up */
  int i;                       /* Loop counter */
  int jZeroRows = -1;          /* Jump from here if number of rows is zero */
  int iDb;                     /* Index of database containing pTab */
  u8 needTableCnt = 1;         /* True to count the table */
  int regNewRowid = iMem++;    /* Rowid for the inserted record */
  int regStat4 = iMem++;       /* Register to hold Stat4Accum object */
  int regChng = iMem++;        /* Index of changed index field */
#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
  int regRowid = iMem++;       /* Rowid argument passed to stat_push() */
#endif
  int regTemp = iMem++;        /* Temporary use register */
  int regTabname = iMem++;     /* Register containing table name */
  int regIdxname = iMem++;     /* Register containing index name */
  int regStat1 = iMem++;       /* Value for the stat column of sqlite_stat1 */
  int regPrev = iMem;          /* MUST BE LAST (see below) */

  pParse->nMem = MAX(pParse->nMem, iMem);
  v = sqlite3GetVdbe(pParse);
  if( v==0 || NEVER(pTab==0) ){
    return;
  }
  if( pTab->tnum==0 ){
    /* Do not gather statistics on views or virtual tables */
    return;
  }
  if( sqlite3_strlike("sqlite_%", pTab->zName, 0)==0 ){
    /* Do not gather statistics on system tables */
    return;
  }
  assert( sqlite3BtreeHoldsAllMutexes(db) );
  iDb = sqlite3SchemaToIndex(db, pTab->pSchema);
  assert( iDb>=0 );
  assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
#ifndef SQLITE_OMIT_AUTHORIZATION
  if( sqlite3AuthCheck(pParse, SQLITE_ANALYZE, pTab->zName, 0,
      db->aDb[iDb].zName ) ){
    return;
  }
#endif

  /* Establish a read-lock on the table at the shared-cache level.
  ** Open a read-only cursor on the table. Also allocate a cursor number
  ** to use for scanning indexes (iIdxCur). No index cursor is opened at
  ** this time though.  */
  sqlite3TableLock(pParse, iDb, pTab->tnum, 0, pTab->zName);
  iTabCur = iTab++;
  iIdxCur = iTab++;
  pParse->nTab = MAX(pParse->nTab, iTab);
  sqlite3OpenTable(pParse, iTabCur, iDb, pTab, OP_OpenRead);
  sqlite3VdbeLoadString(v, regTabname, pTab->zName);

  for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){
    int nCol;                     /* Number of columns in pIdx. "N" */
    int addrRewind;               /* Address of "OP_Rewind iIdxCur" */
    int addrNextRow;              /* Address of "next_row:" */
    const char *zIdxName;         /* Name of the index */
    int nColTest;                 /* Number of columns to test for changes */

    if( pOnlyIdx && pOnlyIdx!=pIdx ) continue;
    /* A full (non-partial) index covers the whole table, so a separate
    ** table row count entry is not needed. */
    if( pIdx->pPartIdxWhere==0 ) needTableCnt = 0;
    if( !HasRowid(pTab) && IsPrimaryKeyIndex(pIdx) ){
      nCol = pIdx->nKeyCol;
      zIdxName = pTab->zName;
      nColTest = nCol - 1;
    }else{
      nCol = pIdx->nColumn;
      zIdxName = pIdx->zName;
      nColTest = pIdx->uniqNotNull ? pIdx->nKeyCol-1 : nCol-1;
    }

    /* Populate the register containing the index name. */
    sqlite3VdbeLoadString(v, regIdxname, zIdxName);
    VdbeComment((v, "Analysis for %s.%s", pTab->zName, zIdxName));

    /*
    ** Pseudo-code for loop that calls stat_push():
    **
    **   Rewind csr
    **   if eof(csr) goto end_of_scan;
    **   regChng = 0
    **   goto chng_addr_0;
    **
    **  next_row:
    **   regChng = 0
    **   if( idx(0) != regPrev(0) ) goto chng_addr_0
    **   regChng = 1
    **   if( idx(1) != regPrev(1) ) goto chng_addr_1
    **   ...
    **   regChng = N
    **   goto chng_addr_N
    **
    **  chng_addr_0:
    **   regPrev(0) = idx(0)
    **  chng_addr_1:
    **   regPrev(1) = idx(1)
    **  ...
    **
    **  endDistinctTest:
    **   regRowid = idx(rowid)
    **   stat_push(P, regChng, regRowid)
    **   Next csr
    **   if !eof(csr) goto next_row;
    **
    **  end_of_scan:
    */

    /* Make sure there are enough memory cells allocated to accommodate
    ** the regPrev array and a trailing rowid (the rowid slot is required
    ** when building a record to insert into the sample column of
    ** the sqlite_stat4 table.  */
    pParse->nMem = MAX(pParse->nMem, regPrev+nColTest);

    /* Open a read-only cursor on the index being analyzed. */
    assert( iDb==sqlite3SchemaToIndex(db, pIdx->pSchema) );
    sqlite3VdbeAddOp3(v, OP_OpenRead, iIdxCur, pIdx->tnum, iDb);
    sqlite3VdbeSetP4KeyInfo(pParse, pIdx);
    VdbeComment((v, "%s", pIdx->zName));

    /* Invoke the stat_init() function. The arguments are:
    **
    **    (1) the number of columns in the index including the rowid
    **        (or for a WITHOUT ROWID table, the number of PK columns),
    **    (2) the number of columns in the key without the rowid/pk
    **    (3) the number of rows in the index,
    **
    **
    ** The third argument is only used for STAT3 and STAT4
    */
#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
    sqlite3VdbeAddOp2(v, OP_Count, iIdxCur, regStat4+3);
#endif
    sqlite3VdbeAddOp2(v, OP_Integer, nCol, regStat4+1);
    sqlite3VdbeAddOp2(v, OP_Integer, pIdx->nKeyCol, regStat4+2);
    sqlite3VdbeAddOp4(v, OP_Function0, 0, regStat4+1, regStat4,
                     (char*)&statInitFuncdef, P4_FUNCDEF);
    sqlite3VdbeChangeP5(v, 2+IsStat34);

    /* Implementation of the following:
    **
    **   Rewind csr
    **   if eof(csr) goto end_of_scan;
    **   regChng = 0
    **   goto next_push_0;
    **
    */
    addrRewind = sqlite3VdbeAddOp1(v, OP_Rewind, iIdxCur);
    VdbeCoverage(v);
    sqlite3VdbeAddOp2(v, OP_Integer, 0, regChng);
    addrNextRow = sqlite3VdbeCurrentAddr(v);

    if( nColTest>0 ){
      int endDistinctTest = sqlite3VdbeMakeLabel(v);
      int *aGotoChng;               /* Array of jump instruction addresses */
      aGotoChng = sqlite3DbMallocRawNN(db, sizeof(int)*nColTest);
      if( aGotoChng==0 ) continue;

      /*
      **  next_row:
      **   regChng = 0
      **   if( idx(0) != regPrev(0) ) goto chng_addr_0
      **   regChng = 1
      **   if( idx(1) != regPrev(1) ) goto chng_addr_1
      **   ...
      **   regChng = N
      **   goto endDistinctTest
      */
      sqlite3VdbeAddOp0(v, OP_Goto);
      addrNextRow = sqlite3VdbeCurrentAddr(v);
      if( nColTest==1 && pIdx->nKeyCol==1 && IsUniqueIndex(pIdx) ){
        /* For a single-column UNIQUE index, once we have found a non-NULL
        ** row, we know that all the rest will be distinct, so skip
        ** subsequent distinctness tests. */
        sqlite3VdbeAddOp2(v, OP_NotNull, regPrev, endDistinctTest);
        VdbeCoverage(v);
      }
      for(i=0; i<nColTest; i++){
        char *pColl = (char*)sqlite3LocateCollSeq(pParse, pIdx->azColl[i]);
        sqlite3VdbeAddOp2(v, OP_Integer, i, regChng);
        sqlite3VdbeAddOp3(v, OP_Column, iIdxCur, i, regTemp);
        aGotoChng[i] =
        sqlite3VdbeAddOp4(v, OP_Ne, regTemp, 0, regPrev+i, pColl, P4_COLLSEQ);
        sqlite3VdbeChangeP5(v, SQLITE_NULLEQ);
        VdbeCoverage(v);
      }
      sqlite3VdbeAddOp2(v, OP_Integer, nColTest, regChng);
      sqlite3VdbeGoto(v, endDistinctTest);


      /*
      **  chng_addr_0:
      **   regPrev(0) = idx(0)
      **  chng_addr_1:
      **   regPrev(1) = idx(1)
      **  ...
      */
      sqlite3VdbeJumpHere(v, addrNextRow-1);
      for(i=0; i<nColTest; i++){
        sqlite3VdbeJumpHere(v, aGotoChng[i]);
        sqlite3VdbeAddOp3(v, OP_Column, iIdxCur, i, regPrev+i);
      }
      sqlite3VdbeResolveLabel(v, endDistinctTest);
      sqlite3DbFree(db, aGotoChng);
    }

    /*
    **  chng_addr_N:
    **   regRowid = idx(rowid)            // STAT34 only
    **   stat_push(P, regChng, regRowid)  // 3rd parameter STAT34 only
    **   Next csr
    **   if !eof(csr) goto next_row;
    */
#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
    assert( regRowid==(regStat4+2) );
    if( HasRowid(pTab) ){
      sqlite3VdbeAddOp2(v, OP_IdxRowid, iIdxCur, regRowid);
    }else{
      /* WITHOUT ROWID: assemble the primary-key columns into a record
      ** to act as the sample "rowid". */
      Index *pPk = sqlite3PrimaryKeyIndex(pIdx->pTable);
      int j, k, regKey;
      regKey = sqlite3GetTempRange(pParse, pPk->nKeyCol);
      for(j=0; j<pPk->nKeyCol; j++){
        k = sqlite3ColumnOfIndex(pIdx, pPk->aiColumn[j]);
        assert( k>=0 && k<pTab->nCol );
        sqlite3VdbeAddOp3(v, OP_Column, iIdxCur, k, regKey+j);
        VdbeComment((v, "%s", pTab->aCol[pPk->aiColumn[j]].zName));
      }
      sqlite3VdbeAddOp3(v, OP_MakeRecord, regKey, pPk->nKeyCol, regRowid);
      sqlite3ReleaseTempRange(pParse, regKey, pPk->nKeyCol);
    }
#endif
    assert( regChng==(regStat4+1) );
    sqlite3VdbeAddOp4(v, OP_Function0, 1, regStat4, regTemp,
                     (char*)&statPushFuncdef, P4_FUNCDEF);
    sqlite3VdbeChangeP5(v, 2+IsStat34);
    sqlite3VdbeAddOp2(v, OP_Next, iIdxCur, addrNextRow); VdbeCoverage(v);

    /* Add the entry to the stat1 table. */
    callStatGet(v, regStat4, STAT_GET_STAT1, regStat1);
    assert( "BBB"[0]==SQLITE_AFF_TEXT );
    sqlite3VdbeAddOp4(v, OP_MakeRecord, regTabname, 3, regTemp, "BBB", 0);
    sqlite3VdbeAddOp2(v, OP_NewRowid, iStatCur, regNewRowid);
    sqlite3VdbeAddOp3(v, OP_Insert, iStatCur, regTemp, regNewRowid);
    sqlite3VdbeChangeP5(v, OPFLAG_APPEND);

    /* Add the entries to the stat3 or stat4 table. */
#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
    {
      int regEq = regStat1;
      int regLt = regStat1+1;
      int regDLt = regStat1+2;
      int regSample = regStat1+3;
      int regCol = regStat1+4;
      int regSampleRowid = regCol + nCol;
      int addrNext;
      int addrIsNull;
      u8 seekOp = HasRowid(pTab) ? OP_NotExists : OP_NotFound;

      pParse->nMem = MAX(pParse->nMem, regCol+nCol);

      /* Loop: fetch each sample's rowid via stat_get(); a NULL rowid
      ** signals that all samples have been consumed. */
      addrNext = sqlite3VdbeCurrentAddr(v);
      callStatGet(v, regStat4, STAT_GET_ROWID, regSampleRowid);
      addrIsNull = sqlite3VdbeAddOp1(v, OP_IsNull, regSampleRowid);
      VdbeCoverage(v);
      callStatGet(v, regStat4, STAT_GET_NEQ, regEq);
      callStatGet(v, regStat4, STAT_GET_NLT, regLt);
      callStatGet(v, regStat4, STAT_GET_NDLT, regDLt);
      sqlite3VdbeAddOp4Int(v, seekOp, iTabCur, addrNext, regSampleRowid, 0);
      /* We know that the regSampleRowid row exists because it was read by
      ** the previous loop.  Thus the not-found jump of seekOp will never
      ** be taken */
      VdbeCoverageNeverTaken(v);
#ifdef SQLITE_ENABLE_STAT3
      sqlite3ExprCodeLoadIndexColumn(pParse, pIdx, iTabCur, 0, regSample);
#else
      for(i=0; i<nCol; i++){
        sqlite3ExprCodeLoadIndexColumn(pParse, pIdx, iTabCur, i, regCol+i);
      }
      sqlite3VdbeAddOp3(v, OP_MakeRecord, regCol, nCol, regSample);
#endif
      sqlite3VdbeAddOp3(v, OP_MakeRecord, regTabname, 6, regTemp);
      sqlite3VdbeAddOp2(v, OP_NewRowid, iStatCur+1, regNewRowid);
      sqlite3VdbeAddOp3(v, OP_Insert, iStatCur+1, regTemp, regNewRowid);
      sqlite3VdbeAddOp2(v, OP_Goto, 1, addrNext); /* P1==1 for end-of-loop */
      sqlite3VdbeJumpHere(v, addrIsNull);
    }
#endif /* SQLITE_ENABLE_STAT3_OR_STAT4 */

    /* End of analysis */
    sqlite3VdbeJumpHere(v, addrRewind);
  }


  /* Create a single sqlite_stat1 entry containing NULL as the index
  ** name and the row count as the content.
  */
  if( pOnlyIdx==0 && needTableCnt ){
    VdbeComment((v, "%s", pTab->zName));
    sqlite3VdbeAddOp2(v, OP_Count, iTabCur, regStat1);
    jZeroRows = sqlite3VdbeAddOp1(v, OP_IfNot, regStat1); VdbeCoverage(v);
    sqlite3VdbeAddOp2(v, OP_Null, 0, regIdxname);
    assert( "BBB"[0]==SQLITE_AFF_TEXT );
    sqlite3VdbeAddOp4(v, OP_MakeRecord, regTabname, 3, regTemp, "BBB", 0);
    sqlite3VdbeAddOp2(v, OP_NewRowid, iStatCur, regNewRowid);
    sqlite3VdbeAddOp3(v, OP_Insert, iStatCur, regTemp, regNewRowid);
    sqlite3VdbeChangeP5(v, OPFLAG_APPEND);
    sqlite3VdbeJumpHere(v, jZeroRows);
  }
}
|
|
|
|
|
|
/*
|
|
** Generate code that will cause the most recent index analysis to
|
|
** be loaded into internal hash tables where is can be used.
|
|
*/
|
|
static void loadAnalysis(Parse *pParse, int iDb){
|
|
Vdbe *v = sqlite3GetVdbe(pParse);
|
|
if( v ){
|
|
sqlite3VdbeAddOp1(v, OP_LoadAnalysis, iDb);
|
|
}
|
|
}
|
|
|
|
/*
|
|
** Generate code that will do an analysis of an entire database
|
|
*/
|
|
static void analyzeDatabase(Parse *pParse, int iDb){
|
|
sqlite3 *db = pParse->db;
|
|
Schema *pSchema = db->aDb[iDb].pSchema; /* Schema of database iDb */
|
|
HashElem *k;
|
|
int iStatCur;
|
|
int iMem;
|
|
int iTab;
|
|
|
|
sqlite3BeginWriteOperation(pParse, 0, iDb);
|
|
iStatCur = pParse->nTab;
|
|
pParse->nTab += 3;
|
|
openStatTable(pParse, iDb, iStatCur, 0, 0);
|
|
iMem = pParse->nMem+1;
|
|
iTab = pParse->nTab;
|
|
assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
|
|
for(k=sqliteHashFirst(&pSchema->tblHash); k; k=sqliteHashNext(k)){
|
|
Table *pTab = (Table*)sqliteHashData(k);
|
|
analyzeOneTable(pParse, pTab, 0, iStatCur, iMem, iTab);
|
|
}
|
|
loadAnalysis(pParse, iDb);
|
|
}
|
|
|
|
/*
|
|
** Generate code that will do an analysis of a single table in
|
|
** a database. If pOnlyIdx is not NULL then it is a single index
|
|
** in pTab that should be analyzed.
|
|
*/
|
|
static void analyzeTable(Parse *pParse, Table *pTab, Index *pOnlyIdx){
|
|
int iDb;
|
|
int iStatCur;
|
|
|
|
assert( pTab!=0 );
|
|
assert( sqlite3BtreeHoldsAllMutexes(pParse->db) );
|
|
iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema);
|
|
sqlite3BeginWriteOperation(pParse, 0, iDb);
|
|
iStatCur = pParse->nTab;
|
|
pParse->nTab += 3;
|
|
if( pOnlyIdx ){
|
|
openStatTable(pParse, iDb, iStatCur, pOnlyIdx->zName, "idx");
|
|
}else{
|
|
openStatTable(pParse, iDb, iStatCur, pTab->zName, "tbl");
|
|
}
|
|
analyzeOneTable(pParse, pTab, pOnlyIdx, iStatCur,pParse->nMem+1,pParse->nTab);
|
|
loadAnalysis(pParse, iDb);
|
|
}
|
|
|
|
/*
** Generate code for the ANALYZE command.  The parser calls this routine
** when it recognizes an ANALYZE command.
**
**        ANALYZE                            -- 1
**        ANALYZE  <database>                -- 2
**        ANALYZE  ?<database>.?<tablename>  -- 3
**
** Form 1 causes all indices in all attached databases to be analyzed.
** Form 2 analyzes all indices the single database named.
** Form 3 analyzes all indices associated with the named table.
*/
void sqlite3Analyze(Parse *pParse, Token *pName1, Token *pName2){
  sqlite3 *db = pParse->db;
  int iDb;
  int i;
  char *z, *zDb;
  Table *pTab;
  Index *pIdx;
  Token *pTableName;
  Vdbe *v;

  /* Read the database schema. If an error occurs, leave an error message
  ** and code in pParse and return NULL. */
  assert( sqlite3BtreeHoldsAllMutexes(pParse->db) );
  if( SQLITE_OK!=sqlite3ReadSchema(pParse) ){
    return;
  }

  assert( pName2!=0 || pName1==0 );
  if( pName1==0 ){
    /* Form 1:  Analyze everything */
    for(i=0; i<db->nDb; i++){
      if( i==1 ) continue;  /* Do not analyze the TEMP database */
      analyzeDatabase(pParse, i);
    }
  }else if( pName2->n==0 ){
    /* Form 2:  Analyze the database or table named.  Try the name first
    ** as a database, then as an index, then as a table. */
    iDb = sqlite3FindDb(db, pName1);
    if( iDb>=0 ){
      analyzeDatabase(pParse, iDb);
    }else{
      z = sqlite3NameFromToken(db, pName1);
      if( z ){
        if( (pIdx = sqlite3FindIndex(db, z, 0))!=0 ){
          analyzeTable(pParse, pIdx->pTable, pIdx);
        }else if( (pTab = sqlite3LocateTable(pParse, 0, z, 0))!=0 ){
          analyzeTable(pParse, pTab, 0);
        }
        sqlite3DbFree(db, z);
      }
    }
  }else{
    /* Form 3: Analyze the fully qualified table name */
    iDb = sqlite3TwoPartName(pParse, pName1, pName2, &pTableName);
    if( iDb>=0 ){
      zDb = db->aDb[iDb].zName;
      z = sqlite3NameFromToken(db, pTableName);
      if( z ){
        /* As with form 2, the name may identify either an index or a
        ** table within the named database. */
        if( (pIdx = sqlite3FindIndex(db, z, zDb))!=0 ){
          analyzeTable(pParse, pIdx->pTable, pIdx);
        }else if( (pTab = sqlite3LocateTable(pParse, 0, z, zDb))!=0 ){
          analyzeTable(pParse, pTab, 0);
        }
        sqlite3DbFree(db, z);
      }
    }
  }
  /* Force all prepared statements to be re-prepared so they pick up the
  ** new statistics. */
  v = sqlite3GetVdbe(pParse);
  if( v ) sqlite3VdbeAddOp0(v, OP_Expire);
}
|
|
|
|
/*
** Used to pass information from the analyzer reader through to the
** callback routine.
*/
typedef struct analysisInfo analysisInfo;
struct analysisInfo {
  sqlite3 *db;            /* Database connection whose stats are loading */
  const char *zDatabase;  /* Name of the attached database being read */
};
|
|
|
|
/*
** The first argument points to a nul-terminated string containing a
** list of space separated integers. Read the first nOut of these into
** the array aOut[].
**
** After the integers, the remainder of the string is scanned for the
** index-tuning keywords "unordered", "sz=N", "noskipscan" (and, when
** SQLITE_ENABLE_COSTMULT is defined, "costmult=N"), which are applied
** to pIndex.
*/
static void decodeIntArray(
  char *zIntArray,       /* String containing int array to decode */
  int nOut,              /* Number of slots in aOut[] */
  tRowcnt *aOut,         /* Store integers here */
  LogEst *aLog,          /* Or, if aOut==0, here */
  Index *pIndex          /* Handle extra flags for this index, if not NULL */
){
  char *z = zIntArray;
  int c;
  int i;
  tRowcnt v;

#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
  if( z==0 ) z = "";
#else
  assert( z!=0 );
#endif
  for(i=0; *z && i<nOut; i++){
    /* Hand-rolled decimal parse: accumulate digits into v */
    v = 0;
    while( (c=z[0])>='0' && c<='9' ){
      v = v*10 + c - '0';
      z++;
    }
#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
    if( aOut ) aOut[i] = v;
    if( aLog ) aLog[i] = sqlite3LogEst(v);
#else
    assert( aOut==0 );
    UNUSED_PARAMETER(aOut);
    assert( aLog!=0 );
    aLog[i] = sqlite3LogEst(v);
#endif
    if( *z==' ' ) z++;
  }
  /* NB: the following "{" pairs with the "}" near the end of the
  ** function.  Without STAT3/4 pIndex is never NULL, so the block is
  ** unconditional; with STAT3/4 it is the body of "if( pIndex )". */
#ifndef SQLITE_ENABLE_STAT3_OR_STAT4
  assert( pIndex!=0 ); {
#else
  if( pIndex ){
#endif
    pIndex->bUnordered = 0;
    pIndex->noSkipScan = 0;
    /* Scan any trailing keywords, one whitespace-separated token at a
    ** time */
    while( z[0] ){
      if( sqlite3_strglob("unordered*", z)==0 ){
        pIndex->bUnordered = 1;
      }else if( sqlite3_strglob("sz=[0-9]*", z)==0 ){
        pIndex->szIdxRow = sqlite3LogEst(sqlite3Atoi(z+3));
      }else if( sqlite3_strglob("noskipscan*", z)==0 ){
        pIndex->noSkipScan = 1;
      }
#ifdef SQLITE_ENABLE_COSTMULT
      else if( sqlite3_strglob("costmult=[0-9]*",z)==0 ){
        pIndex->pTable->costMult = sqlite3LogEst(sqlite3Atoi(z+9));
      }
#endif
      while( z[0]!=0 && z[0]!=' ' ) z++;
      while( z[0]==' ' ) z++;
    }
  }
}
|
|
|
|
/*
** This callback is invoked once for each index when reading the
** sqlite_stat1 table.
**
**     argv[0] = name of the table
**     argv[1] = name of the index (might be NULL)
**     argv[2] = results of analysis - on integer for each column
**
** Entries for which argv[1]==NULL simply record the number of rows in
** the table.
**
** Always returns 0 so the surrounding sqlite3_exec() keeps iterating;
** malformed or unmatched rows are silently skipped.
*/
static int analysisLoader(void *pData, int argc, char **argv, char **NotUsed){
  analysisInfo *pInfo = (analysisInfo*)pData;
  Index *pIndex;
  Table *pTable;
  const char *z;

  assert( argc==3 );
  UNUSED_PARAMETER2(NotUsed, argc);

  if( argv==0 || argv[0]==0 || argv[2]==0 ){
    return 0;
  }
  pTable = sqlite3FindTable(pInfo->db, argv[0], pInfo->zDatabase);
  if( pTable==0 ){
    return 0;
  }
  if( argv[1]==0 ){
    /* Table-only row: no index stats */
    pIndex = 0;
  }else if( sqlite3_stricmp(argv[0],argv[1])==0 ){
    /* Index name equal to table name: the implicit PK index of a
    ** WITHOUT ROWID table */
    pIndex = sqlite3PrimaryKeyIndex(pTable);
  }else{
    pIndex = sqlite3FindIndex(pInfo->db, argv[1], pInfo->zDatabase);
  }
  z = argv[2];

  if( pIndex ){
    tRowcnt *aiRowEst = 0;
    int nCol = pIndex->nKeyCol+1;
#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
    /* Index.aiRowEst may already be set here if there are duplicate
    ** sqlite_stat1 entries for this index. In that case just clobber
    ** the old data with the new instead of allocating a new array. */
    if( pIndex->aiRowEst==0 ){
      pIndex->aiRowEst = (tRowcnt*)sqlite3MallocZero(sizeof(tRowcnt) * nCol);
      if( pIndex->aiRowEst==0 ) sqlite3OomFault(pInfo->db);
    }
    aiRowEst = pIndex->aiRowEst;
#endif
    pIndex->bUnordered = 0;
    decodeIntArray((char*)z, nCol, aiRowEst, pIndex->aiRowLogEst, pIndex);
    /* A full (non-partial) index also gives the table's row count */
    if( pIndex->pPartIdxWhere==0 ) pTable->nRowLogEst = pIndex->aiRowLogEst[0];
  }else{
    /* No index: decode into a throw-away Index so decodeIntArray() can
    ** still process the "sz=N" (and costmult) keywords for the table. */
    Index fakeIdx;
    fakeIdx.szIdxRow = pTable->szTabRow;
#ifdef SQLITE_ENABLE_COSTMULT
    fakeIdx.pTable = pTable;
#endif
    decodeIntArray((char*)z, 1, 0, &pTable->nRowLogEst, &fakeIdx);
    pTable->szTabRow = fakeIdx.szIdxRow;
  }

  return 0;
}
|
|
|
|
/*
|
|
** If the Index.aSample variable is not NULL, delete the aSample[] array
|
|
** and its contents.
|
|
*/
|
|
void sqlite3DeleteIndexSamples(sqlite3 *db, Index *pIdx){
|
|
#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
|
|
if( pIdx->aSample ){
|
|
int j;
|
|
for(j=0; j<pIdx->nSample; j++){
|
|
IndexSample *p = &pIdx->aSample[j];
|
|
sqlite3DbFree(db, p->p);
|
|
}
|
|
sqlite3DbFree(db, pIdx->aSample);
|
|
}
|
|
if( db && db->pnBytesFreed==0 ){
|
|
pIdx->nSample = 0;
|
|
pIdx->aSample = 0;
|
|
}
|
|
#else
|
|
UNUSED_PARAMETER(db);
|
|
UNUSED_PARAMETER(pIdx);
|
|
#endif /* SQLITE_ENABLE_STAT3_OR_STAT4 */
|
|
}
|
|
|
|
#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
|
|
/*
** Populate the pIdx->aAvgEq[] array based on the samples currently
** stored in pIdx->aSample[].
**
** aAvgEq[iCol] estimates the average number of index rows that share
** each distinct prefix of the first (iCol+1) columns, derived from the
** stat3/4 samples and, when available, the stat1 row estimates.
*/
static void initAvgEq(Index *pIdx){
  if( pIdx ){
    IndexSample *aSample = pIdx->aSample;
    IndexSample *pFinal = &aSample[pIdx->nSample-1];  /* Largest sample */
    int iCol;
    int nCol = 1;        /* Number of columns to compute estimates for */
    if( pIdx->nSampleCol>1 ){
      /* If this is stat4 data, then calculate aAvgEq[] values for all
      ** sample columns except the last. The last is always set to 1, as
      ** once the trailing PK fields are considered all index keys are
      ** unique. */
      nCol = pIdx->nSampleCol-1;
      pIdx->aAvgEq[nCol] = 1;
    }
    for(iCol=0; iCol<nCol; iCol++){
      int nSample = pIdx->nSample;  /* Samples considered by the loop below */
      int i;                        /* Used to iterate through samples */
      tRowcnt sumEq = 0;            /* Sum of the nEq values */
      tRowcnt avgEq = 0;
      tRowcnt nRow;                 /* Number of rows in index */
      i64 nSum100 = 0;              /* Number of terms contributing to sumEq */
      i64 nDist100;                 /* Number of distinct values in index */

      /* Prefer the stat1 estimates for nRow/nDist100 when present and
      ** non-zero; otherwise fall back to the counts recorded in the final
      ** (largest) sample, and exclude that sample from the sum below. */
      if( !pIdx->aiRowEst || iCol>=pIdx->nKeyCol || pIdx->aiRowEst[iCol+1]==0 ){
        nRow = pFinal->anLt[iCol];
        nDist100 = (i64)100 * pFinal->anDLt[iCol];
        nSample--;
      }else{
        nRow = pIdx->aiRowEst[0];
        nDist100 = ((i64)100 * pIdx->aiRowEst[0]) / pIdx->aiRowEst[iCol+1];
      }
      pIdx->nRowEst0 = nRow;

      /* Set nSum to the number of distinct (iCol+1) field prefixes that
      ** occur in the stat4 table for this index. Set sumEq to the sum of
      ** the nEq values for column iCol for the same set (adding the value
      ** only once where there exist duplicate prefixes). */
      for(i=0; i<nSample; i++){
        if( i==(pIdx->nSample-1)
         || aSample[i].anDLt[iCol]!=aSample[i+1].anDLt[iCol]
        ){
          sumEq += aSample[i].anEq[iCol];
          nSum100 += 100;
        }
      }

      /* Average over the distinct keys NOT covered by the samples; values
      ** are scaled by 100 to keep two digits of precision.  A floor of 1
      ** is imposed so the estimate is never zero. */
      if( nDist100>nSum100 ){
        avgEq = ((i64)100 * (nRow - sumEq))/(nDist100 - nSum100);
      }
      if( avgEq==0 ) avgEq = 1;
      pIdx->aAvgEq[iCol] = avgEq;
    }
  }
}
|
|
|
|
/*
|
|
** Look up an index by name. Or, if the name of a WITHOUT ROWID table
|
|
** is supplied instead, find the PRIMARY KEY index for that table.
|
|
*/
|
|
static Index *findIndexOrPrimaryKey(
|
|
sqlite3 *db,
|
|
const char *zName,
|
|
const char *zDb
|
|
){
|
|
Index *pIdx = sqlite3FindIndex(db, zName, zDb);
|
|
if( pIdx==0 ){
|
|
Table *pTab = sqlite3FindTable(db, zName, zDb);
|
|
if( pTab && !HasRowid(pTab) ) pIdx = sqlite3PrimaryKeyIndex(pTab);
|
|
}
|
|
return pIdx;
|
|
}
|
|
|
|
/*
** Load the content from either the sqlite_stat4 or sqlite_stat3 table
** into the relevant Index.aSample[] arrays.
**
** Arguments zSql1 and zSql2 must point to SQL statements that return
** data equivalent to the following (statements are different for stat3,
** see the caller of this function for details):
**
**    zSql1: SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx
**    zSql2: SELECT idx,neq,nlt,ndlt,sample FROM %Q.sqlite_stat4
**
** where %Q is replaced with the database name before the SQL is executed.
**
** Returns SQLITE_OK on success, SQLITE_NOMEM on allocation failure, or
** an error code from preparing/finalizing either statement.
*/
static int loadStatTbl(
  sqlite3 *db,                  /* Database handle */
  int bStat3,                   /* Assume single column records only */
  const char *zSql1,            /* SQL statement 1 (see above) */
  const char *zSql2,            /* SQL statement 2 (see above) */
  const char *zDb               /* Database name (e.g. "main") */
){
  int rc;                       /* Result codes from subroutines */
  sqlite3_stmt *pStmt = 0;      /* An SQL statement being run */
  char *zSql;                   /* Text of the SQL statement */
  Index *pPrevIdx = 0;          /* Previous index in the loop */
  IndexSample *pSample;         /* A slot in pIdx->aSample[] */

  assert( db->lookaside.bDisable );
  zSql = sqlite3MPrintf(db, zSql1, zDb);
  if( !zSql ){
    return SQLITE_NOMEM;
  }
  rc = sqlite3_prepare(db, zSql, -1, &pStmt, 0);
  sqlite3DbFree(db, zSql);
  if( rc ) return rc;

  /* Pass 1: for each index named in the stat table, make one allocation
  ** holding its aSample[] array, the anEq/anLt/anDLt vectors of every
  ** sample, and the aAvgEq[] array, then carve it up. */
  while( sqlite3_step(pStmt)==SQLITE_ROW ){
    int nIdxCol = 1;              /* Number of columns in stat4 records */

    char *zIndex;   /* Index name */
    Index *pIdx;    /* Pointer to the index object */
    int nSample;    /* Number of samples */
    int nByte;      /* Bytes of space required */
    int i;          /* Used to iterate through the samples */
    tRowcnt *pSpace;

    zIndex = (char *)sqlite3_column_text(pStmt, 0);
    if( zIndex==0 ) continue;
    nSample = sqlite3_column_int(pStmt, 1);
    pIdx = findIndexOrPrimaryKey(db, zIndex, zDb);
    assert( pIdx==0 || bStat3 || pIdx->nSample==0 );
    /* Index.nSample is non-zero at this point if data has already been
    ** loaded from the stat4 table. In this case ignore stat3 data. */
    if( pIdx==0 || pIdx->nSample ) continue;
    if( bStat3==0 ){
      /* stat4 records cover every sampled column of the index; for the
      ** PK index of a WITHOUT ROWID table that is just the key columns */
      assert( !HasRowid(pIdx->pTable) || pIdx->nColumn==pIdx->nKeyCol+1 );
      if( !HasRowid(pIdx->pTable) && IsPrimaryKeyIndex(pIdx) ){
        nIdxCol = pIdx->nKeyCol;
      }else{
        nIdxCol = pIdx->nColumn;
      }
    }
    pIdx->nSampleCol = nIdxCol;
    /* NOTE(review): nByte is a signed int; an adversarial stat table with
    ** a very large sample count could overflow this arithmetic — confirm
    ** upstream limits on nSample before relying on it. */
    nByte = sizeof(IndexSample) * nSample;
    nByte += sizeof(tRowcnt) * nIdxCol * 3 * nSample;
    nByte += nIdxCol * sizeof(tRowcnt);     /* Space for Index.aAvgEq[] */

    pIdx->aSample = sqlite3DbMallocZero(db, nByte);
    if( pIdx->aSample==0 ){
      sqlite3_finalize(pStmt);
      return SQLITE_NOMEM;
    }
    /* The tRowcnt arrays live immediately after the aSample[] slots:
    ** first aAvgEq[], then anEq/anLt/anDLt for each sample in turn. */
    pSpace = (tRowcnt*)&pIdx->aSample[nSample];
    pIdx->aAvgEq = pSpace; pSpace += nIdxCol;
    for(i=0; i<nSample; i++){
      pIdx->aSample[i].anEq = pSpace; pSpace += nIdxCol;
      pIdx->aSample[i].anLt = pSpace; pSpace += nIdxCol;
      pIdx->aSample[i].anDLt = pSpace; pSpace += nIdxCol;
    }
    assert( ((u8*)pSpace)-nByte==(u8*)(pIdx->aSample) );
  }
  rc = sqlite3_finalize(pStmt);
  if( rc ) return rc;

  /* Pass 2: read the individual sample rows into the arrays allocated
  ** by pass 1. */
  zSql = sqlite3MPrintf(db, zSql2, zDb);
  if( !zSql ){
    return SQLITE_NOMEM;
  }
  rc = sqlite3_prepare(db, zSql, -1, &pStmt, 0);
  sqlite3DbFree(db, zSql);
  if( rc ) return rc;

  while( sqlite3_step(pStmt)==SQLITE_ROW ){
    char *zIndex;   /* Index name */
    Index *pIdx;    /* Pointer to the index object */
    int nCol = 1;   /* Number of columns in index */

    zIndex = (char *)sqlite3_column_text(pStmt, 0);
    if( zIndex==0 ) continue;
    pIdx = findIndexOrPrimaryKey(db, zIndex, zDb);
    if( pIdx==0 ) continue;
    /* This next condition is true if data has already been loaded from
    ** the sqlite_stat4 table. In this case ignore stat3 data. */
    nCol = pIdx->nSampleCol;
    if( bStat3 && nCol>1 ) continue;
    /* When rows for a new index start, finish the previous index by
    ** computing its aAvgEq[] values from its now-complete samples. */
    if( pIdx!=pPrevIdx ){
      initAvgEq(pPrevIdx);
      pPrevIdx = pIdx;
    }
    pSample = &pIdx->aSample[pIdx->nSample];
    decodeIntArray((char*)sqlite3_column_text(pStmt,1),nCol,pSample->anEq,0,0);
    decodeIntArray((char*)sqlite3_column_text(pStmt,2),nCol,pSample->anLt,0,0);
    decodeIntArray((char*)sqlite3_column_text(pStmt,3),nCol,pSample->anDLt,0,0);

    /* Take a copy of the sample. Add two 0x00 bytes the end of the buffer.
    ** This is in case the sample record is corrupted. In that case, the
    ** sqlite3VdbeRecordCompare() may read up to two varints past the
    ** end of the allocated buffer before it realizes it is dealing with
    ** a corrupt record. Adding the two 0x00 bytes prevents this from causing
    ** a buffer overread. */
    pSample->n = sqlite3_column_bytes(pStmt, 4);
    pSample->p = sqlite3DbMallocZero(db, pSample->n + 2);
    if( pSample->p==0 ){
      sqlite3_finalize(pStmt);
      return SQLITE_NOMEM;
    }
    memcpy(pSample->p, sqlite3_column_blob(pStmt, 4), pSample->n);
    pIdx->nSample++;
  }
  rc = sqlite3_finalize(pStmt);
  /* The final index in the scan never hits the pIdx!=pPrevIdx branch,
  ** so compute its aAvgEq[] here. */
  if( rc==SQLITE_OK ) initAvgEq(pPrevIdx);
  return rc;
}
|
|
|
|
/*
|
|
** Load content from the sqlite_stat4 and sqlite_stat3 tables into
|
|
** the Index.aSample[] arrays of all indices.
|
|
*/
|
|
static int loadStat4(sqlite3 *db, const char *zDb){
|
|
int rc = SQLITE_OK; /* Result codes from subroutines */
|
|
|
|
assert( db->lookaside.bDisable );
|
|
if( sqlite3FindTable(db, "sqlite_stat4", zDb) ){
|
|
rc = loadStatTbl(db, 0,
|
|
"SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx",
|
|
"SELECT idx,neq,nlt,ndlt,sample FROM %Q.sqlite_stat4",
|
|
zDb
|
|
);
|
|
}
|
|
|
|
if( rc==SQLITE_OK && sqlite3FindTable(db, "sqlite_stat3", zDb) ){
|
|
rc = loadStatTbl(db, 1,
|
|
"SELECT idx,count(*) FROM %Q.sqlite_stat3 GROUP BY idx",
|
|
"SELECT idx,neq,nlt,ndlt,sqlite_record(sample) FROM %Q.sqlite_stat3",
|
|
zDb
|
|
);
|
|
}
|
|
|
|
return rc;
|
|
}
|
|
#endif /* SQLITE_ENABLE_STAT3_OR_STAT4 */
|
|
|
|
/*
|
|
** Load the content of the sqlite_stat1 and sqlite_stat3/4 tables. The
|
|
** contents of sqlite_stat1 are used to populate the Index.aiRowEst[]
|
|
** arrays. The contents of sqlite_stat3/4 are used to populate the
|
|
** Index.aSample[] arrays.
|
|
**
|
|
** If the sqlite_stat1 table is not present in the database, SQLITE_ERROR
|
|
** is returned. In this case, even if SQLITE_ENABLE_STAT3/4 was defined
|
|
** during compilation and the sqlite_stat3/4 table is present, no data is
|
|
** read from it.
|
|
**
|
|
** If SQLITE_ENABLE_STAT3/4 was defined during compilation and the
|
|
** sqlite_stat4 table is not present in the database, SQLITE_ERROR is
|
|
** returned. However, in this case, data is read from the sqlite_stat1
|
|
** table (if it is present) before returning.
|
|
**
|
|
** If an OOM error occurs, this function always sets db->mallocFailed.
|
|
** This means if the caller does not care about other errors, the return
|
|
** code may be ignored.
|
|
*/
|
|
int sqlite3AnalysisLoad(sqlite3 *db, int iDb){
|
|
analysisInfo sInfo;
|
|
HashElem *i;
|
|
char *zSql;
|
|
int rc;
|
|
|
|
assert( iDb>=0 && iDb<db->nDb );
|
|
assert( db->aDb[iDb].pBt!=0 );
|
|
|
|
/* Clear any prior statistics */
|
|
assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
|
|
for(i=sqliteHashFirst(&db->aDb[iDb].pSchema->idxHash);i;i=sqliteHashNext(i)){
|
|
Index *pIdx = sqliteHashData(i);
|
|
sqlite3DefaultRowEst(pIdx);
|
|
#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
|
|
sqlite3DeleteIndexSamples(db, pIdx);
|
|
pIdx->aSample = 0;
|
|
#endif
|
|
}
|
|
|
|
/* Check to make sure the sqlite_stat1 table exists */
|
|
sInfo.db = db;
|
|
sInfo.zDatabase = db->aDb[iDb].zName;
|
|
if( sqlite3FindTable(db, "sqlite_stat1", sInfo.zDatabase)==0 ){
|
|
return SQLITE_ERROR;
|
|
}
|
|
|
|
/* Load new statistics out of the sqlite_stat1 table */
|
|
zSql = sqlite3MPrintf(db,
|
|
"SELECT tbl,idx,stat FROM %Q.sqlite_stat1", sInfo.zDatabase);
|
|
if( zSql==0 ){
|
|
rc = SQLITE_NOMEM;
|
|
}else{
|
|
rc = sqlite3_exec(db, zSql, analysisLoader, &sInfo, 0);
|
|
sqlite3DbFree(db, zSql);
|
|
}
|
|
|
|
|
|
/* Load the statistics from the sqlite_stat4 table. */
|
|
#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
|
|
if( rc==SQLITE_OK && OptimizationEnabled(db, SQLITE_Stat34) ){
|
|
db->lookaside.bDisable++;
|
|
rc = loadStat4(db, sInfo.zDatabase);
|
|
db->lookaside.bDisable--;
|
|
}
|
|
for(i=sqliteHashFirst(&db->aDb[iDb].pSchema->idxHash);i;i=sqliteHashNext(i)){
|
|
Index *pIdx = sqliteHashData(i);
|
|
sqlite3_free(pIdx->aiRowEst);
|
|
pIdx->aiRowEst = 0;
|
|
}
|
|
#endif
|
|
|
|
if( rc==SQLITE_NOMEM ){
|
|
sqlite3OomFault(db);
|
|
}
|
|
return rc;
|
|
}
|
|
|
|
|
|
#endif /* SQLITE_OMIT_ANALYZE */
|