From 20d89717c73f9876f9366721d953d6dd43a4eadc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Thu, 9 Mar 2017 13:02:23 +0200 Subject: [PATCH 01/27] Remove unused headers The my_config.h headers are not used. --- query_classifier/test/classify.c | 1 - server/core/config.c | 1 - server/core/gateway.c | 1 - server/core/test/testfeedback.c | 1 - server/modules/filter/dbfwfilter/dbfwfilter.c | 1 - server/modules/filter/mqfilter.c | 1 - server/modules/filter/test/harness_util.c | 1 - server/modules/routing/debugcmd.c | 1 - server/modules/routing/readwritesplit/readwritesplit.c | 1 - server/modules/routing/schemarouter/schemarouter.c | 1 - server/modules/routing/schemarouter/shardrouter.c | 1 - server/modules/routing/schemarouter/test/testschemarouter.c | 1 - server/modules/routing/schemarouter/test/testschemarouter2.c | 1 - 13 files changed, 13 deletions(-) diff --git a/query_classifier/test/classify.c b/query_classifier/test/classify.c index 846700490..300924d7c 100644 --- a/query_classifier/test/classify.c +++ b/query_classifier/test/classify.c @@ -11,7 +11,6 @@ * Public License. 
*/ -#include #include #include #include diff --git a/server/core/config.c b/server/core/config.c index 321a7ccdc..03a725c31 100644 --- a/server/core/config.c +++ b/server/core/config.c @@ -44,7 +44,6 @@ * * @endverbatim */ -#include #include #include #include diff --git a/server/core/gateway.c b/server/core/gateway.c index 0177d2321..a77be3491 100644 --- a/server/core/gateway.c +++ b/server/core/gateway.c @@ -36,7 +36,6 @@ */ #define _XOPEN_SOURCE 700 #define OPENSSL_THREAD_DEFINES -#include #include #if defined(OPENSSL_THREADS) diff --git a/server/core/test/testfeedback.c b/server/core/test/testfeedback.c index 1ccaddc58..cb7396644 100644 --- a/server/core/test/testfeedback.c +++ b/server/core/test/testfeedback.c @@ -31,7 +31,6 @@ #undef NDEBUG #endif #define FAILTEST(s) printf("TEST FAILED: " s "\n");return 1; -#include #include #include #include diff --git a/server/modules/filter/dbfwfilter/dbfwfilter.c b/server/modules/filter/dbfwfilter/dbfwfilter.c index a5ef051a5..ab825c285 100644 --- a/server/modules/filter/dbfwfilter/dbfwfilter.c +++ b/server/modules/filter/dbfwfilter/dbfwfilter.c @@ -60,7 +60,6 @@ *@endcode */ -#include #include #include #include diff --git a/server/modules/filter/mqfilter.c b/server/modules/filter/mqfilter.c index 97a8b230e..b60ca0e8a 100644 --- a/server/modules/filter/mqfilter.c +++ b/server/modules/filter/mqfilter.c @@ -56,7 +56,6 @@ *@endverbatim * See the individual struct documentations for logging trigger parameters */ -#include #include #include #include diff --git a/server/modules/filter/test/harness_util.c b/server/modules/filter/test/harness_util.c index c23e8c192..077704a5c 100644 --- a/server/modules/filter/test/harness_util.c +++ b/server/modules/filter/test/harness_util.c @@ -1,4 +1,3 @@ -#include #include #include diff --git a/server/modules/routing/debugcmd.c b/server/modules/routing/debugcmd.c index 8e0be70f1..4225e3569 100644 --- a/server/modules/routing/debugcmd.c +++ b/server/modules/routing/debugcmd.c @@ -45,7 +45,6 
@@ * * @endverbatim */ -#include #include #include #include diff --git a/server/modules/routing/readwritesplit/readwritesplit.c b/server/modules/routing/readwritesplit/readwritesplit.c index 339ba6b39..ca0b75e1a 100644 --- a/server/modules/routing/readwritesplit/readwritesplit.c +++ b/server/modules/routing/readwritesplit/readwritesplit.c @@ -10,7 +10,6 @@ * of this software will be governed by version 2 or later of the General * Public License. */ -#include #include #include #include diff --git a/server/modules/routing/schemarouter/schemarouter.c b/server/modules/routing/schemarouter/schemarouter.c index 4126958f7..0921c356d 100644 --- a/server/modules/routing/schemarouter/schemarouter.c +++ b/server/modules/routing/schemarouter/schemarouter.c @@ -10,7 +10,6 @@ * of this software will be governed by version 2 or later of the General * Public License. */ -#include #include #include #include diff --git a/server/modules/routing/schemarouter/shardrouter.c b/server/modules/routing/schemarouter/shardrouter.c index 7bc54d6fe..c02b00f28 100644 --- a/server/modules/routing/schemarouter/shardrouter.c +++ b/server/modules/routing/schemarouter/shardrouter.c @@ -11,7 +11,6 @@ * Public License. 
*/ -#include #include #include #include diff --git a/server/modules/routing/schemarouter/test/testschemarouter.c b/server/modules/routing/schemarouter/test/testschemarouter.c index 0e0abd8d8..1d0a7346c 100644 --- a/server/modules/routing/schemarouter/test/testschemarouter.c +++ b/server/modules/routing/schemarouter/test/testschemarouter.c @@ -1,4 +1,3 @@ -#include #include #include #include diff --git a/server/modules/routing/schemarouter/test/testschemarouter2.c b/server/modules/routing/schemarouter/test/testschemarouter2.c index edd7d074a..94ca41380 100644 --- a/server/modules/routing/schemarouter/test/testschemarouter2.c +++ b/server/modules/routing/schemarouter/test/testschemarouter2.c @@ -1,4 +1,3 @@ -#include #include #include #include From 454248e3eca6f494facded03a371c7e8d8195154 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Thu, 9 Mar 2017 12:56:13 +0200 Subject: [PATCH 02/27] Fix builds with system Connector-C If the system Connector-C is used, we must generate a fake connector-c target that the core can depend on. 
--- CMakeLists.txt | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index fe46a7232..048fa6700 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -57,6 +57,10 @@ include_directories(${PCRE2_INCLUDE_DIRS}) if(NOT MARIADB_CONNECTOR_FOUND) message(STATUS "Building MariaDB Connector-C from source.") include(cmake/BuildMariaDBConnector.cmake) +else() + # This is required as the core depends on the `connector-c` target + add_custom_target(connector-c) + message(STATUS "Using system Connector-C") endif() # You can find the variables set by this in the FindCURL.cmake file From 552ee4ad641af6470c8f9be3d170842310d1c511 Mon Sep 17 00:00:00 2001 From: Johan Wikman Date: Sat, 11 Mar 2017 08:04:11 +0200 Subject: [PATCH 03/27] Update release date --- Documentation/Release-Notes/MaxScale-2.0.5-Release-Notes.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/Release-Notes/MaxScale-2.0.5-Release-Notes.md b/Documentation/Release-Notes/MaxScale-2.0.5-Release-Notes.md index becf7b416..46b1ba8b0 100644 --- a/Documentation/Release-Notes/MaxScale-2.0.5-Release-Notes.md +++ b/Documentation/Release-Notes/MaxScale-2.0.5-Release-Notes.md @@ -1,4 +1,4 @@ -# MariaDB MaxScale 2.0.5 Release Notes +# MariaDB MaxScale 2.0.5 Release Notes -- 2017-03-10 Release 2.0.5 is a GA release. 
From b24bf7004d523f2500f164ff8f09b3675eb0da54 Mon Sep 17 00:00:00 2001 From: Johan Wikman Date: Tue, 14 Mar 2017 08:18:13 +0200 Subject: [PATCH 04/27] Update release notes link --- Documentation/Documentation-Contents.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/Documentation-Contents.md b/Documentation/Documentation-Contents.md index 993c347cd..71e3031d8 100644 --- a/Documentation/Documentation-Contents.md +++ b/Documentation/Documentation-Contents.md @@ -6,7 +6,7 @@ ## About MariaDB MaxScale - [About MariaDB MaxScale](About/About-MaxScale.md) - - [Release Notes](Release-Notes/MaxScale-2.0.0-Release-Notes.md) + - [Release Notes](Release-Notes/MaxScale-2.0.5-Release-Notes.md) - [Changelog](Changelog.md) - [Limitations](About/Limitations.md) From 5ea4f44aa2fd7964db7ec1b9504707b4349b3acd Mon Sep 17 00:00:00 2001 From: Johan Wikman Date: Mon, 20 Mar 2017 09:29:16 +0200 Subject: [PATCH 05/27] Remove all but 2.0 release notes from 2.0 branch --- .../MaxScale-0.5-Release-Notes.md | 336 ------------------ .../MaxScale-0.6-Release-Notes.md | 31 -- .../MaxScale-0.7-Release-Notes.md | 158 -------- .../MaxScale-1.0-Release-Notes.md | 124 ------- .../MaxScale-1.0.1-Release-Notes.md | 332 ----------------- .../MaxScale-1.0.3-Release-Notes.md | 136 ------- .../MaxScale-1.0.4-Release-Notes.md | 140 -------- .../MaxScale-1.0.5-Release-Notes.md | 113 ------ .../MaxScale-1.1-Release-Notes.md | 284 --------------- .../MaxScale-1.1.1-Release-Notes.md | 94 ----- .../MaxScale-1.2.0-Release-Notes.md | 99 ------ .../MaxScale-1.3.0-Release-Notes.md | 267 -------------- .../MaxScale-1.4.0-Release-Notes.md | 112 ------ .../MaxScale-1.4.1-Release-Notes.md | 41 --- .../MaxScale-1.4.2-Release-Notes.md | 44 --- .../MaxScale-1.4.3-Release-Notes.md | 37 -- 16 files changed, 2348 deletions(-) delete mode 100644 Documentation/Release-Notes/MaxScale-0.5-Release-Notes.md delete mode 100644 Documentation/Release-Notes/MaxScale-0.6-Release-Notes.md delete mode 
100644 Documentation/Release-Notes/MaxScale-0.7-Release-Notes.md delete mode 100644 Documentation/Release-Notes/MaxScale-1.0-Release-Notes.md delete mode 100644 Documentation/Release-Notes/MaxScale-1.0.1-Release-Notes.md delete mode 100644 Documentation/Release-Notes/MaxScale-1.0.3-Release-Notes.md delete mode 100644 Documentation/Release-Notes/MaxScale-1.0.4-Release-Notes.md delete mode 100644 Documentation/Release-Notes/MaxScale-1.0.5-Release-Notes.md delete mode 100644 Documentation/Release-Notes/MaxScale-1.1-Release-Notes.md delete mode 100644 Documentation/Release-Notes/MaxScale-1.1.1-Release-Notes.md delete mode 100644 Documentation/Release-Notes/MaxScale-1.2.0-Release-Notes.md delete mode 100644 Documentation/Release-Notes/MaxScale-1.3.0-Release-Notes.md delete mode 100644 Documentation/Release-Notes/MaxScale-1.4.0-Release-Notes.md delete mode 100644 Documentation/Release-Notes/MaxScale-1.4.1-Release-Notes.md delete mode 100644 Documentation/Release-Notes/MaxScale-1.4.2-Release-Notes.md delete mode 100644 Documentation/Release-Notes/MaxScale-1.4.3-Release-Notes.md diff --git a/Documentation/Release-Notes/MaxScale-0.5-Release-Notes.md b/Documentation/Release-Notes/MaxScale-0.5-Release-Notes.md deleted file mode 100644 index 5f09d7bfd..000000000 --- a/Documentation/Release-Notes/MaxScale-0.5-Release-Notes.md +++ /dev/null @@ -1,336 +0,0 @@ -# MariaDB MaxScale 0.5 Alpha Release Notes - -0.5 Alpha - -This document details the changes in version 0.5 since the release of the 0.4 alpha of the MaxScale product. - -# New Features - -## Read/Write Splitter Routing Module - -In previous versions the read/write splitter routing module has had a number of limitations on it use, in the alpha release the router now removes the most important restrictions. - -### Session Commands - -Session commands are those statements that make some change to the user’s login session that may cause different effects from subsequent statements executed. 
Since the read/write splitter executes statements on either a master server or a slave server, depending upon the statement to execute, it is important that these session modifications are executed on all connections to both slave and master servers. This is resolved in release 0.5 such that session modification commands are executed on all active connections and a single return is forward back to the client that made the request. - -### Transaction Support - -Transaction support has been added into this version of the read/write splitter, there is one known outstanding limitation. If autocommit is enabled inside an active transaction it is not considered as commit in read/write splitter. Once a transaction has started all statements are routed to a master until the transaction is committed or rolled back. - -## Authentication - -A number of issues and shortcomings in the authentication performed by MaxScale have been resolved by this release. - -### Host Considered in Authentication - -Previously MaxScale did not follow the same rules as MySQL when authenticating a login request, it would always use the wildcard password entries and would not check the incoming host was allowed to connect. MaxScale now checks the incoming IP address for a connection request and verifies this against the authentication data loaded from the backend servers. The same rules are applied when choosing the password entry to authenticate with. Note however that authentication from MaxScale to the backend database will fail if the MaxScale host is not allowed to login using the matching password for the user. - -### Stale Authentication Data - -In previous releases of MaxScale the authentication data would be read at startup time only and would not be refreshed. Therefore if a user was added or modified in the backend server this will not be picked up by MaxScale and that user would be unable to connect via MaxScale. 
MaxScale now reloads user authentication data when a failure occurs and will refresh its internal tables if the data has changed in the backend. Please note that this reload process is rate limited to prevent incorrect logins to MaxScale being used for a denial of service attack on the backend servers. - -### Enable Use Of "root" User - -Previously MaxScale would prevent the use of the root user to login to the backend servers via MaxScale. This may be enabled on a per service basis by adding an "enable_root_user" options in the service entry to enable it in the MaxScale configuration file. This allows the use of root to be controlled on a per service basis. - -## Network Support - -### Unix Domain Sockets - -MaxScale now supports Unix domain sockets for connecting to a local MaxScale server. The use of a Unix domain socket is controlled by adding a "socket" entry in the listener configuration entry for a service. - -### Network Interface Binding - -MaxScale has added the ability to bind a listener for a service to a network address via an "address" entry in the configuration file. - -# Server Version - -The server version reported when connected to a database via MaxScale has now been altered. This now shows the MaxScale name and version together with the backend server name. An example of this can be seen below for the 0.5 release. - --bash-4.1$ mysql -h 127.0.0.1 -P 4006 -uxxxx -pxxxx Welcome to the MariaDB monitor. Commands end with ; or \\g. Your MySQL connection id is 22320 Server version: MaxScale 0.5.0 MariaDB Server Copyright (c) 2000, 2012, Oracle, Monty Program Ab and others. Type 'help;' or '\\h' for help. Type '\\c' to clear the current input statement. MySQL [(none)]> \\ys -------------- mysql Ver 15.1 Distrib 5.5.28a-MariaDB, for Linux (i686) using readline 5.1 ... Server: MySQL Server version: MaxScale 0.5.0 MariaDB Server ... 
-------------- MySQL [(none)]> - -# Bug Fixes - -A number of bug fixes have been applied between the 0.4 alpha and this alpha release. The table below lists the bugs that have been resolved. The details for each of these may be found in bugs.skysql.com. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
IDSummary
141No "delete user" command in debugcli
175Buffer leak in dcb_read from Coverity run
178Uninitialised variables from Coverity run
179open with O_CREAT in second argument needs 3 arguments
363simple_mutex "name" memory handling ...
126"reload config" in debug interface causes maxscale server to segfault
149It is possible to delete all maxscale users
218there is no way to understand what is going on if MAXSCALE_HOME is incorrect
137"show users" and "reload users" refer to very different things in debugcli
154readwritesplit does not use router_options
160telnetd leaks memory
169Galera monitor is actually never compiled ....
172Several compile errors in galera_mon.c
174Resource leak in server.c
176Resource leak in gw_utils.c
362possible datadir_cleanup() problems ...
124readconnroute does not validate router_options
153MaxScale fails when max connections are exceeded
133MaxScale leaves lots of "data" directories sitting around $MAXSCALE_HOME
166readwritesplit causes MaxScale segfault when starting up
207Quitting telnet session causes maxscale to fail
161Memory leak in load_mysql_users.
177Resource leak in secrets.c
182On Startup logfiles are empty
135MaxScale unsafely handles empty passwords in getUsers
145.secret file for encrypted passwords cyclicly searched
171ifndef logic in build_gateway.inc doesn't work, MARIADB_SRC_PATH from env not picked up
173Resource leak in adminusers.c found by Coverity
376Confusing Server Version
370maxscale binary returns zero exit status on failures
150telnetd listener should bind to 127.0.0.1 by default
152listener configuration should support bind address
373Documentation: it's not clear what privileges the maxscale user needs
128Maxscale prints debug information to terminal session when run in background
129MaxScale refuses to connect to server and reports nonsense error as a result
147Maxscale's hashtable fails to handle deletion of entries.
148users data structure's stats have incorrect values.
384MaxScale crashes if backend authentication fails
210Bad timing in freeing readconnrouter's dcbs cause maxscale crash
403gwbuf_free doesn't protect freeing shared buffer
371If router module load fails, MaxScale goes to inifinite loop
385MaxScale (DEBUG-version) dasserts if backend dcb is closed in the middle of client dcb performing close_dcb
386Starting MaxScale with -c pointing at existing file causes erroneous behavior
209Error in backend hangs client connection
194maxscale crashes at start if module load fails
369typo in "QUERY_TYPE_UNKNWON"
163MaxScale crashes with multiple threads
162threads parameter in configuration file is not effective
400hastable_get_stats returns value of uninitialized value in 'nelems'
212Failing write causes maxscale to fail
222Double freeing mutex corrupts log
208current_connection_count is decreased multiple times per session, thus breaking load balancing logic
378Misspelling maxscale section name in config file crashes maxscale
399Every row in log starts with 0x0A00
205MaxScale crashes due SEGFAULT because return value of dcb_read is not checked
220Maxscale crash if socket listening fails in startup
372Log manager hangs MaxScale if log string (mostly query length) exceeds block size
397Free of uninitialised pointer if MAXSCALE_HOME is not set
402gw_decode_mysql_server_handshake asserts with mysql 5.1 backend
345MaxScale don't find backend servers if they are started after MaxScale
406Memory leak in dcb_alloc()
360MaxScale passwd option
151Get parse_sql failed on array INSERT
216Backend error handling doesn't update server's connection counter
127MaxScale should handle out-of-date backend auth data more gracefully
146"show dbusers" argument not documented
125readconnroute causes maxscale server crash if no slaves are available
375Tarball contains UID and maxscale base dir
- - diff --git a/Documentation/Release-Notes/MaxScale-0.6-Release-Notes.md b/Documentation/Release-Notes/MaxScale-0.6-Release-Notes.md deleted file mode 100644 index 4b2c445c3..000000000 --- a/Documentation/Release-Notes/MaxScale-0.6-Release-Notes.md +++ /dev/null @@ -1,31 +0,0 @@ -# MariaDB MaxScale 0.6 Alpha Release Notes - -0.6 Alpha - -This document details the changes in version 0.6 since the release of the 0.5 alpha of the MaxScale product. The 0.6 version is merely a set of bug fixes based on the previous 0.5 version. - -# Bug Fixes - -A number of bug fixes have been applied between the 0.5 alpha and this alpha release. The table below lists the bugs that have been resolved. The details for each of these may be found in bugs.skysql.com. - - - - - - - - - - - - - - -
IDSummary
423The new "version_string" parameter has been added to service section. -This allows a specific version string to be set for each service, this version string is used in the MySQL handshake from MaxScale to clients and is reported as the server version to clients. - -The version_string is optional, the default value will be taken from the embedded MariaDB library which supplies the parser to MaxScale.
418Statements are not routed to master if a transaction is started implicitly by setting autocommit=0. In such cases statements were previously routed as if they were not part of a transaction. - -This fix changes the behavior so that is autocommit is disabled, all statements are routed to the master and in case of session variable updates, to both master and slave.
- - diff --git a/Documentation/Release-Notes/MaxScale-0.7-Release-Notes.md b/Documentation/Release-Notes/MaxScale-0.7-Release-Notes.md deleted file mode 100644 index 21d9320ec..000000000 --- a/Documentation/Release-Notes/MaxScale-0.7-Release-Notes.md +++ /dev/null @@ -1,158 +0,0 @@ -# MariaDB MaxScale 0.7 Alpha Release Notes - -0.7 Alpha - -This document details the changes in version 0.7 since the release of the 0.6 alpha of the MaxScale product. - -# New Features - -## Galera Support - -Enhanced support for Galera cluster to allow Galera to be used as a High Available Cluster with no write contention between the nodes.. - -MaxScale will control access to a Galera Cluster such that one node is designated as the master node to which all write operations will be sent. Read operations will be sent to any of the remaining nodes that are part of the cluster. Should the currently elected master node fail MaxScale will automatically promote one of the remaining nodes to become the new master node. - -## Multiple Slave Connections - -The Read/Write Split query router has been enhanced to allow multiple slaves connections to be created. The number of slave connections is configurable via a parameter in the MaxScale configuration file. - -Adding multiple connections allows for better load balancing between the slaves and in a pre-requisite for providing improved fault tolerance within the Read/Write Splitter. The selection of which slave to use for a particular read operation can be controlled via options in the router configuration. - -## Debug Interface Enhancements - -A number of new list commands have been added to the debug interface to allow more concise tabular output of certain object types within the interface. 
- -**MaxScale>** help list - -Available options to the list command: - - filters List all the filters defined within MaxScale - - listeners List all the listeners defined within MaxScale - - modules Show all currently loaded modules - - services List all the services defined within MaxScale - - servers List all the servers defined within MaxScale - - sessions List all the active sessions within MaxScale - -**MaxScale>** - -Those objects that are defined in the configuration file can now be referenced by the names used in the configuration file rather than by using memory addresses. This means that services, servers, monitors and filters can all now be referenced using meaningful names provided by the user. Internal objects such as DCB’s and sessions, which are not named in the configuration file still require the use of memory addresses. - -Two modes of operation of the interface are now available, user mode and developer mode. The user mode restricts access to the feature that allow arbitrary structures to be examined and checks all memory address for validity before allowing access. - -## Maintenance Mode for Servers - -MaxScale now provides a maintenance mode for servers, this mode allows servers to be set such that no new connections will be opened to that server. Also, servers in maintenance mode are not monitored by MaxScale. This allows an administrator to set a server into maintenance mode when it is required to be taken out of use. The connections will then diminish over time and since no new connections are created, the administrator can remove the node from use to perform some maintenance activities. - -Nodes are placed into maintenance mode via the debug interface using the set server command. - -**MaxScale>** set server datanode3 maintenance - -Nodes are taken out of maintenance using the clear server command. 
- -**MaxScale>** clear server datanode3 maintenance - -## Configurable Monitoring Interval - -All monitor plugins now provide a configuration parameter that can be set to control how frequently the MaxScale monitoring is performed. - -## Replication Lag Heartbeat Monitor - -The mysqlmon monitor module now implements a replication heartbeat protocol that is used to determine the lag between updates to the master and those updates being applied to the slave. This information is then made available to routing modules and may be used to determine if a particular slave node may be used or which slave node is most up to date. - -## Filters API - -The first phase of the filter API is available as part of this release. This provides filtering for the statements from the client application to the router. Filtering for the returned results has not yet been implemented and will be available in a future version. - -Three example filters are including in the release - -1. Statement counting Filter - a simple filter that counts the number of SQL statements executed within a session. Results may be viewed via the debug interface. - -2. Query Logging Filter - a simple query logging filter that write all statements for a session into a log file for that session. - -3. Query Rewrite Filter - an example of how filters can alter the query contents. This filter allows a regular expression to be defined, along with replacement text that should be substituted for every match of that regular expression. - -## MariaDB 10 Replication Support - -The myqlmon monitor module has been updated to support the new syntax for show all slaves status in MariaDB in order to correctly determine the master and slave state of each server being monitor. Determination of MariaDB 10 is automatically performed by the monitor and no configuration is required. 
- -## API Versioning - -The module interface has been enhanced to allow the API version in use to be reported, along with the status of the module and a short description of the module. The status allows for differentiation of the release status of a plugin to be identified independently of the core of MaxScale. plugins may be designated as "in development", “alpha”, “beta” or “GA”. - -**MaxScale>** list modules - -Module Name | Module Type | Version | API | Status - ----------------------------------------------------------------- - -regexfilter | Filter | V1.0.0 | 1.0.0 | Alpha - -MySQLBackend | Protocol | V2.0.0 | 1.0.0 | Alpha - -telnetd | Protocol | V1.0.1 | 1.0.0 | Alpha - -MySQLClient | Protocol | V1.0.0 | 1.0.0 | Alpha - -mysqlmon | Monitor | V1.2.0 | 1.0.0 | Alpha - -readwritesplit | Router | V1.0.2 | 1.0.0 | Alpha - -readconnroute | Router | V1.0.2 | 1.0.0 | Alpha - -debugcli | Router | V1.1.1 | 1.0.0 | Alpha - -**MaxScale>** - -# Bug Fixes - -A number of bug fixes have been applied between the 0.6 alpha and this alpha release. The table below lists the bugs that have been resolved. The details for each of these may be found in bugs.skysql.com. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
IDSummary
443mysql/galera monitors hang when backend fails
424Read/Write Splitter closes connection without sending COM_QUIT
438Internal thread deadlock
436Sessions in invalid state
359Router options for Read/Write Split module
435Some automated tests have invalid SQL syntax
431rwsplit.sh test script has incorrect bash syntax
425MaxScale crashes after prolonged use
- - -# Linking - -Following reported issues with incompatibilities between MaxScale and the shared library used by MySQL this version of MaxScale will be statically linked with the MariaDB 5.5 embedded library that it requires. This library is used for internal purposes only and does not result in MaxScale support for other versions of MySQL or MariaDB being affected. - diff --git a/Documentation/Release-Notes/MaxScale-1.0-Release-Notes.md b/Documentation/Release-Notes/MaxScale-1.0-Release-Notes.md deleted file mode 100644 index 1d6649143..000000000 --- a/Documentation/Release-Notes/MaxScale-1.0-Release-Notes.md +++ /dev/null @@ -1,124 +0,0 @@ -# MariaDB MaxScale 1.0 Beta Release Notes - -1.0 Beta - -This document details the changes in version 1.0 since the release of the 0.7 alpha of the MaxScale product. - -# New Features - -## Complex Replication Structures - -The MaxScale monitor module for Master/Slave replication is now able to correctly identify tree structured replication environments and route write statements to the master server at the root level of the tree. Isolated database instances and now also correctly identified as external to the replication tree. - -## Read/Write Splitter Enhancements - -### Support For Prepared Statements - -Prepared statements are now correctly recognized by MaxScale, with the prepare stage being sent to all the eligible servers that could eventually run the statement. Statements are then execute on a single server. - -### Slave Failure Resilience - -The Read/Write splitter can not be used to establish multiple connections to different slave servers. The read load will be distributed across these slaves and slave failure will be masked from the application as MaxScale will automatically failover to another slave when one fails. 
- -### Configurable Load Balancing Options - -It is now possible to configure the criteria that the Read/Write Splitter uses for load balancing, the options are: - -* The total number of connections to the servers, from this MaxScale instance - -* The number of connections to the server for this particular MaxScale service - -* The number of statements currently being executed on the server on behalf of this MaxScale instance - -* Route statements to the slave that has the least replication lag - -### Replication Consistency - -The Read/Write splitter may now be configured to exclude nodes that are currently showing a replication lag greater than a configurable threshold. The replication lag is measured using the MySQL Monitor module of MaxScale. - -Alternatively it is possible to define that read operations should be routed to the slave that has the least measured replication lag. - -## Weighted Routing Options - -The distribution of connections and statement across the set of nodes can be controlled by attaching arbitrary parameters to the servers and then configuring the router to use that parameter value as a weighting factor when deciding which of the valid servers to which to connect or route queries. - -Several parameters may be used on each host and different routers may choose to use different parameters as the weighting parameter for that router. The use of weighting is optional, if no weighting parameter is given in the service definition then all eligible servers will have an equal distribution applied. - -Server weighting is supported by both the Read/Write Splitter and the connection router. - -## MaxAdmin Client - -A new administrative interface has been added that uses a MaxScale specific client application to interact with MaxScale to control and monitor the MaxScale activities. This client application may be used interactively or within scripts, passing commands to MaxScale via command line arguments. 
Command scripts are available, allowing command sets of commands to be stored in script files. - -MaxAdmin also supports command history via libedit on those distributions that support the libedit library. This allows for the use of the up and down cursor keys or selection of previous commands and editing of lines using vi or emacs style editing commands. - -## Pacemaker Support - -MaxScale now ships with an init.d script that is compatible with the use of Pacemaker and Heartbeat to provide for a highly available implementation of MaxScale. A tutorial on setting up MaxScale under Pacemaker control is included in the Documentation directory. - -## Filter API Enhancements - -The filter API has now been enhanced to operate not just on downstream query filtering but also upstream result set filtering. - -## Enhanced and New Filters - -Addition of new filters and enhancements to those existing filters that appeared in 0.7 of MaxScale. - -### Top Filter - -A new filter to capture and log the longest running queries within a client session. The filter can be configured to capture a specific number of queries that take the longest time between the query being submitted to the database server and the first result being returned. - -The queries captured can be defined using regular expressions to include and exclude queries that match these expressions. In addition the inclusion of a session may be based on the user name used to connect to the database or the source address of the client session. - -### Tee Filter - -A filter to optionally duplicate requests received from the client and send them to other services within MaxScale. This allows a single statement sent by a client to be routed to multiple storage backends via MaxScale. - -The queries duplicated can be defined using regular expressions to include and exclude queries that match these expressions. 
In addition the inclusion of a session may be based on the user name used to connect to the database or the source client session. - -### QLA and Regex Filter Improvements - -These filters have been enhanced to provide for the inclusion of sessions by specifying the username used to connect to the database or the source of the client connection as a criteria to trigger the use of these filters for particular sessions connected to the MaxScale service. - -# Bug Fixes - -A number of bug fixes have been applied between the 0.6 alpha and this alpha release. The table below lists the bugs that have been resolved. The details for each of these may be found in bugs.skysql.com. - - - - - - - - - - - - - - - - - - - - - - - - - - -
IDSummary
441Possible failure to return a value in setipaddress
396Build instructions suggest forcing install of RPMs
452Make install copies the modules to an incorrect directory
450Read/Write splitter does not balance load between multiple slaves
449The router clientReply function does not handle GWBUF structures correctly
- - -# Packaging - -Both RPM and Debian packages are available for MaxScale in addition to the tar based releases previously distributed we now provide - -* CentOS/RedHat 5 RPM - -* CentOS/RedHat 6 RPM - -* Ubuntu 14.04 package - diff --git a/Documentation/Release-Notes/MaxScale-1.0.1-Release-Notes.md b/Documentation/Release-Notes/MaxScale-1.0.1-Release-Notes.md deleted file mode 100644 index 5586696a4..000000000 --- a/Documentation/Release-Notes/MaxScale-1.0.1-Release-Notes.md +++ /dev/null @@ -1,332 +0,0 @@ -# MariaDB MaxScale 1.0.1 Beta Release Notes - -1.0.1 Beta - -This document details the changes in version 1.0.1 since the release of the 1.0 beta of the MaxScale product. - -# New Features - -## CMake build system - -Building MaxScale is now easier than ever thanks to the introduction of CMake into the build process. Building with CMake removes the need to edit files, specify directory locations or change build flags, in all but the rarest of the cases, and building with non-standard configurations is a lot easier thanks to the easy configuration of all the build parameters. - -Here’s a short list of the most common build parameters,their functions and default values. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
VariablePurposeDefault value
INSTALL_DIRRoot location of the MaxScale install/usr/local/skysql/maxscale
STATIC_EMBEDDEDWhether to use the static or the dynamic version of the embedded libraryNo
OLEVELLevel of optimization used when compilingNo optimization
INSTALL_SYSTEM_FILESIf startup scripts should be installed into /etc/init.d and ldconfig configuration files to /etc/ld.so.conf.dYes
BUILD_TYPEThe type of the build. ‘None’ for normal, ‘Debug’ for debugging and ‘Optimized’ for an optimized build.None
- - -Details on all the configurable parameters and instructions on how to use CMake can be found in the README file. - -## Enhancements - -The polling mechanism in MaxScale has been modified to overcome a flaw which mean that connections with a heavy I/O load could starve other connections within MaxScale and prevent query execution. This has been resolved with a more fairer event scheduling mechanism within the MaxScale polling subsystem. This has led to improve overall performance in high load situations. - -# Bug Fixes - -A number of bug fixes have been applied between the 1.0 beta release and this release candidate. The table below lists the bugs that have been resolved. The details for each of these may be found in bugs.skysql.com. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
IDSummary
462Testall target fails in server/test to invalid MAXSCALE_HOME path specification
467max_slave_replication_lag is not effective after session creation
468query_classifier : if parsing fails, parse tree and thread context are freed but used
469rwsplit counts every connection twice in master - connection counts leak
466hint_next_token doesn't detect = pair if there are no spaces around '='
470Maxscale crashes after a normal query if a query with named hint was used before
473Entering a hint with route server target as '=(' causes a crash
472Using a named hint after its initial use causes a crash
471Routing Hints route to server sometimes doesn't work
463MaxScale hangs receiving more than 16K in input
476mysql_common.c:protocol_archive_srv_command leaks memory and accesses freed memory
479Undefined filter reference in maxscale.cnf causes a crash
410maxscale.cnf server option is not parsed for spaces
417Galera monitor freezes on network failure of a server
488SHOW VARIABLES randomly failing with "Lost connection to MySQL server"
484Hashtable does not always release write lock during add
485Hashtable not locked soon enough in iterator get next item
493Can have same section name multiple times without warning
510Embedded library crashes on a call to free_embedded_thd
511Format strings in log_manager.cc should be const char*
509rw-split sensitive to order of terms in field list of SELECT
507rw-split router does not send last_insert_id() to master
490session handling for non-determinstic user variables broken
489@@hostname and @@server_id treated differently from @@wsrep_node_address
528Wrong service name in tee filter crashes maxscale on connect
530MaxScale socket permission
536log_manager doesn't write buffers to disk in the order they are written
447Error log is flooded with same warning if there are no slaves present
475The end comment tag in hints isn't properly detected.
181Missing log entry if server not reachable
486Hashtable problems when created with size less than one
516maxadmin CLI client sessions are not closed?
495Referring to a nonexisting server in servers=... doesn't even raise a warning
538maxscale should expose details of "Down" server
539MaxScale crashes in session_setup_filters
494The service 'CLI' is missing a definition of the servers that provide the service
180Documentation: No information found in the documentation about firewall settings
524Connecting to MaxScale from localhost tries matching @127.0.0.1 grant
481MySQL monitor doesn't set master server if the replication is broken
437Failure to detect MHA master switch
541Long queries cause MaxScale to block
492In dcb.c switch fallthrough appears to be used without comment
439Memory leak in getUsers
545RWSplit: session modification commands weren't routed to all if executed inside open transaction
543RWSplit router statistics counters are not updated correctly
544server with weight=0 gets one connection
525Crash when saving post in Wordpress
533Drupal installer hangs
497Can’t enable debug/trace logs in configuration file
430Temporary tables not working in MaxScale
527No signal handler for segfault etc
546Use of weightby router parameter causes error log write
506Don’t write shm/tmpfs by default without telling the user or giving a way to override it
552Long argument options to maxadmin and maxscale broke maxadmin commands
521Many commands in maxadmin client simply hang
478Parallel session command processing fails
499make clean leaves some .o files behind
500"depend: no such file warnings during make
501log_manager, query classifier rebuilds unconditionally
502log_manager and query_classifier builds always rebuild utils
504clean rule for Documentation directory in wrong makefile
505utils/makefile builds stuff unconditionally, misses "depend" target
548MaxScale accesses freed client DCB and crashes
550modutil functions process length incorrectly
- - -# Packaging - -Both RPM and Debian packages are available for MaxScale in addition to the tar based releases previously distributed we now provide - -* CentOS/RedHat 5 RPM - -* CentOS/RedHat 6 RPM - -* Ubuntu 14.04 package - diff --git a/Documentation/Release-Notes/MaxScale-1.0.3-Release-Notes.md b/Documentation/Release-Notes/MaxScale-1.0.3-Release-Notes.md deleted file mode 100644 index db8329366..000000000 --- a/Documentation/Release-Notes/MaxScale-1.0.3-Release-Notes.md +++ /dev/null @@ -1,136 +0,0 @@ -# MariaDB MaxScale 1.0.3 Release Notes - -1.0.3 GA - -This document details the changes in version 1.0.3 since the release of the 1.0.2 Release Candidate of the MaxScale product. - -# New Features - -No new features have been introduced since the released candidate was released. - -# Bug Fixes - -A number of bug fixes have been applied between the 0.6 alpha and this alpha release. The table below lists the bugs that have been resolved. The details for each of these may be found in bugs.mariadb.com. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
IDSummary
644Buffers that were cloned using the gwbuf_clone routine failed to initialise the buffer lock structure correctly.
643Recursive filter definitions in the configuration file could cause MaxScale to loop
665An access to memory that had already been freed could be made within the MaxScale core
664MySQL Authentication code could access memory that had already been freed.
673MaxScale could crash if it had an empty user table and the MaxAdmin show dbusers command was run
670The tee filter could lose statements on the branch service if the branch service was significantly slower at executing statements compared with the main service.
653Memory corruption could occur with extremely long hostnames in the mysql.user table.
657If the branch service of a tee filter shutdown unexpectedly then MaxScale could fail
654Missing quotes in MaxAdmin show dbusers command could cause MaxAdmin to crash
677A race condition existed in the tee filter client reply handling
658The readconnroute router did not correctly close sessions when a backend database failed
662MaxScale startup hangs if no backend servers respond
676MaxScale writes a log entry, "Write to backend failed. Session closed." when changing default database via readwritesplit with max_slave_connections != 100%
650Tee filter does not correctly detect missing branch service
645Tee filter can hang MaxScale if the read/write splitter is used
678Tee filter does not always send full query to branch service
679A shared pointer in the service was leading to misleading service states
680The Read/Write Splitter can not load users if there are no databases available at startup
681The Read/Write Splitter could crash if the value of max_slave_connections was set to a low percentage and only a small number of backend servers are available
- - -# Known Issues - -There are a number bugs and known limitations within this version of MaxScale, the most serious of this are listed below. - -* The SQL construct "LOAD DATA LOCAL INFILE" is not fully supported. - -* The Read/Write Splitter is a little too strict when it receives errors from slave servers during execution of session commands. This can result in sessions being terminated in situation in which MaxScale could recover without terminating the sessions. - -* MaxScale can not manage authentication that uses wildcard matching in hostnames in the mysql.user table of the backend database. The only wildcards that can be used are in IP address entries. - -* When users have different passwords based on the host from which they connect MaxScale is unable to determine which password it should use to connect to the backend database. This results in failed connections and unusable usernames in MaxScale. - -# Packaging - -Both RPM and Debian packages are available for MaxScale in addition to the tar based releases previously distributed we now provide - -* CentOS/RedHat 5 - -* CentOS/RedHat 6 - -* CentOS/RedHat 7 - -* Debian 6 - -* Debian 7 - -* Ubuntu 12.04 LTS - -* Ubuntu 13.10 - -* Ubuntu 14.04 LTS - -* Fedora 19 - -* Fedora 20 - -* OpenSuSE 13 - diff --git a/Documentation/Release-Notes/MaxScale-1.0.4-Release-Notes.md b/Documentation/Release-Notes/MaxScale-1.0.4-Release-Notes.md deleted file mode 100644 index 9103c830a..000000000 --- a/Documentation/Release-Notes/MaxScale-1.0.4-Release-Notes.md +++ /dev/null @@ -1,140 +0,0 @@ -# MariaDB MaxScale 1.0.4 Release Notes - -1.0.4 GA - -This document details the changes in version 1.0.4 since the release of the 1.0.2 Release Candidate of the MaxScale product. - -## New Features - -No new features have been introduced since the released candidate was released. - -## Bug Fixes - -A number of bug fixes have been applied between the 0.6 alpha and this alpha release. 
The table below lists the bugs that have been resolved. The details for each of these may be found in bugs.mariadb.com. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
IDSummary
644Buffers that were cloned using the gwbuf_clone routine failed to initialise the buffer lock structure correctly.
643Recursive filter definitions in the configuration file could cause MaxScale to loop
665An access to memory that had already been freed could be made within the MaxScale core
664MySQL Authentication code could access memory that had already been freed.
673MaxScale could crash if it had an empty user table and the MaxAdmin show dbusers command was run
670The tee filter could lose statements on the branch service if the branch service was significantly slower at executing statements compared with the main service.
653Memory corruption could occur with extremely long hostnames in the mysql.user table.
657If the branch service of a tee filter shutdown unexpectedly then MaxScale could fail
654Missing quotes in MaxAdmin show dbusers command could cause MaxAdmin to crash
677A race condition existed in the tee filter client reply handling
658The readconnroute router did not correctly close sessions when a backend database failed
662MaxScale startup hangs if no backend servers respond
676MaxScale writes a log entry, "Write to backend failed. Session closed." when changing default database via readwritesplit with max_slave_connections != 100%
650Tee filter does not correctly detect missing branch service
645Tee filter can hang MaxScale if the read/write splitter is used
678Tee filter does not always send full query to branch service
679A shared pointer in the service was leading to misleading service states
680The Read/Write Splitter can not load users if there are no databases available at startup
681The Read/Write Splitter could crash if the value of max_slave_connections was set to a low percentage and only a small number of backend servers are available
- - -## Known Issues - -There are a number bugs and known limitations within this version of MaxScale, the most serious of this are listed below. - -* The SQL construct "LOAD DATA LOCAL INFILE" is not fully supported. - -* The Read/Write Splitter is a little too strict when it receives errors from slave servers during execution of session commands. This can result in sessions being terminated in situation in which MaxScale could recover without terminating the sessions. - -* MaxScale can not manage authentication that uses wildcard matching in hostnames in the mysql.user table of the backend database. The only wildcards that can be used are in IP address entries. - -* When users have different passwords based on the host from which they connect MaxScale is unable to determine which password it should use to connect to the backend database. This results in failed connections and unusable usernames in MaxScale. - -# Packaging - -Both RPM and Debian packages are available for MaxScale in addition to the tar based releases previously distributed we now provide - -* CentOS/RedHat 5 - -* CentOS/RedHat 6 - -* CentOS/RedHat 7 - -* Debian 6 - -* Debian 7 - -* Ubuntu 12.04 LTS - -* Ubuntu 13.10 - -* Ubuntu 14.04 LTS - -* Fedora 19 - -* Fedora 20 - -* OpenSuSE 13 - -# MaxScale Home Default Value - -The installation assumes that the default value for the environment variable MAXSCALE_HOME is set to /usr/local/mariadb/maxscale. This is hard coded in the service startup file that is placed in /etc/init.d/maxscale by the installation process. - diff --git a/Documentation/Release-Notes/MaxScale-1.0.5-Release-Notes.md b/Documentation/Release-Notes/MaxScale-1.0.5-Release-Notes.md deleted file mode 100644 index e979ff6c1..000000000 --- a/Documentation/Release-Notes/MaxScale-1.0.5-Release-Notes.md +++ /dev/null @@ -1,113 +0,0 @@ -# MariaDB MaxScale 1.0.5 Release Notes - -This document details the changes in version 1.0.5 since the release of the 1.0.4 GA of the MaxScale product. 
- -# New Features -No new features have been introduced since the GA version was released. SuSE Enterprise 11 and 12 packages are now also supplied. - -# Bug Fixes - -A number of bug fixes have been applied between the 1.0.4 initial GA release and this GA release. The table below lists the bugs that have been resolved. The details for each of these may be found in bugs.mariadb.com. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
IDSummary
519LOAD DATA LOCAL INFILE not handled?
714Error log flooded when too many connect errors causes the MaxScale host to be blocked
711Some MySQL Workbench Management actions hang with R/W split router
710make package install files in /etc/init.d
683Check for unsupported version of MariaDB
684Use mysql_config to determine include/lib directory paths and compiler options
689cmake -DCMAKE_INSTALL_PREFIX has no effect
701set server maint fails on the command line
705Authentication fails when the user connects to a database with the SQL mode including ANSI_QUOTES
507R/W split does not send last_insert_id() to the master
700maxscale --version has no output
694RWSplit SELECT @a:=@a+1 as a, test.b from test breaks client session
685SELECT against readconnrouter fails when large volumes of data are returned and the tee filter is used
- -# Known Issues - -There are a number bugs and known limitations within this version of MaxScale, the most serious of this are listed below. - -* The Read/Write Splitter is a little too strict when it receives errors from slave servers during execution of session commands. This can result in sessions being terminated in situation in which MaxScale could recover without terminating the sessions. - -* MaxScale can not manage authentication that uses wildcard matching in hostnames in the mysql.user table of the backend database. The only wildcards that can be used are in IP address entries. - -* When users have different passwords based on the host from which they connect MaxScale is unable to determine which password it should use to connect to the backend database. This results in failed connections and unusable usernames in MaxScale. - -# Packaging - -Both RPM and Debian packages are available for MaxScale in addition to the tar based releases previously distributed we now provide - -* CentOS/RedHat 5 - -* CentOS/RedHat 6 - -* CentOS/RedHat 7 - -* Debian 6 - -* Debian 7 - -* Ubuntu 12.04 LTS - -* Ubuntu 13.10 - -* Ubuntu 14.04 LTS - -* Fedora 19 - -* Fedora 20 - -* OpenSuSE 13 - -* SuSE Enterprise 11 - -* SuSE Enterprise 12 - -# MaxScale Home Default Value - -The installation assumes that the default value for the environment variable MAXSCALE_HOME is set to /usr/local/skysql/maxscale. This is hard coded in the service startup file that is placed in /etc/init.d/maxscale by the installation process. diff --git a/Documentation/Release-Notes/MaxScale-1.1-Release-Notes.md b/Documentation/Release-Notes/MaxScale-1.1-Release-Notes.md deleted file mode 100644 index e2d0f505c..000000000 --- a/Documentation/Release-Notes/MaxScale-1.1-Release-Notes.md +++ /dev/null @@ -1,284 +0,0 @@ -# MariaDB MaxScale 1.1 Release Notes - -## 1.1 GA - -This document details the changes in version 1.1 since the release of the 1.0.5 GA Release of the MaxScale product. 
- -## New Features - -### High Performance Binlog Relay -Replicate Binlog from the master to slave through MaxScale as simplified relay server for reduced network load and disaster recovery - -### Database Firewall Filter -Block queries based on columns in the query, where condition, query type(select, insert, delete, update), presence of wildcard in column selection, regular expression match and time of the query - -### Schema Sharding Router -Route to databases sharded by schema without application level knowledge of shard configuration - -### Hint based routing -Pass hints in the SQL statement to influence the routing decision based on replication lag or time out - -### Named Server Routing -Routing to a named server if incoming query matches a regular expression - -### Canonical Query logging -Convert incoming queries to canonical form and push the query and response into RabbitMQ Broker for a RabbitMQ Client to later retrieve from - -### Nagios Plugin -Plugin scripts for monitoring MaxScale status and performance from a Nagios Server - -### Notification Service -Receive notification of security update and patches tailored to your MaxScale configuration - -### MySQL NDB cluster support -Connection based routing to MySQL NDB clusters - -### Updated installation path -MaxScale is now installed into `/usr/local/mariadb-maxscale` - -## Bug Fixes - -A number of bug fixes have been applied between the 1.0.5 GA and this GA release. The table below lists the bugs that have been resolved. 
The details for each of these may be found in https://jira.mariadb.org/projects/MXS or in the former http://bugs.mariadb.com Bug database - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
IDSummary
MXS-80"show sessions" can crash MaxScale
MXS-79schemarouter hangs if client connects with empty database
MXS-78"USE" statement gives unpredictable/unexpected results
MXS-76core/dbusers.c needs better error messages
MXS-74Crash when no arguments given to on_queries clause
MXS-72dbfwfilter on_queries clause appears to be ignored
MXS-71dbfwfilter at_times clause seems to erroneously block user
MXS-68Wrong rule name in dbfwfilter leads to MaxScale crash
MXS-65Omitting in users directive causes crash in libdbfwfilter.so(link_rules)
MXS-63Maxkeys and Maxpasswd log to /tpm
MXS-57MaxScale should write a message to the error log when config is not found
MXS-54Write failed auth attempt to trace log
MXS-50Removing 1.0.5 RPM gives error about /etc/ld.so.conf.d/maxscale.conf
MXS-47Session freeze when small tail packet
MXS-5Possible memory leak in readwritesplit router
736Memory leak while doing read/write splitting
733Init-script deletes bin/maxscale
732Build is broken: CentOS/RHEL 5 and SLES 11
730Regex filter and shorter than original replacement queries MaxScale
729PDO prepared statements bug introduced in Maxscale 1.0.5
721Documentation suggests SIGTERM to re-read config file
716$this->getReadConnection()->query('SET @id = 0;');
709"COPYRIGHT LICENSE README SETUP" files go to /usr/local/mariadb-maxscale/ after 'make package'
704"make testall" returns success status (exit code 0) even on failures
698Using invalid parameter in many maxadmin commands causes MaxScale to fail
693Freeing tee filter's orphaned sessions causes a segfault when embedded server closes
690CPU/architecture is hardcoded into debian/rules
686TestService fails because of the modules used in it aren't meant for actual use
677Race condition in tee filter clientReply
676"Write to backend failed. Session closed." when changing default database via readwritesplit with max_slave_connections != 100%
673MaxScale crashes if "Users table data" is empty and "show dbusers" is executed in maxadmin
670Tee filter: statement router loses statements when other router gets enough ahead
665Core: accessing freed memory when session is closed
659MaxScale doesn't shutdown if none of the configured services start
648use database is sent forever with tee filter to a readwrite split service
620enable_root_user=true generates errors to error log
612Service was started although no users could be loaded from database
600RWSplit: if session command fails in some backend, it is not dropped from routing session
587Hint filter don't work if listed before regex filter in configuration file
579serviceStartProtocol test crashes
506Don't write to shm/tmpfs by default without telling and without a way to override it
503TOC in the bundled PDFs doesn't link to actual sections
457Please provide a list of build dependencies for building MaxScale
361file_exists() *modifies* the file it checks for???
338Log manager spread down feature is disabled
159Memory leak. Dbusers are loaded into memory but not unloaded
- - -## Known Issues - -There are a number bugs and known limitations within this version of MaxScale, the most serious of this are listed below. - -* The Read/Write Splitter is a little too strict when it receives errors from slave servers during execution of session commands. This can result in sessions being terminated in situation in which MaxScale could recover without terminating the sessions. - -* MaxScale can not manage authentication that uses wildcard matching in hostnames in the mysql.user table of the backend database. The only wildcards that can be used are in IP address entries. - -* When users have different passwords based on the host from which they connect MaxScale is unable to determine which password it should use to connect to the backend database. This results in failed connections and unusable usernames in MaxScale. - -* Service init script is missing after upgrade from 1.0 in RPM-based system. Can be fixed by reinstalling the package ('yum reinstall maxscale' or 'rpm -i --force /maxscale-1.1.rpm') - -* Binlog Router Plugin is compatible with MySQL 5.6 - Binlog Router Plugin currently does not work for MariaDB 5.5 and MariaDB 10.0 - -* LONGBLOG are currently not supported. - -* Galera Cluster variables, such as @@wsrep_node_name, are not resolved by the embedded MariaDB parser. - -* The Database Firewall filter does not support multi-statements. Using them will result in an error being sent to the client. 
- -## Packaging - -Both RPM and Debian packages are available for MaxScale in addition to the tar based releases previously distributed we now provide - -* CentOS/RedHat 5 - -* CentOS/RedHat 6 - -* CentOS/RedHat 7 - -* Debian 6 - -* Debian 7 - -* Ubuntu 12.04 LTS - -* Ubuntu 13.10 - -* Ubuntu 14.04 LTS - -* Fedora 19 - -* Fedora 20 - -* OpenSuSE 13 - -* SuSE Linux Enterprise 11 - -* SuSE Linux Enterprise 12 diff --git a/Documentation/Release-Notes/MaxScale-1.1.1-Release-Notes.md b/Documentation/Release-Notes/MaxScale-1.1.1-Release-Notes.md deleted file mode 100644 index 59bb3506e..000000000 --- a/Documentation/Release-Notes/MaxScale-1.1.1-Release-Notes.md +++ /dev/null @@ -1,94 +0,0 @@ -# MariaDB MaxScale 1.1.1 Release Notes - -## 1.1.1 GA - -MaxScale 1.1 is the current stable (GA) release of MaxScale. Version 1.1.1 is mainly a bug fix release introducing fixes, but also introduces some improvements to existing functionality. - -## New Features - -### Improved memory management options - -Readwritesplit and schemarouter now both support upper limits to session state modifying commands. They both also allow the complete disabling of the history, making the sessions consume the smallest amount of memory while still making sure all slaves keep identical session states. - -### Improved trace logging - -The process of the user authentication data retrieval is logged into the trace log and the readconnroute router also outputs more information into the trace log. This allows for easier problem detection and configuration tuning. - -### More informative output from maxkeys and maxpasswd - -Using the password functionality in MaxScale is now a lot easier. Both programs now produce verbose and exact error messages. - -## Bug Fixes - -Here is a list of bugs fixed since the release of the 1.1.0 version of MaxScale. The bug IDs are from the **[MariaDB Jira](https://jira.mariadb.org/)**. 
- -* [MXS-99](https://jira.mariadb.org/browse/MXS-99): /etc/init.d/maxscale reload doesn't do anything -* [MXS-83](https://jira.mariadb.org/browse/MXS-83): linkage fails when system pcre library is recent -* [MXS-112](https://jira.mariadb.org/browse/MXS-112): Disable saving of session commands in the readwritesplit and schemarouter modules -* [MXS-114](https://jira.mariadb.org/browse/MXS-114): Disable recovery of disconnected slaves -* [MXS-73](https://jira.mariadb.org/browse/MXS-73): MaxScale uses nearly 100% CPU -* [MXS-36](https://jira.mariadb.org/browse/MXS-36): bugzillaId-671: wrong message if SHOW DATABASES privilege is missing -* [MXS-39](https://jira.mariadb.org/browse/MXS-39): bugzillaId-731:Boolean configuration parameters accept inconsistent parameters -* [MXS-64](https://jira.mariadb.org/browse/MXS-64): maxkeys and Maxpasswd do not produce informative error output -* [MXS-25](https://jira.mariadb.org/browse/MXS-25): bugzillaId-656: MySQL Monitor: claims that Master is available after master failure -* [MXS-82](https://jira.mariadb.org/browse/MXS-82): cmake warns when mariadb is compiled without mysql_release -* [MXS-69](https://jira.mariadb.org/browse/MXS-69): dbfwfilter should be pessimistic about rule syntax errors -* [MXS-98](https://jira.mariadb.org/browse/MXS-98): regexfilter log -* [MXS-28](https://jira.mariadb.org/browse/MXS-28): bugzillaId-433: Logging don't include assert information -* [MXS-75](https://jira.mariadb.org/browse/MXS-75): "wildcard" rule also blocks COUNT(*) -* [MXS-118](https://jira.mariadb.org/browse/MXS-118): Two monitors loaded at the same time result into not working installation -* [MXS-33](https://jira.mariadb.org/browse/MXS-33): bugzillaId-702: CLI: list services command shows negative values for the number of users of a service (Read Service). 
-* [MXS-17](https://jira.mariadb.org/browse/MXS-17): bugzillaId-736: Memory leak while doing read/write splitting -* [MXS-30](https://jira.mariadb.org/browse/MXS-30): bugzillaId-487: Buffer manager should not use pointer arithmetic on void* -* [MXS-81](https://jira.mariadb.org/browse/MXS-81): cmake fails when init scripts are missing -* [MXS-127](https://jira.mariadb.org/browse/MXS-127): disable_sescmd_history causes MaxScale to crash under load - -## Known Issues - -There are a number bugs and known limitations within this version of MaxScale, the most serious of these are listed below. - -* The Read/Write Splitter is a little too strict when it receives errors from slave servers during execution of session commands. This can result in sessions being terminated in situations from which MaxScale could recover without terminating the sessions. - -* MaxScale cannot manage authentication that uses wildcard matching in hostnames in the mysql.user table of the backend database. The only wildcards that can be used are in IP address entries. - -* When users have different passwords based on the host from which they connect MaxScale is unable to determine which password it should use to connect to the backend database. This results in failed connections and unusable usernames in MaxScale. - -* Binlog Router Plugin is compatible with MySQL 5.6 - Binlog Router Plugin currently does not work for MariaDB 5.5 and MariaDB 10.0 - - -* LONGBLOB are currently not supported. - -* Galera Cluster variables, such as @@wsrep_node_name, are not resolved by the embedded MariaDB parser. - -* The Database Firewall filter does not support multi-statements. Using them will result in an error being sent to the client. - -## Packaging - -Both RPM and Debian packages are available for MaxScale in addition to the tar based releases. 
Packages are now provided for: - -* CentOS/RedHat 5 - -* CentOS/RedHat 6 - -* CentOS/RedHat 7 - -* Debian 6 - -* Debian 7 - -* Ubuntu 12.04 LTS - -* Ubuntu 14.04 LTS - -* Fedora 19 - -* Fedora 20 - -* Fedora 21 - -* OpenSuSE 13 - -* SuSE Linux Enterprise 11 - -* SuSE Linux Enterprise 12 diff --git a/Documentation/Release-Notes/MaxScale-1.2.0-Release-Notes.md b/Documentation/Release-Notes/MaxScale-1.2.0-Release-Notes.md deleted file mode 100644 index e2da82780..000000000 --- a/Documentation/Release-Notes/MaxScale-1.2.0-Release-Notes.md +++ /dev/null @@ -1,99 +0,0 @@ -# MariaDB MaxScale 1.2 Release Notes - -## 1.2 GA - -This document details the changes in version 1.2 since the release of the 1.1.1 GA Release of the MaxScale product. - -###***PLEASE NOTICE: MaxScale installation directories have changed in this version*** -The 1.2 version of MaxScale differs from previous versions in its installation layout. Please take great care when upgrading MaxScale from previous versions to version 1.2. An automatic upgrade will not work due to the severe changes in the installation layout. - -## New Features - -### Non-root MaxScale -You can now run MaxScale as any user. The standard installation of a MaxScale package now creates the maxscale user and the maxscale group. - -### FHS-compliant installation -The 1.2 version of MaxScale now complies with the Filesystem Hierarchy Standard. This means that MAXSCALE_HOME is no longer necessary and directories can be moved to different locations. - -A quick list of changes in installation directories and file names: - - * Binaries go into `/usr/bin` - * Configuration files to `/etc` and the configuration file is now lower case: `maxscale.cnf` - * Logs to `/var/log/maxscale` - * The module and library directory have been combined into a single directory in `/usr/lib64/maxscale`. If you have custom modules please make sure they are located there. - * Data directory is `/var/lib/maxscale`.
This is the default location for MaxScale-specific data. - * PID file can be found at `/var/run/maxscale` - -### Client side SSL encryption -MaxScale now supports SSL/TLS encrypted connections to MaxScale. - -### Launchable scripts -Now you can configure MaxScale monitor module to automatically launch a script when it detects change in the state of a backend server. The script can be any custom script defined by you to take diagnostic or reporting action. With this you can easily customize MaxScale's behavior. - -### Lsyncd configuration guide -A new tutorial has been added which helps you keep MaxScale's configuration files in sync across multiple hosts. This allows for easier HA setups with MaxScale and guarantees up-to-date configuration files on all nodes. The tutorial can be found [here](../Tutorials/MaxScale-HA-with-lsyncd.md). - -## Bug fixes - -Here is a list of bugs fixed since the release of MaxScale 1.1.1. - - * [MXS-24](https://jira.mariadb.org/browse/MXS-24): bugzillaId-604: Module load path documentation issues ... - * [MXS-40](https://jira.mariadb.org/browse/MXS-40): Display logged in users - * [MXS-113](https://jira.mariadb.org/browse/MXS-113): MaxScale seems to fail if built against MariaDB 10.0 libraries - * [MXS-116](https://jira.mariadb.org/browse/MXS-116): Do not run maxscale as root.
- * [MXS-117](https://jira.mariadb.org/browse/MXS-117): Allow configuration of the log file directory - * [MXS-125](https://jira.mariadb.org/browse/MXS-125): inconsistency in maxkeys/maxpassword output and parameters - * [MXS-128](https://jira.mariadb.org/browse/MXS-128): cyclic dependency utils -> log_manager -> utils - * [MXS-136](https://jira.mariadb.org/browse/MXS-136): Check for MaxScale replication heartbeat table existence before creating - * [MXS-137](https://jira.mariadb.org/browse/MXS-137): cannot get sql for queries with length >= 0x80 - * [MXS-139](https://jira.mariadb.org/browse/MXS-139): Schemarouter authentication for wildcard grants fails without optimize_wildcard - * [MXS-140](https://jira.mariadb.org/browse/MXS-140): strip_db_esc does not work without auth_all_servers - * [MXS-162](https://jira.mariadb.org/browse/MXS-162): Fix Incorrect info in Configuration Guide - * [MXS-165](https://jira.mariadb.org/browse/MXS-165): Concurrency issue while incrementing sessions in qlafilter - * [MXS-166](https://jira.mariadb.org/browse/MXS-166): Memory leak when creating a new event - * [MXS-171](https://jira.mariadb.org/browse/MXS-171): Allow reads on master for readwritesplit - * [MXS-176](https://jira.mariadb.org/browse/MXS-176): Missing dependencies in documentation - * [MXS-179](https://jira.mariadb.org/browse/MXS-179): Keep configuration changes in synch across MaxScale Mate Nodes - * [MXS-180](https://jira.mariadb.org/browse/MXS-180): MariaDB10 binlog router compatibilty - * [MXS-181](https://jira.mariadb.org/browse/MXS-181): Poor performance on TCP connection due to Nagle's algoritm - * [MXS-182](https://jira.mariadb.org/browse/MXS-182): SHOW SLAVE STATUS and maxadmin "show services" for binlog router needs updated when used with MariaDB 10 Master - * [MXS-212](https://jira.mariadb.org/browse/MXS-212): Stopped services accept connections - * [MXS-225](https://jira.mariadb.org/browse/MXS-225): RPM Debug build packages have no debugging symbols - * 
[MXS-227](https://jira.mariadb.org/browse/MXS-227): Memory leak in Galera Monitor - * [MXS-244](https://jira.mariadb.org/browse/MXS-244): Memory leak when using prepared statements without arguments - -## Known Issues and Limitations - -There are a number of bugs and known limitations within this version of MaxScale, the most serious of these are listed below. - -* MaxScale can not manage authentication that uses wildcard matching in hostnames in the mysql.user table of the backend database. The only wildcards that can be used are in IP address entries. - -* When users have different passwords based on the host from which they connect MaxScale is unable to determine which password it should use to connect to the backend database. This results in failed connections and unusable usernames in MaxScale. - -* LONGBLOB are currently not supported. - -* Galera Cluster variables, such as @@wsrep_node_name, are not resolved by the embedded MariaDB parser. - -* The Database Firewall filter does not support multi-statements. Using them will result in an error being sent to the client. - -## Packaging - -Both RPM and Debian packages are available for MaxScale in addition to the tar based releases previously distributed, we now provide - -* CentOS/RedHat 5 - -* CentOS/RedHat 6 - -* CentOS/RedHat 7 - -* Debian 6 - -* Debian 7 - -* Ubuntu 12.04 LTS - -* Ubuntu 14.04 LTS - -* SuSE Linux Enterprise 11 - -* SuSE Linux Enterprise 12 diff --git a/Documentation/Release-Notes/MaxScale-1.3.0-Release-Notes.md b/Documentation/Release-Notes/MaxScale-1.3.0-Release-Notes.md deleted file mode 100644 index 9afe57729..000000000 --- a/Documentation/Release-Notes/MaxScale-1.3.0-Release-Notes.md +++ /dev/null @@ -1,267 +0,0 @@ -# MariaDB MaxScale 1.3 Release Notes - -This document describes the changes in release 1.3, when compared to -release 1.2.1. - -## 1.3.0 - -For any problems you encounter, please consider submitting a bug -report at [Jira](https://jira.mariadb.org).
- -## New Features - -### Persistent Connections - -MaxScale 1.3.0 introduces the concept of *Persistent Connections*. With -that is meant that the connection from MaxScale to the backend server is -not terminated even if the connection from the client to MaxScale is. -If a client makes frequent short connections, there may be a benefit from -using the *Persistent Connection* feature as it may reduce the time it -takes from establishing a connection from the client through MaxScale to -the backend server. - -**NOTE**: The persistent connections do not track session state. This means -that changing the default database or modifying the session state will cause -those changes to be active even for new connections. If you use queries with -implicit databases or use connections with different client settings, you -should take great care when using persistent connections. - -Additional information is available in the following document: -* [Administration Tutorial](../Tutorials/Administration-Tutorial.md#persistent-connections) - -### Binlog Server - -There are new administrative commands: STOP SLAVE, START SLAVE, RESET SLAVE -and CHANGE MASTER TO. The master server details are now provided by a -master.ini file located in binlog directory and could be changed via -CHANGE MASTER TO command issued via MySQL connection to MaxScale. - -Before migrating to 1.3.0 it is necessary to put a writable master.ini file -into binlog directory, containing these parameters: - -``` -[binlog_configuration] -master_host=127.0.0.1 -master_port=3308 -master_user=repl -master_password=somepass -filestem=repl-bin -``` - -Users may change parameters according to their configuration. - -**Note**: the "servers" parameter is no longer required in the service -definition. 
- -Additional information is available in the following documents: -* [Binlogrouter Tutorial](../Tutorials/Replication-Proxy-Binlog-Router-Tutorial.md) -* [Upgrading Binlogrouter to 1.3](../Upgrading/Upgrading-BinlogRouter-To-Maxscale-1.3.md) -* [Binlogrouter Documentation](../Routers/Binlogrouter.md) - -### Logging Changes - -Before 1.3, MaxScale logged data to four different log files; *error*, -*message*, *trace* and *debug*. Complementary and/or alternatively, MaxScale -could also log to syslog, in which case messages intended for the error and -message file were logged there. What files were enabled and written to was -controlled by entries in the MaxScale configuration file. - -This has now been changed so that MaxScale logs to a single -file - *maxscale.log* - and each logged entry is prepended with *error*, -*warning*, *notice*, *info* or *debug*, depending on the seriousness or -priority of the message. The levels are the same as those of syslog. -MaxScale is still capable of complementary or alternatively logging to syslog. - -What used to be logged to the *message* file is now logged as a *notice* -message and what used to be written to the *trace* file, is logged as an -*info* message. - -By default, *notice*, *warning* and *error* messages are logged, while -*info* and *debug* messages are not. Exactly what kind of messages are -logged can be controlled via the MaxScale configuration file, but enabling -and disabling different kinds of messages can also be performed at runtime -from maxadmin. - -Earlier, the *error* and *message* files were written to the filesystem, -while the *trace* and *debug* files were written to shared memory. The -one and only log file of MaxScale is now by default written to the filesystem. -This will have performance implications if *info* and *debug* messages are -enabled. 
- -If you want to retain the possibility of turning on *info* and *debug* -messages, without it impacting the performance too much, the recommended -approach is to add the following entries to the MaxScale configuration file: - -``` -[maxscale] -syslog=1 -maxlog=0 -log_to_shm=1 -``` - -This will have the effect of MaxScale creating the *maxscale.log* into -shared memory, but not logging anything to it. However, all *notice*, -*warning* and *error* messages will be logged to syslog. - -Then, if there is a need to turn on *info* messages that can be done via -the maxadmin interface: - -``` -MaxScale> enable log-priority info -MaxScale> enable maxlog -``` - -Note that *info* and *debug* messages are never logged to syslog. - -### PCRE2 integration - -MaxScale now uses the PCRE2 library for regular expressions. This has been -integrated into the core configuration processing and most of the modules. -The main module which uses this is the regexfilter which now fully supports -the PCRE2 syntax with proper substitutions. For a closer look at how this -differs from the POSIX regular expression syntax take a look at the -[PCRE2 documentation](http://www.pcre.org/current/doc/html/pcre2syntax.html). - -**Please note**, that the substitution string follows different rules than -the traditional substitution strings. The usual way of referring to capture -groups in the substitution string is with the backslash character followed -by the capture group reference e.g. `\1` but the PCRE2 library uses the dollar -character followed by the group reference. To quote the PCRE2 native API manual: - -``` -In the replacement string, which is interpreted as a UTF string in UTF mode, and is checked for UTF validity unless the PCRE2_NO_UTF_CHECK option is set, a dollar character is an escape character that can specify the insertion of characters from capturing groups in the pattern. 
The following forms are recognized: - - $$ insert a dollar character - $ insert the contents of group - ${} insert the contents of group -``` - -### Improved launchable scripts - -The launchable scripts were modified to allow usage without wrapper scripts. -The scripts are now executed as they are in the configuration files with certain -keywords being replaced with the initiator, event and node list. For more -details, please read the [Monitor Common](../Monitors/Monitor-Common.md) document. - -## Bug fixes - -[Here is a list of bugs fixed since the release of MaxScale 1.2.1.](https://jira.mariadb.org/browse/MXS-550?jql=project%20%3D%20MXS%20AND%20issuetype%20%3D%20Bug%20AND%20resolution%20in%20(Fixed%2C%20Done)%20AND%20fixVersion%20%3D%201.3.0) - - * [MXS-559](https://jira.mariadb.org/browse/MXS-559): Crash due to debug assertion in readwritesplit - * [MXS-551](https://jira.mariadb.org/browse/MXS-551): Maxscale BETA 1.3.0 running as root - * [MXS-548](https://jira.mariadb.org/browse/MXS-548): Maxscale 1.2.1 crash on Ubuntu 4.04.3 x86_64 - * [MXS-508](https://jira.mariadb.org/browse/MXS-508): regex filter ignores username - * [MXS-505](https://jira.mariadb.org/browse/MXS-505): if Maxscale fails to start it goes to infinite "try-to-start and fail" loop - * [MXS-501](https://jira.mariadb.org/browse/MXS-501): USE hangs when Tee filter uses matching - * [MXS-500](https://jira.mariadb.org/browse/MXS-500): Tee filter hangs when statements aren't duplicated. 
- * [MXS-499](https://jira.mariadb.org/browse/MXS-499): Init script error on Debian Wheezy - * [MXS-494](https://jira.mariadb.org/browse/MXS-494): Weight calculation favors servers without connections - * [MXS-493](https://jira.mariadb.org/browse/MXS-493): SIGFPE when weightby parameter is 0 and using LEAST_GLOBAL_CONNECTIONS - * [MXS-492](https://jira.mariadb.org/browse/MXS-492): Segfault if server is missing weighting parameter - * [MXS-491](https://jira.mariadb.org/browse/MXS-491): MaxScale can time out systemd if startup of services takes too long - * [MXS-480](https://jira.mariadb.org/browse/MXS-480): Readwritesplit defaults cause connection pileup - * [MXS-479](https://jira.mariadb.org/browse/MXS-479): localtime must not be used in the multi-threaded program. - * [MXS-472](https://jira.mariadb.org/browse/MXS-472): Monitors update status in multiple steps - * [MXS-464](https://jira.mariadb.org/browse/MXS-464): Upgrade 1.2.0 to 1.2.1 blocking start of `maxscale` service - * [MXS-450](https://jira.mariadb.org/browse/MXS-450): Syslog default prefix is MaxScale not maxscale - * [MXS-447](https://jira.mariadb.org/browse/MXS-447): Monitors are started before they have been fully configured - * [MXS-436](https://jira.mariadb.org/browse/MXS-436): Invalid threads argument is ignored and MaxScale starts with one thread - * [MXS-431](https://jira.mariadb.org/browse/MXS-431): Backend authentication fails with schemarouter - * [MXS-429](https://jira.mariadb.org/browse/MXS-429): Binlog Router crashes due to segmentation fault with no meaningful error if no listener is configured - * [MXS-428](https://jira.mariadb.org/browse/MXS-428): Maxscale crashes at startup. 
- * [MXS-427](https://jira.mariadb.org/browse/MXS-427): Logging a large string causes a segmentation fault - * [MXS-417](https://jira.mariadb.org/browse/MXS-417): Single character wildcard doesn't work in MaxScale - * [MXS-416](https://jira.mariadb.org/browse/MXS-416): Orphan sessions appear after many network errors - * [MXS-415](https://jira.mariadb.org/browse/MXS-415): MaxScale 1.2.1 crashed with Signal 6 and 11 - * [MXS-414](https://jira.mariadb.org/browse/MXS-414): Maxscale crashed every day! - * [MXS-413](https://jira.mariadb.org/browse/MXS-413): MaxAdmin hangs with show session - * [MXS-412](https://jira.mariadb.org/browse/MXS-412): show dbusers segmentation fault - * [MXS-409](https://jira.mariadb.org/browse/MXS-409): prepare should not hit all servers - * [MXS-408](https://jira.mariadb.org/browse/MXS-408): Connections to backend databases do not clear promptly - * [MXS-407](https://jira.mariadb.org/browse/MXS-407): Maxscale binlogrouter binlog names are unncessarily length-limited - * [MXS-405](https://jira.mariadb.org/browse/MXS-405): Maxscale bin router crash - * [MXS-403](https://jira.mariadb.org/browse/MXS-403): Monitor callback to DCBs evades thread control causing crashes - * [MXS-394](https://jira.mariadb.org/browse/MXS-394): Faults in regex_replace function of regexfilter.c - * [MXS-392](https://jira.mariadb.org/browse/MXS-392): Update to "Rabbit MQ setup and MaxScale Integration" document - * [MXS-386](https://jira.mariadb.org/browse/MXS-386): max_sescmd_history should not close connections - * [MXS-385](https://jira.mariadb.org/browse/MXS-385): disable_sescmd_history can cause false data to be read. - * [MXS-379](https://jira.mariadb.org/browse/MXS-379): Incorrect handing of a GWBUF may cause SIGABRT. - * [MXS-376](https://jira.mariadb.org/browse/MXS-376): MaxScale terminates with SIGABRT. - * [MXS-373](https://jira.mariadb.org/browse/MXS-373): If config file is non-existent, maxscale crashes. 
- * [MXS-366](https://jira.mariadb.org/browse/MXS-366): Multi-source slave servers are not detected. - * [MXS-365](https://jira.mariadb.org/browse/MXS-365): Load data local infile connection abort when loading certain files - * [MXS-363](https://jira.mariadb.org/browse/MXS-363): rpm building seems to do something wrong with maxscale libraries - * [MXS-361](https://jira.mariadb.org/browse/MXS-361): crash on backend restart if persistent connections are in use - * [MXS-360](https://jira.mariadb.org/browse/MXS-360): Persistent connections: maxadmin reports 0 all the time even if connections are created - * [MXS-358](https://jira.mariadb.org/browse/MXS-358): Crash, Error in `/usr/bin/maxscale': free(): invalid next size (fast) - * [MXS-352](https://jira.mariadb.org/browse/MXS-352): With no backend connection, services aren't started - * [MXS-351](https://jira.mariadb.org/browse/MXS-351): Router error handling can cause crash by leaving dangling DCB pointer - * [MXS-345](https://jira.mariadb.org/browse/MXS-345): maxscale.conf in /etc/init.d prevents puppet from starting maxscale - * [MXS-342](https://jira.mariadb.org/browse/MXS-342): When ini_parse fails to parse config file, no log messages are printed. 
- * [MXS-333](https://jira.mariadb.org/browse/MXS-333): use_sql_variables_in=master doesn't work - * [MXS-329](https://jira.mariadb.org/browse/MXS-329): The session pointer in a DCB can be null unexpectedly - * [MXS-323](https://jira.mariadb.org/browse/MXS-323): mysql_client readwritesplit handleError seems using wrong dcb and cause wrong behavior - * [MXS-321](https://jira.mariadb.org/browse/MXS-321): Incorrect number of connections in maxadmin list view - * [MXS-310](https://jira.mariadb.org/browse/MXS-310): MaxScale 1.2 does not completely cleanly change to the maxscale user - * [MXS-297](https://jira.mariadb.org/browse/MXS-297): postinstall on debian copies wrong file in /etc/init.d - * [MXS-293](https://jira.mariadb.org/browse/MXS-293): Bug in init script, and maxscale --user=maxscale does run as root - * [MXS-291](https://jira.mariadb.org/browse/MXS-291): Random number generation has flaws - * [MXS-289](https://jira.mariadb.org/browse/MXS-289): Corrupted memory or empty value are in Master_host field of SHOW SLAVE STATUS when master connection is broken - * [MXS-286](https://jira.mariadb.org/browse/MXS-286): Fix the content and format of MaxScale-HA-with-Corosync-Pacemaker document - * [MXS-283](https://jira.mariadb.org/browse/MXS-283): SSL connections leak memory - * [MXS-282](https://jira.mariadb.org/browse/MXS-282): Add example to "Routing Hints" document - * [MXS-281](https://jira.mariadb.org/browse/MXS-281): SELECT INTO OUTFILE query goes several times to one slave - * [MXS-280](https://jira.mariadb.org/browse/MXS-280): SELECT INTO OUTFILE query succeeds even if backed fails - * [MXS-276](https://jira.mariadb.org/browse/MXS-276): Memory leak of buffer in connection router readQuery - * [MXS-274](https://jira.mariadb.org/browse/MXS-274): Memory Leak - * [MXS-271](https://jira.mariadb.org/browse/MXS-271): Schemarouter and unknown databases - * [MXS-269](https://jira.mariadb.org/browse/MXS-269): Crash in MySQL backend protocol - * 
[MXS-260](https://jira.mariadb.org/browse/MXS-260): Multiple MaxScale processes - * [MXS-258](https://jira.mariadb.org/browse/MXS-258): ERR_error_string could overflow in future - * [MXS-254](https://jira.mariadb.org/browse/MXS-254): Failure to read configuration file results in no error log messages - * [MXS-251](https://jira.mariadb.org/browse/MXS-251): Non-thread safe strerror - * [MXS-220](https://jira.mariadb.org/browse/MXS-220): LAST_INSERT_ID() query is redirect to slave if function call is in where clause - * [MXS-210](https://jira.mariadb.org/browse/MXS-210): Check MaxScale user privileges - * [MXS-202](https://jira.mariadb.org/browse/MXS-202): User password not handled correctly - * [MXS-197](https://jira.mariadb.org/browse/MXS-197): Incorrect sequence of operations with DCB - * [MXS-196](https://jira.mariadb.org/browse/MXS-196): DCB state is changed prior to polling operation - * [MXS-195](https://jira.mariadb.org/browse/MXS-195): maxscaled.c ineffective DCB disposal - * [MXS-184](https://jira.mariadb.org/browse/MXS-184): init script issues in CentOS 7 - * [MXS-183](https://jira.mariadb.org/browse/MXS-183): MaxScale crash after 'reload config' - * [MXS-111](https://jira.mariadb.org/browse/MXS-111): maxscale binlog events shown in show services seems to be double-counted for the master connection - * [MXS-54](https://jira.mariadb.org/browse/MXS-54): Write failed auth attempt to trace log - * [MXS-35](https://jira.mariadb.org/browse/MXS-35): bugzillaId-451: maxscale main() exit code is always 0 after it daemonizes - * [MXS-29](https://jira.mariadb.org/browse/MXS-29): bugzillaId-589: detect if MAXSCALE_SCHEMA.HEARTBEAT table is not replicated - * [MXS-3](https://jira.mariadb.org/browse/MXS-3): Remove code for atomic_add in skygw_utils.cc - -## Known Issues and Limitations - -There are a number bugs and known limitations within this version of MaxScale, -the most serious of this are listed below. 
- -* MaxScale can not manage authentication that uses wildcard matching in hostnames in the mysql.user table of the backend database. The only wildcards that can be used are in IP address entries. - -* When users have different passwords based on the host from which they connect MaxScale is unable to determine which password it should use to connect to the backend database. This results in failed connections and unusable usernames in MaxScale. - -* The readconnroute module does not support sending of LONGBLOB data. - -* Galera Cluster variables, such as @@wsrep_node_name, are not resolved by the embedded MariaDB parser. - -* The Database Firewall filter does not support multi-statements. Using them will result in an error being sent to the client. - -* The SSL support is known to be unstable. - -## Packaging - -RPM and Debian packages are provided for the Linux distributions supported -by MariaDB Enterprise. - -Packages can be downloaded [here](https://mariadb.com/resources/downloads). - -## Source Code - -The source code of MaxScale is tagged at GitHub with a tag, which is identical -with the version of MaxScale. For instance, the tag of version 1.2.1 of MaxScale -is 1.2.1. Further, *master* always refers to the latest released non-beta version. - -The source code is available [here](https://github.com/mariadb-corporation/MaxScale). diff --git a/Documentation/Release-Notes/MaxScale-1.4.0-Release-Notes.md b/Documentation/Release-Notes/MaxScale-1.4.0-Release-Notes.md deleted file mode 100644 index bf91481a2..000000000 --- a/Documentation/Release-Notes/MaxScale-1.4.0-Release-Notes.md +++ /dev/null @@ -1,112 +0,0 @@ - -# MariaDB MaxScale 1.4.0 (Beta) Release Notes - -Release 1.4.0 is a beta release. - -This document describes the changes in release 1.4.0, when compared to -release 1.3.0. - -## 1.4.0 - -For any problems you encounter, please consider submitting a bug -report at [Jira](https://jira.mariadb.org). 
- -## New Features - -### Firewall Filter - -The firewall filter now supports different actions when a rule is matched. -Currently possible actions are to either block the query, allow it or -ignore the match and allow it. - -Matching and non-matching queries can now be logged and by combining this new -logging functionality with the _ignore_ action, you can set up the filter in -a dry-run mode. For more information about the firewall filter, please refer to -[Database Firewall Filter](../Filters/Database-Firewall-Filter.md). - -### SSL - -Client-side SSL support has been in MaxScale for some time, but has -been known to have been unstable. In 1.4.0, client side SSL is now -believed to be stable and fully usable. - -The SSL configuration is now done on a per listener basis which -allows both SSL and non-SSL connections to a service. For more details -on how to configure this, please refer to the -[MaxScale Configuration Guide](../Getting-Started/Configuration-Guide.md#listener-and-ssl). - -### POSIX Extended Regular Expression Syntax - -The _qlafilter_, the _topfilter_ and the _namedserverfilter_ now -accept _extended_ as a filter option, which enables the POSIX Extended -Regular Expression syntax. - - -### Improved user grant detection - -MaxScale now allows users with only table level access to connect with -a default database. The service users will require SELECT privileges on -the `mysql.tables_priv` table: - -``` -GRANT SELECT ON mysql.tables_priv TO 'maxscale'@'maxscalehost' -``` -For more information, refer to the configuration guide: -[MaxScale Configuration Guide](../Getting-Started/Configuration-Guide.md#service). - -### Improved password encryption - -MaxScale 1.4.0 uses the MD5 version of the crypt function which is more secure -than the non-MD5 version. This means that a new password file needs to be -created with `maxkeys`. The configuration file should be updated to use the new -passwords. This can be done with the help of the `maxpasswd` utility. 
For more -details about how to do this, please refer to the installation guide: -[MariaDB MaxScale Installation Guide](../Getting-Started/MariaDB-MaxScale-Installation-Guide.md) - -## Removed Features - -* MaxScale no longer supports SSLv3. - -* The `enabled` mode, which allows both SSL and non-SSL connections on the same port, has been removed. - -## Bug fixes - -[Here is a list of bugs fixed since the release of MaxScale 1.3.0.](https://jira.mariadb.org/browse/MXS-600?jql=project%20%3D%20MXS%20AND%20issuetype%20%3D%20Bug%20AND%20resolution%20in%20(Fixed%2C%20Done)%20AND%20fixVersion%20%3D%201.4.0) - - * [MXS-400](https://jira.mariadb.org/browse/MXS-400): readwritesplit router doesn't allow connect when the only remaining server is master and slave - * [MXS-497](https://jira.mariadb.org/browse/MXS-497): MaxScale does not contemplate client multiple statements (CLIENT_MULTI_STATEMENTS) - * [MXS-504](https://jira.mariadb.org/browse/MXS-504): SSL connection handling needs work - * [MXS-511](https://jira.mariadb.org/browse/MXS-511): ReadWriteSplit router won't choose node as master and logs confusing "RUNNING MASTER" error message - * [MXS-563](https://jira.mariadb.org/browse/MXS-563): Maxscale fails to start - * [MXS-565](https://jira.mariadb.org/browse/MXS-565): Binlog Router doesn't handle 16MB larger transmissions - * [MXS-573](https://jira.mariadb.org/browse/MXS-573): Write permission to systemd service file - * [MXS-574](https://jira.mariadb.org/browse/MXS-574): Wrong parameter name in systemd service file - * [MXS-575](https://jira.mariadb.org/browse/MXS-575): Nagios scripts lack execute permissions - * [MXS-577](https://jira.mariadb.org/browse/MXS-577): Don't install systemd files and init.d scipts at the same time - * [MXS-581](https://jira.mariadb.org/browse/MXS-581): Only the first 8 characters of passwords are used - * [MXS-582](https://jira.mariadb.org/browse/MXS-582): crypt is not thread safe - * [MXS-585](https://jira.mariadb.org/browse/MXS-585): 
Intermittent connection failure with MaxScale 1.2/1.3 using MariaDB/J 1.3 - * [MXS-589](https://jira.mariadb.org/browse/MXS-589): Password encryption looks for the file in the wrong directory - * [MXS-592](https://jira.mariadb.org/browse/MXS-592): Build failure with MariaDB 10.1 when doing a debug build - * [MXS-594](https://jira.mariadb.org/browse/MXS-594): Binlog name gets trunkated - * [MXS-600](https://jira.mariadb.org/browse/MXS-600): Threads=auto parameter configuration fails - -## Known Issues and Limitations - -There are some limitations and known issues within this version of MaxScale. -For more information, please refer to the [Limitations](../About/Limitations.md) document. - -## Packaging - -RPM and Debian packages are provided for the Linux distributions supported -by MariaDB Enterprise. - -Packages can be downloaded [here](https://mariadb.com/resources/downloads). - -## Source Code - -The source code of MaxScale is tagged at GitHub with a tag, which is identical -with the version of MaxScale. For instance, the tag of version X.Y.Z of MaxScale -is X.Y.Z. Further, *master* always refers to the latest released non-beta version. - -The source code is available [here](https://github.com/mariadb-corporation/MaxScale). diff --git a/Documentation/Release-Notes/MaxScale-1.4.1-Release-Notes.md b/Documentation/Release-Notes/MaxScale-1.4.1-Release-Notes.md deleted file mode 100644 index b5473db85..000000000 --- a/Documentation/Release-Notes/MaxScale-1.4.1-Release-Notes.md +++ /dev/null @@ -1,41 +0,0 @@ - -# MariaDB MaxScale 1.4.1 Release Notes - -Release 1.4.1 is a GA release. - -This document describes the changes in release 1.4.1, when compared to -release [1.4.0](MaxScale-1.4.0-Release-Notes.md). - -For any problems you encounter, please consider submitting a bug -report at [Jira](https://jira.mariadb.org). 
- -## Bug fixes - -[Here is a list of bugs fixed since the release of MaxScale 1.4.0.](https://jira.mariadb.org/browse/MXS-646?jql=project%20%3D%20MXS%20AND%20issuetype%20%3D%20Bug%20AND%20resolution%20in%20(Fixed%2C%20Done)%20AND%20fixVersion%20%3D%201.4.1) - - * [MXS-646](https://jira.mariadb.org/browse/MXS-646): Namedserverfilter ignores user and source parameters - * [MXS-632](https://jira.mariadb.org/browse/MXS-632): Replace or update VERSION - * [MXS-630](https://jira.mariadb.org/browse/MXS-630): Requirement of tables_priv access not documented in "Upgrading" guide - * [MXS-629](https://jira.mariadb.org/browse/MXS-629): Lack of tables_priv privilege causes confusing error message - * [MXS-627](https://jira.mariadb.org/browse/MXS-627): Failure to connect to MaxScale with MariaDB Connector/J - * [MXS-585](https://jira.mariadb.org/browse/MXS-585): Intermittent connection failure with MaxScale 1.2/1.3 using MariaDB/J 1.3 - -## Known Issues and Limitations - -There are some limitations and known issues within this version of MaxScale. -For more information, please refer to the [Limitations](../About/Limitations.md) document. - -## Packaging - -RPM and Debian packages are provided for the Linux distributions supported -by MariaDB Enterprise. - -Packages can be downloaded [here](https://mariadb.com/resources/downloads). - -## Source Code - -The source code of MaxScale is tagged at GitHub with a tag, which is identical -with the version of MaxScale. For instance, the tag of version X.Y.Z of MaxScale -is X.Y.Z. Further, *master* always refers to the latest released non-beta version. - -The source code is available [here](https://github.com/mariadb-corporation/MaxScale). 
diff --git a/Documentation/Release-Notes/MaxScale-1.4.2-Release-Notes.md b/Documentation/Release-Notes/MaxScale-1.4.2-Release-Notes.md deleted file mode 100644 index 3954cd770..000000000 --- a/Documentation/Release-Notes/MaxScale-1.4.2-Release-Notes.md +++ /dev/null @@ -1,44 +0,0 @@ - -# MariaDB MaxScale 1.4.2 Release Notes - -Release 1.4.2 is a GA release. - -This document describes the changes in release 1.4.2, when compared to -release 1.4.1. - -For any problems you encounter, please consider submitting a bug -report at [Jira](https://jira.mariadb.org). - -## Bug fixes - -[Here is a list of bugs fixed since the release of MaxScale 1.4.1.](https://jira.mariadb.org/browse/MXS-683?jql=project%20%3D%20MXS%20AND%20issuetype%20%3D%20Bug%20AND%20resolution%20in%20(Fixed%2C%20Done)%20AND%20fixVersion%20%3D%201.4.2) - - * [MXS-684](https://jira.mariadb.org/browse/MXS-684): Password field still used with MySQL 5.7 - * [MXS-683](https://jira.mariadb.org/browse/MXS-683): qc_mysqlembedded reports as-name instead of original-name. - * [MXS-681](https://jira.mariadb.org/browse/MXS-681): Loading service users error - * [MXS-680](https://jira.mariadb.org/browse/MXS-680): qc_mysqlembedded fails to look into function when reporting affected fields - * [MXS-679](https://jira.mariadb.org/browse/MXS-679): qc_mysqlembedded excludes some fields, when reporting affected fields - * [MXS-662](https://jira.mariadb.org/browse/MXS-662): No Listener on different IPs but same port since 1.4.0 - * [MXS-661](https://jira.mariadb.org/browse/MXS-661): Log fills with 'Length (0) is 0 or query string allocation failed' - * [MXS-656](https://jira.mariadb.org/browse/MXS-656): after upgrade from 1.3 to 1.4, selecting master isn't working as expected - * [MXS-616](https://jira.mariadb.org/browse/MXS-616): Duplicated binlog event under heavy load. - -## Known Issues and Limitations - -There are some limitations and known issues within this version of MaxScale. 
-For more information, please refer to the [Limitations](../About/Limitations.md) document. - -## Packaging - -RPM and Debian packages are provided for the Linux distributions supported -by MariaDB Enterprise. - -Packages can be downloaded [here](https://mariadb.com/resources/downloads). - -## Source Code - -The source code of MaxScale is tagged at GitHub with a tag, which is identical -with the version of MaxScale. For instance, the tag of version X.Y.Z of MaxScale -is X.Y.Z. Further, *master* always refers to the latest released non-beta version. - -The source code is available [here](https://github.com/mariadb-corporation/MaxScale). diff --git a/Documentation/Release-Notes/MaxScale-1.4.3-Release-Notes.md b/Documentation/Release-Notes/MaxScale-1.4.3-Release-Notes.md deleted file mode 100644 index 0f381df19..000000000 --- a/Documentation/Release-Notes/MaxScale-1.4.3-Release-Notes.md +++ /dev/null @@ -1,37 +0,0 @@ - -# MariaDB MaxScale 1.4.3 Release Notes - -Release 1.4.3 is a GA release. - -This document describes the changes in release 1.4.3, when compared to -release 1.4.2. - -For any problems you encounter, please consider submitting a bug -report at [Jira](https://jira.mariadb.org). - -## Bug fixes - -[Here is a list of bugs fixed since the release of MaxScale 1.4.2.](https://jira.mariadb.org/browse/MXS-700?jql=project%20%3D%20MXS%20AND%20issuetype%20%3D%20Bug%20AND%20resolution%20in%20(Fixed%2C%20Done)%20AND%20fixVersion%20%3D%201.4.3) - - * [MXS-700](https://jira.mariadb.org/browse/MXS-700): Segfault on startup - * [MXS-699](https://jira.mariadb.org/browse/MXS-699): qc_mysqlembedded fails to return fields in comma expression - -## Known Issues and Limitations - -There are some limitations and known issues within this version of MaxScale. -For more information, please refer to the [Limitations](../About/Limitations.md) document. - -## Packaging - -RPM and Debian packages are provided for the Linux distributions supported -by MariaDB Enterprise. 
- -Packages can be downloaded [here](https://mariadb.com/resources/downloads). - -## Source Code - -The source code of MaxScale is tagged at GitHub with a tag, which is identical -with the version of MaxScale. For instance, the tag of version X.Y.Z of MaxScale -is X.Y.Z. Further, *master* always refers to the latest released non-beta version. - -The source code is available [here](https://github.com/mariadb-corporation/MaxScale). From 7d7d8a0560023f84bcc292d0b74b98a00fb5c910 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Fri, 17 Mar 2017 07:00:58 +0200 Subject: [PATCH 06/27] Print all output as raw bytes Printing all output as raw bytes allows MaxScale to control the formatting process. This also removes the need to convert the bytes to Python strings and the need to parse the JSON. --- server/modules/protocol/examples/cdc.py | 51 +++++++------------------ 1 file changed, 14 insertions(+), 37 deletions(-) diff --git a/server/modules/protocol/examples/cdc.py b/server/modules/protocol/examples/cdc.py index 7929b9cb2..e44664267 100755 --- a/server/modules/protocol/examples/cdc.py +++ b/server/modules/protocol/examples/cdc.py @@ -12,52 +12,32 @@ # Public License. 
import time -import json -import re import sys import socket import hashlib import argparse -import subprocess import selectors import binascii import os -# Read data as JSON -def read_json(): - decoder = json.JSONDecoder() - rbuf = bytes() - ep = selectors.EpollSelector() - ep.register(sock, selectors.EVENT_READ) +def read_data(): + sel = selectors.DefaultSelector() + sel.register(sock, selectors.EVENT_READ) while True: - pollrc = ep.select(timeout=int(opts.read_timeout) if int(opts.read_timeout) > 0 else None) try: + events = sel.select(timeout=int(opts.read_timeout) if int(opts.read_timeout) > 0 else None) buf = sock.recv(4096, socket.MSG_DONTWAIT) - rbuf += buf - while True: - rbuf = rbuf.lstrip() - data = decoder.raw_decode(rbuf.decode('ascii')) - rbuf = rbuf[data[1]:] - print(json.dumps(data[0])) - except ValueError as err: - sys.stdout.flush() - pass - except Exception: + if len(buf) > 0: + os.write(sys.stdout.fileno(), buf) + sys.stdout.flush() + else: + raise Exception('Socket was closed') + + except BlockingIOError: break - -# Read data as Avro -def read_avro(): - ep = selectors.EpollSelector() - ep.register(sock, selectors.EVENT_READ) - - while True: - pollrc = ep.select(timeout=int(opts.read_timeout) if int(opts.read_timeout) > 0 else None) - try: - buf = sock.recv(4096, socket.MSG_DONTWAIT) - os.write(sys.stdout.fileno(), buf) - sys.stdout.flush() - except Exception: + except Exception as ex: + print(ex, file=sys.stderr) break parser = argparse.ArgumentParser(description = "CDC Binary consumer", conflict_handler="resolve") @@ -91,7 +71,4 @@ response = str(sock.recv(1024)).encode('utf_8') # Request a data stream sock.send(bytes(("REQUEST-DATA " + opts.FILE + (" " + opts.GTID if opts.GTID else "")).encode())) -if opts.format == "JSON": - read_json() -elif opts.format == "AVRO": - read_avro() +read_data() From e1e7137a687d8cf29f553396012ab4ad2afa51d5 Mon Sep 17 00:00:00 2001 From: Johan Wikman Date: Wed, 22 Mar 2017 12:58:44 +0200 Subject: [PATCH 07/27] 
Remove references to old release notes --- Documentation/Documentation-Contents.md | 22 ---------------------- 1 file changed, 22 deletions(-) diff --git a/Documentation/Documentation-Contents.md b/Documentation/Documentation-Contents.md index 71e3031d8..1f391c64a 100644 --- a/Documentation/Documentation-Contents.md +++ b/Documentation/Documentation-Contents.md @@ -19,10 +19,6 @@ ## Upgrading MariaDB MaxScale - [Upgrading MariaDB MaxScale from 1.4 to 2.0](Upgrading/Upgrading-To-MaxScale-2.0.md) -- [Upgrading MariaDB MaxScale from 1.3 to 1.4](Upgrading/Upgrading-To-MaxScale-1.4.md) -- [Upgrading MariaDB MaxScale from 1.2 to 1.3](Upgrading/Upgrading-To-MaxScale-1.3.md) -- [Upgrading MariaDB MaxScale from 1.1.1 to 1.2](Upgrading/Upgrading-To-MaxScale-1.2.md) -- [Upgrading MariaDB MaxScale from 1.0.5 to 1.1.0](Upgrading/Upgrading-To-MaxScale-1.1.0.md) ## Reference @@ -115,21 +111,3 @@ Documentation for MaxScale protocol modules. - [Binlog Router Design (in development)](http://mariadb-corporation.github.io/MaxScale/Design-Documents/binlog-router-html-docs) - [DCB States (to be replaced in StarUML)](Design-Documents/DCB-States.pdf) - [Schema Sharding Router Technical Documentation](Design-Documents/SchemaRouter-technical.md) - -## Earlier Release Notes - - - [MariaDB MaxScale 1.4.3 Release Notes](Release-Notes/MaxScale-1.4.3-Release-Notes.md) - - [MariaDB MaxScale 1.4.2 Release Notes](Release-Notes/MaxScale-1.4.2-Release-Notes.md) - - [MariaDB MaxScale 1.4.1 Release Notes](Release-Notes/MaxScale-1.4.1-Release-Notes.md) - - [MariaDB MaxScale 1.4.0 Release Notes](Release-Notes/MaxScale-1.4.0-Release-Notes.md) - - [MariaDB MaxScale 1.3.0 Release Notes](Release-Notes/MaxScale-1.3.0-Release-Notes.md) - - [MariaDB MaxScale 1.2.0 Release Notes](Release-Notes/MaxScale-1.2.0-Release-Notes.md) - - [MariaDB MaxScale 1.1.1 Release Notes](Release-Notes/MaxScale-1.1.1-Release-Notes.md) - - [MariaDB MaxScale 1.1.0 Release Notes](Release-Notes/MaxScale-1.1-Release-Notes.md) - - 
[MariaDB MaxScale 1.0.3 Release Notes](Release-Notes/MaxScale-1.0.3-Release-Notes.md) - - [MariaDB MaxScale 1.0.1 Release Notes](Release-Notes/MaxScale-1.0.1-Release-Notes.md) - - [MariaDB MaxScale 1.0 Release Notes](Release-Notes/MaxScale-1.0-Release-Notes.md) - - [MariaDB MaxScale 0.7 Release Notes](Release-Notes/MaxScale-0.7-Release-Notes.md) - - [MariaDB MaxScale 0.6 Release Notes](Release-Notes/MaxScale-0.6-Release-Notes.md) - - [MariaDB MaxScale 0.5 Release Notes](Release-Notes/MaxScale-0.5-Release-Notes.md) - From c67894b01186eb08187800599770bf912995334f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Tue, 4 Apr 2017 16:42:39 +0300 Subject: [PATCH 08/27] Add Avro schema generator Python script The script is the Python version of the cdc_schema.go application. --- .../modules/protocol/examples/CMakeLists.txt | 1 + .../modules/protocol/examples/cdc_schema.py | 88 +++++++++++++++++++ 2 files changed, 89 insertions(+) create mode 100755 server/modules/protocol/examples/cdc_schema.py diff --git a/server/modules/protocol/examples/CMakeLists.txt b/server/modules/protocol/examples/CMakeLists.txt index 63ffb2ed4..6f75f3684 100644 --- a/server/modules/protocol/examples/CMakeLists.txt +++ b/server/modules/protocol/examples/CMakeLists.txt @@ -2,4 +2,5 @@ install(PROGRAMS cdc.py DESTINATION ${MAXSCALE_BINDIR}) install(PROGRAMS cdc_users.py DESTINATION ${MAXSCALE_BINDIR}) install(PROGRAMS cdc_last_transaction.py DESTINATION ${MAXSCALE_BINDIR}) install(PROGRAMS cdc_kafka_producer.py DESTINATION ${MAXSCALE_BINDIR}) +install(PROGRAMS cdc_schema.py DESTINATION ${MAXSCALE_BINDIR}) install(FILES cdc_schema.go DESTINATION ${MAXSCALE_SHAREDIR}) diff --git a/server/modules/protocol/examples/cdc_schema.py b/server/modules/protocol/examples/cdc_schema.py new file mode 100755 index 000000000..8b22bfd20 --- /dev/null +++ b/server/modules/protocol/examples/cdc_schema.py @@ -0,0 +1,88 @@ +#!/usr/bin/env python3 + +# Copyright (c) 2016 MariaDB Corporation Ab +# 
+# Use of this software is governed by the Business Source License included +# in the LICENSE.TXT file and at www.mariadb.com/bsl11. +# +# Change Date: 2019-07-01 +# +# On the date above, in accordance with the Business Source License, use +# of this software will be governed by version 2 or later of the General +# Public License. + +# +# This program requires the MySQL Connector/Python to work +# + +import mysql.connector as mysql +import json +import sys +import argparse + +parser = argparse.ArgumentParser(description = "CDC Schema Generator", conflict_handler="resolve", epilog="""This program generates CDC schema files for a specific table or all the tables in a database. The +schema files need to be generated if the binary log files do not contain the +CREATE TABLE events that define the table layout.""") +parser.add_argument("-h", "--host", dest="host", help="Network address where the connection is made", default="localhost") +parser.add_argument("-P", "--port", dest="port", help="Port where the connection is made", default="3306") +parser.add_argument("-u", "--user", dest="user", help="Username used when connecting", default="") +parser.add_argument("-p", "--password", dest="password", help="Password used when connecting", default="") +parser.add_argument("DATABASE", help="Generate Avro schemas for this database") + +opts = parser.parse_args(sys.argv[1:]) + +def parse_field(row): + res = dict() + name = row[1].lower().split('(')[0] + + if name in ("date", "datetime", "time", "timestamp", "year", "tinytext", "text", + "mediumtext", "longtext", "char", "varchar", "enum", "set"): + res["type"] = "string" + elif name in ("tinyblob", "blob", "mediumblob", "longblob", "binary", "varbinary"): + res["type"] = "bytes" + elif name in ("int", "smallint", "mediumint", "integer", "tinyint", "short", "bit"): + res["type"] = "int" + elif name in ("float"): + res["type"] = "float" + elif name in ("double", "decimal"): + res["type"] = "double" + elif name in ("null"): + 
res["type"] = "null" + elif name in ("long", "bigint"): + res["type"] = "long" + else: + res["type"] = "string" + + + res["name"] = row[0].lower() + + return res + +try: + conn = mysql.connect(user=opts.user, password=opts.password, host=opts.host, port=opts.port) + cursor = conn.cursor() + cursor.execute("SHOW TABLES FROM {}".format(opts.DATABASE)) + + tables = [] + for res in cursor: + tables.append(res[0]) + + + for t in tables: + schema = dict(namespace="MaxScaleChangeDataSchema.avro", type="record", name="ChangeRecord", fields=[]) + cursor.execute("DESCRIBE {}.{}".format(opts.DATABASE, t)) + + for res in cursor: + schema["fields"].append(parse_field(res)) + + dest = open("{}.{}.000001.avsc".format(opts.DATABASE, t), 'w') + dest.write(json.dumps(schema)) + dest.close() + + cursor.close() + conn.close() + +except Exception as e: + print(e) + exit(1) + From 41a9663611cb00e62ba334845d525ee9584ba25f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Tue, 4 Apr 2017 17:08:07 +0300 Subject: [PATCH 09/27] MXS-1191: Fix crash on column drop When a column is dropped from a table with a schema that has more columns than the row event has, avrorouter will crash. 
--- server/modules/routing/avro/avro_rbr.c | 3 +- server/modules/routing/avro/avro_schema.c | 57 ++++++++++++++++++----- 2 files changed, 46 insertions(+), 14 deletions(-) diff --git a/server/modules/routing/avro/avro_rbr.c b/server/modules/routing/avro/avro_rbr.c index feb4058ff..56fdc0ef7 100644 --- a/server/modules/routing/avro/avro_rbr.c +++ b/server/modules/routing/avro/avro_rbr.c @@ -506,9 +506,8 @@ uint8_t* process_row_event_data(TABLE_MAP *map, TABLE_CREATE *create, avro_value ptr += (ncolumns + 7) / 8; ss_dassert(ptr < end); - for (long i = 0; i < map->columns && npresent < ncolumns; i++) + for (long i = 0; i < map->columns && i < create->columns && npresent < ncolumns; i++) { - ss_dassert(create->columns == map->columns); ss_debug(int rc = )avro_value_get_by_name(record, create->column_names[i], &field, NULL); ss_dassert(rc == 0); diff --git a/server/modules/routing/avro/avro_schema.c b/server/modules/routing/avro/avro_schema.c index 604b18aea..8c339e9e9 100644 --- a/server/modules/routing/avro/avro_schema.c +++ b/server/modules/routing/avro/avro_schema.c @@ -795,6 +795,22 @@ void make_avro_token(char* dest, const char* src, int length) dest[length] = '\0'; } +int get_column_index(TABLE_CREATE *create, const char *tok) +{ + int idx = -1; + + for (int x = 0; x < create->columns; x++) + { + if (strcasecmp(create->column_names[x], tok) == 0) + { + idx = x; + break; + } + } + + return idx; +} + bool table_create_alter(TABLE_CREATE *create, const char *sql, const char *end) { const char *tbl = strcasestr(sql, "table"), *def; @@ -845,27 +861,45 @@ bool table_create_alter(TABLE_CREATE *create, const char *sql, const char *end) { tok = get_tok(tok + len, &len, end); - free(create->column_names[create->columns - 1]); - char ** tmp = realloc(create->column_names, sizeof(char*) * create->columns - 1); - ss_dassert(tmp); + int idx = get_column_index(create, tok); - if (tmp == NULL) + if (idx != -1) { - return false; + free(create->column_names[idx]); + for (int i = 
idx; i < (int)create->columns - 1; i++) + { + create->column_names[i] = create->column_names[i + 1]; + } + + char ** tmp = realloc(create->column_names, sizeof(char*) * create->columns - 1); + ss_dassert(tmp); + + if (tmp == NULL) + { + return false; + } + + create->column_names = tmp; + create->columns--; + updates++; } - create->column_names = tmp; - create->columns--; - updates++; tok = get_next_def(tok, end); len = 0; } else if (tok_eq(ptok, "change", plen) && tok_eq(tok, "column", len)) { tok = get_tok(tok + len, &len, end); - free(create->column_names[create->columns - 1]); - create->column_names[create->columns - 1] = strndup(tok, len); - updates++; + + int idx = get_column_index(create, tok); + + if (idx != -1) + { + free(create->column_names[idx]); + create->column_names[idx] = strndup(tok, len); + updates++; + } + tok = get_next_def(tok, end); len = 0; } @@ -976,7 +1010,6 @@ TABLE_MAP *table_map_alloc(uint8_t *ptr, uint8_t hdr_len, TABLE_CREATE* create) map->id = table_id; map->version = create->version; map->flags = flags; - ss_dassert(column_count == create->columns); map->columns = column_count; map->column_types = malloc(column_count); /** Allocate at least one byte for the metadata */ From 496189dd59baf20e890a5e3634f9344887d9334c Mon Sep 17 00:00:00 2001 From: MassimilianoPinto Date: Tue, 18 Apr 2017 10:18:15 +0200 Subject: [PATCH 10/27] MXS-1221: Nagios plugin scripts does not process -S option properly Nagios plugin scripts does not process -S option properly --- plugins/nagios/check_maxscale_monitors.pl | 2 +- plugins/nagios/check_maxscale_resources.pl | 2 +- plugins/nagios/check_maxscale_threads.pl | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/plugins/nagios/check_maxscale_monitors.pl b/plugins/nagios/check_maxscale_monitors.pl index 6fb266e5b..6b4fe8a8a 100755 --- a/plugins/nagios/check_maxscale_monitors.pl +++ b/plugins/nagios/check_maxscale_monitors.pl @@ -85,7 +85,7 @@ if (!defined $MAXADMIN || length($MAXADMIN) == 0) { 
$MAXADMIN = $MAXADMIN_DEFAULT; } if (defined $MAXADMIN_SOCKET && length($MAXADMIN_SOCKET) > 0) { - $MAXADMIN_SOCKET = ' -S $MAXADMIN_SOCKET'; + $MAXADMIN_SOCKET = ' -S ' . $MAXADMIN_SOCKET; } else { $MAXADMIN_SOCKET = ''; } diff --git a/plugins/nagios/check_maxscale_resources.pl b/plugins/nagios/check_maxscale_resources.pl index 9ab7ec8e6..e5b0e74b1 100755 --- a/plugins/nagios/check_maxscale_resources.pl +++ b/plugins/nagios/check_maxscale_resources.pl @@ -86,7 +86,7 @@ if (!defined $MAXADMIN || length($MAXADMIN) == 0) { } if (defined $MAXADMIN_SOCKET && length($MAXADMIN_SOCKET) > 0) { - $MAXADMIN_SOCKET = ' -S $MAXADMIN_SOCKET'; + $MAXADMIN_SOCKET = ' -S ' . $MAXADMIN_SOCKET; } else { $MAXADMIN_SOCKET = ''; } diff --git a/plugins/nagios/check_maxscale_threads.pl b/plugins/nagios/check_maxscale_threads.pl index 034d99290..d8e5a187f 100755 --- a/plugins/nagios/check_maxscale_threads.pl +++ b/plugins/nagios/check_maxscale_threads.pl @@ -86,7 +86,7 @@ if (!defined $MAXADMIN || length($MAXADMIN) == 0) { } if (defined $MAXADMIN_SOCKET && length($MAXADMIN_SOCKET) > 0) { - $MAXADMIN_SOCKET = ' -S $MAXADMIN_SOCKET'; + $MAXADMIN_SOCKET = ' -S ' . $MAXADMIN_SOCKET; } else { $MAXADMIN_SOCKET = ''; } From a418387d0a8fa2372f78eb2fe351122c6b3ab024 Mon Sep 17 00:00:00 2001 From: Esa Korhonen Date: Thu, 6 Apr 2017 13:51:15 +0300 Subject: [PATCH 11/27] MXS-1218 Poll statistics changed to 64bit to avoid looparound Statistics calculation, printing and MaxInfo are modified. n_fds remains 32bit. 
--- server/core/atomic.c | 29 ++++------ server/core/poll.c | 56 +++++++++---------- server/core/statistics.c | 16 +++--- server/include/atomic.h | 4 ++ server/include/maxscale/poll.h | 2 +- server/include/statistics.h | 7 ++- server/modules/routing/maxinfo/maxinfo_exec.c | 25 +++++---- 7 files changed, 70 insertions(+), 69 deletions(-) diff --git a/server/core/atomic.c b/server/core/atomic.c index 9c15d2782..fee35048d 100644 --- a/server/core/atomic.c +++ b/server/core/atomic.c @@ -23,12 +23,12 @@ * @endverbatim */ +#include + /** - * Implementation of an atomic add operation for the GCC environment, or the - * X86 processor. If we are working within GNU C then we can use the GCC - * atomic add built in function, which is portable across platforms that - * implement GCC. Otherwise, this function currently supports only X86 - * architecture (without further development). + * Implementation of an atomic add operation for the GCC environment. + * If we are working within GNU C then we can use the GCC atomic add + * built in function, which is portable across platforms that implement GCC. * * Adds a value to the contents of a location pointed to by the first parameter. 
* The add operation is atomic and the return value is the value stored in the @@ -39,17 +39,12 @@ * @param value Value to be added * @return The value of variable before the add occurred */ -int -atomic_add(int *variable, int value) +int atomic_add(int *variable, int value) { -#ifdef __GNUC__ - return (int) __sync_fetch_and_add (variable, value); -#else - asm volatile( - "lock; xaddl %%eax, %2;" - :"=a" (value) - : "a" (value), "m" (*variable) - : "memory" ); - return value; -#endif + return __sync_fetch_and_add(variable, value); +} + +int64_t atomic_add_int64(int64_t *variable, int64_t value) +{ + return __sync_fetch_and_add(variable, value); } diff --git a/server/core/poll.c b/server/core/poll.c index a2078f387..783c836f6 100644 --- a/server/core/poll.c +++ b/server/core/poll.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -162,11 +163,11 @@ static struct ts_stats_t *n_pollev; /*< Number of polls returning events */ ts_stats_t *n_nbpollev; /*< Number of polls returning events */ ts_stats_t *n_nothreads; /*< Number of times no threads are polling */ - int n_fds[MAXNFDS]; /*< Number of wakeups with particular n_fds value */ - int evq_length; /*< Event queue length */ - int evq_pending; /*< Number of pending descriptors in event queue */ - int evq_max; /*< Maximum event queue length */ - int wake_evqpending; /*< Woken from epoll_wait with pending events in queue */ + int32_t n_fds[MAXNFDS]; /*< Number of wakeups with particular n_fds value */ + int64_t evq_length; /*< Event queue length */ + int64_t evq_pending; /*< Number of pending descriptors in event queue */ + int64_t evq_max; /*< Maximum event queue length */ + int64_t wake_evqpending; /*< Woken from epoll_wait with pending events in queue */ ts_stats_t *blockingpolls; /*< Number of epoll_waits with a timeout specified */ } pollStats; @@ -615,7 +616,7 @@ poll_waitevents(void *arg) (max_poll_sleep * timeout_bias) / 10); if (nfds == 0 && pollStats.evq_pending) { - 
atomic_add(&pollStats.wake_evqpending, 1); + atomic_add_int64(&pollStats.wake_evqpending, 1); poll_spins = 0; } } @@ -1254,42 +1255,42 @@ dprintPollStats(DCB *dcb) int i; dcb_printf(dcb, "\nPoll Statistics.\n\n"); - dcb_printf(dcb, "No. of epoll cycles: %d\n", + dcb_printf(dcb, "No. of epoll cycles: %" PRId64 "\n", ts_stats_sum(pollStats.n_polls)); - dcb_printf(dcb, "No. of epoll cycles with wait: %d\n", + dcb_printf(dcb, "No. of epoll cycles with wait: %" PRId64 "\n", ts_stats_sum(pollStats.blockingpolls)); - dcb_printf(dcb, "No. of epoll calls returning events: %d\n", + dcb_printf(dcb, "No. of epoll calls returning events: %" PRId64 "\n", ts_stats_sum(pollStats.n_pollev)); - dcb_printf(dcb, "No. of non-blocking calls returning events: %d\n", + dcb_printf(dcb, "No. of non-blocking calls returning events: %" PRId64 "\n", ts_stats_sum(pollStats.n_nbpollev)); - dcb_printf(dcb, "No. of read events: %d\n", + dcb_printf(dcb, "No. of read events: %" PRId64 "\n", ts_stats_sum(pollStats.n_read)); - dcb_printf(dcb, "No. of write events: %d\n", + dcb_printf(dcb, "No. of write events: %" PRId64 "\n", ts_stats_sum(pollStats.n_write)); - dcb_printf(dcb, "No. of error events: %d\n", + dcb_printf(dcb, "No. of error events: %" PRId64 "\n", ts_stats_sum(pollStats.n_error)); - dcb_printf(dcb, "No. of hangup events: %d\n", + dcb_printf(dcb, "No. of hangup events: %" PRId64 "\n", ts_stats_sum(pollStats.n_hup)); - dcb_printf(dcb, "No. of accept events: %d\n", + dcb_printf(dcb, "No. of accept events: %" PRId64 "\n", ts_stats_sum(pollStats.n_accept)); - dcb_printf(dcb, "No. of times no threads polling: %d\n", + dcb_printf(dcb, "No. 
of times no threads polling: %" PRId64 "\n", ts_stats_sum(pollStats.n_nothreads)); - dcb_printf(dcb, "Current event queue length: %d\n", + dcb_printf(dcb, "Current event queue length: %" PRId64 "\n", pollStats.evq_length); - dcb_printf(dcb, "Maximum event queue length: %d\n", + dcb_printf(dcb, "Maximum event queue length: %" PRId64 "\n", pollStats.evq_max); - dcb_printf(dcb, "No. of DCBs with pending events: %d\n", + dcb_printf(dcb, "No. of DCBs with pending events: %" PRId64 "\n", pollStats.evq_pending); - dcb_printf(dcb, "No. of wakeups with pending queue: %d\n", + dcb_printf(dcb, "No. of wakeups with pending queue: %" PRId64 "\n", pollStats.wake_evqpending); dcb_printf(dcb, "No of poll completions with descriptors\n"); dcb_printf(dcb, "\tNo. of descriptors\tNo. of poll completions.\n"); for (i = 0; i < MAXNFDS - 1; i++) { - dcb_printf(dcb, "\t%2d\t\t\t%d\n", i + 1, pollStats.n_fds[i]); + dcb_printf(dcb, "\t%2d\t\t\t%" PRId32 "\n", i + 1, pollStats.n_fds[i]); } - dcb_printf(dcb, "\t>= %d\t\t\t%d\n", MAXNFDS, + dcb_printf(dcb, "\t>= %d\t\t\t%" PRId32 "\n", MAXNFDS, pollStats.n_fds[MAXNFDS - 1]); #if SPINLOCK_PROFILE @@ -1802,8 +1803,8 @@ dShowEventStats(DCB *pdcb) dcb_printf(pdcb, "\nEvent statistics.\n"); dcb_printf(pdcb, "Maximum queue time: %3lu00ms\n", queueStats.maxqtime); dcb_printf(pdcb, "Maximum execution time: %3lu00ms\n", queueStats.maxexectime); - dcb_printf(pdcb, "Maximum event queue length: %3d\n", pollStats.evq_max); - dcb_printf(pdcb, "Current event queue length: %3d\n", pollStats.evq_length); + dcb_printf(pdcb, "Maximum event queue length: %3" PRId64 "\n", pollStats.evq_max); + dcb_printf(pdcb, "Current event queue length: %3" PRId64 "\n", pollStats.evq_length); dcb_printf(pdcb, "\n"); dcb_printf(pdcb, " | Number of events\n"); dcb_printf(pdcb, "Duration | Queued | Executed\n"); @@ -1825,8 +1826,7 @@ dShowEventStats(DCB *pdcb) * @param stat The required statistic * @return The value of that statistic */ -int -poll_get_stat(POLL_STAT stat) +int64_t 
poll_get_stat(POLL_STAT stat) { switch (stat) { @@ -1847,9 +1847,9 @@ poll_get_stat(POLL_STAT stat) case POLL_STAT_EVQ_MAX: return pollStats.evq_max; case POLL_STAT_MAX_QTIME: - return (int)queueStats.maxqtime; + return (int64_t)queueStats.maxqtime; case POLL_STAT_MAX_EXECTIME: - return (int)queueStats.maxexectime; + return (int64_t)queueStats.maxexectime; } return 0; } diff --git a/server/core/statistics.c b/server/core/statistics.c index 26fe69ef0..cb76cad09 100644 --- a/server/core/statistics.c +++ b/server/core/statistics.c @@ -47,7 +47,7 @@ void ts_stats_end() ts_stats_t ts_stats_alloc() { ss_dassert(initialized); - return calloc(thread_count, sizeof(int)); + return calloc(thread_count, sizeof(int64_t)); } /** @@ -79,10 +79,10 @@ void ts_stats_set_thread_id(int id) * @param stats Statistics to add to * @param value Value to add */ -void ts_stats_add(ts_stats_t stats, int value) +void ts_stats_add(ts_stats_t stats, int64_t value) { ss_dassert(initialized); - ((int*)stats)[current_thread_id] += value; + ((int64_t*)stats)[current_thread_id] += value; } /** @@ -92,10 +92,10 @@ void ts_stats_add(ts_stats_t stats, int value) * @param stats Statistics to set * @param value Value to set to */ -void ts_stats_set(ts_stats_t stats, int value) +void ts_stats_set(ts_stats_t stats, int64_t value) { ss_dassert(initialized); - ((int*)stats)[current_thread_id] = value; + ((int64_t*)stats)[current_thread_id] = value; } /** @@ -104,13 +104,13 @@ void ts_stats_set(ts_stats_t stats, int value) * @param stats Statistics to read * @return Value of statistics */ -int ts_stats_sum(ts_stats_t stats) +int64_t ts_stats_sum(ts_stats_t stats) { ss_dassert(initialized); - int sum = 0; + int64_t sum = 0; for (int i = 0; i < thread_count; i++) { - sum += ((int*)stats)[i]; + sum += ((int64_t*)stats)[i]; } return sum; } diff --git a/server/include/atomic.h b/server/include/atomic.h index acc177aa9..5990f701b 100644 --- a/server/include/atomic.h +++ b/server/include/atomic.h @@ -26,9 +26,13 @@ * 
@endverbatim */ +#include + #ifdef __cplusplus extern "C" int atomic_add(int *variable, int value); +extern "C" int64_t atomic_add_int64(int64_t *variable, int64_t value); #else extern int atomic_add(int *variable, int value); +extern int64_t atomic_add_int64(int64_t *variable, int64_t value); #endif #endif diff --git a/server/include/maxscale/poll.h b/server/include/maxscale/poll.h index 044077f14..e2ed799bc 100644 --- a/server/include/maxscale/poll.h +++ b/server/include/maxscale/poll.h @@ -61,7 +61,7 @@ extern void dShowThreads(DCB *dcb); extern void poll_add_epollin_event_to_dcb(DCB* dcb, GWBUF* buf); extern void dShowEventQ(DCB *dcb); extern void dShowEventStats(DCB *dcb); -extern int poll_get_stat(POLL_STAT stat); +extern int64_t poll_get_stat(POLL_STAT stat); extern RESULTSET *eventTimesGetList(); extern void poll_fake_event(DCB *dcb, enum EPOLL_EVENTS ev); extern void poll_fake_hangup_event(DCB *dcb); diff --git a/server/include/statistics.h b/server/include/statistics.h index 00750cd23..1962598b6 100644 --- a/server/include/statistics.h +++ b/server/include/statistics.h @@ -23,6 +23,7 @@ * 21/01/16 Markus Makela Initial implementation * @endverbatim */ +#include typedef void* ts_stats_t; @@ -37,8 +38,8 @@ void ts_stats_set_thread_id(int id); ts_stats_t ts_stats_alloc(); void ts_stats_free(ts_stats_t stats); -void ts_stats_add(ts_stats_t stats, int value); -void ts_stats_set(ts_stats_t stats, int value); -int ts_stats_sum(ts_stats_t stats); +void ts_stats_add(ts_stats_t stats, int64_t value); +void ts_stats_set(ts_stats_t stats, int64_t value); +int64_t ts_stats_sum(ts_stats_t stats); #endif diff --git a/server/modules/routing/maxinfo/maxinfo_exec.c b/server/modules/routing/maxinfo/maxinfo_exec.c index 235a8d400..9d4be0e66 100644 --- a/server/modules/routing/maxinfo/maxinfo_exec.c +++ b/server/modules/routing/maxinfo/maxinfo_exec.c @@ -44,6 +44,7 @@ #include #include #include +#include static void exec_show(DCB *dcb, MAXINFO_TREE *tree); static void 
exec_select(DCB *dcb, MAXINFO_TREE *tree); @@ -995,7 +996,7 @@ maxinfo_zombie_dcbs() /** * Interface to poll stats for reads */ -static int +static int64_t maxinfo_read_events() { return poll_get_stat(POLL_STAT_READ); @@ -1004,7 +1005,7 @@ maxinfo_read_events() /** * Interface to poll stats for writes */ -static int +static int64_t maxinfo_write_events() { return poll_get_stat(POLL_STAT_WRITE); @@ -1013,7 +1014,7 @@ maxinfo_write_events() /** * Interface to poll stats for errors */ -static int +static int64_t maxinfo_error_events() { return poll_get_stat(POLL_STAT_ERROR); @@ -1022,7 +1023,7 @@ maxinfo_error_events() /** * Interface to poll stats for hangup */ -static int +static int64_t maxinfo_hangup_events() { return poll_get_stat(POLL_STAT_HANGUP); @@ -1031,7 +1032,7 @@ maxinfo_hangup_events() /** * Interface to poll stats for accepts */ -static int +static int64_t maxinfo_accept_events() { return poll_get_stat(POLL_STAT_ACCEPT); @@ -1040,7 +1041,7 @@ maxinfo_accept_events() /** * Interface to poll stats for event queue length */ -static int +static int64_t maxinfo_event_queue_length() { return poll_get_stat(POLL_STAT_EVQ_LEN); @@ -1049,7 +1050,7 @@ maxinfo_event_queue_length() /** * Interface to poll stats for event pending queue length */ -static int +static int64_t maxinfo_event_pending_queue_length() { return poll_get_stat(POLL_STAT_EVQ_PENDING); @@ -1058,7 +1059,7 @@ maxinfo_event_pending_queue_length() /** * Interface to poll stats for max event queue length */ -static int +static int64_t maxinfo_max_event_queue_length() { return poll_get_stat(POLL_STAT_EVQ_MAX); @@ -1067,7 +1068,7 @@ maxinfo_max_event_queue_length() /** * Interface to poll stats for max queue time */ -static int +static int64_t maxinfo_max_event_queue_time() { return poll_get_stat(POLL_STAT_MAX_QTIME); @@ -1076,7 +1077,7 @@ maxinfo_max_event_queue_time() /** * Interface to poll stats for max event execution time */ -static int +static int64_t maxinfo_max_event_exec_time() { return 
poll_get_stat(POLL_STAT_MAX_EXECTIME); @@ -1149,8 +1150,8 @@ status_row(RESULTSET *result, void *data) (char *)(*status[context->index].func)()); break; case VT_INT: - snprintf(buf, 80, "%ld", - (long)(*status[context->index].func)()); + snprintf(buf, 80, "%" PRId64, + (int64_t)(*status[context->index].func)()); resultset_row_set(row, 1, buf); break; default: From 6b9a2cd0b5545b45c65d68f49e01f4e39e20498a Mon Sep 17 00:00:00 2001 From: MassimilianoPinto Date: Fri, 21 Apr 2017 16:11:30 +0200 Subject: [PATCH 12/27] MXS-1244: MySQL monitor "detect_replication_lag=true" doesn't work with "mysql51_replication=true" Added the missing handle->master assignment --- server/modules/monitor/mysql_mon.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/server/modules/monitor/mysql_mon.c b/server/modules/monitor/mysql_mon.c index 1046fea10..031778982 100644 --- a/server/modules/monitor/mysql_mon.c +++ b/server/modules/monitor/mysql_mon.c @@ -576,6 +576,9 @@ static MONITOR_SERVERS *build_mysql51_replication_tree(MONITOR *mon) /* Set the Slave Role */ if (ismaster) { + MYSQL_MONITOR *handle = mon->handle; + handle->master = database; + MXS_DEBUG("Master server found at %s:%d with %d slaves", database->server->name, database->server->port, From e6b34ea85cb1bae47f51ac2c79129ea9b1910d13 Mon Sep 17 00:00:00 2001 From: MassimilianoPinto Date: Fri, 21 Apr 2017 17:09:28 +0200 Subject: [PATCH 13/27] Added depth and SERVER_SLAVE_OF_EXTERNAL_MASTER for build_mysql51_replication_tree Now build_mysql51_replication_tree assigns depth and handles SERVER_SLAVE_OF_EXTERNAL_MASTER status --- server/modules/monitor/mysql_mon.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/server/modules/monitor/mysql_mon.c b/server/modules/monitor/mysql_mon.c index 031778982..40333eb0d 100644 --- a/server/modules/monitor/mysql_mon.c +++ b/server/modules/monitor/mysql_mon.c @@ -536,6 +536,8 @@ static MONITOR_SERVERS *build_mysql51_replication_tree(MONITOR *mon) MONITOR_SERVERS* 
database = mon->databases; MONITOR_SERVERS *ptr, *rval = NULL; int i; + MYSQL_MONITOR *handle = mon->handle; + while (database) { bool ismaster = false; @@ -576,14 +578,16 @@ static MONITOR_SERVERS *build_mysql51_replication_tree(MONITOR *mon) /* Set the Slave Role */ if (ismaster) { - MYSQL_MONITOR *handle = mon->handle; handle->master = database; MXS_DEBUG("Master server found at %s:%d with %d slaves", database->server->name, database->server->port, nslaves); + monitor_set_pending_status(database, SERVER_MASTER); + database->server->depth = 0; // Add Depth 0 for Master + if (rval == NULL || rval->server->node_id > database->server->node_id) { rval = database; @@ -607,13 +611,17 @@ static MONITOR_SERVERS *build_mysql51_replication_tree(MONITOR *mon) if (ptr->server->slaves[i] == database->server->node_id) { database->server->master_id = ptr->server->node_id; + database->server->depth = 1; // Add Depth 1 for Slave break; } } ptr = ptr->next; } - if (database->server->master_id <= 0 && SERVER_IS_SLAVE(database->server)) + if (SERVER_IS_SLAVE(database->server) && + (database->server->master_id <= 0 || + database->server->master_id != handle->master->server->node_id)) { + monitor_clear_pending_status(database, SERVER_SLAVE); monitor_set_pending_status(database, SERVER_SLAVE_OF_EXTERNAL_MASTER); } database = database->next; From ade2cef8527406b8591fdf2c014422383fb4efc3 Mon Sep 17 00:00:00 2001 From: MassimilianoPinto Date: Mon, 24 Apr 2017 10:34:13 +0200 Subject: [PATCH 14/27] MXS-1244: added 'detect_stale_slave' in monitor params 'detect_stale_slave' added to monitor params --- server/core/config.c | 1 + 1 file changed, 1 insertion(+) diff --git a/server/core/config.c b/server/core/config.c index 03a725c31..202c4bdb1 100644 --- a/server/core/config.c +++ b/server/core/config.c @@ -172,6 +172,7 @@ static char *monitor_params[] = "monitor_interval", "detect_replication_lag", "detect_stale_master", + "detect_stale_slave", "disable_master_failback", 
"backend_connect_timeout", "backend_read_timeout", From 898bc3444eadae7a72d9c19a741ec678bcfe18cc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Tue, 9 May 2017 14:46:39 +0300 Subject: [PATCH 15/27] MXS-1216: Fix processing of long fixed strings The fixed length string processing assumed that the string lengths were contained in the first byte. This is not true for large fixed length strings that take more than 255 bytes to store. This consists of multi-byte character strings that can take up to 1024 bytes to store. --- server/modules/routing/avro/avro_rbr.c | 25 ++++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/server/modules/routing/avro/avro_rbr.c b/server/modules/routing/avro/avro_rbr.c index 56fdc0ef7..b5a96f5ac 100644 --- a/server/modules/routing/avro/avro_rbr.c +++ b/server/modules/routing/avro/avro_rbr.c @@ -550,12 +550,31 @@ uint8_t* process_row_event_data(TABLE_MAP *map, TABLE_CREATE *create, avro_value } else { - uint8_t bytes = *ptr; + /** + * The first byte in the metadata stores the real type of + * the string (ENUM and SET types are also stored as fixed + * length strings). + * + * The first two bits of the second byte contain the XOR'ed + * field length but as that information is not relevant for + * us, we just use this information to know whether to read + * one or two bytes for string length. 
+ */ + + uint8_t bytes = *ptr++; + int len = metadata[metadata_offset] + + (((metadata[metadata_offset + 1] >> 4) & 0x3) ^ 0x3); + + if (len <= 255) + { + bytes += *ptr++ << 8; + } + char str[bytes + 1]; - memcpy(str, ptr + 1, bytes); + memcpy(str, ptr, bytes); str[bytes] = '\0'; avro_value_set_string(&field, str); - ptr += bytes + 1; + ptr += bytes; ss_dassert(ptr < end); } } From 30bd869f1c48edcaa8cdd1379e63ff046aa44f96 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Fri, 12 May 2017 11:22:04 +0300 Subject: [PATCH 16/27] MXS-1216: Fix crash on MariaDB 10.0 DATETIME(n) When a MariaDB 10.0 DATETIME field with a custom length was defined, the field offsets weren't calculated properly. As there is no metadata for pre-10.1 DATETIME types with decimal precision, the metadata (i.e. decimal count) needs to be gathered from the CREATE TABLE statement. This information is then used to calculate the correct field length when the value is decoded. This change does not fix the incorrect interpretation of the old DATETIME value. The converted values are still garbled due to the fact that the value needs to be shifted out of the decimal format before it can be properly converted. --- server/core/mysql_binlog.c | 22 +++- server/include/mysql_binlog.h | 2 +- server/modules/include/avrorouter.h | 2 + server/modules/routing/avro/avro_rbr.c | 21 +++- server/modules/routing/avro/avro_schema.c | 133 ++++++++++++++-------- 5 files changed, 128 insertions(+), 52 deletions(-) diff --git a/server/core/mysql_binlog.c b/server/core/mysql_binlog.c index ddf5825c0..90590a3da 100644 --- a/server/core/mysql_binlog.c +++ b/server/core/mysql_binlog.c @@ -392,6 +392,20 @@ size_t unpack_bit(uint8_t *ptr, uint8_t *null_mask, uint32_t col_count, return metadata[1]; } +/** + * If the TABLE_COL_TYPE_DATETIME type field is declared as a datetime with + * extra precision, the packed length is shorter than 8 bytes. 
+ */ +size_t datetime_sizes[] = +{ + 5, // DATETIME(0) + 6, // DATETIME(1) + 6, // DATETIME(2) + 7, // DATETIME(3) + 7, // DATETIME(4) + 7, // DATETIME(5) + 8 // DATETIME(6) +}; /** * @brief Get the length of a temporal field @@ -399,7 +413,7 @@ size_t unpack_bit(uint8_t *ptr, uint8_t *null_mask, uint32_t col_count, * @param decimals How many decimals the field has * @return Number of bytes the temporal value takes */ -static size_t temporal_field_size(uint8_t type, uint8_t decimals) +static size_t temporal_field_size(uint8_t type, uint8_t decimals, int length) { switch (type) { @@ -414,7 +428,7 @@ static size_t temporal_field_size(uint8_t type, uint8_t decimals) return 3 + ((decimals + 1) / 2); case TABLE_COL_TYPE_DATETIME: - return 8; + return length < 0 || length > 6 ? 8 : datetime_sizes[length]; case TABLE_COL_TYPE_TIMESTAMP: return 4; @@ -442,7 +456,7 @@ static size_t temporal_field_size(uint8_t type, uint8_t decimals) * @param val Extracted packed value * @param tm Pointer where the unpacked temporal value is stored */ -size_t unpack_temporal_value(uint8_t type, uint8_t *ptr, uint8_t *metadata, struct tm *tm) +size_t unpack_temporal_value(uint8_t type, uint8_t *ptr, uint8_t *metadata, int length, struct tm *tm) { switch (type) { @@ -475,7 +489,7 @@ size_t unpack_temporal_value(uint8_t type, uint8_t *ptr, uint8_t *metadata, stru ss_dassert(false); break; } - return temporal_field_size(type, *metadata); + return temporal_field_size(type, *metadata, length); } void format_temporal_value(char *str, size_t size, uint8_t type, struct tm *tm) diff --git a/server/include/mysql_binlog.h b/server/include/mysql_binlog.h index b1a81eaa2..ebe5574e5 100644 --- a/server/include/mysql_binlog.h +++ b/server/include/mysql_binlog.h @@ -83,7 +83,7 @@ bool column_is_decimal(uint8_t type); bool fixed_string_is_enum(uint8_t type); /** Value unpacking */ -size_t unpack_temporal_value(uint8_t type, uint8_t *ptr, uint8_t* metadata, struct tm *tm); +size_t unpack_temporal_value(uint8_t 
type, uint8_t *ptr, uint8_t* metadata, int length, struct tm *tm); size_t unpack_enum(uint8_t *ptr, uint8_t *metadata, uint8_t *dest); size_t unpack_numeric_field(uint8_t *ptr, uint8_t type, uint8_t* metadata, uint8_t* val); size_t unpack_bit(uint8_t *ptr, uint8_t *null_mask, uint32_t col_count, diff --git a/server/modules/include/avrorouter.h b/server/modules/include/avrorouter.h index a233915b4..b621c3026 100644 --- a/server/modules/include/avrorouter.h +++ b/server/modules/include/avrorouter.h @@ -98,6 +98,8 @@ typedef struct table_create { uint64_t columns; char **column_names; + char **column_types; + int* column_lengths; char *table; char *database; int version; /**< How many versions of this table have been used */ diff --git a/server/modules/routing/avro/avro_rbr.c b/server/modules/routing/avro/avro_rbr.c index b5a96f5ac..bda08ea54 100644 --- a/server/modules/routing/avro/avro_rbr.c +++ b/server/modules/routing/avro/avro_rbr.c @@ -161,6 +161,11 @@ bool handle_table_map_event(AVRO_INSTANCE *router, REP_HEADER *hdr, uint8_t *ptr "table until a DDL statement for it is read.", table_ident); } + if (rval) + { + MXS_INFO("Table Map for '%s' at %lu", table_ident, router->current_pos); + } + return rval; } @@ -288,9 +293,13 @@ bool handle_row_event(AVRO_INSTANCE *router, REP_HEADER *hdr, uint8_t *ptr) * beforehand so we must continue processing them until we reach the end * of the event. 
*/ int rows = 0; + MXS_INFO("Row Event for '%s' at %lu", table_ident, router->current_pos); while (ptr - start < hdr->event_size - BINLOG_EVENT_HDR_LEN) { + static uint64_t total_row_count = 1; + MXS_INFO("Row %lu", total_row_count++); + /** Add the current GTID and timestamp */ uint8_t *end = ptr + hdr->event_size - BINLOG_EVENT_HDR_LEN; int event_type = get_event_type(hdr->event_type); @@ -516,6 +525,7 @@ uint8_t* process_row_event_data(TABLE_MAP *map, TABLE_CREATE *create, avro_value npresent++; if (bit_is_set(null_bitmap, ncolumns, i)) { + MXS_INFO("[%ld] NULL", i); if (column_is_blob(map->column_types[i])) { uint8_t nullvalue = 0; @@ -545,6 +555,7 @@ uint8_t* process_row_event_data(TABLE_MAP *map, TABLE_CREATE *create, avro_value MXS_WARNING("ENUM/SET values larger than 255 values aren't supported."); } avro_value_set_string(&field, strval); + MXS_INFO("[%ld] ENUM: %lu bytes", i, bytes); ptr += bytes; ss_dassert(ptr < end); } @@ -594,6 +605,7 @@ uint8_t* process_row_event_data(TABLE_MAP *map, TABLE_CREATE *create, avro_value MXS_WARNING("BIT is not currently supported, values are stored as 0."); } avro_value_set_int(&field, value); + MXS_INFO("[%ld] BIT", i); ptr += bytes; ss_dassert(ptr < end); } @@ -602,6 +614,7 @@ uint8_t* process_row_event_data(TABLE_MAP *map, TABLE_CREATE *create, avro_value double f_value = 0.0; ptr += unpack_decimal_field(ptr, metadata + metadata_offset, &f_value); avro_value_set_double(&field, f_value); + MXS_INFO("[%ld] DOUBLE", i); ss_dassert(ptr < end); } else if (column_is_variable_string(map->column_types[i])) @@ -619,6 +632,7 @@ uint8_t* process_row_event_data(TABLE_MAP *map, TABLE_CREATE *create, avro_value ptr++; } + MXS_INFO("[%ld] VARCHAR: field: %d bytes, data: %lu bytes", i, bytes, sz); char buf[sz + 1]; memcpy(buf, ptr, sz); buf[sz] = '\0'; @@ -632,6 +646,7 @@ uint8_t* process_row_event_data(TABLE_MAP *map, TABLE_CREATE *create, avro_value uint64_t len = 0; memcpy(&len, ptr, bytes); ptr += bytes; + MXS_INFO("[%ld] BLOB: 
field: %d bytes, data: %lu bytes", i, bytes, len); if (len) { avro_value_set_bytes(&field, ptr, len); @@ -648,9 +663,12 @@ uint8_t* process_row_event_data(TABLE_MAP *map, TABLE_CREATE *create, avro_value { char buf[80]; struct tm tm; - ptr += unpack_temporal_value(map->column_types[i], ptr, &metadata[metadata_offset], &tm); + ptr += unpack_temporal_value(map->column_types[i], ptr, + &metadata[metadata_offset], + create->column_lengths[i], &tm); format_temporal_value(buf, sizeof(buf), map->column_types[i], &tm); avro_value_set_string(&field, buf); + MXS_INFO("[%ld] TEMPORAL: %s", i, buf); ss_dassert(ptr < end); } /** All numeric types (INT, LONG, FLOAT etc.) */ @@ -661,6 +679,7 @@ uint8_t* process_row_event_data(TABLE_MAP *map, TABLE_CREATE *create, avro_value ptr += unpack_numeric_field(ptr, map->column_types[i], &metadata[metadata_offset], lval); set_numeric_field_value(&field, map->column_types[i], &metadata[metadata_offset], lval); + MXS_INFO("[%ld] NUMERIC: %ld", i, *((int64_t*)lval)); ss_dassert(ptr < end); } ss_dassert(metadata_offset <= map->column_metadata_size); diff --git a/server/modules/routing/avro/avro_schema.c b/server/modules/routing/avro/avro_schema.c index 8c339e9e9..13bb44bc1 100644 --- a/server/modules/routing/avro/avro_schema.c +++ b/server/modules/routing/avro/avro_schema.c @@ -491,7 +491,6 @@ static const char *extract_field_name(const char* ptr, char* dest, size_t size) dest[bytes] = '\0'; make_valid_avro_identifier(dest); - ptr = next_field_definition(ptr); } else { @@ -501,62 +500,97 @@ static const char *extract_field_name(const char* ptr, char* dest, size_t size) return ptr; } +int extract_type_length(const char* ptr, char *dest) +{ + /** Skip any leading whitespace */ + while (isspace(*ptr) || *ptr == '`') + { + ptr++; + } + + /** The field type definition starts here */ + const char *start = ptr; + + /** Skip characters until we either hit a whitespace character or the start + * of the length definition. 
*/ + while (!isspace(*ptr) && *ptr != '(') + { + ptr++; + } + + /** Store type */ + int typelen = ptr - start; + memcpy(dest, start, typelen); + dest[typelen] = '\0'; + + /** Skip whitespace */ + while (isspace(*ptr)) + { + ptr++; + } + + int rval = -1; // No length defined + + /** Start of length definition */ + if (*ptr == '(') + { + ptr++; + char *end; + int val = strtol(ptr, &end, 10); + + if (*end == ')') + { + rval = val; + } + } + + return rval; +} + +int count_columns(const char* ptr) +{ + int i = 2; + + while ((ptr = strchr(ptr, ','))) + { + ptr++; + i++; + } + + return i; +} + /** * Process a table definition into an array of column names * @param nameptr table definition * @return Number of processed columns or -1 on error */ -static int process_column_definition(const char *nameptr, char*** dest) +static int process_column_definition(const char *nameptr, char*** dest, char*** dest_types, int** dest_lens) { - /** Process columns in groups of 8 */ - size_t chunks = 1; - const size_t chunk_size = 8; - int i = 0; - char **names = malloc(sizeof(char*) * (chunks * chunk_size + 1)); - - if (names == NULL) - { - MXS_ERROR("Memory allocation failed when trying allocate %ld bytes of memory.", - sizeof(char*) * chunks); - return -1; - } + int n = count_columns(nameptr); + *dest = malloc(sizeof(char*) * n); + *dest_types = malloc(sizeof(char*) * n); + *dest_lens = malloc(sizeof(int) * n); + char **names = *dest; + char **types = *dest_types; + int *lengths = *dest_lens; char colname[512]; + int i = 0; while ((nameptr = extract_field_name(nameptr, colname, sizeof(colname)))) { - if (i >= chunks * chunk_size) - { - char **tmp = realloc(names, (++chunks * chunk_size + 1) * sizeof(char*)); - if (tmp == NULL) - { - for (int x = 0; x < i; x++) - { - free(names[x]); - } - free(names); - MXS_ERROR("Memory allocation failed when trying allocate %ld bytes of memory.", - sizeof(char*) * chunks); - return -1; - } - names = tmp; - } + ss_dassert(i < n); + char type[100] = ""; + 
int len = extract_type_length(nameptr, type); + nameptr = next_field_definition(nameptr); - if ((names[i++] = strdup(colname)) == NULL) - { - for (int x = 0; x < i; x++) - { - free(names[x]); - } - free(names); - MXS_ERROR("Memory allocation failed when trying allocate %lu bytes " - "of memory.", strlen(colname)); - return -1; - } + lengths[i] = len; + types[i] = strdup(type); + names[i] = strdup(colname); + i++; } - *dest = names; - return i; } @@ -600,7 +634,7 @@ TABLE_CREATE* table_create_alloc(const char* sql, const char* event_db) char database[MYSQL_DATABASE_MAXLEN + 1]; const char *db = event_db; - MXS_DEBUG("Create table statement: %.*s", stmt_len, statement_sql); + MXS_INFO("Create table: %s", sql); if (!get_table_name(sql, table)) { @@ -620,8 +654,10 @@ TABLE_CREATE* table_create_alloc(const char* sql, const char* event_db) db = database; } + int* lengths = NULL; char **names = NULL; - int n_columns = process_column_definition(statement_sql, &names); + char **types = NULL; + int n_columns = process_column_definition(statement_sql, &names, &types, &lengths); ss_dassert(n_columns > 0); /** We have appear to have a valid CREATE TABLE statement */ @@ -633,6 +669,8 @@ TABLE_CREATE* table_create_alloc(const char* sql, const char* event_db) rval->version = 1; rval->was_used = false; rval->column_names = names; + rval->column_lengths = lengths; + rval->column_types = types; rval->columns = n_columns; rval->database = strdup(db); rval->table = strdup(table); @@ -675,8 +713,11 @@ void* table_create_free(TABLE_CREATE* value) for (uint64_t i = 0; i < value->columns; i++) { free(value->column_names[i]); + free(value->column_types[i]); } free(value->column_names); + free(value->column_types); + free(value->column_lengths); free(value->table); free(value->database); free(value); @@ -822,7 +863,7 @@ bool table_create_alter(TABLE_CREATE *create, const char *sql, const char *end) if (tok) { - MXS_DEBUG("Altering table %.*s\n", len, tok); + MXS_INFO("Alter table '%.*s'; 
%.*s\n", len, tok, (int)(end - sql), sql); def = tok + len; } From da2f7b1efed30f0c9a99c079a41cf359107c383c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Fri, 12 May 2017 11:25:48 +0300 Subject: [PATCH 17/27] MXS-1216: Correct CHAR(n) handling The field length was wrongly compared to less than 255 for two byte field lengths. In addition to that, the metadata was interpreted in the wrong way. --- server/modules/routing/avro/avro_rbr.c | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/server/modules/routing/avro/avro_rbr.c b/server/modules/routing/avro/avro_rbr.c index bda08ea54..3d4e6d8ba 100644 --- a/server/modules/routing/avro/avro_rbr.c +++ b/server/modules/routing/avro/avro_rbr.c @@ -572,15 +572,23 @@ uint8_t* process_row_event_data(TABLE_MAP *map, TABLE_CREATE *create, avro_value * one or two bytes for string length. */ - uint8_t bytes = *ptr++; - int len = metadata[metadata_offset] + - (((metadata[metadata_offset + 1] >> 4) & 0x3) ^ 0x3); + uint16_t meta = metadata[metadata_offset + 1] + (metadata[metadata_offset] << 8); + int bytes = 0; + uint16_t extra_length = (((meta >> 4) & 0x300) ^ 0x300); + uint16_t field_length = (meta & 0xff) + extra_length; - if (len <= 255) + if (field_length > 255) { - bytes += *ptr++ << 8; + bytes = ptr[0] + (ptr[1] << 8); + ptr += 2; + } + else + { + bytes = *ptr++; } + MXS_INFO("[%ld] CHAR: field: %d bytes, data: %d bytes", i, field_length, bytes); + ss_dassert(bytes || *ptr == '\0'); char str[bytes + 1]; memcpy(str, ptr, bytes); str[bytes] = '\0'; From 9ed972e1ac4d59ad8aaf8838a2aa7da3da33af4d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Fri, 12 May 2017 11:28:07 +0300 Subject: [PATCH 18/27] Rename conflicting Avro fields When a user defined field conflicts with an internal MaxScale field, the field is suffixed with an underscore. 
--- server/modules/include/avrorouter.h | 17 +++++++++++++++++ server/modules/routing/avro/avro_schema.c | 5 +++++ 2 files changed, 22 insertions(+) diff --git a/server/modules/include/avrorouter.h b/server/modules/include/avrorouter.h index b621c3026..eeb02f6b6 100644 --- a/server/modules/include/avrorouter.h +++ b/server/modules/include/avrorouter.h @@ -71,6 +71,23 @@ static const char *avro_event_type = "event_type"; static const char *avro_timestamp = "timestamp"; static char *avro_client_ouput[] = { "Undefined", "JSON", "Avro" }; +static inline bool is_reserved_word(const char* word) +{ + return strcasecmp(word, avro_domain) == 0 || + strcasecmp(word, avro_server_id) == 0 || + strcasecmp(word, avro_sequence) == 0 || + strcasecmp(word, avro_event_number) == 0 || + strcasecmp(word, avro_event_type) == 0 || + strcasecmp(word, avro_timestamp) == 0; +} + +static inline void fix_reserved_word(char *tok) +{ + if (is_reserved_word(tok)) + { + strcat(tok, "_"); + } +} /** How a binlog file is closed */ typedef enum avro_binlog_end diff --git a/server/modules/routing/avro/avro_schema.c b/server/modules/routing/avro/avro_schema.c index 13bb44bc1..468c37c6a 100644 --- a/server/modules/routing/avro/avro_schema.c +++ b/server/modules/routing/avro/avro_schema.c @@ -584,6 +584,7 @@ static int process_column_definition(const char *nameptr, char*** dest, char*** char type[100] = ""; int len = extract_type_length(nameptr, type); nameptr = next_field_definition(nameptr); + fix_reserved_word(colname); lengths[i] = len; types[i] = strdup(type); @@ -834,11 +835,15 @@ void make_avro_token(char* dest, const char* src, int length) memcpy(dest, src, length); dest[length] = '\0'; + fix_reserved_word(dest); } int get_column_index(TABLE_CREATE *create, const char *tok) { int idx = -1; + char safe_tok[strlen(tok) + 2]; + strcpy(safe_tok, tok); + fix_reserved_word(safe_tok); for (int x = 0; x < create->columns; x++) { From 85b49520a2b524d49ea404fb3a58f46680915989 Mon Sep 17 00:00:00 2001 
From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Fri, 12 May 2017 11:30:19 +0300 Subject: [PATCH 19/27] Do checkpoint processing at end of binlog When the binlog has been read, it needs to be treated as if the transaction or row limit has been hit. This will cause all tables to be flushed to disk before the files are indexed. --- server/modules/routing/avro/avro_file.c | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/server/modules/routing/avro/avro_file.c b/server/modules/routing/avro/avro_file.c index 4d9819f0d..fc8834cff 100644 --- a/server/modules/routing/avro/avro_file.c +++ b/server/modules/routing/avro/avro_file.c @@ -460,6 +460,17 @@ void notify_all_clients(AVRO_INSTANCE *router) } } +void do_checkpoint(AVRO_INSTANCE *router, uint64_t *total_rows, uint64_t *total_commits) +{ + update_used_tables(router); + avro_flush_all_tables(router); + avro_save_conversion_state(router); + notify_all_clients(router); + *total_rows += router->row_count; + *total_commits += router->trx_count; + router->row_count = router->trx_count = 0; +} + /** * @brief Read all replication events from a binlog file. 
* @@ -541,6 +552,8 @@ avro_binlog_end_t avro_read_all_events(AVRO_INSTANCE *router) } else { + do_checkpoint(router, &total_rows, &total_commits); + MXS_INFO("Processed %lu transactions and %lu row events.", total_commits, total_rows); if (rotate_seen) @@ -734,13 +747,7 @@ avro_binlog_end_t avro_read_all_events(AVRO_INSTANCE *router) if (router->row_count >= router->row_target || router->trx_count >= router->trx_target) { - update_used_tables(router); - avro_flush_all_tables(router); - avro_save_conversion_state(router); - notify_all_clients(router); - total_rows += router->row_count; - total_commits += router->trx_count; - router->row_count = router->trx_count = 0; + do_checkpoint(router, &total_rows, &total_commits); } } From c464bb4ee123dc42b279534d14569ff323623ef2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Fri, 12 May 2017 16:47:20 +0300 Subject: [PATCH 20/27] MXS-1216: Store field real type and length in Avro schema The avro schema allows custom properties to be defined for the schema fields. The avrorouter stored extra information about the table into the schema for later use. Currently, this information is only generated by the avrorouter itself. Further improvements to the schema generator scripts need to be done. 
--- server/modules/routing/avro/avro_schema.c | 34 ++++++++++++++++++++--- 1 file changed, 30 insertions(+), 4 deletions(-) diff --git a/server/modules/routing/avro/avro_schema.c b/server/modules/routing/avro/avro_schema.c index 468c37c6a..444b571a4 100644 --- a/server/modules/routing/avro/avro_schema.c +++ b/server/modules/routing/avro/avro_schema.c @@ -126,9 +126,11 @@ char* json_new_schema_from_table(TABLE_MAP *map) for (uint64_t i = 0; i < map->columns; i++) { - json_array_append(array, json_pack_ex(&err, 0, "{s:s, s:s}", "name", - create->column_names[i], "type", - column_type_to_avro_type(map->column_types[i]))); + json_array_append(array, json_pack_ex(&err, 0, "{s:s, s:s, s:s, s:i}", + "name", create->column_names[i], + "type", column_type_to_avro_type(map->column_types[i]), + "real_type", create->column_types[i], + "length", create->column_lengths[i])); } json_object_set_new(schema, "fields", array); char* rval = json_dumps(schema, JSON_PRESERVE_ORDER); @@ -174,8 +176,10 @@ bool json_extract_field_names(const char* filename, TABLE_CREATE *table) { int array_size = json_array_size(arr); table->column_names = (char**)malloc(sizeof(char*) * (array_size)); + table->column_types = (char**)malloc(sizeof(char*) * (array_size)); + table->column_lengths = (int*)malloc(sizeof(int) * (array_size)); - if (table->column_names) + if (table->column_names && table->column_types && table->column_lengths) { int columns = 0; rval = true; @@ -186,6 +190,28 @@ bool json_extract_field_names(const char* filename, TABLE_CREATE *table) if (json_is_object(val)) { + json_t* value; + + if ((value = json_object_get(val, "real_type")) && json_is_string(value)) + { + table->column_types[columns] = strdup(json_string_value(value)); + } + else + { + table->column_types[columns] = strdup("unknown"); + MXS_WARNING("No \"real_type\" value defined. 
Treating as unknown type field."); + } + + if ((value = json_object_get(val, "length")) && json_is_integer(value)) + { + table->column_lengths[columns] = json_integer_value(value); + } + else + { + table->column_lengths[columns] = -1; + MXS_WARNING("No \"length\" value defined. Treating as default length field."); + } + json_t *name = json_object_get(val, "name"); if (name && json_is_string(name)) { From 70db289c608d7850b3db38fecaff1b82c30e5938 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Fri, 12 May 2017 16:49:15 +0300 Subject: [PATCH 21/27] MXS-1216: Fix DATETIME(n) value interpretation The DATETIME(n) values generated by a MariaDB 10.0 server were not interpreted correctly as the wrong algorithm was used to extract the values. DATETIME(0) values still do not work properly and they require further debugging and changes to the code. --- server/core/mysql_binlog.c | 109 +++++++++++++++++++++++++++---------- 1 file changed, 79 insertions(+), 30 deletions(-) diff --git a/server/core/mysql_binlog.c b/server/core/mysql_binlog.c index 90590a3da..93de0e3aa 100644 --- a/server/core/mysql_binlog.c +++ b/server/core/mysql_binlog.c @@ -26,6 +26,10 @@ #include #include +#include "mysql_client_server_protocol.h" + +static uint64_t unpack_bytes(uint8_t *ptr, size_t bytes); + /** * @brief Convert a table column type to a string * @@ -217,6 +221,35 @@ static void unpack_year(uint8_t *ptr, struct tm *dest) dest->tm_year = *ptr; } +/** Base-10 logarithm values */ +int64_t log_10_values[] = +{ + 1, + 10, + 100, + 1000, + 10000, + 100000, + 1000000, + 10000000, + 100000000 +}; + +/** + * If the TABLE_COL_TYPE_DATETIME type field is declared as a datetime with + * extra precision, the packed length is shorter than 8 bytes. 
+ */ +size_t datetime_sizes[] = +{ + 5, // DATETIME(0) + 6, // DATETIME(1) + 6, // DATETIME(2) + 7, // DATETIME(3) + 7, // DATETIME(4) + 7, // DATETIME(5) + 8 // DATETIME(6) +}; + /** * @brief Unpack a DATETIME * @@ -225,21 +258,52 @@ static void unpack_year(uint8_t *ptr, struct tm *dest) * @param val Value read from the binary log * @param dest Pointer where the unpacked value is stored */ -static void unpack_datetime(uint8_t *ptr, struct tm *dest) +static void unpack_datetime(uint8_t *ptr, int length, struct tm *dest) { - uint64_t val = 0; - memcpy(&val, ptr, sizeof(val)); - uint32_t second = val - ((val / 100) * 100); - val /= 100; - uint32_t minute = val - ((val / 100) * 100); - val /= 100; - uint32_t hour = val - ((val / 100) * 100); - val /= 100; - uint32_t day = val - ((val / 100) * 100); - val /= 100; - uint32_t month = val - ((val / 100) * 100); - val /= 100; - uint32_t year = val; + int64_t val = 0; + uint32_t second, minute, hour, day, month, year; + + if (length == -1) + { + val = gw_mysql_get_byte8(ptr); + second = val - ((val / 100) * 100); + val /= 100; + minute = val - ((val / 100) * 100); + val /= 100; + hour = val - ((val / 100) * 100); + val /= 100; + day = val - ((val / 100) * 100); + val /= 100; + month = val - ((val / 100) * 100); + val /= 100; + year = val; + } + else + { + // TODO: Figure out why DATETIME(0) doesn't work like it others do + val = unpack_bytes(ptr, datetime_sizes[length]); + val *= log_10_values[6 - length]; + + if (val < 0) + { + val = -val; + } + + int subsecond = val % 1000000; + val /= 1000000; + + second = val % 60; + val /= 60; + minute = val % 60; + val /= 60; + hour = val % 24; + val /= 24; + day = val % 32; + val /= 32; + month = val % 13; + val /= 13; + year = val; + } memset(dest, 0, sizeof(struct tm)); dest->tm_year = year - 1900; @@ -392,21 +456,6 @@ size_t unpack_bit(uint8_t *ptr, uint8_t *null_mask, uint32_t col_count, return metadata[1]; } -/** - * If the TABLE_COL_TYPE_DATETIME type field is declared as a 
datetime with - * extra precision, the packed length is shorter than 8 bytes. - */ -size_t datetime_sizes[] = -{ - 5, // DATETIME(0) - 6, // DATETIME(1) - 6, // DATETIME(2) - 7, // DATETIME(3) - 7, // DATETIME(4) - 7, // DATETIME(5) - 8 // DATETIME(6) -}; - /** * @brief Get the length of a temporal field * @param type Field type @@ -465,7 +514,7 @@ size_t unpack_temporal_value(uint8_t type, uint8_t *ptr, uint8_t *metadata, int break; case TABLE_COL_TYPE_DATETIME: - unpack_datetime(ptr, tm); + unpack_datetime(ptr, length, tm); break; case TABLE_COL_TYPE_DATETIME2: From c988735a03bdc923e2303e1c7094be3229b4f535 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Fri, 12 May 2017 11:22:04 +0300 Subject: [PATCH 22/27] MXS-1216: Fix crash on MariaDB 10.0 DATETIME(n) When a MariaDB 10.0 DATETIME field with a custom length was defined, the field offsets weren't calculated properly. As there is no metadata for pre-10.1 DATETIME types with decimal precision, the metadata (i.e. decimal count) needs to be gathered from the CREATE TABLE statement. This information is then used to calculate the correct field length when the value is decoded. This change does not fix the incorrect interpretation of the old DATETIME value. The converted values are still garbled due to the fact that the value needs to be shifted out of the decimal format before it can be properly converted. 
--- server/core/mysql_binlog.c | 22 +++- server/include/mysql_binlog.h | 2 +- server/modules/include/avrorouter.h | 2 + server/modules/routing/avro/avro_rbr.c | 20 +++- server/modules/routing/avro/avro_schema.c | 133 ++++++++++++++-------- 5 files changed, 127 insertions(+), 52 deletions(-) diff --git a/server/core/mysql_binlog.c b/server/core/mysql_binlog.c index ddf5825c0..90590a3da 100644 --- a/server/core/mysql_binlog.c +++ b/server/core/mysql_binlog.c @@ -392,6 +392,20 @@ size_t unpack_bit(uint8_t *ptr, uint8_t *null_mask, uint32_t col_count, return metadata[1]; } +/** + * If the TABLE_COL_TYPE_DATETIME type field is declared as a datetime with + * extra precision, the packed length is shorter than 8 bytes. + */ +size_t datetime_sizes[] = +{ + 5, // DATETIME(0) + 6, // DATETIME(1) + 6, // DATETIME(2) + 7, // DATETIME(3) + 7, // DATETIME(4) + 7, // DATETIME(5) + 8 // DATETIME(6) +}; /** * @brief Get the length of a temporal field @@ -399,7 +413,7 @@ size_t unpack_bit(uint8_t *ptr, uint8_t *null_mask, uint32_t col_count, * @param decimals How many decimals the field has * @return Number of bytes the temporal value takes */ -static size_t temporal_field_size(uint8_t type, uint8_t decimals) +static size_t temporal_field_size(uint8_t type, uint8_t decimals, int length) { switch (type) { @@ -414,7 +428,7 @@ static size_t temporal_field_size(uint8_t type, uint8_t decimals) return 3 + ((decimals + 1) / 2); case TABLE_COL_TYPE_DATETIME: - return 8; + return length < 0 || length > 6 ? 
8 : datetime_sizes[length]; case TABLE_COL_TYPE_TIMESTAMP: return 4; @@ -442,7 +456,7 @@ static size_t temporal_field_size(uint8_t type, uint8_t decimals) * @param val Extracted packed value * @param tm Pointer where the unpacked temporal value is stored */ -size_t unpack_temporal_value(uint8_t type, uint8_t *ptr, uint8_t *metadata, struct tm *tm) +size_t unpack_temporal_value(uint8_t type, uint8_t *ptr, uint8_t *metadata, int length, struct tm *tm) { switch (type) { @@ -475,7 +489,7 @@ size_t unpack_temporal_value(uint8_t type, uint8_t *ptr, uint8_t *metadata, stru ss_dassert(false); break; } - return temporal_field_size(type, *metadata); + return temporal_field_size(type, *metadata, length); } void format_temporal_value(char *str, size_t size, uint8_t type, struct tm *tm) diff --git a/server/include/mysql_binlog.h b/server/include/mysql_binlog.h index b1a81eaa2..ebe5574e5 100644 --- a/server/include/mysql_binlog.h +++ b/server/include/mysql_binlog.h @@ -83,7 +83,7 @@ bool column_is_decimal(uint8_t type); bool fixed_string_is_enum(uint8_t type); /** Value unpacking */ -size_t unpack_temporal_value(uint8_t type, uint8_t *ptr, uint8_t* metadata, struct tm *tm); +size_t unpack_temporal_value(uint8_t type, uint8_t *ptr, uint8_t* metadata, int length, struct tm *tm); size_t unpack_enum(uint8_t *ptr, uint8_t *metadata, uint8_t *dest); size_t unpack_numeric_field(uint8_t *ptr, uint8_t type, uint8_t* metadata, uint8_t* val); size_t unpack_bit(uint8_t *ptr, uint8_t *null_mask, uint32_t col_count, diff --git a/server/modules/include/avrorouter.h b/server/modules/include/avrorouter.h index a233915b4..b621c3026 100644 --- a/server/modules/include/avrorouter.h +++ b/server/modules/include/avrorouter.h @@ -98,6 +98,8 @@ typedef struct table_create { uint64_t columns; char **column_names; + char **column_types; + int* column_lengths; char *table; char *database; int version; /**< How many versions of this table have been used */ diff --git 
a/server/modules/routing/avro/avro_rbr.c b/server/modules/routing/avro/avro_rbr.c index b5a96f5ac..65c2c03d3 100644 --- a/server/modules/routing/avro/avro_rbr.c +++ b/server/modules/routing/avro/avro_rbr.c @@ -161,6 +161,11 @@ bool handle_table_map_event(AVRO_INSTANCE *router, REP_HEADER *hdr, uint8_t *ptr "table until a DDL statement for it is read.", table_ident); } + if (rval) + { + MXS_INFO("Table Map for '%s' at %lu", table_ident, router->current_pos); + } + return rval; } @@ -288,9 +293,13 @@ bool handle_row_event(AVRO_INSTANCE *router, REP_HEADER *hdr, uint8_t *ptr) * beforehand so we must continue processing them until we reach the end * of the event. */ int rows = 0; + MXS_INFO("Row Event for '%s' at %lu", table_ident, router->current_pos); while (ptr - start < hdr->event_size - BINLOG_EVENT_HDR_LEN) { + static uint64_t total_row_count = 1; + MXS_INFO("Row %lu", total_row_count++); + /** Add the current GTID and timestamp */ uint8_t *end = ptr + hdr->event_size - BINLOG_EVENT_HDR_LEN; int event_type = get_event_type(hdr->event_type); @@ -516,6 +525,7 @@ uint8_t* process_row_event_data(TABLE_MAP *map, TABLE_CREATE *create, avro_value npresent++; if (bit_is_set(null_bitmap, ncolumns, i)) { + MXS_INFO("[%ld] NULL", i); if (column_is_blob(map->column_types[i])) { uint8_t nullvalue = 0; @@ -545,6 +555,7 @@ uint8_t* process_row_event_data(TABLE_MAP *map, TABLE_CREATE *create, avro_value MXS_WARNING("ENUM/SET values larger than 255 values aren't supported."); } avro_value_set_string(&field, strval); + MXS_INFO("[%ld] ENUM: %lu bytes", i, bytes); ptr += bytes; ss_dassert(ptr < end); } @@ -594,6 +605,7 @@ uint8_t* process_row_event_data(TABLE_MAP *map, TABLE_CREATE *create, avro_value MXS_WARNING("BIT is not currently supported, values are stored as 0."); } avro_value_set_int(&field, value); + MXS_INFO("[%ld] BIT", i); ptr += bytes; ss_dassert(ptr < end); } @@ -602,6 +614,7 @@ uint8_t* process_row_event_data(TABLE_MAP *map, TABLE_CREATE *create, avro_value double 
f_value = 0.0; ptr += unpack_decimal_field(ptr, metadata + metadata_offset, &f_value); avro_value_set_double(&field, f_value); + MXS_INFO("[%ld] DOUBLE", i); ss_dassert(ptr < end); } else if (column_is_variable_string(map->column_types[i])) @@ -619,6 +632,7 @@ uint8_t* process_row_event_data(TABLE_MAP *map, TABLE_CREATE *create, avro_value ptr++; } + MXS_INFO("[%ld] VARCHAR: field: %d bytes, data: %lu bytes", i, bytes, sz); char buf[sz + 1]; memcpy(buf, ptr, sz); buf[sz] = '\0'; @@ -632,6 +646,7 @@ uint8_t* process_row_event_data(TABLE_MAP *map, TABLE_CREATE *create, avro_value uint64_t len = 0; memcpy(&len, ptr, bytes); ptr += bytes; + MXS_INFO("[%ld] BLOB: field: %d bytes, data: %lu bytes", i, bytes, len); if (len) { avro_value_set_bytes(&field, ptr, len); @@ -648,9 +663,12 @@ uint8_t* process_row_event_data(TABLE_MAP *map, TABLE_CREATE *create, avro_value { char buf[80]; struct tm tm; - ptr += unpack_temporal_value(map->column_types[i], ptr, &metadata[metadata_offset], &tm); + ptr += unpack_temporal_value(map->column_types[i], ptr, + &metadata[metadata_offset], + create->column_lengths[i], &tm); format_temporal_value(buf, sizeof(buf), map->column_types[i], &tm); avro_value_set_string(&field, buf); + MXS_INFO("[%ld] TEMPORAL: %s", i, buf); ss_dassert(ptr < end); } /** All numeric types (INT, LONG, FLOAT etc.) 
*/ diff --git a/server/modules/routing/avro/avro_schema.c b/server/modules/routing/avro/avro_schema.c index 8c339e9e9..13bb44bc1 100644 --- a/server/modules/routing/avro/avro_schema.c +++ b/server/modules/routing/avro/avro_schema.c @@ -491,7 +491,6 @@ static const char *extract_field_name(const char* ptr, char* dest, size_t size) dest[bytes] = '\0'; make_valid_avro_identifier(dest); - ptr = next_field_definition(ptr); } else { @@ -501,62 +500,97 @@ static const char *extract_field_name(const char* ptr, char* dest, size_t size) return ptr; } +int extract_type_length(const char* ptr, char *dest) +{ + /** Skip any leading whitespace */ + while (isspace(*ptr) || *ptr == '`') + { + ptr++; + } + + /** The field type definition starts here */ + const char *start = ptr; + + /** Skip characters until we either hit a whitespace character or the start + * of the length definition. */ + while (!isspace(*ptr) && *ptr != '(') + { + ptr++; + } + + /** Store type */ + int typelen = ptr - start; + memcpy(dest, start, typelen); + dest[typelen] = '\0'; + + /** Skip whitespace */ + while (isspace(*ptr)) + { + ptr++; + } + + int rval = -1; // No length defined + + /** Start of length definition */ + if (*ptr == '(') + { + ptr++; + char *end; + int val = strtol(ptr, &end, 10); + + if (*end == ')') + { + rval = val; + } + } + + return rval; +} + +int count_columns(const char* ptr) +{ + int i = 2; + + while ((ptr = strchr(ptr, ','))) + { + ptr++; + i++; + } + + return i; +} + /** * Process a table definition into an array of column names * @param nameptr table definition * @return Number of processed columns or -1 on error */ -static int process_column_definition(const char *nameptr, char*** dest) +static int process_column_definition(const char *nameptr, char*** dest, char*** dest_types, int** dest_lens) { - /** Process columns in groups of 8 */ - size_t chunks = 1; - const size_t chunk_size = 8; - int i = 0; - char **names = malloc(sizeof(char*) * (chunks * chunk_size + 1)); - - if 
(names == NULL) - { - MXS_ERROR("Memory allocation failed when trying allocate %ld bytes of memory.", - sizeof(char*) * chunks); - return -1; - } + int n = count_columns(nameptr); + *dest = malloc(sizeof(char*) * n); + *dest_types = malloc(sizeof(char*) * n); + *dest_lens = malloc(sizeof(int) * n); + char **names = *dest; + char **types = *dest_types; + int *lengths = *dest_lens; char colname[512]; + int i = 0; while ((nameptr = extract_field_name(nameptr, colname, sizeof(colname)))) { - if (i >= chunks * chunk_size) - { - char **tmp = realloc(names, (++chunks * chunk_size + 1) * sizeof(char*)); - if (tmp == NULL) - { - for (int x = 0; x < i; x++) - { - free(names[x]); - } - free(names); - MXS_ERROR("Memory allocation failed when trying allocate %ld bytes of memory.", - sizeof(char*) * chunks); - return -1; - } - names = tmp; - } + ss_dassert(i < n); + char type[100] = ""; + int len = extract_type_length(nameptr, type); + nameptr = next_field_definition(nameptr); - if ((names[i++] = strdup(colname)) == NULL) - { - for (int x = 0; x < i; x++) - { - free(names[x]); - } - free(names); - MXS_ERROR("Memory allocation failed when trying allocate %lu bytes " - "of memory.", strlen(colname)); - return -1; - } + lengths[i] = len; + types[i] = strdup(type); + names[i] = strdup(colname); + i++; } - *dest = names; - return i; } @@ -600,7 +634,7 @@ TABLE_CREATE* table_create_alloc(const char* sql, const char* event_db) char database[MYSQL_DATABASE_MAXLEN + 1]; const char *db = event_db; - MXS_DEBUG("Create table statement: %.*s", stmt_len, statement_sql); + MXS_INFO("Create table: %s", sql); if (!get_table_name(sql, table)) { @@ -620,8 +654,10 @@ TABLE_CREATE* table_create_alloc(const char* sql, const char* event_db) db = database; } + int* lengths = NULL; char **names = NULL; - int n_columns = process_column_definition(statement_sql, &names); + char **types = NULL; + int n_columns = process_column_definition(statement_sql, &names, &types, &lengths); ss_dassert(n_columns > 0); 
/** We have appear to have a valid CREATE TABLE statement */ @@ -633,6 +669,8 @@ TABLE_CREATE* table_create_alloc(const char* sql, const char* event_db) rval->version = 1; rval->was_used = false; rval->column_names = names; + rval->column_lengths = lengths; + rval->column_types = types; rval->columns = n_columns; rval->database = strdup(db); rval->table = strdup(table); @@ -675,8 +713,11 @@ void* table_create_free(TABLE_CREATE* value) for (uint64_t i = 0; i < value->columns; i++) { free(value->column_names[i]); + free(value->column_types[i]); } free(value->column_names); + free(value->column_types); + free(value->column_lengths); free(value->table); free(value->database); free(value); @@ -822,7 +863,7 @@ bool table_create_alter(TABLE_CREATE *create, const char *sql, const char *end) if (tok) { - MXS_DEBUG("Altering table %.*s\n", len, tok); + MXS_INFO("Alter table '%.*s'; %.*s\n", len, tok, (int)(end - sql), sql); def = tok + len; } From 926930e2411a5a3a4d39f43f0a89603de0522e15 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Fri, 12 May 2017 11:25:48 +0300 Subject: [PATCH 23/27] MXS-1216: Correct CHAR(n) handling The field length was wrongly compared to less than 255 for two byte field lengths. In addition to that, the metadata was interpreted in the wrong way. --- server/modules/routing/avro/avro_rbr.c | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/server/modules/routing/avro/avro_rbr.c b/server/modules/routing/avro/avro_rbr.c index 65c2c03d3..5d684d159 100644 --- a/server/modules/routing/avro/avro_rbr.c +++ b/server/modules/routing/avro/avro_rbr.c @@ -572,15 +572,23 @@ uint8_t* process_row_event_data(TABLE_MAP *map, TABLE_CREATE *create, avro_value * one or two bytes for string length. 
*/ - uint8_t bytes = *ptr++; - int len = metadata[metadata_offset] + - (((metadata[metadata_offset + 1] >> 4) & 0x3) ^ 0x3); + uint16_t meta = metadata[metadata_offset + 1] + (metadata[metadata_offset] << 8); + int bytes = 0; + uint16_t extra_length = (((meta >> 4) & 0x300) ^ 0x300); + uint16_t field_length = (meta & 0xff) + extra_length; - if (len <= 255) + if (field_length > 255) { - bytes += *ptr++ << 8; + bytes = ptr[0] + (ptr[1] << 8); + ptr += 2; + } + else + { + bytes = *ptr++; } + MXS_INFO("[%ld] CHAR: field: %d bytes, data: %d bytes", i, field_length, bytes); + ss_dassert(bytes || *ptr == '\0'); char str[bytes + 1]; memcpy(str, ptr, bytes); str[bytes] = '\0'; From 8a288110a9cd790c85778925e46816504ea6eba5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Fri, 12 May 2017 11:28:07 +0300 Subject: [PATCH 24/27] Rename conflicting Avro fields When a user defined field conflicts with an internal MaxScale field, the field is suffixed with an underscore. --- server/modules/include/avrorouter.h | 17 +++++++++++++++++ server/modules/routing/avro/avro_schema.c | 5 +++++ 2 files changed, 22 insertions(+) diff --git a/server/modules/include/avrorouter.h b/server/modules/include/avrorouter.h index b621c3026..eeb02f6b6 100644 --- a/server/modules/include/avrorouter.h +++ b/server/modules/include/avrorouter.h @@ -71,6 +71,23 @@ static const char *avro_event_type = "event_type"; static const char *avro_timestamp = "timestamp"; static char *avro_client_ouput[] = { "Undefined", "JSON", "Avro" }; +static inline bool is_reserved_word(const char* word) +{ + return strcasecmp(word, avro_domain) == 0 || + strcasecmp(word, avro_server_id) == 0 || + strcasecmp(word, avro_sequence) == 0 || + strcasecmp(word, avro_event_number) == 0 || + strcasecmp(word, avro_event_type) == 0 || + strcasecmp(word, avro_timestamp) == 0; +} + +static inline void fix_reserved_word(char *tok) +{ + if (is_reserved_word(tok)) + { + strcat(tok, "_"); + } +} /** How a binlog file is 
closed */ typedef enum avro_binlog_end diff --git a/server/modules/routing/avro/avro_schema.c b/server/modules/routing/avro/avro_schema.c index 13bb44bc1..468c37c6a 100644 --- a/server/modules/routing/avro/avro_schema.c +++ b/server/modules/routing/avro/avro_schema.c @@ -584,6 +584,7 @@ static int process_column_definition(const char *nameptr, char*** dest, char*** char type[100] = ""; int len = extract_type_length(nameptr, type); nameptr = next_field_definition(nameptr); + fix_reserved_word(colname); lengths[i] = len; types[i] = strdup(type); @@ -834,11 +835,15 @@ void make_avro_token(char* dest, const char* src, int length) memcpy(dest, src, length); dest[length] = '\0'; + fix_reserved_word(dest); } int get_column_index(TABLE_CREATE *create, const char *tok) { int idx = -1; + char safe_tok[strlen(tok) + 2]; + strcpy(safe_tok, tok); + fix_reserved_word(safe_tok); for (int x = 0; x < create->columns; x++) { From 6b6a7fa4a1b7ae555f9a58d476e5ad4b215fe0ca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Fri, 12 May 2017 11:30:19 +0300 Subject: [PATCH 25/27] Do checkpoint processing at end of binlog When the binlog has been read, it needs to be treated as if the transaction or row limit has been hit. This will cause all tables to be flushed to disk before the files are indexed. 
--- server/modules/routing/avro/avro_file.c | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/server/modules/routing/avro/avro_file.c b/server/modules/routing/avro/avro_file.c index 4d9819f0d..fc8834cff 100644 --- a/server/modules/routing/avro/avro_file.c +++ b/server/modules/routing/avro/avro_file.c @@ -460,6 +460,17 @@ void notify_all_clients(AVRO_INSTANCE *router) } } +void do_checkpoint(AVRO_INSTANCE *router, uint64_t *total_rows, uint64_t *total_commits) +{ + update_used_tables(router); + avro_flush_all_tables(router); + avro_save_conversion_state(router); + notify_all_clients(router); + *total_rows += router->row_count; + *total_commits += router->trx_count; + router->row_count = router->trx_count = 0; +} + /** * @brief Read all replication events from a binlog file. * @@ -541,6 +552,8 @@ avro_binlog_end_t avro_read_all_events(AVRO_INSTANCE *router) } else { + do_checkpoint(router, &total_rows, &total_commits); + MXS_INFO("Processed %lu transactions and %lu row events.", total_commits, total_rows); if (rotate_seen) @@ -734,13 +747,7 @@ avro_binlog_end_t avro_read_all_events(AVRO_INSTANCE *router) if (router->row_count >= router->row_target || router->trx_count >= router->trx_target) { - update_used_tables(router); - avro_flush_all_tables(router); - avro_save_conversion_state(router); - notify_all_clients(router); - total_rows += router->row_count; - total_commits += router->trx_count; - router->row_count = router->trx_count = 0; + do_checkpoint(router, &total_rows, &total_commits); } } From a12d19591efc3e977b54288b79498b77ccc5c2a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Fri, 12 May 2017 16:47:20 +0300 Subject: [PATCH 26/27] MXS-1216: Store field real type and length in Avro schema The avro schema allows custom properties to be defined for the schema fields. The avrorouter stored extra information about the table into the schema for later use. 
Currently, this information is only generated by the avrorouter itself. Further improvements to the schema generator scripts need to be done. --- server/modules/routing/avro/avro_schema.c | 34 ++++++++++++++++++++--- 1 file changed, 30 insertions(+), 4 deletions(-) diff --git a/server/modules/routing/avro/avro_schema.c b/server/modules/routing/avro/avro_schema.c index 468c37c6a..444b571a4 100644 --- a/server/modules/routing/avro/avro_schema.c +++ b/server/modules/routing/avro/avro_schema.c @@ -126,9 +126,11 @@ char* json_new_schema_from_table(TABLE_MAP *map) for (uint64_t i = 0; i < map->columns; i++) { - json_array_append(array, json_pack_ex(&err, 0, "{s:s, s:s}", "name", - create->column_names[i], "type", - column_type_to_avro_type(map->column_types[i]))); + json_array_append(array, json_pack_ex(&err, 0, "{s:s, s:s, s:s, s:i}", + "name", create->column_names[i], + "type", column_type_to_avro_type(map->column_types[i]), + "real_type", create->column_types[i], + "length", create->column_lengths[i])); } json_object_set_new(schema, "fields", array); char* rval = json_dumps(schema, JSON_PRESERVE_ORDER); @@ -174,8 +176,10 @@ bool json_extract_field_names(const char* filename, TABLE_CREATE *table) { int array_size = json_array_size(arr); table->column_names = (char**)malloc(sizeof(char*) * (array_size)); + table->column_types = (char**)malloc(sizeof(char*) * (array_size)); + table->column_lengths = (int*)malloc(sizeof(int) * (array_size)); - if (table->column_names) + if (table->column_names && table->column_types && table->column_lengths) { int columns = 0; rval = true; @@ -186,6 +190,28 @@ bool json_extract_field_names(const char* filename, TABLE_CREATE *table) if (json_is_object(val)) { + json_t* value; + + if ((value = json_object_get(val, "real_type")) && json_is_string(value)) + { + table->column_types[columns] = strdup(json_string_value(value)); + } + else + { + table->column_types[columns] = strdup("unknown"); + MXS_WARNING("No \"real_type\" value defined. 
Treating as unknown type field."); + } + + if ((value = json_object_get(val, "length")) && json_is_integer(value)) + { + table->column_lengths[columns] = json_integer_value(value); + } + else + { + table->column_lengths[columns] = -1; + MXS_WARNING("No \"length\" value defined. Treating as default length field."); + } + json_t *name = json_object_get(val, "name"); if (name && json_is_string(name)) { From 5a0d2c54bd564688af44695067953ac16a09ee85 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Fri, 12 May 2017 16:49:15 +0300 Subject: [PATCH 27/27] MXS-1216: Fix DATETIME(n) value interpretation The DATETIME(n) values generated by a MariaDB 10.0 server were not interpreted correctly as the wrong algorithm was used to extract the values. DATETIME(0) values still do not work properly and they require further debugging and changes to the code. --- server/core/mysql_binlog.c | 109 +++++++++++++++++++++++++++---------- 1 file changed, 79 insertions(+), 30 deletions(-) diff --git a/server/core/mysql_binlog.c b/server/core/mysql_binlog.c index 90590a3da..93de0e3aa 100644 --- a/server/core/mysql_binlog.c +++ b/server/core/mysql_binlog.c @@ -26,6 +26,10 @@ #include #include +#include "mysql_client_server_protocol.h" + +static uint64_t unpack_bytes(uint8_t *ptr, size_t bytes); + /** * @brief Convert a table column type to a string * @@ -217,6 +221,35 @@ static void unpack_year(uint8_t *ptr, struct tm *dest) dest->tm_year = *ptr; } +/** Powers of 10 (10^0 ... 10^8) */ +int64_t log_10_values[] = +{ + 1, + 10, + 100, + 1000, + 10000, + 100000, + 1000000, + 10000000, + 100000000 +}; + +/** + * If the TABLE_COL_TYPE_DATETIME type field is declared as a datetime with + * extra precision, the packed length is shorter than 8 bytes. 
+ */
+size_t datetime_sizes[] =
+{
+    5, // DATETIME(0)
+    6, // DATETIME(1)
+    6, // DATETIME(2)
+    7, // DATETIME(3)
+    7, // DATETIME(4)
+    7, // DATETIME(5)
+    8  // DATETIME(6)
+};
+
 /**
  * @brief Unpack a DATETIME
  *
@@ -225,21 +258,52 @@ static void unpack_year(uint8_t *ptr, struct tm *dest)
  * @param val Value read from the binary log
  * @param dest Pointer where the unpacked value is stored
  */
-static void unpack_datetime(uint8_t *ptr, struct tm *dest)
+static void unpack_datetime(uint8_t *ptr, int length, struct tm *dest)
 {
-    uint64_t val = 0;
-    memcpy(&val, ptr, sizeof(val));
-    uint32_t second = val - ((val / 100) * 100);
-    val /= 100;
-    uint32_t minute = val - ((val / 100) * 100);
-    val /= 100;
-    uint32_t hour = val - ((val / 100) * 100);
-    val /= 100;
-    uint32_t day = val - ((val / 100) * 100);
-    val /= 100;
-    uint32_t month = val - ((val / 100) * 100);
-    val /= 100;
-    uint32_t year = val;
+    int64_t val = 0;
+    uint32_t second, minute, hour, day, month, year;
+
+    if (length == -1)
+    {
+        val = gw_mysql_get_byte8(ptr);
+        second = val - ((val / 100) * 100);
+        val /= 100;
+        minute = val - ((val / 100) * 100);
+        val /= 100;
+        hour = val - ((val / 100) * 100);
+        val /= 100;
+        day = val - ((val / 100) * 100);
+        val /= 100;
+        month = val - ((val / 100) * 100);
+        val /= 100;
+        year = val;
+    }
+    else
+    {
+        // TODO: Figure out why DATETIME(0) doesn't work like the others do
+        val = unpack_bytes(ptr, datetime_sizes[length]);
+        val *= log_10_values[6 - length];
+
+        if (val < 0)
+        {
+            val = -val;
+        }
+
+        int subsecond = val % 1000000;
+        val /= 1000000;
+
+        second = val % 60;
+        val /= 60;
+        minute = val % 60;
+        val /= 60;
+        hour = val % 24;
+        val /= 24;
+        day = val % 32;
+        val /= 32;
+        month = val % 13;
+        val /= 13;
+        year = val;
+    }
 
     memset(dest, 0, sizeof(struct tm));
     dest->tm_year = year - 1900;
@@ -392,21 +456,6 @@ size_t unpack_bit(uint8_t *ptr, uint8_t *null_mask, uint32_t col_count,
     return metadata[1];
 }
 
-/**
- * If the TABLE_COL_TYPE_DATETIME type field is declared as a
datetime with - * extra precision, the packed length is shorter than 8 bytes. - */ -size_t datetime_sizes[] = -{ - 5, // DATETIME(0) - 6, // DATETIME(1) - 6, // DATETIME(2) - 7, // DATETIME(3) - 7, // DATETIME(4) - 7, // DATETIME(5) - 8 // DATETIME(6) -}; - /** * @brief Get the length of a temporal field * @param type Field type @@ -465,7 +514,7 @@ size_t unpack_temporal_value(uint8_t type, uint8_t *ptr, uint8_t *metadata, int break; case TABLE_COL_TYPE_DATETIME: - unpack_datetime(ptr, tm); + unpack_datetime(ptr, length, tm); break; case TABLE_COL_TYPE_DATETIME2: