From f2734f895e5c008442b593927f37bc4563fe7c73 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Sun, 26 Mar 2017 13:54:57 +0300 Subject: [PATCH 01/32] Revert "Update dbfwfilter build configuration" This reverts commit 09ef292283311483354e6cd628318a5cafd96b2c. The old syntax is still required on SLES 11. --- server/modules/filter/dbfwfilter/ruleparser.y | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/modules/filter/dbfwfilter/ruleparser.y b/server/modules/filter/dbfwfilter/ruleparser.y index ba65ed381..33ad53cfe 100644 --- a/server/modules/filter/dbfwfilter/ruleparser.y +++ b/server/modules/filter/dbfwfilter/ruleparser.y @@ -27,7 +27,7 @@ %pure-parser /** Prefix all functions */ -%name-prefix "dbfw_yy" +%name-prefix="dbfw_yy" /** The pure parser requires one extra parameter */ %parse-param {void* scanner} From 4b2d94d0f84343f4d3bbf372d8612b413ecd0300 Mon Sep 17 00:00:00 2001 From: Johan Wikman Date: Tue, 28 Mar 2017 16:15:46 +0300 Subject: [PATCH 02/32] When a transaction has been started, no other checks are needed If we do check then we will end up turning off the read only flag that just was set on. --- server/modules/filter/cache/cachefiltersession.cc | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/server/modules/filter/cache/cachefiltersession.cc b/server/modules/filter/cache/cachefiltersession.cc index d2dc9e3da..279ace633 100644 --- a/server/modules/filter/cache/cachefiltersession.cc +++ b/server/modules/filter/cache/cachefiltersession.cc @@ -818,11 +818,15 @@ bool CacheFilterSession::should_consult_cache(GWBUF* pPacket) if (qc_query_is_type(type_mask, QUERY_TYPE_BEGIN_TRX)) { + if (log_decisions()) + { + zReason = "transaction start"; + } + // When a transaction is started, we initially assume it is read-only. m_is_read_only = true; } - - if (!session_trx_is_active(m_pSession)) + else if (!session_trx_is_active(m_pSession)) { if (log_decisions()) { From 1901a3bc0adca9d04788b36ecdd7245d15e58c08 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Tue, 28 Mar 2017 13:32:58 +0300 Subject: [PATCH 03/32] Bind to IPv6 addresses by default The `::` address covers both IPv4 and IPv6 addresses allowing both IP versions to be used by default. --- server/core/service.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/core/service.c b/server/core/service.c index 3a908b6f1..cd8dccb5f 100644 --- a/server/core/service.c +++ b/server/core/service.c @@ -311,7 +311,7 @@ serviceStartPort(SERVICE *service, SERV_LISTENER *port) } else { - sprintf(config_bind, "0.0.0.0|%d", port->port); + sprintf(config_bind, "::|%d", port->port); } /** Load the authentication users before before starting the listener */ From b458b6375601597ebb7a5a01ec66f72fe6357b81 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Wed, 29 Mar 2017 17:14:39 +0300 Subject: [PATCH 04/32] Use IPv6 for created listeners When listeners are created, use the default values of [::]:3306. 
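[Editorial illustration, not part of the patch: the reason a single `::` listener covers both protocol versions is that an AF_INET6 socket bound to the unspecified address also accepts IPv4 clients as IPv4-mapped addresses, as long as IPV6_V6ONLY is not enabled. The minimal standalone sketch below shows that pattern; the helper name open_dual_stack_listener and the backlog value are arbitrary, and this is not MaxScale's actual listener code.]

```
#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

/* Minimal dual-stack listener: an AF_INET6 socket bound to "::" accepts
 * IPv4 peers as IPv4-mapped addresses when IPV6_V6ONLY is disabled. */
static int open_dual_stack_listener(int port)
{
    int fd = socket(AF_INET6, SOCK_STREAM, 0);
    if (fd == -1)
    {
        return -1;
    }

    int no = 0;
    /* The system default for IPV6_V6ONLY varies, so clear it explicitly. */
    setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &no, sizeof(no));

    struct sockaddr_in6 addr;
    memset(&addr, 0, sizeof(addr));
    addr.sin6_family = AF_INET6;
    addr.sin6_addr = in6addr_any;   /* the "::" wildcard address */
    addr.sin6_port = htons(port);

    if (bind(fd, (struct sockaddr*)&addr, sizeof(addr)) != 0 || listen(fd, 128) != 0)
    {
        close(fd);
        return -1;
    }

    return fd;
}
```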
--- server/core/config_runtime.c | 4 ++-- server/core/maxscale/config_runtime.h | 2 +- server/modules/authenticator/MySQLAuth/mysql_auth.c | 4 ++-- server/modules/routing/debugcli/debugcmd.c | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/server/core/config_runtime.c b/server/core/config_runtime.c index b572c72fe..a84e77fb6 100644 --- a/server/core/config_runtime.c +++ b/server/core/config_runtime.c @@ -461,7 +461,7 @@ bool runtime_create_listener(SERVICE *service, const char *name, const char *add if (addr == NULL || strcasecmp(addr, "default") == 0) { - addr = "0.0.0.0"; + addr = "::"; } if (port == NULL || strcasecmp(port, "default") == 0) { @@ -508,7 +508,7 @@ bool runtime_create_listener(SERVICE *service, const char *name, const char *add if (rval) { - const char *print_addr = addr ? addr : "0.0.0.0"; + const char *print_addr = addr ? addr : "::"; SERV_LISTENER *listener = serviceCreateListener(service, name, proto, addr, u_port, auth, auth_opt, ssl); diff --git a/server/core/maxscale/config_runtime.h b/server/core/maxscale/config_runtime.h index 952bc1768..b18ed9c66 100644 --- a/server/core/maxscale/config_runtime.h +++ b/server/core/maxscale/config_runtime.h @@ -125,7 +125,7 @@ bool runtime_alter_monitor(MXS_MONITOR *monitor, char *key, char *value); * * @param service Service where the listener is added * @param name Name of the listener - * @param addr Listening address, NULL for default of 0.0.0.0 + * @param addr Listening address, NULL for default of :: * @param port Listening port, NULL for default of 3306 * @param proto Listener protocol, NULL for default of "MySQLClient" * @param auth Listener authenticator, NULL for protocol default authenticator diff --git a/server/modules/authenticator/MySQLAuth/mysql_auth.c b/server/modules/authenticator/MySQLAuth/mysql_auth.c index 12da1e626..f2c3d27ba 100644 --- a/server/modules/authenticator/MySQLAuth/mysql_auth.c +++ b/server/modules/authenticator/MySQLAuth/mysql_auth.c @@ -630,8 +630,8 @@ static int mysql_auth_load_users(SERV_LISTENER *port) if (loaded < 0) { - MXS_ERROR("[%s] Unable to load users for listener %s listening at %s:%d.", service->name, - port->name, port->address ? port->address : "0.0.0.0", port->port); + MXS_ERROR("[%s] Unable to load users for listener %s listening at [%s]:%d.", service->name, + port->name, port->address ? port->address : "::", port->port); if (instance->inject_service_user) { diff --git a/server/modules/routing/debugcli/debugcmd.c b/server/modules/routing/debugcli/debugcmd.c index e200e5aba..f705679cb 100644 --- a/server/modules/routing/debugcli/debugcmd.c +++ b/server/modules/routing/debugcli/debugcmd.c @@ -1071,7 +1071,7 @@ struct subcommand createoptions[] = "Create a new server from the following parameters.\n\n" "SERVICE Service where this listener is added\n" "NAME Listener name\n" - "HOST Listener host address (default 0.0.0.0)\n" + "HOST Listener host address (default [::])\n" "PORT Listener port (default 3306)\n" "PROTOCOL Listener protocol (default MySQLClient)\n" "AUTHENTICATOR Authenticator module name (default MySQLAuth)\n" From a1d1413b24afe7611eaadd8d6d651390be1d5ed4 Mon Sep 17 00:00:00 2001 From: Johan Wikman Date: Thu, 30 Mar 2017 12:33:20 +0300 Subject: [PATCH 05/32] Add atomic_add for 64-bit integers. Now only GCC intrinsics are used. 
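[Editorial usage note, not from the patch: the 64-bit variants behave like the existing atomic_add and return the value the variable held before the addition, per the __sync_fetch_and_add semantics. A hypothetical caller might look like the sketch below; n_queries and on_query_routed are made-up names, while atomic_add_uint64 is the function introduced in this patch.]

```
#include <stdint.h>
#include <maxscale/atomic.h>

static uint64_t n_queries = 0;

void on_query_routed(void)
{
    /* The pre-increment value is returned; here it is simply discarded. */
    uint64_t before = atomic_add_uint64(&n_queries, 1);
    (void)before;
}
```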
--- include/maxscale/atomic.h | 10 ++++------ server/core/atomic.c | 34 ++++++++++++++-------------------- 2 files changed, 18 insertions(+), 26 deletions(-) diff --git a/include/maxscale/atomic.h b/include/maxscale/atomic.h index 9b7e0cb97..2813ede48 100644 --- a/include/maxscale/atomic.h +++ b/include/maxscale/atomic.h @@ -21,11 +21,7 @@ MXS_BEGIN_DECLS /** - * Implementation of an atomic add operation for the GCC environment, or the - * X86 processor. If we are working within GNU C then we can use the GCC - * atomic add built in function, which is portable across platforms that - * implement GCC. Otherwise, this function currently supports only X86 - * architecture (without further development). + * Implementation of an atomic add operations for the GCC environment. * * Adds a value to the contents of a location pointed to by the first parameter. * The add operation is atomic and the return value is the value stored in the @@ -36,7 +32,9 @@ MXS_BEGIN_DECLS * @param value Value to be added * @return The value of variable before the add occurred */ -int atomic_add(int *variable, int value); +int atomic_add(int *variable, int value); +int64_t atomic_add_int64(int64_t *variable, int64_t value); +uint64_t atomic_add_uint64(uint64_t *variable, int64_t value); /** * @brief Impose a full memory barrier diff --git a/server/core/atomic.c b/server/core/atomic.c index 5e51cd568..cca7ea4d7 100644 --- a/server/core/atomic.c +++ b/server/core/atomic.c @@ -11,29 +11,23 @@ * Public License. */ +#include + /** * @file atomic.c - Implementation of atomic operations for MaxScale - * - * @verbatim - * Revision History - * - * Date Who Description - * 10/06/13 Mark Riddoch Initial implementation - * - * @endverbatim */ -int -atomic_add(int *variable, int value) +int atomic_add(int *variable, int value) { -#ifdef __GNUC__ - return (int) __sync_fetch_and_add (variable, value); -#else - asm volatile( - "lock; xaddl %%eax, %2;" - :"=a" (value) - : "a" (value), "m" (*variable) - : "memory" ); - return value; -#endif + return __sync_fetch_and_add(variable, value); +} + +int64_t atomic_add_int64(int64_t *variable, int64_t value) +{ + return __sync_fetch_and_add(variable, value); +} + +uint64_t atomic_add_uint64(uint64_t *variable, int64_t value) +{ + return __sync_fetch_and_add(variable, value); } From 726610b67dbf8e913839c98fd8c64006d6140a51 Mon Sep 17 00:00:00 2001 From: Johan Wikman Date: Thu, 30 Mar 2017 12:44:27 +0300 Subject: [PATCH 06/32] Use 64-bit integers for statistics in rwsplit Wraparound is not likely anymore. 
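[Editorial note on scale, assuming an illustrative rate of 50 000 queries per second: a 32-bit counter wraps after 2^32 ≈ 4.3 billion increments, i.e. after roughly 2^32 / 50 000 ≈ 86 000 seconds, about a day of sustained traffic, while a 64-bit counter at the same rate would take on the order of ten million years to wrap.]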
--- .../modules/routing/readwritesplit/readwritesplit.c | 13 +++++++------ .../modules/routing/readwritesplit/readwritesplit.h | 10 +++++----- .../modules/routing/readwritesplit/rwsplit_mysql.c | 2 +- .../routing/readwritesplit/rwsplit_route_stmt.c | 8 ++++---- 4 files changed, 17 insertions(+), 16 deletions(-) diff --git a/server/modules/routing/readwritesplit/readwritesplit.c b/server/modules/routing/readwritesplit/readwritesplit.c index 4c4d5ebab..7b0e3da46 100644 --- a/server/modules/routing/readwritesplit/readwritesplit.c +++ b/server/modules/routing/readwritesplit/readwritesplit.c @@ -13,6 +13,7 @@ #include "readwritesplit.h" +#include #include #include #include @@ -620,17 +621,17 @@ static void diagnostics(MXS_ROUTER *instance, DCB *dcb) all_pct = ((double)router->stats.n_all / (double)router->stats.n_queries) * 100.0; } - dcb_printf(dcb, "\tNumber of router sessions: %d\n", + dcb_printf(dcb, "\tNumber of router sessions: %" PRIu64 "\n", router->stats.n_sessions); dcb_printf(dcb, "\tCurrent no. of router sessions: %d\n", router->service->stats.n_current); - dcb_printf(dcb, "\tNumber of queries forwarded: %d\n", + dcb_printf(dcb, "\tNumber of queries forwarded: %" PRIu64 "\n", router->stats.n_queries); - dcb_printf(dcb, "\tNumber of queries forwarded to master: %d (%.2f%%)\n", + dcb_printf(dcb, "\tNumber of queries forwarded to master: %" PRIu64 " (%.2f%%)\n", router->stats.n_master, master_pct); - dcb_printf(dcb, "\tNumber of queries forwarded to slave: %d (%.2f%%)\n", + dcb_printf(dcb, "\tNumber of queries forwarded to slave: %" PRIu64 " (%.2f%%)\n", router->stats.n_slave, slave_pct); - dcb_printf(dcb, "\tNumber of queries forwarded to all: %d (%.2f%%)\n", + dcb_printf(dcb, "\tNumber of queries forwarded to all: %" PRIu64 " (%.2f%%)\n", router->stats.n_all, all_pct); if ((weightby = serviceGetWeightingParameter(router->service)) != NULL) @@ -782,7 +783,7 @@ static void clientReply(MXS_ROUTER *instance, gwbuf_clone(bref->bref_pending_cmd))) == 1) { ROUTER_INSTANCE* inst = (ROUTER_INSTANCE *)instance; - atomic_add(&inst->stats.n_queries, 1); + atomic_add_uint64(&inst->stats.n_queries, 1); /** * Add one query response waiter to backend reference */ diff --git a/server/modules/routing/readwritesplit/readwritesplit.h b/server/modules/routing/readwritesplit/readwritesplit.h index d687d0e4b..671bd9f9c 100644 --- a/server/modules/routing/readwritesplit/readwritesplit.h +++ b/server/modules/routing/readwritesplit/readwritesplit.h @@ -335,11 +335,11 @@ struct router_client_session */ typedef struct { - int n_sessions; /*< Number sessions created */ - int n_queries; /*< Number of queries forwarded */ - int n_master; /*< Number of stmts sent to master */ - int n_slave; /*< Number of stmts sent to slave */ - int n_all; /*< Number of stmts sent to all */ + uint64_t n_sessions; /*< Number sessions created */ + uint64_t n_queries; /*< Number of queries forwarded */ + uint64_t n_master; /*< Number of stmts sent to master */ + uint64_t n_slave; /*< Number of stmts sent to slave */ + uint64_t n_all; /*< Number of stmts sent to all */ } ROUTER_STATS; /** diff --git a/server/modules/routing/readwritesplit/rwsplit_mysql.c b/server/modules/routing/readwritesplit/rwsplit_mysql.c index ab767e250..68ca4a3ef 100644 --- a/server/modules/routing/readwritesplit/rwsplit_mysql.c +++ b/server/modules/routing/readwritesplit/rwsplit_mysql.c @@ -282,7 +282,7 @@ handle_target_is_all(route_target_t route_target, if (result) { - atomic_add(&inst->stats.n_all, 1); + atomic_add_uint64(&inst->stats.n_all, 1); } return 
result; } diff --git a/server/modules/routing/readwritesplit/rwsplit_route_stmt.c b/server/modules/routing/readwritesplit/rwsplit_route_stmt.c index 27fe33470..5f020065f 100644 --- a/server/modules/routing/readwritesplit/rwsplit_route_stmt.c +++ b/server/modules/routing/readwritesplit/rwsplit_route_stmt.c @@ -1069,7 +1069,7 @@ bool handle_slave_is_target(ROUTER_INSTANCE *inst, ROUTER_CLIENT_SES *rses, */ if (rwsplit_get_dcb(target_dcb, rses, BE_SLAVE, NULL, rlag_max)) { - atomic_add(&inst->stats.n_slave, 1); + atomic_add_uint64(&inst->stats.n_slave, 1); return true; } else @@ -1157,14 +1157,14 @@ bool handle_master_is_target(ROUTER_INSTANCE *inst, ROUTER_CLIENT_SES *rses, if (succp && master_dcb == curr_master_dcb) { - atomic_add(&inst->stats.n_master, 1); + atomic_add_uint64(&inst->stats.n_master, 1); *target_dcb = master_dcb; } else { if (succp && master_dcb == curr_master_dcb) { - atomic_add(&inst->stats.n_master, 1); + atomic_add_uint64(&inst->stats.n_master, 1); *target_dcb = master_dcb; } else @@ -1253,7 +1253,7 @@ handle_got_target(ROUTER_INSTANCE *inst, ROUTER_CLIENT_SES *rses, backend_ref_t *bref; - atomic_add(&inst->stats.n_queries, 1); + atomic_add_uint64(&inst->stats.n_queries, 1); /** * Add one query response waiter to backend reference */ From b4c119915bb720f6488d485a194cab84e7efe3db Mon Sep 17 00:00:00 2001 From: Johan Wikman Date: Thu, 30 Mar 2017 21:49:57 +0300 Subject: [PATCH 07/32] Fix gwbuf_rtrim - If everything in the first buffer of a buffer chain is consumed, then the whole chain and not just the first buffer was freed. NOTE: gwbuf_rtrim needs to be fixed so that it removes data from the tail of a chain and *not* from the end of the first buffer in a chain. That cannot ever be what is wanted. --- server/core/buffer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/core/buffer.c b/server/core/buffer.c index ddf3e24bf..cb10df951 100644 --- a/server/core/buffer.c +++ b/server/core/buffer.c @@ -640,7 +640,7 @@ gwbuf_rtrim(GWBUF *head, unsigned int n_bytes) if (GWBUF_EMPTY(head)) { rval = head->next; - gwbuf_free(head); + gwbuf_free_one(head); } return rval; } From 29fa4a608854d99e1131c2b66e64bb74d32f1ce6 Mon Sep 17 00:00:00 2001 From: Johan Wikman Date: Thu, 30 Mar 2017 22:02:34 +0300 Subject: [PATCH 08/32] Fix testbuffer.c Free memory allocated by tests, so that it is meaningful to run under valgrind in order to check for GWBUF leaks. 
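[Editorial how-to note: once the tests free what they allocate, a leak check can be run with something like `valgrind --leak-check=full <path to the built buffer test binary>`; the exact binary name and location depend on the CMake build tree and are not specified by this patch.]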
--- server/core/test/testbuffer.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/server/core/test/testbuffer.c b/server/core/test/testbuffer.c index 8b3f87118..ffed77791 100644 --- a/server/core/test/testbuffer.c +++ b/server/core/test/testbuffer.c @@ -85,6 +85,8 @@ GWBUF* create_test_buffer() total += buffers[i]; } + MXS_FREE(data); + return head; } @@ -139,15 +141,14 @@ void copy_buffer(int n, int offset) ss_info_dassert(gwbuf_copy_data(buffer, 0, cutoff, dest) == cutoff, "All bytes should be read"); ss_info_dassert(memcmp(data, dest, sizeof(dest)) == 0, "Data should be OK"); gwbuf_free(buffer); + MXS_FREE(data); } /** gwbuf_split test - These tests assume allocation will always succeed */ void test_split() { size_t headsize = 10; - GWBUF* head = gwbuf_alloc(headsize); size_t tailsize = 20; - GWBUF* tail = gwbuf_alloc(tailsize); GWBUF* oldchain = gwbuf_append(gwbuf_alloc(headsize), gwbuf_alloc(tailsize)); ss_info_dassert(gwbuf_length(oldchain) == headsize + tailsize, "Allocated buffer should be 30 bytes"); @@ -178,6 +179,7 @@ void test_split() ss_info_dassert(newchain, "New chain should be non-NULL"); ss_info_dassert(gwbuf_length(newchain) == headsize + tailsize, "New chain should be 30 bytes long"); ss_info_dassert(oldchain == NULL, "Old chain should be NULL"); + gwbuf_free(newchain); /** Splitting of contiguous memory */ GWBUF* buffer = gwbuf_alloc(10); @@ -189,6 +191,8 @@ void test_split() ss_info_dassert(newbuf->tail == newbuf, "New buffer's tail should point to itself"); ss_info_dassert(buffer->next == NULL, "Old buffer's next pointer should be NULL"); ss_info_dassert(newbuf->next == NULL, "New buffer's next pointer should be NULL"); + gwbuf_free(buffer); + gwbuf_free(newbuf); /** Bad parameter tests */ GWBUF* ptr = NULL; @@ -198,7 +202,6 @@ void test_split() ss_info_dassert(gwbuf_split(&buffer, 0) == NULL, "gwbuf_split with length of 0 should return NULL"); ss_info_dassert(gwbuf_length(buffer) == 10, "Buffer should be 10 bytes"); gwbuf_free(buffer); - gwbuf_free(newbuf); /** Splitting near buffer boudaries */ for (int i = 0; i < n_buffers - 1; i++) @@ -386,6 +389,9 @@ void test_compare() ss_dassert(gwbuf_compare(lhs, rhs) == 0); ss_dassert(gwbuf_compare(rhs, lhs) == 0); + + gwbuf_free(lhs); + gwbuf_free(rhs); } /** @@ -484,7 +490,7 @@ test1() ss_info_dassert(100000 == buflen, "Incorrect buffer size"); ss_info_dassert(buffer == extra, "The buffer pointer should now point to the extra buffer"); ss_dfprintf(stderr, "\t..done\n"); - + gwbuf_free(buffer); /** gwbuf_clone_all test */ size_t headsize = 10; GWBUF* head = gwbuf_alloc(headsize); @@ -501,6 +507,8 @@ test1() ss_info_dassert(GWBUF_LENGTH(all_clones) == headsize, "First buffer should be 10 bytes"); ss_info_dassert(GWBUF_LENGTH(all_clones->next) == tailsize, "Second buffer should be 20 bytes"); ss_info_dassert(gwbuf_length(all_clones) == headsize + tailsize, "Total buffer length should be 30 bytes"); + gwbuf_free(all_clones); + gwbuf_free(head); test_split(); test_load_and_copy(); From 8284716e6ac1f30b7138f4adc4bc41d98befb462 Mon Sep 17 00:00:00 2001 From: Johan Wikman Date: Thu, 30 Mar 2017 22:14:58 +0300 Subject: [PATCH 09/32] Add test for gwbuf_clone to testbuffer.c - Under valgrind, this test reveals the leak of gebuf_clone. 
--- server/core/test/testbuffer.c | 48 +++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/server/core/test/testbuffer.c b/server/core/test/testbuffer.c index ffed77791..e1473b30c 100644 --- a/server/core/test/testbuffer.c +++ b/server/core/test/testbuffer.c @@ -394,6 +394,53 @@ void test_compare() gwbuf_free(rhs); } + + +void test_clone() +{ + GWBUF* original = gwbuf_alloc_and_load(1, "1"); + + original = gwbuf_append(original, gwbuf_alloc_and_load(1, "1")); + original = gwbuf_append(original, gwbuf_alloc_and_load(2, "12")); + original = gwbuf_append(original, gwbuf_alloc_and_load(3, "123")); + original = gwbuf_append(original, gwbuf_alloc_and_load(5, "12345")); + original = gwbuf_append(original, gwbuf_alloc_and_load(8, "12345678")); + original = gwbuf_append(original, gwbuf_alloc_and_load(13, "1234567890123")); + original = gwbuf_append(original, gwbuf_alloc_and_load(21, "123456789012345678901")); + + GWBUF* clone = gwbuf_clone(original); + + GWBUF* o = original; + GWBUF* c = clone; + + ss_dassert(gwbuf_length(o) == gwbuf_length(c)); + + while (o) + { + ss_dassert(c); + ss_dassert(GWBUF_LENGTH(o) == GWBUF_LENGTH(c)); + + const char* i = (char*)GWBUF_DATA(o); + const char* end = i + GWBUF_LENGTH(o); + const char* j = (char*)GWBUF_DATA(c); + + while (i != end) + { + ss_dassert(*i == *j); + ++i; + ++j; + } + + o = o->next; + c = c->next; + } + + ss_dassert(c == NULL); + + gwbuf_free(clone); + gwbuf_free(original); +} + /** * test1 Allocate a buffer and do lots of things * @@ -514,6 +561,7 @@ test1() test_load_and_copy(); test_consume(); test_compare(); + test_clone(); return 0; } From 8a86efc30ee7f0269c8c54cf424ab57971213fe7 Mon Sep 17 00:00:00 2001 From: Johan Wikman Date: Thu, 30 Mar 2017 22:16:51 +0300 Subject: [PATCH 10/32] Fix gwbuf_clone With this change, the test_clone() test in testbuffer.c no longer causes a leak according to valgrind. --- server/core/buffer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/core/buffer.c b/server/core/buffer.c index cb10df951..fdf2f284f 100644 --- a/server/core/buffer.c +++ b/server/core/buffer.c @@ -339,7 +339,7 @@ GWBUF* gwbuf_clone(GWBUF* buf) while (clonebuf && buf->next) { buf = buf->next; - clonebuf->next = gwbuf_clone(buf); + clonebuf->next = gwbuf_clone_one(buf); clonebuf = clonebuf->next; } From 963750c2405e4d78b0981a66f3893ee4a9d3fad3 Mon Sep 17 00:00:00 2001 From: Johan Wikman Date: Fri, 31 Mar 2017 13:28:09 +0300 Subject: [PATCH 11/32] Update version number to 2.1.2 --- VERSION.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION.cmake b/VERSION.cmake index 8fb2cfc9c..0ea1a1f68 100644 --- a/VERSION.cmake +++ b/VERSION.cmake @@ -5,7 +5,7 @@ set(MAXSCALE_VERSION_MAJOR "2" CACHE STRING "Major version") set(MAXSCALE_VERSION_MINOR "1" CACHE STRING "Minor version") -set(MAXSCALE_VERSION_PATCH "1" CACHE STRING "Patch version") +set(MAXSCALE_VERSION_PATCH "2" CACHE STRING "Patch version") # This should only be incremented if a package is rebuilt set(MAXSCALE_BUILD_NUMBER 1 CACHE STRING "Release number") From cbc1e864d925ed689ac7f80632682c3965555a4a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Wed, 29 Mar 2017 23:30:34 +0300 Subject: [PATCH 12/32] Use RFC 3986 compliant addresses in log messages When log messages are written with both address and port information, IPv6 addresses can cause confusion if the normal address:port formatting is used. 
The RFC 3986 suggests that all IPv6 addresses are expressed as a bracket enclosed address optionally followed by the port that is separate from the address by a colon. In practice, the "all interfaces" address and port number 3306 can be written in IPv4 numbers-and-dots notation as 0.0.0.0:3306 and in IPv6 notation as [::]:3306. Using the latter format in log messages keeps the output consistent with all types of addresses. The details of the standard can be found at the following addresses: https://www.ietf.org/rfc/rfc3986.txt https://www.rfc-editor.org/std/std66.txt --- .../MaxScale-2.1.2-Release-Notes.md | 19 ++++++++++++++++++ server/core/dcb.c | 10 +++++----- server/core/monitor.c | 12 +++++------ server/core/service.c | 4 ++-- .../modules/authenticator/MySQLAuth/dbusers.c | 2 +- .../authenticator/MySQLAuth/mysql_auth.c | 2 +- server/modules/filter/mqfilter/mqfilter.c | 4 ++-- server/modules/monitor/auroramon/auroramon.c | 2 +- server/modules/monitor/galeramon/galeramon.c | 2 +- server/modules/monitor/mmmon/mmmon.c | 2 +- server/modules/monitor/mysqlmon/mysql_mon.c | 8 ++++---- .../monitor/ndbclustermon/ndbclustermon.c | 2 +- .../MySQL/MySQLBackend/mysql_backend.c | 6 +++--- server/modules/routing/avrorouter/avro.c | 2 +- server/modules/routing/binlogrouter/blr.c | 12 +++++------ .../modules/routing/binlogrouter/blr_master.c | 18 ++++++++--------- .../modules/routing/binlogrouter/blr_slave.c | 8 ++++---- .../routing/readwritesplit/readwritesplit.c | 10 +++++----- .../routing/readwritesplit/rwsplit_mysql.c | 2 +- .../readwritesplit/rwsplit_route_stmt.c | 12 +++++------ .../readwritesplit/rwsplit_select_backends.c | 14 ++++++------- .../readwritesplit/rwsplit_session_cmd.c | 2 +- .../routing/schemarouter/schemarouter.c | 20 +++++++++---------- 23 files changed, 97 insertions(+), 78 deletions(-) diff --git a/Documentation/Release-Notes/MaxScale-2.1.2-Release-Notes.md b/Documentation/Release-Notes/MaxScale-2.1.2-Release-Notes.md index 4dd0d16c4..dcbabfbde 100644 --- a/Documentation/Release-Notes/MaxScale-2.1.2-Release-Notes.md +++ b/Documentation/Release-Notes/MaxScale-2.1.2-Release-Notes.md @@ -15,6 +15,25 @@ report at [Jira](https://jira.mariadb.org). ## Changed Features +### Formatting of IP Addresses and Ports + +All messaging that contains both the address and the port are now printed in an +IPv6 compatible format. The output uses the format defined in +[RFC 3986] (https://www.ietf.org/rfc/rfc3986.txt) and +[STD 66] (https://www.rfc-editor.org/std/std66.txt). + +In practice this means that the address is enclosed by brackets. The port is +separated from the address by a colon. Here is an example of the new format: + +``` +[192.168.0.201]:3306 +[fe80::fa16:54ff:fe8f:7e56]:3306 +[localhost]:3306 +``` + +The first is an IPv4 address, the second an IPv6 address and the last one is a +hostname. All of the addresses use port 3306. 
+ ### Cache * The storage `storage_inmemory` is now the default, so the parameter diff --git a/server/core/dcb.c b/server/core/dcb.c index d591915bc..1e8ae2d0b 100644 --- a/server/core/dcb.c +++ b/server/core/dcb.c @@ -692,7 +692,7 @@ dcb_connect(SERVER *server, MXS_SESSION *session, const char *protocol) if (fd == DCBFD_CLOSED) { - MXS_DEBUG("%lu [dcb_connect] Failed to connect to server %s:%d, " + MXS_DEBUG("%lu [dcb_connect] Failed to connect to server [%s]:%d, " "from backend dcb %p, client dcp %p fd %d.", pthread_self(), server->name, @@ -706,7 +706,7 @@ dcb_connect(SERVER *server, MXS_SESSION *session, const char *protocol) } else { - MXS_DEBUG("%lu [dcb_connect] Connected to server %s:%d, " + MXS_DEBUG("%lu [dcb_connect] Connected to server [%s]:%d, " "from backend dcb %p, client dcp %p fd %d.", pthread_self(), server->name, @@ -3087,13 +3087,13 @@ int dcb_listen(DCB *listener, const char *config, const char *protocol_name) */ if (listen(listener_socket, INT_MAX) != 0) { - MXS_ERROR("Failed to start listening on '%s' with protocol '%s': %d, %s", - config, protocol_name, errno, mxs_strerror(errno)); + MXS_ERROR("Failed to start listening on '[%s]:%u' with protocol '%s': %d, %s", + host, port, protocol_name, errno, mxs_strerror(errno)); close(listener_socket); return -1; } - MXS_NOTICE("Listening for connections at %s with protocol %s", config, protocol_name); + MXS_NOTICE("Listening for connections at [%s]:%u with protocol %s", host, port, protocol_name); // assign listener_socket to dcb listener->fd = listener_socket; diff --git a/server/core/monitor.c b/server/core/monitor.c index f163b081e..b8cf33819 100644 --- a/server/core/monitor.c +++ b/server/core/monitor.c @@ -467,7 +467,7 @@ monitorShow(DCB *dcb, MXS_MONITOR *monitor) for (MXS_MONITOR_SERVERS *db = monitor->databases; db; db = db->next) { - dcb_printf(dcb, "%s%s:%d", sep, db->server->name, db->server->port); + dcb_printf(dcb, "%s[%s]:%d", sep, db->server->name, db->server->port); sep = ", "; } @@ -685,7 +685,7 @@ bool check_monitor_permissions(MXS_MONITOR* monitor, const char* query) { if (mon_connect_to_db(monitor, mondb) != MONITOR_CONN_OK) { - MXS_ERROR("[%s] Failed to connect to server '%s' (%s:%d) when" + MXS_ERROR("[%s] Failed to connect to server '%s' ([%s]:%d) when" " checking monitor user credentials and permissions: %s", monitor->name, mondb->server->unique_name, mondb->server->name, mondb->server->port, mysql_error(mondb->con)); @@ -965,7 +965,7 @@ static void mon_append_node_names(MXS_MONITOR_SERVERS* servers, char* dest, int { if (status == 0 || servers->server->status & status) { - snprintf(arr, sizeof(arr), "%s%s:%d", separator, servers->server->name, + snprintf(arr, sizeof(arr), "%s[%s]:%d", separator, servers->server->name, servers->server->port); separator = ","; int arrlen = strlen(arr); @@ -1049,7 +1049,7 @@ monitor_launch_script(MXS_MONITOR* mon, MXS_MONITOR_SERVERS* ptr, const char* sc if (externcmd_matches(cmd, "$INITIATOR")) { char initiator[strlen(ptr->server->name) + 24]; // Extra space for port - snprintf(initiator, sizeof(initiator), "%s:%d", ptr->server->name, ptr->server->port); + snprintf(initiator, sizeof(initiator), "[%s]:%d", ptr->server->name, ptr->server->port); externcmd_substitute_arg(cmd, "[$]INITIATOR", initiator); } @@ -1221,8 +1221,8 @@ void mon_log_connect_error(MXS_MONITOR_SERVERS* database, mxs_connect_result_t rval) { MXS_ERROR(rval == MONITOR_CONN_TIMEOUT ? 
- "Monitor timed out when connecting to server %s:%d : \"%s\"" : - "Monitor was unable to connect to server %s:%d : \"%s\"", + "Monitor timed out when connecting to server [%s]:%d : \"%s\"" : + "Monitor was unable to connect to server [%s]:%d : \"%s\"", database->server->name, database->server->port, mysql_error(database->con)); } diff --git a/server/core/service.c b/server/core/service.c index cd8dccb5f..4cc5f764b 100644 --- a/server/core/service.c +++ b/server/core/service.c @@ -1319,7 +1319,7 @@ printService(SERVICE *service) printf("\tBackend databases\n"); while (ptr) { - printf("\t\t%s:%d Protocol: %s\n", ptr->server->name, ptr->server->port, ptr->server->protocol); + printf("\t\t[%s]:%d Protocol: %s\n", ptr->server->name, ptr->server->port, ptr->server->protocol); ptr = ptr->next; } if (service->n_filters) @@ -1432,7 +1432,7 @@ void dprintService(DCB *dcb, SERVICE *service) { if (SERVER_REF_IS_ACTIVE(server)) { - dcb_printf(dcb, "\t\t%s:%d Protocol: %s Name: %s\n", + dcb_printf(dcb, "\t\t[%s]:%d Protocol: %s Name: %s\n", server->server->name, server->server->port, server->server->protocol, server->server->unique_name); } diff --git a/server/modules/authenticator/MySQLAuth/dbusers.c b/server/modules/authenticator/MySQLAuth/dbusers.c index 937d4cfbc..23e712c2f 100644 --- a/server/modules/authenticator/MySQLAuth/dbusers.c +++ b/server/modules/authenticator/MySQLAuth/dbusers.c @@ -494,7 +494,7 @@ static bool check_server_permissions(SERVICE *service, SERVER* server, { int my_errno = mysql_errno(mysql); - MXS_ERROR("[%s] Failed to connect to server '%s' (%s:%d) when" + MXS_ERROR("[%s] Failed to connect to server '%s' ([%s]:%d) when" " checking authentication user credentials and permissions: %d %s", service->name, server->unique_name, server->name, server->port, my_errno, mysql_error(mysql)); diff --git a/server/modules/authenticator/MySQLAuth/mysql_auth.c b/server/modules/authenticator/MySQLAuth/mysql_auth.c index f2c3d27ba..8aed71e89 100644 --- a/server/modules/authenticator/MySQLAuth/mysql_auth.c +++ b/server/modules/authenticator/MySQLAuth/mysql_auth.c @@ -349,7 +349,7 @@ mysql_auth_authenticate(DCB *dcb) } else if (dcb->service->log_auth_warnings) { - MXS_WARNING("%s: login attempt for user '%s'@%s:%d, authentication failed.", + MXS_WARNING("%s: login attempt for user '%s'@[%s]:%d, authentication failed.", dcb->service->name, client_data->user, dcb->remote, dcb_get_port(dcb)); if (is_localhost_address(&dcb->ip) && diff --git a/server/modules/filter/mqfilter/mqfilter.c b/server/modules/filter/mqfilter/mqfilter.c index 8f62eecd4..15b5a6cf2 100644 --- a/server/modules/filter/mqfilter/mqfilter.c +++ b/server/modules/filter/mqfilter/mqfilter.c @@ -1159,7 +1159,7 @@ routeQuery(MXS_FILTER *instance, MXS_FILTER_SESSION *session, GWBUF *queue) * Something matched the trigger, log the query */ - MXS_INFO("Routing message to: %s:%d %s as %s/%s, exchange: %s<%s> key:%s queue:%s", + MXS_INFO("Routing message to: [%s]:%d %s as %s/%s, exchange: %s<%s> key:%s queue:%s", my_instance->hostname, my_instance->port, my_instance->vhost, my_instance->username, my_instance->password, my_instance->exchange, @@ -1490,7 +1490,7 @@ diagnostic(MXS_FILTER *instance, MXS_FILTER_SESSION *fsession, DCB *dcb) if (my_instance) { - dcb_printf(dcb, "Connecting to %s:%d as '%s'.\nVhost: %s\tExchange: %s\nKey: %s\tQueue: %s\n\n", + dcb_printf(dcb, "Connecting to [%s]:%d as '%s'.\nVhost: %s\tExchange: %s\nKey: %s\tQueue: %s\n\n", my_instance->hostname, my_instance->port, my_instance->username, my_instance->vhost, 
my_instance->exchange, diff --git a/server/modules/monitor/auroramon/auroramon.c b/server/modules/monitor/auroramon/auroramon.c index baaacd220..6f83f1ecf 100644 --- a/server/modules/monitor/auroramon/auroramon.c +++ b/server/modules/monitor/auroramon/auroramon.c @@ -79,7 +79,7 @@ void update_server_status(MXS_MONITOR *monitor, MXS_MONITOR_SERVERS *database) } else { - MXS_ERROR("Failed to query server %s (%s:%d): %d, %s", + MXS_ERROR("Failed to query server %s ([%s]:%d): %d, %s", database->server->unique_name, database->server->name, database->server->port, mysql_errno(database->con), mysql_error(database->con)); diff --git a/server/modules/monitor/galeramon/galeramon.c b/server/modules/monitor/galeramon/galeramon.c index 5fe526311..b249b3949 100644 --- a/server/modules/monitor/galeramon/galeramon.c +++ b/server/modules/monitor/galeramon/galeramon.c @@ -448,7 +448,7 @@ monitorMain(void *arg) /* Log server status change */ if (mon_status_changed(ptr)) { - MXS_DEBUG("Backend server %s:%d state : %s", + MXS_DEBUG("Backend server [%s]:%d state : %s", ptr->server->name, ptr->server->port, STRSRVSTATUS(ptr->server)); diff --git a/server/modules/monitor/mmmon/mmmon.c b/server/modules/monitor/mmmon/mmmon.c index 1d149b7d0..ce12528e4 100644 --- a/server/modules/monitor/mmmon/mmmon.c +++ b/server/modules/monitor/mmmon/mmmon.c @@ -538,7 +538,7 @@ monitorMain(void *arg) if (mon_status_changed(ptr) || mon_print_fail_status(ptr)) { - MXS_DEBUG("Backend server %s:%d state : %s", + MXS_DEBUG("Backend server [%s]:%d state : %s", ptr->server->name, ptr->server->port, STRSRVSTATUS(ptr->server)); diff --git a/server/modules/monitor/mysqlmon/mysql_mon.c b/server/modules/monitor/mysqlmon/mysql_mon.c index 387f1e6e8..e8c90a4fa 100644 --- a/server/modules/monitor/mysqlmon/mysql_mon.c +++ b/server/modules/monitor/mysqlmon/mysql_mon.c @@ -547,7 +547,7 @@ static MXS_MONITOR_SERVERS *build_mysql51_replication_tree(MXS_MONITOR *mon) /* Set the Slave Role */ if (ismaster) { - MXS_DEBUG("Master server found at %s:%d with %d slaves", + MXS_DEBUG("Master server found at [%s]:%d with %d slaves", database->server->name, database->server->port, nslaves); @@ -1122,7 +1122,7 @@ monitorMain(void *arg) if (SRV_MASTER_STATUS(ptr->mon_prev_status)) { /** Master failed, can't recover */ - MXS_NOTICE("Server %s:%d lost the master status.", + MXS_NOTICE("Server [%s]:%d lost the master status.", ptr->server->name, ptr->server->port); } @@ -1131,12 +1131,12 @@ monitorMain(void *arg) if (mon_status_changed(ptr)) { #if defined(SS_DEBUG) - MXS_INFO("Backend server %s:%d state : %s", + MXS_INFO("Backend server [%s]:%d state : %s", ptr->server->name, ptr->server->port, STRSRVSTATUS(ptr->server)); #else - MXS_DEBUG("Backend server %s:%d state : %s", + MXS_DEBUG("Backend server [%s]:%d state : %s", ptr->server->name, ptr->server->port, STRSRVSTATUS(ptr->server)); diff --git a/server/modules/monitor/ndbclustermon/ndbclustermon.c b/server/modules/monitor/ndbclustermon/ndbclustermon.c index 1e40cf6c1..36d8ce5af 100644 --- a/server/modules/monitor/ndbclustermon/ndbclustermon.c +++ b/server/modules/monitor/ndbclustermon/ndbclustermon.c @@ -348,7 +348,7 @@ monitorMain(void *arg) if (ptr->server->status != ptr->mon_prev_status || SERVER_IS_DOWN(ptr->server)) { - MXS_DEBUG("Backend server %s:%d state : %s", + MXS_DEBUG("Backend server [%s]:%d state : %s", ptr->server->name, ptr->server->port, STRSRVSTATUS(ptr->server)); diff --git a/server/modules/protocol/MySQL/MySQLBackend/mysql_backend.c 
b/server/modules/protocol/MySQL/MySQLBackend/mysql_backend.c index e62ecab09..6276cae0f 100644 --- a/server/modules/protocol/MySQL/MySQLBackend/mysql_backend.c +++ b/server/modules/protocol/MySQL/MySQLBackend/mysql_backend.c @@ -280,7 +280,7 @@ static int gw_do_connect_to_backend(char *host, int port, int *fd) if (so == -1) { - MXS_ERROR("Establishing connection to backend server %s:%d failed.", host, port); + MXS_ERROR("Establishing connection to backend server [%s]:%d failed.", host, port); return rv; } @@ -294,7 +294,7 @@ static int gw_do_connect_to_backend(char *host, int port, int *fd) } else { - MXS_ERROR("Failed to connect backend server %s:%d due to: %d, %s.", + MXS_ERROR("Failed to connect backend server [%s]:%d due to: %d, %s.", host, port, errno, mxs_strerror(errno)); close(so); return rv; @@ -303,7 +303,7 @@ static int gw_do_connect_to_backend(char *host, int port, int *fd) *fd = so; MXS_DEBUG("%lu [gw_do_connect_to_backend] Connected to backend server " - "%s:%d, fd %d.", pthread_self(), host, port, so); + "[%s]:%d, fd %d.", pthread_self(), host, port, so); return rv; diff --git a/server/modules/routing/avrorouter/avro.c b/server/modules/routing/avrorouter/avro.c index 20cc5a9b1..31a8134b0 100644 --- a/server/modules/routing/avrorouter/avro.c +++ b/server/modules/routing/avrorouter/avro.c @@ -892,7 +892,7 @@ diagnostics(MXS_ROUTER *router, DCB *dcb) char sync_marker_hex[SYNC_MARKER_SIZE * 2 + 1]; dcb_printf(dcb, "\t\tClient UUID: %s\n", session->uuid); - dcb_printf(dcb, "\t\tClient_host_port: %s:%d\n", + dcb_printf(dcb, "\t\tClient_host_port: [%s]:%d\n", session->dcb->remote, dcb_get_port(session->dcb)); dcb_printf(dcb, "\t\tUsername: %s\n", session->dcb->user); dcb_printf(dcb, "\t\tClient DCB: %p\n", session->dcb); diff --git a/server/modules/routing/binlogrouter/blr.c b/server/modules/routing/binlogrouter/blr.c index ab6ea16b0..7501972db 100644 --- a/server/modules/routing/binlogrouter/blr.c +++ b/server/modules/routing/binlogrouter/blr.c @@ -1129,7 +1129,7 @@ closeSession(MXS_ROUTER *instance, MXS_ROUTER_SESSION *router_session) if (slave->state > 0) { - MXS_NOTICE("%s: Slave %s:%d, server id %d, disconnected after %ld seconds. " + MXS_NOTICE("%s: Slave [%s]:%d, server id %d, disconnected after %ld seconds. 
" "%d SQL commands, %d events sent (%lu bytes), binlog '%s', " "last position %lu", router->service->name, slave->dcb->remote, dcb_get_port(slave->dcb), @@ -1532,7 +1532,7 @@ diagnostics(MXS_ROUTER *router, DCB *dcb) dcb_printf(dcb, "\t\tSlave UUID: %s\n", session->uuid); } dcb_printf(dcb, - "\t\tSlave_host_port: %s:%d\n", + "\t\tSlave_host_port: [%s]:%d\n", session->dcb->remote, dcb_get_port(session->dcb)); dcb_printf(dcb, "\t\tUsername: %s\n", @@ -1772,7 +1772,7 @@ errorReply(MXS_ROUTER *instance, dcb_close(backend_dcb); MXS_ERROR("%s: Master connection error %lu '%s' in state '%s', " - "%s while connecting to master %s:%d", + "%s while connecting to master [%s]:%d", router->service->name, router->m_errno, router->m_errmsg, blrm_states[BLRM_TIMESTAMP], msg, router->service->dbref->server->name, @@ -1821,7 +1821,7 @@ errorReply(MXS_ROUTER *instance, spinlock_release(&router->lock); MXS_ERROR("%s: Master connection error %lu '%s' in state '%s', " - "%s attempting reconnect to master %s:%d", + "%s attempting reconnect to master [%s]:%d", router->service->name, mysql_errno, errmsg, blrm_states[router->master_state], msg, router->service->dbref->server->name, @@ -1830,7 +1830,7 @@ errorReply(MXS_ROUTER *instance, else { MXS_ERROR("%s: Master connection error %lu '%s' in state '%s', " - "%s attempting reconnect to master %s:%d", + "%s attempting reconnect to master [%s]:%d", router->service->name, router->m_errno, router->m_errmsg ? router->m_errmsg : "(memory failure)", blrm_states[router->master_state], msg, @@ -2468,7 +2468,7 @@ destroyInstance(MXS_ROUTER *instance) } } - MXS_INFO("%s is being stopped by MaxScale shudown. Disconnecting from master %s:%d, " + MXS_INFO("%s is being stopped by MaxScale shudown. Disconnecting from master [%s]:%d, " "read up to log %s, pos %lu, transaction safe pos %lu", inst->service->name, inst->service->dbref->server->name, diff --git a/server/modules/routing/binlogrouter/blr_master.c b/server/modules/routing/binlogrouter/blr_master.c index 65aa0e5cb..5d7fc04b0 100644 --- a/server/modules/routing/binlogrouter/blr_master.c +++ b/server/modules/routing/binlogrouter/blr_master.c @@ -203,7 +203,7 @@ blr_start_master(void* data) } router->master->remote = MXS_STRDUP_A(router->service->dbref->server->name); - MXS_NOTICE("%s: attempting to connect to master server %s:%d, binlog %s, pos %lu", + MXS_NOTICE("%s: attempting to connect to master server [%s]:%d, binlog %s, pos %lu", router->service->name, router->service->dbref->server->name, router->service->dbref->server->port, router->binlog_name, router->current_pos); @@ -706,7 +706,7 @@ blr_master_response(ROUTER_INSTANCE *router, GWBUF *buf) /* if semisync option is set, check for master semi-sync availability */ if (router->request_semi_sync) { - MXS_NOTICE("%s: checking Semi-Sync replication capability for master server %s:%d", + MXS_NOTICE("%s: checking Semi-Sync replication capability for master server [%s]:%d", router->service->name, router->service->dbref->server->name, router->service->dbref->server->port); @@ -739,7 +739,7 @@ blr_master_response(ROUTER_INSTANCE *router, GWBUF *buf) if (router->master_semi_sync == MASTER_SEMISYNC_NOT_AVAILABLE) { /* not installed */ - MXS_NOTICE("%s: master server %s:%d doesn't have semi_sync capability", + MXS_NOTICE("%s: master server [%s]:%d doesn't have semi_sync capability", router->service->name, router->service->dbref->server->name, router->service->dbref->server->port); @@ -753,7 +753,7 @@ blr_master_response(ROUTER_INSTANCE *router, GWBUF *buf) if 
(router->master_semi_sync == MASTER_SEMISYNC_DISABLED) { /* Installed but not enabled, right now */ - MXS_NOTICE("%s: master server %s:%d doesn't have semi_sync enabled right now, " + MXS_NOTICE("%s: master server [%s]:%d doesn't have semi_sync enabled right now, " "Requesting Semi-Sync Replication", router->service->name, router->service->dbref->server->name, @@ -762,7 +762,7 @@ blr_master_response(ROUTER_INSTANCE *router, GWBUF *buf) else { /* Installed and enabled */ - MXS_NOTICE("%s: master server %s:%d has semi_sync enabled, Requesting Semi-Sync Replication", + MXS_NOTICE("%s: master server [%s]:%d has semi_sync enabled, Requesting Semi-Sync Replication", router->service->name, router->service->dbref->server->name, router->service->dbref->server->port); @@ -803,7 +803,7 @@ blr_master_response(ROUTER_INSTANCE *router, GWBUF *buf) router->master->func.write(router->master, buf); MXS_NOTICE("%s: Request binlog records from %s at " - "position %lu from master server %s:%d", + "position %lu from master server [%s]:%d", router->service->name, router->binlog_name, router->current_pos, router->service->dbref->server->name, @@ -1512,7 +1512,7 @@ blr_handle_binlog_record(ROUTER_INSTANCE *router, GWBUF *pkt) MXS_DEBUG("%s: binlog record in file %s, pos %lu has " "SEMI_SYNC_ACK_REQ and needs a Semi-Sync ACK packet to " - "be sent to the master server %s:%d", + "be sent to the master server [%s]:%d", router->service->name, router->binlog_name, router->current_pos, router->service->dbref->server->name, @@ -2144,7 +2144,7 @@ blr_check_heartbeat(ROUTER_INSTANCE *router) { if ((t_now - router->stats.lastReply) > (router->heartbeat + BLR_NET_LATENCY_WAIT_TIME)) { - MXS_ERROR("No event received from master %s:%d in heartbeat period (%lu seconds), " + MXS_ERROR("No event received from master [%s]:%d in heartbeat period (%lu seconds), " "last event (%s %d) received %lu seconds ago. Assuming connection is dead " "and reconnecting.", router->service->dbref->server->name, @@ -2408,7 +2408,7 @@ bool blr_send_event(blr_thread_role_t role, } else { - MXS_ERROR("Failed to send an event of %u bytes to slave at %s:%d.", + MXS_ERROR("Failed to send an event of %u bytes to slave at [%s]:%d.", hdr->event_size, slave->dcb->remote, dcb_get_port(slave->dcb)); } diff --git a/server/modules/routing/binlogrouter/blr_slave.c b/server/modules/routing/binlogrouter/blr_slave.c index 696cf557c..b3d94ae31 100644 --- a/server/modules/routing/binlogrouter/blr_slave.c +++ b/server/modules/routing/binlogrouter/blr_slave.c @@ -2174,7 +2174,7 @@ blr_slave_binlog_dump(ROUTER_INSTANCE *router, ROUTER_SLAVE *slave, GWBUF *queue slave->state = BLRS_DUMPING; - MXS_NOTICE("%s: Slave %s:%d, server id %d requested binlog file %s from position %lu", + MXS_NOTICE("%s: Slave [%s]:%d, server id %d requested binlog file %s from position %lu", router->service->name, slave->dcb->remote, dcb_get_port(slave->dcb), slave->serverid, @@ -2670,7 +2670,7 @@ blr_slave_catchup(ROUTER_INSTANCE *router, ROUTER_SLAVE *slave, bool large) * but the new binlog file has not yet been created. Therefore * we ignore these issues during the rotate processing. */ - MXS_ERROR("%s: Slave %s:%d, server-id %d reached end of file for binlog file %s " + MXS_ERROR("%s: Slave [%s]:%d, server-id %d reached end of file for binlog file %s " "at %lu which is not the file currently being downloaded. " "Master binlog is %s, %lu. 
This may be caused by a " "previous failure of the master.", @@ -3468,7 +3468,7 @@ blr_stop_slave(ROUTER_INSTANCE* router, ROUTER_SLAVE* slave) spinlock_release(&router->lock); - MXS_NOTICE("%s: STOP SLAVE executed by %s@%s. Disconnecting from master %s:%d, " + MXS_NOTICE("%s: STOP SLAVE executed by %s@%s. Disconnecting from master [%s]:%d, " "read up to log %s, pos %lu, transaction safe pos %lu", router->service->name, slave->dcb->user, @@ -3626,7 +3626,7 @@ blr_start_slave(ROUTER_INSTANCE* router, ROUTER_SLAVE* slave) /** Start replication from master */ blr_start_master(router); - MXS_NOTICE("%s: START SLAVE executed by %s@%s. Trying connection to master %s:%d, " + MXS_NOTICE("%s: START SLAVE executed by %s@%s. Trying connection to master [%s]:%d, " "binlog %s, pos %lu, transaction safe pos %lu", router->service->name, slave->dcb->user, diff --git a/server/modules/routing/readwritesplit/readwritesplit.c b/server/modules/routing/readwritesplit/readwritesplit.c index 7b0e3da46..89a8a4d50 100644 --- a/server/modules/routing/readwritesplit/readwritesplit.c +++ b/server/modules/routing/readwritesplit/readwritesplit.c @@ -762,14 +762,14 @@ static void clientReply(MXS_ROUTER *instance, { bool succp; - MXS_INFO("Backend %s:%d processed reply and starts to execute active cursor.", + MXS_INFO("Backend [%s]:%d processed reply and starts to execute active cursor.", bref->ref->server->name, bref->ref->server->port); succp = execute_sescmd_in_backend(bref); if (!succp) { - MXS_INFO("Backend %s:%d failed to execute session command.", + MXS_INFO("Backend [%s]:%d failed to execute session command.", bref->ref->server->name, bref->ref->server->port); } } @@ -1087,7 +1087,7 @@ int router_handle_state_switch(DCB *dcb, DCB_REASON reason, void *data) goto return_rc; } - MXS_DEBUG("%lu [router_handle_state_switch] %s %s:%d in state %s", + MXS_DEBUG("%lu [router_handle_state_switch] %s [%s]:%d in state %s", pthread_self(), STRDCBREASON(reason), srv->name, srv->port, STRSRVSTATUS(srv)); CHK_SESSION(((MXS_SESSION *)dcb->session)); @@ -1318,7 +1318,7 @@ static void handleError(MXS_ROUTER *instance, } else if (!SERVER_IS_MASTER(srv) && !srv->master_err_is_logged) { - MXS_ERROR("Server %s:%d lost the master status. Readwritesplit " + MXS_ERROR("Server [%s]:%d lost the master status. Readwritesplit " "service can't locate the master. Client sessions " "will be closed.", srv->name, srv->port); srv->master_err_is_logged = true; @@ -1336,7 +1336,7 @@ static void handleError(MXS_ROUTER *instance, } else { - MXS_ERROR("Server %s:%d lost the master status but could not locate the " + MXS_ERROR("Server [%s]:%d lost the master status but could not locate the " "corresponding backend ref.", srv->name, srv->port); } } diff --git a/server/modules/routing/readwritesplit/rwsplit_mysql.c b/server/modules/routing/readwritesplit/rwsplit_mysql.c index 68ca4a3ef..e4e4e0d6e 100644 --- a/server/modules/routing/readwritesplit/rwsplit_mysql.c +++ b/server/modules/routing/readwritesplit/rwsplit_mysql.c @@ -369,7 +369,7 @@ void check_session_command_reply(GWBUF *writebuf, sescmd_cursor_t *scur, backend ss_dassert(len + 4 == GWBUF_LENGTH(scur->scmd_cur_cmd->my_sescmd_buf)); - MXS_ERROR("Failed to execute session command in %s:%d. Error was: %s %s", + MXS_ERROR("Failed to execute session command in [%s]:%d. 
Error was: %s %s", bref->ref->server->name, bref->ref->server->port, err, replystr); MXS_FREE(err); diff --git a/server/modules/routing/readwritesplit/rwsplit_route_stmt.c b/server/modules/routing/readwritesplit/rwsplit_route_stmt.c index 5f020065f..98277e048 100644 --- a/server/modules/routing/readwritesplit/rwsplit_route_stmt.c +++ b/server/modules/routing/readwritesplit/rwsplit_route_stmt.c @@ -209,7 +209,7 @@ bool route_session_write(ROUTER_CLIENT_SES *router_cli_ses, if (MXS_LOG_PRIORITY_IS_ENABLED(LOG_INFO) && BREF_IS_IN_USE((&backend_ref[i]))) { - MXS_INFO("Route query to %s \t%s:%d%s", + MXS_INFO("Route query to %s \t[%s]:%d%s", (SERVER_IS_MASTER(backend_ref[i].ref->server) ? "master" : "slave"), backend_ref[i].ref->server->name, @@ -316,7 +316,7 @@ bool route_session_write(ROUTER_CLIENT_SES *router_cli_ses, if (MXS_LOG_PRIORITY_IS_ENABLED(LOG_INFO)) { - MXS_INFO("Route query to %s \t%s:%d%s", + MXS_INFO("Route query to %s \t[%s]:%d%s", (SERVER_IS_MASTER(backend_ref[i].ref->server) ? "master" : "slave"), backend_ref[i].ref->server->name, @@ -339,7 +339,7 @@ bool route_session_write(ROUTER_CLIENT_SES *router_cli_ses, if (sescmd_cursor_is_active(scur) && &backend_ref[i] != router_cli_ses->rses_master_ref) { nsucc += 1; - MXS_INFO("Backend %s:%d already executing sescmd.", + MXS_INFO("Backend [%s]:%d already executing sescmd.", backend_ref[i].ref->server->name, backend_ref[i].ref->server->port); } @@ -351,7 +351,7 @@ bool route_session_write(ROUTER_CLIENT_SES *router_cli_ses, } else { - MXS_ERROR("Failed to execute session command in %s:%d", + MXS_ERROR("Failed to execute session command in [%s]:%d", backend_ref[i].ref->server->name, backend_ref[i].ref->server->port); } @@ -607,7 +607,7 @@ bool rwsplit_get_dcb(DCB **p_dcb, ROUTER_CLIENT_SES *rses, backend_type_t btype, } else { - MXS_INFO("Server %s:%d is too much behind the master, %d s. and can't be chosen.", + MXS_INFO("Server [%s]:%d is too much behind the master, %d s. and can't be chosen.", b->server->name, b->server->port, b->server->rlag); } } @@ -1230,7 +1230,7 @@ handle_got_target(ROUTER_INSTANCE *inst, ROUTER_CLIENT_SES *rses, ss_dassert(target_dcb != NULL); - MXS_INFO("Route query to %s \t%s:%d <", + MXS_INFO("Route query to %s \t[%s]:%d <", (SERVER_IS_MASTER(bref->ref->server) ? "master" : "slave"), bref->ref->server->name, bref->ref->server->port); /** diff --git a/server/modules/routing/readwritesplit/rwsplit_select_backends.c b/server/modules/routing/readwritesplit/rwsplit_select_backends.c index 2309f9cb8..b0e812ea8 100644 --- a/server/modules/routing/readwritesplit/rwsplit_select_backends.c +++ b/server/modules/routing/readwritesplit/rwsplit_select_backends.c @@ -277,7 +277,7 @@ bool select_connect_backend_servers(backend_ref_t **p_master_ref, { if (BREF_IS_IN_USE((&backend_ref[i]))) { - MXS_INFO("Selected %s in \t%s:%d", + MXS_INFO("Selected %s in \t[%s]:%d", STRSRVSTATUS(backend_ref[i].ref->server), backend_ref[i].ref->server->name, backend_ref[i].ref->server->port); @@ -443,7 +443,7 @@ static bool connect_server(backend_ref_t *bref, MXS_SESSION *session, bool execu } else { - MXS_ERROR("Failed to execute session command in %s (%s:%d). See earlier " + MXS_ERROR("Failed to execute session command in %s ([%s]:%d). 
See earlier " "errors for more details.", bref->ref->server->unique_name, bref->ref->server->name, @@ -456,7 +456,7 @@ static bool connect_server(backend_ref_t *bref, MXS_SESSION *session, bool execu } else { - MXS_ERROR("Unable to establish connection with server %s:%d", + MXS_ERROR("Unable to establish connection with server [%s]:%d", serv->name, serv->port); } @@ -489,26 +489,26 @@ static void log_server_connections(select_criteria_t select_criteria, switch (select_criteria) { case LEAST_GLOBAL_CONNECTIONS: - MXS_INFO("MaxScale connections : %d in \t%s:%d %s", + MXS_INFO("MaxScale connections : %d in \t[%s]:%d %s", b->server->stats.n_current, b->server->name, b->server->port, STRSRVSTATUS(b->server)); break; case LEAST_ROUTER_CONNECTIONS: - MXS_INFO("RWSplit connections : %d in \t%s:%d %s", + MXS_INFO("RWSplit connections : %d in \t[%s]:%d %s", b->connections, b->server->name, b->server->port, STRSRVSTATUS(b->server)); break; case LEAST_CURRENT_OPERATIONS: - MXS_INFO("current operations : %d in \t%s:%d %s", + MXS_INFO("current operations : %d in \t[%s]:%d %s", b->server->stats.n_current_ops, b->server->name, b->server->port, STRSRVSTATUS(b->server)); break; case LEAST_BEHIND_MASTER: - MXS_INFO("replication lag : %d in \t%s:%d %s", + MXS_INFO("replication lag : %d in \t[%s]:%d %s", b->server->rlag, b->server->name, b->server->port, STRSRVSTATUS(b->server)); default: diff --git a/server/modules/routing/readwritesplit/rwsplit_session_cmd.c b/server/modules/routing/readwritesplit/rwsplit_session_cmd.c index 72b474073..c22687fd5 100644 --- a/server/modules/routing/readwritesplit/rwsplit_session_cmd.c +++ b/server/modules/routing/readwritesplit/rwsplit_session_cmd.c @@ -216,7 +216,7 @@ GWBUF *sescmd_cursor_process_replies(GWBUF *replybuf, RW_CLOSE_BREF(&ses->rses_backend_ref[i]); } *reconnect = true; - MXS_INFO("Disabling slave %s:%d, result differs from " + MXS_INFO("Disabling slave [%s]:%d, result differs from " "master's result. Master: %d Slave: %d", ses->rses_backend_ref[i].ref->server->name, ses->rses_backend_ref[i].ref->server->port, diff --git a/server/modules/routing/schemarouter/schemarouter.c b/server/modules/routing/schemarouter/schemarouter.c index e4beff9d3..9baf801d4 100644 --- a/server/modules/routing/schemarouter/schemarouter.c +++ b/server/modules/routing/schemarouter/schemarouter.c @@ -2065,7 +2065,7 @@ static int routeQuery(MXS_ROUTER* instance, bref = get_bref_from_dcb(router_cli_ses, target_dcb); scur = &bref->bref_sescmd_cur; - MXS_INFO("Route query to \t%s:%d <", + MXS_INFO("Route query to \t[%s]:%d <", bref->bref_backend->server->name, bref->bref_backend->server->port); /** @@ -2413,7 +2413,7 @@ static void clientReply(MXS_ROUTER* instance, ss_dassert(len + 4 == GWBUF_LENGTH(scur->scmd_cur_cmd->my_sescmd_buf)); - MXS_ERROR("Failed to execute %s in %s:%d. %s %s", + MXS_ERROR("Failed to execute %s in [%s]:%d. 
%s %s", cmdstr, bref->bref_backend->server->name, bref->bref_backend->server->port, @@ -2481,7 +2481,7 @@ static void clientReply(MXS_ROUTER* instance, if (sescmd_cursor_is_active(scur)) { - MXS_INFO("Backend %s:%d processed reply and starts to execute " + MXS_INFO("Backend [%s]:%d processed reply and starts to execute " "active cursor.", bref->bref_backend->server->name, bref->bref_backend->server->port); @@ -2714,7 +2714,7 @@ static bool connect_backend_servers(backend_ref_t* backend_ref, { SERVER_REF* b = backend_ref[i].bref_backend; - MXS_INFO("MaxScale connections : %d (%d) in \t%s:%d %s", + MXS_INFO("MaxScale connections : %d (%d) in \t[%s]:%d %s", b->connections, b->server->stats.n_current, b->server->name, @@ -2782,7 +2782,7 @@ static bool connect_backend_servers(backend_ref_t* backend_ref, { succp = false; MXS_ERROR("Unable to establish " - "connection with slave %s:%d", + "connection with slave [%s]:%d", b->server->name, b->server->port); /* handle connect error */ @@ -2822,7 +2822,7 @@ static bool connect_backend_servers(backend_ref_t* backend_ref, if (BREF_IS_IN_USE((&backend_ref[i]))) { - MXS_INFO("Connected %s in \t%s:%d", + MXS_INFO("Connected %s in \t[%s]:%d", STRSRVSTATUS(b->server), b->server->name, b->server->port); @@ -3381,7 +3381,7 @@ static bool route_session_write(ROUTER_CLIENT_SES* router_cli_ses, if (MXS_LOG_PRIORITY_IS_ENABLED(LOG_INFO)) { - MXS_INFO("Route query to %s\t%s:%d%s", + MXS_INFO("Route query to %s\t[%s]:%d%s", (SERVER_IS_MASTER(backend_ref[i].bref_backend->server) ? "master" : "slave"), backend_ref[i].bref_backend->server->name, @@ -3488,7 +3488,7 @@ static bool route_session_write(ROUTER_CLIENT_SES* router_cli_ses, if (MXS_LOG_PRIORITY_IS_ENABLED(LOG_INFO)) { - MXS_INFO("Route query to %s\t%s:%d%s", + MXS_INFO("Route query to %s\t[%s]:%d%s", (SERVER_IS_MASTER(backend_ref[i].bref_backend->server) ? "master" : "slave"), backend_ref[i].bref_backend->server->name, @@ -3513,7 +3513,7 @@ static bool route_session_write(ROUTER_CLIENT_SES* router_cli_ses, { succp = true; - MXS_INFO("Backend %s:%d already executing sescmd.", + MXS_INFO("Backend [%s]:%d already executing sescmd.", backend_ref[i].bref_backend->server->name, backend_ref[i].bref_backend->server->port); } @@ -3524,7 +3524,7 @@ static bool route_session_write(ROUTER_CLIENT_SES* router_cli_ses, if (!succp) { MXS_ERROR("Failed to execute session " - "command in %s:%d", + "command in [%s]:%d", backend_ref[i].bref_backend->server->name, backend_ref[i].bref_backend->server->port); } From 70c834026c083af81dcfc2bcac79390035891a4c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Thu, 30 Mar 2017 09:56:33 +0300 Subject: [PATCH 13/32] Update launchable script documentation Updated examples with new address formatting. 
--- Documentation/Monitors/Monitor-Common.md | 35 ++++++++++++------------ 1 file changed, 17 insertions(+), 18 deletions(-) diff --git a/Documentation/Monitors/Monitor-Common.md b/Documentation/Monitors/Monitor-Common.md index fd77d5dd1..7d95dc283 100644 --- a/Documentation/Monitors/Monitor-Common.md +++ b/Documentation/Monitors/Monitor-Common.md @@ -57,7 +57,7 @@ The following substitutions will be made to the parameter value: For example, the previous example will be executed as: ``` -/home/user/myscript.sh initiator=192.168.0.10:3306 event=master_down live_nodes=192.168.0.201:3306,192.168.0.121:3306 +/home/user/myscript.sh initiator=[192.168.0.10]:3306 event=master_down live_nodes=[192.168.0.201]:3306,[192.168.0.121]:3306 ``` ### `events` @@ -72,20 +72,19 @@ events=master_down,slave_down Here is a table of all possible event types and their descriptions that the monitors can be called with. -Event Name|Description -----------|---------- -master_down|A Master server has gone down -master_up|A Master server has come up -slave_down|A Slave server has gone down -slave_up|A Slave server has come up -server_down|A server with no assigned role has gone down -server_up|A server with no assigned role has come up -ndb_down|A MySQL Cluster node has gone down -ndb_up|A MySQL Cluster node has come up -lost_master|A server lost Master status -lost_slave|A server lost Slave status -lost_ndb|A MySQL Cluster node lost node membership -new_master|A new Master was detected -new_slave|A new Slave was detected -new_ndb|A new MySQL Cluster node was found - +Event Name |Description +------------|---------- +master_down |A Master server has gone down +master_up |A Master server has come up +slave_down |A Slave server has gone down +slave_up |A Slave server has come up +server_down |A server with no assigned role has gone down +server_up |A server with no assigned role has come up +ndb_down |A MySQL Cluster node has gone down +ndb_up |A MySQL Cluster node has come up +lost_master |A server lost Master status +lost_slave |A server lost Slave status +lost_ndb |A MySQL Cluster node lost node membership +new_master |A new Master was detected +new_slave |A new Slave was detected +new_ndb |A new MySQL Cluster node was found From 4abdfa2ff7c91baa378fa66e705cdcd9cf8d7356 Mon Sep 17 00:00:00 2001 From: Johan Wikman Date: Fri, 31 Mar 2017 13:33:58 +0300 Subject: [PATCH 14/32] Update version number in branch 2.1 The next release will be 2.1.3, so better to have it in place now. 
--- VERSION.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION.cmake b/VERSION.cmake index 0ea1a1f68..c79617932 100644 --- a/VERSION.cmake +++ b/VERSION.cmake @@ -5,7 +5,7 @@ set(MAXSCALE_VERSION_MAJOR "2" CACHE STRING "Major version") set(MAXSCALE_VERSION_MINOR "1" CACHE STRING "Minor version") -set(MAXSCALE_VERSION_PATCH "2" CACHE STRING "Patch version") +set(MAXSCALE_VERSION_PATCH "3" CACHE STRING "Patch version") # This should only be incremented if a package is rebuilt set(MAXSCALE_BUILD_NUMBER 1 CACHE STRING "Release number") From 84ea6e6c8d8363454442d9e33fd9de9e215d82ec Mon Sep 17 00:00:00 2001 From: Johan Wikman Date: Fri, 31 Mar 2017 13:37:25 +0300 Subject: [PATCH 15/32] Update ChangeLog --- Documentation/Changelog.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Documentation/Changelog.md b/Documentation/Changelog.md index eb6d1760f..941c717c1 100644 --- a/Documentation/Changelog.md +++ b/Documentation/Changelog.md @@ -18,8 +18,10 @@ * Prepared statements are now in the database firewall filtered exactly like non-prepared statements. * The firewall filter can now filter based on function usage. +* MaxScale now supports IPv6 For more details, please refer to: +* [MariaDB MaxScale 2.1.2 Release Notes](Release-Notes/MaxScale-2.1.2-Release-Notes.md) * [MariaDB MaxScale 2.1.1 Release Notes](Release-Notes/MaxScale-2.1.1-Release-Notes.md) * [MariaDB MaxScale 2.1.0 Release Notes](Release-Notes/MaxScale-2.1.0-Release-Notes.md) From e6d2c96f5ba2464d3293b11eac3491fbe385ef65 Mon Sep 17 00:00:00 2001 From: MassimilianoPinto Date: Fri, 31 Mar 2017 10:11:54 +0200 Subject: [PATCH 16/32] MXS-1211: maxrows should be configurable to return error when limit has been exceeded New parameter added to maxsrows filter: max_resultset_return=empty|error|ok Default, 'empty' is to return an empty set, as the current implementation. 'err' will return an ERR reply with the input SQL statement 'ok' will return an OK packet --- include/maxscale/protocol/mysql.h | 1 + server/modules/filter/maxrows/maxrows.c | 341 +++++++++++++++++++++--- server/modules/filter/maxrows/maxrows.h | 2 + 3 files changed, 302 insertions(+), 42 deletions(-) diff --git a/include/maxscale/protocol/mysql.h b/include/maxscale/protocol/mysql.h index e0d43b322..0d00ef0c8 100644 --- a/include/maxscale/protocol/mysql.h +++ b/include/maxscale/protocol/mysql.h @@ -74,6 +74,7 @@ MXS_BEGIN_DECLS #define MYSQL_CHECKSUM_LEN 4 #define MYSQL_EOF_PACKET_LEN 9 #define MYSQL_OK_PACKET_MIN_LEN 11 +#define MYSQL_ERR_PACKET_MIN_LEN 9 /** * Offsets and sizes of various parts of the client packet. 
If the offset is diff --git a/server/modules/filter/maxrows/maxrows.c b/server/modules/filter/maxrows/maxrows.c index 0edac3543..77b691ffc 100644 --- a/server/modules/filter/maxrows/maxrows.c +++ b/server/modules/filter/maxrows/maxrows.c @@ -46,17 +46,47 @@ #include #include "maxrows.h" -static MXS_FILTER *createInstance(const char *name, char **options, MXS_CONFIG_PARAMETER *); -static MXS_FILTER_SESSION *newSession(MXS_FILTER *instance, MXS_SESSION *session); -static void closeSession(MXS_FILTER *instance, MXS_FILTER_SESSION *sdata); -static void freeSession(MXS_FILTER *instance, MXS_FILTER_SESSION *sdata); -static void setDownstream(MXS_FILTER *instance, MXS_FILTER_SESSION *sdata, MXS_DOWNSTREAM *downstream); -static void setUpstream(MXS_FILTER *instance, MXS_FILTER_SESSION *sdata, MXS_UPSTREAM *upstream); -static int routeQuery(MXS_FILTER *instance, MXS_FILTER_SESSION *sdata, GWBUF *queue); -static int clientReply(MXS_FILTER *instance, MXS_FILTER_SESSION *sdata, GWBUF *queue); -static void diagnostics(MXS_FILTER *instance, MXS_FILTER_SESSION *sdata, DCB *dcb); +static MXS_FILTER *createInstance(const char *name, + char **options, + MXS_CONFIG_PARAMETER *); +static MXS_FILTER_SESSION *newSession(MXS_FILTER *instance, + MXS_SESSION *session); +static void closeSession(MXS_FILTER *instance, + MXS_FILTER_SESSION *sdata); +static void freeSession(MXS_FILTER *instance, + MXS_FILTER_SESSION *sdata); +static void setDownstream(MXS_FILTER *instance, + MXS_FILTER_SESSION *sdata, + MXS_DOWNSTREAM *downstream); +static void setUpstream(MXS_FILTER *instance, + MXS_FILTER_SESSION *sdata, + MXS_UPSTREAM *upstream); +static int routeQuery(MXS_FILTER *instance, + MXS_FILTER_SESSION *sdata, + GWBUF *queue); +static int clientReply(MXS_FILTER *instance, + MXS_FILTER_SESSION *sdata, + GWBUF *queue); +static void diagnostics(MXS_FILTER *instance, + MXS_FILTER_SESSION *sdata, + DCB *dcb); static uint64_t getCapabilities(MXS_FILTER *instance); +enum maxrows_return_mode +{ + MAXROWS_RETURN_EMPTY = 0, + MAXROWS_RETURN_ERR, + MAXROWS_RETURN_OK +}; + +static const MXS_ENUM_VALUE return_option_values[] = +{ + {"empty", MAXROWS_RETURN_EMPTY}, + {"error", MAXROWS_RETURN_ERR}, + {"ok", MAXROWS_RETURN_OK}, + {NULL} +}; + /* Global symbols of the Module */ /** @@ -109,6 +139,13 @@ MXS_MODULE* MXS_CREATE_MODULE() MXS_MODULE_PARAM_COUNT, MAXROWS_DEFAULT_DEBUG }, + { + "max_resultset_return", + MXS_MODULE_PARAM_ENUM, + "empty", + MXS_MODULE_OPT_ENUM_UNIQUE, + return_option_values + }, {MXS_END_MODULE_PARAMS} } }; @@ -120,9 +157,10 @@ MXS_MODULE* MXS_CREATE_MODULE() typedef struct maxrows_config { - uint32_t max_resultset_rows; - uint32_t max_resultset_size; - uint32_t debug; + uint32_t max_resultset_rows; + uint32_t max_resultset_size; + uint32_t debug; + enum maxrows_return_mode m_return; } MAXROWS_CONFIG; typedef struct maxrows_instance @@ -154,7 +192,7 @@ static void maxrows_response_state_reset(MAXROWS_RESPONSE_STATE *state); typedef struct maxrows_session_data { - MAXROWS_INSTANCE *instance; /**< The maxrows instance the session is associated with. */ + MAXROWS_INSTANCE *instance; /**< The maxrows instance the session is associated with. */ MXS_DOWNSTREAM down; /**< The previous filter or equivalent. */ MXS_UPSTREAM up; /**< The next filter or equivalent. */ MAXROWS_RESPONSE_STATE res; /**< The response state. 
*/ @@ -162,9 +200,11 @@ typedef struct maxrows_session_data maxrows_session_state_t state; bool large_packet; /**< Large packet (> 16MB)) indicator */ bool discard_resultset; /**< Discard resultset indicator */ + GWBUF *input_sql; /**< Input query */ } MAXROWS_SESSION_DATA; -static MAXROWS_SESSION_DATA *maxrows_session_data_create(MAXROWS_INSTANCE *instance, MXS_SESSION *session); +static MAXROWS_SESSION_DATA *maxrows_session_data_create(MAXROWS_INSTANCE *instance, + MXS_SESSION *session); static void maxrows_session_data_free(MAXROWS_SESSION_DATA *data); static int handle_expecting_fields(MAXROWS_SESSION_DATA *csdata); @@ -172,10 +212,14 @@ static int handle_expecting_nothing(MAXROWS_SESSION_DATA *csdata); static int handle_expecting_response(MAXROWS_SESSION_DATA *csdata); static int handle_rows(MAXROWS_SESSION_DATA *csdata); static int handle_ignoring_response(MAXROWS_SESSION_DATA *csdata); -static bool process_params(char **options, MXS_CONFIG_PARAMETER *params, MAXROWS_CONFIG* config); +static bool process_params(char **options, + MXS_CONFIG_PARAMETER *params, + MAXROWS_CONFIG* config); static int send_upstream(MAXROWS_SESSION_DATA *csdata); -static int send_eof_upstream(MAXROWS_SESSION_DATA *csdata, size_t offset); +static int send_eof_upstream(MAXROWS_SESSION_DATA *csdata); +static int send_error_upstream(MAXROWS_SESSION_DATA *csdata); +static int send_maxrows_reply_limit(MAXROWS_SESSION_DATA *csdata); /* API BEGIN */ @@ -189,15 +233,22 @@ static int send_eof_upstream(MAXROWS_SESSION_DATA *csdata, size_t offset); * * @return The instance data for this new instance */ -static MXS_FILTER *createInstance(const char *name, char **options, MXS_CONFIG_PARAMETER *params) +static MXS_FILTER *createInstance(const char *name, + char **options, + MXS_CONFIG_PARAMETER *params) { MAXROWS_INSTANCE *cinstance = MXS_CALLOC(1, sizeof(MAXROWS_INSTANCE)); if (cinstance) { cinstance->name = name; - cinstance->config.max_resultset_rows = config_get_integer(params, "max_resultset_rows"); - cinstance->config.max_resultset_size = config_get_integer(params, "max_resultset_size"); + cinstance->config.max_resultset_rows = config_get_integer(params, + "max_resultset_rows"); + cinstance->config.max_resultset_size = config_get_integer(params, + "max_resultset_size"); + cinstance->config.m_return = config_get_enum(params, + "max_resultset_return", + return_option_values); cinstance->config.debug = config_get_integer(params, "debug"); } @@ -283,7 +334,9 @@ static void setUpstream(MXS_FILTER *instance, MXS_FILTER_SESSION *sdata, MXS_UPS * @param sdata The filter session data * @param buffer Buffer containing an MySQL protocol packet. 
*/ -static int routeQuery(MXS_FILTER *instance, MXS_FILTER_SESSION *sdata, GWBUF *packet) +static int routeQuery(MXS_FILTER *instance, + MXS_FILTER_SESSION *sdata, + GWBUF *packet) { MAXROWS_INSTANCE *cinstance = (MAXROWS_INSTANCE*)instance; MAXROWS_SESSION_DATA *csdata = (MAXROWS_SESSION_DATA*)sdata; @@ -293,7 +346,8 @@ static int routeQuery(MXS_FILTER *instance, MXS_FILTER_SESSION *sdata, GWBUF *pa // All of these should be guaranteed by RCAP_TYPE_TRANSACTION_TRACKING ss_dassert(GWBUF_IS_CONTIGUOUS(packet)); ss_dassert(GWBUF_LENGTH(packet) >= MYSQL_HEADER_LEN + 1); - ss_dassert(MYSQL_GET_PAYLOAD_LEN(data) + MYSQL_HEADER_LEN == GWBUF_LENGTH(packet)); + ss_dassert(MYSQL_GET_PAYLOAD_LEN(data) + + MYSQL_HEADER_LEN == GWBUF_LENGTH(packet)); maxrows_response_state_reset(&csdata->res); csdata->state = MAXROWS_IGNORING_RESPONSE; @@ -305,6 +359,23 @@ static int routeQuery(MXS_FILTER *instance, MXS_FILTER_SESSION *sdata, GWBUF *pa case MYSQL_COM_QUERY: case MYSQL_COM_STMT_EXECUTE: { + /* Set input query only with MAXROWS_RETURN_ERR */ + if (csdata->instance->config.m_return == MAXROWS_RETURN_ERR && + (csdata->input_sql = gwbuf_clone(packet)) == NULL) + { + csdata->state = MAXROWS_EXPECTING_NOTHING; + + /* Abort client connection on copy failure */ + poll_fake_hangup_event(csdata->session->client_dcb); + gwbuf_free(csdata->res.data); + gwbuf_free(packet); + MXS_FREE(csdata); + csdata->res.data = NULL; + packet = NULL; + csdata = NULL; + return 0; + } + csdata->state = MAXROWS_EXPECTING_RESPONSE; break; } @@ -318,7 +389,9 @@ static int routeQuery(MXS_FILTER *instance, MXS_FILTER_SESSION *sdata, GWBUF *pa MXS_NOTICE("Maxrows filter is sending data."); } - return csdata->down.routeQuery(csdata->down.instance, csdata->down.session, packet); + return csdata->down.routeQuery(csdata->down.instance, + csdata->down.session, + packet); } /** @@ -328,7 +401,9 @@ static int routeQuery(MXS_FILTER *instance, MXS_FILTER_SESSION *sdata, GWBUF *pa * @param sdata The filter session data * @param queue The query data */ -static int clientReply(MXS_FILTER *instance, MXS_FILTER_SESSION *sdata, GWBUF *data) +static int clientReply(MXS_FILTER *instance, + MXS_FILTER_SESSION *sdata, + GWBUF *data) { MAXROWS_INSTANCE *cinstance = (MAXROWS_INSTANCE*)instance; MAXROWS_SESSION_DATA *csdata = (MAXROWS_SESSION_DATA*)sdata; @@ -386,7 +461,8 @@ static int clientReply(MXS_FILTER *instance, MXS_FILTER_SESSION *sdata, GWBUF *d break; default: - MXS_ERROR("Internal filter logic broken, unexpected state: %d", csdata->state); + MXS_ERROR("Internal filter logic broken, unexpected state: %d", + csdata->state); ss_dassert(!true); rv = send_upstream(csdata); maxrows_response_state_reset(&csdata->res); @@ -462,6 +538,7 @@ static MAXROWS_SESSION_DATA *maxrows_session_data_create(MAXROWS_INSTANCE *insta MYSQL_session *mysql_session = (MYSQL_session*)session->client_dcb->data; data->instance = instance; data->session = session; + data->input_sql = NULL; data->state = MAXROWS_EXPECTING_NOTHING; } @@ -500,7 +577,10 @@ static int handle_expecting_fields(MAXROWS_SESSION_DATA *csdata) while (!insufficient && (buflen - csdata->res.offset >= MYSQL_HEADER_LEN)) { uint8_t header[MYSQL_HEADER_LEN + 1]; - gwbuf_copy_data(csdata->res.data, csdata->res.offset, MYSQL_HEADER_LEN + 1, header); + gwbuf_copy_data(csdata->res.data, + csdata->res.offset, + MYSQL_HEADER_LEN + 1, + header); size_t packetlen = MYSQL_HEADER_LEN + MYSQL_GET_PAYLOAD_LEN(header); @@ -584,7 +664,10 @@ static int handle_expecting_response(MAXROWS_SESSION_DATA *csdata) uint8_t 
header[MYSQL_HEADER_LEN + 1 + 8]; // Read packet header from buffer at current offset - gwbuf_copy_data(csdata->res.data, csdata->res.offset, MYSQL_HEADER_LEN + 1, header); + gwbuf_copy_data(csdata->res.data, + csdata->res.offset, + MYSQL_HEADER_LEN + 1, + header); switch ((int)MYSQL_GET_COMMAND(header)) { @@ -610,7 +693,7 @@ static int handle_expecting_response(MAXROWS_SESSION_DATA *csdata) if (csdata->discard_resultset) { - rv = send_eof_upstream(csdata, csdata->res.rows_offset); + rv = send_maxrows_reply_limit(csdata); csdata->state = MAXROWS_EXPECTING_NOTHING; } else @@ -652,7 +735,9 @@ static int handle_expecting_response(MAXROWS_SESSION_DATA *csdata) // Now we can figure out how many fields there are, but first we // need to copy some more data. gwbuf_copy_data(csdata->res.data, - MYSQL_HEADER_LEN + 1, n_bytes - 1, &header[MYSQL_HEADER_LEN + 1]); + MYSQL_HEADER_LEN + 1, + n_bytes - 1, + &header[MYSQL_HEADER_LEN + 1]); csdata->res.n_totalfields = mxs_leint_value(&header[4]); csdata->res.offset += MYSQL_HEADER_LEN + n_bytes; @@ -691,7 +776,10 @@ static int handle_rows(MAXROWS_SESSION_DATA *csdata) bool pending_large_data = csdata->large_packet; // header array holds a full EOF packet uint8_t header[MYSQL_EOF_PACKET_LEN]; - gwbuf_copy_data(csdata->res.data, csdata->res.offset, MYSQL_EOF_PACKET_LEN, header); + gwbuf_copy_data(csdata->res.data, + csdata->res.offset, + MYSQL_EOF_PACKET_LEN, + header); size_t packetlen = MYSQL_HEADER_LEN + MYSQL_GET_PAYLOAD_LEN(header); @@ -702,7 +790,9 @@ static int handle_rows(MAXROWS_SESSION_DATA *csdata) * max is 1 byte less than EOF_PACKET_LEN * If true skip data processing. */ - if (pending_large_data && (packetlen >= MYSQL_HEADER_LEN && packetlen < MYSQL_EOF_PACKET_LEN)) + if (pending_large_data && + (packetlen >= MYSQL_HEADER_LEN && + packetlen < MYSQL_EOF_PACKET_LEN)) { // Update offset, number of rows and break csdata->res.offset += packetlen; @@ -757,7 +847,7 @@ static int handle_rows(MAXROWS_SESSION_DATA *csdata) // Send data in buffer or empty resultset if (csdata->discard_resultset) { - rv = send_eof_upstream(csdata, csdata->res.rows_offset); + rv = send_maxrows_reply_limit(csdata); } else { @@ -789,8 +879,10 @@ static int handle_rows(MAXROWS_SESSION_DATA *csdata) */ if (packetlen < MYSQL_EOF_PACKET_LEN) { - MXS_ERROR("EOF packet has size of %lu instead of %d", packetlen, MYSQL_EOF_PACKET_LEN); - rv = send_eof_upstream(csdata, csdata->res.rows_offset); + MXS_ERROR("EOF packet has size of %lu instead of %d", + packetlen, + MYSQL_EOF_PACKET_LEN); + rv = send_maxrows_reply_limit(csdata); csdata->state = MAXROWS_EXPECTING_NOTHING; break; } @@ -811,7 +903,7 @@ static int handle_rows(MAXROWS_SESSION_DATA *csdata) // Discard data or send data if (csdata->discard_resultset) { - rv = send_eof_upstream(csdata, csdata->res.rows_offset); + rv = send_maxrows_reply_limit(csdata); } else { @@ -857,7 +949,8 @@ static int handle_rows(MAXROWS_SESSION_DATA *csdata) { if (csdata->instance->config.debug & MAXROWS_DEBUG_DISCARDING) { - MXS_INFO("max_resultset_rows %lu reached, not returning the resultset.", csdata->res.n_rows); + MXS_INFO("max_resultset_rows %lu reached, not returning the resultset.", + csdata->res.n_rows); } // Set the discard indicator @@ -901,7 +994,16 @@ static int send_upstream(MAXROWS_SESSION_DATA *csdata) { ss_dassert(csdata->res.data != NULL); - int rv = csdata->up.clientReply(csdata->up.instance, csdata->up.session, csdata->res.data); + /* Free a saved SQL not freed by send_error_upstream() */ + if (csdata->input_sql) + { + 
gwbuf_free(csdata->input_sql); + csdata->input_sql = NULL; + } + + int rv = csdata->up.clientReply(csdata->up.instance, + csdata->up.session, + csdata->res.data); csdata->res.data = NULL; return rv; @@ -914,18 +1016,21 @@ static int send_upstream(MAXROWS_SESSION_DATA *csdata) * at the end. * * @param csdata Session data - * @param offset The offset to server reply pointing to - * next byte after column definitions EOF - * of the first result set. * - * @return Whatever the upstream returns. + * @return Non-Zero if successful, 0 on errors */ -static int send_eof_upstream(MAXROWS_SESSION_DATA *csdata, size_t offset) +static int send_eof_upstream(MAXROWS_SESSION_DATA *csdata) { int rv = -1; /* Sequence byte is #3 */ uint8_t eof[MYSQL_EOF_PACKET_LEN] = {05, 00, 00, 01, 0xfe, 00, 00, 02, 00}; GWBUF *new_pkt = NULL; + /** + * The offset to server reply pointing to + * next byte after column definitions EOF + * of the first result set. + */ + size_t offset = csdata->res.rows_offset; ss_dassert(csdata->res.data != NULL); @@ -954,7 +1059,9 @@ static int send_eof_upstream(MAXROWS_SESSION_DATA *csdata, size_t offset) if (new_pkt) { /* new_pkt will be freed by write routine */ - rv = csdata->up.clientReply(csdata->up.instance, csdata->up.session, new_pkt); + rv = csdata->up.clientReply(csdata->up.instance, + csdata->up.session, + new_pkt); } } @@ -972,3 +1079,153 @@ static int send_eof_upstream(MAXROWS_SESSION_DATA *csdata, size_t offset) return rv; } + +/** + * Send OK packet data upstream. + * + * @param csdata Session data + * + * @return Non-Zero if successful, 0 on errors + */ +static int send_ok_upstream(MAXROWS_SESSION_DATA *csdata) +{ + /* Note: sequence id is always 01 (4th byte) */ + const static uint8_t ok[MYSQL_OK_PACKET_MIN_LEN] = { 07, 00, 00, 01, 00, 00, + 00, 02, 00, 00, 00 }; + + ss_dassert(csdata->res.data != NULL); + + GWBUF *packet = gwbuf_alloc(MYSQL_OK_PACKET_MIN_LEN); + if(!packet) + { + /* Abort clienrt connection */ + poll_fake_hangup_event(csdata->session->client_dcb); + gwbuf_free(csdata->res.data); + csdata->res.data = NULL; + return 0; + } + + uint8_t *ptr = GWBUF_DATA(packet); + memcpy(ptr, &ok, MYSQL_OK_PACKET_MIN_LEN); + + ss_dassert(csdata->res.data != NULL); + + int rv = csdata->up.clientReply(csdata->up.instance, + csdata->up.session, + packet); + gwbuf_free(csdata->res.data); + csdata->res.data = NULL; + + return rv; +} + +/** + * Send ERR packet data upstream. + * + * An error packet is sent to client including + * a message prefix plus the original SQL input + * + * @param csdata Session data + * @return Non-Zero if successful, 0 on errors + */ +static int send_error_upstream(MAXROWS_SESSION_DATA *csdata) +{ + GWBUF *err_pkt; + uint8_t hdr_err[MYSQL_ERR_PACKET_MIN_LEN]; + unsigned long bytes_copied; + char *err_msg_prefix = "Row limit/size exceeded for query: "; + int err_prefix_len = strlen(err_msg_prefix); + unsigned long pkt_len = MYSQL_ERR_PACKET_MIN_LEN + err_prefix_len; + unsigned long sql_len = gwbuf_length(csdata->input_sql) - + (MYSQL_HEADER_LEN + 1); + /** + * The input SQL statement added in the error message + * has a limit of MAXROWS_INPUT_SQL_MAX_LEN bytes + */ + sql_len = (sql_len > MAXROWS_INPUT_SQL_MAX_LEN) ? 
+ MAXROWS_INPUT_SQL_MAX_LEN : sql_len; + uint8_t sql[sql_len]; + + ss_dassert(csdata->res.data != NULL); + + pkt_len += sql_len; + + bytes_copied = gwbuf_copy_data(csdata->input_sql, + MYSQL_HEADER_LEN + 1, + sql_len, + sql); + + if (!bytes_copied || + (err_pkt = gwbuf_alloc(MYSQL_HEADER_LEN + pkt_len)) == NULL) + { + /* Abort client connection */ + poll_fake_hangup_event(csdata->session->client_dcb); + gwbuf_free(csdata->res.data); + gwbuf_free(csdata->input_sql); + csdata->res.data = NULL; + csdata->input_sql = NULL; + + return 0; + } + + uint8_t *ptr = GWBUF_DATA(err_pkt); + memcpy(ptr, &hdr_err, MYSQL_ERR_PACKET_MIN_LEN); + unsigned int err_errno = 1415; + char err_state[7] = "#0A000"; + + /* Set the payload length of the whole error message */ + gw_mysql_set_byte3(&ptr[0], pkt_len); + /* Note: sequence id is always 01 (4th byte) */ + ptr[3] = 1; + /* Error indicator */ + ptr[4] = 0xff; + /* MySQL error code: 2 bytes */ + gw_mysql_set_byte2(&ptr[5], err_errno); + /* Status Message 6 bytes */ + memcpy((char *)&ptr[7], err_state, 6); + /* Copy error message prefix */ + memcpy(&ptr[13], err_msg_prefix, err_prefix_len); + /* Copy SQL input */ + memcpy(&ptr[13 + err_prefix_len], sql, sql_len); + + int rv = csdata->up.clientReply(csdata->up.instance, + csdata->up.session, + err_pkt); + + /* Free server result buffer */ + gwbuf_free(csdata->res.data); + /* Free input_sql buffer */ + gwbuf_free(csdata->input_sql); + + csdata->res.data = NULL; + csdata->input_sql = NULL; + + return rv; +} + +/** + * Send the proper reply to client when the maxrows + * limit/size is hit. + * + * @param csdata Session data + * @return Non-Zero if successful, 0 on errors + */ +static int send_maxrows_reply_limit(MAXROWS_SESSION_DATA *csdata) +{ + switch(csdata->instance->config.m_return) + { + case MAXROWS_RETURN_EMPTY: + return send_eof_upstream(csdata); + break; + case MAXROWS_RETURN_OK: + return send_ok_upstream(csdata); + break; + case MAXROWS_RETURN_ERR: + return send_error_upstream(csdata); + break; + default: + MXS_ERROR("MaxRows config value not expected!"); + ss_dassert(!true); + break; + } +} diff --git a/server/modules/filter/maxrows/maxrows.h b/server/modules/filter/maxrows/maxrows.h index c2db4b51c..72a524297 100644 --- a/server/modules/filter/maxrows/maxrows.h +++ b/server/modules/filter/maxrows/maxrows.h @@ -36,5 +36,7 @@ MXS_BEGIN_DECLS #define MAXROWS_DEFAULT_MAX_RESULTSET_SIZE "65536" // Integer value #define MAXROWS_DEFAULT_DEBUG "0" +// Max size of copied input SQL +#define MAXROWS_INPUT_SQL_MAX_LEN 1024 MXS_END_DECLS From d4da5caf3b1f4d0f9bbf329819eeb72961bd04e2 Mon Sep 17 00:00:00 2001 From: MassimilianoPinto Date: Mon, 3 Apr 2017 08:39:12 +0200 Subject: [PATCH 17/32] MXS-1211: MaxRows documentation update MXS-1211: MaxRows documentation update --- Documentation/Filters/Maxrows.md | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/Documentation/Filters/Maxrows.md b/Documentation/Filters/Maxrows.md index 5941affc9..f85a929ce 100644 --- a/Documentation/Filters/Maxrows.md +++ b/Documentation/Filters/Maxrows.md @@ -55,6 +55,28 @@ max_resultset_size=128 ``` The default value is 64. 
+#### `max_resultset_return` + +Specifies what the filter sends to the client when the +rows or size limit is hit, possible values: + +- an empty result set +- an error packet with input SQL +- an OK packet + + +``` +max_resultset_size=empty|error|ok +``` +The default result type is 'empty' + +Example output with ERR packet: + +``` +MariaDB [(test)]> select * from test.t4; +ERROR 1415 (0A000): Row limit/size exceeded for query: select * from test.t4 +``` + #### `debug` An integer value, using which the level of debug logging made by the maxrows From f3c83770903151a0a3b53593c3e05fa0af94cd5f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Fri, 31 Mar 2017 13:26:57 +0300 Subject: [PATCH 18/32] Route statements in larger batches The RCAP_TYPE_STMT_OUTPUT is not used in its previous form. It can be altered to route only complete packets back to the client. This allows routers to do safer parsing on the results. --- server/modules/protocol/MySQL/MySQLBackend/mysql_backend.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/server/modules/protocol/MySQL/MySQLBackend/mysql_backend.c b/server/modules/protocol/MySQL/MySQLBackend/mysql_backend.c index 6276cae0f..5da4d7797 100644 --- a/server/modules/protocol/MySQL/MySQLBackend/mysql_backend.c +++ b/server/modules/protocol/MySQL/MySQLBackend/mysql_backend.c @@ -778,11 +778,6 @@ gw_read_and_write(DCB *dcb) return 0; } } - else if (rcap_type_required(capabilities, RCAP_TYPE_STMT_OUTPUT) && - !rcap_type_required(capabilities, RCAP_TYPE_RESULTSET_OUTPUT)) - { - stmt = modutil_get_next_MySQL_packet(&read_buffer); - } else { stmt = read_buffer; From e650597eb5766f02d3ecfc4bd01b3989b4e741ff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Mon, 3 Apr 2017 09:58:21 +0300 Subject: [PATCH 19/32] MXS-1200: Document and increase line length limitations The line length limitation is now increased to 16384 bytes. It is now clearly documented in the limitations document. The configuration parser now uses memory from the heap instead of the stack. This should remove any problems caused by the larger line length. --- Documentation/About/Limitations.md | 6 ++++++ server/inih/CMakeLists.txt | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/Documentation/About/Limitations.md b/Documentation/About/Limitations.md index afeaf9aec..35aa2a1d0 100644 --- a/Documentation/About/Limitations.md +++ b/Documentation/About/Limitations.md @@ -4,6 +4,12 @@ This document lists known issues and limitations in MariaDB MaxScale and its plugins. Since limitations are related to specific plugins, this document is divided into several sections. +## Configuration limitations + +In versions 2.1.2 and earlier, the configuration files are limited to 1024 +characters per line. This limitation was increased to 16384 characters in +MaxScale 2.1.3. 
+ ## Protocol limitations ### Limitations with MySQL Protocol support (MySQLClient) diff --git a/server/inih/CMakeLists.txt b/server/inih/CMakeLists.txt index 11c37320a..e8f0a6f37 100644 --- a/server/inih/CMakeLists.txt +++ b/server/inih/CMakeLists.txt @@ -1,2 +1,2 @@ -add_definitions(-DINI_MAX_LINE=1024 -DINI_ALLOW_MULTILINE) +add_definitions(-DINI_MAX_LINE=16384 -DINI_USE_STACK=0 -DINI_ALLOW_MULTILINE) add_library(inih ini.c) From 0f2f9d48b7dc5c92887ae4ecd56fb16fa64f6df1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Thu, 30 Mar 2017 20:42:17 +0300 Subject: [PATCH 20/32] Improve maxadmin help messages The help messages are now more descriptive and have usage information in them. This should help users use the commands without relying on the online documentation. --- server/modules/routing/debugcli/debugcmd.c | 623 ++++++++++++++------- 1 file changed, 418 insertions(+), 205 deletions(-) diff --git a/server/modules/routing/debugcli/debugcmd.c b/server/modules/routing/debugcli/debugcmd.c index f705679cb..abd8355a4 100644 --- a/server/modules/routing/debugcli/debugcmd.c +++ b/server/modules/routing/debugcli/debugcmd.c @@ -131,154 +131,187 @@ struct subcommand showoptions[] = "buffers", 0, dprintAllBuffers, "Show all buffers with backtrace", "Show all buffers with backtrace", - {0, 0, 0} + {0} }, #endif { "dcbs", 0, 0, dprintAllDCBs, "Show all DCBs", - "Show all descriptor control blocks (network connections)", + "Usage: show dcbs", {0} }, { "dbusers", 1, 1, service_print_users, "[deprecated] Show user statistics", - "Show statistics and user names for a service's user table.\n" - "\t\tExample : show dbusers ", - {ARG_TYPE_SERVICE, 0, 0} + "See `show authenticators`", + {ARG_TYPE_SERVICE} }, { "authenticators", 1, 1, service_print_users, - "Show authenticator diagnostics", - "Show authenticator diagnostics for a service.\n" - "\t\tExample : show authenticators ", - {ARG_TYPE_SERVICE, 0, 0} + "Show authenticator diagnostics for a service", + "Usage: show authenticators SERVICE\n" + "\n" + "Parameters:\n" + "SERVICE Service to inspect\n" + "\n" + "Example : show authenticators my-service", + {ARG_TYPE_SERVICE} }, { "epoll", 0, 0, dprintPollStats, - "Show the poll statistics", - "Show the epoll polling system statistics", - {0, 0, 0} + "Show the polling system statistics", + "Usage: show epoll", + {0} }, { "eventstats", 0, 0, dShowEventStats, "Show event queue statistics", - "Show event queue statistics", - {0, 0, 0} + "Usage: show eventstats", + {0} }, { "feedbackreport", 0, 0, moduleShowFeedbackReport, - "Show feedback report", "Show the report of MaxScale loaded modules, suitable for Notification Service", - {0, 0, 0} + "Usage: show feedbackreport", + {0} }, { "filter", 1, 1, dprintFilter, "Show filter details", - "Show details of a filter, the parameter is filter name", - {ARG_TYPE_FILTER, 0, 0} + "Usage: show filter FILTER\n" + "\n" + "Parameters:\n" + "FILTER Filter to show\n" + "\n" + "Example: show filter my-filter", + {ARG_TYPE_FILTER} }, { "filters", 0, 0, dprintAllFilters, "Show all filters", - "Show all filters that were read from the configuration file", - {0, 0, 0} + "Usage: show filters", + {0} }, { "log_throttling", 0, 0, show_log_throttling, - "Show log throttling setting", "Show the current log throttling setting (count, window (ms), suppression (ms))", - {0, 0, 0} + "Usage: show log_throttling", + {0} }, { "modules", 0, 0, dprintAllModules, - "Show loaded modules", "Show all currently loaded modules", - {0, 0, 0} + "Usage: show modules", + {0} }, { 
"monitor", 1, 1, monitorShow, "Show monitor details", - "Show details about a specific monitor, the parameter is monitor name", - {ARG_TYPE_MONITOR, 0, 0} + "Usage: show monitor MONITOR\n" + "\n" + "Parameters:\n" + "MONITOR Monitor to show\n" + "\n" + "Example: show monitor \"Cluster Monitor\"", + {ARG_TYPE_MONITOR} }, { "monitors", 0, 0, monitorShowAll, "Show all monitors", - "Show all the monitors", - {0, 0, 0} + "Usage: show monitors", + {0} }, { "persistent", 1, 1, dprintPersistentDCBs, - "Show persistent connection pool", - "Show persistent pool for a server, e.g. show persistent dbnode1", - {ARG_TYPE_SERVER, 0, 0} + "Show the persistent connection pool of a server", + "Usage: show persistent SERVER\n" + "\n" + "Parameters:\n" + "SERVER Server to show\n" + "\n" + "Example: show persistent db-server-1", + {ARG_TYPE_SERVER} }, { "server", 1, 1, dprintServer, "Show server details", - "Show details for a server, e.g. show server dbnode1", - {ARG_TYPE_SERVER, 0, 0} + "Usage: show server SERVER\n" + "\n" + "Parameters:\n" + "SERVER Server to show\n" + "\n" + "Example: show server db-server-1", + {ARG_TYPE_SERVER} }, { "servers", 0, 0, dprintAllServers, "Show all servers", - "Show all configured servers", - {0, 0, 0} + "Usage: show servers", + {0} }, { "serversjson", 0, 0, dprintAllServersJson, "Show all servers in JSON", - "Show all configured servers in JSON format", - {0, 0, 0} + "Usage: show serversjson", + {0} }, { "services", 0, 0, dprintAllServices, - "Show all service", "Show all configured services in MaxScale", - {0, 0, 0} + "Usage: show services", + {0} }, { "service", 1, 1, dprintService, - "Show service details", - "Show a single service in MaxScale, the parameter is the service name", - {ARG_TYPE_SERVICE, 0, 0} + "Show a single service in MaxScale", + "Usage: show service SERVICE\n" + "\n" + "Parameters:\n" + "SERVICE Service to show\n" + "\n" + "Example: show service my-service", + {ARG_TYPE_SERVICE} }, { "session", 1, 1, dprintSession, "Show session details", - "Show a single session in MaxScale, e.g. 
show session 5", - {ARG_TYPE_SESSION, 0, 0} + "Usage: show session SESSION\n" + "\n" + "Parameters:\n" + "SESSION Session ID of the session to show\n" + "\n" + "Example: show session 5", + {ARG_TYPE_SESSION} }, { "sessions", 0, 0, dprintAllSessions, - "Show all sessions", "Show all active sessions in MaxScale", - {0, 0, 0} + "Usage: show sessions", + {0} }, { "tasks", 0, 0, hkshow_tasks, - "Show housekeeper tasks", "Show all active housekeeper tasks in MaxScale", - {0, 0, 0} + "Usage: show tasks", + {0} }, { "threads", 0, 0, dShowThreads, - "Show workter thread status", "Show the status of the worker threads in MaxScale", - {0, 0, 0} + "Usage: show threads", + {0} }, { "users", 0, 0, telnetdShowUsers, "Show enabled Linux accounts", - "Show all maxadmin enabled Linux accounts and created maxadmin users", - {0, 0, 0} + "Usage: show users", + {0} }, { "version", 0, 0, showVersion, - "Show MaxScale version", "Show the MaxScale version number", - {0, 0, 0} + "Usage: show version", + {0} }, { EMPTY_OPTION} }; @@ -342,71 +375,74 @@ struct subcommand listoptions[] = { { "clients", 0, 0, dListClients, - "List all clients", "List all the client connections to MaxScale", - {0, 0, 0} + "Usage: list clients", + {0} }, { "dcbs", 0, 0, dListDCBs, - "List all DCBs", - "List all the DCBs active within MaxScale", - {0, 0, 0} + "List all active connections within MaxScale", + "Usage: list dcbs", + {0} }, { "filters", 0, 0, dListFilters, "List all filters", - "List all the filters defined within MaxScale", - {0, 0, 0} + "Usage: list filters", + {0} }, { "listeners", 0, 0, dListListeners, "List all listeners", - "List all the listeners defined within MaxScale", - {0, 0, 0} + "Usage: list listeners", + {0} }, { "modules", 0, 0, dprintAllModules, "List all currently loaded modules", - "List all currently loaded modules", - {0, 0, 0} + "Usage: list modules", + {0} }, { "monitors", 0, 0, monitorList, "List all monitors", - "List all monitors", - {0, 0, 0} + "Usage: list monitors", + {0} }, { "services", 0, 0, dListServices, - "List all the services", - "List all the services defined within MaxScale", - {0, 0, 0} + "List all services", + "Usage: list services", + {0} }, { "servers", 0, 0, dListServers, "List all servers", - "List all the servers defined within MaxScale", - {0, 0, 0} + "Usage: list servers", + {0} }, { "sessions", 0, 0, dListSessions, - "List all sessions", "List all the active sessions within MaxScale", - {0, 0, 0} + "Usage: list sessions", + {0} }, { "threads", 0, 0, dShowThreads, - "List polling threads", "List the status of the polling threads in MaxScale", - {0, 0, 0} + "Usage: list threads", + {0} }, { "commands", 0, 2, dListCommands, "List registered commands", - "Usage list commands [MODULE] [COMMAND]\n\n" + "Usage: list commands [MODULE] [COMMAND]\n" + "\n" "Parameters:\n" "MODULE Regular expressions for filtering module names\n" - "COMMAND Regular expressions for filtering module command names\n", + "COMMAND Regular expressions for filtering module command names\n" + "\n" + "Example: list commands my-module my-command", {ARG_TYPE_STRING, ARG_TYPE_STRING} }, { EMPTY_OPTION} @@ -442,32 +478,48 @@ struct subcommand shutdownoptions[] = "maxscale", 0, 0, shutdown_server, - "Shutdown MaxScale", "Initiate a controlled shutdown of MaxScale", - {0, 0, 0} + "Usage: shutdown maxscale", + {0} }, { "monitor", 1, 1, shutdown_monitor, - "Shutdown a monitor", - "E.g. 
shutdown monitor db-cluster-monitor", - {ARG_TYPE_MONITOR, 0, 0} + "Stop a monitor", + "Usage: shutdown monitor MONITOR\n" + "\n" + "Parameters:\n" + "MONITOR Monitor to stop\n" + "\n" + "Example: shutdown monitor db-cluster-monitor", + {ARG_TYPE_MONITOR} }, { "service", 1, 1, shutdown_service, "Stop a service", - "E.g. shutdown service \"Sales Database\"", - {ARG_TYPE_SERVICE, 0, 0} + "Usage: shutdown service SERVICE\n" + "\n" + "Parameters:\n" + "SERVICE Service to stop\n" + "\n" + "Example: shutdown service \"Sales Database\"", + {ARG_TYPE_SERVICE} }, { "listener", 2, 2, shutdown_listener, "Stop a listener", - "E.g. shutdown listener \"RW Service\" \"RW Listener\"", + "Usage: shutdown listener SERVICE LISTENER\n" + "\n" + "Parameters:\n" + "SERVICE Service where LISTENER points to\n" + "LISTENER The listener to stop\n" + "\n" + "Example: shutdown listener \"RW Service\" \"RW Listener\"", {ARG_TYPE_SERVICE, ARG_TYPE_STRING} }, { @@ -495,8 +547,8 @@ struct subcommand syncoptions[] = 0, 0, sync_logs, "Flush log files to disk", - "Flush log files to disk", - {0, 0, 0} + "Usage: flush logs", + {0} }, { EMPTY_OPTION @@ -527,19 +579,34 @@ struct subcommand restartoptions[] = { "monitor", 1, 1, restart_monitor, "Restart a monitor", - "E.g. restart monitor db-cluster-monitor", - {ARG_TYPE_MONITOR, 0, 0} + "Usage: restart monitor NAME\n" + "\n" + "Parameters:\n" + "NAME Monitor to restart\n" + "\n" + "Example: restart monitor db-cluster-monitor", + {ARG_TYPE_MONITOR} }, { "service", 1, 1, restart_service, "Restart a service", - "E.g. restart service \"Sales Database\"", - {ARG_TYPE_SERVICE, 0, 0} + "Usage: restart service NAME\n" + "\n" + "Parameters:\n" + "NAME Service to restart\n" + "\n" + "Example: restart service \"Sales Database\"", + {ARG_TYPE_SERVICE} }, { "listener", 2, 2, restart_listener, "Restart a listener", - "E.g. restart listener \"RW Service\" \"RW Listener\"", + "Usage: restart listener NAME\n" + "\n" + "Parameters:\n" + "NAME Listener to restart\n" + "\n" + "Example: restart listener \"RW Service\" \"RW Listener\"", {ARG_TYPE_SERVICE, ARG_TYPE_STRING} }, { EMPTY_OPTION } @@ -557,25 +624,52 @@ struct subcommand setoptions[] = { "server", 2, 2, set_server, "Set the status of a server", - "Set the status of a server. E.g. 
set server dbnode4 master", - {ARG_TYPE_SERVER, ARG_TYPE_STRING, 0} + "Usage: set server NAME STATUS\n" + "\n" + "Parameters:\n" + "NAME Server name\n" + "STATUS The status to set\n" + "\n" + "Example: set server dbnode4 master", + {ARG_TYPE_SERVER, ARG_TYPE_STRING} }, { "pollsleep", 1, 1, set_pollsleep, "Set poll sleep period", - "Set the maximum poll sleep period in milliseconds", - {ARG_TYPE_NUMERIC, 0, 0} + "Usage: set pollsleep VALUE\n" + "\n" + "Parameters:\n" + "VALUE Poll sleep in milliseconds\n" + "\n" + "Sets the maximum poll sleep period in milliseconds\n" + "\n" + "Example: set pollsleep 100", + {ARG_TYPE_NUMERIC} }, { "nbpolls", 1, 1, set_nbpoll, "Set non-blocking polls", - "Set the number of non-blocking polls", - {ARG_TYPE_NUMERIC, 0, 0} + "Usage: set nbpolls VALUE\n" + "\n" + "Parameters:\n" + "VALUE Number of non-blocking polls\n" + "\n" + "Sets the number of non-blocking polls\n" + "\n" + "Example: set nbpolls 5", + {ARG_TYPE_NUMERIC} }, { "log_throttling", 3, 3, set_log_throttling, - "Set log throttling", "Set the log throttling configuration", + "Usage: set log_throttling COUNT WINDOW SUPPRESS\n" + "\n" + "Parameters:\n" + "COUNT Number of messages to log before throttling\n" + "WINDOW The time window in milliseconds where COUNT messages can be logged\n" + "SUPPRESS The log suppression in milliseconds once COUNT messages have been logged\n" + "\n" + "Example: set log_throttling 5 1000 25000", {ARG_TYPE_NUMERIC, ARG_TYPE_NUMERIC, ARG_TYPE_NUMERIC} }, { EMPTY_OPTION } @@ -590,8 +684,14 @@ struct subcommand clearoptions[] = { "server", 2, 2, clear_server, "Clear server status", - "Clear the status of a server. E.g. clear server dbnode2 master", - {ARG_TYPE_SERVER, ARG_TYPE_STRING, 0} + "Usage: clear server NAME STATUS\n" + "\n" + "Parameters:\n" + "NAME Server name\n" + "STATUS The status to clear\n" + "\n" + "Example: clear server dbnode2 master", + {ARG_TYPE_SERVER, ARG_TYPE_STRING} }, { EMPTY_OPTION } }; @@ -607,14 +707,19 @@ struct subcommand reloadoptions[] = { "config", 0, 0, reload_config, "Reload the configuration", - "Reload the configuration data for MaxScale", - {0, 0, 0} + "Usage: reload config", + {0} }, { "dbusers", 1, 1, reload_dbusers, - "Reload users table", - "Reload the users for a service. E.g. reload dbusers \"splitter service\"", - {ARG_TYPE_SERVICE, 0, 0} + "Reload the database users for a service", + "Usage: reload dbusers SERVICE\n" + "\n" + "Parameters:\n" + "SERVICE Reload database users for this service\n" + "\n" + "Example: reload dbusers \"splitter service\"", + {ARG_TYPE_SERVICE} }, { EMPTY_OPTION } }; @@ -644,60 +749,71 @@ struct subcommand enableoptions[] = 1, 1, enable_log_priority, "Enable a logging priority", - "Enable a logging priority for MaxScale, parameters must be one of " - "'err', 'warning', 'notice', 'info' or 'debug'. " - "E.g.: 'enable log-priority info'.", - {ARG_TYPE_STRING, 0, 0} + "Usage: enable log-priority PRIORITY\n" + "\n" + "Parameters:" + "PRIORITY One of 'err', 'warning', 'notice','info' or 'debug'\n" + "\n" + "Example: enable log-priority info", + {ARG_TYPE_STRING} }, { "sessionlog-priority", 2, 2, enable_sess_log_priority, "[Deprecated] Enable a logging priority for a session", - "Usage: enable sessionlog-priority [err | warning | notice | info | debug] " - "\t E.g. 
enable sessionlog-priority info 123.", - {ARG_TYPE_STRING, ARG_TYPE_STRING, 0} + "This command is deprecated", + {ARG_TYPE_STRING, ARG_TYPE_STRING} }, { "root", 1, 1, enable_service_root, - "Enable root user access", - "Enable root access to a service, pass a service name to enable root access", - {ARG_TYPE_SERVICE, 0, 0} + "Enable root user access to a service", + "Usage: enable root SERVICE\n" + "\n" + "Parameters:\n" + "SERVICE Service where root user is enabled\n" + "\n" + "Example: enable root my-service", + {ARG_TYPE_SERVICE} }, { "feedback", 0, 0, enable_feedback_action, - "Enable MaxScale feedback", - "Enable MaxScale modules list sending via http to notification service", - {0, 0, 0} + "Enable MaxScale feedback to notification service", + "Usage: enable feedback", + {0} }, { "syslog", 0, 0, enable_syslog, - "Enable syslog", "Enable syslog logging", - {0, 0, 0} + "Usage: enable syslog", + {0} }, { "maxlog", 0, 0, enable_maxlog, "Enable MaxScale logging", - "Enable MaxScale logging", - {0, 0, 0} + "Usage: enable maxlog", + {0} }, { "account", 1, 1, enable_account, - "Activate a Linux user", - "Enable maxadmin usage for a Linux user. E.g.:\n" - " MaxScale> enable account alice", - {ARG_TYPE_STRING, 0, 0} + "Activate a Linux user account for MaxAdmin use", + "Usage: enable account USER\n" + "\n" + "Parameters:\n" + "USER The user account to enable\n" + "\n" + "Example: enable account alice", + {ARG_TYPE_STRING} }, { EMPTY_OPTION @@ -716,59 +832,71 @@ struct subcommand disableoptions[] = 1, 1, disable_log_priority, "Disable a logging priority", - "Options 'err' | 'warning' | 'notice' | 'info' | 'debug'. " - "E.g.: 'disable log-priority info'", - {ARG_TYPE_STRING, 0, 0} + "Usage: disable log-priority PRIORITY\n" + "\n" + "Parameters:" + "PRIORITY One of 'err', 'warning', 'notice','info' or 'debug'\n" + "\n" + "Example: disable log-priority info", + {ARG_TYPE_STRING} }, { "sessionlog-priority", 2, 2, disable_sess_log_priority, "[Deprecated] Disable a logging priority for a particular session", - "Usage: disable sessionlog-priority [err | warning | notice | info | debug] " - "\t E.g. enable sessionlog-priority info 123", - {ARG_TYPE_STRING, ARG_TYPE_STRING, 0} + "This command is deprecated", + {ARG_TYPE_STRING, ARG_TYPE_STRING} }, { "root", 1, 1, disable_service_root, "Disable root access", - "Disable root access to a service", - {ARG_TYPE_SERVICE, 0, 0} + "Usage: disable root SERVICE\n" + "\n" + "Parameters:\n" + "SERVICE Service where root user is disabled\n" + "\n" + "Example: disable root my-service", + {ARG_TYPE_SERVICE} }, { "feedback", 0, 0, disable_feedback_action, - "Disable feedback", - "Disable MaxScale modules list sending via http to notification service", - {0, 0, 0} + "Disable MaxScale feedback to notification service", + "Usage: disable feedback", + {0} }, { "syslog", 0, 0, disable_syslog, - "Disable syslog", "Disable syslog logging", - {0, 0, 0} + "Usage: disable syslog", + {0} }, { "maxlog", 0, 0, disable_maxlog, "Disable MaxScale logging", - "Disable MaxScale logging", - {0, 0, 0} + "Usage: disable maxlog", + {0} }, { "account", 1, 1, disable_account, "Disable Linux user", - "Disable maxadmin usage for Linux user. 
E.g.:\n" - " MaxScale> disable account alice", - {ARG_TYPE_STRING, 0, 0} + "Usage: disable account USER\n" + "\n" + "Parameters:\n" + "USER The user account to disable\n" + "\n" + "Example: disable account alice", + {ARG_TYPE_STRING} }, { EMPTY_OPTION @@ -805,18 +933,28 @@ struct subcommand addoptions[] = { { "user", 2, 2, telnetdAddUser, - "Add account for maxadmin", - "Add insecure account for using maxadmin over the network. E.g.:\n" - " MaxScale> add user bob somepass", - {ARG_TYPE_STRING, ARG_TYPE_STRING, 0} + "Add insecure account for using maxadmin over the network", + "Usage: add user USER PASSWORD\n" + "\n" + "Parameters:\n" + "USER User to add\n" + "PASSWORD Password for the user\n" + "\n" + "Example: add user bob somepass", + {ARG_TYPE_STRING, ARG_TYPE_STRING} }, { "server", 2, 12, cmd_AddServer, "Add a new server to a service", - "Usage: add server SERVER TARGET...\n\n" - "The TARGET must be a list of service and monitor names\n" - "e.g. add server my-db my-service 'Cluster Monitor'\n" - "A server can be assigned to a maximum of 11 objects in one command", + "Usage: add server SERVER TARGET...\n" + "\n" + "Parameters:\n" + "SERVER The server that is added to TARGET\n" + "TARGET List of service and/or monitor names separated by spaces\n" + "\n" + "A server can be assigned to a maximum of 11 objects in one command\n" + "\n" + "Example: add server my-db my-service \"Cluster Monitor\"", { ARG_TYPE_SERVER, ARG_TYPE_STRING, ARG_TYPE_STRING, ARG_TYPE_STRING, ARG_TYPE_STRING, ARG_TYPE_STRING, ARG_TYPE_STRING, ARG_TYPE_STRING, @@ -858,18 +996,28 @@ struct subcommand removeoptions[] = "user", 2, 2, telnetdRemoveUser, - "Remove account from maxadmin", - "Remove account for using maxadmin over the network. E.g.:\n" - " MaxAdmin> remove user bob somepass", + "Remove account for using maxadmin over the network", + "Usage: remove user USER PASSWORD\n" + "\n" + "Parameters:\n" + "USER User to remove\n" + "PASSWORD Password of the user\n" + "\n" + "Example: remove user bob somepass", {ARG_TYPE_STRING, ARG_TYPE_STRING} }, { "server", 2, 12, cmd_RemoveServer, "Remove a server from a service or a monitor", - "Usage: remove server SERVER TARGET...\n\n" - "The TARGET must be a list of service and monitor names\n" - "e.g. 
remove server my-db my-service 'Cluster Monitor'\n" - "A server can be removed from a maximum of 11 objects in one command", + "Usage: remove server SERVER TARGET...\n" + "\n" + "Parameters:\n" + "SERVER The server that is removed from TARGET\n" + "TARGET List of service and/or monitor names separated by spaces\n" + "\n" + "A server can be removed from a maximum of 11 objects in one command\n" + "\n" + "Example: remove server my-db my-service \"Cluster Monitor\"", { ARG_TYPE_SERVER, ARG_TYPE_STRING, ARG_TYPE_STRING, ARG_TYPE_STRING, ARG_TYPE_STRING, ARG_TYPE_STRING, ARG_TYPE_STRING, ARG_TYPE_STRING, @@ -956,17 +1104,17 @@ struct subcommand flushoptions[] = "log", 1, 1, flushlog, - "Flush log files", - "Flush the content of a log file, close that log, rename it and open a new log file", - {ARG_TYPE_STRING, 0, 0} + "Flush the content of a log file and reopen it", + "Usage: flush log", + {ARG_TYPE_STRING} }, { "logs", 0, 0, flushlogs, - "Flush log files", - "Flush the content of all log files, close those logs, rename them and open a new log files", - {0, 0, 0} + "Flush the content of a log file and reopen it", + "Usage: flush logs", + {0} }, { EMPTY_OPTION @@ -1049,15 +1197,19 @@ struct subcommand createoptions[] = { "server", 2, 6, createServer, "Create a new server", - "Usage: create server NAME HOST [PORT] [PROTOCOL] [AUTHENTICATOR] [OPTIONS]\n\n" - "Create a new server from the following parameters.\n\n" + "Usage: create server NAME HOST [PORT] [PROTOCOL] [AUTHENTICATOR] [OPTIONS]\n" + "\n" + "Parameters:\n" "NAME Server name\n" "HOST Server host address\n" "PORT Server port (default 3306)\n" "PROTOCOL Server protocol (default MySQLBackend)\n" "AUTHENTICATOR Authenticator module name (default MySQLAuth)\n" - "OPTIONS Options for the authenticator module\n\n" - "The first two parameters are required, the others are optional.\n", + "OPTIONS Comma separated list of options for the authenticator\n" + "\n" + "The first two parameters are required, the others are optional.\n" + "\n" + "Example: create server my-db-1 192.168.0.102 3306", { ARG_TYPE_STRING, ARG_TYPE_STRING, ARG_TYPE_STRING, ARG_TYPE_STRING, ARG_TYPE_STRING, ARG_TYPE_STRING @@ -1067,8 +1219,9 @@ struct subcommand createoptions[] = "listener", 2, 12, createListener, "Create a new listener for a service", "Usage: create listener SERVICE NAME [HOST] [PORT] [PROTOCOL] [AUTHENTICATOR] [OPTIONS]\n" - " [SSL_KEY] [SSL_CERT] [SSL_CA] [SSL_VERSION] [SSL_VERIFY_DEPTH]\n\n" - "Create a new server from the following parameters.\n\n" + " [SSL_KEY] [SSL_CERT] [SSL_CA] [SSL_VERSION] [SSL_VERIFY_DEPTH]\n" + "\n" + "Parameters\n" "SERVICE Service where this listener is added\n" "NAME Listener name\n" "HOST Listener host address (default [::])\n" @@ -1080,10 +1233,13 @@ struct subcommand createoptions[] = "SSL_CERT Path to SSL certificate\n" "SSL_CA Path to CA certificate\n" "SSL_VERSION SSL version (default MAX)\n" - "SSL_VERIFY_DEPTH Certificate verification depth\n\n" + "SSL_VERIFY_DEPTH Certificate verification depth\n" + "\n" "The first two parameters are required, the others are optional.\n" "Any of the optional parameters can also have the value 'default'\n" - "which will be replaced with the default value.\n", + "which will be replaced with the default value.\n" + "\n" + "Example: create listener my-service my-new-listener 192.168.0.101 4006", { ARG_TYPE_SERVICE, ARG_TYPE_STRING, ARG_TYPE_STRING, ARG_TYPE_STRING, ARG_TYPE_STRING, ARG_TYPE_STRING, ARG_TYPE_STRING, ARG_TYPE_STRING, @@ -1093,9 +1249,13 @@ struct subcommand createoptions[] = { 
"monitor", 2, 2, createMonitor, "Create a new monitor", - "Usage: create monitor NAME MODULE\n\n" + "Usage: create monitor NAME MODULE\n" + "\n" + "Parameters:\n" "NAME Monitor name\n" - "MODULE Monitor module\n", + "MODULE Monitor module\n" + "\n" + "Example: create monitor my-monitor mysqlmon", { ARG_TYPE_STRING, ARG_TYPE_STRING } @@ -1156,19 +1316,38 @@ struct subcommand destroyoptions[] = { "server", 1, 1, destroyServer, "Destroy a server", - "Usage: destroy server NAME", + "Usage: destroy server NAME\n" + "\n" + "Parameters:\n" + "NAME Server to destroy\n" + "\n" + "Example: destroy server my-db-1", {ARG_TYPE_SERVER} }, { "listener", 2, 2, destroyListener, "Destroy a listener", - "Usage: destroy listener SERVICE NAME", + "Usage: destroy listener SERVICE NAME\n" + "\n" + "Parameters:\n" + "NAME Listener to destroy\n" + "\n" + "The listener is stopped and it will be removed on the next restart of MaxScale\n" + "\n" + "Example: destroy listener my-listener", {ARG_TYPE_SERVICE, ARG_TYPE_STRING} }, { "monitor", 1, 1, destroyMonitor, "Destroy a monitor", - "Usage: destroy monitor NAME", + "Usage: destroy monitor NAME\n" + "\n" + "Parameters:\n" + "NAME Monitor to destroy\n" + "\n" + "The monitor is stopped and it will be removed on the next restart of MaxScale\n" + "\n" + "Example: destroy monitor my-monitor", {ARG_TYPE_MONITOR} }, { @@ -1266,8 +1445,8 @@ static void alterServer(DCB *dcb, SERVER *server, char *v1, char *v2, char *v3, } static void alterMonitor(DCB *dcb, MXS_MONITOR *monitor, char *v1, char *v2, char *v3, - char *v4, char *v5, char *v6, char *v7, char *v8, char *v9, - char *v10, char *v11) + char *v4, char *v5, char *v6, char *v7, char *v8, char *v9, + char *v10, char *v11) { char *values[11] = {v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11}; const int items = sizeof(values) / sizeof(values[0]); @@ -1306,8 +1485,14 @@ struct subcommand alteroptions[] = { "server", 2, 12, alterServer, "Alter server parameters", - "Usage: alter server NAME KEY=VALUE ...\n\n" - "This will alter an existing parameter of a server. The accepted values for KEY are:\n\n" + "Usage: alter server NAME KEY=VALUE ...\n" + "\n" + "Parameters:\n" + "NAME Server name\n" + "KEY=VALUE List of `key=value` pairs separated by spaces\n" + "\n" + "This will alter an existing parameter of a server. 
The accepted values for KEY are:\n" + "\n" "address Server address\n" "port Server port\n" "monuser Monitor user for this server\n" @@ -1317,9 +1502,12 @@ struct subcommand alteroptions[] = "ssl_cert Path to SSL certificate\n" "ssl_ca_cert Path to SSL CA certificate\n" "ssl_version SSL version\n" - "ssl_cert_verify_depth Certificate verification depth\n\n" + "ssl_cert_verify_depth Certificate verification depth\n" + "\n" "To configure SSL for a newly created server, the 'ssl', 'ssl_cert',\n" - "'ssl_key' and 'ssl_ca_cert' parameters must be given at the same time.\n", + "'ssl_key' and 'ssl_ca_cert' parameters must be given at the same time.\n" + "\n" + "Example: alter server my-db-1 address=192.168.0.202 port=3307", { ARG_TYPE_SERVER, ARG_TYPE_STRING, ARG_TYPE_STRING, ARG_TYPE_STRING, ARG_TYPE_STRING, ARG_TYPE_STRING, ARG_TYPE_STRING, ARG_TYPE_STRING, @@ -1329,9 +1517,24 @@ struct subcommand alteroptions[] = { "monitor", 2, 12, alterMonitor, "Alter monitor parameters", - "Usage: alter monitor NAME KEY=VALUE ...\n\n" + "Usage: alter monitor NAME KEY=VALUE ...\n" + "\n" + "Parameters:\n" + "NAME Monitor name\n" + "KEY=VALUE List of `key=value` pairs separated by spaces\n" + "\n" + "All monitors support the following values for KEY:\n" + "user Username used when connecting to servers\n" + "password Password used when connecting to servers\n" + "monitor_interval Monitoring interval in milliseconds\n" + "backend_connect_timeout Server coneection timeout in seconds\n" + "backend_write_timeout Server write timeout in seconds\n" + "backend_read_timeout Server read timeout in seconds\n" + "\n" "This will alter an existing parameter of a monitor. To remove parameters,\n" - "pass an empty value for a key e.g. 'maxadmin alter monitor my-monitor my-key='", + "pass an empty value for a key e.g. 'maxadmin alter monitor my-monitor my-key='\n" + "\n" + "Example: alter monitor my-monitor user=maxuser password=maxpwd", { ARG_TYPE_MONITOR, ARG_TYPE_STRING, ARG_TYPE_STRING, ARG_TYPE_STRING, ARG_TYPE_STRING, ARG_TYPE_STRING, ARG_TYPE_STRING, ARG_TYPE_STRING, @@ -1404,8 +1607,16 @@ struct subcommand calloptions[] = { "command", 2, 12, callModuleCommand, "Call module command", - "Usage: call command NAMESPACE COMMAND ARGS...\n\n" - "To list all registered commands, run 'list commands'.\n", + "Usage: call command MODULE COMMAND ARGS...\n" + "\n" + "Parameters:\n" + "MODULE The module name\n" + "COMMAND The command to call\n" + "ARGS... 
Arguments for the command\n" + "\n" + "To list all registered commands, run 'list commands'.\n" + "\n" + "Example: call command my-module my-command hello world!", { ARG_TYPE_STRING, ARG_TYPE_STRING, ARG_TYPE_STRING, ARG_TYPE_STRING, ARG_TYPE_STRING, ARG_TYPE_STRING, ARG_TYPE_STRING, ARG_TYPE_STRING, @@ -1520,6 +1731,9 @@ static void free_arg(int arg_type, void *value) static SPINLOCK debugcmd_lock = SPINLOCK_INIT; +static const char item_separator[] = + "----------------------------------------------------------------------------\n"; + /** * We have a complete line from the user, lookup the commands and execute them * @@ -1621,22 +1835,16 @@ execute_cmd(CLI_SESSION *cli) dcb_printf(dcb, "Available commands:\n"); for (i = 0; cmds[i].cmd; i++) { - if (cmds[i].options[1].arg1 == NULL) + dcb_printf(dcb, "%s:\n", cmds[i].cmd); + + for (j = 0; cmds[i].options[j].arg1; j++) { - dcb_printf(dcb, " %s %s\n", cmds[i].cmd, cmds[i].options[0].arg1); - } - else - { - dcb_printf(dcb, " %s [", cmds[i].cmd); - for (j = 0; cmds[i].options[j].arg1; j++) - { - dcb_printf(dcb, "%s%s", cmds[i].options[j].arg1, - cmds[i].options[j + 1].arg1 ? "|" : ""); - } - dcb_printf(dcb, "]\n"); + dcb_printf(dcb, " %s %s - %s\n", cmds[i].cmd, + cmds[i].options[j].arg1, cmds[i].options[j].help); } + dcb_printf(dcb, "\n"); } - dcb_printf(dcb, "\nType help command to see details of each command.\n"); + dcb_printf(dcb, "\nType `help COMMAND` to see details of each command.\n"); dcb_printf(dcb, "Where commands require names as arguments and these names contain\n"); dcb_printf(dcb, "whitespace either the \\ character may be used to escape the whitespace\n"); dcb_printf(dcb, "or the name may be enclosed in double quotes \".\n\n"); @@ -1648,10 +1856,15 @@ execute_cmd(CLI_SESSION *cli) if (!strcasecmp(args[1], cmds[i].cmd)) { found = 1; - dcb_printf(dcb, "Available options to the %s command:\n", args[1]); + dcb_printf(dcb, "Available options to the `%s` command:\n", cmds[i].cmd); for (j = 0; cmds[i].options[j].arg1; j++) { - dcb_printf(dcb, "'%s' - %s\n\n%s\n\n", cmds[i].options[j].arg1, + if (j != 0) + { + dcb_printf(dcb, item_separator); + } + + dcb_printf(dcb, "\n%s %s - %s\n\n%s\n\n", cmds[i].cmd, cmds[i].options[j].arg1, cmds[i].options[j].help, cmds[i].options[j].devhelp); } From 815780aa4ae2873d587121732979d6d9286ba40a Mon Sep 17 00:00:00 2001 From: Johan Wikman Date: Fri, 31 Mar 2017 16:57:13 +0300 Subject: [PATCH 21/32] In cache testrules.c ensure there seems to be at least one thread --- server/modules/filter/cache/rules.cc | 1 + server/modules/filter/cache/test/testrules.cc | 3 +++ 2 files changed, 4 insertions(+) diff --git a/server/modules/filter/cache/rules.cc b/server/modules/filter/cache/rules.cc index e7feb9f56..1983862f4 100644 --- a/server/modules/filter/cache/rules.cc +++ b/server/modules/filter/cache/rules.cc @@ -537,6 +537,7 @@ static CACHE_RULE *cache_rule_create_regexp(cache_rule_attribute_t attribute, pcre2_jit_compile(code, PCRE2_JIT_COMPLETE); int n_threads = config_threadcount(); + ss_dassert(n_threads > 0); pcre2_match_data **datas = alloc_match_datas(n_threads, code); diff --git a/server/modules/filter/cache/test/testrules.cc b/server/modules/filter/cache/test/testrules.cc index 32f72cd2a..932b51870 100644 --- a/server/modules/filter/cache/test/testrules.cc +++ b/server/modules/filter/cache/test/testrules.cc @@ -236,6 +236,9 @@ int main() if (mxs_log_init(NULL, ".", MXS_LOG_TARGET_DEFAULT)) { + MXS_CONFIG* pConfig = config_get_global_options(); + pConfig->n_threads = 1; + 
set_libdir(MXS_STRDUP_A("../../../../../query_classifier/qc_sqlite/")); if (qc_setup("qc_sqlite", "") && qc_process_init(QC_INIT_BOTH)) { From 8d2d6d8721658ccd02b8afcd7785c32b865bdade Mon Sep 17 00:00:00 2001 From: Johan Wikman Date: Fri, 31 Mar 2017 17:31:14 +0300 Subject: [PATCH 22/32] Join threads to prevent leaks --- server/core/test/testlogthrottling.cc | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/server/core/test/testlogthrottling.cc b/server/core/test/testlogthrottling.cc index de78bdff7..685d8ea7b 100644 --- a/server/core/test/testlogthrottling.cc +++ b/server/core/test/testlogthrottling.cc @@ -113,6 +113,7 @@ bool run(const MXS_LOG_THROTTLING& throttling, int priority, size_t n_generate, in.seekg(0, ios_base::end); THREAD_ARG args[N_THREADS]; + pthread_t tids[N_THREADS]; // Create the threads. for (size_t i = 0; i < N_THREADS; ++i) @@ -122,8 +123,7 @@ bool run(const MXS_LOG_THROTTLING& throttling, int priority, size_t n_generate, parg->n_generate = n_generate; parg->priority = priority; - pthread_t tid; - int rc = pthread_create(&tid, 0, thread_main, parg); + int rc = pthread_create(&tids[i], 0, thread_main, parg); ensure(rc == 0); } @@ -145,6 +145,12 @@ bool run(const MXS_LOG_THROTTLING& throttling, int priority, size_t n_generate, mxs_log_flush_sync(); + for (size_t i = 0; i < N_THREADS; ++i) + { + void* rv; + pthread_join(tids[i], &rv); + } + return check_messages(in, n_expect); } From 108978fcd28187efb3a39af00b6413c5feb2961e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Mon, 3 Apr 2017 12:38:08 +0300 Subject: [PATCH 23/32] Format dbfwfilter documentation Split lines that are longer than 80 characters. --- .../Filters/Database-Firewall-Filter.md | 122 +++++++++++++----- 1 file changed, 90 insertions(+), 32 deletions(-) diff --git a/Documentation/Filters/Database-Firewall-Filter.md b/Documentation/Filters/Database-Firewall-Filter.md index fe5576d74..46721d112 100644 --- a/Documentation/Filters/Database-Firewall-Filter.md +++ b/Documentation/Filters/Database-Firewall-Filter.md @@ -1,11 +1,19 @@ #Database Firewall filter ## Overview -The database firewall filter is used to block queries that match a set of rules. It can be used to prevent harmful queries from reaching the backend database instances or to limit access to the database based on a more flexible set of rules compared to the traditional GRANT-based privilege system. Currently the filter does not support multi-statements. + +The database firewall filter is used to block queries that match a set of +rules. It can be used to prevent harmful queries from reaching the backend +database instances or to limit access to the database based on a more flexible +set of rules compared to the traditional GRANT-based privilege system. Currently +the filter does not support multi-statements. ## Configuration -The database firewall filter only requires minimal configuration in the maxscale.cnf file. The actual rules of the database firewall filter are located in a separate text file. The following is an example of a database firewall filter configuration in maxscale.cnf. +The database firewall filter only requires minimal configuration in the +maxscale.cnf file. The actual rules of the database firewall filter are located +in a separate text file. The following is an example of a database firewall +filter configuration in maxscale.cnf. 
``` [DatabaseFirewall] @@ -51,11 +59,11 @@ set to `allow`: - COM_PROCESS_KILL: Alias for `KILL ;` query - COM_PROCESS_INFO: Alias for `SHOW PROCESSLIST;` -You can have both blacklist and whitelist functionality by configuring one filter -with `action=allow` and another one with `action=block`. You can then use -different rule files with each filter, one for blacklisting and another one -for whitelisting. After this you only have to add both of these filters -to a service in the following way. +You can have both blacklist and whitelist functionality by configuring one +filter with `action=allow` and another one with `action=block`. You can then use +different rule files with each filter, one for blacklisting and another one for +whitelisting. After this you only have to add both of these filters to a service +in the following way. ``` [my-firewall-service] @@ -81,10 +89,10 @@ rules=/home/user/blacklist-rules.txt #### `log_match` -Log all queries that match a rule. For the `any` matching mode, the name of -the rule that matched is logged and for other matching modes, the name of -the last matching rule is logged. In addition to the rule name the matched -user and the query itself is logged. The log messages are logged at the notice level. +Log all queries that match a rule. For the `any` matching mode, the name of the +rule that matched is logged and for other matching modes, the name of the last +matching rule is logged. In addition to the rule name the matched user and the +query itself is logged. The log messages are logged at the notice level. #### `log_no_match` @@ -120,7 +128,9 @@ parameter (_allow_, _block_ or _ignore_). ### Mandatory rule parameters -The database firewall filter's rules expect a single mandatory parameter for a rule. You can define multiple rules to cover situations where you would like to apply multiple mandatory rules to a query. +The database firewall filter's rules expect a single mandatory parameter for a +rule. You can define multiple rules to cover situations where you would like to +apply multiple mandatory rules to a query. #### `wildcard` @@ -128,7 +138,8 @@ This rule blocks all queries that use the wildcard character *. #### `columns` -This rule expects a list of values after the `columns` keyword. These values are interpreted as column names and if a query targets any of these, it is blocked. +This rule expects a list of values after the `columns` keyword. These values are +interpreted as column names and if a query targets any of these, it is blocked. #### `function` @@ -140,29 +151,43 @@ not considered functions. #### `regex` -This rule blocks all queries matching a regex enclosed in single or double quotes. -The regex string expects a PCRE2 syntax regular expression. For more information -about the PCRE2 syntax, read the [PCRE2 documentation](http://www.pcre.org/current/doc/html/pcre2syntax.html). +This rule blocks all queries matching a regex enclosed in single or double +quotes. The regex string expects a PCRE2 syntax regular expression. For more +information about the PCRE2 syntax, read the [PCRE2 +documentation](http://www.pcre.org/current/doc/html/pcre2syntax.html). #### `limit_queries` -The limit_queries rule expects three parameters. The first parameter is the number of allowed queries during the time period. The second is the time period in seconds and the third is the amount of time for which the rule is considered active and blocking. +The limit_queries rule expects three parameters. 
The first parameter is the +number of allowed queries during the time period. The second is the time period +in seconds and the third is the amount of time for which the rule is considered +active and blocking. #### `no_where_clause` -This rule inspects the query and blocks it if it has no WHERE clause. For example, this would disallow a `DELETE FROM ...` query without a `WHERE` clause. This does not prevent wrongful usage of the `WHERE` clause e.g. `DELETE FROM ... WHERE 1=1`. +This rule inspects the query and blocks it if it has no WHERE clause. For +example, this would disallow a `DELETE FROM ...` query without a `WHERE` +clause. This does not prevent wrongful usage of the `WHERE` clause e.g. `DELETE +FROM ... WHERE 1=1`. ### Optional rule parameters -Each mandatory rule accepts one or more optional parameters. These are to be defined after the mandatory part of the rule. +Each mandatory rule accepts one or more optional parameters. These are to be +defined after the mandatory part of the rule. #### `at_times` -This rule expects a list of time ranges that define the times when the rule in question is active. The time formats are expected to be ISO-8601 compliant and to be separated by a single dash (the - character). For example, to define the active period of a rule to be 5pm to 7pm, you would include `at times 17:00:00-19:00:00` in the rule definition. The rule uses local time to check if the rule is active and has a precision of one second. +This rule expects a list of time ranges that define the times when the rule in +question is active. The time formats are expected to be ISO-8601 compliant and +to be separated by a single dash (the - character). For example, to define the +active period of a rule to be 5pm to 7pm, you would include `at times +17:00:00-19:00:00` in the rule definition. The rule uses local time to check if +the rule is active and has a precision of one second. #### `on_queries` -This limits the rule to be active only on certain types of queries. The possible values are: +This limits the rule to be active only on certain types of queries. The possible +values are: |Keyword|Matching operations | |-------|------------------------------| @@ -184,17 +209,35 @@ The `users` directive defines the users to which the rule should be applied. `users NAME... match { any | all | strict_all } rules RULE...` -The first keyword is `users`, which identifies this line as a user definition line. +The first keyword is `users`, which identifies this line as a user definition +line. -The second component is a list of user names and network addresses in the format *`user`*`@`*`0.0.0.0`*. The first part is the user name and the second part is the network address. You can use the `%` character as the wildcard to enable user name matching from any address or network matching for all users. After the list of users and networks the keyword match is expected. +The second component is a list of user names and network addresses in the format +*`user`*`@`*`0.0.0.0`*. The first part is the user name and the second part is +the network address. You can use the `%` character as the wildcard to enable +user name matching from any address or network matching for all users. After the +list of users and networks the keyword match is expected. -After this either the keyword `any` `all` or `strict_all` is expected. This defined how the rules are matched. If `any` is used when the first rule is matched the query is considered blocked and the rest of the rules are skipped. 
If instead the `all` keyword is used all rules must match for the query to be blocked. The `strict_all` is the same as `all` but it checks the rules from left to right in the order they were listed. If one of these does not match, the rest of the rules are not checked. This could be useful in situations where you would for example combine `limit_queries` and `regex` rules. By using `strict_all` you can have the `regex` rule first and the `limit_queries` rule second. This way the rule only matches if the `regex` rule matches enough times for the `limit_queries` rule to match. +After this either the keyword `any` `all` or `strict_all` is expected. This +defined how the rules are matched. If `any` is used when the first rule is +matched the query is considered blocked and the rest of the rules are +skipped. If instead the `all` keyword is used all rules must match for the query +to be blocked. The `strict_all` is the same as `all` but it checks the rules +from left to right in the order they were listed. If one of these does not +match, the rest of the rules are not checked. This could be useful in situations +where you would for example combine `limit_queries` and `regex` rules. By using +`strict_all` you can have the `regex` rule first and the `limit_queries` rule +second. This way the rule only matches if the `regex` rule matches enough times +for the `limit_queries` rule to match. -After the matching part comes the rules keyword after which a list of rule names is expected. This allows reusing of the rules and enables varying levels of query restriction. +After the matching part comes the rules keyword after which a list of rule names +is expected. This allows reusing of the rules and enables varying levels of +query restriction. ## Module commands -Read [Module Commands](../Reference/Module-Commands.md) documentation for details about module commands. +Read [Module Commands](../Reference/Module-Commands.md) documentation for +details about module commands. The dbfwfilter supports the following module commands. @@ -213,16 +256,23 @@ Shows the current statistics of the rules. ### Use Case 1 - Prevent rapid execution of specific queries -To prevent the excessive use of a database we want to set a limit on the rate of queries. We only want to apply this limit to certain queries that cause unwanted behavior. To achieve this we can use a regular expression. +To prevent the excessive use of a database we want to set a limit on the rate of +queries. We only want to apply this limit to certain queries that cause unwanted +behavior. To achieve this we can use a regular expression. -First we define the limit on the rate of queries. The first parameter for the rule sets the number of allowed queries to 10 queries and the second parameter sets the rate of sampling to 5 seconds. If a user executes queries faster than this, any further queries that match the regular expression are blocked for 60 seconds. +First we define the limit on the rate of queries. The first parameter for the +rule sets the number of allowed queries to 10 queries and the second parameter +sets the rate of sampling to 5 seconds. If a user executes queries faster than +this, any further queries that match the regular expression are blocked for 60 +seconds. ``` rule limit_rate_of_queries deny limit_queries 10 5 60 rule query_regex deny regex '.*select.*from.*user_data.*' ``` -To apply these rules we combine them into a single rule by adding a `users` line to the rule file. 
+To apply these rules we combine them into a single rule by adding a `users` line +to the rule file. ``` users %@% match all rules limit_rate_of_queries query_regex @@ -230,16 +280,24 @@ users %@% match all rules limit_rate_of_queries query_regex ### Use Case 2 - Only allow deletes with a where clause -We have a table which contains all the managers of a company. We want to prevent accidental deletes into this table where the where clause is missing. This poses a problem, we don't want to require all the delete queries to have a where clause. We only want to prevent the data in the managers table from being deleted without a where clause. +We have a table which contains all the managers of a company. We want to prevent +accidental deletes into this table where the where clause is missing. This poses +a problem, we don't want to require all the delete queries to have a where +clause. We only want to prevent the data in the managers table from being +deleted without a where clause. -To achieve this, we need two rules. The first rule defines that all delete operations must have a where clause. This rule alone does us no good so we need a second one. The second rule blocks all queries that match a regular expression. +To achieve this, we need two rules. The first rule defines that all delete +operations must have a where clause. This rule alone does us no good so we need +a second one. The second rule blocks all queries that match a regular +expression. ``` rule safe_delete deny no_where_clause on_queries delete rule managers_table deny regex '.*from.*managers.*' ``` -When we combine these two rules we get the result we want. To combine these two rules add the following line to the rule file. +When we combine these two rules we get the result we want. To combine these two +rules add the following line to the rule file. ``` users %@% match all rules safe_delete managers_table From d32a2e410b26e87d868ccc4eb99fce1994ce237d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Mon, 3 Apr 2017 12:40:06 +0300 Subject: [PATCH 24/32] Use `matched` instead of `blocked` As queries that match a rule aren't always blocked, it's more appropriate to use `matched` instead of `blocked`. --- Documentation/Filters/Database-Firewall-Filter.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Documentation/Filters/Database-Firewall-Filter.md b/Documentation/Filters/Database-Firewall-Filter.md index 46721d112..4e18302a9 100644 --- a/Documentation/Filters/Database-Firewall-Filter.md +++ b/Documentation/Filters/Database-Firewall-Filter.md @@ -139,13 +139,13 @@ This rule blocks all queries that use the wildcard character *. #### `columns` This rule expects a list of values after the `columns` keyword. These values are -interpreted as column names and if a query targets any of these, it is blocked. +interpreted as column names and if a query targets any of these, it is matched. #### `function` This rule expects a list of values after the `function` keyword. These values are interpreted as function names and if a query uses any of these, it is -blocked. The symbolic comparison operators (`<`, `>`, `>=` etc.) are also +matched. The symbolic comparison operators (`<`, `>`, `>=` etc.) are also considered functions whereas the text versions (`NOT`, `IS`, `IS NOT` etc.) are not considered functions. @@ -220,9 +220,9 @@ list of users and networks the keyword match is expected. After this either the keyword `any` `all` or `strict_all` is expected. 
This defined how the rules are matched. If `any` is used when the first rule is -matched the query is considered blocked and the rest of the rules are +matched the query is considered as matched and the rest of the rules are skipped. If instead the `all` keyword is used all rules must match for the query -to be blocked. The `strict_all` is the same as `all` but it checks the rules +to be considered as matched. The `strict_all` is the same as `all` but it checks the rules from left to right in the order they were listed. If one of these does not match, the rest of the rules are not checked. This could be useful in situations where you would for example combine `limit_queries` and `regex` rules. By using From 6ef90bf9b62e7418d793df83b45c6c2778c44a45 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Mon, 3 Apr 2017 12:44:22 +0300 Subject: [PATCH 25/32] Add a warning about `limit_queries` and `action=allow` The `limit_queries` rule should not be used in the `allow` mode. --- Documentation/Filters/Database-Firewall-Filter.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Documentation/Filters/Database-Firewall-Filter.md b/Documentation/Filters/Database-Firewall-Filter.md index 4e18302a9..ab1dcdff4 100644 --- a/Documentation/Filters/Database-Firewall-Filter.md +++ b/Documentation/Filters/Database-Firewall-Filter.md @@ -163,6 +163,8 @@ number of allowed queries during the time period. The second is the time period in seconds and the third is the amount of time for which the rule is considered active and blocking. +**WARNING:** Using `limit_queries` in `action=allow` is not supported. + #### `no_where_clause` This rule inspects the query and blocks it if it has no WHERE clause. For From 0c3f9ffa2ea8b7f58a083d5669df4c5200deb00b Mon Sep 17 00:00:00 2001 From: MassimilianoPinto Date: Mon, 3 Apr 2017 16:01:59 +0200 Subject: [PATCH 26/32] Maxrows: documentation update and use of MXS_MODULE_PARAM_SIZE for max_resultset_size parameter max_resultset_size parameter now uses MXS_MODULE_PARAM_SIZE and config_get_size Documentation change follows the change. --- Documentation/Filters/Maxrows.md | 10 ++++++---- server/modules/filter/maxrows/maxrows.c | 7 ++++--- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/Documentation/Filters/Maxrows.md b/Documentation/Filters/Maxrows.md index f85a929ce..a871a2f55 100644 --- a/Documentation/Filters/Maxrows.md +++ b/Documentation/Filters/Maxrows.md @@ -46,14 +46,16 @@ The default value is `-1`. #### `max_resultset_size` -Specifies the maximum size a resultset can have, measured in kibibytes, -in order to be sent to the client. A resultset larger than this, will +Specifies the maximum size a resultset can have in order +to be sent to the client. A resultset larger than this, will not be sent: an empty resultset will be sent instead. +The size can be specified as described +[here](../Getting-Started/Configuration-Guide.md#sizes). ``` -max_resultset_size=128 +max_resultset_size=128Ki ``` -The default value is 64. 
+The default value is 64Ki #### `max_resultset_return` diff --git a/server/modules/filter/maxrows/maxrows.c b/server/modules/filter/maxrows/maxrows.c index 77b691ffc..1676a3cc7 100644 --- a/server/modules/filter/maxrows/maxrows.c +++ b/server/modules/filter/maxrows/maxrows.c @@ -131,7 +131,7 @@ MXS_MODULE* MXS_CREATE_MODULE() }, { "max_resultset_size", - MXS_MODULE_PARAM_COUNT, + MXS_MODULE_PARAM_SIZE, MAXROWS_DEFAULT_MAX_RESULTSET_SIZE }, { @@ -244,8 +244,8 @@ static MXS_FILTER *createInstance(const char *name, cinstance->name = name; cinstance->config.max_resultset_rows = config_get_integer(params, "max_resultset_rows"); - cinstance->config.max_resultset_size = config_get_integer(params, - "max_resultset_size"); + cinstance->config.max_resultset_size = config_get_size(params, + "max_resultset_size"); cinstance->config.m_return = config_get_enum(params, "max_resultset_return", return_option_values); @@ -1226,6 +1226,7 @@ static int send_maxrows_reply_limit(MAXROWS_SESSION_DATA *csdata) default: MXS_ERROR("MaxRows config value not expected!"); ss_dassert(!true); + return 0; break; } } From 1ff83150f031e69804961900eb1a58133ca1bf82 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Tue, 4 Apr 2017 09:42:31 +0300 Subject: [PATCH 27/32] Revert "Route statements in larger batches" This reverts commit f3c83770903151a0a3b53593c3e05fa0af94cd5f. The functionality was used implicitly by modules that declare the RCAP_TYPE_CONTIGUOUS_OUTPUT capability. --- server/modules/protocol/MySQL/MySQLBackend/mysql_backend.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/server/modules/protocol/MySQL/MySQLBackend/mysql_backend.c b/server/modules/protocol/MySQL/MySQLBackend/mysql_backend.c index 5da4d7797..6276cae0f 100644 --- a/server/modules/protocol/MySQL/MySQLBackend/mysql_backend.c +++ b/server/modules/protocol/MySQL/MySQLBackend/mysql_backend.c @@ -778,6 +778,11 @@ gw_read_and_write(DCB *dcb) return 0; } } + else if (rcap_type_required(capabilities, RCAP_TYPE_STMT_OUTPUT) && + !rcap_type_required(capabilities, RCAP_TYPE_RESULTSET_OUTPUT)) + { + stmt = modutil_get_next_MySQL_packet(&read_buffer); + } else { stmt = read_buffer; From 860b14526c5cabf0ecefff526f30f3ac666ff34f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Tue, 4 Apr 2017 10:26:28 +0300 Subject: [PATCH 28/32] Fix mxs_mysql_is_result_set The function assumed that the packet payload was always at least two bytes. --- server/modules/protocol/MySQL/mysql_common.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/server/modules/protocol/MySQL/mysql_common.c b/server/modules/protocol/MySQL/mysql_common.c index a2db80fd4..23b896f92 100644 --- a/server/modules/protocol/MySQL/mysql_common.c +++ b/server/modules/protocol/MySQL/mysql_common.c @@ -1562,11 +1562,9 @@ bool mxs_mysql_is_result_set(GWBUF *buffer) case MYSQL_REPLY_EOF: /** Not a result set */ break; + default: - if (gwbuf_copy_data(buffer, MYSQL_HEADER_LEN + 1, 1, &cmd) && cmd > 1) - { - rval = true; - } + rval = true; break; } } From 9f14f3659f80fd355ea70cb87b7a5afd7e4e7778 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Tue, 4 Apr 2017 14:13:04 +0300 Subject: [PATCH 29/32] Only check user permissions on startup When users were loaded, the permissions for the service user were checked. The conditional that makes sure the check is executed only at startup was checking the listener's users instead of the SQLite handle which caused all reloads of users to check the permissions. 
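To illustrate the intended control flow, here is a minimal sketch only — `auth_instance`, `check_permissions` and `open_database` are hypothetical stand-ins for the MaxScale internals touched by this patch, not the real API:

```c
/* Sketch of the fix: verify service permissions only on the very first
 * load, i.e. when the per-instance handle has not yet been created, so
 * that later reloads of the user data skip the permission check. */
#include <stdbool.h>
#include <stddef.h>

struct auth_instance
{
    void *handle; /* stands in for the instance's SQLite handle */
};

/* trivial stubs so the sketch compiles on its own */
static bool check_permissions(void) { return true; }
static bool open_database(void **handle) { *handle = (void *) 1; return true; }

static int load_users(struct auth_instance *instance)
{
    if (instance->handle == NULL) /* first load only */
    {
        if (!check_permissions() || !open_database(&instance->handle))
        {
            return -1; /* fatal, as in the patch */
        }
    }

    /* ... (re)load the user data without re-checking permissions ... */
    return 0;
}
```
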
--- server/modules/authenticator/MySQLAuth/mysql_auth.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/server/modules/authenticator/MySQLAuth/mysql_auth.c b/server/modules/authenticator/MySQLAuth/mysql_auth.c index 8aed71e89..e9339c823 100644 --- a/server/modules/authenticator/MySQLAuth/mysql_auth.c +++ b/server/modules/authenticator/MySQLAuth/mysql_auth.c @@ -607,12 +607,6 @@ static int mysql_auth_load_users(SERV_LISTENER *port) int rc = MXS_AUTH_LOADUSERS_OK; SERVICE *service = port->listener->service; MYSQL_AUTH *instance = (MYSQL_AUTH*)port->auth_instance; - - if (port->users == NULL && !check_service_permissions(port->service)) - { - return MXS_AUTH_LOADUSERS_FATAL; - } - bool skip_local = false; if (instance->handle == NULL) @@ -620,7 +614,8 @@ static int mysql_auth_load_users(SERV_LISTENER *port) skip_local = true; char path[PATH_MAX]; get_database_path(port, path, sizeof(path)); - if (!open_instance_database(path, &instance->handle)) + if (!check_service_permissions(port->service) || + !open_instance_database(path, &instance->handle)) { return MXS_AUTH_LOADUSERS_FATAL; } From 872f69b6814df370e1d06a5c38cef6b8d5511e47 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Tue, 4 Apr 2017 14:46:33 +0300 Subject: [PATCH 30/32] Also check IPv6 mapped IPv4 addresses as IPv4 If a client connects from an IPv4 address, but the listener listens on an IPv6 address, the client IP will be a IPv6 mapped IPv4 address e.g. ::ffff:127.0.0.1. A grant for an IPv4 address should still match an IPv6 mapped IPv4 address. --- server/modules/authenticator/MySQLAuth/dbusers.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/server/modules/authenticator/MySQLAuth/dbusers.c b/server/modules/authenticator/MySQLAuth/dbusers.c index 23e712c2f..d8d07d6e2 100644 --- a/server/modules/authenticator/MySQLAuth/dbusers.c +++ b/server/modules/authenticator/MySQLAuth/dbusers.c @@ -202,6 +202,20 @@ int validate_mysql_user(sqlite3 *handle, DCB *dcb, MYSQL_session *session, sqlite3_free(err); } + /** Check for IPv6 mapped IPv4 address */ + if (!res.ok && strchr(dcb->remote, ':') && strchr(dcb->remote, '.')) + { + const char *ipv4 = strrchr(dcb->remote, ':') + 1; + sprintf(sql, mysqlauth_validate_user_query, session->user, ipv4, ipv4, + session->db, session->db); + + if (sqlite3_exec(handle, sql, auth_cb, &res, &err) != SQLITE_OK) + { + MXS_ERROR("Failed to execute auth query: %s", err); + sqlite3_free(err); + } + } + if (!res.ok) { /** From dca086571b5d71b1aba891c97fb4fd5945d39f46 Mon Sep 17 00:00:00 2001 From: Esa Korhonen Date: Mon, 3 Apr 2017 13:26:13 +0300 Subject: [PATCH 31/32] TestMaxScalePCRE2: Fix memory leaks Not really leaks, but this reduces needless clutter in the valgrind output. 
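The cleanup discipline can be shown in isolation as well (a sketch, not the actual test; the pattern and subject strings are arbitrary): every `pcre2_compile()` and `pcre2_match_data_create_from_pattern()` is paired with the matching free call, so the valgrind output stays focused on real leaks.

```c
/* Minimal sketch: every PCRE2 object created in a test is freed again,
 * so valgrind reports are not cluttered by allocations the test simply
 * never released. */
#define PCRE2_CODE_UNIT_WIDTH 8
#include <pcre2.h>
#include <stdio.h>

int main(void)
{
    int errcode;
    PCRE2_SIZE erroffset;
    pcre2_code *re = pcre2_compile((PCRE2_SPTR) "wor(ld)", PCRE2_ZERO_TERMINATED,
                                   0, &errcode, &erroffset, NULL);
    if (re == NULL)
    {
        return 1;
    }

    pcre2_match_data *md = pcre2_match_data_create_from_pattern(re, NULL);
    if (md != NULL)
    {
        int rc = pcre2_match(re, (PCRE2_SPTR) "hello world", PCRE2_ZERO_TERMINATED,
                             0, 0, md, NULL);
        printf("match rc: %d\n", rc);
        pcre2_match_data_free(md);   /* pair every ..._create() with a free */
    }

    pcre2_code_free(re);             /* ...and every pcre2_compile() with a free */
    return 0;
}
```
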
--- server/core/test/testmaxscalepcre2.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/server/core/test/testmaxscalepcre2.c b/server/core/test/testmaxscalepcre2.c index ceb3d142e..5e7ca18b0 100644 --- a/server/core/test/testmaxscalepcre2.c +++ b/server/core/test/testmaxscalepcre2.c @@ -89,11 +89,19 @@ static int test2() test_assert(result == MXS_PCRE2_MATCH, "Substitution should substitute"); test_assert(strcmp(dest, expected) == 0, "Replaced text should match expected text"); + size = 1000; + dest = MXS_REALLOC(dest, size); result = mxs_pcre2_substitute(re2, subject, good_replace, &dest, &size); test_assert(result == MXS_PCRE2_NOMATCH, "Non-matching substitution should not substitute"); + size = 1000; + dest = MXS_REALLOC(dest, size); result = mxs_pcre2_substitute(re, subject, bad_replace, &dest, &size); test_assert(result == MXS_PCRE2_ERROR, "Bad substitution should return an error"); + + MXS_FREE(dest); + pcre2_code_free(re); + pcre2_code_free(re2); return 0; } From e0a98f65395426b547c64b026084da9be85561bd Mon Sep 17 00:00:00 2001 From: Esa Korhonen Date: Mon, 3 Apr 2017 16:35:15 +0300 Subject: [PATCH 32/32] Fix calls of pcre2_substitute If the output buffer given to pcre2_substitute is too small, an error value is written to the last parameter (output length). That value should not be used for calculations. This patch gives a copy as parameter instead. Coincidentally, this commit fixes the crashes of query classifier tests. Also, increase buffer growth rate in utils.c. --- server/core/config.c | 8 ++++--- server/core/maxscale_pcre2.c | 10 ++++---- server/core/utils.c | 24 ++++++++++++------- .../modules/filter/regexfilter/regexfilter.c | 7 ++++-- 4 files changed, 32 insertions(+), 17 deletions(-) diff --git a/server/core/config.c b/server/core/config.c index 932ef3bd2..d3065c7d6 100644 --- a/server/core/config.c +++ b/server/core/config.c @@ -290,12 +290,14 @@ char* config_clean_string_list(const char* str) const char *replace = "$1,"; int rval = 0; + size_t destsize_tmp = destsize; while ((rval = pcre2_substitute(re, (PCRE2_SPTR) str, PCRE2_ZERO_TERMINATED, 0, PCRE2_SUBSTITUTE_GLOBAL, data, NULL, (PCRE2_SPTR) replace, PCRE2_ZERO_TERMINATED, - (PCRE2_UCHAR*) dest, &destsize)) == PCRE2_ERROR_NOMEMORY) + (PCRE2_UCHAR*) dest, &destsize_tmp)) == PCRE2_ERROR_NOMEMORY) { - char* tmp = MXS_REALLOC(dest, destsize * 2); + destsize_tmp = 2 * destsize; + char* tmp = MXS_REALLOC(dest, destsize_tmp); if (tmp == NULL) { MXS_FREE(dest); @@ -303,7 +305,7 @@ char* config_clean_string_list(const char* str) break; } dest = tmp; - destsize *= 2; + destsize = destsize_tmp; } /** Remove the trailing comma */ diff --git a/server/core/maxscale_pcre2.c b/server/core/maxscale_pcre2.c index ab43e672d..1490f4f64 100644 --- a/server/core/maxscale_pcre2.c +++ b/server/core/maxscale_pcre2.c @@ -39,7 +39,7 @@ * @param subject Subject string * @param replace Replacement string * @param dest Destination buffer - * @param size Size of the desination buffer + * @param size Size of the destination buffer * @return MXS_PCRE2_MATCH if replacements were made, MXS_PCRE2_NOMATCH if nothing * was replaced or MXS_PCRE2_ERROR if memory reallocation failed */ @@ -52,18 +52,20 @@ mxs_pcre2_result_t mxs_pcre2_substitute(pcre2_code *re, const char *subject, con if (mdata) { + size_t size_tmp = *size; while ((rc = pcre2_substitute(re, (PCRE2_SPTR) subject, PCRE2_ZERO_TERMINATED, 0, PCRE2_SUBSTITUTE_GLOBAL, mdata, NULL, (PCRE2_SPTR) replace, PCRE2_ZERO_TERMINATED, - (PCRE2_UCHAR*) *dest, size)) == PCRE2_ERROR_NOMEMORY) + 
(PCRE2_UCHAR*) *dest, &size_tmp)) == PCRE2_ERROR_NOMEMORY) { - char *tmp = MXS_REALLOC(*dest, *size * 2); + size_tmp = 2 * (*size); + char *tmp = MXS_REALLOC(*dest, size_tmp); if (tmp == NULL) { break; } *dest = tmp; - *size *= 2; + *size = size_tmp; } if (rc > 0) diff --git a/server/core/utils.c b/server/core/utils.c index 2c6eb5748..57fe2d9af 100644 --- a/server/core/utils.c +++ b/server/core/utils.c @@ -548,7 +548,7 @@ strip_escape_chars(char* val) return true; } -#define BUFFER_GROWTH_RATE 1.2 +#define BUFFER_GROWTH_RATE 2.0 static pcre2_code* remove_comments_re = NULL; static const PCRE2_SPTR remove_comments_pattern = (PCRE2_SPTR) "(?:`[^`]*`\\K)|(\\/[*](?!(M?!)).*?[*]\\/)|(?:#.*|--[[:space:]].*)"; @@ -576,18 +576,19 @@ char* remove_mysql_comments(const char** src, const size_t* srcsize, char** dest char* output = *dest; size_t orig_len = *srcsize; size_t len = output ? *destsize : orig_len; - if (orig_len > 0) { if ((output || (output = (char*) malloc(len * sizeof (char)))) && (mdata = pcre2_match_data_create_from_pattern(remove_comments_re, NULL))) { + size_t len_tmp = len; while (pcre2_substitute(remove_comments_re, (PCRE2_SPTR) * src, orig_len, 0, PCRE2_SUBSTITUTE_GLOBAL, mdata, NULL, replace, PCRE2_ZERO_TERMINATED, - (PCRE2_UCHAR8*) output, &len) == PCRE2_ERROR_NOMEMORY) + (PCRE2_UCHAR8*) output, &len_tmp) == PCRE2_ERROR_NOMEMORY) { - char* tmp = (char*) realloc(output, (len = (size_t) (len * BUFFER_GROWTH_RATE + 1))); + len_tmp = (size_t) (len * BUFFER_GROWTH_RATE + 1); + char* tmp = (char*) realloc(output, len_tmp); if (tmp == NULL) { free(output); @@ -595,6 +596,7 @@ char* remove_mysql_comments(const char** src, const size_t* srcsize, char** dest break; } output = tmp; + len = len_tmp; } pcre2_match_data_free(mdata); } @@ -648,12 +650,14 @@ char* replace_values(const char** src, const size_t* srcsize, char** dest, size_ if ((output || (output = (char*) malloc(len * sizeof (char)))) && (mdata = pcre2_match_data_create_from_pattern(replace_values_re, NULL))) { + size_t len_tmp = len; while (pcre2_substitute(replace_values_re, (PCRE2_SPTR) * src, orig_len, 0, PCRE2_SUBSTITUTE_GLOBAL, mdata, NULL, replace, PCRE2_ZERO_TERMINATED, - (PCRE2_UCHAR8*) output, &len) == PCRE2_ERROR_NOMEMORY) + (PCRE2_UCHAR8*) output, &len_tmp) == PCRE2_ERROR_NOMEMORY) { - char* tmp = (char*) realloc(output, (len = (size_t) (len * BUFFER_GROWTH_RATE + 1))); + len_tmp = (size_t) (len * BUFFER_GROWTH_RATE + 1); + char* tmp = (char*) realloc(output, len_tmp); if (tmp == NULL) { free(output); @@ -661,6 +665,7 @@ char* replace_values(const char** src, const size_t* srcsize, char** dest, size_ break; } output = tmp; + len = len_tmp; } pcre2_match_data_free(mdata); } @@ -801,12 +806,14 @@ char* replace_quoted(const char** src, const size_t* srcsize, char** dest, size_ if ((output || (output = (char*) malloc(len * sizeof (char)))) && (mdata = pcre2_match_data_create_from_pattern(replace_quoted_re, NULL))) { + size_t len_tmp = len; while (pcre2_substitute(replace_quoted_re, (PCRE2_SPTR) * src, orig_len, 0, PCRE2_SUBSTITUTE_GLOBAL, mdata, NULL, replace, PCRE2_ZERO_TERMINATED, - (PCRE2_UCHAR8*) output, &len) == PCRE2_ERROR_NOMEMORY) + (PCRE2_UCHAR8*) output, &len_tmp) == PCRE2_ERROR_NOMEMORY) { - char* tmp = (char*) realloc(output, (len = (size_t) (len * BUFFER_GROWTH_RATE + 1))); + len_tmp = (size_t) (len * BUFFER_GROWTH_RATE + 1); + char* tmp = (char*) realloc(output, len_tmp); if (tmp == NULL) { free(output); @@ -814,6 +821,7 @@ char* replace_quoted(const char** src, const size_t* srcsize, char** dest, size_ 
break; } output = tmp; + len = len_tmp; } pcre2_match_data_free(mdata); } diff --git a/server/modules/filter/regexfilter/regexfilter.c b/server/modules/filter/regexfilter/regexfilter.c index cba2b308e..8774fc88b 100644 --- a/server/modules/filter/regexfilter/regexfilter.c +++ b/server/modules/filter/regexfilter/regexfilter.c @@ -427,19 +427,22 @@ regex_replace(const char *sql, pcre2_code *re, pcre2_match_data *match_data, con result_size = strlen(sql) + strlen(replace); result = MXS_MALLOC(result_size); + size_t result_size_tmp = result_size; while (result && pcre2_substitute(re, (PCRE2_SPTR) sql, PCRE2_ZERO_TERMINATED, 0, PCRE2_SUBSTITUTE_GLOBAL, match_data, NULL, (PCRE2_SPTR) replace, PCRE2_ZERO_TERMINATED, - (PCRE2_UCHAR*) result, (PCRE2_SIZE*) & result_size) == PCRE2_ERROR_NOMEMORY) + (PCRE2_UCHAR*) result, (PCRE2_SIZE*) & result_size_tmp) == PCRE2_ERROR_NOMEMORY) { + result_size_tmp = 1.5 * result_size; char *tmp; - if ((tmp = MXS_REALLOC(result, (result_size *= 1.5))) == NULL) + if ((tmp = MXS_REALLOC(result, result_size_tmp)) == NULL) { MXS_FREE(result); result = NULL; } result = tmp; + result_size = result_size_tmp; } } return result;
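
The pattern shared by all four call sites above can be summarised in a standalone sketch (illustrative only; the helper name, initial size and growth factor are arbitrary). The essential detail is that a copy of the buffer size is handed to `pcre2_substitute()`, because on `PCRE2_ERROR_NOMEMORY` the function may leave an unusable value in that output parameter, and the next allocation must therefore be computed from the trusted size.

```c
/* Illustrative only: grow the output buffer until pcre2_substitute()
 * succeeds, always passing PCRE2 a *copy* of the current size so that a
 * failed call cannot corrupt the value used for the next reallocation. */
#define PCRE2_CODE_UNIT_WIDTH 8
#include <pcre2.h>
#include <stdlib.h>
#include <string.h>

char *substitute_all(pcre2_code *re, const char *subject, const char *replacement)
{
    size_t size = strlen(subject) + 1;   /* initial guess */
    char *dest = malloc(size);
    pcre2_match_data *md = pcre2_match_data_create_from_pattern(re, NULL);

    if (dest == NULL || md == NULL)
    {
        free(dest);
        pcre2_match_data_free(md);
        return NULL;
    }

    int rc;
    size_t size_tmp = size;              /* copy handed to PCRE2 */

    while ((rc = pcre2_substitute(re, (PCRE2_SPTR) subject, PCRE2_ZERO_TERMINATED,
                                  0, PCRE2_SUBSTITUTE_GLOBAL, md, NULL,
                                  (PCRE2_SPTR) replacement, PCRE2_ZERO_TERMINATED,
                                  (PCRE2_UCHAR*) dest, &size_tmp)) == PCRE2_ERROR_NOMEMORY)
    {
        size_tmp = size * 2;             /* grow from the trusted size */
        char *tmp = realloc(dest, size_tmp);

        if (tmp == NULL)
        {
            free(dest);
            pcre2_match_data_free(md);
            return NULL;
        }

        dest = tmp;
        size = size_tmp;
    }

    pcre2_match_data_free(md);

    if (rc < 0)                          /* a hard PCRE2 error */
    {
        free(dest);
        return NULL;
    }

    return dest;                         /* rc == 0 means no match; caller frees */
}
```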