From 9f66278ac2419688b0c0179e054bc48817fbf04f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Tue, 3 Oct 2017 22:32:56 +0300 Subject: [PATCH 001/101] Fix typo in IP testing database The test had a typo in the SQL that created the database so the later queries that referred to it always failed. --- maxscale-system-test/testconnections.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/maxscale-system-test/testconnections.cpp b/maxscale-system-test/testconnections.cpp index 3f9151ed2..a55f93929 100644 --- a/maxscale-system-test/testconnections.cpp +++ b/maxscale-system-test/testconnections.cpp @@ -1652,12 +1652,12 @@ int TestConnections::get_client_ip(char * ip) unsigned int conn_num = 0; connect_rwsplit(); - if (execute_query(conn_rwsplit, "CREATE DATABASE IF NOT EXISTS db_to_check_clent_ip") != 0 ) + if (execute_query(conn_rwsplit, "CREATE DATABASE IF NOT EXISTS db_to_check_client_ip") != 0 ) { return ret; } close_rwsplit(); - conn = open_conn_db(rwsplit_port, maxscale_IP, "db_to_check_clent_ip", maxscale_user, + conn = open_conn_db(rwsplit_port, maxscale_IP, "db_to_check_client_ip", maxscale_user, maxscale_password, ssl); if (conn != NULL) From 617abd0d52e93ef66afad0212ce713f12479e40a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Wed, 4 Oct 2017 00:45:05 +0300 Subject: [PATCH 002/101] Fix read of uninitialized memory when DNS lookup fails If the DNS lookup fails, the destination string buffer is used as-is and thus it needs to be initialized to an empty string. 
--- server/modules/authenticator/MySQLAuth/dbusers.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/modules/authenticator/MySQLAuth/dbusers.c b/server/modules/authenticator/MySQLAuth/dbusers.c index e275d7ba7..eb72bba95 100644 --- a/server/modules/authenticator/MySQLAuth/dbusers.c +++ b/server/modules/authenticator/MySQLAuth/dbusers.c @@ -230,7 +230,7 @@ int validate_mysql_user(MYSQL_AUTH* instance, DCB *dcb, MYSQL_session *session, * Try authentication with the hostname instead of the IP. We do this only * as a last resort so we avoid the high cost of the DNS lookup. */ - char client_hostname[MYSQL_HOST_MAXLEN]; + char client_hostname[MYSQL_HOST_MAXLEN] = ""; get_hostname(dcb, client_hostname, sizeof(client_hostname) - 1); sprintf(sql, mysqlauth_validate_user_query, session->user, client_hostname, From 7dfa1577fc21139a699e78a88c94f68d801ff66f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Wed, 4 Oct 2017 00:47:31 +0300 Subject: [PATCH 003/101] Use new MaxScale object names in mxs1457_ignore_deleted The names of the monitors and services in tests that use MaxScale 2.2 need to use the hyphen-transformed versions of the object names. --- maxscale-system-test/mxs1457_ignore_deleted.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/maxscale-system-test/mxs1457_ignore_deleted.cpp b/maxscale-system-test/mxs1457_ignore_deleted.cpp index a0ee712eb..c7965aeda 100644 --- a/maxscale-system-test/mxs1457_ignore_deleted.cpp +++ b/maxscale-system-test/mxs1457_ignore_deleted.cpp @@ -21,7 +21,7 @@ int main(int argc, char *argv[]) * The monitor needs to be stopped before the slaves are stopped to prevent * it from detecting the broken replication. 
*/ - test.ssh_maxscale(true, "maxadmin shutdown monitor \"MySQL Monitor\""); + test.ssh_maxscale(true, "maxadmin shutdown monitor MySQL-Monitor"); // Stop slaves and drop the user on the master test.repl->stop_slaves(); test.repl->connect(); @@ -33,7 +33,7 @@ int main(int argc, char *argv[]) test.add_result(mysql_errno(conn) == 0, "Connection with users from master should fail"); mysql_close(conn); - test.ssh_maxscale(true, "maxadmin remove server server1 \"RW Split Router\""); + test.ssh_maxscale(true, "maxadmin remove server server1 RW-Split-Router"); conn = open_conn_db(test.rwsplit_port, test.maxscale_ip(), "test", "auth_test", "test", false); test.add_result(mysql_errno(conn), "Connection should be OK: %s", mysql_error(conn)); test.try_query(conn, "SELECT 1"); From 9f74878794e3325aff1e2c5c8fa2fd53b4cc4457 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Wed, 4 Oct 2017 10:34:06 +0300 Subject: [PATCH 004/101] Run smoke tests by default The tests are now run as the shorter version by default. 
--- maxscale-system-test/testconnections.cpp | 29 ++++++++++++++++-------- 1 file changed, 19 insertions(+), 10 deletions(-) diff --git a/maxscale-system-test/testconnections.cpp b/maxscale-system-test/testconnections.cpp index a55f93929..cf86a20d5 100644 --- a/maxscale-system-test/testconnections.cpp +++ b/maxscale-system-test/testconnections.cpp @@ -63,10 +63,21 @@ void TestConnections::require_galera_version(const char *version) } TestConnections::TestConnections(int argc, char *argv[]): - no_backend_log_copy(false), use_snapshots(false), verbose(false), rwsplit_port(4006), - readconn_master_port(4008), readconn_slave_port(4009), binlog_port(5306), - global_result(0), binlog_cmd_option(0), enable_timeouts(true), use_ipv6(false), - no_galera(false), binlog_master_gtid(false), binlog_slave_gtid(false), + no_backend_log_copy(false), + use_snapshots(false), + verbose(false), + smoke(true), + rwsplit_port(4006), + readconn_master_port(4008), + readconn_slave_port(4009), + binlog_port(5306), + global_result(0), + binlog_cmd_option(0), + enable_timeouts(true), + use_ipv6(false), + no_galera(false), + binlog_master_gtid(false), + binlog_slave_gtid(false), no_vm_revert(true) { signal_set(SIGSEGV, sigfatal_handler); @@ -508,14 +519,12 @@ int TestConnections::read_env() } env = getenv("smoke"); - if ((env != NULL) && ((strcasecmp(env, "yes") == 0) || (strcasecmp(env, "true") == 0) )) + if (env) { - smoke = true; - } - else - { - smoke = false; + smoke = strcasecmp(env, "yes") == 0 || strcasecmp(env, "true") == 0 || + strcasecmp(env, "1") == 0 || strcasecmp(env, "on") == 0; } + env = getenv("threads"); if ((env != NULL)) { From 503e768d80a5ee25b8cbd7f7a007ed26c01c8538 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Wed, 4 Oct 2017 10:57:12 +0300 Subject: [PATCH 005/101] Add methods for changing the active MaxScale instance The new function allows two MaxScale instances to be controlled via the same TestConnections object. 
This will allow testing of Maxscale clusters. --- maxscale-system-test/testconnections.cpp | 44 ++++++++++++++++++++++++ maxscale-system-test/testconnections.h | 19 ++++++++++ 2 files changed, 63 insertions(+) diff --git a/maxscale-system-test/testconnections.cpp b/maxscale-system-test/testconnections.cpp index cf86a20d5..70ff3e63f 100644 --- a/maxscale-system-test/testconnections.cpp +++ b/maxscale-system-test/testconnections.cpp @@ -278,6 +278,13 @@ TestConnections::TestConnections(int argc, char *argv[]): if (maxscale_init) { init_maxscale(); + + if (!secondary_maxscale_IP.empty()) + { + set_active_maxscale(MXS_SECONDARY); + init_maxscale(); + set_active_maxscale(MXS_PRIMARY); + } } if (backend_ssl) @@ -372,12 +379,28 @@ int TestConnections::read_env() if (env != NULL) { sprintf(maxscale_IP, "%s", env); + primary_maxscale_IP = env; } + env = getenv("maxscale_network6"); if (env != NULL) { sprintf(maxscale_IP6, "%s", env); + primary_maxscale_IP6 = env; } + + env = getenv("maxscale2_IP"); + if (env != NULL) + { + secondary_maxscale_IP = env; + } + + env = getenv("maxscale2_network6"); + if (env != NULL) + { + secondary_maxscale_IP6 = env; + } + env = getenv("maxscale_user"); if (env != NULL) { @@ -2235,3 +2258,24 @@ char* TestConnections::maxscale_ip() const { return use_ipv6 ?
(char*)maxscale_IP6 : (char*)maxscale_IP; } + +void TestConnections::set_active_maxscale(enum test_target target) +{ + switch (target) + { + case MXS_PRIMARY: + strcpy(maxscale_IP, primary_maxscale_IP.c_str()); + strcpy(maxscale_IP6, primary_maxscale_IP6.c_str()); + break; + + case MXS_SECONDARY: + strcpy(maxscale_IP, secondary_maxscale_IP.c_str()); + strcpy(maxscale_IP6, secondary_maxscale_IP6.c_str()); + break; + + default: + tprintf("Wrong enum value for 'set_active_maxscale': 0x%02x", target); + exit(1); + break; + } +} diff --git a/maxscale-system-test/testconnections.h b/maxscale-system-test/testconnections.h index 191609ae4..2fd263961 100644 --- a/maxscale-system-test/testconnections.h +++ b/maxscale-system-test/testconnections.h @@ -7,6 +7,12 @@ #include #include +enum test_target +{ + MXS_PRIMARY, + MXS_SECONDARY +}; + /** * @brief Class contains references to Master/Slave and Galera test setups * Test setup should consist of two setups: one Master/Slave and one Galera. @@ -123,6 +129,12 @@ public: */ char maxscale_IP[1024]; + /** IPv4 and IPv6 addresses for the primary and secondary instances */ + std::string primary_maxscale_IP; + std::string primary_maxscale_IP6; + std::string secondary_maxscale_IP; + std::string secondary_maxscale_IP6; + /** * @brief Maxscale_IP6 Maxscale machine IP address (IPv6) */ @@ -715,6 +727,13 @@ public: * @param dest Destination file name for actual configuration file */ void process_template(const char *src, const char *dest = "/etc/maxscale.cnf"); + + /** + * @brief Change the target MaxScale + * + * @param target Either MXS_PRIMARY or MXS_SECONDARY + */ + void set_active_maxscale(enum test_target target); }; /** From da2201d16c0daad7ca6259d84f81939c42df8236 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Wed, 4 Oct 2017 12:16:01 +0300 Subject: [PATCH 006/101] Use absolute paths in kerberos_setup The files were located in the vagrant user's home directory but superuser access is needed to copy them 
into /etc. --- maxscale-system-test/kerberos_setup.cpp | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/maxscale-system-test/kerberos_setup.cpp b/maxscale-system-test/kerberos_setup.cpp index c1b04a1c8..72556325a 100644 --- a/maxscale-system-test/kerberos_setup.cpp +++ b/maxscale-system-test/kerberos_setup.cpp @@ -37,10 +37,10 @@ int main(int argc, char *argv[]) { Test->repl->ssh_node(i, true, "yum install -y MariaDB-gssapi-server MariaDB-gssapi-client krb5-workstation pam_krb5"); Test->repl->copy_to_node(str, (char *) "~/", i); - Test->repl->ssh_node(i, true, "cp ~/krb5.conf /etc/"); + Test->repl->ssh_node(i, true, "cp %s/krb5.conf /etc/", Test->repl->access_homedir[i]); Test->repl->copy_to_node((char *) "hosts", (char *) "~/", i); - Test->repl->ssh_node(i, true, "cp ~/hosts /etc/"); + Test->repl->ssh_node(i, true, "cp %s/hosts /etc/", Test->repl->access_homedir[i]); } Test->tprintf("Copying 'hosts' and krb5.conf files to Maxscale node\n"); @@ -97,10 +97,10 @@ int main(int argc, char *argv[]) { sprintf(str, "%s/kerb.cnf", test_dir); Test->repl->copy_to_node(str, (char *) "~/", i); - Test->repl->ssh_node(i, true, "cp ~/kerb.cnf /etc/my.cnf.d/"); + Test->repl->ssh_node(i, true, "cp %s/kerb.cnf /etc/my.cnf.d/", Test->repl->access_homedir[i]); Test->repl->copy_to_node((char *) "krb5.keytab", (char *) "~/", i); - Test->repl->ssh_node(i, true, "cp ~/krb5.keytab /etc/"); + Test->repl->ssh_node(i, true, "cp %s/krb5.keytab /etc/", Test->repl->access_homedir[i]); Test->repl->ssh_node(i, false, "kinit mariadb/maxscale.test@MAXSCALE.TEST -k -t /etc/krb5.keytab"); } @@ -135,7 +135,7 @@ int main(int argc, char *argv[]) for (int i = 0; i < Test->repl->N; i++) { - Test->repl->ssh_node(i, true, "sudo rm -f /etc/my.cnf.d/kerb.cnf"); + Test->repl->ssh_node(i, true, "rm -f /etc/my.cnf.d/kerb.cnf"); } int rval = Test->global_result; From a3ba81ddbed765e80dc9bd60974ae9f40a0c16f2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Tue, 
3 Oct 2017 13:40:30 +0300 Subject: [PATCH 007/101] Add example to REST API module command documentation Added an example that demonstrates how to call a module command with parameters via the REST API. --- Documentation/REST-API/Resources-MaxScale.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/Documentation/REST-API/Resources-MaxScale.md b/Documentation/REST-API/Resources-MaxScale.md index 9f0540644..b8870d7b3 100644 --- a/Documentation/REST-API/Resources-MaxScale.md +++ b/Documentation/REST-API/Resources-MaxScale.md @@ -504,6 +504,13 @@ For commands that can modify data: POST /v1/maxscale/modules/:module/:command ``` +Here is an example POST request to the dbfwfilter module command _reload_ with +two parameters, the name of the filter instance and the path to a file: + +``` +POST /v1/maxscale/modules/dbfwfilter/reload?my-dbfwfilter-instance&/path/to/file.txt +``` + #### Response Command with output: From a4271cb94e04bf6aa9885d98a2bbd6c3a42b86ac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Thu, 5 Oct 2017 08:06:10 +0300 Subject: [PATCH 008/101] Order member variable initialization in TestConnections The variables are now initialized in order and always in the constructor.
--- maxscale-system-test/testconnections.cpp | 38 ++++++++++-------------- maxscale-system-test/testconnections.h | 2 +- 2 files changed, 17 insertions(+), 23 deletions(-) diff --git a/maxscale-system-test/testconnections.cpp b/maxscale-system-test/testconnections.cpp index 70ff3e63f..0cec09b9f 100644 --- a/maxscale-system-test/testconnections.cpp +++ b/maxscale-system-test/testconnections.cpp @@ -63,22 +63,28 @@ void TestConnections::require_galera_version(const char *version) } TestConnections::TestConnections(int argc, char *argv[]): - no_backend_log_copy(false), - use_snapshots(false), - verbose(false), - smoke(true), + enable_timeouts(true), + global_result(0), rwsplit_port(4006), readconn_master_port(4008), readconn_slave_port(4009), binlog_port(5306), - global_result(0), - binlog_cmd_option(0), - enable_timeouts(true), + conn_rwsplit(NULL), + conn_master(NULL), + conn_slave(NULL), use_ipv6(false), - no_galera(false), + use_snapshots(false), + no_backend_log_copy(false), + verbose(false), + smoke(true), + binlog_cmd_option(0), + ssl(false), + backend_ssl(false), binlog_master_gtid(false), binlog_slave_gtid(false), - no_vm_revert(true) + no_galera(false), + no_vm_revert(true), + threads(4) { signal_set(SIGSEGV, sigfatal_handler); signal_set(SIGABRT, sigfatal_handler); @@ -484,7 +490,7 @@ int TestConnections::read_env() { sprintf(maxscale_access_sudo, "%s", env); } - ssl = false; + env = getenv("ssl"); if ((env != NULL) && ((strcasecmp(env, "yes") == 0) || (strcasecmp(env, "true") == 0) )) { @@ -527,10 +533,6 @@ int TestConnections::read_env() { backend_ssl = true; } - else - { - backend_ssl = false; - } if (strcmp(maxscale_access_user, "root") == 0) { @@ -553,20 +555,12 @@ int TestConnections::read_env() { sscanf(env, "%d", &threads); } - else - { - threads = 4; - } env = getenv("use_snapshots"); if (env != NULL && ((strcasecmp(env, "yes") == 0) || (strcasecmp(env, "true") == 0) )) { use_snapshots = true; } - else - { - use_snapshots = false; - } env = 
getenv("take_snapshot_command"); if (env != NULL) { diff --git a/maxscale-system-test/testconnections.h b/maxscale-system-test/testconnections.h index 2fd263961..9bd036489 100644 --- a/maxscale-system-test/testconnections.h +++ b/maxscale-system-test/testconnections.h @@ -257,7 +257,7 @@ public: /** * @brief ssl if true ssl will be used */ - int ssl; + bool ssl; /** * @brief backend_ssl if true ssl configuratio for all servers will be added From 0cb72937cd672a6473cde314227d1314530c968f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Thu, 5 Oct 2017 08:09:58 +0300 Subject: [PATCH 009/101] Disable multi-MaxScale initialization by default Only tests that require multiple MaxScale instances should enable the multi-MaxScale mode. --- maxscale-system-test/testconnections.cpp | 8 +++++++- maxscale-system-test/testconnections.h | 3 +++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/maxscale-system-test/testconnections.cpp b/maxscale-system-test/testconnections.cpp index 0cec09b9f..c3843f71e 100644 --- a/maxscale-system-test/testconnections.cpp +++ b/maxscale-system-test/testconnections.cpp @@ -16,6 +16,7 @@ namespace maxscale { static bool start = true; static bool check_nodes = true; +static bool multiple_maxscales = false; static std::string required_repl_version; static std::string required_galera_version; } @@ -62,6 +63,11 @@ void TestConnections::require_galera_version(const char *version) maxscale::required_galera_version = version; } +void TestConnections::multiple_maxscales(bool value) +{ + maxscale::multiple_maxscales = value; +} + TestConnections::TestConnections(int argc, char *argv[]): enable_timeouts(true), global_result(0), @@ -285,7 +291,7 @@ TestConnections::TestConnections(int argc, char *argv[]): { init_maxscale(); - if (!secondary_maxscale_IP.empty()) + if (maxscale::multiple_maxscales && !secondary_maxscale_IP.empty()) { set_active_maxscale(MXS_SECONDARY); init_maxscale(); diff --git 
a/maxscale-system-test/testconnections.h b/maxscale-system-test/testconnections.h index 9bd036489..72cbcdfa6 100644 --- a/maxscale-system-test/testconnections.h +++ b/maxscale-system-test/testconnections.h @@ -337,6 +337,9 @@ public: static void require_repl_version(const char *version); static void require_galera_version(const char *version); + /** Initialize multiple MaxScale instances */ + void multiple_maxscales(bool value); + /** * @brief add_result adds result to global_result and prints error message if result is not 0 * @param result 0 if step PASSED From 8159798d853615eabee7e9aa8407d332cd788799 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Thu, 5 Oct 2017 08:14:39 +0300 Subject: [PATCH 010/101] Clean up setup_binlog_gtid Removed redundant operations and cleaned up unused code. --- maxscale-system-test/setup_binlog_gtid.cpp | 45 +++++----------------- 1 file changed, 10 insertions(+), 35 deletions(-) diff --git a/maxscale-system-test/setup_binlog_gtid.cpp b/maxscale-system-test/setup_binlog_gtid.cpp index a535a68a6..361e59289 100644 --- a/maxscale-system-test/setup_binlog_gtid.cpp +++ b/maxscale-system-test/setup_binlog_gtid.cpp @@ -1,48 +1,23 @@ /** - * @file setup_binlog_gtid.cpp test of simple binlog router setup - + * @file setup_binlog_gtid.cpp - Basic GTID testing of binlogrouter */ - -#include #include "testconnections.h" -#include "maxadmin_operations.h" -#include "sql_t1.h" - #include "test_binlog_fnc.h" - int main(int argc, char *argv[]) { - TestConnections * Test = new TestConnections(argc, argv); - Test->set_timeout(3000); - int options_set = 3; - if (Test->smoke) - { - options_set = 1; - } + TestConnections test(argc, argv); + test.binlog_master_gtid = true; + test.binlog_slave_gtid = true; - Test->repl->connect(); - execute_query(Test->repl->nodes[0], (char *) "DROP TABLE IF EXISTS t1;"); - Test->repl->close_connections(); - sleep(5); + test.start_binlog(); + test_binlog(&test); - Test->binlog_master_gtid = true; - 
Test->binlog_slave_gtid = true; -// for (int option = 0; option < options_set; option++) - //{ - // Test->binlog_cmd_option = option; - Test->start_binlog(); - test_binlog(Test); - //} + test.check_log_err("SET NAMES utf8mb4", false); + test.check_log_err("set autocommit=1", false); + test.check_log_err("select USER()", false); - Test->check_log_err("SET NAMES utf8mb4", false); - Test->check_log_err("set autocommit=1", false); - Test->check_log_err("select USER()", false); - - int rval = Test->global_result; - delete Test; - return rval; + return test.global_result; } - From d391db6c2a8da5c0621dea441f6bb4edee31afe8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Thu, 5 Oct 2017 08:20:08 +0300 Subject: [PATCH 011/101] Fix sync_slaves The function did not check whether the file name was valid. --- maxscale-system-test/mariadb_nodes.cpp | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/maxscale-system-test/mariadb_nodes.cpp b/maxscale-system-test/mariadb_nodes.cpp index 1d45b99ce..acd683f74 100644 --- a/maxscale-system-test/mariadb_nodes.cpp +++ b/maxscale-system-test/mariadb_nodes.cpp @@ -1315,19 +1315,27 @@ void Mariadb_nodes::sync_slaves(int node) if (res) { MYSQL_ROW row = mysql_fetch_row(res); - if (row && row[node] && row[1]) + if (row && row[0] && row[1]) { - const char* file_suffix = strchr(row[node], '.') + 1; - int filenum = atoi(file_suffix); - int pos = atoi(row[1]); - - for (int i = 0; i < this->N; i++) + const char* file_suffix = strchr(row[0], '.'); + if (file_suffix) { - if (i != node) + file_suffix++; + int filenum = atoi(file_suffix); + int pos = atoi(row[1]); + + for (int i = 0; i < this->N; i++) { - wait_until_pos(this->nodes[i], filenum, pos); + if (i != node) + { + wait_until_pos(this->nodes[i], filenum, pos); + } } } + else + { + printf("Cannot sync slaves, invalid binlog file name: %s", row[0]); + } } mysql_free_result(res); } From bbd5fe8288d8135a4918826f553ea30f901f50b6 Mon 
Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Thu, 5 Oct 2017 11:09:09 +0300 Subject: [PATCH 012/101] Fix GTID setup in tests The GTID setup in tests executed code that was not supposed to be executed when GTID is in use. --- maxscale-system-test/testconnections.cpp | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/maxscale-system-test/testconnections.cpp b/maxscale-system-test/testconnections.cpp index c3843f71e..d6c706592 100644 --- a/maxscale-system-test/testconnections.cpp +++ b/maxscale-system-test/testconnections.cpp @@ -1058,14 +1058,14 @@ int TestConnections::start_binlog() fflush(stdout); tprintf("Maxscale binlog master pos : %s\n", log_pos); fflush(stdout); - } - tprintf("Setup all backend nodes except first one to be slaves of binlog Maxscale node\n"); - fflush(stdout); - for (i = 2; i < repl->N; i++) - { - try_query(repl->nodes[i], "stop slave;"); - repl->set_slave(repl->nodes[i], maxscale_IP, binlog_port, log_file, log_pos); + tprintf("Setup all backend nodes except first one to be slaves of binlog Maxscale node\n"); + fflush(stdout); + for (i = 2; i < repl->N; i++) + { + try_query(repl->nodes[i], "stop slave;"); + repl->set_slave(repl->nodes[i], maxscale_IP, binlog_port, log_file, log_pos); + } } repl->close_connections(); From 6dd9b4f23517f683999cc90cf562e9ec5e3b6c13 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Thu, 5 Oct 2017 13:51:14 +0300 Subject: [PATCH 013/101] Add note about MaxCtrl to release notes Added a note about MaxCtrl in the release notes. 
--- .../Release-Notes/MaxScale-2.2.0-Release-Notes.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/Documentation/Release-Notes/MaxScale-2.2.0-Release-Notes.md b/Documentation/Release-Notes/MaxScale-2.2.0-Release-Notes.md index ef5505532..24112776f 100644 --- a/Documentation/Release-Notes/MaxScale-2.2.0-Release-Notes.md +++ b/Documentation/Release-Notes/MaxScale-2.2.0-Release-Notes.md @@ -166,6 +166,15 @@ mycommands.txt`). ## New Features +### MaxCtrl Command Line Client + +MaxCtrl is a new command line client intended to replace MaxAdmin. This +client uses the REST API to communicate with MaxScale in a secure way. The +client is distributed separately in the `maxscale-client` package. + +For more information, refer to the [MaxCtrl](../Reference/MaxCtrl.md) +documentation. + ### MySQL Monitor Crash Safety The MySQL monitor keeps a journal of the state of the servers and the currently From 3dad78e33ae869236698cd1fde19be85217e5b30 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Thu, 5 Oct 2017 14:11:36 +0300 Subject: [PATCH 014/101] Fix issue processing script The script now properly processes repeating commas inside double quotes.
--- Documentation/process.pl | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/Documentation/process.pl b/Documentation/process.pl index fd2f4b3db..69a3107c4 100755 --- a/Documentation/process.pl +++ b/Documentation/process.pl @@ -6,10 +6,12 @@ while (<>) { # Replace commas that are inside double quotes - s/("[^"]*),([^"]*")/$1$2/g; - + while (s/("[^"]*),([^"]*")/$1$2/g) + { + ; + } # Replace the double quotes themselves - s/"([^"]*)"/$1/g; + s/"//g; # Split the line and grab the issue number and description my @parts = split(/,/); From eca25b06abd57e3dd6d3b1151a71c19dafedb08c Mon Sep 17 00:00:00 2001 From: Johan Wikman Date: Thu, 5 Oct 2017 13:29:03 +0300 Subject: [PATCH 015/101] Update Change Log for 2.2 --- Documentation/Changelog.md | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/Documentation/Changelog.md b/Documentation/Changelog.md index faa61c742..63b333a0f 100644 --- a/Documentation/Changelog.md +++ b/Documentation/Changelog.md @@ -1,5 +1,29 @@ # Changelog +## MariaDB MaxScale 2.2 + +* Limited support for Pluggable Authentication Modules (PAM). +* Proxy protocol support for backend connections. +* REST-API for obtaining information about and for manipulating the + resources of MaxScale. +* MaxCtrl, a new command line client for administering MaxScale + implemented in terms of the REST-API. +* Firewall can now prevent the use of functions in conjunction with + certain columns. +* Parser of MaxScale extended to support window functions and CTEs. +* Prepared statements are now parsed and the execution of read only + ones will be routed to slaves. +* Server states are persisted, so in case of crash and restart MaxScale + has the correct server state quicker. +* Monitor scripts are executed synchronously, so they can safely perform + actions that change the server states. +* The Masking filter can now both obfuscate and partially mask columns. +* Binlog router supports MariaDB 10 GTID at both ends.
+* KILL CONNECTION can now be used through MaxScale. + +For more details, please refer to: +* [MariaDB MaxScale 2.2.0 Release Notes](Release-Notes/MaxScale-2.2.0-Release-Notes.md) + ## MariaDB MaxScale 2.1 * MariaDB MaxScale is licensed under MariaDB BSL 1.1. * Hierarchical configuration files are now supported. From 8964581f039e07b4cd105bd91d2bfc31487d6a20 Mon Sep 17 00:00:00 2001 From: Johan Wikman Date: Thu, 5 Oct 2017 14:01:51 +0300 Subject: [PATCH 016/101] Update bug-list of MaxScale 2.2.0 release notes --- .../MaxScale-2.2.0-Release-Notes.md | 26 ++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/Documentation/Release-Notes/MaxScale-2.2.0-Release-Notes.md b/Documentation/Release-Notes/MaxScale-2.2.0-Release-Notes.md index 24112776f..adee01845 100644 --- a/Documentation/Release-Notes/MaxScale-2.2.0-Release-Notes.md +++ b/Documentation/Release-Notes/MaxScale-2.2.0-Release-Notes.md @@ -223,7 +223,31 @@ with functions. For more information about this new rule, read the ## Bug fixes -[Here is a list of bugs fixed since the release of MaxScale 2.1.X.]() +[Here is a list of bugs fixed in MaxScale 2.2.0.](https://jira.mariadb.org/issues/?jql=project%20%3D%20MXS%20AND%20issuetype%20%3D%20Bug%20AND%20status%20%3D%20Closed%20AND%20fixVersion%20%3D%202.2.0) + +* [MXS-1450](https://jira.mariadb.org/browse/MXS-1450) Maxadmin commands with a leading space are silently ignored +* [MXS-1449](https://jira.mariadb.org/browse/MXS-1449) Database change not allowed +* [MXS-1405](https://jira.mariadb.org/browse/MXS-1405) Script launched by monitors should run synchronously +* [MXS-1397](https://jira.mariadb.org/browse/MXS-1397) ReadWriteSplit's master connection can time out if session only issues read-only queries +* [MXS-1359](https://jira.mariadb.org/browse/MXS-1359) qc_sqlite crashes with a very large compound select +* [MXS-1351](https://jira.mariadb.org/browse/MXS-1351) Partially authenticated connections are put into the connection pool +* 
[MXS-1349](https://jira.mariadb.org/browse/MXS-1349) qc_mysqlembedded accesses wrong preparable statement field +* [MXS-1346](https://jira.mariadb.org/browse/MXS-1346) Function blocking per column +* [MXS-1345](https://jira.mariadb.org/browse/MXS-1345) Empty function list is not allowed +* [MXS-1340](https://jira.mariadb.org/browse/MXS-1340) Report true table and not alias name +* [MXS-1339](https://jira.mariadb.org/browse/MXS-1339) QC should return a particular table/database just once +* [MXS-1334](https://jira.mariadb.org/browse/MXS-1334) Build on FreeBSD 11 looks for libdl - how can it be told not to? +* [MXS-1322](https://jira.mariadb.org/browse/MXS-1322) Flushing log should reopen not reopen and truncate. +* [MXS-1307](https://jira.mariadb.org/browse/MXS-1307) Add CTE tests +* [MXS-1265](https://jira.mariadb.org/browse/MXS-1265) strerror_r calls result in compiler warnings +* [MXS-1262](https://jira.mariadb.org/browse/MXS-1262) Mantenance bit(s) should persist after maxscale restart +* [MXS-1221](https://jira.mariadb.org/browse/MXS-1221) Nagios plugin scripts does not process -S option properly +* [MXS-1214](https://jira.mariadb.org/browse/MXS-1214) Streaming Insert Filter gives errors +* [MXS-1203](https://jira.mariadb.org/browse/MXS-1203) Batch inserts through Maxscale with C/J stall +* [MXS-1198](https://jira.mariadb.org/browse/MXS-1198) Interface retry bind interval (of a listener) increases by ten seconds every time it fails (10,20,30,....) 
it should be a fixed interval (and maybe configurable) +* [MXS-1160](https://jira.mariadb.org/browse/MXS-1160) Load infile not working on Schemarouter +* [MXS-1146](https://jira.mariadb.org/browse/MXS-1146) JDBC connection dropping transaction when connecting to MaxScale directly +* [MXS-959](https://jira.mariadb.org/browse/MXS-959) KILL command on wrong connection ID ## Known Issues and Limitations From c6ed779dcfe9cbe25aa43554c4bb893103b8727f Mon Sep 17 00:00:00 2001 From: Johan Wikman Date: Thu, 5 Oct 2017 15:15:55 +0300 Subject: [PATCH 017/101] Fix buffer overflow in test program --- server/core/test/testqueuemanager.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/core/test/testqueuemanager.cc b/server/core/test/testqueuemanager.cc index 75d233144..5e4849949 100644 --- a/server/core/test/testqueuemanager.cc +++ b/server/core/test/testqueuemanager.cc @@ -228,7 +228,7 @@ thread_test(void *arg) static int test2() { - pthread_t tid[NUMBER_OF_THREADS - 1]; + pthread_t tid[NUMBER_OF_THREADS]; int err, i, limit; thread_queue = mxs_queue_alloc(TEST_QUEUE_SIZE, HEARTBEATS_TO_EXPIRE); From 75d298693c8947908a35889bc3e76005b143014a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Thu, 5 Oct 2017 14:51:16 +0300 Subject: [PATCH 018/101] Fix GCC7 warnings in binlogrouter GCC7 reported possible destination buffer overflow in binlogrouter. 
--- server/modules/routing/binlogrouter/blr_slave.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/server/modules/routing/binlogrouter/blr_slave.c b/server/modules/routing/binlogrouter/blr_slave.c index fd20caedd..fd5f4ce48 100644 --- a/server/modules/routing/binlogrouter/blr_slave.c +++ b/server/modules/routing/binlogrouter/blr_slave.c @@ -1183,7 +1183,7 @@ blr_slave_send_slave_status(ROUTER_INSTANCE *router, bool all_slaves) { GWBUF *pkt; - char column[251] = ""; + char column[2048] = ""; uint8_t *ptr; int len, actual_len, col_len, seqno, i; char *dyn_column = NULL; @@ -7428,10 +7428,9 @@ static bool blr_handle_set_stmt(ROUTER_INSTANCE *router, /* Parse the non empty GTID value */ if (heading[0] && !blr_parse_gtid(heading, >id_elms)) { - static const char *err_fmt = "Invalid format for GTID ('%s')" - " set request; use 'X-Y-Z'"; + const char err_fmt[] = "Invalid format for GTID ('%s')" + " set request; use 'X-Y-Z'"; char err_msg[sizeof(err_fmt) + GTID_MAX_LEN + 1]; - sprintf(err_msg, err_fmt, heading); MXS_ERROR("%s", err_msg); From 6ba20795b4089eb0af6ebf566ec28046c3e92d42 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Thu, 5 Oct 2017 14:52:46 +0300 Subject: [PATCH 019/101] Fix GCC7 warnings in cache filter The thread count did not have enough space for a INT_MAX. 
--- server/modules/filter/cache/cachept.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/modules/filter/cache/cachept.cc b/server/modules/filter/cache/cachept.cc index 28d10dbe9..644ab2412 100644 --- a/server/modules/filter/cache/cachept.cc +++ b/server/modules/filter/cache/cachept.cc @@ -164,7 +164,7 @@ CachePT* CachePT::Create(const std::string& name, while (!error && (i < n_threads)) { - char suffix[6]; // Enough for 99999 threads + char suffix[12]; // Enough for INT_MAX threads (sign + 10 digits + NUL) sprintf(suffix, "%d", i); string namest(name + "-" + suffix); From e474b78d95fab55f5e5173cc16640d41dcff55fe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Thu, 5 Oct 2017 16:03:00 +0300 Subject: [PATCH 020/101] Remove unnecessary result processing in readwritesplit The result processing code did unnecessary work to confirm that the result buffers are contiguous. The code also assumed that multiple packets can be routed at the same time when in fact only one contiguous result packet is returned at a time. By assuming that the buffers are contiguous and contain only one packet, most of the copying and buffer manipulation can be avoided.
--- .../routing/readwritesplit/readwritesplit.cc | 45 ++++++++++++++++--- .../routing/readwritesplit/rwsplitsession.cc | 10 ++--- .../routing/readwritesplit/rwsplitsession.hh | 10 ++--- 3 files changed, 49 insertions(+), 16 deletions(-) diff --git a/server/modules/routing/readwritesplit/readwritesplit.cc b/server/modules/routing/readwritesplit/readwritesplit.cc index 067e49572..f9e0f8285 100644 --- a/server/modules/routing/readwritesplit/readwritesplit.cc +++ b/server/modules/routing/readwritesplit/readwritesplit.cc @@ -508,6 +508,25 @@ static bool route_stored_query(RWSplitSession *rses) return rval; } +static bool is_eof(GWBUF* buffer) +{ + uint8_t* data = GWBUF_DATA(buffer); + return data[MYSQL_HEADER_LEN] == MYSQL_REPLY_EOF && + gw_mysql_get_byte3(data) + MYSQL_HEADER_LEN == MYSQL_EOF_PACKET_LEN; +} + +static bool is_large(GWBUF* buffer) +{ + return gw_mysql_get_byte3(GWBUF_DATA(buffer)) == GW_MYSQL_MAX_PACKET_LEN; +} + +static bool more_results_exist(GWBUF* buffer) +{ + ss_dassert(is_eof(buffer)); + uint16_t status = gw_mysql_get_byte2(GWBUF_DATA(buffer) + MYSQL_HEADER_LEN + 1 + 2); + return status & SERVER_MORE_RESULTS_EXIST; +} + /** * @brief Check if we have received a complete reply from the backend * @@ -531,11 +550,23 @@ bool reply_is_complete(SRWBackend backend, GWBUF *buffer) } else { - bool more = false; - modutil_state state = backend->get_modutil_state(); - int old_eof = backend->get_reply_state() == REPLY_STATE_RSET_ROWS ? 1 : 0; - int n_eof = modutil_count_signal_packets(buffer, old_eof, &more, &state); - backend->set_modutil_state(state); + bool large = backend->is_large_packet(); + int n_eof = backend->get_reply_state() == REPLY_STATE_RSET_ROWS ? 
1 : 0; + + if (is_large(buffer)) + { + large = true; + } + else if (large) + { + large = false; + } + else if (is_eof(buffer)) + { + n_eof++; + } + + backend->set_large_packet(large); if (n_eof == 0) { @@ -557,7 +588,7 @@ bool reply_is_complete(SRWBackend backend, GWBUF *buffer) LOG_RS(backend, REPLY_STATE_DONE); backend->set_reply_state(REPLY_STATE_DONE); - if (more) + if (more_results_exist(buffer)) { /** The server will send more resultsets */ LOG_RS(backend, REPLY_STATE_START); @@ -1116,6 +1147,8 @@ static void clientReply(MXS_ROUTER *instance, GWBUF *writebuf, DCB *backend_dcb) { + ss_dassert(GWBUF_IS_CONTIGUOUS(writebuf) && + MYSQL_GET_PAYLOAD_LEN(GWBUF_DATA(writebuf)) + MYSQL_HEADER_LEN == gwbuf_length(writebuf)); RWSplitSession *rses = (RWSplitSession *)router_session; DCB *client_dcb = backend_dcb->session->client_dcb; diff --git a/server/modules/routing/readwritesplit/rwsplitsession.cc b/server/modules/routing/readwritesplit/rwsplitsession.cc index 737f698f9..21bc960ac 100644 --- a/server/modules/routing/readwritesplit/rwsplitsession.cc +++ b/server/modules/routing/readwritesplit/rwsplitsession.cc @@ -17,7 +17,7 @@ RWBackend::RWBackend(SERVER_REF* ref): mxs::Backend(ref), m_reply_state(REPLY_STATE_DONE), - m_modutil_state(MODUTIL_STATE_INIT) + m_large_packet(false) { } @@ -35,14 +35,14 @@ void RWBackend::set_reply_state(reply_state_t state) m_reply_state = state; } -void RWBackend::set_modutil_state(const modutil_state& state) +void RWBackend::set_large_packet(bool value) { - m_modutil_state = state; + m_large_packet = value; } -modutil_state RWBackend::get_modutil_state() const +bool RWBackend::is_large_packet() const { - return m_modutil_state; + return m_large_packet; } bool RWBackend::execute_session_command() diff --git a/server/modules/routing/readwritesplit/rwsplitsession.hh b/server/modules/routing/readwritesplit/rwsplitsession.hh index b1e3c741c..d2561c107 100644 --- a/server/modules/routing/readwritesplit/rwsplitsession.hh +++ 
b/server/modules/routing/readwritesplit/rwsplitsession.hh @@ -52,15 +52,15 @@ public: bool execute_session_command(); bool write(GWBUF* buffer, response_type type = EXPECT_RESPONSE); - void set_modutil_state(const modutil_state& state); - modutil_state get_modutil_state() const; + bool is_large_packet() const; + void set_large_packet(bool value); private: reply_state_t m_reply_state; BackendHandleMap m_ps_handles; /**< Internal ID to backend PS handle mapping */ - modutil_state m_modutil_state; /**< Used to store the state of the EOF packet - * calculation for result sets when the result - * contains very large rows */ + bool m_large_packet; /**< Used to store the state of the EOF packet + *calculation for result sets when the result + * contains very large rows */ }; typedef std::tr1::shared_ptr SRWBackend; From 9ece99646651fa8e0463e470f4c49081ddea90f5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Thu, 5 Oct 2017 17:03:36 +0300 Subject: [PATCH 021/101] Use custom result set detection functions The functions that the readwritesplit uses can assume that the buffer contains only one packet in contiguous memory. 
--- .../routing/readwritesplit/readwritesplit.cc | 28 +++++++++++++++++-- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/server/modules/routing/readwritesplit/readwritesplit.cc b/server/modules/routing/readwritesplit/readwritesplit.cc index f9e0f8285..6da89ae55 100644 --- a/server/modules/routing/readwritesplit/readwritesplit.cc +++ b/server/modules/routing/readwritesplit/readwritesplit.cc @@ -522,11 +522,33 @@ static bool is_large(GWBUF* buffer) static bool more_results_exist(GWBUF* buffer) { - ss_dassert(is_eof(buffer)); + ss_dassert(is_eof(buffer) || mxs_mysql_is_ok_packet(buffer)); uint16_t status = gw_mysql_get_byte2(GWBUF_DATA(buffer) + MYSQL_HEADER_LEN + 1 + 2); return status & SERVER_MORE_RESULTS_EXIST; } +static bool is_result_set(GWBUF *buffer) +{ + bool rval = false; + + switch (GWBUF_DATA(buffer)[MYSQL_HEADER_LEN]) + { + + case MYSQL_REPLY_OK: + case MYSQL_REPLY_ERR: + case MYSQL_REPLY_LOCAL_INFILE: + case MYSQL_REPLY_EOF: + /** Not a result set */ + break; + + default: + rval = true; + break; + } + + return rval; +} + /** * @brief Check if we have received a complete reply from the backend * @@ -539,9 +561,9 @@ bool reply_is_complete(SRWBackend backend, GWBUF *buffer) { mxs_mysql_cmd_t cmd = mxs_mysql_current_command(backend->dcb()->session); - if (backend->get_reply_state() == REPLY_STATE_START && !mxs_mysql_is_result_set(buffer)) + if (backend->get_reply_state() == REPLY_STATE_START && !is_result_set(buffer)) { - if (cmd == MXS_COM_STMT_PREPARE || !mxs_mysql_more_results_after_ok(buffer)) + if (cmd == MXS_COM_STMT_PREPARE || !more_results_exist(buffer)) { /** Not a result set, we have the complete response */ LOG_RS(backend, REPLY_STATE_DONE); From 8bcd30ea7ce110f7684425794e887d380be3fd3b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Thu, 5 Oct 2017 17:49:50 +0300 Subject: [PATCH 022/101] Inline backend related functions Inlined the getter/setter type functions that are often used. 
Profiling shows that inlining the RWBackend get/set functions for the reply state manipulation reduces the relative cost of the function to acceptable levels. Inlining the Backend state function did not have as large an effect but it appears contribute a slight performance boost. --- include/maxscale/backend.hh | 72 +++++++++++++++---- server/core/backend.cc | 72 ------------------- .../routing/readwritesplit/readwritesplit.cc | 21 +++--- .../routing/readwritesplit/rwsplitsession.cc | 20 ------ .../routing/readwritesplit/rwsplitsession.hh | 22 ++++-- 5 files changed, 88 insertions(+), 119 deletions(-) diff --git a/include/maxscale/backend.hh b/include/maxscale/backend.hh index b3df34430..ec02066e9 100644 --- a/include/maxscale/backend.hh +++ b/include/maxscale/backend.hh @@ -112,21 +112,32 @@ public: * * @return Pointer to server reference */ - SERVER_REF* backend() const; + inline SERVER_REF* backend() const + { + ss_dassert(m_backend); + return m_backend; + } /** * @brief Get pointer to server * * @return Pointer to server */ - SERVER* server() const; + inline SERVER* server() const + { + ss_dassert(m_backend); + return m_backend->server; + } /** * @brief Check if a connection to this backend can be made * * @return True if the backend has not failed and a connection can be attempted */ - bool can_connect() const; + inline bool can_connect() const + { + return !has_failed() && SERVER_IS_RUNNING(m_backend->server); + } /** * @brief Create a new connection @@ -149,7 +160,10 @@ public: * * @return Pointer to internal DCB */ - DCB* dcb() const; + inline DCB* dcb() const + { + return m_dcb; + } /** * @brief Write data to the backend server @@ -197,49 +211,70 @@ public: * * @return True if backend is in use */ - bool in_use() const; + inline bool in_use() const + { + return m_state & IN_USE; + } /** * @brief Check if the backend server reference is active * * @return True if the server reference is active */ - bool is_active() const; + inline bool is_active() const + { 
+ return SERVER_REF_IS_ACTIVE(m_backend); + } /** * @brief Check if backend is waiting for a result * * @return True if backend is waiting for a result */ - bool is_waiting_result() const; + inline bool is_waiting_result() const + { + return m_state & WAITING_RESULT; + } /** * @brief Check if the backend is closed * * @return True if the backend is closed */ - bool is_closed() const; + inline bool is_closed() const + { + return m_closed; + } /** * @brief Check if the server is a master * * @return True if server is a master */ - bool is_master() const; + inline bool is_master() const + { + return SERVER_IS_MASTER(m_backend->server); + } /** * @brief Check if the server is a slave * * @return True if the server is a slave */ - bool is_slave() const; + inline bool is_slave() const + { + return SERVER_IS_SLAVE(m_backend->server); + } /** * @brief Check if the server is a relay server * * @return True if the server is a relay server */ - bool is_relay() const; + inline bool is_relay() const + { + return SERVER_IS_RELAY_SERVER(m_backend->server); + } /** * @brief Check if the backend has failed fatally @@ -250,7 +285,10 @@ public: * * @return True if a fatal failure has occurred in the backend server */ - bool has_failed() const; + inline bool has_failed() const + { + return m_state & FATAL_FAILURE; + } /** @@ -258,14 +296,20 @@ public: * * @return The unique object name of this server */ - const char* name() const; + inline const char* name() const + { + return m_backend->server->unique_name; + } /** * @brief Get the address and port as a string * * @return The address and port combined into one string */ - const char* uri() const; + inline const char* uri() const + { + return m_uri.c_str(); + } private: /** diff --git a/server/core/backend.cc b/server/core/backend.cc index 3b658c967..3dd915ca6 100644 --- a/server/core/backend.cc +++ b/server/core/backend.cc @@ -173,23 +173,6 @@ void Backend::set_state(backend_state state) m_state |= state; } -SERVER_REF* 
Backend::backend() const -{ - ss_dassert(m_backend); - return m_backend; -} - -SERVER* Backend::server() const -{ - ss_dassert(m_backend); - return m_backend->server; -} - -bool Backend::can_connect() const -{ - return !has_failed() && SERVER_IS_RUNNING(m_backend->server); -} - bool Backend::connect(MXS_SESSION* session) { bool rval = false; @@ -209,11 +192,6 @@ bool Backend::connect(MXS_SESSION* session) return rval; } -DCB* Backend::dcb() const -{ - return m_dcb; -} - bool Backend::write(GWBUF* buffer, response_type type) { bool rval = m_dcb->func.write(m_dcb, buffer) != 0; @@ -266,53 +244,3 @@ bool Backend::write_stored_command() return rval; } - -bool Backend::in_use() const -{ - return m_state & IN_USE; -} - -bool Backend::is_active() const -{ - return SERVER_REF_IS_ACTIVE(m_backend); -} - -bool Backend::is_waiting_result() const -{ - return m_state & WAITING_RESULT; -} - -bool Backend::is_closed() const -{ - return m_closed; -} - -bool Backend::is_master() const -{ - return SERVER_IS_MASTER(m_backend->server); -} - -bool Backend::is_slave() const -{ - return SERVER_IS_SLAVE(m_backend->server); -} - -bool Backend::is_relay() const -{ - return SERVER_IS_RELAY_SERVER(m_backend->server); -} - -bool Backend::has_failed() const -{ - return m_state & FATAL_FAILURE; -} - -const char* Backend::name() const -{ - return m_backend->server->unique_name; -} - -const char* Backend::uri() const -{ - return m_uri.c_str(); -} diff --git a/server/modules/routing/readwritesplit/readwritesplit.cc b/server/modules/routing/readwritesplit/readwritesplit.cc index 6da89ae55..a8ef8ffcd 100644 --- a/server/modules/routing/readwritesplit/readwritesplit.cc +++ b/server/modules/routing/readwritesplit/readwritesplit.cc @@ -508,26 +508,26 @@ static bool route_stored_query(RWSplitSession *rses) return rval; } -static bool is_eof(GWBUF* buffer) +static inline bool is_eof(GWBUF* buffer) { uint8_t* data = GWBUF_DATA(buffer); return data[MYSQL_HEADER_LEN] == MYSQL_REPLY_EOF && 
gw_mysql_get_byte3(data) + MYSQL_HEADER_LEN == MYSQL_EOF_PACKET_LEN; } -static bool is_large(GWBUF* buffer) +static inline bool is_large(GWBUF* buffer) { return gw_mysql_get_byte3(GWBUF_DATA(buffer)) == GW_MYSQL_MAX_PACKET_LEN; } -static bool more_results_exist(GWBUF* buffer) +static inline bool more_results_exist(GWBUF* buffer) { ss_dassert(is_eof(buffer) || mxs_mysql_is_ok_packet(buffer)); uint16_t status = gw_mysql_get_byte2(GWBUF_DATA(buffer) + MYSQL_HEADER_LEN + 1 + 2); return status & SERVER_MORE_RESULTS_EXIST; } -static bool is_result_set(GWBUF *buffer) +static inline bool is_result_set(GWBUF *buffer) { bool rval = false; @@ -549,6 +549,11 @@ static bool is_result_set(GWBUF *buffer) return rval; } +static inline uint8_t get_cmd(SRWBackend& backend) +{ + return mxs_mysql_current_command(backend->dcb()->session); +} + /** * @brief Check if we have received a complete reply from the backend * @@ -559,11 +564,9 @@ static bool is_result_set(GWBUF *buffer) */ bool reply_is_complete(SRWBackend backend, GWBUF *buffer) { - mxs_mysql_cmd_t cmd = mxs_mysql_current_command(backend->dcb()->session); - if (backend->get_reply_state() == REPLY_STATE_START && !is_result_set(buffer)) { - if (cmd == MXS_COM_STMT_PREPARE || !more_results_exist(buffer)) + if (!more_results_exist(buffer) || get_cmd(backend) == MXS_COM_STMT_PREPARE) { /** Not a result set, we have the complete response */ LOG_RS(backend, REPLY_STATE_DONE); @@ -596,7 +599,7 @@ bool reply_is_complete(SRWBackend backend, GWBUF *buffer) LOG_RS(backend, REPLY_STATE_RSET_COLDEF); backend->set_reply_state(REPLY_STATE_RSET_COLDEF); } - else if (n_eof == 1 && cmd != MXS_COM_FIELD_LIST) + else if (n_eof == 1 && get_cmd(backend) != MXS_COM_FIELD_LIST) { /** Waiting for the EOF packet after the rows */ LOG_RS(backend, REPLY_STATE_RSET_ROWS); @@ -606,7 +609,7 @@ bool reply_is_complete(SRWBackend backend, GWBUF *buffer) { /** We either have a complete result set or a response to * a COM_FIELD_LIST command */ - ss_dassert(n_eof 
== 2 || (n_eof == 1 && cmd == MXS_COM_FIELD_LIST)); + ss_dassert(n_eof == 2 || (n_eof == 1 && get_cmd(backend) == MXS_COM_FIELD_LIST)); LOG_RS(backend, REPLY_STATE_DONE); backend->set_reply_state(REPLY_STATE_DONE); diff --git a/server/modules/routing/readwritesplit/rwsplitsession.cc b/server/modules/routing/readwritesplit/rwsplitsession.cc index 21bc960ac..0fe945494 100644 --- a/server/modules/routing/readwritesplit/rwsplitsession.cc +++ b/server/modules/routing/readwritesplit/rwsplitsession.cc @@ -25,26 +25,6 @@ RWBackend::~RWBackend() { } -reply_state_t RWBackend::get_reply_state() const -{ - return m_reply_state; -} - -void RWBackend::set_reply_state(reply_state_t state) -{ - m_reply_state = state; -} - -void RWBackend::set_large_packet(bool value) -{ - m_large_packet = value; -} - -bool RWBackend::is_large_packet() const -{ - return m_large_packet; -} - bool RWBackend::execute_session_command() { bool expect_response = mxs_mysql_command_will_respond(next_session_command()->get_command()); diff --git a/server/modules/routing/readwritesplit/rwsplitsession.hh b/server/modules/routing/readwritesplit/rwsplitsession.hh index d2561c107..3f48170fc 100644 --- a/server/modules/routing/readwritesplit/rwsplitsession.hh +++ b/server/modules/routing/readwritesplit/rwsplitsession.hh @@ -43,8 +43,15 @@ public: RWBackend(SERVER_REF* ref); ~RWBackend(); - reply_state_t get_reply_state() const; - void set_reply_state(reply_state_t state); + inline reply_state_t get_reply_state() const + { + return m_reply_state; + } + + inline void set_reply_state(reply_state_t state) + { + m_reply_state = state; + } void add_ps_handle(uint32_t id, uint32_t handle); uint32_t get_ps_handle(uint32_t id) const; @@ -52,8 +59,15 @@ public: bool execute_session_command(); bool write(GWBUF* buffer, response_type type = EXPECT_RESPONSE); - bool is_large_packet() const; - void set_large_packet(bool value); + inline void set_large_packet(bool value) + { + m_large_packet = value; + } + + inline bool 
is_large_packet() const + { + return m_large_packet; + } private: reply_state_t m_reply_state; From f26203cec40a02e9d9658abf036026b1e5ea1f48 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Thu, 5 Oct 2017 18:30:41 +0300 Subject: [PATCH 023/101] Make each packet contiguous for RCAP_TYPE_STMT_OUTPUT As each packet is routed separately, they must be made contiguous before routing them. --- server/modules/protocol/MySQL/MySQLBackend/mysql_backend.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/server/modules/protocol/MySQL/MySQLBackend/mysql_backend.c b/server/modules/protocol/MySQL/MySQLBackend/mysql_backend.c index bd13c2701..df66e2b0d 100644 --- a/server/modules/protocol/MySQL/MySQLBackend/mysql_backend.c +++ b/server/modules/protocol/MySQL/MySQLBackend/mysql_backend.c @@ -922,6 +922,12 @@ gw_read_and_write(DCB *dcb) !result_collected) { stmt = modutil_get_next_MySQL_packet(&read_buffer); + + if (!GWBUF_IS_CONTIGUOUS(stmt)) + { + // Make sure the buffer is contiguous + stmt = gwbuf_make_contiguous(stmt); + } } else { From 5f13f1d35877dc1e3b223456ad23038e23cb4a7b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Fri, 6 Oct 2017 03:06:50 +0300 Subject: [PATCH 024/101] Fix handling of collected results The result collection did not reset properly when a non-resultset was returned for a request. As collected result need to be distinguishable from single packet responses, a new buffer type was added. The new buffer type is used by readwritesplit which uses result collection for preparation of prepared statements. Moved the current command tracking to the RWBackend class as the command tracked by the protocol is can change before a response to the executed command is received. Removed a false debug assertion in the mxs_mysql_extract_ps_response function that was triggered when a very large prepared statement response was processed in multiple parts. 
--- include/maxscale/buffer.h | 4 ++- .../MySQL/MySQLBackend/mysql_backend.c | 20 +++++++---- server/modules/protocol/MySQL/mysql_common.cc | 17 --------- .../routing/readwritesplit/readwritesplit.cc | 36 +++++++++++++------ .../routing/readwritesplit/rwsplitsession.cc | 2 ++ .../routing/readwritesplit/rwsplitsession.hh | 6 ++++ 6 files changed, 50 insertions(+), 35 deletions(-) diff --git a/include/maxscale/buffer.h b/include/maxscale/buffer.h index 362a49a3a..dd2caf59b 100644 --- a/include/maxscale/buffer.h +++ b/include/maxscale/buffer.h @@ -57,7 +57,8 @@ typedef enum GWBUF_TYPE_SESCMD = 0x04, GWBUF_TYPE_HTTP = 0x08, GWBUF_TYPE_IGNORABLE = 0x10, - GWBUF_TYPE_COLLECT_RESULT = 0x20 + GWBUF_TYPE_COLLECT_RESULT = 0x20, + GWBUF_TYPE_RESULT = 0x40, } gwbuf_type_t; #define GWBUF_IS_TYPE_UNDEFINED(b) (b->gwbuf_type == 0) @@ -65,6 +66,7 @@ typedef enum #define GWBUF_IS_TYPE_RESPONSE_END(b) (b->gwbuf_type & GWBUF_TYPE_RESPONSE_END) #define GWBUF_IS_TYPE_SESCMD(b) (b->gwbuf_type & GWBUF_TYPE_SESCMD) #define GWBUF_IS_IGNORABLE(b) (b->gwbuf_type & GWBUF_TYPE_IGNORABLE) +#define GWBUF_IS_COLLECTED_RESULT(b) (b->gwbuf_type & GWBUF_TYPE_RESULT) #define GWBUF_SHOULD_COLLECT_RESULT(b) (b->gwbuf_type & GWBUF_TYPE_COLLECT_RESULT) typedef enum diff --git a/server/modules/protocol/MySQL/MySQLBackend/mysql_backend.c b/server/modules/protocol/MySQL/MySQLBackend/mysql_backend.c index df66e2b0d..6dcdb18ca 100644 --- a/server/modules/protocol/MySQL/MySQLBackend/mysql_backend.c +++ b/server/modules/protocol/MySQL/MySQLBackend/mysql_backend.c @@ -758,14 +758,16 @@ gw_read_and_write(DCB *dcb) if (collecting_resultset(proto, capabilities)) { - if (expecting_resultset(proto) && - mxs_mysql_is_result_set(read_buffer)) + if (expecting_resultset(proto)) { - bool more = false; - if (modutil_count_signal_packets(read_buffer, 0, &more, NULL) != 2) + if (mxs_mysql_is_result_set(read_buffer)) { - dcb_readq_prepend(dcb, read_buffer); - return 0; + bool more = false; + if 
(modutil_count_signal_packets(read_buffer, 0, &more, NULL) != 2) + { + dcb_readq_prepend(dcb, read_buffer); + return 0; + } } // Collected the complete result @@ -937,6 +939,12 @@ gw_read_and_write(DCB *dcb) if (session_ok_to_route(dcb)) { + if (result_collected) + { + // Mark that this is a buffer containing a collected result + gwbuf_set_type(stmt, GWBUF_TYPE_RESULT); + } + session->service->router->clientReply(session->service->router_instance, session->router_session, stmt, dcb); diff --git a/server/modules/protocol/MySQL/mysql_common.cc b/server/modules/protocol/MySQL/mysql_common.cc index c4c90255c..404c56ae8 100644 --- a/server/modules/protocol/MySQL/mysql_common.cc +++ b/server/modules/protocol/MySQL/mysql_common.cc @@ -1648,23 +1648,6 @@ bool mxs_mysql_extract_ps_response(GWBUF* buffer, MXS_PS_RESPONSE* out) out->parameters = gw_mysql_get_byte2(params); out->warnings = gw_mysql_get_byte2(warnings); rval = true; - -#ifdef SS_DEBUG - // Make sure that the PS response contains the whole response - bool more; - modutil_state state; - int n_eof = modutil_count_signal_packets(buffer, 0, &more, &state); - int n_expected = 0; - if (out->columns) - { - n_expected++; - } - if (out->parameters) - { - n_expected++; - } - ss_dassert(n_eof == n_expected); -#endif } return rval; diff --git a/server/modules/routing/readwritesplit/readwritesplit.cc b/server/modules/routing/readwritesplit/readwritesplit.cc index a8ef8ffcd..b98465972 100644 --- a/server/modules/routing/readwritesplit/readwritesplit.cc +++ b/server/modules/routing/readwritesplit/readwritesplit.cc @@ -515,6 +515,11 @@ static inline bool is_eof(GWBUF* buffer) gw_mysql_get_byte3(data) + MYSQL_HEADER_LEN == MYSQL_EOF_PACKET_LEN; } +static inline bool is_ok(GWBUF* buffer) +{ + uint8_t* data = GWBUF_DATA(buffer); + return data[MYSQL_HEADER_LEN] == MYSQL_REPLY_OK; +} static inline bool is_large(GWBUF* buffer) { return gw_mysql_get_byte3(GWBUF_DATA(buffer)) == GW_MYSQL_MAX_PACKET_LEN; @@ -549,11 +554,6 @@ static 
inline bool is_result_set(GWBUF *buffer) return rval; } -static inline uint8_t get_cmd(SRWBackend& backend) -{ - return mxs_mysql_current_command(backend->dcb()->session); -} - /** * @brief Check if we have received a complete reply from the backend * @@ -564,9 +564,21 @@ static inline uint8_t get_cmd(SRWBackend& backend) */ bool reply_is_complete(SRWBackend backend, GWBUF *buffer) { - if (backend->get_reply_state() == REPLY_STATE_START && !is_result_set(buffer)) + if (GWBUF_IS_COLLECTED_RESULT(buffer)) { - if (!more_results_exist(buffer) || get_cmd(backend) == MXS_COM_STMT_PREPARE) + // This branch should only be taken with a PS response + ss_dassert(backend->get_reply_state() == REPLY_STATE_START); + ss_dassert(backend->current_command() == MXS_COM_STMT_PREPARE || + backend->current_command() == MXS_COM_QUERY); + + // This is a complete result of a request + LOG_RS(backend, REPLY_STATE_DONE); + backend->set_reply_state(REPLY_STATE_DONE); + } + else if (backend->get_reply_state() == REPLY_STATE_START && !is_result_set(buffer)) + { + if (backend->current_command() == MXS_COM_STMT_PREPARE || + !is_ok(buffer) || !more_results_exist(buffer)) { /** Not a result set, we have the complete response */ LOG_RS(backend, REPLY_STATE_DONE); @@ -599,7 +611,7 @@ bool reply_is_complete(SRWBackend backend, GWBUF *buffer) LOG_RS(backend, REPLY_STATE_RSET_COLDEF); backend->set_reply_state(REPLY_STATE_RSET_COLDEF); } - else if (n_eof == 1 && get_cmd(backend) != MXS_COM_FIELD_LIST) + else if (n_eof == 1 && backend->current_command() != MXS_COM_FIELD_LIST) { /** Waiting for the EOF packet after the rows */ LOG_RS(backend, REPLY_STATE_RSET_ROWS); @@ -609,7 +621,7 @@ bool reply_is_complete(SRWBackend backend, GWBUF *buffer) { /** We either have a complete result set or a response to * a COM_FIELD_LIST command */ - ss_dassert(n_eof == 2 || (n_eof == 1 && get_cmd(backend) == MXS_COM_FIELD_LIST)); + ss_dassert(n_eof == 2 || (n_eof == 1 && backend->current_command() == MXS_COM_FIELD_LIST)); 
LOG_RS(backend, REPLY_STATE_DONE); backend->set_reply_state(REPLY_STATE_DONE); @@ -1172,8 +1184,10 @@ static void clientReply(MXS_ROUTER *instance, GWBUF *writebuf, DCB *backend_dcb) { - ss_dassert(GWBUF_IS_CONTIGUOUS(writebuf) && - MYSQL_GET_PAYLOAD_LEN(GWBUF_DATA(writebuf)) + MYSQL_HEADER_LEN == gwbuf_length(writebuf)); + ss_dassert((GWBUF_IS_CONTIGUOUS(writebuf) && + MYSQL_GET_PAYLOAD_LEN(GWBUF_DATA(writebuf)) + + MYSQL_HEADER_LEN == gwbuf_length(writebuf)) || + GWBUF_IS_COLLECTED_RESULT(writebuf)); RWSplitSession *rses = (RWSplitSession *)router_session; DCB *client_dcb = backend_dcb->session->client_dcb; diff --git a/server/modules/routing/readwritesplit/rwsplitsession.cc b/server/modules/routing/readwritesplit/rwsplitsession.cc index 0fe945494..88456f4c6 100644 --- a/server/modules/routing/readwritesplit/rwsplitsession.cc +++ b/server/modules/routing/readwritesplit/rwsplitsession.cc @@ -60,6 +60,8 @@ bool RWBackend::write(GWBUF* buffer, response_type type) { uint8_t cmd = mxs_mysql_get_command(buffer); + m_command = cmd; + if (is_ps_command(cmd)) { uint32_t id = mxs_mysql_extract_ps_id(buffer); diff --git a/server/modules/routing/readwritesplit/rwsplitsession.hh b/server/modules/routing/readwritesplit/rwsplitsession.hh index 3f48170fc..29511104d 100644 --- a/server/modules/routing/readwritesplit/rwsplitsession.hh +++ b/server/modules/routing/readwritesplit/rwsplitsession.hh @@ -69,12 +69,18 @@ public: return m_large_packet; } + inline uint8_t current_command() const + { + return m_command; + } + private: reply_state_t m_reply_state; BackendHandleMap m_ps_handles; /**< Internal ID to backend PS handle mapping */ bool m_large_packet; /**< Used to store the state of the EOF packet *calculation for result sets when the result * contains very large rows */ + uint8_t m_command; }; typedef std::tr1::shared_ptr SRWBackend; From 81f9425c9b3d8b88de588b79c6601bb401d14b7b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Fri, 6 Oct 2017 11:34:33 
+0300 Subject: [PATCH 025/101] Use references instead of copies of SRWBackend As the DCB passed as the clientReply parameter is guaranteed to match one of the DCBs in the RWBackends. By using a reference, the need to copy a shared_ptr is removed (along with the atomic operation that it implies) thus reducing the overhead in the clientReply and the functions it uses. --- .../routing/readwritesplit/readwritesplit.cc | 74 +++++++++---------- .../readwritesplit/rwsplit_internal.hh | 2 +- .../routing/readwritesplit/rwsplit_mysql.cc | 2 +- 3 files changed, 39 insertions(+), 39 deletions(-) diff --git a/server/modules/routing/readwritesplit/readwritesplit.cc b/server/modules/routing/readwritesplit/readwritesplit.cc index b98465972..3f2dd0db2 100644 --- a/server/modules/routing/readwritesplit/readwritesplit.cc +++ b/server/modules/routing/readwritesplit/readwritesplit.cc @@ -124,6 +124,30 @@ SRWBackend get_backend_from_dcb(RWSplitSession *rses, DCB *dcb) return SRWBackend(); } +static SRWBackend emptyref; + +static SRWBackend& get_backend_ref_from_dcb(RWSplitSession *rses, DCB *dcb) +{ + ss_dassert(dcb->dcb_role == DCB_ROLE_BACKEND_HANDLER); + CHK_DCB(dcb); + CHK_CLIENT_RSES(rses); + + for (SRWBackendList::iterator it = rses->backends.begin(); + it != rses->backends.end(); it++) + { + SRWBackend& backend = *it; + + if (backend->dcb() == dcb) + { + return backend; + } + } + + /** We should always have a valid backend reference */ + ss_dassert(false); + return emptyref; +} + /** * @brief Process router options * @@ -562,22 +586,13 @@ static inline bool is_result_set(GWBUF *buffer) * * @return True if the complete response has been received */ -bool reply_is_complete(SRWBackend backend, GWBUF *buffer) +bool reply_is_complete(SRWBackend& backend, GWBUF *buffer) { - if (GWBUF_IS_COLLECTED_RESULT(buffer)) + if (backend->get_reply_state() == REPLY_STATE_START && + (!is_result_set(buffer) || GWBUF_IS_COLLECTED_RESULT(buffer))) { - // This branch should only be taken with a PS 
response - ss_dassert(backend->get_reply_state() == REPLY_STATE_START); - ss_dassert(backend->current_command() == MXS_COM_STMT_PREPARE || - backend->current_command() == MXS_COM_QUERY); - - // This is a complete result of a request - LOG_RS(backend, REPLY_STATE_DONE); - backend->set_reply_state(REPLY_STATE_DONE); - } - else if (backend->get_reply_state() == REPLY_STATE_START && !is_result_set(buffer)) - { - if (backend->current_command() == MXS_COM_STMT_PREPARE || + if (GWBUF_IS_COLLECTED_RESULT(buffer) || + backend->current_command() == MXS_COM_STMT_PREPARE || !is_ok(buffer) || !more_results_exist(buffer)) { /** Not a result set, we have the complete response */ @@ -1190,16 +1205,10 @@ static void clientReply(MXS_ROUTER *instance, GWBUF_IS_COLLECTED_RESULT(writebuf)); RWSplitSession *rses = (RWSplitSession *)router_session; DCB *client_dcb = backend_dcb->session->client_dcb; - CHK_CLIENT_RSES(rses); + ss_dassert(!rses->rses_closed); - if (rses->rses_closed) - { - gwbuf_free(writebuf); - return; - } - - SRWBackend backend = get_backend_from_dcb(rses, backend_dcb); + SRWBackend& backend = get_backend_ref_from_dcb(rses, backend_dcb); if (backend->get_reply_state() == REPLY_STATE_DONE) { @@ -1253,32 +1262,23 @@ static void clientReply(MXS_ROUTER *instance, bool queue_routed = false; - if (rses->expected_responses == 0) + if (rses->expected_responses == 0 && rses->query_queue) { - for (SRWBackendList::iterator it = rses->backends.begin(); - it != rses->backends.end(); it++) - { - ss_dassert((*it)->get_reply_state() == REPLY_STATE_DONE || (*it)->is_closed()); - } - - queue_routed = rses->query_queue != NULL; + queue_routed = true; route_stored_query(rses); } - else - { - ss_dassert(rses->expected_responses > 0); - } - if (writebuf && client_dcb) + if (writebuf) { + ss_dassert(client_dcb); /** Write reply to client DCB */ MXS_SESSION_ROUTE_REPLY(backend_dcb->session, writebuf); } /** Check pending session commands */ else if (!queue_routed && 
backend->session_command_count()) { - MXS_INFO("Backend %s processed reply and starts to execute active cursor.", - backend->uri()); + MXS_DEBUG("Backend %s processed reply and starts to execute active cursor.", + backend->uri()); if (backend->execute_session_command()) { diff --git a/server/modules/routing/readwritesplit/rwsplit_internal.hh b/server/modules/routing/readwritesplit/rwsplit_internal.hh index cf8ff3460..ef9563dd4 100644 --- a/server/modules/routing/readwritesplit/rwsplit_internal.hh +++ b/server/modules/routing/readwritesplit/rwsplit_internal.hh @@ -47,7 +47,7 @@ bool route_single_stmt(RWSplit *inst, RWSplitSession *rses, GWBUF *querybuf); void closed_session_reply(GWBUF *querybuf); void print_error_packet(RWSplitSession *rses, GWBUF *buf, DCB *dcb); -void check_session_command_reply(GWBUF *writebuf, SRWBackend bref); +void check_session_command_reply(GWBUF *buffer, SRWBackend& backend); bool execute_sescmd_in_backend(SRWBackend& backend_ref); bool handle_target_is_all(route_target_t route_target, RWSplit *inst, RWSplitSession *rses, diff --git a/server/modules/routing/readwritesplit/rwsplit_mysql.cc b/server/modules/routing/readwritesplit/rwsplit_mysql.cc index 21d5a9229..824e5a057 100644 --- a/server/modules/routing/readwritesplit/rwsplit_mysql.cc +++ b/server/modules/routing/readwritesplit/rwsplit_mysql.cc @@ -291,7 +291,7 @@ void closed_session_reply(GWBUF *querybuf) * @param buffer Query buffer containing reply data * @param backend Router session data for a backend server */ -void check_session_command_reply(GWBUF *buffer, SRWBackend backend) +void check_session_command_reply(GWBUF *buffer, SRWBackend& backend) { if (MYSQL_IS_ERROR_PACKET(((uint8_t *)GWBUF_DATA(buffer)))) { From 8f076be142d57b781ec346e8d8952752a42ef111 Mon Sep 17 00:00:00 2001 From: Johan Wikman Date: Fri, 6 Oct 2017 12:59:31 +0300 Subject: [PATCH 026/101] Bail out earlier when walking parse tree No point walking the parse tree, if no information will be collected. 
--- query_classifier/qc_sqlite/qc_sqlite.cc | 124 +++++++++++++++--------- 1 file changed, 79 insertions(+), 45 deletions(-) diff --git a/query_classifier/qc_sqlite/qc_sqlite.cc b/query_classifier/qc_sqlite/qc_sqlite.cc index a984b1286..e4bb2d113 100644 --- a/query_classifier/qc_sqlite/qc_sqlite.cc +++ b/query_classifier/qc_sqlite/qc_sqlite.cc @@ -484,19 +484,27 @@ public: // PUBLIC for now at least. /** - * Returns whether a field is sequence related. + * Returns whether sequence related functions should be checked for. * - * @param zDatabase The database/schema or NULL. - * @param zTable The table or NULL. - * @param zColumn The column. + * Only if we are in Oracle mode or parsing as 10.3 we need to check. * - * @return True, if the field is sequence related, false otherwise. + * @return True, if they need to be checked for, false otherwise. */ - bool is_sequence_related_field(const char* zDatabase, - const char* zTable, - const char* zColumn) const + bool must_check_sequence_related_functions() const { - return is_sequence_related_function(zColumn); + return (m_sql_mode == QC_SQL_MODE_ORACLE) || (this_unit.parse_as == QC_PARSE_AS_103); + } + + /** + * Returns whether fields should be collected. + * + * @return True, if should be, false otherwise. + */ + bool must_collect_fields() const + { + // We must collect if fields should be collected and they have not + // been collected yet. + return (m_collect & QC_COLLECT_FIELDS) && !(m_collected & QC_COLLECT_FIELDS); } /** @@ -534,6 +542,22 @@ public: return rv; } + /** + * Returns whether a field is sequence related. + * + * @param zDatabase The database/schema or NULL. + * @param zTable The table or NULL. + * @param zColumn The column. + * + * @return True, if the field is sequence related, false otherwise. 
+ */ + bool is_sequence_related_field(const char* zDatabase, + const char* zTable, + const char* zColumn) const + { + return is_sequence_related_function(zColumn); + } + static void honour_aliases(const QcAliases* pAliases, const char** pzDatabase, const char** pzTable) @@ -614,13 +638,14 @@ public: // NOTE: This must be first, so that the type mask is properly updated // NOTE: in case zColumn is "currval" etc. - if (is_sequence_related_field(zDatabase, zTable, zColumn)) + if (must_check_sequence_related_functions() && + is_sequence_related_field(zDatabase, zTable, zColumn)) { m_type_mask |= QUERY_TYPE_WRITE; return; } - if (!(m_collect & QC_COLLECT_FIELDS) || (m_collected & QC_COLLECT_FIELDS)) + if (!must_collect_fields()) { // If field information should not be collected, or if field information // has already been collected, we just return. @@ -669,42 +694,45 @@ public: bool should_collect_database = zDatabase && (should_collect_alias || should_collect(QC_COLLECT_DATABASES)); - const char* zCollected_database = NULL; - const char* zCollected_table = NULL; - - size_t nDatabase = zDatabase ? strlen(zDatabase) : 0; - size_t nTable = zTable ? strlen(zTable) : 0; - - char database[nDatabase + 1]; - char table[nTable + 1]; - - if (should_collect_database) + if (should_collect_table || should_collect_database) { - strcpy(database, zDatabase); - exposed_sqlite3Dequote(database); - } + const char* zCollected_database = NULL; + const char* zCollected_table = NULL; - if (should_collect_table) - { - if (strcasecmp(zTable, "DUAL") != 0) + size_t nDatabase = zDatabase ? strlen(zDatabase) : 0; + size_t nTable = zTable ? 
strlen(zTable) : 0; + + char database[nDatabase + 1]; + char table[nTable + 1]; + + if (should_collect_database) { - strcpy(table, zTable); - exposed_sqlite3Dequote(table); - - zCollected_table = update_table_names(database, nDatabase, table, nTable); + strcpy(database, zDatabase); + exposed_sqlite3Dequote(database); } - } - if (should_collect_database) - { - zCollected_database = update_database_names(database); - } + if (should_collect_table) + { + if (strcasecmp(zTable, "DUAL") != 0) + { + strcpy(table, zTable); + exposed_sqlite3Dequote(table); - if (pAliases && zCollected_table && zAlias) - { - QcAliasValue value(zCollected_database, zCollected_table); + zCollected_table = update_table_names(database, nDatabase, table, nTable); + } + } - pAliases->insert(QcAliases::value_type(zAlias, value)); + if (should_collect_database) + { + zCollected_database = update_database_names(database); + } + + if (pAliases && zCollected_table && zAlias) + { + QcAliasValue value(zCollected_database, zCollected_table); + + pAliases->insert(QcAliases::value_type(zAlias, value)); + } } } @@ -1145,9 +1173,12 @@ public: const char* zTable; const char* zColumn; - if (get_field_name(pExpr, &zDatabase, &zTable, &zColumn)) + if (must_check_sequence_related_functions() || must_collect_fields()) { - update_field_info(pAliases, zDatabase, zTable, zColumn, pExclude); + if (get_field_name(pExpr, &zDatabase, &zTable, &zColumn)) + { + update_field_info(pAliases, zDatabase, zTable, zColumn, pExclude); + } } } @@ -1167,11 +1198,14 @@ public: const IdList* pIds, const ExprList* pExclude) { - for (int i = 0; i < pIds->nId; ++i) + if (must_check_sequence_related_functions() || must_collect_fields()) { - IdList::IdList_item* pItem = &pIds->a[i]; + for (int i = 0; i < pIds->nId; ++i) + { + IdList::IdList_item* pItem = &pIds->a[i]; - update_field_info(pAliases, NULL, NULL, pItem->zName, pExclude); + update_field_info(pAliases, NULL, NULL, pItem->zName, pExclude); + } } } From 
83b26a986f10da3d275a329e8d62a265a4a52b31 Mon Sep 17 00:00:00 2001 From: Johan Wikman Date: Fri, 6 Oct 2017 13:44:32 +0300 Subject: [PATCH 027/101] Make mxs_mysql_get_command inline --- include/maxscale/protocol/mysql.h | 15 ++++++++++++++- server/modules/protocol/MySQL/mysql_common.cc | 14 -------------- 2 files changed, 14 insertions(+), 15 deletions(-) diff --git a/include/maxscale/protocol/mysql.h b/include/maxscale/protocol/mysql.h index 5e1470797..62b26e7ca 100644 --- a/include/maxscale/protocol/mysql.h +++ b/include/maxscale/protocol/mysql.h @@ -36,6 +36,7 @@ #include #include +#include #include #include #include @@ -591,7 +592,19 @@ void mxs_mysql_set_current_db(MXS_SESSION* session, const char* db); * * @return The command byte */ -uint8_t mxs_mysql_get_command(GWBUF* buffer); +static inline uint8_t mxs_mysql_get_command(GWBUF* buffer) +{ + if (GWBUF_LENGTH(buffer) > MYSQL_HEADER_LEN) + { + return GWBUF_DATA(buffer)[4]; + } + else + { + uint8_t command = 0; + gwbuf_copy_data(buffer, MYSQL_HEADER_LEN, 1, &command); + return command; + } +} /** * @brief Extract PS response values diff --git a/server/modules/protocol/MySQL/mysql_common.cc b/server/modules/protocol/MySQL/mysql_common.cc index 404c56ae8..0d22b488d 100644 --- a/server/modules/protocol/MySQL/mysql_common.cc +++ b/server/modules/protocol/MySQL/mysql_common.cc @@ -1616,20 +1616,6 @@ void mxs_mysql_set_current_db(MXS_SESSION* session, const char* db) snprintf(data->db, sizeof(data->db), "%s", db); } -uint8_t mxs_mysql_get_command(GWBUF* buffer) -{ - if (GWBUF_LENGTH(buffer) > MYSQL_HEADER_LEN) - { - return GWBUF_DATA(buffer)[4]; - } - else - { - uint8_t command = 0; - gwbuf_copy_data(buffer, MYSQL_HEADER_LEN, 1, &command); - return command; - } -} - bool mxs_mysql_extract_ps_response(GWBUF* buffer, MXS_PS_RESPONSE* out) { bool rval = false; From e14234cb1eb45fd12c52f003c7fc0e1711313ecd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Fri, 6 Oct 2017 12:45:46 +0300 Subject: 
[PATCH 028/101] Clean up get_backend_from_dcb Replace the original version of the function with the reference version and use it everywhere. Added runtime assertions to check that an invalid DCB is never processed. --- .../routing/readwritesplit/readwritesplit.cc | 44 ++++++------------- .../readwritesplit/rwsplit_internal.hh | 1 - 2 files changed, 13 insertions(+), 32 deletions(-) diff --git a/server/modules/routing/readwritesplit/readwritesplit.cc b/server/modules/routing/readwritesplit/readwritesplit.cc index 3f2dd0db2..f935ca5fb 100644 --- a/server/modules/routing/readwritesplit/readwritesplit.cc +++ b/server/modules/routing/readwritesplit/readwritesplit.cc @@ -102,7 +102,8 @@ int rses_get_max_replication_lag(RWSplitSession *rses) * * @return backend reference pointer if succeed or NULL */ -SRWBackend get_backend_from_dcb(RWSplitSession *rses, DCB *dcb) + +static SRWBackend& get_backend_from_dcb(RWSplitSession *rses, DCB *dcb) { ss_dassert(dcb->dcb_role == DCB_ROLE_BACKEND_HANDLER); CHK_DCB(dcb); @@ -119,33 +120,14 @@ SRWBackend get_backend_from_dcb(RWSplitSession *rses, DCB *dcb) } } - /** We should always have a valid backend reference */ - ss_dassert(false); - return SRWBackend(); -} + /** We should always have a valid backend reference and in case we don't, + * something is terribly wrong. */ + MXS_ALERT("No reference to DCB %p found, aborting.", dcb); + raise(SIGABRT); -static SRWBackend emptyref; - -static SRWBackend& get_backend_ref_from_dcb(RWSplitSession *rses, DCB *dcb) -{ - ss_dassert(dcb->dcb_role == DCB_ROLE_BACKEND_HANDLER); - CHK_DCB(dcb); - CHK_CLIENT_RSES(rses); - - for (SRWBackendList::iterator it = rses->backends.begin(); - it != rses->backends.end(); it++) - { - SRWBackend& backend = *it; - - if (backend->dcb() == dcb) - { - return backend; - } - } - - /** We should always have a valid backend reference */ - ss_dassert(false); - return emptyref; + // To make the compiler happy, we a reference to a static value. 
+ static SRWBackend this_should_not_happen; + return this_should_not_happen; } /** @@ -300,7 +282,7 @@ static void handle_error_reply_client(MXS_SESSION *ses, RWSplitSession *rses, mxs_session_state_t sesstate = ses->state; DCB *client_dcb = ses->client_dcb; - SRWBackend backend = get_backend_from_dcb(rses, backend_dcb); + SRWBackend& backend = get_backend_from_dcb(rses, backend_dcb); backend->close(); @@ -385,7 +367,7 @@ static bool handle_error_new_connection(RWSplit *inst, DCB *backend_dcb, GWBUF *errmsg) { RWSplitSession *myrses = *rses; - SRWBackend backend = get_backend_from_dcb(myrses, backend_dcb); + SRWBackend& backend = get_backend_from_dcb(myrses, backend_dcb); MXS_SESSION* ses = backend_dcb->session; bool route_stored = false; @@ -1208,7 +1190,7 @@ static void clientReply(MXS_ROUTER *instance, CHK_CLIENT_RSES(rses); ss_dassert(!rses->rses_closed); - SRWBackend& backend = get_backend_ref_from_dcb(rses, backend_dcb); + SRWBackend& backend = get_backend_from_dcb(rses, backend_dcb); if (backend->get_reply_state() == REPLY_STATE_DONE) { @@ -1333,7 +1315,7 @@ static void handleError(MXS_ROUTER *instance, MXS_SESSION *session = problem_dcb->session; ss_dassert(session); - SRWBackend backend = get_backend_from_dcb(rses, problem_dcb); + SRWBackend& backend = get_backend_from_dcb(rses, problem_dcb); switch (action) { diff --git a/server/modules/routing/readwritesplit/rwsplit_internal.hh b/server/modules/routing/readwritesplit/rwsplit_internal.hh index ef9563dd4..6db0c17c3 100644 --- a/server/modules/routing/readwritesplit/rwsplit_internal.hh +++ b/server/modules/routing/readwritesplit/rwsplit_internal.hh @@ -60,7 +60,6 @@ bool send_readonly_error(DCB *dcb); * The following are implemented in readwritesplit.c */ int router_handle_state_switch(DCB *dcb, DCB_REASON reason, void *data); -SRWBackend get_backend_from_dcb(RWSplitSession *rses, DCB *dcb); int rses_get_max_replication_lag(RWSplitSession *rses); /* From 27aa435080595150a7fb8e12fd404275eaf96613 Mon Sep 17 
00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Fri, 6 Oct 2017 13:34:07 +0300 Subject: [PATCH 029/101] Add minor performance improvements to readwritesplit The multi-statement detection did not check for the existence of semicolons before doing the heavier processing. Calculcate the packet length only once for the result state management. --- .../routing/readwritesplit/readwritesplit.cc | 23 ++++++++----------- .../readwritesplit/rwsplit_route_stmt.cc | 1 - .../readwritesplit/rwsplit_tmp_table_multi.cc | 15 +++++++++++- 3 files changed, 23 insertions(+), 16 deletions(-) diff --git a/server/modules/routing/readwritesplit/readwritesplit.cc b/server/modules/routing/readwritesplit/readwritesplit.cc index f935ca5fb..16858c2d3 100644 --- a/server/modules/routing/readwritesplit/readwritesplit.cc +++ b/server/modules/routing/readwritesplit/readwritesplit.cc @@ -514,11 +514,11 @@ static bool route_stored_query(RWSplitSession *rses) return rval; } -static inline bool is_eof(GWBUF* buffer) +static inline bool is_eof(GWBUF* buffer, size_t len) { uint8_t* data = GWBUF_DATA(buffer); return data[MYSQL_HEADER_LEN] == MYSQL_REPLY_EOF && - gw_mysql_get_byte3(data) + MYSQL_HEADER_LEN == MYSQL_EOF_PACKET_LEN; + len == MYSQL_EOF_PACKET_LEN - MYSQL_HEADER_LEN; } static inline bool is_ok(GWBUF* buffer) @@ -526,10 +526,6 @@ static inline bool is_ok(GWBUF* buffer) uint8_t* data = GWBUF_DATA(buffer); return data[MYSQL_HEADER_LEN] == MYSQL_REPLY_OK; } -static inline bool is_large(GWBUF* buffer) -{ - return gw_mysql_get_byte3(GWBUF_DATA(buffer)) == GW_MYSQL_MAX_PACKET_LEN; -} static inline bool more_results_exist(GWBUF* buffer) { @@ -584,24 +580,23 @@ bool reply_is_complete(SRWBackend& backend, GWBUF *buffer) } else { - bool large = backend->is_large_packet(); int n_eof = backend->get_reply_state() == REPLY_STATE_RSET_ROWS ? 
1 : 0; - if (is_large(buffer)) + size_t len = gw_mysql_get_byte3(GWBUF_DATA(buffer)); + + if (len == GW_MYSQL_MAX_PACKET_LEN) { - large = true; + backend->set_large_packet(true); } - else if (large) + else if (backend->is_large_packet()) { - large = false; + backend->set_large_packet(false); } - else if (is_eof(buffer)) + else if (is_eof(buffer, len)) { n_eof++; } - backend->set_large_packet(large); - if (n_eof == 0) { /** Waiting for the EOF packet after the column definitions */ diff --git a/server/modules/routing/readwritesplit/rwsplit_route_stmt.cc b/server/modules/routing/readwritesplit/rwsplit_route_stmt.cc index bf6035cc0..a77690174 100644 --- a/server/modules/routing/readwritesplit/rwsplit_route_stmt.cc +++ b/server/modules/routing/readwritesplit/rwsplit_route_stmt.cc @@ -783,7 +783,6 @@ handle_multi_temp_and_load(RWSplitSession *rses, GWBUF *querybuf, rses->target_node = rses->current_master; MXS_INFO("Multi-statement query or stored procedure call, routing " "all future queries to master."); - } else { diff --git a/server/modules/routing/readwritesplit/rwsplit_tmp_table_multi.cc b/server/modules/routing/readwritesplit/rwsplit_tmp_table_multi.cc index f31dc8527..43e62701c 100644 --- a/server/modules/routing/readwritesplit/rwsplit_tmp_table_multi.cc +++ b/server/modules/routing/readwritesplit/rwsplit_tmp_table_multi.cc @@ -179,6 +179,19 @@ void check_create_tmp_table(RWSplitSession *router_cli_ses, } } +inline bool have_semicolon(const char* ptr, int len) +{ + for (int i = 0; i < len; i++) + { + if (ptr[i] == ';') + { + return true; + } + } + + return false; +} + /** * @brief Detect multi-statement queries * @@ -203,7 +216,7 @@ bool check_for_multi_stmt(GWBUF *buf, void *protocol, uint8_t packet_type) /** Payload size without command byte */ int buflen = gw_mysql_get_byte3((uint8_t *)GWBUF_DATA(buf)) - 1; - if ((ptr = strnchr_esc_mysql(data, ';', buflen))) + if (have_semicolon(data, buflen) && (ptr = strnchr_esc_mysql(data, ';', buflen))) { /** Skip stored 
procedures etc. */ while (ptr && is_mysql_sp_end(ptr, buflen - (ptr - data))) From 225837cf6e791e288bcc5899c0db0b0b40e420d0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Fri, 6 Oct 2017 15:04:04 +0300 Subject: [PATCH 030/101] Inline get_backend_from_dcb The function is used very often so inlining it should help. --- server/modules/routing/readwritesplit/readwritesplit.cc | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/server/modules/routing/readwritesplit/readwritesplit.cc b/server/modules/routing/readwritesplit/readwritesplit.cc index 16858c2d3..d08192fa4 100644 --- a/server/modules/routing/readwritesplit/readwritesplit.cc +++ b/server/modules/routing/readwritesplit/readwritesplit.cc @@ -103,7 +103,7 @@ int rses_get_max_replication_lag(RWSplitSession *rses) * @return backend reference pointer if succeed or NULL */ -static SRWBackend& get_backend_from_dcb(RWSplitSession *rses, DCB *dcb) +static inline SRWBackend& get_backend_from_dcb(RWSplitSession *rses, DCB *dcb) { ss_dassert(dcb->dcb_role == DCB_ROLE_BACKEND_HANDLER); CHK_DCB(dcb); @@ -581,7 +581,6 @@ bool reply_is_complete(SRWBackend& backend, GWBUF *buffer) else { int n_eof = backend->get_reply_state() == REPLY_STATE_RSET_ROWS ? 1 : 0; - size_t len = gw_mysql_get_byte3(GWBUF_DATA(buffer)); if (len == GW_MYSQL_MAX_PACKET_LEN) From b00964dc546b22b5dbaf9f5cd998f4bd355d58de Mon Sep 17 00:00:00 2001 From: Johan Wikman Date: Fri, 6 Oct 2017 15:13:29 +0300 Subject: [PATCH 031/101] Allocate shared buffer and its data in one chunk The GWBUF shared buffer and its data is now allocated in one chunk so that the data directly follows the shared buffer. That way, creating a GWBUF will involve 2 and not 3 calls to malloc and freeing one will involve 2 and not 3 calls to free. 
--- include/maxscale/buffer.h | 4 ++-- server/core/buffer.cc | 14 +++----------- 2 files changed, 5 insertions(+), 13 deletions(-) diff --git a/include/maxscale/buffer.h b/include/maxscale/buffer.h index dd2caf59b..2cd5abe80 100644 --- a/include/maxscale/buffer.h +++ b/include/maxscale/buffer.h @@ -105,10 +105,10 @@ struct buffer_object_st */ typedef struct { - unsigned char *data; /*< Physical memory that was allocated */ - int refcount; /*< Reference count on the buffer */ + int32_t refcount; /*< Reference count on the buffer */ buffer_object_t *bufobj; /*< List of objects referred to by GWBUF */ uint32_t info; /*< Info bits */ + unsigned char data[1]; /*< Actual memory that was allocated */ } SHARED_BUF; /** diff --git a/server/core/buffer.cc b/server/core/buffer.cc index 80d98a2f4..98d1ce829 100644 --- a/server/core/buffer.cc +++ b/server/core/buffer.cc @@ -55,6 +55,7 @@ gwbuf_alloc(unsigned int size) { GWBUF *rval; SHARED_BUF *sbuf; + size_t sbuf_size = sizeof(SHARED_BUF) + (size ? 
size - 1 : 0); /* Allocate the buffer header */ if ((rval = (GWBUF *)MXS_MALLOC(sizeof(GWBUF))) == NULL) @@ -63,27 +64,19 @@ gwbuf_alloc(unsigned int size) } /* Allocate the shared data buffer */ - if ((sbuf = (SHARED_BUF *)MXS_MALLOC(sizeof(SHARED_BUF))) == NULL) + if ((sbuf = (SHARED_BUF *)MXS_MALLOC(sbuf_size)) == NULL) { MXS_FREE(rval); rval = NULL; goto retblock; } - /* Allocate the space for the actual data */ - if ((sbuf->data = (unsigned char *)MXS_MALLOC(size)) == NULL) - { - MXS_FREE(rval); - MXS_FREE(sbuf); - rval = NULL; - goto retblock; - } sbuf->refcount = 1; sbuf->info = GWBUF_INFO_NONE; sbuf->bufobj = NULL; spinlock_init(&rval->gwbuf_lock); - rval->start = sbuf->data; + rval->start = &sbuf->data; rval->end = (void *)((char *)rval->start + size); rval->sbuf = sbuf; rval->next = NULL; @@ -262,7 +255,6 @@ gwbuf_free_one(GWBUF *buf) bo = gwbuf_remove_buffer_object(buf, bo); } - MXS_FREE(buf->sbuf->data); MXS_FREE(buf->sbuf); } From 9d6c2010b3a926c13f94fdbc9aa47363a1e047e3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Fri, 6 Oct 2017 15:28:44 +0300 Subject: [PATCH 032/101] Check before clearing statements stored in the session If the session has no stored statements, there's no need to clear them. 
--- include/maxscale/session.h | 12 ++++++++++++ .../modules/routing/readwritesplit/readwritesplit.cc | 7 +++++-- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/include/maxscale/session.h b/include/maxscale/session.h index e26336472..6bdd9f3a0 100644 --- a/include/maxscale/session.h +++ b/include/maxscale/session.h @@ -407,6 +407,18 @@ bool session_store_stmt(MXS_SESSION *session, GWBUF *buf, const struct server *s */ bool session_take_stmt(MXS_SESSION *session, GWBUF **buffer, const struct server **target); +/** + * @brief Check if the session has a stored statement + * + * @param session Session to check + * + * @return True if the session has a stored statement + */ +static inline bool session_have_stmt(MXS_SESSION *session) +{ + return session->stmt.buffer; +} + /** * Clear the stored statement * diff --git a/server/modules/routing/readwritesplit/readwritesplit.cc b/server/modules/routing/readwritesplit/readwritesplit.cc index d08192fa4..1a2b00170 100644 --- a/server/modules/routing/readwritesplit/readwritesplit.cc +++ b/server/modules/routing/readwritesplit/readwritesplit.cc @@ -1196,8 +1196,11 @@ static void clientReply(MXS_ROUTER *instance, return; } - /** Statement was successfully executed, free the stored statement */ - session_clear_stmt(backend_dcb->session); + if (session_have_stmt(backend_dcb->session)) + { + /** Statement was successfully executed, free the stored statement */ + session_clear_stmt(backend_dcb->session); + } if (reply_is_complete(backend, writebuf)) { From b80cf36f1f7d4e6acd88532950c7907c7c484d18 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Fri, 6 Oct 2017 16:43:27 +0300 Subject: [PATCH 033/101] Fix compilation failure in readwritesplit The debug assertion was missing a parameter. 
--- server/modules/routing/readwritesplit/readwritesplit.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/server/modules/routing/readwritesplit/readwritesplit.cc b/server/modules/routing/readwritesplit/readwritesplit.cc index 1a2b00170..8cc1d8c0b 100644 --- a/server/modules/routing/readwritesplit/readwritesplit.cc +++ b/server/modules/routing/readwritesplit/readwritesplit.cc @@ -529,7 +529,8 @@ static inline bool is_ok(GWBUF* buffer) static inline bool more_results_exist(GWBUF* buffer) { - ss_dassert(is_eof(buffer) || mxs_mysql_is_ok_packet(buffer)); + ss_dassert(is_eof(buffer, gw_mysql_get_byte3(GWBUF_DATA(buffer))) || + mxs_mysql_is_ok_packet(buffer)); uint16_t status = gw_mysql_get_byte2(GWBUF_DATA(buffer) + MYSQL_HEADER_LEN + 1 + 2); return status & SERVER_MORE_RESULTS_EXIST; } From b8035a604799827729a7b786e3ed1fbf562815ec Mon Sep 17 00:00:00 2001 From: Johan Wikman Date: Sat, 7 Oct 2017 09:03:55 +0300 Subject: [PATCH 034/101] Order members to ensure alignment 8 + 4 + 4 ensures 16 with 8 byte alignment, which means that 'data' is certain to be 8 byte aligned. 4 + 8 + 4 might result in something else in some funky environment. 
--- include/maxscale/buffer.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/maxscale/buffer.h b/include/maxscale/buffer.h index 2cd5abe80..cae47b33f 100644 --- a/include/maxscale/buffer.h +++ b/include/maxscale/buffer.h @@ -105,8 +105,8 @@ struct buffer_object_st */ typedef struct { - int32_t refcount; /*< Reference count on the buffer */ buffer_object_t *bufobj; /*< List of objects referred to by GWBUF */ + int32_t refcount; /*< Reference count on the buffer */ uint32_t info; /*< Info bits */ unsigned char data[1]; /*< Actual memory that was allocated */ } SHARED_BUF; From 2ca050156fc0901a4dbfea479116cfe51b3d100e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Sat, 7 Oct 2017 23:17:10 +0300 Subject: [PATCH 035/101] Fix backend protocol command tracking If a query was processed in the client protocol module when a prepared statement was being executed by the backend module, the current command would get overwritten. This caused a debug assertion in readwritesplit to trigger as the result was neither a single packet nor a collected result. The RCAP_TYPE_STMT_INPUT capability guarantees that a buffer contains a complete packet. This information can be used to track the currently executed command based on the buffer contents which allows asynchronicity between the client and backend protocol. In practice this only comes in play when routers queue queries for later execution. 
--- .../protocol/MySQL/MySQLBackend/mysql_backend.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/server/modules/protocol/MySQL/MySQLBackend/mysql_backend.c b/server/modules/protocol/MySQL/MySQLBackend/mysql_backend.c index 6dcdb18ca..6913e23d1 100644 --- a/server/modules/protocol/MySQL/MySQLBackend/mysql_backend.c +++ b/server/modules/protocol/MySQL/MySQLBackend/mysql_backend.c @@ -377,9 +377,20 @@ mxs_auth_state_t handle_server_response(DCB *dcb, GWBUF *buffer) static inline void prepare_for_write(DCB *dcb, GWBUF *buffer) { MySQLProtocol *proto = (MySQLProtocol*)dcb->protocol; + uint64_t capabilities = service_get_capabilities(dcb->session->service); - /** Copy the current command being executed to this backend */ - if (dcb->session->client_dcb && dcb->session->client_dcb->protocol) + /** + * Copy the current command being executed to this backend. For statement + * based routers, this is tracked by using the current command being executed. + * For routers that stream data, the client protocol command tracking data + * is used which does not guarantee that the correct command is tracked if + * something queues commands internally. + */ + if (rcap_type_required(capabilities, RCAP_TYPE_STMT_INPUT)) + { + proto->current_command = (mxs_mysql_cmd_t)MYSQL_GET_COMMAND(GWBUF_DATA(buffer)); + } + else if (dcb->session->client_dcb && dcb->session->client_dcb->protocol) { MySQLProtocol *client_proto = (MySQLProtocol*)dcb->session->client_dcb->protocol; proto->current_command = client_proto->current_command; From d97742bf66946f36c91fd6ee88b1621afeb9ac06 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Sun, 8 Oct 2017 22:06:43 +0300 Subject: [PATCH 036/101] Fix crash in backend command tracking The backend protocol command tracking didn't check whether the session was the dummy session. The DCB's session is always set to this value when it is put into the persistent pool. 
--- include/maxscale/session.h | 5 +++ .../MySQL/MySQLBackend/mysql_backend.c | 35 ++++++++++++------- 2 files changed, 27 insertions(+), 13 deletions(-) diff --git a/include/maxscale/session.h b/include/maxscale/session.h index 6bdd9f3a0..34f05ebd8 100644 --- a/include/maxscale/session.h +++ b/include/maxscale/session.h @@ -199,6 +199,11 @@ MXS_SESSION *session_alloc_with_id(struct service *, struct dcb *, uint64_t); MXS_SESSION *session_set_dummy(struct dcb *); +static inline bool session_is_dummy(MXS_SESSION* session) +{ + return session->state == SESSION_STATE_DUMMY; +} + const char *session_get_remote(const MXS_SESSION *); const char *session_get_user(const MXS_SESSION *); diff --git a/server/modules/protocol/MySQL/MySQLBackend/mysql_backend.c b/server/modules/protocol/MySQL/MySQLBackend/mysql_backend.c index 6913e23d1..83fd4bcd7 100644 --- a/server/modules/protocol/MySQL/MySQLBackend/mysql_backend.c +++ b/server/modules/protocol/MySQL/MySQLBackend/mysql_backend.c @@ -377,23 +377,32 @@ mxs_auth_state_t handle_server_response(DCB *dcb, GWBUF *buffer) static inline void prepare_for_write(DCB *dcb, GWBUF *buffer) { MySQLProtocol *proto = (MySQLProtocol*)dcb->protocol; - uint64_t capabilities = service_get_capabilities(dcb->session->service); /** - * Copy the current command being executed to this backend. For statement - * based routers, this is tracked by using the current command being executed. - * For routers that stream data, the client protocol command tracking data - * is used which does not guarantee that the correct command is tracked if - * something queues commands internally. + * The DCB's session is set to the dummy session when it is put into the + * persistent connection pool. If this is not the dummy session, track + * the current command being executed. 
*/ - if (rcap_type_required(capabilities, RCAP_TYPE_STMT_INPUT)) + if (!session_is_dummy(dcb->session)) { - proto->current_command = (mxs_mysql_cmd_t)MYSQL_GET_COMMAND(GWBUF_DATA(buffer)); - } - else if (dcb->session->client_dcb && dcb->session->client_dcb->protocol) - { - MySQLProtocol *client_proto = (MySQLProtocol*)dcb->session->client_dcb->protocol; - proto->current_command = client_proto->current_command; + uint64_t capabilities = service_get_capabilities(dcb->session->service); + + /** + * Copy the current command being executed to this backend. For statement + * based routers, this is tracked by using the current command being executed. + * For routers that stream data, the client protocol command tracking data + * is used which does not guarantee that the correct command is tracked if + * something queues commands internally. + */ + if (rcap_type_required(capabilities, RCAP_TYPE_STMT_INPUT)) + { + proto->current_command = (mxs_mysql_cmd_t)MYSQL_GET_COMMAND(GWBUF_DATA(buffer)); + } + else if (dcb->session->client_dcb && dcb->session->client_dcb->protocol) + { + MySQLProtocol *client_proto = (MySQLProtocol*)dcb->session->client_dcb->protocol; + proto->current_command = client_proto->current_command; + } } if (GWBUF_IS_TYPE_SESCMD(buffer)) From c5ff130b33f157260cae926d04417464351747fa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Sun, 8 Oct 2017 23:15:18 +0300 Subject: [PATCH 037/101] Process backend packets only once When the router requires statement based output, the gathering of complete packets can be skipped as the process of splitting the complete packets into individual packets implies that only complete packets are handled. Also added a quicker check for stored protocol commands than a call to protocol_get_srv_command. 
--- .../MySQL/MySQLBackend/mysql_backend.c | 98 ++++++++++--------- 1 file changed, 53 insertions(+), 45 deletions(-) diff --git a/server/modules/protocol/MySQL/MySQLBackend/mysql_backend.c b/server/modules/protocol/MySQL/MySQLBackend/mysql_backend.c index 83fd4bcd7..d4f332fe3 100644 --- a/server/modules/protocol/MySQL/MySQLBackend/mysql_backend.c +++ b/server/modules/protocol/MySQL/MySQLBackend/mysql_backend.c @@ -745,7 +745,8 @@ gw_read_and_write(DCB *dcb) bool result_collected = false; MySQLProtocol *proto = (MySQLProtocol *)dcb->protocol; - if (rcap_type_required(capabilities, RCAP_TYPE_STMT_OUTPUT) || (proto->ignore_replies != 0)) + if (rcap_type_required(capabilities, RCAP_TYPE_CONTIGUOUS_OUTPUT) || + proto->collect_result || proto->ignore_replies != 0) { GWBUF *tmp = modutil_get_complete_packets(&read_buffer); /* Put any residue into the read queue */ @@ -760,53 +761,48 @@ gw_read_and_write(DCB *dcb) read_buffer = tmp; - if (rcap_type_required(capabilities, RCAP_TYPE_CONTIGUOUS_OUTPUT) || - proto->collect_result || - proto->ignore_replies != 0) + if ((tmp = gwbuf_make_contiguous(read_buffer))) { - if ((tmp = gwbuf_make_contiguous(read_buffer))) - { - read_buffer = tmp; - } - else - { - /** Failed to make the buffer contiguous */ - gwbuf_free(read_buffer); - poll_fake_hangup_event(dcb); - return 0; - } + read_buffer = tmp; + } + else + { + /** Failed to make the buffer contiguous */ + gwbuf_free(read_buffer); + poll_fake_hangup_event(dcb); + return 0; + } - if (collecting_resultset(proto, capabilities)) + if (collecting_resultset(proto, capabilities)) + { + if (expecting_resultset(proto)) { - if (expecting_resultset(proto)) + if (mxs_mysql_is_result_set(read_buffer)) { - if (mxs_mysql_is_result_set(read_buffer)) - { - bool more = false; - if (modutil_count_signal_packets(read_buffer, 0, &more, NULL) != 2) - { - dcb_readq_prepend(dcb, read_buffer); - return 0; - } - } - - // Collected the complete result - proto->collect_result = false; - result_collected = 
true; - } - else if (expecting_ps_response(proto) && - mxs_mysql_is_prep_stmt_ok(read_buffer)) - { - if (!complete_ps_response(read_buffer)) + bool more = false; + if (modutil_count_signal_packets(read_buffer, 0, &more, NULL) != 2) { dcb_readq_prepend(dcb, read_buffer); return 0; } - - // Collected the complete result - proto->collect_result = false; - result_collected = true; } + + // Collected the complete result + proto->collect_result = false; + result_collected = true; + } + else if (expecting_ps_response(proto) && + mxs_mysql_is_prep_stmt_ok(read_buffer)) + { + if (!complete_ps_response(read_buffer)) + { + dcb_readq_prepend(dcb, read_buffer); + return 0; + } + + // Collected the complete result + proto->collect_result = false; + result_collected = true; } } } @@ -909,7 +905,8 @@ gw_read_and_write(DCB *dcb) * If protocol has session command set, concatenate whole * response into one buffer. */ - if (protocol_get_srv_command((MySQLProtocol *)dcb->protocol, true) != MXS_COM_UNDEFINED) + if (proto->protocol_command.scom_cmd != MXS_COM_UNDEFINED && + protocol_get_srv_command(proto, true) != MXS_COM_UNDEFINED) { if (result_collected) { @@ -943,12 +940,23 @@ gw_read_and_write(DCB *dcb) !rcap_type_required(capabilities, RCAP_TYPE_RESULTSET_OUTPUT) && !result_collected) { - stmt = modutil_get_next_MySQL_packet(&read_buffer); - - if (!GWBUF_IS_CONTIGUOUS(stmt)) + if ((stmt = modutil_get_next_MySQL_packet(&read_buffer))) { - // Make sure the buffer is contiguous - stmt = gwbuf_make_contiguous(stmt); + if (!GWBUF_IS_CONTIGUOUS(stmt)) + { + // Make sure the buffer is contiguous + stmt = gwbuf_make_contiguous(stmt); + } + } + else + { + // All complete packets are processed, store partial packets for later use + if (read_buffer) + { + dcb_readq_prepend(dcb, read_buffer); + } + + return return_code; } } else From 75316712e7bedb6b709f348bd3d7ba06578ade93 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Sun, 8 Oct 2017 23:25:05 +0300 Subject: [PATCH 
038/101] Fix error messages in sync_slaves Fixed missing newlines in the error output printf calls of sync_slaves. Changed the order of commands pers_02 executes to a more correct way. --- maxscale-system-test/mariadb_nodes.cpp | 9 +++++---- maxscale-system-test/pers_02.cpp | 3 ++- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/maxscale-system-test/mariadb_nodes.cpp b/maxscale-system-test/mariadb_nodes.cpp index acd683f74..4b8dc3403 100644 --- a/maxscale-system-test/mariadb_nodes.cpp +++ b/maxscale-system-test/mariadb_nodes.cpp @@ -1299,14 +1299,15 @@ static void wait_until_pos(MYSQL *mysql, int filenum, int pos) void Mariadb_nodes::sync_slaves(int node) { - if (this->nodes[node] == NULL) + if (this->nodes[node] == NULL && this->connect()) { - this->connect(); + printf("Failed to connect to all nodes.\n"); + return; } if (mysql_query(this->nodes[node], "SHOW MASTER STATUS")) { - printf("Failed to execute SHOW MASTER STATUS: %s", mysql_error(this->nodes[node])); + printf("Failed to execute SHOW MASTER STATUS: %s\n", mysql_error(this->nodes[node])); } else { @@ -1334,7 +1335,7 @@ void Mariadb_nodes::sync_slaves(int node) } else { - printf("Cannot sync slaves, invalid binlog file name: %s", row[0]); + printf("Cannot sync slaves, invalid binlog file name: %s\n", row[0]); } } mysql_free_result(res); diff --git a/maxscale-system-test/pers_02.cpp b/maxscale-system-test/pers_02.cpp index af588cecb..511e12497 100644 --- a/maxscale-system-test/pers_02.cpp +++ b/maxscale-system-test/pers_02.cpp @@ -25,9 +25,10 @@ int main(int argc, char *argv[]) Test->create_connections(75, true, true, true, true); Test->stop_timeout(); + Test->repl->close_connections(); Test->repl->stop_nodes(); Test->repl->start_replication(); - Test->repl->close_connections(); + Test->repl->connect(); Test->repl->sync_slaves(); Test->set_timeout(60); From c8b9bf09b8422d92506c32b4ff69d85ec5caba1a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Sun, 8 Oct 2017 
23:28:01 +0300 Subject: [PATCH 039/101] Fix typo in readwritesplit comments The comment about the static variable being returned as a reference was missing the `return` word. --- server/modules/routing/readwritesplit/readwritesplit.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/modules/routing/readwritesplit/readwritesplit.cc b/server/modules/routing/readwritesplit/readwritesplit.cc index 8cc1d8c0b..b84acd831 100644 --- a/server/modules/routing/readwritesplit/readwritesplit.cc +++ b/server/modules/routing/readwritesplit/readwritesplit.cc @@ -125,7 +125,7 @@ static inline SRWBackend& get_backend_from_dcb(RWSplitSession *rses, DCB *dcb) MXS_ALERT("No reference to DCB %p found, aborting.", dcb); raise(SIGABRT); - // To make the compiler happy, we a reference to a static value. + // To make the compiler happy, we return a reference to a static value. static SRWBackend this_should_not_happen; return this_should_not_happen; } From 9805df7af05003397cc596c08003cbf21580f3c6 Mon Sep 17 00:00:00 2001 From: MassimilianoPinto Date: Mon, 9 Oct 2017 11:00:02 +0200 Subject: [PATCH 040/101] Update Avro router documentation Update Avro router documentation --- Documentation/Routers/Avrorouter.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/Documentation/Routers/Avrorouter.md b/Documentation/Routers/Avrorouter.md index ed3d4e2a8..7267160d0 100644 --- a/Documentation/Routers/Avrorouter.md +++ b/Documentation/Routers/Avrorouter.md @@ -144,6 +144,11 @@ If you need to start from a binlog file other than 1, you need to set the value of this option to the correct index. The avrorouter will always start from the beginning of the binary log file. +**Note**: MaxScale version 2.2 introduces MariaDB GTID support +in Binlog Server: currently, if used with Avrorouter, the option `mariadb10_master_gtid` +must be set to off in the Binlog Server configuration in order to correctly +read the binlog files. 
+ ### Avro file options These options control how large the Avro file data blocks can get. From 80892c16575d5bf8c92d1a781204fa96a12c2d61 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Mon, 9 Oct 2017 10:50:56 +0300 Subject: [PATCH 041/101] Update MaxCtrl documentation Fixed the usage help for each command. --- Documentation/Reference/MaxCtrl.md | 105 +++++++++++++++-------------- maxctrl/autodoc.sh | 2 +- 2 files changed, 54 insertions(+), 53 deletions(-) diff --git a/Documentation/Reference/MaxCtrl.md b/Documentation/Reference/MaxCtrl.md index 20ee388b0..31eabdf39 100644 --- a/Documentation/Reference/MaxCtrl.md +++ b/Documentation/Reference/MaxCtrl.md @@ -73,55 +73,55 @@ Commands: ### list servers -Usage: `maxctrl.js list servers` +`Usage: list servers` List all servers in MaxScale. ### list services -Usage: `maxctrl.js list services` +`Usage: list services` List all services and the servers they use. ### list listeners -Usage: `maxctrl.js list listeners ` +`Usage: list listeners ` List listeners for a service. ### list monitors -Usage: `maxctrl.js list monitors` +`Usage: list monitors` List all monitors in MaxScale. ### list sessions -Usage: `maxctrl.js list sessions` +`Usage: list sessions` List all client sessions. ### list filters -Usage: `maxctrl.js list filters` +`Usage: list filters` List all filters in MaxScale. ### list modules -Usage: `maxctrl.js list modules` +`Usage: list modules` List all currently loaded modules. ### list users -Usage: `maxctrl.js list users` +`Usage: list users` List the users that can be used to connect to the MaxScale REST API. ### list commands -Usage: `maxctrl.js list commands` +`Usage: list commands` List all available module commands. @@ -145,7 +145,7 @@ Commands: ### show server -Usage: `maxctrl.js show server ` +`Usage: show server ` Show detailed information about a server. The `Parameters` field contains the currently configured parameters for this server. 
See `help alter server` for @@ -153,7 +153,7 @@ more details about altering server parameters. ### show service -Usage: `maxctrl.js show service ` +`Usage: show service ` Show detailed information about a service. The `Parameters` field contains the currently configured parameters for this service. See `help alter service` for @@ -161,7 +161,7 @@ more details about altering service parameters. ### show monitor -Usage: `maxctrl.js show monitor ` +`Usage: show monitor ` Show detailed information about a monitor. The `Parameters` field contains the currently configured parameters for this monitor. See `help alter monitor` for @@ -169,7 +169,7 @@ more details about altering monitor parameters. ### show session -Usage: `maxctrl.js show session ` +`Usage: show session ` Show detailed information about a single session. The list of sessions can be retrieved with the `list sessions` command. The is the session ID of a @@ -177,32 +177,32 @@ particular session. ### show filter -Usage: `maxctrl.js show filter ` +`Usage: show filter ` The list of services that use this filter is show in the `Services` field. ### show module -Usage: `maxctrl.js show module ` +`Usage: show module ` This command shows all available parameters as well as detailed version information of a loaded module. ### show maxscale -Usage: `maxctrl.js show maxscale` +`Usage: show maxscale` See `help alter maxscale` for more details about altering MaxScale parameters. ### show logging -Usage: `maxctrl.js show logging` +`Usage: show logging` See `help alter logging` for more details about altering logging parameters. ### show commands -Usage: `maxctrl.js show commands ` +`Usage: show commands ` This command shows the parameters the command expects with the parameter descriptions. @@ -219,7 +219,7 @@ Commands: ### set server -Usage: `maxctrl.js set server ` +`Usage: set server ` If is monitored by a monitor, this command should only be used to set the server into the `maintenance` state. 
Any other states will be overridden by @@ -239,7 +239,7 @@ Commands: ### clear server -Usage: `maxctrl.js clear server ` +`Usage: clear server ` This command clears a server state set by the `set server ` command @@ -261,13 +261,13 @@ Enable account options: ### enable log-priority -Usage: `maxctrl.js enable log-priority ` +`Usage: enable log-priority ` The `debug` log priority is only available for debug builds of MaxScale. ### enable account -Usage: `maxctrl.js enable account ` +`Usage: enable account ` The Linux user accounts are used by the MaxAdmin UNIX Domain Socket interface @@ -284,13 +284,13 @@ Commands: ### disable log-priority -Usage: `maxctrl.js disable log-priority ` +`Usage: disable log-priority ` The `debug` log priority is only available for debug builds of MaxScale. ### disable account -Usage: `maxctrl.js disable account ` +`Usage: disable account ` The Linux user accounts are used by the MaxAdmin UNIX Domain Socket interface @@ -335,7 +335,7 @@ Create user options: ### create server -Usage: `maxctrl.js create server ` +`Usage: create server ` The created server will not be used by any services or monitors unless the --services or --monitors options are given. The list of servers a service or a @@ -343,20 +343,20 @@ monitor uses can be altered with the `link` and `unlink` commands. ### create monitor -Usage: `maxctrl.js create monitor ` +`Usage: create monitor ` The list of servers given with the --servers option should not contain any servers that are already monitored by another monitor. ### create listener -Usage: `maxctrl.js create listener ` +`Usage: create listener ` The new listener will be taken into use immediately. ### create user -Usage: `maxctrl.js create user ` +`Usage: create user ` The created user can be used with the MaxScale REST API as well as the MaxAdmin network interface. By default the created user will have read-only privileges. 
@@ -377,20 +377,20 @@ Commands: ### destroy server -Usage: `maxctrl.js destroy server ` +`Usage: destroy server ` The server must be unlinked from all services and monitor before it can be destroyed. ### destroy monitor -Usage: `maxctrl.js destroy monitor ` +`Usage: destroy monitor ` The monitor must be unlinked from all servers before it can be destroyed. ### destroy listener -Usage: `maxctrl.js destroy listener ` +`Usage: destroy listener ` Destroying a monitor causes it to be removed on the next restart. Destroying a listener at runtime stops it from accepting new connections but it will still be @@ -399,7 +399,7 @@ to replace destroyed listeners without restarting MaxScale. ### destroy user -Usage: `maxctrl.js destroy user ` +`Usage: destroy user ` The last remaining administrative user cannot be removed. Create a replacement administrative user before attempting to remove the last administrative user. @@ -417,7 +417,7 @@ Commands: ### link service -Usage: `maxctrl.js link service ` +`Usage: link service ` This command links servers to a service, making them available for any connections that use the service. Before a server is linked to a service, it @@ -427,7 +427,7 @@ use the old list of servers. ### link monitor -Usage: `maxctrl.js link monitor ` +`Usage: link monitor ` Linking a server to a monitor will add it to the list of servers that are monitored by that monitor. A server can be monitored by only one monitor at a @@ -446,7 +446,7 @@ Commands: ### unlink service -Usage: `maxctrl.js unlink service ` +`Usage: unlink service ` This command unlinks servers from a service, removing them from the list of available servers for that service. New connections to the service will not use @@ -454,7 +454,7 @@ the unlinked servers but existing connections can still use the servers. ### unlink monitor -Usage: `maxctrl.js unlink monitor ` +`Usage: unlink monitor ` This command unlinks servers from a monitor, removing them from the list of monitored servers. 
The servers will be left in their current state when they are @@ -474,19 +474,19 @@ Commands: ### start service -Usage: `maxctrl.js start service ` +`Usage: start service ` This starts a service stopped by `stop service ` ### start monitor -Usage: `maxctrl.js start monitor ` +`Usage: start monitor ` This starts a monitor stopped by `stop monitor ` ### start maxscale -Usage: `maxctrl.js start maxscale` +`Usage: start maxscale` This command will execute the `start service` command for all services in MaxScale. @@ -505,7 +505,7 @@ Commands: ### stop service -Usage: `maxctrl.js stop service ` +`Usage: stop service ` Stopping a service will prevent all the listeners for that service from accepting new connections. Existing connections will still be handled normally @@ -513,14 +513,14 @@ until they are closed. ### stop monitor -Usage: `maxctrl.js stop monitor ` +`Usage: stop monitor ` Stopping a monitor will pause the monitoring of the servers. This can be used to manually control server states with the `set server` command. ### stop maxscale -Usage: `maxctrl.js stop maxscale` +`Usage: stop maxscale` This command will execute the `stop service` command for all services in MaxScale. @@ -541,19 +541,19 @@ Commands: ### alter server -Usage: `maxctrl.js alter server ` +`Usage: alter server ` To display the server parameters, execute `show server ` ### alter monitor -Usage: `maxctrl.js alter monitor ` +`Usage: alter monitor ` To display the monitor parameters, execute `show monitor ` ### alter service -Usage: `maxctrl.js alter service ` +`Usage: alter service ` To display the service parameters, execute `show service `. 
The following list of parameters can be altered at runtime: @@ -574,13 +574,13 @@ following list of parameters can be altered at runtime: ### alter logging -Usage: `maxctrl.js alter logging ` +`Usage: alter logging ` To display the logging parameters, execute `show logging` ### alter maxscale -Usage: `maxctrl.js alter maxscale ` +`Usage: alter maxscale ` To display the MaxScale parameters, execute `show maxscale`. The following list of parameters can be altered at runtime: @@ -589,7 +589,8 @@ of parameters can be altered at runtime: "auth_connect_timeout", "auth_read_timeout", "auth_write_timeout", - "admin_auth" + "admin_auth", + "admin_log_auth_failures" ] ## rotate @@ -604,7 +605,7 @@ Commands: ### rotate logs -Usage: `maxctrl.js rotate logs` +`Usage: rotate logs` This command is intended to be used with the `logrotate` command. @@ -620,7 +621,7 @@ Commands: ### call command -Usage: `maxctrl.js call command [params...]` +`Usage: call command [params...]` To inspect the list of module commands, execute `list commands` @@ -637,7 +638,7 @@ Commands: ### cluster diff -Usage: `maxctrl.js cluster diff ` +`Usage: cluster diff ` The list of host servers is controlled with the --hosts option. The target server should not be in the host list. Value of must be in HOST:PORT @@ -645,7 +646,7 @@ format ### cluster sync -Usage: `maxctrl.js cluster sync ` +`Usage: cluster sync ` This command will alter all MaxScale instances given in the --hosts option to represent the MaxScale. 
If the synchronization of a MaxScale instance diff --git a/maxctrl/autodoc.sh b/maxctrl/autodoc.sh index d14d5886f..2a1d8a0a1 100755 --- a/maxctrl/autodoc.sh +++ b/maxctrl/autodoc.sh @@ -23,7 +23,7 @@ do echo "### $i $j" echo USAGE=`node maxctrl.js help $i $j|head -n 1` - echo "Usage: \`$USAGE\`" + echo "\`$USAGE\`" echo "" # Print the detailed command explanation if it has one From 13265876a93d8c0aa65c6da85a4cd40c2d687ccd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Mon, 9 Oct 2017 12:29:55 +0300 Subject: [PATCH 042/101] Add MaxCtrl test for start/stop maxscale Added the missing test case for starting and stopping MaxScale. --- maxctrl/test/startstop.js | 28 +++++++++++++++++++++++----- 1 file changed, 23 insertions(+), 5 deletions(-) diff --git a/maxctrl/test/startstop.js b/maxctrl/test/startstop.js index fd58391a4..f0c851a56 100644 --- a/maxctrl/test/startstop.js +++ b/maxctrl/test/startstop.js @@ -1,33 +1,51 @@ require('../test_utils.js')() -describe("Start/Stop Commands", function() { +describe('Start/Stop Commands', function() { before(startMaxScale) it('stop service', function() { return verifyCommand('stop service Read-Connection-Router', 'services/Read-Connection-Router') .then(function(res) { - res.data.attributes.state.should.equal("Stopped") + res.data.attributes.state.should.equal('Stopped') }) }) it('start service', function() { return verifyCommand('start service Read-Connection-Router', 'services/Read-Connection-Router') .then(function(res) { - res.data.attributes.state.should.equal("Started") + res.data.attributes.state.should.equal('Started') }) }) it('stop monitor', function() { return verifyCommand('stop monitor MySQL-Monitor', 'monitors/MySQL-Monitor') .then(function(res) { - res.data.attributes.state.should.equal("Stopped") + res.data.attributes.state.should.equal('Stopped') }) }) it('start monitor', function() { return verifyCommand('start monitor MySQL-Monitor', 'monitors/MySQL-Monitor') .then(function(res) { - 
res.data.attributes.state.should.equal("Running") + res.data.attributes.state.should.equal('Running') + }) + }) + + it('stop maxscale', function() { + return verifyCommand('stop maxscale', 'services') + .then(function(res) { + res.data.forEach((i) => { + i.attributes.state.should.equal('Stopped') + }) + }) + }) + + it('start maxscale', function() { + return verifyCommand('start maxscale', 'services') + .then(function(res) { + res.data.forEach((i) => { + i.attributes.state.should.equal('Started') + }) }) }) From 2c2f86cebb23db08a03cbcffe6bb6a8a721bbbb6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Mon, 9 Oct 2017 13:38:12 +0300 Subject: [PATCH 043/101] Fix OK packet status extraction in readwritesplit As the row count and last insert ID are length-encoded integers, they need to be handled with the correct functions. --- server/modules/routing/readwritesplit/readwritesplit.cc | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/server/modules/routing/readwritesplit/readwritesplit.cc b/server/modules/routing/readwritesplit/readwritesplit.cc index b84acd831..b16edbe5c 100644 --- a/server/modules/routing/readwritesplit/readwritesplit.cc +++ b/server/modules/routing/readwritesplit/readwritesplit.cc @@ -30,6 +30,7 @@ #include #include #include +#include #include "rwsplit_internal.hh" #include "rwsplitsession.hh" @@ -531,7 +532,13 @@ static inline bool more_results_exist(GWBUF* buffer) { ss_dassert(is_eof(buffer, gw_mysql_get_byte3(GWBUF_DATA(buffer))) || mxs_mysql_is_ok_packet(buffer)); - uint16_t status = gw_mysql_get_byte2(GWBUF_DATA(buffer) + MYSQL_HEADER_LEN + 1 + 2); + ss_dassert(GWBUF_IS_CONTIGUOUS(buffer)); + + uint8_t* ptr = GWBUF_DATA(buffer) + MYSQL_HEADER_LEN + 1; + ptr += mxs_leint_bytes(ptr); + ptr += mxs_leint_bytes(ptr); + + uint16_t status = gw_mysql_get_byte2(ptr); return status & SERVER_MORE_RESULTS_EXIST; } From ede52c8af9f1f0069d53efc413e444e878ad671b Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Mon, 9 Oct 2017 14:05:38 +0300 Subject: [PATCH 044/101] Add OK packet processing test Added a test case which exercises the OK packet handling in readwritesplit. --- maxscale-system-test/CMakeLists.txt | 3 +++ maxscale-system-test/large_insert_hang.cpp | 28 ++++++++++++++++++++++ 2 files changed, 31 insertions(+) create mode 100644 maxscale-system-test/large_insert_hang.cpp diff --git a/maxscale-system-test/CMakeLists.txt b/maxscale-system-test/CMakeLists.txt index 62ab25737..8c3a53585 100644 --- a/maxscale-system-test/CMakeLists.txt +++ b/maxscale-system-test/CMakeLists.txt @@ -295,6 +295,9 @@ add_test_script(load_balancing_pers10 load_balancing load_pers10 LABELS readwrit # Test with extremely big blob inserting add_test_executable(longblob.cpp longblob longblob LABELS readwritesplit readconnroute UNSTABLE HEAVY REPL_BACKEND) +# Check that inserts of specific size don't cause a hang +add_test_executable(large_insert_hang.cpp large_insert_hang replication LABELS readwritesplit REPL_BACKEND) + # Test with extremely big blob inserting/selecting with > 16 mb data blocks add_test_executable(mxs1110_16mb.cpp mxs1110_16mb longblob_filters LABELS readwritesplit readconnroute HEAVY REPL_BACKEND) diff --git a/maxscale-system-test/large_insert_hang.cpp b/maxscale-system-test/large_insert_hang.cpp new file mode 100644 index 000000000..26075a3ca --- /dev/null +++ b/maxscale-system-test/large_insert_hang.cpp @@ -0,0 +1,28 @@ +/** + * Check that the OK packet flags are read correctly + */ + +#include "testconnections.h" + +int main(int argc, char *argv[]) +{ + TestConnections test(argc, argv); + test.set_timeout(60); + + test.connect_maxscale(); + test.try_query(test.conn_rwsplit, "CREATE OR REPLACE TABLE test.t1(id int)"); + + std::stringstream ss; + ss << "INSERT INTO test.t1 VALUES (0)"; + + for (int i = 0; i < 2299; i++) + { + ss << ",(" << i << ")"; + } + + test.try_query(test.conn_rwsplit, query.str().c_str()); + 
test.try_query(test.conn_rwsplit, "DROP TABLE test.t1"); + test.close_maxscale_connections(); + + return test.global_result; +} From 6b7ccea4e3e53435c60ff96955ad969ad3a2b10e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Mon, 9 Oct 2017 16:01:40 +0300 Subject: [PATCH 045/101] Fix large_insert_hang compilation failure Added missing changes that weren't added to last commit. --- maxscale-system-test/large_insert_hang.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/maxscale-system-test/large_insert_hang.cpp b/maxscale-system-test/large_insert_hang.cpp index 26075a3ca..7428b1e8f 100644 --- a/maxscale-system-test/large_insert_hang.cpp +++ b/maxscale-system-test/large_insert_hang.cpp @@ -3,11 +3,12 @@ */ #include "testconnections.h" +#include int main(int argc, char *argv[]) { TestConnections test(argc, argv); - test.set_timeout(60); + test.set_timeout(30); test.connect_maxscale(); test.try_query(test.conn_rwsplit, "CREATE OR REPLACE TABLE test.t1(id int)"); @@ -20,7 +21,7 @@ int main(int argc, char *argv[]) ss << ",(" << i << ")"; } - test.try_query(test.conn_rwsplit, query.str().c_str()); + test.try_query(test.conn_rwsplit, ss.str().c_str()); test.try_query(test.conn_rwsplit, "DROP TABLE test.t1"); test.close_maxscale_connections(); From 3015e6c97de083c87854e040dc66507c2daf3379 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Mon, 9 Oct 2017 16:14:36 +0300 Subject: [PATCH 046/101] Add file names and line numbers to stacktraces The GLIBC backtrace functionality doesn't generate file names and line numbers in the generated stacktrace. This can to be done manually by executing a set of system commands. Conceptually doing non-signal-safe operations in a signal handler is very wrong but as stacktraces are only printed when something has gone horribly wrong, there is no real need to worry about making things worse. 
As a safeguard for fatal errors while the stacktrace is being generated, it is first dumped into the standard error output of the process. This will function even if malloc is corrupted. --- server/core/gateway.cc | 94 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 93 insertions(+), 1 deletion(-) diff --git a/server/core/gateway.cc b/server/core/gateway.cc index b6094b339..ce9bca627 100644 --- a/server/core/gateway.cc +++ b/server/core/gateway.cc @@ -373,6 +373,96 @@ volatile sig_atomic_t fatal_handling = 0; static int signal_set(int sig, void (*handler)(int)); +void get_command_output(char* output, size_t size, const char* format, ...) +{ + va_list valist; + va_start(valist, format); + int cmd_len = vsnprintf(NULL, 0, format, valist); + va_end(valist); + + va_start(valist, format); + char cmd[cmd_len + 1]; + vsnprintf(cmd, cmd_len + 1, format, valist); + va_end(valist); + + *output = '\0'; + FILE* file = popen(cmd, "r"); + + if (file) + { + size_t nread = fread(output, 1, size, file); + nread = nread < size ? 
nread : size - 1; + output[nread--] = '\0'; + + // Trim trailing newlines + while (output + nread > output && output[nread] == '\n') + { + output[nread--] = '\0'; + } + + pclose(file); + } +} + +void extract_file_and_line(const char* symbols, char* cmd, size_t size) +{ + const char* filename_end = strchr(symbols, '('); + const char* symname_end = strchr(symbols, ')'); + + if (filename_end && symname_end) + { + // This appears to be a symbol in a library + char filename[PATH_MAX + 1]; + char symname[512]; + char offset[512]; + snprintf(filename, sizeof(filename), "%.*s", (int)(filename_end - symbols), symbols); + + const char* symname_start = filename_end + 1; + + if (*symname_start != '+') + { + // We have a string form symbol name and an offset, we need to + // extract the symbol address + + const char* addr_offset = symname_start; + + while (addr_offset < symname_end && *addr_offset != '+') + { + addr_offset++; + } + + snprintf(symname, sizeof(symname), "%.*s", (int)(addr_offset - symname_start), symname_start); + + if (addr_offset < symname_end && *addr_offset == '+') + { + addr_offset++; + } + + snprintf(offset, sizeof(offset), "%.*s", (int)(symname_end - addr_offset), addr_offset); + + // Get the hexadecimal address of the symbol + get_command_output(cmd, size, + "nm %s |grep ' %s$'|sed -e 's/ .*//' -e 's/^/0x/'", + filename, symname); + long long symaddr = strtoll(cmd, NULL, 16); + long long offsetaddr = strtoll(offset, NULL, 16); + + // Calculate the file and line now that we have the raw offset into + // the library + get_command_output(cmd, size, + "addr2line -e %s 0x%x", + filename, symaddr + offsetaddr); + } + else + { + // Raw offset into library + symname_start++; + snprintf(symname, sizeof(symname), "%.*s", (int)(symname_end - symname_start), symname_start); + get_command_output(cmd, size, "addr2line -e %s %s", filename, symname); + } + } +} + static void sigfatal_handler(int i) { @@ -407,7 +497,9 @@ sigfatal_handler(int i) { for (int n = 0; n < 
count; n++) { - MXS_ALERT(" %s\n", symbols[n]); + char cmd[PATH_MAX + 1024] = ""; + extract_file_and_line(symbols[n], cmd, sizeof(cmd)); + MXS_ALERT(" %s: %s", symbols[n], cmd); } MXS_FREE(symbols); } From d64cd5cab8089063881d2bbefd5055bdfafa696b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Mon, 9 Oct 2017 16:14:36 +0300 Subject: [PATCH 047/101] Return results as sets of packets Returning the results of a query as a set of packets is currently more efficient. This is mainly due to the fact that each individual packet for single packet routing is allocated from the heap which causes a significant loss in performance. Took the new capability into use in readwritesplit and modified the reply_is_complete function to work with non-contiguous results. --- include/maxscale/routing.h | 2 + server/core/modutil.cc | 5 + .../MySQL/MySQLBackend/mysql_backend.c | 100 ++++++++---------- .../routing/readwritesplit/readwritesplit.cc | 84 ++------------- 4 files changed, 65 insertions(+), 126 deletions(-) diff --git a/include/maxscale/routing.h b/include/maxscale/routing.h index 9a381e927..b7ac075b7 100644 --- a/include/maxscale/routing.h +++ b/include/maxscale/routing.h @@ -42,6 +42,8 @@ typedef enum routing_capability RCAP_TYPE_CONTIGUOUS_OUTPUT = 0x0030, /* 0b0000000000110000 */ /** Result sets are delivered in one buffer; implies RCAP_TYPE_STMT_OUTPUT. 
*/ RCAP_TYPE_RESULTSET_OUTPUT = 0x0050, /* 0b0000000001110000 */ + /** Results are delivered as a set of complete packets */ + RCAP_TYPE_PACKET_OUTPUT = 0x0080, /* 0b0000000010000000 */ } mxs_routing_capability_t; diff --git a/server/core/modutil.cc b/server/core/modutil.cc index 317f4f9ae..3b2577a57 100644 --- a/server/core/modutil.cc +++ b/server/core/modutil.cc @@ -676,6 +676,11 @@ int modutil_count_signal_packets(GWBUF *reply, int n_found, bool* more, modutil_ } offset += pktlen; + if (offset >= GWBUF_LENGTH(reply) && reply->next) + { + offset -= GWBUF_LENGTH(reply); + reply = reply->next; + } } int total = err + eof + n_found; diff --git a/server/modules/protocol/MySQL/MySQLBackend/mysql_backend.c b/server/modules/protocol/MySQL/MySQLBackend/mysql_backend.c index d4f332fe3..faa779e1f 100644 --- a/server/modules/protocol/MySQL/MySQLBackend/mysql_backend.c +++ b/server/modules/protocol/MySQL/MySQLBackend/mysql_backend.c @@ -745,8 +745,9 @@ gw_read_and_write(DCB *dcb) bool result_collected = false; MySQLProtocol *proto = (MySQLProtocol *)dcb->protocol; - if (rcap_type_required(capabilities, RCAP_TYPE_CONTIGUOUS_OUTPUT) || - proto->collect_result || proto->ignore_replies != 0) + if (rcap_type_required(capabilities, RCAP_TYPE_PACKET_OUTPUT) || + rcap_type_required(capabilities, RCAP_TYPE_CONTIGUOUS_OUTPUT) || + proto->ignore_replies != 0) { GWBUF *tmp = modutil_get_complete_packets(&read_buffer); /* Put any residue into the read queue */ @@ -761,48 +762,53 @@ gw_read_and_write(DCB *dcb) read_buffer = tmp; - if ((tmp = gwbuf_make_contiguous(read_buffer))) + if (rcap_type_required(capabilities, RCAP_TYPE_CONTIGUOUS_OUTPUT) || + proto->collect_result || + proto->ignore_replies != 0) { - read_buffer = tmp; - } - else - { - /** Failed to make the buffer contiguous */ - gwbuf_free(read_buffer); - poll_fake_hangup_event(dcb); - return 0; - } - - if (collecting_resultset(proto, capabilities)) - { - if (expecting_resultset(proto)) + if ((tmp = 
gwbuf_make_contiguous(read_buffer))) { - if (mxs_mysql_is_result_set(read_buffer)) + read_buffer = tmp; + } + else + { + /** Failed to make the buffer contiguous */ + gwbuf_free(read_buffer); + poll_fake_hangup_event(dcb); + return 0; + } + + if (collecting_resultset(proto, capabilities)) + { + if (expecting_resultset(proto)) { - bool more = false; - if (modutil_count_signal_packets(read_buffer, 0, &more, NULL) != 2) + if (mxs_mysql_is_result_set(read_buffer)) + { + bool more = false; + if (modutil_count_signal_packets(read_buffer, 0, &more, NULL) != 2) + { + dcb_readq_prepend(dcb, read_buffer); + return 0; + } + } + + // Collected the complete result + proto->collect_result = false; + result_collected = true; + } + else if (expecting_ps_response(proto) && + mxs_mysql_is_prep_stmt_ok(read_buffer)) + { + if (!complete_ps_response(read_buffer)) { dcb_readq_prepend(dcb, read_buffer); return 0; } - } - // Collected the complete result - proto->collect_result = false; - result_collected = true; - } - else if (expecting_ps_response(proto) && - mxs_mysql_is_prep_stmt_ok(read_buffer)) - { - if (!complete_ps_response(read_buffer)) - { - dcb_readq_prepend(dcb, read_buffer); - return 0; + // Collected the complete result + proto->collect_result = false; + result_collected = true; } - - // Collected the complete result - proto->collect_result = false; - result_collected = true; } } } @@ -905,8 +911,7 @@ gw_read_and_write(DCB *dcb) * If protocol has session command set, concatenate whole * response into one buffer. 
*/ - if (proto->protocol_command.scom_cmd != MXS_COM_UNDEFINED && - protocol_get_srv_command(proto, true) != MXS_COM_UNDEFINED) + if (protocol_get_srv_command((MySQLProtocol *)dcb->protocol, true) != MXS_COM_UNDEFINED) { if (result_collected) { @@ -940,23 +945,12 @@ gw_read_and_write(DCB *dcb) !rcap_type_required(capabilities, RCAP_TYPE_RESULTSET_OUTPUT) && !result_collected) { - if ((stmt = modutil_get_next_MySQL_packet(&read_buffer))) - { - if (!GWBUF_IS_CONTIGUOUS(stmt)) - { - // Make sure the buffer is contiguous - stmt = gwbuf_make_contiguous(stmt); - } - } - else - { - // All complete packets are processed, store partial packets for later use - if (read_buffer) - { - dcb_readq_prepend(dcb, read_buffer); - } + stmt = modutil_get_next_MySQL_packet(&read_buffer); - return return_code; + if (!GWBUF_IS_CONTIGUOUS(stmt)) + { + // Make sure the buffer is contiguous + stmt = gwbuf_make_contiguous(stmt); } } else diff --git a/server/modules/routing/readwritesplit/readwritesplit.cc b/server/modules/routing/readwritesplit/readwritesplit.cc index b16edbe5c..24260c7b7 100644 --- a/server/modules/routing/readwritesplit/readwritesplit.cc +++ b/server/modules/routing/readwritesplit/readwritesplit.cc @@ -515,55 +515,6 @@ static bool route_stored_query(RWSplitSession *rses) return rval; } -static inline bool is_eof(GWBUF* buffer, size_t len) -{ - uint8_t* data = GWBUF_DATA(buffer); - return data[MYSQL_HEADER_LEN] == MYSQL_REPLY_EOF && - len == MYSQL_EOF_PACKET_LEN - MYSQL_HEADER_LEN; -} - -static inline bool is_ok(GWBUF* buffer) -{ - uint8_t* data = GWBUF_DATA(buffer); - return data[MYSQL_HEADER_LEN] == MYSQL_REPLY_OK; -} - -static inline bool more_results_exist(GWBUF* buffer) -{ - ss_dassert(is_eof(buffer, gw_mysql_get_byte3(GWBUF_DATA(buffer))) || - mxs_mysql_is_ok_packet(buffer)); - ss_dassert(GWBUF_IS_CONTIGUOUS(buffer)); - - uint8_t* ptr = GWBUF_DATA(buffer) + MYSQL_HEADER_LEN + 1; - ptr += mxs_leint_bytes(ptr); - ptr += mxs_leint_bytes(ptr); - - uint16_t status = 
gw_mysql_get_byte2(ptr); - return status & SERVER_MORE_RESULTS_EXIST; -} - -static inline bool is_result_set(GWBUF *buffer) -{ - bool rval = false; - - switch (GWBUF_DATA(buffer)[MYSQL_HEADER_LEN]) - { - - case MYSQL_REPLY_OK: - case MYSQL_REPLY_ERR: - case MYSQL_REPLY_LOCAL_INFILE: - case MYSQL_REPLY_EOF: - /** Not a result set */ - break; - - default: - rval = true; - break; - } - - return rval; -} - /** * @brief Check if we have received a complete reply from the backend * @@ -575,11 +526,12 @@ static inline bool is_result_set(GWBUF *buffer) bool reply_is_complete(SRWBackend& backend, GWBUF *buffer) { if (backend->get_reply_state() == REPLY_STATE_START && - (!is_result_set(buffer) || GWBUF_IS_COLLECTED_RESULT(buffer))) + (!mxs_mysql_is_result_set(buffer) || GWBUF_IS_COLLECTED_RESULT(buffer))) { if (GWBUF_IS_COLLECTED_RESULT(buffer) || backend->current_command() == MXS_COM_STMT_PREPARE || - !is_ok(buffer) || !more_results_exist(buffer)) + !mxs_mysql_is_ok_packet(buffer) || + !mxs_mysql_more_results_after_ok(buffer)) { /** Not a result set, we have the complete response */ LOG_RS(backend, REPLY_STATE_DONE); @@ -588,21 +540,11 @@ bool reply_is_complete(SRWBackend& backend, GWBUF *buffer) } else { - int n_eof = backend->get_reply_state() == REPLY_STATE_RSET_ROWS ? 1 : 0; - size_t len = gw_mysql_get_byte3(GWBUF_DATA(buffer)); - - if (len == GW_MYSQL_MAX_PACKET_LEN) - { - backend->set_large_packet(true); - } - else if (backend->is_large_packet()) - { - backend->set_large_packet(false); - } - else if (is_eof(buffer, len)) - { - n_eof++; - } + bool more = false; + modutil_state state = {backend->is_large_packet()}; + int n_old_eof = backend->get_reply_state() == REPLY_STATE_RSET_ROWS ? 
1 : 0; + int n_eof = modutil_count_signal_packets(buffer, n_old_eof, &more, &state); + backend->set_large_packet(state.state); if (n_eof == 0) { @@ -624,7 +566,7 @@ bool reply_is_complete(SRWBackend& backend, GWBUF *buffer) LOG_RS(backend, REPLY_STATE_DONE); backend->set_reply_state(REPLY_STATE_DONE); - if (more_results_exist(buffer)) + if (more) { /** The server will send more resultsets */ LOG_RS(backend, REPLY_STATE_START); @@ -1183,10 +1125,6 @@ static void clientReply(MXS_ROUTER *instance, GWBUF *writebuf, DCB *backend_dcb) { - ss_dassert((GWBUF_IS_CONTIGUOUS(writebuf) && - MYSQL_GET_PAYLOAD_LEN(GWBUF_DATA(writebuf)) + - MYSQL_HEADER_LEN == gwbuf_length(writebuf)) || - GWBUF_IS_COLLECTED_RESULT(writebuf)); RWSplitSession *rses = (RWSplitSession *)router_session; DCB *client_dcb = backend_dcb->session->client_dcb; CHK_CLIENT_RSES(rses); @@ -1280,7 +1218,7 @@ static void clientReply(MXS_ROUTER *instance, */ static uint64_t getCapabilities(MXS_ROUTER* instance) { - return RCAP_TYPE_STMT_INPUT | RCAP_TYPE_TRANSACTION_TRACKING | RCAP_TYPE_STMT_OUTPUT; + return RCAP_TYPE_STMT_INPUT | RCAP_TYPE_TRANSACTION_TRACKING | RCAP_TYPE_PACKET_OUTPUT; } /** @@ -1431,7 +1369,7 @@ MXS_MODULE *MXS_CREATE_MODULE() MXS_MODULE_API_ROUTER, MXS_MODULE_GA, MXS_ROUTER_VERSION, "A Read/Write splitting router for enhancement read scalability", "V1.1.0", - RCAP_TYPE_STMT_INPUT | RCAP_TYPE_TRANSACTION_TRACKING | RCAP_TYPE_STMT_OUTPUT, + RCAP_TYPE_STMT_INPUT | RCAP_TYPE_TRANSACTION_TRACKING | RCAP_TYPE_PACKET_OUTPUT, &MyObject, NULL, /* Process init. */ NULL, /* Process finish. */ From 08bdbb45db3937f31ec970050fd7b6cb9c8f786d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Mon, 9 Oct 2017 19:52:35 +0300 Subject: [PATCH 048/101] Add missing initialization of MySQLProtocol::collect_result The variable was not initialized. 
--- server/modules/protocol/MySQL/mysql_common.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/server/modules/protocol/MySQL/mysql_common.cc b/server/modules/protocol/MySQL/mysql_common.cc index 0d22b488d..40648399a 100644 --- a/server/modules/protocol/MySQL/mysql_common.cc +++ b/server/modules/protocol/MySQL/mysql_common.cc @@ -70,6 +70,7 @@ MySQLProtocol* mysql_protocol_init(DCB* dcb, int fd) p->stored_query = NULL; p->extra_capabilities = 0; p->ignore_replies = 0; + p->collect_result = false; #if defined(SS_DEBUG) p->protocol_chk_top = CHK_NUM_PROTOCOL; p->protocol_chk_tail = CHK_NUM_PROTOCOL; From 30a99b2632ffdb6e40c493e0c3d625bb860d1eba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Tue, 10 Oct 2017 06:10:11 +0300 Subject: [PATCH 049/101] Fix unintentional fallthrough When LEAST_BEHIND_MASTER routing criteria was used, the info level logging function would fall through to the default case. In debug builds, this would trigger a debug assertion. --- .../modules/routing/readwritesplit/rwsplit_select_backends.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/server/modules/routing/readwritesplit/rwsplit_select_backends.cc b/server/modules/routing/readwritesplit/rwsplit_select_backends.cc index 80ef9b988..ef64b0480 100644 --- a/server/modules/routing/readwritesplit/rwsplit_select_backends.cc +++ b/server/modules/routing/readwritesplit/rwsplit_select_backends.cc @@ -231,6 +231,8 @@ static void log_server_connections(select_criteria_t criteria, const SRWBackendL MXS_INFO("replication lag : %d in \t[%s]:%d %s", b->server->rlag, b->server->name, b->server->port, STRSRVSTATUS(b->server)); + break; + default: ss_dassert(!true); break; From 139b9743068dda299c9ec8bf3b2874873bc6741c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Tue, 10 Oct 2017 07:11:37 +0300 Subject: [PATCH 050/101] Add function for logging buffer contents as hex The gwbuf_hexdump write the contents of the buffer into the info log. 
This is quite helpful for debugging of protocol related problems. --- include/maxscale/buffer.h | 7 ++++++ server/core/buffer.cc | 53 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 60 insertions(+) diff --git a/include/maxscale/buffer.h b/include/maxscale/buffer.h index cae47b33f..fcdd1ed6c 100644 --- a/include/maxscale/buffer.h +++ b/include/maxscale/buffer.h @@ -414,4 +414,11 @@ void *gwbuf_get_buffer_object_data(GWBUF* buf, bufobj_id_t id); extern void dprintAllBuffers(void *pdcb); #endif +/** + * Debug function for dumping buffer contents to INFO log + * + * @param buffer Buffer to dump + */ +void gwbuf_hexdump(GWBUF* buffer); + MXS_END_DECLS diff --git a/server/core/buffer.cc b/server/core/buffer.cc index 98d1ce829..cb5587623 100644 --- a/server/core/buffer.cc +++ b/server/core/buffer.cc @@ -12,14 +12,18 @@ */ #include + #include #include +#include + #include #include #include #include #include #include +#include #if defined(BUFFER_TRACE) #include @@ -879,3 +883,52 @@ size_t gwbuf_copy_data(const GWBUF *buffer, size_t offset, size_t bytes, uint8_t return bytes_read; } + +static std::string dump_one_buffer(GWBUF* buffer) +{ + std::string rval; + int len = GWBUF_LENGTH(buffer); + uint8_t* data = GWBUF_DATA(buffer); + + while (len > 0) + { + // Process the buffer in 40 byte chunks + int n = MXS_MIN(40, len); + char output[n * 2 + 1]; + gw_bin2hex(output, data, n); + char* ptr = output; + + while (ptr < output + n * 2) + { + rval.append(ptr, 2); + rval += " "; + ptr += 2; + } + len -= n; + data += n; + rval += "\n"; + } + + return rval; +} + +void gwbuf_hexdump(GWBUF* buffer) +{ + std::stringstream ss; + + ss << "Buffer " << buffer << ":\n"; + + for (GWBUF* b = buffer; b; b = b->next) + { + ss << dump_one_buffer(b); + } + + int n = ss.str().length(); + + if (n > 1024) + { + n = 1024; + } + + MXS_INFO("%.*s", n, ss.str().c_str()); +} From cdf68ab86e6a2868ad002b5ef87b5e56d4914faa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: 
Tue, 10 Oct 2017 07:14:05 +0300 Subject: [PATCH 051/101] Fix muti-result handling in modutil_count_signal_packets The function assumed that the buffer would not contain a trailing OK packet that completes a multi-result response. --- server/core/modutil.cc | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/server/core/modutil.cc b/server/core/modutil.cc index 3b2577a57..06316f599 100644 --- a/server/core/modutil.cc +++ b/server/core/modutil.cc @@ -25,6 +25,7 @@ #include #include #include +#include /** These are used when converting MySQL wildcards to regular expressions */ static SPINLOCK re_lock = SPINLOCK_INIT; @@ -627,13 +628,14 @@ GWBUF* modutil_get_complete_packets(GWBUF **p_readbuf) return complete; } -int modutil_count_signal_packets(GWBUF *reply, int n_found, bool* more, modutil_state* state) +int modutil_count_signal_packets(GWBUF *reply, int n_found, bool* more_dest, modutil_state* state) { unsigned int len = gwbuf_length(reply); int eof = 0; int err = 0; size_t offset = 0; bool skip_next = state ? 
state->state : false; + bool more = false; while (offset < len) { @@ -664,15 +666,28 @@ int modutil_count_signal_packets(GWBUF *reply, int n_found, bool* more, modutil_ { eof++; } + else if (more && command == MYSQL_REPLY_OK) + { + // This should not be the first packet + ss_dassert(pktlen >= MYSQL_OK_PACKET_MIN_LEN && offset > 0); + + uint8_t data[payloadlen - 1]; + gwbuf_copy_data(reply, offset + MYSQL_HEADER_LEN + 1, sizeof(data), data); + + uint8_t* ptr = data; + ptr += mxs_leint_bytes(ptr); + ptr += mxs_leint_bytes(ptr); + + uint16_t* status = (uint16_t*)ptr; + more = (*status) & SERVER_MORE_RESULTS_EXIST; + } } if (offset + pktlen >= len || (eof + err + n_found) >= 2) { gwbuf_copy_data(reply, offset, sizeof(header), header); uint16_t* status = (uint16_t*)(header + MYSQL_HEADER_LEN + 1 + 2); // Skip command and warning count - *more = ((*status) & SERVER_MORE_RESULTS_EXIST); - offset += pktlen; - break; + more = ((*status) & SERVER_MORE_RESULTS_EXIST); } offset += pktlen; @@ -690,6 +705,7 @@ int modutil_count_signal_packets(GWBUF *reply, int n_found, bool* more, modutil_ state->state = skip_next; } + *more_dest = more; return total; } From d9922ac8957abfdcac9236dff0e69c02be595926 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Tue, 10 Oct 2017 07:25:47 +0300 Subject: [PATCH 052/101] Fix debug assertion in modutil_count_signal_packets The original offset needs to be separately tracked to assert that an OK packet is not the first packet in the buffer. The functional offset into the buffer is modified to reduce the need to iterate over buffers that have already been processed. 
--- server/core/modutil.cc | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/server/core/modutil.cc b/server/core/modutil.cc index 06316f599..a5850ac6b 100644 --- a/server/core/modutil.cc +++ b/server/core/modutil.cc @@ -631,6 +631,7 @@ GWBUF* modutil_get_complete_packets(GWBUF **p_readbuf) int modutil_count_signal_packets(GWBUF *reply, int n_found, bool* more_dest, modutil_state* state) { unsigned int len = gwbuf_length(reply); + ss_debug(int real_offset = 0); int eof = 0; int err = 0; size_t offset = 0; @@ -669,7 +670,8 @@ int modutil_count_signal_packets(GWBUF *reply, int n_found, bool* more_dest, mod else if (more && command == MYSQL_REPLY_OK) { // This should not be the first packet - ss_dassert(pktlen >= MYSQL_OK_PACKET_MIN_LEN && offset > 0); + ss_dassert(pktlen >= MYSQL_OK_PACKET_MIN_LEN); + ss_dassert(real_offset > 0); uint8_t data[payloadlen - 1]; gwbuf_copy_data(reply, offset + MYSQL_HEADER_LEN + 1, sizeof(data), data); @@ -691,6 +693,8 @@ int modutil_count_signal_packets(GWBUF *reply, int n_found, bool* more_dest, mod } offset += pktlen; + ss_debug(real_offset += pktlen); + if (offset >= GWBUF_LENGTH(reply) && reply->next) { offset -= GWBUF_LENGTH(reply); From 42cb6bbff7706273fb44950cb63611ed3e5ac616 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Tue, 10 Oct 2017 08:25:18 +0300 Subject: [PATCH 053/101] Fix multi-statement execution in readwritesplit A multi-statements can return multiple resultsets in one response. To accommodate for this, both the readwritesplit and modutil code must be altered. By ignoring complete resultsets in readwritesplit, the code can deduce whether a result is complete or not. 
--- server/core/modutil.cc | 18 ++++++++--------- .../routing/readwritesplit/readwritesplit.cc | 20 +++++++++++++++++++ 2 files changed, 28 insertions(+), 10 deletions(-) diff --git a/server/core/modutil.cc b/server/core/modutil.cc index a5850ac6b..172f317aa 100644 --- a/server/core/modutil.cc +++ b/server/core/modutil.cc @@ -628,10 +628,9 @@ GWBUF* modutil_get_complete_packets(GWBUF **p_readbuf) return complete; } -int modutil_count_signal_packets(GWBUF *reply, int n_found, bool* more_dest, modutil_state* state) +int modutil_count_signal_packets(GWBUF *reply, int n_found, bool* more_out, modutil_state* state) { unsigned int len = gwbuf_length(reply); - ss_debug(int real_offset = 0); int eof = 0; int err = 0; size_t offset = 0; @@ -661,18 +660,18 @@ int modutil_count_signal_packets(GWBUF *reply, int n_found, bool* more_dest, mod if (command == MYSQL_REPLY_ERR) { - err++; + /** Any errors in the packet stream mean that the result set + * generation was aborted due to an error. No more results will + * follow after this. 
*/ + *more_out = false; + return 2; } else if (command == MYSQL_REPLY_EOF && pktlen == MYSQL_EOF_PACKET_LEN) { eof++; } - else if (more && command == MYSQL_REPLY_OK) + else if (command == MYSQL_REPLY_OK && pktlen >= MYSQL_OK_PACKET_MIN_LEN) { - // This should not be the first packet - ss_dassert(pktlen >= MYSQL_OK_PACKET_MIN_LEN); - ss_dassert(real_offset > 0); - uint8_t data[payloadlen - 1]; gwbuf_copy_data(reply, offset + MYSQL_HEADER_LEN + 1, sizeof(data), data); @@ -693,7 +692,6 @@ int modutil_count_signal_packets(GWBUF *reply, int n_found, bool* more_dest, mod } offset += pktlen; - ss_debug(real_offset += pktlen); if (offset >= GWBUF_LENGTH(reply) && reply->next) { @@ -709,7 +707,7 @@ int modutil_count_signal_packets(GWBUF *reply, int n_found, bool* more_dest, mod state->state = skip_next; } - *more_dest = more; + *more_out = more; return total; } diff --git a/server/modules/routing/readwritesplit/readwritesplit.cc b/server/modules/routing/readwritesplit/readwritesplit.cc index 24260c7b7..e9eda014b 100644 --- a/server/modules/routing/readwritesplit/readwritesplit.cc +++ b/server/modules/routing/readwritesplit/readwritesplit.cc @@ -537,6 +537,16 @@ bool reply_is_complete(SRWBackend& backend, GWBUF *buffer) LOG_RS(backend, REPLY_STATE_DONE); backend->set_reply_state(REPLY_STATE_DONE); } + else + { + // This is an OK packet and more results will follow + ss_dassert(mxs_mysql_is_ok_packet(buffer) && + mxs_mysql_more_results_after_ok(buffer)); + + LOG_RS(backend, REPLY_STATE_RSET_COLDEF); + backend->set_reply_state(REPLY_STATE_RSET_COLDEF); + return reply_is_complete(backend, buffer); + } } else { @@ -546,6 +556,16 @@ bool reply_is_complete(SRWBackend& backend, GWBUF *buffer) int n_eof = modutil_count_signal_packets(buffer, n_old_eof, &more, &state); backend->set_large_packet(state.state); + if (n_eof > 2) + { + /** + * We have multiple results in the buffer, we only care about + * the state of the last one. 
Skip the complete result sets and act + * like we're processing a single result set. + */ + n_eof = n_eof % 2 ? 1 : 2; + } + if (n_eof == 0) { /** Waiting for the EOF packet after the column definitions */ From f5f39efcdcb2eac5494d558d025d9d2320984223 Mon Sep 17 00:00:00 2001 From: Johan Wikman Date: Tue, 10 Oct 2017 13:37:21 +0300 Subject: [PATCH 054/101] Update 2.2.0 release notes --- .../Release-Notes/MaxScale-2.2.0-Release-Notes.md | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/Documentation/Release-Notes/MaxScale-2.2.0-Release-Notes.md b/Documentation/Release-Notes/MaxScale-2.2.0-Release-Notes.md index adee01845..41a038091 100644 --- a/Documentation/Release-Notes/MaxScale-2.2.0-Release-Notes.md +++ b/Documentation/Release-Notes/MaxScale-2.2.0-Release-Notes.md @@ -3,7 +3,7 @@ Release 2.2.0 is a Beta release. This document describes the changes in release 2.2.0, when compared to -release 2.1.X. +release 2.1. For any problems you encounter, please consider submitting a bug report at [Jira](https://jira.mariadb.org). @@ -215,10 +215,19 @@ executed directly on the relevant backend server. In addition to this, there are minor limitations to the `KILL` command handling. See [Limitations](../About/Limitations.md) for more information. -### New `uses_function` rule for dbfwfilter +### Obfuscation and partial masking added to the masking filter. + +A value can now be obfuscated instead of just masked. Further, it is +possible to specify with a regular expression that only a specific part +of a value should be masked. For more information, please read the +[masking filter](../Filters/Masking.md) documentation. + +### New rules for dbfwfilter The `uses_function` type rule prevents certain columns from being used -with functions. For more information about this new rule, read the +with functions. It is now also possible to match a function if it is +used in conjunction with specific columns. 
For more information about +the new rules, read the [dbfwfilter](../Filters/Database-Firewall-Filter.md) documentation. ## Bug fixes From 820e86ac7df6f7e71b2b0168c811f8dccf11587a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Tue, 10 Oct 2017 15:11:28 +0300 Subject: [PATCH 055/101] Fix buffer length calculation in modutil_count_signal_packets The optimization of the buffer iteration did not decrement the total buffer length. --- server/core/modutil.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/server/core/modutil.cc b/server/core/modutil.cc index 172f317aa..4fc711c85 100644 --- a/server/core/modutil.cc +++ b/server/core/modutil.cc @@ -695,6 +695,7 @@ int modutil_count_signal_packets(GWBUF *reply, int n_found, bool* more_out, modu if (offset >= GWBUF_LENGTH(reply) && reply->next) { + len -= GWBUF_LENGTH(reply); offset -= GWBUF_LENGTH(reply); reply = reply->next; } From c3627c83be8c448420f6eff127f912ee7ea7db95 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Tue, 10 Oct 2017 16:37:02 +0300 Subject: [PATCH 056/101] Fix hang on multi-statemet query If multiple queries that only generate OK packets were executed, the result returned by the server would consist of a chain of OK packets. This special case needs to be handled by the modutil_count_signal_packets. The current implementation is very ugly as it simulates a result with at least one resultset in it. A better implementation would hide it behind a simple boolean return value and an internal state object. --- server/core/modutil.cc | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/server/core/modutil.cc b/server/core/modutil.cc index 4fc711c85..2ee5ff595 100644 --- a/server/core/modutil.cc +++ b/server/core/modutil.cc @@ -636,6 +636,7 @@ int modutil_count_signal_packets(GWBUF *reply, int n_found, bool* more_out, modu size_t offset = 0; bool skip_next = state ? 
state->state : false; bool more = false; + bool only_ok = true; while (offset < len) { @@ -682,6 +683,10 @@ int modutil_count_signal_packets(GWBUF *reply, int n_found, bool* more_out, modu uint16_t* status = (uint16_t*)ptr; more = (*status) & SERVER_MORE_RESULTS_EXIST; } + else + { + only_ok = false; + } } if (offset + pktlen >= len || (eof + err + n_found) >= 2) @@ -709,6 +714,12 @@ int modutil_count_signal_packets(GWBUF *reply, int n_found, bool* more_out, modu } *more_out = more; + + if (only_ok && !more) + { + total = 2; + } + return total; } From 8a69232e2601f6c0957e14917747cdd18a2109c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Tue, 10 Oct 2017 21:32:10 +0300 Subject: [PATCH 057/101] Fix usage of partial packets when full packets are expected The authentication phase expects full packets. If the packets aren't complete a debug assertion would get hit. To detect this, the result of the extracted buffer needs to be checked. --- server/modules/protocol/MySQL/MySQLClient/mysql_client.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/server/modules/protocol/MySQL/MySQLClient/mysql_client.cc b/server/modules/protocol/MySQL/MySQLClient/mysql_client.cc index 14ef09c08..58591ea1b 100644 --- a/server/modules/protocol/MySQL/MySQLClient/mysql_client.cc +++ b/server/modules/protocol/MySQL/MySQLClient/mysql_client.cc @@ -494,12 +494,12 @@ int gw_read_client_event(DCB* dcb) dcb_readq_set(dcb, read_buffer); if (nbytes_read < 3 || (0 == max_bytes && nbytes_read < (int)(MYSQL_GET_PAYLOAD_LEN((uint8_t *) GWBUF_DATA(read_buffer)) + 4)) || - (0 != max_bytes && nbytes_read < max_bytes)) + (0 != max_bytes && nbytes_read < max_bytes) || + (read_buffer = modutil_get_next_MySQL_packet(&dcb->readq)) == NULL) { return 0; } - read_buffer = modutil_get_next_MySQL_packet(&dcb->readq); ss_dassert(read_buffer); nbytes_read = gwbuf_length(read_buffer); From 88325a9d36f7a6416ebcd3fd79e9eab34f65ca76 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Tue, 10 Oct 2017 22:31:41 +0300 Subject: [PATCH 058/101] Fix resultset handling with binary data When binary data was processed, it was possible that the values were misinterpreted as OK packets which caused debug assertions to trigger. In addition to this, readwritesplit did not handle the case when all packets were routed individually. --- server/core/modutil.cc | 7 ++++++- .../modules/routing/readwritesplit/readwritesplit.cc | 12 +++++++++++- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/server/core/modutil.cc b/server/core/modutil.cc index 2ee5ff595..7618edf40 100644 --- a/server/core/modutil.cc +++ b/server/core/modutil.cc @@ -649,10 +649,12 @@ int modutil_count_signal_packets(GWBUF *reply, int n_found, bool* more_out, modu if (payloadlen == GW_MYSQL_MAX_PACKET_LEN) { + only_ok = false; skip_next = true; } else if (skip_next) { + only_ok = false; skip_next = false; } else @@ -670,9 +672,12 @@ int modutil_count_signal_packets(GWBUF *reply, int n_found, bool* more_out, modu else if (command == MYSQL_REPLY_EOF && pktlen == MYSQL_EOF_PACKET_LEN) { eof++; + only_ok = false; } - else if (command == MYSQL_REPLY_OK && pktlen >= MYSQL_OK_PACKET_MIN_LEN) + else if (command == MYSQL_REPLY_OK && pktlen >= MYSQL_OK_PACKET_MIN_LEN && + (eof + n_found) % 2 == 0) { + // An OK packet that is not in the middle of a resultset stream uint8_t data[payloadlen - 1]; gwbuf_copy_data(reply, offset + MYSQL_HEADER_LEN + 1, sizeof(data), data); diff --git a/server/modules/routing/readwritesplit/readwritesplit.cc b/server/modules/routing/readwritesplit/readwritesplit.cc index e9eda014b..50cc8a8e4 100644 --- a/server/modules/routing/readwritesplit/readwritesplit.cc +++ b/server/modules/routing/readwritesplit/readwritesplit.cc @@ -515,6 +515,12 @@ static bool route_stored_query(RWSplitSession *rses) return rval; } +static inline bool have_next_packet(GWBUF* buffer) +{ + uint32_t len = MYSQL_GET_PAYLOAD_LEN(GWBUF_DATA(buffer)) + 
MYSQL_HEADER_LEN; + return gwbuf_length(buffer) > len; +} + /** * @brief Check if we have received a complete reply from the backend * @@ -545,7 +551,11 @@ bool reply_is_complete(SRWBackend& backend, GWBUF *buffer) LOG_RS(backend, REPLY_STATE_RSET_COLDEF); backend->set_reply_state(REPLY_STATE_RSET_COLDEF); - return reply_is_complete(backend, buffer); + + if (have_next_packet(buffer)) + { + return reply_is_complete(backend, buffer); + } } } else From fc488ee278983af6e958bca23f291297a9833e3f Mon Sep 17 00:00:00 2001 From: Johan Wikman Date: Wed, 11 Oct 2017 09:27:01 +0300 Subject: [PATCH 059/101] Update release notes --- .../Release-Notes/MaxScale-2.2.0-Release-Notes.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/Documentation/Release-Notes/MaxScale-2.2.0-Release-Notes.md b/Documentation/Release-Notes/MaxScale-2.2.0-Release-Notes.md index 41a038091..6fa665e98 100644 --- a/Documentation/Release-Notes/MaxScale-2.2.0-Release-Notes.md +++ b/Documentation/Release-Notes/MaxScale-2.2.0-Release-Notes.md @@ -166,6 +166,12 @@ mycommands.txt`). ## New Features +### REST API + +MariaDB MaxScale now exposes a REST-API for obtaining information about +and for manipulating the resources of MaxScale. For more information please +refer to the [REST API](../REST-API/API.md) documentation. + ### MaxCtrl Command Line Client The MaxCtrl is a new command line intended to replace MaxAdmin. This @@ -175,6 +181,13 @@ client is distributed separately in the `maxscale-client` package. For more information, refer to the [MaxCtrl](../Reference/MaxCtrl.md) documentation. +### Limited support from Pluggable Authentication Modules (PAM). + +Pluggable authentication module (PAM) is a general purpose authentication API. +An application using PAM can authenticate a user without knowledge about the +underlying authentication implementation. For more information please refer to +the [PAM Authenticator](../Authenticators/PAM-Authenticator.md) documentation. 
+ ### MySQL Monitor Crash Safety The MySQL monitor keeps a journal of the state of the servers and the currently From e94dc2aadef7527e12d8329170297d7294351df2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Wed, 11 Oct 2017 11:29:02 +0300 Subject: [PATCH 060/101] MXS-1468: Add test case Added test case that reproduces the bug. --- maxscale-system-test/CMakeLists.txt | 4 ++ .../cnf/maxscale.cnf.template.mxs1468 | 18 +++++++++ maxscale-system-test/mxs1468.cpp | 37 +++++++++++++++++++ 3 files changed, 59 insertions(+) create mode 100755 maxscale-system-test/cnf/maxscale.cnf.template.mxs1468 create mode 100644 maxscale-system-test/mxs1468.cpp diff --git a/maxscale-system-test/CMakeLists.txt b/maxscale-system-test/CMakeLists.txt index 90d31c0ff..9d32e9568 100644 --- a/maxscale-system-test/CMakeLists.txt +++ b/maxscale-system-test/CMakeLists.txt @@ -502,6 +502,10 @@ add_test_executable(mxs1451_skip_auth.cpp mxs1451_skip_auth mxs1451_skip_auth LA # https://jira.mariadb.org/browse/MXS-1457 add_test_executable(mxs1457_ignore_deleted.cpp mxs1457_ignore_deleted mxs1457_ignore_deleted LABELS REPL_BACKEND) +# MXS-1468: Using dynamic commands to create readwritesplit configs fail after restart +# https://jira.mariadb.org/browse/MXS-1468 +add_test_executable(mxs1468.cpp mxs1468 mxs1468 LABELS REPL_BACKEND) + # 'namedserverfilter' test add_test_executable(namedserverfilter.cpp namedserverfilter namedserverfilter LABELS namedserverfilter LIGHT REPL_BACKEND) diff --git a/maxscale-system-test/cnf/maxscale.cnf.template.mxs1468 b/maxscale-system-test/cnf/maxscale.cnf.template.mxs1468 new file mode 100755 index 000000000..e395186fe --- /dev/null +++ b/maxscale-system-test/cnf/maxscale.cnf.template.mxs1468 @@ -0,0 +1,18 @@ +[maxscale] +threads=###threads### + +[rwsplit-service] +type=service +router=readwritesplit +user=maxskysql +passwd=skysql + +[CLI] +type=service +router=cli + +[CLI Listener] +type=listener +service=CLI +protocol=maxscaled +socket=default 
diff --git a/maxscale-system-test/mxs1468.cpp b/maxscale-system-test/mxs1468.cpp new file mode 100644 index 000000000..70161fce7 --- /dev/null +++ b/maxscale-system-test/mxs1468.cpp @@ -0,0 +1,37 @@ +/** + * MXS-1468: Using dynamic commands to create readwritesplit configs fail after restart + * + * https://jira.mariadb.org/browse/MXS-1468 + */ + +#include "testconnections.h" + +int main(int argc, char** argv) +{ + TestConnections test(argc, argv); + + test.verbose = true; + test.ssh_maxscale(true, + "maxadmin create monitor cluster-monitor mysqlmon;" + "maxadmin alter monitor cluster-monitor user=maxskysql password=skysql monitor_interval=1000;" + "maxadmin restart monitor cluster-monitor;" + "maxadmin create listener rwsplit-service rwsplit-listener 0.0.0.0 4006;" + "maxadmin create listener rwsplit-service rwsplit-listener2 0.0.0.0 4008;" + "maxadmin create listener rwsplit-service rwsplit-listener3 0.0.0.0 4009;" + "maxadmin list listeners;" + "maxadmin create server prod_mysql01 %s 3306;" + "maxadmin create server prod_mysql02 %s 3306;" + "maxadmin create server prod_mysql03 %s 3306;" + "maxadmin list servers;" + "maxadmin add server prod_mysql02 cluster-monitor rwsplit-service;" + "maxadmin add server prod_mysql01 cluster-monitor rwsplit-service;" + "maxadmin add server prod_mysql03 cluster-monitor rwsplit-service;" + "maxadmin list servers;", test.repl->IP[0], test.repl->IP[1], test.repl->IP[2]); + test.verbose = false; + + test.tprintf("Restarting MaxScale"); + test.add_result(test.restart_maxscale(), "Restart should succeed"); + test.check_maxscale_alive(); + + return test.global_result; +} From eac6d239fc63256833c03f4f6a069fbff4b9d2a6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Wed, 11 Oct 2017 11:30:58 +0300 Subject: [PATCH 061/101] MXS-1468: Fix created monitor serialization When servers were added to monitors that were created at runtime, the server list serialization overwrote the original persisted configuration of the 
monitor. To solve this problem, the serialization of the server list and the monitor parameters were combined. --- server/core/config_runtime.c | 4 +- server/core/monitor.c | 118 ++++++++--------------------------- 2 files changed, 29 insertions(+), 93 deletions(-) diff --git a/server/core/config_runtime.c b/server/core/config_runtime.c index 6aacc7522..0a963f887 100644 --- a/server/core/config_runtime.c +++ b/server/core/config_runtime.c @@ -45,7 +45,7 @@ bool runtime_link_server(SERVER *server, const char *target) { if (monitorAddServer(monitor, server)) { - monitor_serialize_servers(monitor); + monitor_serialize(monitor); rval = true; } } @@ -80,7 +80,7 @@ bool runtime_unlink_server(SERVER *server, const char *target) else if (monitor) { monitorRemoveServer(monitor, server); - monitor_serialize_servers(monitor); + monitor_serialize(monitor); } const char *type = service ? "service" : "monitor"; diff --git a/server/core/monitor.c b/server/core/monitor.c index 2c71d8f58..975fae18f 100644 --- a/server/core/monitor.c +++ b/server/core/monitor.c @@ -1266,52 +1266,6 @@ MXS_MONITOR* monitor_server_in_use(const SERVER *server) return rval; } -/** - * Creates a monitor configuration at the location pointed by @c filename - * - * @param monitor Monitor to serialize into a configuration - * @param filename Filename where configuration is written - * @return True on success, false on error - */ -static bool create_monitor_server_config(const MXS_MONITOR *monitor, const char *filename) -{ - int file = open(filename, O_EXCL | O_CREAT | O_WRONLY, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); - - if (file == -1) - { - char errbuf[MXS_STRERROR_BUFLEN]; - MXS_ERROR("Failed to open file '%s' when serializing monitor '%s': %d, %s", - filename, monitor->name, errno, strerror_r(errno, errbuf, sizeof(errbuf))); - return false; - } - - /** - * Only additional parameters are added to the configuration. This prevents - * duplication or addition of parameters that don't support it. 
- * - * TODO: Check for return values on all of the dprintf calls - */ - dprintf(file, "[%s]\n", monitor->name); - - if (monitor->databases) - { - dprintf(file, "servers="); - for (MXS_MONITOR_SERVERS *db = monitor->databases; db; db = db->next) - { - if (db != monitor->databases) - { - dprintf(file, ","); - } - dprintf(file, "%s", db->server->unique_name); - } - dprintf(file, "\n"); - } - - close(file); - - return true; -} - static bool create_monitor_config(const MXS_MONITOR *monitor, const char *filename) { int file = open(filename, O_EXCL | O_CREAT | O_WRONLY, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); @@ -1331,56 +1285,38 @@ static bool create_monitor_config(const MXS_MONITOR *monitor, const char *filena * TODO: Check for return values on all of the dprintf calls */ dprintf(file, "[%s]\n", monitor->name); - dprintf(file, "type=monitor\n"); - dprintf(file, "module=%s\n", monitor->module_name); - dprintf(file, "user=%s\n", monitor->user); - dprintf(file, "password=%s\n", monitor->password); - dprintf(file, "monitor_interval=%lu\n", monitor->interval); - dprintf(file, "backend_connect_timeout=%d\n", monitor->connect_timeout); - dprintf(file, "backend_write_timeout=%d\n", monitor->write_timeout); - dprintf(file, "backend_read_timeout=%d\n", monitor->read_timeout); + + if (monitor->created_online) + { + dprintf(file, "type=monitor\n"); + dprintf(file, "module=%s\n", monitor->module_name); + dprintf(file, "user=%s\n", monitor->user); + dprintf(file, "password=%s\n", monitor->password); + dprintf(file, "monitor_interval=%lu\n", monitor->interval); + dprintf(file, "backend_connect_timeout=%d\n", monitor->connect_timeout); + dprintf(file, "backend_write_timeout=%d\n", monitor->write_timeout); + dprintf(file, "backend_read_timeout=%d\n", monitor->read_timeout); + } + + if (monitor->databases) + { + dprintf(file, "servers="); + for (MXS_MONITOR_SERVERS *db = monitor->databases; db; db = db->next) + { + if (db != monitor->databases) + { + dprintf(file, ","); + } + 
dprintf(file, "%s", db->server->unique_name); + } + dprintf(file, "\n"); + } + close(file); return true; } -bool monitor_serialize_servers(const MXS_MONITOR *monitor) -{ - bool rval = false; - char filename[PATH_MAX]; - snprintf(filename, sizeof(filename), "%s/%s.cnf.tmp", get_config_persistdir(), - monitor->name); - - if (unlink(filename) == -1 && errno != ENOENT) - { - char err[MXS_STRERROR_BUFLEN]; - MXS_ERROR("Failed to remove temporary monitor configuration at '%s': %d, %s", - filename, errno, strerror_r(errno, err, sizeof(err))); - } - else if (create_monitor_server_config(monitor, filename)) - { - char final_filename[PATH_MAX]; - strcpy(final_filename, filename); - - char *dot = strrchr(final_filename, '.'); - ss_dassert(dot); - *dot = '\0'; - - if (rename(filename, final_filename) == 0) - { - rval = true; - } - else - { - char err[MXS_STRERROR_BUFLEN]; - MXS_ERROR("Failed to rename temporary monitor configuration at '%s': %d, %s", - filename, errno, strerror_r(errno, err, sizeof(err))); - } - } - - return rval; -} - bool monitor_serialize(const MXS_MONITOR *monitor) { bool rval = false; From 6e4cb3b2932344ed7846b1303629bcdb72243241 Mon Sep 17 00:00:00 2001 From: Johan Wikman Date: Thu, 12 Oct 2017 11:16:34 +0300 Subject: [PATCH 062/101] Update release date --- Documentation/Release-Notes/MaxScale-2.2.0-Release-Notes.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/Release-Notes/MaxScale-2.2.0-Release-Notes.md b/Documentation/Release-Notes/MaxScale-2.2.0-Release-Notes.md index 6fa665e98..d5fe4fcf8 100644 --- a/Documentation/Release-Notes/MaxScale-2.2.0-Release-Notes.md +++ b/Documentation/Release-Notes/MaxScale-2.2.0-Release-Notes.md @@ -1,4 +1,4 @@ -# MariaDB MaxScale 2.2.0 Release Notes +# MariaDB MaxScale 2.2.0 Release Notes -- 2017-10-12 Release 2.2.0 is a Beta release. 
From 9617b559058d3e5494684ec8d9083d463f8c428a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Wed, 11 Oct 2017 08:52:56 +0300 Subject: [PATCH 063/101] Run MaxCtrl test suite in the regression test suite The MaxCtrl test suite is now a part of the regression test suite. The cluster tests are expected to fail as that is yet to be implemented. Also fixed the return value of TestConnections::ssh_maxscale. --- maxscale-system-test/CMakeLists.txt | 3 + .../cnf/maxscale.cnf.template.maxctrl | 131 ++++++++++++++++++ maxscale-system-test/test_maxctrl.cpp | 26 ++++ maxscale-system-test/test_maxctrl.sh | 70 ++++++++++ maxscale-system-test/testconnections.cpp | 2 +- 5 files changed, 231 insertions(+), 1 deletion(-) create mode 100644 maxscale-system-test/cnf/maxscale.cnf.template.maxctrl create mode 100644 maxscale-system-test/test_maxctrl.cpp create mode 100755 maxscale-system-test/test_maxctrl.sh diff --git a/maxscale-system-test/CMakeLists.txt b/maxscale-system-test/CMakeLists.txt index 8c3a53585..ef082b9e5 100644 --- a/maxscale-system-test/CMakeLists.txt +++ b/maxscale-system-test/CMakeLists.txt @@ -636,6 +636,9 @@ add_test_executable(temporal_tables.cpp temporal_tables replication LABELS readw # Test routing hints add_test_executable(test_hints.cpp test_hints hints2 LABELS hintfilter LIGHT REPL_BACKEND) +# Run MaxCtrl test suite +add_test_executable(test_maxctrl.cpp test_maxctrl maxctrl LABELS REPL_BACKEND) + # Binlogrouter tests, these heavily alter the replication so they are run last add_test_executable(avro.cpp avro avro LABELS avrorouter binlogrouter LIGHT BREAKS_REPL) add_test_executable(avro_alter.cpp avro_alter avro LABELS avrorouter binlogrouter LIGHT BREAKS_REPL) diff --git a/maxscale-system-test/cnf/maxscale.cnf.template.maxctrl b/maxscale-system-test/cnf/maxscale.cnf.template.maxctrl new file mode 100644 index 000000000..5080829f5 --- /dev/null +++ b/maxscale-system-test/cnf/maxscale.cnf.template.maxctrl @@ -0,0 +1,131 @@ +[maxscale] 
+threads=4 +admin_auth=false +log_info=1 + +[MySQL Monitor] +type=monitor +module=mysqlmon +servers=server1,server2,server3,server4 +user=maxskysql +password=skysql +monitor_interval=10000 + +[RW Split Router] +type=service +router=readwritesplit +servers=server1,server2,server3,server4 +user=maxskysql +password=skysql +max_slave_connections=100% + +[SchemaRouter Router] +type=service +router=schemarouter +servers=server1,server2,server3,server4 +user=maxskysql +password=skysql +auth_all_servers=1 + +[RW Split Hint Router] +type=service +router=readwritesplit +servers=server1,server2,server3,server4 +user=maxskysql +password=skysql +max_slave_connections=100% +filters=Hint + +[Read Connection Router] +type=service +router=readconnroute +router_options=master +servers=server1 +user=maxskysql +password=skysql +filters=QLA + +[Hint] +type=filter +module=hintfilter + +[recurse3] +type=filter +module=tee +service=RW Split Router + +[recurse2] +type=filter +module=tee +service=Read Connection Router + +[recurse1] +type=filter +module=tee +service=RW Split Hint Router + +[QLA] +type=filter +module=qlafilter +log_type=unified +append=false +flush=true +filebase=/tmp/qla.log + +[CLI] +type=service +router=cli + +[Read Connection Listener] +type=listener +service=Read Connection Router +protocol=MySQLClient +port=4008 + +[RW Split Listener] +type=listener +service=RW Split Router +protocol=MySQLClient +port=4006 + +[SchemaRouter Listener] +type=listener +service=SchemaRouter Router +protocol=MySQLClient +port=4010 + +[RW Split Hint Listener] +type=listener +service=RW Split Hint Router +protocol=MySQLClient +port=4009 + +[CLI Listener] +type=listener +service=CLI +protocol=maxscaled +socket=default + +[server1] +type=server +address=###node_server_IP_1### +port=###node_server_port_1### +protocol=MySQLBackend + +[server2] +type=server +address=###node_server_IP_2### +port=###node_server_port_2### +protocol=MySQLBackend + +[server3] +type=server +address=###node_server_IP_3### 
+port=###node_server_port_3### +protocol=MySQLBackend + +[server4] +type=server +address=###node_server_IP_4### +port=###node_server_port_4### +protocol=MySQLBackend diff --git a/maxscale-system-test/test_maxctrl.cpp b/maxscale-system-test/test_maxctrl.cpp new file mode 100644 index 000000000..4cf0bc603 --- /dev/null +++ b/maxscale-system-test/test_maxctrl.cpp @@ -0,0 +1,26 @@ +/** + * Run MaxCtrl test suite on the MaxScale machine + */ + +#include "testconnections.h" + +int main(int argc, char *argv[]) +{ + TestConnections test(argc, argv); + + test.copy_to_maxscale("test_maxctrl.sh", "~"); + + // TODO: Don't handle test dependencies in tests + test.tprintf("Installing NPM"); + test.ssh_maxscale(true,"yum -y install epel-release;yum -y install npm;"); + + test.tprintf("Starting test"); + test.verbose = true; + int rv = test.ssh_maxscale(false, "./test_maxctrl.sh"); + test.verbose = false; + + test.tprintf("Removing NPM"); + test.ssh_maxscale(true, "yum -y remove npm epel-release"); + + return rv; +} diff --git a/maxscale-system-test/test_maxctrl.sh b/maxscale-system-test/test_maxctrl.sh new file mode 100755 index 000000000..b40919f0f --- /dev/null +++ b/maxscale-system-test/test_maxctrl.sh @@ -0,0 +1,70 @@ +#!/bin/bash + +# Check branch name +ref=$(maxscale --version-full 2>&1|grep -o ' - .*'|sed 's/ - //') + +if [ -z "$ref" ] +then + echo "Error: No commit ID in --version-full output" + exit 1 +fi + +if [ ! 
-d MaxScale ] +then + git clone https://www.github.com/mariadb-corporation/MaxScale.git +fi + +cd MaxScale +git checkout $ref +cd maxctrl + +cat < start_maxscale.sh +#!/bin/sh +sudo systemctl start maxscale +EOF + +cat <start_double_maxscale.sh +#!/bin/sh +exit 1 +EOF + +cat <stop_maxscale.sh +#!/bin/sh + +sudo systemctl stop maxscale + +sudo rm -rf /var/lib/maxscale/* +sudo rm -rf /var/cache/maxscale/* +sudo rm -rf /var/run/maxscale/* + +if [ -f /tmp/maxadmin.sock ] +then + sudo rm /tmp/maxadmin.sock +fi + +EOF + +cat <stop_double_maxscale.sh +#!/bin/sh + +sudo systemctl stop maxscale + +sudo rm -rf /var/lib/maxscale/* +sudo rm -rf /var/cache/maxscale/* +sudo rm -rf /var/run/maxscale/* + +if [ -f /tmp/maxadmin.sock ] +then + sudo rm /tmp/maxadmin.sock +fi + +EOF + +chmod +x *.sh +npm i + +# Export the value for --basedir where maxscale binaries are located +export MAXSCALE_DIR=/usr +./stop_maxscale.sh + +npm test diff --git a/maxscale-system-test/testconnections.cpp b/maxscale-system-test/testconnections.cpp index d6c706592..5bb722b81 100644 --- a/maxscale-system-test/testconnections.cpp +++ b/maxscale-system-test/testconnections.cpp @@ -1484,7 +1484,7 @@ int TestConnections::ssh_maxscale(bool sudo, const char* format, ...) free(sys); free(cmd); - return rc; + return WEXITSTATUS(rc); } int TestConnections::copy_to_maxscale(const char* src, const char* dest) From 47b1004134bb8cf1fbf544209f4c4432b6ff13ca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Wed, 11 Oct 2017 14:59:20 +0300 Subject: [PATCH 064/101] Add proper cluster testing scripts The scripts start MaxScale processes on two different servers. The support framework for the tests is in place but the tests themselves still need to be modified to allow the use of non-localhost addresses. 
--- maxctrl/test_utils.js | 8 ++-- maxscale-system-test/maxctrl_scripts.sh.in | 47 ++++++++++++++++++++++ maxscale-system-test/test_maxctrl.cpp | 10 +++++ maxscale-system-test/test_maxctrl.sh | 45 ++------------------- maxscale-system-test/testconnections.cpp | 19 +++++++++ maxscale-system-test/testconnections.h | 5 ++- 6 files changed, 87 insertions(+), 47 deletions(-) create mode 100644 maxscale-system-test/maxctrl_scripts.sh.in diff --git a/maxctrl/test_utils.js b/maxctrl/test_utils.js index 65fe6ae4e..e81bf4ae0 100644 --- a/maxctrl/test_utils.js +++ b/maxctrl/test_utils.js @@ -20,7 +20,7 @@ module.exports = function() { return new Promise(function(resolve, reject) { child_process.execFile("./start_maxscale.sh", function(err, stdout, stderr) { if (err) { - reject() + reject(err) } else { resolve() } @@ -33,7 +33,7 @@ module.exports = function() { return new Promise(function(resolve, reject) { child_process.execFile("./start_double_maxscale.sh", function(err, stdout, stderr) { if (err) { - reject() + reject(err) } else { resolve() } @@ -46,7 +46,7 @@ module.exports = function() { return new Promise(function(resolve, reject) { child_process.execFile("./stop_maxscale.sh", function(err, stdout, stderr) { if (err) { - reject() + reject(err) } else { resolve() } @@ -59,7 +59,7 @@ module.exports = function() { return new Promise(function(resolve, reject) { child_process.execFile("./stop_double_maxscale.sh", function(err, stdout, stderr) { if (err) { - reject() + reject(err) } else { resolve() } diff --git a/maxscale-system-test/maxctrl_scripts.sh.in b/maxscale-system-test/maxctrl_scripts.sh.in new file mode 100644 index 000000000..cae92084c --- /dev/null +++ b/maxscale-system-test/maxctrl_scripts.sh.in @@ -0,0 +1,47 @@ +#!/bin/bash + +cat < start_maxscale.sh +#!/bin/bash +sudo systemctl start maxscale +EOF + +cat <stop_maxscale.sh +#!/bin/bash + +sudo systemctl stop maxscale + +sudo rm -rf /var/lib/maxscale/* +sudo rm -rf /var/cache/maxscale/* +sudo rm -rf 
/var/run/maxscale/* + +if [ -f /tmp/maxadmin.sock ] +then + sudo rm /tmp/maxadmin.sock +fi + +EOF + +cat <start_double_maxscale.sh +#!/bin/bash +sudo systemctl start maxscale +ssh -i ~/maxscale_key -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o LogLevel=quiet $galera_003_whoami@$galera_003_network "sudo systemctl start maxscale" +EOF + +cat <stop_double_maxscale.sh +#!/bin/bash + +sudo systemctl stop maxscale +sudo rm -rf /var/lib/maxscale/* +sudo rm -rf /var/cache/maxscale/* +sudo rm -rf /var/run/maxscale/* +test ! -f /tmp/maxadmin.sock || sudo rm /tmp/maxadmin.sock + +ssh -i ~/maxscale_key -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o LogLevel=quiet $galera_003_whoami@$galera_003_network "sudo systemctl stop maxscale" +ssh -i ~/maxscale_key -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o LogLevel=quiet $galera_003_whoami@$galera_003_network "sudo rm -rf /var/lib/maxscale/*" +ssh -i ~/maxscale_key -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o LogLevel=quiet $galera_003_whoami@$galera_003_network "sudo rm -rf /var/cache/maxscale/*" +ssh -i ~/maxscale_key -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o LogLevel=quiet $galera_003_whoami@$galera_003_network "sudo rm -rf /var/run/maxscale/*" +ssh -i ~/maxscale_key -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o LogLevel=quiet $galera_003_whoami@$galera_003_network "sudo rm -rf /tmp/maxadmin.sock" + +EOF + +chmod +x *.sh diff --git a/maxscale-system-test/test_maxctrl.cpp b/maxscale-system-test/test_maxctrl.cpp index 4cf0bc603..a47161e8b 100644 --- a/maxscale-system-test/test_maxctrl.cpp +++ b/maxscale-system-test/test_maxctrl.cpp @@ -6,9 +6,19 @@ int main(int argc, char *argv[]) { + // Use galera_003 as the secondary MaxScale node + TestConnections::set_secondary_maxscale("galera_003_network", "galera_003_network6"); TestConnections test(argc, argv); + // This is not very nice as it's a bit too intrusive + system("envsubst < 
maxctrl_scripts.sh.in > maxctrl_scripts.sh"); + system("chmod +x maxctrl_scripts.sh"); test.copy_to_maxscale("test_maxctrl.sh", "~"); + test.copy_to_maxscale("maxctrl_scripts.sh", "~"); + test.ssh_maxscale(true,"ssh-keygen -f maxscale_key -P \"\""); + test.copy_from_maxscale((char*)"~/maxscale_key.pub", (char*)"."); + test.galera->copy_to_node("./maxscale_key.pub", "~", 3); + test.galera->ssh_node(3, false, "cat ~/maxscale_key.pub >> ~/.ssh/authorized_keys"); // TODO: Don't handle test dependencies in tests test.tprintf("Installing NPM"); diff --git a/maxscale-system-test/test_maxctrl.sh b/maxscale-system-test/test_maxctrl.sh index b40919f0f..177275757 100755 --- a/maxscale-system-test/test_maxctrl.sh +++ b/maxscale-system-test/test_maxctrl.sh @@ -18,49 +18,10 @@ cd MaxScale git checkout $ref cd maxctrl -cat < start_maxscale.sh -#!/bin/sh -sudo systemctl start maxscale -EOF - -cat <start_double_maxscale.sh -#!/bin/sh -exit 1 -EOF - -cat <stop_maxscale.sh -#!/bin/sh - -sudo systemctl stop maxscale - -sudo rm -rf /var/lib/maxscale/* -sudo rm -rf /var/cache/maxscale/* -sudo rm -rf /var/run/maxscale/* - -if [ -f /tmp/maxadmin.sock ] -then - sudo rm /tmp/maxadmin.sock -fi - -EOF - -cat <stop_double_maxscale.sh -#!/bin/sh - -sudo systemctl stop maxscale - -sudo rm -rf /var/lib/maxscale/* -sudo rm -rf /var/cache/maxscale/* -sudo rm -rf /var/run/maxscale/* - -if [ -f /tmp/maxadmin.sock ] -then - sudo rm /tmp/maxadmin.sock -fi - -EOF - +# Create the scripts that start and stop MaxScale +~/maxctrl_scripts.sh chmod +x *.sh + npm i # Export the value for --basedir where maxscale binaries are located diff --git a/maxscale-system-test/testconnections.cpp b/maxscale-system-test/testconnections.cpp index 5bb722b81..ce8cae0f1 100644 --- a/maxscale-system-test/testconnections.cpp +++ b/maxscale-system-test/testconnections.cpp @@ -68,6 +68,25 @@ void TestConnections::multiple_maxscales(bool value) maxscale::multiple_maxscales = value; } +void 
TestConnections::set_secondary_maxscale(const char* ip_var, const char* ip6_var) +{ + const char* ip = getenv(ip_var); + const char* ip6 = getenv(ip6_var); + + if (ip || ip6) + { + TestConnections::multiple_maxscales(true); + if (ip) + { + setenv("maxscale2_IP", ip, 1); + } + if (ip6) + { + setenv("maxscale2_network6", ip6, 1); + } + } +} + TestConnections::TestConnections(int argc, char *argv[]): enable_timeouts(true), global_result(0), diff --git a/maxscale-system-test/testconnections.h b/maxscale-system-test/testconnections.h index 72cbcdfa6..8607b000b 100644 --- a/maxscale-system-test/testconnections.h +++ b/maxscale-system-test/testconnections.h @@ -338,7 +338,10 @@ public: static void require_galera_version(const char *version); /** Initialize multiple MaxScale instances */ - void multiple_maxscales(bool value); + static void multiple_maxscales(bool value); + + /** Set secondary MaxScale address */ + static void set_secondary_maxscale(const char* ip_var, const char* ip6_var); /** * @brief add_result adds result to global_result and prints error message if result is not 0 From 938484a73ec9ae4062625bef866a8acd8645ac19 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Wed, 11 Oct 2017 15:47:56 +0300 Subject: [PATCH 065/101] Allow remote server to be used for maxctrl test suite The test suite now works with remote servers and is now fully integrated to the regression test suite. 
--- maxctrl/test/cluster.js | 86 +++++++++++++++------------ maxctrl/test_utils.js | 7 +++ maxscale-system-test/test_maxctrl.cpp | 5 +- maxscale-system-test/test_maxctrl.sh | 7 ++- 4 files changed, 63 insertions(+), 42 deletions(-) diff --git a/maxctrl/test/cluster.js b/maxctrl/test/cluster.js index c39cd625a..21dd4f8b1 100644 --- a/maxctrl/test/cluster.js +++ b/maxctrl/test/cluster.js @@ -157,14 +157,14 @@ describe('Cluster Sync', function() { before(startDoubleMaxScale) it('sync after server creation', function() { - return doCommand('create server server5 127.0.0.1 3003 --hosts 127.0.0.1:8990') - .then(() => verifyCommand('cluster sync 127.0.0.1:8990 --hosts 127.0.0.1:8989', + return doCommand('create server server5 127.0.0.1 3003 --hosts ' + secondary_host) + .then(() => verifyCommand('cluster sync ' + secondary_host + ' --hosts ' + primary_host, 'servers/server5')) }) it('sync after server alteration', function() { - return doCommand('alter server server2 port 3000 --hosts 127.0.0.1:8990') - .then(() => verifyCommand('cluster sync 127.0.0.1:8990 --hosts 127.0.0.1:8989', + return doCommand('alter server server2 port 3000 --hosts ' + secondary_host) + .then(() => verifyCommand('cluster sync ' + secondary_host + ' --hosts ' + primary_host, 'servers/server2')) .then(function(res) { res.data.attributes.parameters.port.should.equal(3000) @@ -172,21 +172,21 @@ describe('Cluster Sync', function() { }) it('sync after server deletion', function() { - return doCommand('destroy server server5 --hosts 127.0.0.1:8990') - .then(() => verifyCommand('cluster sync 127.0.0.1:8990 --hosts 127.0.0.1:8989', + return doCommand('destroy server server5 --hosts ' + secondary_host) + .then(() => verifyCommand('cluster sync ' + secondary_host + ' --hosts ' + primary_host, 'servers/server5')) .should.be.rejected }) it('sync after monitor creation', function() { - return doCommand('create monitor my-monitor-2 mysqlmon --hosts 127.0.0.1:8990') - .then(() => verifyCommand('cluster sync 
127.0.0.1:8990 --hosts 127.0.0.1:8989', + return doCommand('create monitor my-monitor-2 mysqlmon --hosts ' + secondary_host) + .then(() => verifyCommand('cluster sync ' + secondary_host + ' --hosts ' + primary_host, 'monitors/my-monitor-2')) }) it('sync after monitor alteration', function() { - return doCommand('alter monitor MySQL-Monitor monitor_interval 12345 --hosts 127.0.0.1:8990') - .then(() => verifyCommand('cluster sync 127.0.0.1:8990 --hosts 127.0.0.1:8989', + return doCommand('alter monitor MySQL-Monitor monitor_interval 12345 --hosts ' + secondary_host) + .then(() => verifyCommand('cluster sync ' + secondary_host + ' --hosts ' + primary_host, 'monitors/MySQL-Monitor')) .then(function(res) { res.data.attributes.parameters.monitor_interval.should.equal(12345) @@ -194,17 +194,17 @@ describe('Cluster Sync', function() { }) it('sync after monitor deletion', function() { - return doCommand('destroy monitor my-monitor-2 --hosts 127.0.0.1:8990') - .then(() => doCommand('show monitor my-monitor-2 --hosts 127.0.0.1:8989')) - .then(() => doCommand('show monitor my-monitor-2 --hosts 127.0.0.1:8990').should.be.rejected) - .then(() => doCommand('cluster sync 127.0.0.1:8990 --hosts 127.0.0.1:8989')) - .then(() => doCommand('show monitor my-monitor-2 --hosts 127.0.0.1:8989').should.be.rejected) - .then(() => doCommand('show monitor my-monitor-2 --hosts 127.0.0.1:8990').should.be.rejected) + return doCommand('destroy monitor my-monitor-2 --hosts ' + secondary_host) + .then(() => doCommand('show monitor my-monitor-2 --hosts ' + primary_host)) + .then(() => doCommand('show monitor my-monitor-2 --hosts ' + secondary_host).should.be.rejected) + .then(() => doCommand('cluster sync ' + secondary_host + ' --hosts ' + primary_host)) + .then(() => doCommand('show monitor my-monitor-2 --hosts ' + primary_host).should.be.rejected) + .then(() => doCommand('show monitor my-monitor-2 --hosts ' + secondary_host).should.be.rejected) }) it('sync after service alteration', function() { - 
return doCommand('alter service RW-Split-Router enable_root_user true --hosts 127.0.0.1:8990') - .then(() => verifyCommand('cluster sync 127.0.0.1:8990 --hosts 127.0.0.1:8989', + return doCommand('alter service RW-Split-Router enable_root_user true --hosts ' + secondary_host) + .then(() => verifyCommand('cluster sync ' + secondary_host + ' --hosts ' + primary_host, 'services/RW-Split-Router')) .then(function(res) { res.data.attributes.parameters.enable_root_user.should.be.true @@ -214,16 +214,28 @@ describe('Cluster Sync', function() { // As the listeners cannot be truly deleted, since there's no code for actually closing a socket at runtime, // we do the listener tests last it('sync listener creation/deletion', function() { - return doCommand('create listener RW-Split-Router my-listener-2 5999 --hosts 127.0.0.1:8990') - // As both MaxScales are on the same machine, both can't listen on the same port. The sync should fail due to this - .then(() => doCommand('cluster sync 127.0.0.1:8990 --hosts 127.0.0.1:8989').should.be.rejected) - // Create the listener on the second MaxScale to avoid it being synced later on - .then(() => doCommand('create listener RW-Split-Router my-listener-2 5998 --hosts 127.0.0.1:8989')) - // Sync after creation should succeed - .then(() => doCommand('cluster sync 127.0.0.1:8990 --hosts 127.0.0.1:8989')) - // Destroy the created server, should succeed - .then(() => doCommand('destroy listener RW-Split-Router my-listener-2')) - .then(() => doCommand('cluster sync 127.0.0.1:8990 --hosts 127.0.0.1:8989')) + if (primary_host == '127.0.0.1:8989' && secondary_host == '127.0.0.1:8990') { + // Test with both MaxScales on the same machine + + return doCommand('create listener RW-Split-Router my-listener-2 5999 --hosts ' + secondary_host) + // As both MaxScales are on the same machine, both can't listen on the same port. 
The sync should fail due to this + .then(() => doCommand('cluster sync ' + secondary_host + ' --hosts ' + primary_host).should.be.rejected) + // Create the listener on the second MaxScale to avoid it being synced later on + .then(() => doCommand('create listener RW-Split-Router my-listener-2 5998 --hosts ' + primary_host)) + // Sync after creation should succeed + .then(() => doCommand('cluster sync ' + secondary_host + ' --hosts ' + primary_host)) + // Destroy the created server, should succeed + .then(() => doCommand('destroy listener RW-Split-Router my-listener-2')) + .then(() => doCommand('cluster sync ' + secondary_host + ' --hosts ' + primary_host)) + } else { + // MaxScales are on different machines + + return doCommand('create listener RW-Split-Router my-listener-2 5999 --hosts ' + secondary_host) + // As both MaxScales are on the same machine, both can't listen on the same port. The sync should fail due to this + .then(() => doCommand('cluster sync ' + secondary_host + ' --hosts ' + primary_host)) + .then(() => doCommand('destroy listener RW-Split-Router my-listener-2')) + .then(() => doCommand('cluster sync ' + secondary_host + ' --hosts ' + primary_host)) + } }) after(stopDoubleMaxScale) @@ -273,36 +285,36 @@ describe('Cluster Diff', function() { before(startDoubleMaxScale) it('diff after server creation', function() { - return doCommand('create server server5 127.0.0.1 3003 --hosts 127.0.0.1:8990') - .then(() => doCommand('cluster diff 127.0.0.1:8990 --hosts 127.0.0.1:8989')) + return doCommand('create server server5 127.0.0.1 3003 --hosts ' + secondary_host) + .then(() => doCommand('cluster diff ' + secondary_host + ' --hosts ' + primary_host)) .then(function(res) { var d = parseDiff(res) d.removed.servers.length.should.equal(1) d.removed.servers[0].id.should.equal('server5') }) - .then(() => doCommand('cluster sync 127.0.0.1:8990 --hosts 127.0.0.1:8989')) + .then(() => doCommand('cluster sync ' + secondary_host + ' --hosts ' + primary_host)) }) 
it('diff after server alteration', function() { - return doCommand('alter server server2 port 3000 --hosts 127.0.0.1:8990') - .then(() => doCommand('cluster diff 127.0.0.1:8990 --hosts 127.0.0.1:8989')) + return doCommand('alter server server2 port 3000 --hosts ' + secondary_host) + .then(() => doCommand('cluster diff ' + secondary_host + ' --hosts ' + primary_host)) .then(function(res) { var d = parseDiff(res) d.changed.servers.length.should.equal(1) d.changed.servers[0].id.should.equal('server2') }) - .then(() => doCommand('cluster sync 127.0.0.1:8990 --hosts 127.0.0.1:8989')) + .then(() => doCommand('cluster sync ' + secondary_host + ' --hosts ' + primary_host)) }) it('diff after server deletion', function() { - return doCommand('destroy server server5 --hosts 127.0.0.1:8990') - .then(() => doCommand('cluster diff 127.0.0.1:8990 --hosts 127.0.0.1:8989')) + return doCommand('destroy server server5 --hosts ' + secondary_host) + .then(() => doCommand('cluster diff ' + secondary_host + ' --hosts ' + primary_host)) .then(function(res) { var d = parseDiff(res) d.added.servers.length.should.equal(1) d.added.servers[0].id.should.equal('server5') }) - .then(() => doCommand('cluster sync 127.0.0.1:8990 --hosts 127.0.0.1:8989')) + .then(() => doCommand('cluster sync ' + secondary_host + ' --hosts ' + primary_host)) }) after(stopDoubleMaxScale) diff --git a/maxctrl/test_utils.js b/maxctrl/test_utils.js index e81bf4ae0..0cf145ecc 100644 --- a/maxctrl/test_utils.js +++ b/maxctrl/test_utils.js @@ -15,6 +15,13 @@ module.exports = function() { this.expect = chai.expect this.host = 'http://localhost:8989/v1/' + this.primary_host = '127.0.0.1:8989' + this.secondary_host = '127.0.0.1:8990' + + if (process.env.maxscale2_API) { + this.secondary_host = process.env.maxscale2_API + } + // Start MaxScale, this should be called in the `before` handler of each test unit this.startMaxScale = function() { return new Promise(function(resolve, reject) { diff --git 
a/maxscale-system-test/test_maxctrl.cpp b/maxscale-system-test/test_maxctrl.cpp index a47161e8b..f92129f70 100644 --- a/maxscale-system-test/test_maxctrl.cpp +++ b/maxscale-system-test/test_maxctrl.cpp @@ -18,7 +18,8 @@ int main(int argc, char *argv[]) test.ssh_maxscale(true,"ssh-keygen -f maxscale_key -P \"\""); test.copy_from_maxscale((char*)"~/maxscale_key.pub", (char*)"."); test.galera->copy_to_node("./maxscale_key.pub", "~", 3); - test.galera->ssh_node(3, false, "cat ~/maxscale_key.pub >> ~/.ssh/authorized_keys"); + test.galera->ssh_node(3, false, "cat ~/maxscale_key.pub >> ~/.ssh/authorized_keys;" + "sudo iptables -I INPUT -p tcp --dport 8989 -j ACCEPT;"); // TODO: Don't handle test dependencies in tests test.tprintf("Installing NPM"); @@ -26,7 +27,7 @@ int main(int argc, char *argv[]) test.tprintf("Starting test"); test.verbose = true; - int rv = test.ssh_maxscale(false, "./test_maxctrl.sh"); + int rv = test.ssh_maxscale(false, "export maxscale2_API=%s:8989; ./test_maxctrl.sh", test.galera->IP[3]); test.verbose = false; test.tprintf("Removing NPM"); diff --git a/maxscale-system-test/test_maxctrl.sh b/maxscale-system-test/test_maxctrl.sh index 177275757..0f5ce0b07 100755 --- a/maxscale-system-test/test_maxctrl.sh +++ b/maxscale-system-test/test_maxctrl.sh @@ -12,11 +12,12 @@ fi if [ ! -d MaxScale ] then git clone https://www.github.com/mariadb-corporation/MaxScale.git + cd MaxScale + git checkout $ref + cd .. fi -cd MaxScale -git checkout $ref -cd maxctrl +cd MaxScale/maxctrl # Create the scripts that start and stop MaxScale ~/maxctrl_scripts.sh From 2534c9b824414feeee16a22113046111e5eb090f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Thu, 12 Oct 2017 12:37:55 +0300 Subject: [PATCH 066/101] Update replication-manager test The test now checks whether replication-manager launched by a script works. This is closer to how one of the original blog posts define the setup. 
--- .../maxscale.cnf.template.replication_manager | 2 + maxscale-system-test/replication_manager.cpp | 78 ++++++++++++------- 2 files changed, 52 insertions(+), 28 deletions(-) diff --git a/maxscale-system-test/cnf/maxscale.cnf.template.replication_manager b/maxscale-system-test/cnf/maxscale.cnf.template.replication_manager index d685f2424..7ba16b355 100644 --- a/maxscale-system-test/cnf/maxscale.cnf.template.replication_manager +++ b/maxscale-system-test/cnf/maxscale.cnf.template.replication_manager @@ -12,6 +12,8 @@ monitor_interval=1000 detect_standalone_master=true failcount=2 allow_cluster_recovery=true +events=master_down +script=/home/vagrant/replication-manager --hosts=$LIST --user=skysql:skysql --rpluser=skysql:skysql --switchover-at-sync=false --log-level=3 --logfile=/tmp/mrm.log switchover [RW Split Router] type=service diff --git a/maxscale-system-test/replication_manager.cpp b/maxscale-system-test/replication_manager.cpp index 710c31a93..07321e74f 100644 --- a/maxscale-system-test/replication_manager.cpp +++ b/maxscale-system-test/replication_manager.cpp @@ -21,9 +21,9 @@ void get_output(TestConnections& test) test.tprintf("%s", output); free(output); - test.tprintf("replication-manager output:"); + test.tprintf("MaxScale output:"); output = test.ssh_maxscale_output(true, - "cat /var/log/replication-manager.log && sudo truncate -s 0 /var/log/replication-manager.log"); + "cat /var/log/maxscale/maxscale.log && sudo truncate -s 0 /var/log/maxscale/maxscale.log"); test.tprintf("%s", output); free(output); } @@ -66,6 +66,21 @@ void check(TestConnections& test) mysql_close(conn); } +int get_server_id(TestConnections& test) +{ + MYSQL *conn = test.open_rwsplit_connection(); + int id = -1; + char str[1024]; + + if (find_field(conn, "SELECT @@server_id", "@@server_id", str) == 0) + { + id = atoi(str); + } + + mysql_close(conn); + return id; +} + static bool interactive = false; void get_input() @@ -83,21 +98,25 @@ int main(int argc, char** argv) prepare(); 
TestConnections test(argc, argv); - test.tprintf("Installing replication-manager"); - int rc = system("./manage_mrm.sh install > manage_mrm.log"); - if (!WIFEXITED(rc) || WEXITSTATUS(rc) != 0) - { - test.tprintf("Failed to install replication-manager, see manage_mrm.log for more details"); - return -1; - } - // Wait a few seconds - sleep(5); + // TODO: Figure out how to do this without having replication-manager pre-installed on the system + // + // test.tprintf("Installing replication-manager"); + // int rc = system("./manage_mrm.sh install > manage_mrm.log"); + // if (!WIFEXITED(rc) || WEXITSTATUS(rc) != 0) + // { + // test.tprintf("Failed to install replication-manager, see manage_mrm.log for more details"); + // return -1; + // } + + // // Wait a few seconds + // sleep(5); test.tprintf("Creating table and inserting data"); get_input(); test.connect_maxscale(); test.try_query(test.conn_rwsplit, "CREATE OR REPLACE TABLE test.t1(id INT)"); + test.repl->sync_slaves(); check(test); get_output(test); @@ -127,31 +146,34 @@ int main(int argc, char** argv) check(test); get_output(test); - test.tprintf("Starting all nodes and wait for replication-manager to fix the replication"); - get_input(); - test.repl->start_node(0, (char*)""); - sleep(5); - test.repl->start_node(1, (char*)""); - sleep(5); - test.repl->start_node(2, (char*)""); - sleep(5); + test.tprintf("Fix replication and recreate table"); + test.close_maxscale_connections(); + test.repl->fix_replication(); + test.connect_maxscale(); + test.try_query(test.conn_rwsplit, "CREATE OR REPLACE TABLE test.t1(id INT)"); + test.repl->sync_slaves(); + inserts = 0; check(test); get_output(test); - test.tprintf("Dropping tables"); - get_input(); - test.close_maxscale_connections(); - test.connect_maxscale(); - test.try_query(test.conn_rwsplit, "DROP TABLE test.t1"); - test.close_maxscale_connections(); + test.tprintf("Disable replication on a slave and kill master, check that it is not promoted"); + 
execute_query(test.repl->nodes[1], "STOP SLAVE; RESET SLAVE; RESET SLAVE ALL;"); + test.repl->stop_node(0); + sleep(10); + check(test); get_output(test); - test.tprintf("Removing replication-manager"); - get_input(); - system("./manage_mrm.sh remove >> manage_mrm.log"); + int id = get_server_id(test); + test.add_result(id == test.repl->get_server_id(1), "Invalid slave should not be used"); + + // TODO: Figure this also out, remove the component if it's not needed + // test.tprintf("Removing replication-manager"); + // get_input(); + // system("./manage_mrm.sh remove >> manage_mrm.log"); + test.repl->fix_replication(); return test.global_result; } From 1666c9f0b6456cf987f80b5b4541f646e82a2969 Mon Sep 17 00:00:00 2001 From: Johan Wikman Date: Mon, 9 Oct 2017 12:29:52 +0300 Subject: [PATCH 067/101] MXS-1464 Add config 'substitute_variables' With this variables set to true, if $VAR is used as a value in the configuration file, then `$VAR` will be replaced with the value of the environment variable VAR. --- .../Getting-Started/Configuration-Guide.md | 26 +++++++++++++++++++ include/maxscale/config.h | 2 ++ server/core/config.cc | 4 +-- server/core/gateway.cc | 22 ++++++++++++++++ 4 files changed, 52 insertions(+), 2 deletions(-) diff --git a/Documentation/Getting-Started/Configuration-Guide.md b/Documentation/Getting-Started/Configuration-Guide.md index 77dc1e597..7343e61c0 100644 --- a/Documentation/Getting-Started/Configuration-Guide.md +++ b/Documentation/Getting-Started/Configuration-Guide.md @@ -561,6 +561,32 @@ This will log all statements that cannot be parsed completely. This may be useful if you suspect that MariaDB MaxScale routes statements to the wrong server (e.g. to a slave instead of to a master). +#### `substitute_variables` + +Enable or disable the substitution of environment variables in the MaxScale +configuration file. 
If the substitution of variables is enabled and a +configuration line like +``` +some_parameter=$SOME_VALUE +``` +is encountered, then `$SOME_VALUE` will be replaced with the actual value +of the environment variable `SOME_VALUE`. Note: +* Variable substitution will be made _only_ if '$' is the first character + of the value. +* _Everything_ following '$' is interpreted as the name of the environment + variable. +* Referring to a non-existing environment variable is a fatal error. + +By default, the value of `substitute_variables` is `false`. +``` +substitute_variables=true +``` +The setting of `substitute_variables` will have an effect on all parameters +in the all other sections, irrespective of where the `[maxscale]` section +is placed in the configuration file. However, in the `[maxscale]` section, +to ensure that substitution will take place, place the +`substitute_variables=true` line first. + ### REST API Configuration The MaxScale REST API is an HTTP interface that provides JSON format data diff --git a/include/maxscale/config.h b/include/maxscale/config.h index b9f92336d..beef3986a 100644 --- a/include/maxscale/config.h +++ b/include/maxscale/config.h @@ -157,6 +157,7 @@ extern const char CN_SSL_CERT_VERIFY_DEPTH[]; extern const char CN_SSL_KEY[]; extern const char CN_SSL_VERSION[]; extern const char CN_STRIP_DB_ESC[]; +extern const char CN_SUBSTITUTE_VARIABLES[]; extern const char CN_THREADS[]; extern const char CN_THREAD_STACK_SIZE[]; extern const char CN_TYPE[]; @@ -224,6 +225,7 @@ typedef struct char admin_ssl_ca_cert[PATH_MAX]; /**< Admin SSL CA cert */ int query_retries; /**< Number of times a interrupted query is retried */ time_t query_retry_timeout; /**< Timeout for query retries */ + bool substitute_variables; /**< Should environment variables be substituted */ } MXS_CONFIG; /** diff --git a/server/core/config.cc b/server/core/config.cc index bb5af12a1..a72fd824e 100644 --- a/server/core/config.cc +++ b/server/core/config.cc @@ -135,6 +135,7 @@ 
const char CN_SSL_CERT_VERIFY_DEPTH[] = "ssl_cert_verify_depth"; const char CN_SSL_KEY[] = "ssl_key"; const char CN_SSL_VERSION[] = "ssl_version"; const char CN_STRIP_DB_ESC[] = "strip_db_esc"; +const char CN_SUBSTITUTE_VARIABLES[] = "substitute_variables"; const char CN_THREADS[] = "threads"; const char CN_THREAD_STACK_SIZE[] = "thread_stack_size"; const char CN_TYPE[] = "type"; @@ -457,8 +458,7 @@ void fix_section_name(char *section) * @param value The Parameter value * @return zero on error */ -static int -ini_handler(void *userdata, const char *section, const char *name, const char *value) +static int ini_handler(void *userdata, const char *section, const char *name, const char *value) { CONFIG_CONTEXT *cntxt = (CONFIG_CONTEXT *)userdata; CONFIG_CONTEXT *ptr = cntxt; diff --git a/server/core/gateway.cc b/server/core/gateway.cc index ce9bca627..c5a2c2900 100644 --- a/server/core/gateway.cc +++ b/server/core/gateway.cc @@ -2617,6 +2617,24 @@ void set_log_augmentation(const char* value) static int cnf_preparser(void* data, const char* section, const char* name, const char* value) { MXS_CONFIG* cnf = config_get_global_options(); + + if (cnf->substitute_variables) + { + if (*value == '$') + { + char* env_value = getenv(value + 1); + + if (!env_value) + { + MXS_ERROR("The environment variable %s, used as value for parameter %s " + "in section %s, does not exist.", value, name, section); + return 0; + } + + value = env_value; + } + } + char *tmp; /** These are read from the configuration file. These will not override * command line parameters but will override default values. 
*/ @@ -2790,6 +2808,10 @@ static int cnf_preparser(void* data, const char* section, const char* name, cons cnf->log_to_shm = config_truth_value((char*)value); } } + else if (strcmp(name, CN_SUBSTITUTE_VARIABLES) == 0) + { + cnf->substitute_variables = config_truth_value(value); + } } return 1; From 22f4b02b445d5f18155c0640e68cd37e36b4677c Mon Sep 17 00:00:00 2001 From: Johan Wikman Date: Mon, 9 Oct 2017 15:11:53 +0300 Subject: [PATCH 068/101] MXS-1464 Substitute environment variables If 'substitute_variables' has been set to true, then the value of a parameter like `some_param=$SOME_VAR' is replaced with the value of the environment variable 'SOME_VAR'. It is a fatal error to refer to a variable that does not exist. --- server/core/config.cc | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/server/core/config.cc b/server/core/config.cc index a72fd824e..a10ab4161 100644 --- a/server/core/config.cc +++ b/server/core/config.cc @@ -460,8 +460,25 @@ void fix_section_name(char *section) */ static int ini_handler(void *userdata, const char *section, const char *name, const char *value) { - CONFIG_CONTEXT *cntxt = (CONFIG_CONTEXT *)userdata; - CONFIG_CONTEXT *ptr = cntxt; + CONFIG_CONTEXT *cntxt = (CONFIG_CONTEXT *)userdata; + CONFIG_CONTEXT *ptr = cntxt; + + if (config_get_global_options()->substitute_variables) + { + if (*value == '$') + { + char* env_value = getenv(value + 1); + + if (!env_value) + { + MXS_ERROR("The environment variable %s, used as value for parameter %s " + "in section %s, does not exist.", value, name, section); + return 0; + } + + value = env_value; + } + } if (strcmp(section, CN_GATEWAY) == 0 || strcasecmp(section, CN_MAXSCALE) == 0) { From d1df1837a4844a6e89dc3d96214d2d97ae04b143 Mon Sep 17 00:00:00 2001 From: Johan Wikman Date: Mon, 9 Oct 2017 15:52:24 +0300 Subject: [PATCH 069/101] MXS-1464 When pre-parsing, only check maxscale section When pre-parsing the configuration file, the existence of environment 
variables is only done for the [maxscale] section. For other sections a nicer error message is obtained if the comlplaint is made when the configuration file is actually loaded. Mechanism for providing custom error message from the pre-parsing function added. --- server/core/config.cc | 2 +- server/core/gateway.cc | 74 +++++++++++++++++++++++++++--------------- 2 files changed, 49 insertions(+), 27 deletions(-) diff --git a/server/core/config.cc b/server/core/config.cc index a10ab4161..d23515170 100644 --- a/server/core/config.cc +++ b/server/core/config.cc @@ -472,7 +472,7 @@ static int ini_handler(void *userdata, const char *section, const char *name, co if (!env_value) { MXS_ERROR("The environment variable %s, used as value for parameter %s " - "in section %s, does not exist.", value, name, section); + "in section %s, does not exist.", value + 1, name, section); return 0; } diff --git a/server/core/gateway.cc b/server/core/gateway.cc index c5a2c2900..a7d43b9ef 100644 --- a/server/core/gateway.cc +++ b/server/core/gateway.cc @@ -2608,38 +2608,47 @@ void set_log_augmentation(const char* value) /** * Pre-parse the configuration file for various directory paths. - * @param data Parameter passed by inih + * @param data Pointer to variable where custom dynamically allocated + * error message can be stored. 
* @param section Section name - * @param name Parameter name - * @param value Parameter value + * @param name Parameter name + * @param value Parameter value * @return 0 on error, 1 when successful */ static int cnf_preparser(void* data, const char* section, const char* name, const char* value) { MXS_CONFIG* cnf = config_get_global_options(); - if (cnf->substitute_variables) - { - if (*value == '$') - { - char* env_value = getenv(value + 1); - - if (!env_value) - { - MXS_ERROR("The environment variable %s, used as value for parameter %s " - "in section %s, does not exist.", value, name, section); - return 0; - } - - value = env_value; - } - } - char *tmp; /** These are read from the configuration file. These will not override * command line parameters but will override default values. */ if (strcasecmp(section, "maxscale") == 0) { + if (cnf->substitute_variables) + { + if (*value == '$') + { + char* env_value = getenv(value + 1); + + if (!env_value) + { + char** s = (char**)data; + + static const char FORMAT[] = "The environment variable %s does not exist."; + *s = (char*)MXS_MALLOC(sizeof(FORMAT) + strlen(value)); + + if (*s) + { + sprintf(*s, FORMAT, value + 1); + } + + return 0; + } + + value = env_value; + } + } + if (strcmp(name, "logdir") == 0) { if (strcmp(get_logdir(), default_logdir) == 0) @@ -2981,23 +2990,36 @@ static bool daemonize(void) */ static bool sniff_configuration(const char* filepath) { - int rv = ini_parse(filepath, cnf_preparser, NULL); + char* s = NULL; + + int rv = ini_parse(filepath, cnf_preparser, &s); if (rv != 0) { + const char FORMAT_CUSTOM[] = + "Failed to pre-parse configuration file %s. Error on line %d. %s"; const char FORMAT_SYNTAX[] = - "Error: Failed to pre-parse configuration file %s. Error on line %d."; + "Failed to pre-parse configuration file %s. Error on line %d."; const char FORMAT_OPEN[] = - "Error: Failed to pre-parse configuration file %s. Failed to open file."; + "Failed to pre-parse configuration file %s. 
Failed to open file."; const char FORMAT_MALLOC[] = - "Error: Failed to pre-parse configuration file %s. Memory allocation failed."; + "Failed to pre-parse configuration file %s. Memory allocation failed."; + size_t extra = strlen(filepath) + UINTLEN(abs(rv)) + (s ? strlen(s) : 0); // We just use the largest one. - char errorbuffer[sizeof(FORMAT_MALLOC) + strlen(filepath) + UINTLEN(abs(rv))]; + char errorbuffer[sizeof(FORMAT_MALLOC) + extra]; if (rv > 0) { - snprintf(errorbuffer, sizeof(errorbuffer), FORMAT_SYNTAX, filepath, rv); + if (s) + { + snprintf(errorbuffer, sizeof(errorbuffer), FORMAT_CUSTOM, filepath, rv, s); + MXS_FREE(s); + } + else + { + snprintf(errorbuffer, sizeof(errorbuffer), FORMAT_SYNTAX, filepath, rv); + } } else if (rv == -1) { From 9cec8fa0fbb0650bed1ed99327dd97678cfd92bf Mon Sep 17 00:00:00 2001 From: Johan Wikman Date: Thu, 12 Oct 2017 14:13:17 +0300 Subject: [PATCH 070/101] MXS-1464 Update ChangeLog and release notes --- Documentation/Changelog.md | 2 + .../MaxScale-2.2.1-Release-Notes.md | 48 +++++++++++++++++++ 2 files changed, 50 insertions(+) create mode 100644 Documentation/Release-Notes/MaxScale-2.2.1-Release-Notes.md diff --git a/Documentation/Changelog.md b/Documentation/Changelog.md index 63b333a0f..9649e927b 100644 --- a/Documentation/Changelog.md +++ b/Documentation/Changelog.md @@ -20,8 +20,10 @@ * The Masking filter can now both obfuscate and partially mask columns. * Binlog router supports MariaDB 10 GTID at both ends. * KILL CONNECTION can now be used through MaxScale. +* Environment variables can now be used in the MaxScale configuration file. 
For more details, please refer to: +* [MariaDB MaxScale 2.2.1 Release Notes](Release-Notes/MaxScale-2.2.1-Release-Notes.md) * [MariaDB MaxScale 2.2.0 Release Notes](Release-Notes/MaxScale-2.2.0-Release-Notes.md) ## MariaDB MaxScale 2.1 diff --git a/Documentation/Release-Notes/MaxScale-2.2.1-Release-Notes.md b/Documentation/Release-Notes/MaxScale-2.2.1-Release-Notes.md new file mode 100644 index 000000000..85ce98fe6 --- /dev/null +++ b/Documentation/Release-Notes/MaxScale-2.2.1-Release-Notes.md @@ -0,0 +1,48 @@ +# MariaDB MaxScale 2.2.1 Release Notes + +Release 2.2.1 is a Beta release. + +This document describes the changes in release 2.2.1, when compared to +release 2.2.0. + +For any problems you encounter, please consider submitting a bug +report at [Jira](https://jira.mariadb.org). + +## Changed Features + +## Dropped Features + +## New Features + +### Environment Variables in the configuration file + +If the global configuration entry `substitute_variables` is set to true, +then if the first character of a value in the configuration file is a `$` +then everything following that is interpreted as an environment variable +and the configuration value is replaced with the value of the environment +variable. For more information please consult the +[Configuration Guide](Getting-Started/Configuration-Guide.md). + +## Bug fixes + +[Here is a list of bugs fixed in MaxScale 2.2.1.](https://jira.mariadb.org/issues/?jql=project%20%3D%20MXS%20AND%20issuetype%20%3D%20Bug%20AND%20status%20%3D%20Closed%20AND%20fixVersion%20%3D%202.2.1) + +## Known Issues and Limitations + +There are some limitations and known issues within this version of MaxScale. +For more information, please refer to the [Limitations](../About/Limitations.md) document. + +## Packaging + +RPM and Debian packages are provided for the Linux distributions supported +by MariaDB Enterprise. + +Packages can be downloaded [here](https://mariadb.com/resources/downloads). 
+ +## Source Code + +The source code of MaxScale is tagged at GitHub with a tag, which is identical +with the version of MaxScale. For instance, the tag of version X.Y.Z of MaxScale +is X.Y.Z. Further, *master* always refers to the latest released non-beta version. + +The source code is available [here](https://github.com/mariadb-corporation/MaxScale). From f0d16e3ac531f20623c6678db0b32b4a8b6e946a Mon Sep 17 00:00:00 2001 From: MassimilianoPinto Date: Fri, 13 Oct 2017 16:20:05 +0200 Subject: [PATCH 071/101] Typo fixed Typo fixed --- .../Tutorials/Replication-Proxy-Binlog-Router-Tutorial.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Documentation/Tutorials/Replication-Proxy-Binlog-Router-Tutorial.md b/Documentation/Tutorials/Replication-Proxy-Binlog-Router-Tutorial.md index efa8c33df..6b51da409 100644 --- a/Documentation/Tutorials/Replication-Proxy-Binlog-Router-Tutorial.md +++ b/Documentation/Tutorials/Replication-Proxy-Binlog-Router-Tutorial.md @@ -458,12 +458,12 @@ error logs and in *SHOW SLAVE STATUS*, ##### MariaDB 10 GTID If _mariadb10_master_gtid_ is On changing the master doesn't require the setting of a -new _file_ and _pos_, just specify new host and port with CHANGE MASTER; depending on the _binlog_structure_ values some additional steps migth be required. +new _file_ and _pos_, just specify new host and port with CHANGE MASTER; depending on the _binlog_structure_ values some additional steps might be required. If _binlog_structure=flat_, in order to keep previous binlog files untouched in MaxScale _binlogdir_ (no overwriting), the next in sequence file must exist in the Master server, as per above scenario _file and pos_ (2). -It migth also happen that each server in the replication setup has its own binlog file name +It might also happen that each server in the replication setup has its own binlog file name convention (server1_bin, server2_bin etc) or the user doesn't want to care at all about name and sequence. 
The _binlog_structure_ option set to _tree_ value simplifies the change master process: as the binlog files are saved using a hierarchy model From 6618049c24e906972c56bbcb4a9562d4181044d5 Mon Sep 17 00:00:00 2001 From: Johan Wikman Date: Fri, 13 Oct 2017 14:36:43 +0300 Subject: [PATCH 072/101] Add note about PL/SQL support --- Documentation/Changelog.md | 2 ++ .../Release-Notes/MaxScale-2.2.1-Release-Notes.md | 9 +++++++++ 2 files changed, 11 insertions(+) diff --git a/Documentation/Changelog.md b/Documentation/Changelog.md index 9649e927b..90397a074 100644 --- a/Documentation/Changelog.md +++ b/Documentation/Changelog.md @@ -11,6 +11,8 @@ * Firewall can now prevent the use of functions in conjunction with certain columns. * Parser of MaxScale extended to support window functions and CTEs. +* Parser of MaxScale extended to support PL/SQL compatibility features + of upcoming 10.3 release. * Prepared statements are now parsed and the execution of read only ones will be routed to slaves. * Server states are persisted, so in case of crash and restart MaxScale diff --git a/Documentation/Release-Notes/MaxScale-2.2.1-Release-Notes.md b/Documentation/Release-Notes/MaxScale-2.2.1-Release-Notes.md index 85ce98fe6..7704331dc 100644 --- a/Documentation/Release-Notes/MaxScale-2.2.1-Release-Notes.md +++ b/Documentation/Release-Notes/MaxScale-2.2.1-Release-Notes.md @@ -14,6 +14,15 @@ report at [Jira](https://jira.mariadb.org). ## New Features +### PL/SQL Comaptibility + +The parser of MaxScale has been extended to support the PL/SQL compatibility +features of the upcoming 10.3 release. For more information on how to enable +this mode, please refer to the +[configuration guide](../Getting-Started/Configuration-Guide.md#sql_mode). + +This functionality was available already in MaxScale 2.2.0. 
+ ### Environment Variables in the configuration file If the global configuration entry `substitute_variables` is set to true, From 501c35d7b2cd3f08b1dcabcc1e67432748fc0e96 Mon Sep 17 00:00:00 2001 From: Johan Wikman Date: Mon, 16 Oct 2017 14:57:30 +0300 Subject: [PATCH 073/101] Correct typo in cache documentation --- Documentation/Filters/Cache.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/Filters/Cache.md b/Documentation/Filters/Cache.md index 7d0e7601c..8b8cdb517 100644 --- a/Documentation/Filters/Cache.md +++ b/Documentation/Filters/Cache.md @@ -239,7 +239,7 @@ respect to `SELECT` statements. The allowed values are: statements are cacheable, but must verify that. ``` -select=assume_cacheable +selects=assume_cacheable ``` Default is `verify_cacheable`. In this case, the `SELECT` statements will be From 37c804e0d3e225bce629f8c1752868c4a5b67c0a Mon Sep 17 00:00:00 2001 From: Johan Wikman Date: Thu, 19 Oct 2017 12:56:12 +0300 Subject: [PATCH 074/101] MXS-1327 Warn if debug priority enabled in release mode Turning debug on has no effect if MaxScale has been built in release mode. A warning will now be displayed to the user if that is attempted. 
--- server/modules/routing/debugcli/debugcmd.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/server/modules/routing/debugcli/debugcmd.c b/server/modules/routing/debugcli/debugcmd.c index 737425267..2bd89b563 100644 --- a/server/modules/routing/debugcli/debugcmd.c +++ b/server/modules/routing/debugcli/debugcmd.c @@ -2509,6 +2509,14 @@ static void enable_log_priority(DCB *dcb, char *arg1) if (priority != -1) { mxs_log_set_priority_enabled(priority, true); + +#if !defined(SS_DEBUG) + if (priority == LOG_DEBUG) + { + dcb_printf(dcb, + "Enabling '%s' has no effect, as MaxScale has been built in release mode.\n", arg1); + } +#endif } else { From ff8916046cef172a1591332c614960233b0955c9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Sat, 21 Oct 2017 14:55:56 +0300 Subject: [PATCH 075/101] Add link to documentation in --help output The link points to the MaxScale 2.1 documentation root. --- server/core/gateway.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/server/core/gateway.cc b/server/core/gateway.cc index da4aefd08..b4cbd47b8 100644 --- a/server/core/gateway.cc +++ b/server/core/gateway.cc @@ -961,7 +961,8 @@ static void usage(void) "if '--basedir /path/maxscale' is specified, then, for instance, the log\n" "dir will be '/path/maxscale/var/log/maxscale', the config dir will be\n" "'/path/maxscale/etc' and the default config file will be\n" - "'/path/maxscale/etc/maxscale.cnf'.\n", + "'/path/maxscale/etc/maxscale.cnf'.\n\n" + "MaxScale documentation: https://mariadb.com/kb/en/mariadb-enterprise/mariadb-maxscale-21/ \n", progname, get_configdir(), default_cnf_fname, get_configdir(), get_logdir(), get_cachedir(), get_libdir(), From ee635f59f3a68b2aa713f0f8f5ba3789c86a5bc5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Sat, 21 Oct 2017 15:08:03 +0300 Subject: [PATCH 076/101] Update MaxScale manpage The manpage now only links to the relevant online documentation. 
--- Documentation/maxscale.1 | 72 ++++++---------------------------------- 1 file changed, 10 insertions(+), 62 deletions(-) diff --git a/Documentation/maxscale.1 b/Documentation/maxscale.1 index bbf1eb1e5..cd2ea64d0 100644 --- a/Documentation/maxscale.1 +++ b/Documentation/maxscale.1 @@ -1,11 +1,8 @@ .TH maxscale 1 .SH NAME -maxscale - The Dynamic Data Routing Platform -.SH SYNOPSIS -.B maxscale -[\fIOPTIONS...\fR] +maxscale - An intelligent database proxy .SH DESCRIPTION -MariaDB MaxScale is a dynamic data routing platform that allows the forwarding +MariaDB MaxScale is an intelligent database proxy that allows the forwarding of database statements to one or more database servers using complex rules, a semantic understanding of the database statements and the roles of the various servers within the backend cluster of databases. @@ -15,73 +12,24 @@ functionality transparently to the applications. In addition it provides a highly scalable and flexible architecture, with plugin components to support different protocols and routing decisions. -.SH OPTIONS -.TP -.BR "-d, --nodaemon" -Run MaxScale in the terminal process -.TP -.BR -f " \fIFILE\fB, --config=\fIFILE\fR" -Relative or absolute pathname of MaxScale configuration file to load. -.TP -.BR -l "[\fIfile|shm|stdout\fB], --log=[\fIfile|shm|stdout\fB]" -Log to file, shared memory or standard output. The default is to log to file. -.TP -.BR -L " \fIPATH\fB, --logdir=\fIPATH\fB" -Path to log file directory. -.TP -.BR -D " \fIPATH\fB, --datadir=\fIPATH\fB" -Path to data directory. This is where the embedded mysql tables are stored in addition to other MaxScale specific data. -.TP -.BR -C " \fIPATH\fB, --configdir=\fIPATH\fB" -Path to configuration file directory. MaxScale will look for the \fImaxscale.cnf\fR file from this folder. -.TP -.BR -B " \fIPATH\fB, --libdir=\fIPATH\fB" -Path to module directory. Modules are only searched from this folder. 
-.TP -.BR -A " \fIPATH\fB, --cachedir=\fIPATH\fB" -Path to cache directory. This is where MaxScale stores cached authentication data. -.TP -.BR -P " \fIPATH\fB, --piddir=\fIPATH\fB" -Location of MaxScale's PID file. -.TP -.BR -E " \fIPATH\fB, --execdir=\fIPATH\fB" -Location of the executable files. When internal processes are launched from within MaxScale the binaries are assumed to be in this directory. If you have a custom location for binary executable files you need to add this parameter. -.TP -.BR -U " \fIUSER\fB, --user=\fIUSER\fB" -Run MaxScale as another user. The user ID and group ID of this user are used to run MaxScale. -.TP -.BR -s " [\fIyes\fB|\fIno\fB], --syslog=[\fIyes\fB|\fIno\fB]" -Log messages to syslog. -.TP -.BR -S " [\fIyes\fB|\fIno\fB], \fB--maxlog=[\fIyes\fB|\fIno\fB]" -Log messages to MaxScale's own log files. -.TP -.BR "-v, --version" -Print version information and exit. -.TP -.BR "-V, --version-full" -Print full version information including the Git commit the binary was built from and exit. -.TP -.BR "-?, --help" -Show the help information for MaxScale and exit. 
-.SH EXAMPLES -Tutorials on GitHub: +Quickstart Guide: .RS -.I https://github.com/mariadb-corporation/MaxScale/blob/master/Documentation/Documentation-Contents.md#tutorials +.I https://mariadb.com/kb/en/mariadb-enterprise/mariadb-maxscale-21-setting-up-mariadb-maxscale/ .RE -.SH SEE ALSO -The MariaDB MaxScale documentation on the MariaDB Knowledge Base: + +Installation Guide: .RS -.I https://mariadb.com/kb/en/mariadb-enterprise/mariadb-maxscale/ +.I https://mariadb.com/kb/en/mariadb-enterprise/maxscale-21-installation-guide/ .RE -The MariaDB MaxScale documentation on GitHub: +MaxScale Documentation: .RS -.I https://github.com/mariadb-corporation/MaxScale/blob/master/Documentation/Documentation-Contents.md +.I https://mariadb.com/kb/en/mariadb-enterprise/mariadb-maxscale-21-maxscale-21-contents/ .RE + .SH BUGS You can see a list of known bugs and report new bugs at: From aa3764db3ca9b18438869bb017e3ecdc95f9dd53 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Sun, 22 Oct 2017 09:15:57 +0300 Subject: [PATCH 077/101] Let MaxScale build the Jansson library The CMake files in MaxScale automatically download the correct version of the Jansson library when it is configured. The build scripts used the default branch which can change. --- BUILD/install_build_deps.sh | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/BUILD/install_build_deps.sh b/BUILD/install_build_deps.sh index f9897c2fa..e8714c64a 100755 --- a/BUILD/install_build_deps.sh +++ b/BUILD/install_build_deps.sh @@ -109,22 +109,6 @@ cd tcl8.6.5/unix sudo make install cd ../../.. - -# Jansson -git clone https://github.com/akheron/jansson.git -if [ $? != 0 ] -then - echo "Error cloning jansson" - exit 1 -fi - -mkdir -p jansson/build -pushd jansson/build -cmake .. 
-DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_C_FLAGS=-fPIC -DJANSSON_INSTALL_LIB_DIR=$install_libdir -make -sudo make install -popd - # Avro C API wget -r -l1 -nH --cut-dirs=2 --no-parent -A.tar.gz --no-directories http://mirror.netinch.com/pub/apache/avro/stable/c if [ $? != 0 ] From 65dc9e0d30f5c70cb05fbb610da54f396c1613b0 Mon Sep 17 00:00:00 2001 From: MassimilianoPinto Date: Mon, 23 Oct 2017 11:25:19 +0200 Subject: [PATCH 078/101] MXS-1484: set binlog storage to TREE mode When mariadb10_master_gtid is on the storage of binlog file is automatically set to TREE mode. --- Documentation/Routers/Binlogrouter.md | 17 ------- ...eplication-Proxy-Binlog-Router-Tutorial.md | 45 ++++--------------- server/modules/routing/binlogrouter/blr.c | 32 +++---------- 3 files changed, 14 insertions(+), 80 deletions(-) diff --git a/Documentation/Routers/Binlogrouter.md b/Documentation/Routers/Binlogrouter.md index b29b2de6e..87eee1c33 100644 --- a/Documentation/Routers/Binlogrouter.md +++ b/Documentation/Routers/Binlogrouter.md @@ -336,22 +336,6 @@ in the binlog files with ignorable events. - It's not possible to specify the GTID _domain_id: the master one is being used for all operations. All slave servers must use the same replication domain as the master server. -### `binlog_structure` - -This option controls the way binlog file are saved in the _binlogdir_: -there are two possible values, `flat | tree` - -The `tree` mode can only be set with `mariadb10_master_gtid=On` - -- `flat` is the default value, files are saved as usual. -- `tree` enables the saving of files using this hierarchy model: -_binlogdir_/_domain_id_/_server_id_/_filename_ - -The _tree_ structure easily allows the changing of the master server -without caring about binlog filename and sequence: -just change _host_ and _port_, the replication will -resume from last GTID MaxScale has seen. 
- ### `master_retry_count` This option sets the maximum number of connection retries when the master server is disconnected or not reachable. @@ -392,7 +376,6 @@ follows. encryption_key_file=/var/binlogs/enc_key.txt, mariadb10_slave_gtid=On, mariadb10_master_gtid=Off, - binlog_structure=flat, slave_hostname=maxscale-blr-1, master_retry_count=1000, connect_retry=60 diff --git a/Documentation/Tutorials/Replication-Proxy-Binlog-Router-Tutorial.md b/Documentation/Tutorials/Replication-Proxy-Binlog-Router-Tutorial.md index 6b51da409..40f89cb30 100644 --- a/Documentation/Tutorials/Replication-Proxy-Binlog-Router-Tutorial.md +++ b/Documentation/Tutorials/Replication-Proxy-Binlog-Router-Tutorial.md @@ -68,7 +68,6 @@ A **complete example** of a service entry for a binlog router service would be a encryption_key_file=/var/binlogs/enc_key.txt, mariadb10_slave_gtid=On, mariadb10_master_gtid=Off, - binlog_structure=flat, slave_hostname=maxscale-blr-1, master_retry_count=1000, connect_retry=60 @@ -285,7 +284,7 @@ The default is that a slave connection must not include any GTID feature: `MASTER_USE_GTID=no` Starting from MaxScale 2.2 it's also possible to register to MariaDB 10.X master using -**GTID** using the two new options *mariadb10_master_gtid* and *binlog_structure*. +**GTID** using the new option *mariadb10_master_gtid*. Current GTID implementation limitations: @@ -458,19 +457,12 @@ error logs and in *SHOW SLAVE STATUS*, ##### MariaDB 10 GTID If _mariadb10_master_gtid_ is On changing the master doesn't require the setting of a -new _file_ and _pos_, just specify new host and port with CHANGE MASTER; depending on the _binlog_structure_ values some additional steps might be required. +new _file_ and _pos_, just specify new host and port with CHANGE MASTER. 
-If _binlog_structure=flat_, in order to keep previous binlog files untouched in MaxScale _binlogdir_ (no overwriting), -the next in sequence file must exist in the Master server, as per above scenario _file and pos_ (2). - -It might also happen that each server in the replication setup has its own binlog file name -convention (server1_bin, server2_bin etc) or the user doesn't want to care at all about -name and sequence. The _binlog_structure_ option set to _tree_ value simplifies the change -master process: as the binlog files are saved using a hierarchy model +As the binlog files will be automatically saved using a hierarchy model (_binlogdir/domain_id/server_id/_filename_), MaxScale can work with any filename and any sequence and no binlog file will be overwritten by accident. - **Scenario** example: Let's start saying it's a good practice to issue in the new Master `FLUSH TABLES` which @@ -508,38 +500,17 @@ MariaDB> SELECT @@global.gtid_current_pos; ``` Starting the replication in MaxScale, `START SLAVE`, -will result in new events being downloaded and stored. - -If _binlog_structure=flat_ (default), the binlog events are saved in the new file -`mysql-bin.000061`, which should have been created in the Master before starting -replication from MaxScale, see above scenario (2) - -If _binlog_structure=tree_, the binlog events are saved in the new file -`0/10333/mysql-bin.000001` (which is the current file in the new master) - -The latter example clearly shows that the binlog file has a different sequence number -(1 instead of 61) and possibly a new name. 
+will result in new events being downloaded and stored in the new file +`0/10333/mysql-bin.000001` (which should be the current file in the new master) As usual, check for any error in log files and with MariaDB> SHOW SLAVE STATUS; Issuing the admin command `SHOW BINARY LOGS` it's possible to see the list -of log files which have been downloaded: - -``` -MariaDB> SHOW BINARY LOGS; -+------------------+-----------+ -| Log_name | File_size | -+------------------+-----------+ -| mysql-bin.000113 | 2214 | -... -| mysql-bin.000117 | 535 | -+------------------+-----------+ -``` - -It's possible to follow the _master change_ history if option `binlog_structure=tree`: -the displayed log file names have a prefix with replication domain_id and server_id. +of log files which have been downloaded and to follow the _master change_ +history: the displayed log file names have a prefix with +replication domain_id and server_id. ``` MariaDB> SHOW BINARY LOGS; diff --git a/server/modules/routing/binlogrouter/blr.c b/server/modules/routing/binlogrouter/blr.c index 3edba714a..e102ed24a 100644 --- a/server/modules/routing/binlogrouter/blr.c +++ b/server/modules/routing/binlogrouter/blr.c @@ -379,10 +379,8 @@ createInstance(SERVICE *service, char **options) /* Set router uuid */ inst->uuid = config_copy_string(params, "uuid"); - /* Enable Flat or Tree storage of binlog files */ - inst->storage_type = config_get_enum(params, - "binlog_structure", - binlog_storage_values); + /* Set Flat storage of binlog files as default */ + inst->storage_type = BLR_BINLOG_STORAGE_FLAT; if (inst->uuid == NULL) { @@ -549,13 +547,6 @@ createInstance(SERVICE *service, char **options) { inst->mariadb10_master_gtid = config_truth_value(value); } - else if (strcmp(options[i], "binlog_structure") == 0) - { - /* Enable Flat or Tree storage of binlog files */ - inst->storage_type = strcasecmp(value, "tree") == 0 ? 
- BLR_BINLOG_STORAGE_TREE : - BLR_BINLOG_STORAGE_FLAT; - } else if (strcmp(options[i], "encryption_algorithm") == 0) { int ret = blr_check_encryption_algorithm(value); @@ -780,24 +771,12 @@ createInstance(SERVICE *service, char **options) inst->mariadb10_compat = true; } - /** - * Force GTID slave request handling if GTID Master registration is On - */ if (inst->mariadb10_master_gtid) { + /* Force GTID slave request handling */ inst->mariadb10_gtid = true; - } - - if (!inst->mariadb10_master_gtid && - inst->storage_type == BLR_BINLOG_STORAGE_TREE) - { - MXS_ERROR("%s: binlog_structure 'tree' mode can be enabled only" - " with MariaDB Master GTID registration feature." - " Please enable it with option" - " 'mariadb10_master_gtid = on'", - service->name); - free_instance(inst); - return NULL; + /* Force binlog storage as tree */ + inst->storage_type = BLR_BINLOG_STORAGE_TREE; } /* Log binlog structure storage mode */ @@ -806,6 +785,7 @@ createInstance(SERVICE *service, char **options) inst->storage_type == BLR_BINLOG_STORAGE_FLAT ? "'flat' mode" : "'tree' mode using GTID domain_id and server_id"); + /* Enable MariaDB the GTID maps store */ if (inst->mariadb10_compat && inst->mariadb10_gtid) From b223fc9482b4435ed64e596a7881376adb34bd35 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Fri, 13 Oct 2017 15:56:10 +0300 Subject: [PATCH 079/101] Fix REST API main document title The title still stated that it was design documentation on the API instead of being the authoritative documentation of it. --- Documentation/REST-API/API.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/REST-API/API.md b/Documentation/REST-API/API.md index bdd9696c4..ee066af3d 100644 --- a/Documentation/REST-API/API.md +++ b/Documentation/REST-API/API.md @@ -1,4 +1,4 @@ -# REST API design document +# REST API This document describes the version 1 of the MaxScale REST API. 
From 0b1e2ae0a55e488cb58c638479bb94bd18fe8e29 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Sat, 14 Oct 2017 06:35:56 +0300 Subject: [PATCH 080/101] Improve error messages for service parameter changes Made the error messages clearer when an attempt to change service parameters that cannot be altered at runtime is made. --- server/core/config_runtime.cc | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/server/core/config_runtime.cc b/server/core/config_runtime.cc index 19fa59fdb..14ebcf1cc 100644 --- a/server/core/config_runtime.cc +++ b/server/core/config_runtime.cc @@ -1661,7 +1661,22 @@ bool runtime_alter_service_from_json(SERVICE* service, json_t* new_json) } else { - runtime_error("Parameter '%s' cannot be modified", key); + const MXS_MODULE *mod = get_module(service->routerModule, MODULE_ROUTER); + std::string v = mxs::json_to_string(value); + + if (config_param_is_valid(mod->parameters, key, v.c_str(), NULL)) + { + runtime_error("Runtime modifications to router parameters is not supported: %s=%s", key, v.c_str()); + } + else if (!is_dynamic_param(key)) + { + runtime_error("Runtime modifications to static service parameters is not supported: %s=%s", key, v.c_str()); + } + else + { + runtime_error("Parameter '%s' cannot be modified at runtime", key); + } + rval = false; } } From 284cd504127c0391ac63bb41e948ec38dbbe3c18 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Sun, 22 Oct 2017 09:57:12 +0300 Subject: [PATCH 081/101] Bind REST API to localhost by default Binding to localhost by default is more secure than binding to all interfaces. 
--- Documentation/Getting-Started/Configuration-Guide.md | 3 +-- include/maxscale/config.h | 2 +- maxscale-system-test/cnf/maxscale.cnf.template.maxctrl | 1 + 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Documentation/Getting-Started/Configuration-Guide.md b/Documentation/Getting-Started/Configuration-Guide.md index 7343e61c0..59b0adc19 100644 --- a/Documentation/Getting-Started/Configuration-Guide.md +++ b/Documentation/Getting-Started/Configuration-Guide.md @@ -598,8 +598,7 @@ configuration file. #### `admin_host` The network interface where the HTTP admin interface listens on. The default -value is the IPv6 address `::` which listens on all available network -interfaces. +value is the IPv4 address `127.0.0.1` which only listens for local connections. #### `admin_port` diff --git a/include/maxscale/config.h b/include/maxscale/config.h index beef3986a..f5896070b 100644 --- a/include/maxscale/config.h +++ b/include/maxscale/config.h @@ -32,7 +32,7 @@ MXS_BEGIN_DECLS /** Default port where the REST API listens */ #define DEFAULT_ADMIN_HTTP_PORT 8989 -#define DEFAULT_ADMIN_HOST "::" +#define DEFAULT_ADMIN_HOST "127.0.0.1" #define RELEASE_STR_LENGTH 256 #define SYSNAME_LEN 256 diff --git a/maxscale-system-test/cnf/maxscale.cnf.template.maxctrl b/maxscale-system-test/cnf/maxscale.cnf.template.maxctrl index 5080829f5..a696b996c 100644 --- a/maxscale-system-test/cnf/maxscale.cnf.template.maxctrl +++ b/maxscale-system-test/cnf/maxscale.cnf.template.maxctrl @@ -2,6 +2,7 @@ threads=4 admin_auth=false log_info=1 +admin_host=:: [MySQL Monitor] type=monitor From d371ecb30fbf1a0cefe8fbc0be5ce3cea53f0277 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Sun, 22 Oct 2017 19:30:55 +0300 Subject: [PATCH 082/101] Only remove explicitly deleted relationships Only when a relationship is defined as a null JSON value, it should be deleted. If it is missing, it should be ignored. 
--- server/core/config_runtime.cc | 104 ++++++++++++++--------- server/core/test/rest-api/test/server.js | 4 +- 2 files changed, 67 insertions(+), 41 deletions(-) diff --git a/server/core/config_runtime.cc b/server/core/config_runtime.cc index 14ebcf1cc..f116c5dfb 100644 --- a/server/core/config_runtime.cc +++ b/server/core/config_runtime.cc @@ -973,51 +973,61 @@ bool runtime_destroy_monitor(MXS_MONITOR *monitor) } static bool extract_relations(json_t* json, StringSet& relations, - const char** relation_types, + const char* relation_type, bool (*relation_check)(const std::string&, const std::string&)) { bool rval = true; + json_t* arr = mxs_json_pointer(json, relation_type); - for (int i = 0; relation_types[i]; i++) + if (arr && json_is_array(arr)) { - json_t* arr = mxs_json_pointer(json, relation_types[i]); + size_t size = json_array_size(arr); - if (arr && json_is_array(arr)) + for (size_t j = 0; j < size; j++) { - size_t size = json_array_size(arr); + json_t* obj = json_array_get(arr, j); + json_t* id = json_object_get(obj, CN_ID); + json_t* type = mxs_json_pointer(obj, CN_TYPE); - for (size_t j = 0; j < size; j++) + if (id && json_is_string(id) && + type && json_is_string(type)) { - json_t* obj = json_array_get(arr, j); - json_t* id = json_object_get(obj, CN_ID); - json_t* type = mxs_json_pointer(obj, CN_TYPE); + std::string id_value = json_string_value(id); + std::string type_value = json_string_value(type); - if (id && json_is_string(id) && - type && json_is_string(type)) + if (relation_check(type_value, id_value)) { - std::string id_value = json_string_value(id); - std::string type_value = json_string_value(type); - - if (relation_check(type_value, id_value)) - { - relations.insert(id_value); - } - else - { - rval = false; - } + relations.insert(id_value); } else { rval = false; } } + else + { + rval = false; + } } } return rval; } +static inline bool is_null_relation(json_t* json, const char* relation) +{ + std::string str(relation); + size_t pos = 
str.rfind("/data"); + + ss_dassert(pos != std::string::npos); + str = str.substr(0, pos); + + json_t* data = mxs_json_pointer(json, relation); + json_t* base = mxs_json_pointer(json, str.c_str()); + + return (data && json_is_null(data)) || (base && json_is_null(base)); +} + static inline const char* get_string_or_null(json_t* json, const char* path) { const char* rval = NULL; @@ -1133,13 +1143,6 @@ static bool server_contains_required_fields(json_t* json) return rval; } -const char* server_relation_types[] = -{ - MXS_JSON_PTR_RELATIONSHIPS_SERVICES, - MXS_JSON_PTR_RELATIONSHIPS_MONITORS, - NULL -}; - static bool server_relation_is_valid(const std::string& type, const std::string& value) { return (type == CN_SERVICES && service_find(value.c_str())) || @@ -1289,7 +1292,8 @@ SERVER* runtime_create_server_from_json(json_t* json) StringSet relations; - if (extract_relations(json, relations, server_relation_types, server_relation_is_valid)) + if (extract_relations(json, relations, MXS_JSON_PTR_RELATIONSHIPS_SERVICES, server_relation_is_valid) && + extract_relations(json, relations, MXS_JSON_PTR_RELATIONSHIPS_MONITORS, server_relation_is_valid)) { if (runtime_create_server(name, address, port.c_str(), protocol, authenticator, authenticator_options)) { @@ -1322,12 +1326,33 @@ bool server_to_object_relations(SERVER* server, json_t* old_json, json_t* new_js return true; } - bool rval = false; + const char* server_relation_types[] = + { + MXS_JSON_PTR_RELATIONSHIPS_SERVICES, + MXS_JSON_PTR_RELATIONSHIPS_MONITORS, + NULL + }; + + bool rval = true; StringSet old_relations; StringSet new_relations; - if (extract_relations(old_json, old_relations, server_relation_types, server_relation_is_valid) && - extract_relations(new_json, new_relations, server_relation_types, server_relation_is_valid)) + for (int i = 0; server_relation_types[i]; i++) + { + // Extract only changed or deleted relationships + if (is_null_relation(new_json, server_relation_types[i]) || + 
mxs_json_pointer(new_json, server_relation_types[i])) + { + if (!extract_relations(new_json, new_relations, server_relation_types[i], server_relation_is_valid) || + !extract_relations(old_json, old_relations, server_relation_types[i], server_relation_is_valid)) + { + rval = false; + break; + } + } + } + + if (rval) { StringSet removed_relations; StringSet added_relations; @@ -1340,10 +1365,10 @@ bool server_to_object_relations(SERVER* server, json_t* old_json, json_t* new_js old_relations.begin(), old_relations.end(), std::inserter(added_relations, added_relations.begin())); - if (unlink_server_from_objects(server, removed_relations) && - link_server_to_objects(server, added_relations)) + if (!unlink_server_from_objects(server, removed_relations) || + !link_server_to_objects(server, added_relations)) { - rval = true; + rval = false; } } @@ -1434,7 +1459,7 @@ static bool validate_monitor_json(json_t* json) else { StringSet relations; - if (extract_relations(json, relations, object_relation_types, object_relation_is_valid)) + if (extract_relations(json, relations, MXS_JSON_PTR_RELATIONSHIPS_SERVERS, object_relation_is_valid)) { rval = true; } @@ -1517,9 +1542,10 @@ bool object_to_server_relations(const char* target, json_t* old_json, json_t* ne bool rval = false; StringSet old_relations; StringSet new_relations; + const char* object_relation = MXS_JSON_PTR_RELATIONSHIPS_SERVERS; - if (extract_relations(old_json, old_relations, object_relation_types, object_relation_is_valid) && - extract_relations(new_json, new_relations, object_relation_types, object_relation_is_valid)) + if (extract_relations(old_json, old_relations, object_relation, object_relation_is_valid) && + extract_relations(new_json, new_relations, object_relation, object_relation_is_valid)) { StringSet removed_relations; StringSet added_relations; diff --git a/server/core/test/rest-api/test/server.js b/server/core/test/rest-api/test/server.js index bf975fc6e..025cbba14 100644 --- 
a/server/core/test/rest-api/test/server.js +++ b/server/core/test/rest-api/test/server.js @@ -68,8 +68,8 @@ describe("Server Relationships", function() { }); it("remove relationships", function() { - delete rel_server.data.relationships["services"] - delete rel_server.data.relationships["monitors"] + rel_server.data.relationships["services"] = null + rel_server.data.relationships["monitors"] = null return request.patch(base_url + "/servers/" + rel_server.data.id, {json: rel_server}) .should.be.fulfilled }); From 582a65f77c7ee8089a86b27894a9ca4e2e445bfc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Mon, 23 Oct 2017 10:31:14 +0300 Subject: [PATCH 083/101] Do not return empty relationships If no relationships of a particular type are defined for a resource, the key for that relationship should not be defined. --- include/maxscale/monitor.h | 2 +- include/maxscale/service.h | 2 +- server/core/monitor.cc | 19 ++++++++++++++++--- server/core/server.cc | 15 +++++++++++++-- server/core/service.cc | 19 ++++++++++++++++--- 5 files changed, 47 insertions(+), 10 deletions(-) diff --git a/include/maxscale/monitor.h b/include/maxscale/monitor.h index 25e657400..4c55775b1 100644 --- a/include/maxscale/monitor.h +++ b/include/maxscale/monitor.h @@ -322,7 +322,7 @@ json_t* monitor_list_to_json(const char* host); * @param server Server to inspect * @param host Hostname of this server * - * @return Array of monitor links + * @return Array of monitor links or NULL if no relations exist */ json_t* monitor_relations_to_server(const SERVER* server, const char* host); diff --git a/include/maxscale/service.h b/include/maxscale/service.h index 9217a3652..ac1574647 100644 --- a/include/maxscale/service.h +++ b/include/maxscale/service.h @@ -341,7 +341,7 @@ json_t* service_listener_to_json(const SERVICE* service, const char* name, const * @param server Server to inspect * @param host Hostname of this server * - * @return Array of service links + * @return Array of 
service links or NULL if no relations exist */ json_t* service_relations_to_server(const SERVER* server, const char* host); diff --git a/server/core/monitor.cc b/server/core/monitor.cc index cc22d093f..b704c8f18 100644 --- a/server/core/monitor.cc +++ b/server/core/monitor.cc @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -1802,8 +1803,7 @@ json_t* monitor_list_to_json(const char* host) json_t* monitor_relations_to_server(const SERVER* server, const char* host) { - json_t* rel = mxs_json_relationship(host, MXS_JSON_API_MONITORS); - + std::vector names; spinlock_acquire(&monLock); for (MXS_MONITOR* mon = allMonitors; mon; mon = mon->next) @@ -1816,7 +1816,7 @@ json_t* monitor_relations_to_server(const SERVER* server, const char* host) { if (db->server == server) { - mxs_json_add_relation(rel, mon->name, CN_MONITORS); + names.push_back(mon->name); break; } } @@ -1827,6 +1827,19 @@ json_t* monitor_relations_to_server(const SERVER* server, const char* host) spinlock_release(&monLock); + json_t* rel = NULL; + + if (!names.empty()) + { + rel = mxs_json_relationship(host, MXS_JSON_API_MONITORS); + + for (std::vector::iterator it = names.begin(); + it != names.end(); it++) + { + mxs_json_add_relation(rel, it->c_str(), CN_MONITORS); + } + } + return rel; } diff --git a/server/core/server.cc b/server/core/server.cc index 0aac85b8c..0ad8bf10e 100644 --- a/server/core/server.cc +++ b/server/core/server.cc @@ -1502,8 +1502,19 @@ static json_t* server_to_json_data(const SERVER* server, const char* host) /** Relationships */ json_t* rel = json_object(); - json_object_set_new(rel, CN_SERVICES, service_relations_to_server(server, host)); - json_object_set_new(rel, CN_MONITORS, monitor_relations_to_server(server, host)); + json_t* service_rel = service_relations_to_server(server, host); + json_t* monitor_rel = monitor_relations_to_server(server, host); + + if (service_rel) + { + json_object_set_new(rel, CN_SERVICES, service_rel); + } + + if (monitor_rel) 
+ { + json_object_set_new(rel, CN_MONITORS, monitor_rel); + } + json_object_set_new(rval, CN_RELATIONSHIPS, rel); /** Attributes */ json_object_set_new(rval, CN_ATTRIBUTES, server_json_attributes(server)); diff --git a/server/core/service.cc b/server/core/service.cc index 544c111d4..7906a8bc4 100644 --- a/server/core/service.cc +++ b/server/core/service.cc @@ -28,6 +28,7 @@ #include #include #include +#include #include #include @@ -2700,8 +2701,7 @@ json_t* service_relations_to_filter(const MXS_FILTER_DEF* filter, const char* ho json_t* service_relations_to_server(const SERVER* server, const char* host) { - json_t* rel = mxs_json_relationship(host, MXS_JSON_API_SERVICES); - + std::vector names; spinlock_acquire(&service_spin); for (SERVICE *service = allServices; service; service = service->next) @@ -2712,7 +2712,7 @@ json_t* service_relations_to_server(const SERVER* server, const char* host) { if (ref->server == server && SERVER_REF_IS_ACTIVE(ref)) { - mxs_json_add_relation(rel, service->name, CN_SERVICES); + names.push_back(service->name); } } @@ -2721,6 +2721,19 @@ json_t* service_relations_to_server(const SERVER* server, const char* host) spinlock_release(&service_spin); + json_t* rel = NULL; + + if (!names.empty()) + { + rel = mxs_json_relationship(host, MXS_JSON_API_SERVICES); + + for (std::vector::iterator it = names.begin(); + it != names.end(); it++) + { + mxs_json_add_relation(rel, it->c_str(), CN_SERVICES); + } + } + return rel; } From b7b50959ac3d5efd3604bf9cd2c049933468aa61 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Mon, 23 Oct 2017 11:28:25 +0300 Subject: [PATCH 084/101] Update REST API tests Extended the relationship checks in the REST API tests to actually verify that the returned objects represent what is expected. 
--- server/core/test/rest-api/test/monitor.js | 32 ++++++++++++----------- server/core/test/rest-api/test/server.js | 8 +++--- 2 files changed, 22 insertions(+), 18 deletions(-) diff --git a/server/core/test/rest-api/test/monitor.js b/server/core/test/rest-api/test/monitor.js index eb58a0e19..89b7a006f 100644 --- a/server/core/test/rest-api/test/monitor.js +++ b/server/core/test/rest-api/test/monitor.js @@ -48,30 +48,32 @@ describe("Monitor Relationships", function() { }) it("remove relationships from old monitor", function() { - - return request.get(base_url + "/monitors/MySQL-Monitor") - .then(function(resp) { - var mon = JSON.parse(resp) - delete mon.data.relationships.servers - return request.patch(base_url + "/monitors/MySQL-Monitor", {json: mon}) - }) - .should.be.fulfilled + var mon = { data: { + relationships: { + servers: null + }}} + return request.patch(base_url + "/monitors/MySQL-Monitor", {body: mon}) + .then(() => request.get(base_url + "/monitors/MySQL-Monitor", { json: true })) + .then((res) => { + res.data.relationships.servers.should.be.undefined + }) }); it("add relationships to new monitor", function() { - - return request.get(base_url + "/monitors/" + monitor.data.id) - .then(function(resp) { - var mon = JSON.parse(resp) - mon.data.relationships.servers = [ + var mon = { data: { + relationships: { + servers: [ {id: "server1", type: "servers"}, {id: "server2", type: "servers"}, {id: "server3", type: "servers"}, {id: "server4", type: "servers"}, ] - return request.patch(base_url + "/monitors/" + monitor.data.id, {json: mon}) + }}} + return request.patch(base_url + "/monitors/" + monitor.data.id, {body: mon}) + .then(() => request.get(base_url + "/monitors/" + monitor.data.id, { json: true })) + .then((res) => { + res.data.relationships.servers.data.should.have.lengthOf(4) }) - .should.be.fulfilled }); it("move relationships back to old monitor", function() { diff --git a/server/core/test/rest-api/test/server.js 
b/server/core/test/rest-api/test/server.js index 025cbba14..83fc20c11 100644 --- a/server/core/test/rest-api/test/server.js +++ b/server/core/test/rest-api/test/server.js @@ -57,14 +57,16 @@ describe("Server Relationships", function() { var rel_server = JSON.parse(JSON.stringify(server)) rel_server.data.relationships = rel - it("create new server", function() { + it("create new server with relationships", function() { return request.post(base_url + "/servers/", {json: rel_server}) .should.be.fulfilled }); it("request server", function() { - return request.get(base_url + "/servers/" + rel_server.data.id) - .should.be.fulfilled + return request.get(base_url + "/servers/" + rel_server.data.id, { json: true }) + .then((res) => { + res.data.relationships.services.data.should.have.lengthOf(2) + }) }); it("remove relationships", function() { From 7111724851cd17e2980feb509d2c1ea0e31e8c7a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Mon, 23 Oct 2017 12:32:38 +0300 Subject: [PATCH 085/101] Fix REST API monitor relationship test The test did not properly move the relationships from the old monitor to the new one. The test to passed as the relationship modification was not really tested. 
--- server/core/test/rest-api/test/monitor.js | 53 ++++++++++++----------- 1 file changed, 27 insertions(+), 26 deletions(-) diff --git a/server/core/test/rest-api/test/monitor.js b/server/core/test/rest-api/test/monitor.js index 89b7a006f..ace99007c 100644 --- a/server/core/test/rest-api/test/monitor.js +++ b/server/core/test/rest-api/test/monitor.js @@ -52,24 +52,26 @@ describe("Monitor Relationships", function() { relationships: { servers: null }}} - return request.patch(base_url + "/monitors/MySQL-Monitor", {body: mon}) + return request.patch(base_url + "/monitors/MySQL-Monitor", {json: mon}) .then(() => request.get(base_url + "/monitors/MySQL-Monitor", { json: true })) .then((res) => { - res.data.relationships.servers.should.be.undefined + res.data.relationships.should.not.have.keys("servers") }) }); it("add relationships to new monitor", function() { var mon = { data: { relationships: { - servers: [ - {id: "server1", type: "servers"}, - {id: "server2", type: "servers"}, - {id: "server3", type: "servers"}, - {id: "server4", type: "servers"}, - ] + servers: { + data:[ + {id: "server1", type: "servers"}, + {id: "server2", type: "servers"}, + {id: "server3", type: "servers"}, + {id: "server4", type: "servers"}, + ] + } }}} - return request.patch(base_url + "/monitors/" + monitor.data.id, {body: mon}) + return request.patch(base_url + "/monitors/" + monitor.data.id, {json: mon}) .then(() => request.get(base_url + "/monitors/" + monitor.data.id, { json: true })) .then((res) => { res.data.relationships.servers.data.should.have.lengthOf(4) @@ -77,27 +79,26 @@ describe("Monitor Relationships", function() { }); it("move relationships back to old monitor", function() { - - return request.get(base_url + "/monitors/" + monitor.data.id) - .then(function(resp) { - var mon = JSON.parse(resp) - delete mon.data.relationships.servers - return request.patch(base_url + "/monitors/" + monitor.data.id, {json: mon}) + var mon = {data: {relationships: {servers: null}}} + return 
request.patch(base_url + "/monitors/" + monitor.data.id, {json: mon}) + .then(() => request.get(base_url + "/monitors/" + monitor.data.id, { json: true })) + .then((res) => { + res.data.relationships.should.not.have.keys("servers") }) .then(function() { - return request.get(base_url + "/monitors/MySQL-Monitor") - }) - .then(function(resp) { - var mon = JSON.parse(resp) - mon.data.relationships.servers = [ - {id: "server1", type: "servers"}, - {id: "server2", type: "servers"}, - {id: "server3", type: "servers"}, - {id: "server4", type: "servers"}, - ] + mon.data.relationships.servers = { + data: [ + {id: "server1", type: "servers"}, + {id: "server2", type: "servers"}, + {id: "server3", type: "servers"}, + {id: "server4", type: "servers"}, + ]} return request.patch(base_url + "/monitors/MySQL-Monitor", {json: mon}) }) - .should.be.fulfilled + .then(() => request.get(base_url + "/monitors/MySQL-Monitor", { json: true })) + .then((res) => { + res.data.relationships.servers.data.should.have.lengthOf(4) + }) }); it("destroy created monitor", function() { From 6918842585fd9fb678d67ef779081719ec5a2c61 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Mon, 23 Oct 2017 12:53:38 +0300 Subject: [PATCH 086/101] Add direct relationship updating to REST API The JSON API specification states that all resources must support direct modification of resource relationships by providing only the definition for a particular relationship type to a /:type/:id/relationships/:type endpoint. 
The relevant part of the JSON API specification: http://jsonapi.org/format/#crud-updating-to-many-relationships --- server/core/config_runtime.cc | 85 +++++++++++++++++++++++++-- server/core/maxscale/config_runtime.h | 31 ++++++++++ server/core/resource.cc | 59 +++++++++++++++++++ 3 files changed, 171 insertions(+), 4 deletions(-) diff --git a/server/core/config_runtime.cc b/server/core/config_runtime.cc index f116c5dfb..5c4b2ca48 100644 --- a/server/core/config_runtime.cc +++ b/server/core/config_runtime.cc @@ -1415,11 +1415,46 @@ bool runtime_alter_server_from_json(SERVER* server, json_t* new_json) return rval; } -const char* object_relation_types[] = +static bool is_valid_relationship_body(json_t* json) { - MXS_JSON_PTR_RELATIONSHIPS_SERVERS, - NULL -}; + bool rval = true; + + json_t* obj = mxs_json_pointer(json, MXS_JSON_PTR_DATA); + + if (!obj) + { + runtime_error("Field '%s' is not defined", MXS_JSON_PTR_DATA); + rval = false; + } + else if (!json_is_array(obj)) + { + runtime_error("Field '%s' is not an array", MXS_JSON_PTR_DATA); + rval = false; + } + + return rval; +} + +bool runtime_alter_server_relationships_from_json(SERVER* server, const char* type, json_t* json) +{ + bool rval = false; + mxs::Closer old_json(server_to_json(server, "")); + ss_dassert(old_json.get()); + + if (is_valid_relationship_body(json)) + { + mxs::Closer j(json_pack("{s: {s: {s: {s: O}}}}", "data", + "relationships", type, "data", + json_object_get(json, "data"))); + + if (server_to_object_relations(server, old_json.get(), j.get())) + { + rval = true; + } + } + + return rval; +} static bool object_relation_is_valid(const std::string& type, const std::string& value) { @@ -1624,6 +1659,48 @@ bool runtime_alter_monitor_from_json(MXS_MONITOR* monitor, json_t* new_json) return rval; } +bool runtime_alter_monitor_relationships_from_json(MXS_MONITOR* monitor, json_t* json) +{ + bool rval = false; + mxs::Closer old_json(monitor_to_json(monitor, "")); + ss_dassert(old_json.get()); + + if 
(is_valid_relationship_body(json)) + { + mxs::Closer j(json_pack("{s: {s: {s: {s: O}}}}", "data", + "relationships", "servers", "data", + json_object_get(json, "data"))); + + if (object_to_server_relations(monitor->name, old_json.get(), j.get())) + { + rval = true; + } + } + + return rval; +} + +bool runtime_alter_service_relationships_from_json(SERVICE* service, json_t* json) +{ + bool rval = false; + mxs::Closer old_json(service_to_json(service, "")); + ss_dassert(old_json.get()); + + if (is_valid_relationship_body(json)) + { + mxs::Closer j(json_pack("{s: {s: {s: {s: O}}}}", "data", + "relationships", "servers", "data", + json_object_get(json, "data"))); + + if (object_to_server_relations(service->name, old_json.get(), j.get())) + { + rval = true; + } + } + + return rval; +} + /** * @brief Check if the service parameter can be altered at runtime * diff --git a/server/core/maxscale/config_runtime.h b/server/core/maxscale/config_runtime.h index 5014bddc0..60297f8c6 100644 --- a/server/core/maxscale/config_runtime.h +++ b/server/core/maxscale/config_runtime.h @@ -220,6 +220,17 @@ SERVER* runtime_create_server_from_json(json_t* json); */ bool runtime_alter_server_from_json(SERVER* server, json_t* new_json); +/** + * @brief Alter server relationships + * + * @param server Server to alter + * @param type Type of the relation, either @c services or @c monitors + * @param json JSON that defines the relationship data + * + * @return True if the relationships were successfully modified + */ +bool runtime_alter_server_relationships_from_json(SERVER* server, const char* type, json_t* json); + /** * @brief Create a new monitor from JSON * @@ -239,6 +250,16 @@ MXS_MONITOR* runtime_create_monitor_from_json(json_t* json); */ bool runtime_alter_monitor_from_json(MXS_MONITOR* monitor, json_t* new_json); +/** + * @brief Alter monitor relationships + * + * @param monitor Monitor to alter + * @param json JSON that defines the new relationships + * + * @return True if the 
relationships were successfully modified + */ +bool runtime_alter_monitor_relationships_from_json(MXS_MONITOR* monitor, json_t* json); + /** * @brief Alter a service using JSON * @@ -249,6 +270,16 @@ bool runtime_alter_monitor_from_json(MXS_MONITOR* monitor, json_t* new_json); */ bool runtime_alter_service_from_json(SERVICE* service, json_t* new_json); +/** + * @brief Alter service relationships + * + * @param service Service to alter + * @param json JSON that defines the new relationships + * + * @return True if the relationships were successfully modified + */ +bool runtime_alter_service_relationships_from_json(SERVICE* service, json_t* json); + /** * @brief Create a listener from JSON * diff --git a/server/core/resource.cc b/server/core/resource.cc index 25fc3d09d..fa1ccc36f 100644 --- a/server/core/resource.cc +++ b/server/core/resource.cc @@ -285,6 +285,29 @@ HttpResponse cb_alter_server(const HttpRequest& request) return HttpResponse(MHD_HTTP_FORBIDDEN, runtime_get_json_error()); } +HttpResponse do_alter_server_relationship(const HttpRequest& request, const char* type) +{ + SERVER* server = server_find_by_unique_name(request.uri_part(1).c_str()); + ss_dassert(server && request.get_json()); + + if (runtime_alter_server_relationships_from_json(server, type, request.get_json())) + { + return HttpResponse(MHD_HTTP_NO_CONTENT); + } + + return HttpResponse(MHD_HTTP_FORBIDDEN, runtime_get_json_error()); +} + +HttpResponse cb_alter_server_service_relationship(const HttpRequest& request) +{ + return do_alter_server_relationship(request, "services"); +} + +HttpResponse cb_alter_server_monitor_relationship(const HttpRequest& request) +{ + return do_alter_server_relationship(request, "monitors"); +} + HttpResponse cb_create_monitor(const HttpRequest& request) { ss_dassert(request.get_json()); @@ -323,6 +346,19 @@ HttpResponse cb_alter_monitor(const HttpRequest& request) return HttpResponse(MHD_HTTP_FORBIDDEN, runtime_get_json_error()); } +HttpResponse 
cb_alter_monitor_server_relationship(const HttpRequest& request) +{ + MXS_MONITOR* monitor = monitor_find(request.uri_part(1).c_str()); + ss_dassert(monitor && request.get_json()); + + if (runtime_alter_monitor_relationships_from_json(monitor, request.get_json())) + { + return HttpResponse(MHD_HTTP_NO_CONTENT); + } + + return HttpResponse(MHD_HTTP_FORBIDDEN, runtime_get_json_error()); +} + HttpResponse cb_alter_service(const HttpRequest& request) { SERVICE* service = service_find(request.uri_part(1).c_str()); @@ -336,6 +372,19 @@ HttpResponse cb_alter_service(const HttpRequest& request) return HttpResponse(MHD_HTTP_FORBIDDEN, runtime_get_json_error()); } +HttpResponse cb_alter_service_server_relationship(const HttpRequest& request) +{ + SERVICE* service = service_find(request.uri_part(1).c_str()); + ss_dassert(service && request.get_json()); + + if (runtime_alter_service_relationships_from_json(service, request.get_json())) + { + return HttpResponse(MHD_HTTP_NO_CONTENT); + } + + return HttpResponse(MHD_HTTP_FORBIDDEN, runtime_get_json_error()); +} + HttpResponse cb_alter_logs(const HttpRequest& request) { ss_dassert(request.get_json()); @@ -792,6 +841,16 @@ public: m_patch.push_back(SResource(new Resource(cb_alter_logs, 2, "maxscale", "logs"))); m_patch.push_back(SResource(new Resource(cb_alter_maxscale, 1, "maxscale"))); + /** Update resource relationships directly */ + m_patch.push_back(SResource(new Resource(cb_alter_server_service_relationship, 4, + "servers", ":server", "relationships", "services"))); + m_patch.push_back(SResource(new Resource(cb_alter_server_monitor_relationship, 4, + "servers", ":server", "relationships", "monitors"))); + m_patch.push_back(SResource(new Resource(cb_alter_monitor_server_relationship, 4, + "monitors", ":monitor", "relationships", "servers"))); + m_patch.push_back(SResource(new Resource(cb_alter_service_server_relationship, 4, + "services", ":service", "relationships", "servers"))); + /** All patch resources require a request 
body */ for (ResourceList::iterator it = m_patch.begin(); it != m_patch.end(); it++) { From cd9a84d762e7502e35cdf13e7d8fc4a8e300d517 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Mon, 23 Oct 2017 13:08:11 +0300 Subject: [PATCH 087/101] Add tests for direct relationship updating The tests check that direct updates to the relationships endpoints work. --- server/core/test/rest-api/test/monitor.js | 48 +++++++++++++++++++++++ server/core/test/rest-api/test/server.js | 27 +++++++++++++ server/core/test/rest-api/test/service.js | 27 +++++++++++++ 3 files changed, 102 insertions(+) diff --git a/server/core/test/rest-api/test/monitor.js b/server/core/test/rest-api/test/monitor.js index ace99007c..e98160ba1 100644 --- a/server/core/test/rest-api/test/monitor.js +++ b/server/core/test/rest-api/test/monitor.js @@ -101,6 +101,54 @@ describe("Monitor Relationships", function() { }) }); + it("add relationships via `relationships` endpoint", function() { + var old = { data: [ + { id: "server2", type: "servers" }, + { id: "server3", type: "servers" }, + { id: "server4", type: "servers" } + ]} + var created = { data: [ + { id: "server1", type: "servers" } + ]} + + return request.patch(base_url + "/monitors/MySQL-Monitor/relationships/servers", {json: old}) + .then(() => request.patch(base_url + "/monitors/" + monitor.data.id + "/relationships/servers", {json: created})) + .then(() => request.get(base_url + "/monitors/MySQL-Monitor", { json: true })) + .then((res) => { + res.data.relationships.servers.data.should.have.lengthOf(3) + }) + .then(() => request.get(base_url + "/monitors/" + monitor.data.id , { json: true })) + .then((res) => { + res.data.relationships.servers.data.should.have.lengthOf(1) + .that.deep.includes({ id: "server1", type: "servers" }) + }) + }); + + it("bad request body with `relationships` endpoint should be rejected", function() { + return request.patch(base_url + "/monitors/" + monitor.data.id + "/relationships/servers", {json: 
{data: null}}) + .should.be.rejected + }) + + it("remove relationships via `relationships` endpoint", function() { + var old = { data: [ + { id: "server1", type: "servers" }, + { id: "server2", type: "servers" }, + { id: "server3", type: "servers" }, + { id: "server4", type: "servers" } + ]} + + return request.patch(base_url + "/monitors/" + monitor.data.id + "/relationships/servers", {json: {data: []}}) + .then(() => request.patch(base_url + "/monitors/MySQL-Monitor/relationships/servers", {json: old})) + .then(() => request.get(base_url + "/monitors/MySQL-Monitor", { json: true })) + .then((res) => { + res.data.relationships.servers.data.should.have.lengthOf(4) + }) + .then(() => request.get(base_url + "/monitors/" + monitor.data.id , { json: true })) + .then((res) => { + res.data.relationships.should.not.have.keys("servers") + }) + }); + it("destroy created monitor", function() { return request.delete(base_url + "/monitors/" + monitor.data.id) .should.be.fulfilled diff --git a/server/core/test/rest-api/test/server.js b/server/core/test/rest-api/test/server.js index 83fc20c11..0741e4790 100644 --- a/server/core/test/rest-api/test/server.js +++ b/server/core/test/rest-api/test/server.js @@ -69,6 +69,33 @@ describe("Server Relationships", function() { }) }); + it("add relationships with `relationships` endpoint", function() { + return request.patch(base_url + "/servers/" + rel_server.data.id + "/relationships/monitors", + { json: { data: [ { "id": "MySQL-Monitor", "type": "monitors" }]}}) + .then(() => request.get(base_url + "/servers/" + rel_server.data.id, {json: true})) + .then((res) => { + res.data.relationships.monitors.data.should.have.lengthOf(1) + .that.has.deep.include({ "id": "MySQL-Monitor", "type": "monitors" }) + }) + }); + + it("bad request body with `relationships` endpoint should be rejected", function() { + var body = {data: null} + return request.patch(base_url + "/servers/" + rel_server.data.id + "/relationships/monitors", { json: body }) + 
.should.be.rejected + }); + + it("remove relationships with `relationships` endpoint", function() { + var body = {data: []} + return request.patch(base_url + "/servers/" + rel_server.data.id + "/relationships/monitors", { json: body }) + .then(() => request.get(base_url + "/servers/" + rel_server.data.id, {json: true})) + .then((res) => { + // Only monitor relationship should be undefined + res.data.relationships.should.not.have.keys("monitors") + res.data.relationships.should.have.keys("services") + }) + }); + it("remove relationships", function() { rel_server.data.relationships["services"] = null rel_server.data.relationships["monitors"] = null diff --git a/server/core/test/rest-api/test/service.js b/server/core/test/rest-api/test/service.js index f74196d21..f0718cc8d 100644 --- a/server/core/test/rest-api/test/service.js +++ b/server/core/test/rest-api/test/service.js @@ -63,6 +63,33 @@ describe("Service", function() { }) }); + it("bad request body with `relationships` endpoint should be rejected", function() { + return request.patch(base_url + "/services/RW-Split-Router/relationships/servers", {json: {data: null}}) + .should.be.rejected + }) + + it("remove service relationship via `relationships` endpoint", function() { + return request.patch(base_url + "/services/RW-Split-Router/relationships/servers", { json: {data: []}}) + .then(() => request.get(base_url + "/services/RW-Split-Router", { json: true })) + .then((res) => { + res.data.relationships.should.not.have.keys("servers") + }) + }); + + it("add service relationship via `relationships` endpoint", function() { + return request.patch(base_url + "/services/RW-Split-Router/relationships/servers", + { json: { data: [ + {id: "server1", type: "servers"}, + {id: "server2", type: "servers"}, + {id: "server3", type: "servers"}, + {id: "server4", type: "servers"}, + ]}}) + .then(() => request.get(base_url + "/services/RW-Split-Router", { json: true})) + .then((res) => { + 
res.data.relationships.servers.data.should.have.lengthOf(4) + }) + }); + const listener = { "links": { "self": "http://localhost:8989/v1/services/RW-Split-Router/listeners" From 0bfe89b86ba1ca10508c6740de1d32b4ab5973d1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Mon, 23 Oct 2017 13:28:19 +0300 Subject: [PATCH 088/101] Update REST API documentation Added relationships endpoint documentation. --- Documentation/REST-API/Resources-Monitor.md | 48 ++++++++++++++++++++ Documentation/REST-API/Resources-Server.md | 49 +++++++++++++++++++++ Documentation/REST-API/Resources-Service.md | 48 ++++++++++++++++++++ 3 files changed, 145 insertions(+) diff --git a/Documentation/REST-API/Resources-Monitor.md b/Documentation/REST-API/Resources-Monitor.md index 61a09f500..84495a0a2 100644 --- a/Documentation/REST-API/Resources-Monitor.md +++ b/Documentation/REST-API/Resources-Monitor.md @@ -298,6 +298,54 @@ Invalid request body: `Status: 403 Forbidden` +### Update monitor relationships + +``` +PATCH /v1/monitors/:name/relationships/servers +``` + +The _:name_ in the URI must map to a monitor name with all whitespace replaced +with hyphens. + +The request body must be a JSON object that defines only the _data_ field. The +value of the _data_ field must be an array of relationship objects that define +the _id_ and _type_ fields of the relationship. This object will replace the +existing relationships of the monitor. + +The following is an example request and request body that defines a single +server relationship for a monitor. + +``` +PATCH /v1/monitors/my-monitor/relationships/servers + +{ + data: [ + { "id": "my-server", "type": "servers" } + ] +} +``` + +All relationships for a monitor can be deleted by sending an empty array as the +_data_ field value. The following example removes all servers from a monitor. 
+ +``` +PATCH /v1/monitors/my-monitor/relationships/servers + +{ + data: [] +} +``` + +#### Response + +Monitor relationships modified: + +`Status: 204 No Content` + +Invalid JSON body: + +`Status: 403 Forbidden` + ### Destroy a monitor Destroy a created monitor. The monitor must not have relationships to any diff --git a/Documentation/REST-API/Resources-Server.md b/Documentation/REST-API/Resources-Server.md index cd3c0316c..1409790f7 100644 --- a/Documentation/REST-API/Resources-Server.md +++ b/Documentation/REST-API/Resources-Server.md @@ -467,6 +467,55 @@ Invalid JSON body: `Status: 403 Forbidden` +### Update server relationships + +``` +PATCH /v1/servers/:name/relationships/:type +``` + +The _:name_ in the URI must map to a server name with all whitespace replaced +with hyphens. The _:type_ in the URI must be either _services_, for service +relationships, or _monitors_, for monitor relationships. + +The request body must be a JSON object that defines only the _data_ field. The +value of the _data_ field must be an array of relationship objects that define +the _id_ and _type_ fields of the relationship. This object will replace the +existing relationships of the particular type from the server. + +The following is an example request and request body that defines a single +service relationship for a server. + +``` +PATCH /v1/servers/my-db-server/relationships/services + +{ + data: [ + { "id": "my-rwsplit-service", "type": "services" } + ] +} +``` + +All relationships for a server can be deleted by sending an empty array as the +_data_ field value. The following example removes the server from all services. 
+ +``` +PATCH /v1/servers/my-db-server/relationships/services + +{ + data: [] +} +``` + +#### Response + +Server relationships modified: + +`Status: 204 No Content` + +Invalid JSON body: + +`Status: 403 Forbidden` + ### Destroy a server ``` diff --git a/Documentation/REST-API/Resources-Service.md b/Documentation/REST-API/Resources-Service.md index 382b46c3c..bb59c05b1 100644 --- a/Documentation/REST-API/Resources-Service.md +++ b/Documentation/REST-API/Resources-Service.md @@ -420,6 +420,54 @@ Service is modified: `Status: 204 No Content` +### Update service relationships + +``` +PATCH /v1/services/:name/relationships/servers +``` + +The _:name_ in the URI must map to a service name with all whitespace replaced +with hyphens. + +The request body must be a JSON object that defines only the _data_ field. The +value of the _data_ field must be an array of relationship objects that define +the _id_ and _type_ fields of the relationship. This object will replace the +existing relationships of the service. + +The following is an example request and request body that defines a single +server relationship for a service. + +``` +PATCH /v1/services/my-rw-service/relationships/servers + +{ + data: [ + { "id": "my-server", "type": "servers" } + ] +} +``` + +All relationships for a service can be deleted by sending an empty array as the +_data_ field value. The following example removes all servers from a service. + +``` +PATCH /v1/services/my-rw-service/relationships/servers + +{ + data: [] +} +``` + +#### Response + +Service relationships modified: + +`Status: 204 No Content` + +Invalid JSON body: + +`Status: 403 Forbidden` + ### Stop a service Stops a started service. From d6812b91a0f5285e7046e3ca3a1cf07df007a70f Mon Sep 17 00:00:00 2001 From: MassimilianoPinto Date: Mon, 23 Oct 2017 16:02:57 +0200 Subject: [PATCH 089/101] MXS-1485: MariaDB 10 GTID is always on for slave connections MariaDB 10 GTID is always on for slave connections. 
Remove mariadb10_slave_gtid option --- Documentation/Routers/Binlogrouter.md | 54 +++++++++---------- ...eplication-Proxy-Binlog-Router-Tutorial.md | 22 ++++---- server/modules/routing/binlogrouter/blr.c | 9 +--- 3 files changed, 38 insertions(+), 47 deletions(-) diff --git a/Documentation/Routers/Binlogrouter.md b/Documentation/Routers/Binlogrouter.md index 87eee1c33..69a11b4c3 100644 --- a/Documentation/Routers/Binlogrouter.md +++ b/Documentation/Routers/Binlogrouter.md @@ -159,15 +159,39 @@ the router options. ### `mariadb10-compatibility` This parameter allows binlogrouter to replicate from a MariaDB 10.0 master -server. If `mariadb10_slave_gtid` is not enabled GTID will not be used in the -replication. This parameter is enabled by default since MaxScale 2.2.0. In -earlier versions the parameter was disabled by default. +server: this parameter is enabled by default since MaxScale 2.2.0. +In earlier versions the parameter was disabled by default. ``` # Example router_options=mariadb10-compatibility=1 ``` + +Additionally, since MaxScale 2.2.1, MariaDB 10.x slave servers +can connect to binlog server using GTID value instead of binlog name and position. + +Example of a MariaDB 10.x slave connection to MaxScale + +``` +MariaDB> SET @@global.gtid_slave_pos='0-10122-230'; +MariaDB> CHANGE MASTER TO + MASTER_HOST='192.168.10.8', + MASTER_PORT=5306, + MASTER_USE_GTID=Slave_pos; +MariaDB> START SLAVE; +``` + +**Note:** + +- Slave servers can connect either with _file_ and _pos_ or GTID. + +- MaxScale saves all the incoming MariaDB GTIDs (DDLs and DMLs) +in a sqlite3 database located in _binlogdir_ (`gtid_maps.db`). +When a slave server connects with a GTID request a lookup is made for +the value match and following binlog events will be sent. 
+ + ### `transaction_safety` This parameter is used to enable/disable incomplete transactions detection in @@ -271,29 +295,6 @@ Example: 3;bbbbbbbbbaaaaaaabbbbbccccceeeddddd3333333ddddaaaaffffffeeeeecccd ``` - -### `mariadb10_slave_gtid` -If enabled this option allows MariaDB 10.x slave servers to connect to binlog -server using GTID value instead of binlog_file name and position. -MaxScale saves all the incoming MariaDB GTIDs (DDLs and DMLs) -in a sqlite3 database located in _binlogdir_ (`gtid_maps.db`). -When a slave server connects with a GTID request a lookup is made for -the value match and following binlog events will be sent. -Default option value is _off_. - -Example of a MariaDB 10.x slave connection to MaxScale - -``` -MariaDB> SET @@global.gtid_slave_pos='0-10122-230'; -MariaDB> CHANGE MASTER TO - MASTER_HOST='192.168.10.8', - MASTER_PORT=5306, - MASTER_USE_GTID=Slave_pos; -MariaDB> START SLAVE; -``` - -**Note:** Slave servers can connect either with _file_ and _pos_ or GTID. - ### `mariadb10_master_gtid` This option allows MaxScale binlog router to register with MariaDB 10.X master using GTID instead of _binlog_file_ name @@ -374,7 +375,6 @@ follows. 
encrypt_binlog=1, encryption_algorithm=aes_ctr, encryption_key_file=/var/binlogs/enc_key.txt, - mariadb10_slave_gtid=On, mariadb10_master_gtid=Off, slave_hostname=maxscale-blr-1, master_retry_count=1000, diff --git a/Documentation/Tutorials/Replication-Proxy-Binlog-Router-Tutorial.md b/Documentation/Tutorials/Replication-Proxy-Binlog-Router-Tutorial.md index 40f89cb30..a53a6a949 100644 --- a/Documentation/Tutorials/Replication-Proxy-Binlog-Router-Tutorial.md +++ b/Documentation/Tutorials/Replication-Proxy-Binlog-Router-Tutorial.md @@ -66,7 +66,6 @@ A **complete example** of a service entry for a binlog router service would be a encrypt_binlog=On, encryption_algorithm=aes_ctr, encryption_key_file=/var/binlogs/enc_key.txt, - mariadb10_slave_gtid=On, mariadb10_master_gtid=Off, slave_hostname=maxscale-blr-1, master_retry_count=1000, @@ -198,7 +197,7 @@ If a slave is connected to MaxScale with SSL, an entry will be present in the Sl Slave connected with SSL: Established ``` -If option `mariadb10_slave_gtid=On` last seen GTID is shown: +If option `mariadb10-compatibility=On` last seen GTID is shown: ``` Last seen MariaDB GTID: 0-10124-282 @@ -253,7 +252,7 @@ Master_SSL_Verify_Server_Cert: No Master_Info_File: /home/maxscale/binlog/first/binlogs/master.ini ``` -If the option `mariadb10_slave_gtid` is set to On, the last seen GTID is shown: +If the option `mariadb10-compatibility` is set to On, the last seen GTID is shown: ``` Using_Gtid: No @@ -277,11 +276,8 @@ slaves must not use *MASTER_AUTO_POSITION = 1* option. It also works with a MariaDB 10.X setup (master and slaves). -Starting from MaxScale 2.2 the slave connections may include **GTID** feature -`MASTER_USE_GTID=Slave_pos` if option *mariadb10_slave_gtid* has been set. 
- -The default is that a slave connection must not include any GTID -feature: `MASTER_USE_GTID=no` +Starting from MaxScale 2.2.1 the slave connections might optionally include +**GTID** feature `MASTER_USE_GTID=Slave_pos`: only option *mariadb10-compatibility* is required. Starting from MaxScale 2.2 it's also possible to register to MariaDB 10.X master using **GTID** using the new option *mariadb10_master_gtid*. @@ -545,8 +541,8 @@ be issued for the new configuration. ### Removing binary logs from binlogdir -Since version 2.2, if `mariadb10_slave_gtid` or `mariadb10_master_gtid` -are set to On, it's possible to remove the binlog files from _binlogdir_ +Since version 2.2.1, if `mariadb10-compatibility` is set to On, +it's possible to remove the binlog files from _binlogdir_ and delete related entries in GTID repository using the admin command `PURGE BINARY LOGS TO 'file'` @@ -653,8 +649,8 @@ Example: ``` ##### MariaDB 10 GTID -If connecting slaves are MariaDB 10.x it's also possible to connect with GTID, -*mariadb10_slave_gtid=On* has to be set in configuration before starting MaxScale. +Since MaxScale 2.2.1 the MariaDB 10.x connecting slaves can optionally connect with GTID, +*mariadb10-compatibility=On* has to be set in configuration before starting MaxScale.
``` SET @@global.gtid_slave_pos=''; @@ -688,7 +684,7 @@ MariaDB> CHANGE MASTER TO MariaDB> START SLAVE; ``` -Additionally, if *mariadb10_slave_gtid=On*, it's also possible to retrieve the list of binlog files downloaded from the master with the new admin command _SHOW BINARY LOGS_: +Additionally it's also possible to retrieve the list of binlog files downloaded from the master with the new admin command _SHOW BINARY LOGS_: ``` MariaDB> SHOW BINARY LOGS; diff --git a/server/modules/routing/binlogrouter/blr.c b/server/modules/routing/binlogrouter/blr.c index e102ed24a..e7c485b67 100644 --- a/server/modules/routing/binlogrouter/blr.c +++ b/server/modules/routing/binlogrouter/blr.c @@ -190,7 +190,6 @@ MXS_MODULE* MXS_CREATE_MODULE() MXS_MODULE_OPT_NONE, enc_algo_values }, {"encryption_key_file", MXS_MODULE_PARAM_PATH, NULL, MXS_MODULE_OPT_PATH_R_OK}, - {"mariadb10_slave_gtid", MXS_MODULE_PARAM_BOOL, "false"}, {"mariadb10_master_gtid", MXS_MODULE_PARAM_BOOL, "false"}, { "binlog_structure", MXS_MODULE_PARAM_ENUM, "flat", @@ -359,8 +358,8 @@ createInstance(SERVICE *service, char **options) inst->request_semi_sync = config_get_bool(params, "semisync"); inst->master_semi_sync = 0; - /* Enable MariaDB GTID tracking for slaves */ - inst->mariadb10_gtid = config_get_bool(params, "mariadb10_slave_gtid"); + /* Enable MariaDB GTID tracking for slaves if MariaDB 10 compat is set */ + inst->mariadb10_gtid = inst->mariadb10_compat; /* Enable MariaDB GTID registration to master */ inst->mariadb10_master_gtid = config_get_bool(params, "mariadb10_master_gtid"); @@ -539,10 +538,6 @@ createInstance(SERVICE *service, char **options) { inst->encryption.enabled = config_truth_value(value); } - else if (strcmp(options[i], "mariadb10_slave_gtid") == 0) - { - inst->mariadb10_gtid = config_truth_value(value); - } else if (strcmp(options[i], "mariadb10_master_gtid") == 0) { inst->mariadb10_master_gtid = config_truth_value(value); From 9d35c705f366c52335ad1651772bd2eea0c5970b Mon Sep 17 00:00:00 
2001 From: Johan Wikman Date: Tue, 24 Oct 2017 10:31:51 +0300 Subject: [PATCH 090/101] Update 2.1.10 release notes --- Documentation/Release-Notes/MaxScale-2.1.10-Release-Notes.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/Documentation/Release-Notes/MaxScale-2.1.10-Release-Notes.md b/Documentation/Release-Notes/MaxScale-2.1.10-Release-Notes.md index 294c92397..b2b2bd86b 100644 --- a/Documentation/Release-Notes/MaxScale-2.1.10-Release-Notes.md +++ b/Documentation/Release-Notes/MaxScale-2.1.10-Release-Notes.md @@ -40,10 +40,14 @@ To enable this functionality, add `query_retries=` under the [Here is a list of bugs fixed in MaxScale 2.1.10.](https://jira.mariadb.org/issues/?jql=project%20%3D%20MXS%20AND%20issuetype%20%3D%20Bug%20AND%20status%20%3D%20Closed%20AND%20fixVersion%20%3D%202.1.10) +* [MXS-1468](https://jira.mariadb.org/browse/MXS-1468) Using dynamic commands to create readwritesplit configs fail after restart +* [MXS-1459](https://jira.mariadb.org/browse/MXS-1459) Binlog checksum default value is wrong if a slave connects with checksum = NONE before master registration or master is not accessible at startup +* [MXS-1457](https://jira.mariadb.org/browse/MXS-1457) Deleted servers are not ignored when users are loaded * [MXS-1456](https://jira.mariadb.org/browse/MXS-1456) OOM when script variable is empty * [MXS-1451](https://jira.mariadb.org/browse/MXS-1451) Password is not stored with skip_authentication=true * [MXS-1450](https://jira.mariadb.org/browse/MXS-1450) Maxadmin commands with a leading space are silently ignored * [MXS-1449](https://jira.mariadb.org/browse/MXS-1449) Database change not allowed +* [MXS-1163](https://jira.mariadb.org/browse/MXS-1163) Log flood using binlog server on Ubuntu Yakkety Yak ## Packaging From 00b78e1f0f1873afd13f688e54a6b088c1933fbb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Mon, 23 Oct 2017 19:35:03 +0300 Subject: [PATCH 091/101] Update 2.2.1 release notes Added note about new REST 
API endpoints. --- .../Release-Notes/MaxScale-2.2.1-Release-Notes.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/Documentation/Release-Notes/MaxScale-2.2.1-Release-Notes.md b/Documentation/Release-Notes/MaxScale-2.2.1-Release-Notes.md index 7704331dc..61a903cd3 100644 --- a/Documentation/Release-Notes/MaxScale-2.2.1-Release-Notes.md +++ b/Documentation/Release-Notes/MaxScale-2.2.1-Release-Notes.md @@ -14,6 +14,16 @@ report at [Jira](https://jira.mariadb.org). ## New Features +### REST API Relationship Endpoints + +The _servers_, _monitors_ and _services_ types now support direct updating of +relationships via the `relationships` endpoints. This conforms to the JSON API +specification on updating resource relationships. + +For more information, refer to the REST API documentation. An example of this +can be found in the +[Server Resource documentation](../REST-API/Resources-Server.md#update-server-relationships). + +### PL/SQL Compatibility + +The parser of MaxScale has been extended to support the PL/SQL compatibility From 7df5b3d24a11ac6ac950a693329b1e7bad5e004d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Mon, 23 Oct 2017 19:42:07 +0300 Subject: [PATCH 092/101] Install git in test_maxctrl Git was not installed in the test. This should not be done in tests but it is an acceptable short-term workaround. Also run the script as root.
--- maxscale-system-test/test_maxctrl.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/maxscale-system-test/test_maxctrl.cpp b/maxscale-system-test/test_maxctrl.cpp index f92129f70..0227d383c 100644 --- a/maxscale-system-test/test_maxctrl.cpp +++ b/maxscale-system-test/test_maxctrl.cpp @@ -23,11 +23,11 @@ int main(int argc, char *argv[]) // TODO: Don't handle test dependencies in tests test.tprintf("Installing NPM"); - test.ssh_maxscale(true,"yum -y install epel-release;yum -y install npm;"); + test.ssh_maxscale(true,"yum -y install epel-release;yum -y install npm git;"); test.tprintf("Starting test"); test.verbose = true; - int rv = test.ssh_maxscale(false, "export maxscale2_API=%s:8989; ./test_maxctrl.sh", test.galera->IP[3]); + int rv = test.ssh_maxscale(true, "export maxscale2_API=%s:8989; ./test_maxctrl.sh", test.galera->IP[3]); test.verbose = false; test.tprintf("Removing NPM"); From 555aa6d2c8fdc0db058dadddf1a4cb6520044b23 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Tue, 24 Oct 2017 08:43:38 +0300 Subject: [PATCH 093/101] Fix test crash in sync_slaves The crash happens if the slave is not configured for replication or the connection is broken when results are read. Adding missing return value checks will fix it. 
--- maxscale-system-test/mariadb_nodes.cpp | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/maxscale-system-test/mariadb_nodes.cpp b/maxscale-system-test/mariadb_nodes.cpp index 4b8dc3403..a8a530c4c 100644 --- a/maxscale-system-test/mariadb_nodes.cpp +++ b/maxscale-system-test/mariadb_nodes.cpp @@ -1270,6 +1270,7 @@ static void wait_until_pos(MYSQL *mysql, int filenum, int pos) { int slave_filenum = 0; int slave_pos = 0; + bool error = false; do { @@ -1284,17 +1285,23 @@ static void wait_until_pos(MYSQL *mysql, int filenum, int pos) if (res) { MYSQL_ROW row = mysql_fetch_row(res); + error = true; - if (row && row[6] && row[21]) + if (row && row[5] && row[21]) { - char *file_suffix = strchr(row[5], '.') + 1; - slave_filenum = atoi(file_suffix); - slave_pos = atoi(row[21]); + char *file_suffix = strchr(row[5], '.'); + if (file_suffix) + { + file_suffix++; + slave_filenum = atoi(file_suffix); + slave_pos = atoi(row[21]); + error = false; + } } mysql_free_result(res); } } - while (slave_filenum < filenum || slave_pos < pos); + while ((slave_filenum < filenum || slave_pos < pos) && !error); } void Mariadb_nodes::sync_slaves(int node) From efeaecaef2e1d6c54ffcfb2493535b85da635779 Mon Sep 17 00:00:00 2001 From: Johan Wikman Date: Tue, 24 Oct 2017 09:41:23 +0300 Subject: [PATCH 094/101] MXS-1486 When there is fresh data, update the cache entry If something is SELECTed that should be cached for some, but not for the current user, the cached entry is nevertheless updated. That way the cached data will always be the last fetched value and it is also possible to use this behaviour for explicitly updating the cache entry.
--- .../filter/cache/cachefiltersession.cc | 135 +++++++++--------- .../filter/cache/cachefiltersession.hh | 2 - 2 files changed, 66 insertions(+), 71 deletions(-) diff --git a/server/modules/filter/cache/cachefiltersession.cc b/server/modules/filter/cache/cachefiltersession.cc index 1ec9a7deb..fe345396c 100644 --- a/server/modules/filter/cache/cachefiltersession.cc +++ b/server/modules/filter/cache/cachefiltersession.cc @@ -291,73 +291,95 @@ int CacheFilterSession::routeQuery(GWBUF* pPacket) { if (m_pCache->should_store(m_zDefaultDb, pPacket)) { - if (m_pCache->should_use(m_pSession)) + cache_result_t result = m_pCache->get_key(m_zDefaultDb, pPacket, &m_key); + + if (CACHE_RESULT_IS_OK(result)) { - GWBUF* pResponse; - cache_result_t result = get_cached_response(pPacket, &pResponse); - - if (CACHE_RESULT_IS_OK(result)) + if (m_pCache->should_use(m_pSession)) { - if (CACHE_RESULT_IS_STALE(result)) + uint32_t flags = CACHE_FLAGS_INCLUDE_STALE; + GWBUF* pResponse; + result = m_pCache->get_value(m_key, flags, &pResponse); + + if (CACHE_RESULT_IS_OK(result)) { - // The value was found, but it was stale. Now we need to - // figure out whether somebody else is already fetching it. - - if (m_pCache->must_refresh(m_key, this)) + if (CACHE_RESULT_IS_STALE(result)) { - // We were the first ones who hit the stale item. It's - // our responsibility now to fetch it. - if (log_decisions()) + // The value was found, but it was stale. Now we need to + // figure out whether somebody else is already fetching it. + + if (m_pCache->must_refresh(m_key, this)) { - MXS_NOTICE("Cache data is stale, fetching fresh from server."); + // We were the first ones who hit the stale item. It's + // our responsibility now to fetch it. + if (log_decisions()) + { + MXS_NOTICE("Cache data is stale, fetching fresh from server."); + } + + // As we don't use the response it must be freed. 
+ gwbuf_free(pResponse); + + m_refreshing = true; + fetch_from_server = true; + } + else + { + // Somebody is already fetching the new value. So, let's + // use the stale value. No point in hitting the server twice. + if (log_decisions()) + { + MXS_NOTICE("Cache data is stale but returning it, fresh " + "data is being fetched already."); + } + fetch_from_server = false; } - - // As we don't use the response it must be freed. - gwbuf_free(pResponse); - - m_refreshing = true; - fetch_from_server = true; } else { - // Somebody is already fetching the new value. So, let's - // use the stale value. No point in hitting the server twice. if (log_decisions()) { - MXS_NOTICE("Cache data is stale but returning it, fresh " - "data is being fetched already."); + MXS_NOTICE("Using fresh data from cache."); } fetch_from_server = false; } } else { - if (log_decisions()) - { - MXS_NOTICE("Using fresh data from cache."); - } - fetch_from_server = false; + fetch_from_server = true; + } + + if (fetch_from_server) + { + m_state = CACHE_EXPECTING_RESPONSE; + } + else + { + m_state = CACHE_EXPECTING_NOTHING; + gwbuf_free(pPacket); + DCB *dcb = m_pSession->client_dcb; + + // TODO: This is not ok. Any filters before this filter, will not + // TODO: see this data. + rv = dcb->func.write(dcb, pResponse); } } else { - fetch_from_server = true; - } - - if (fetch_from_server) - { + // We will not use any value in the cache, but we will update + // the existing value. + if (log_decisions()) + { + MXS_NOTICE("Unconditionally fetching data from the server, " + "refreshing cache entry."); + } m_state = CACHE_EXPECTING_RESPONSE; } - else - { - m_state = CACHE_EXPECTING_NOTHING; - gwbuf_free(pPacket); - DCB *dcb = m_pSession->client_dcb; - - // TODO: This is not ok. Any filters before this filter, will not - // TODO: see this data. 
- rv = dcb->func.write(dcb, pResponse); - } + } + else + { + MXS_ERROR("Could not create cache key."); + m_state = CACHE_IGNORING_RESPONSE; } } else @@ -775,31 +797,6 @@ void CacheFilterSession::reset_response_state() m_res.offset = 0; } -/** - * Route a query via the cache. - * - * @param key A SELECT packet. - * @param value The result. - * @return True if the query was satisfied from the query. - */ -cache_result_t CacheFilterSession::get_cached_response(const GWBUF *pQuery, GWBUF **ppResponse) -{ - cache_result_t result = m_pCache->get_key(m_zDefaultDb, pQuery, &m_key); - - if (CACHE_RESULT_IS_OK(result)) - { - uint32_t flags = CACHE_FLAGS_INCLUDE_STALE; - - result = m_pCache->get_value(m_key, flags, ppResponse); - } - else - { - MXS_ERROR("Could not create cache key."); - } - - return result; -} - /** * Store the data. * diff --git a/server/modules/filter/cache/cachefiltersession.hh b/server/modules/filter/cache/cachefiltersession.hh index d1e51985b..e19d309be 100644 --- a/server/modules/filter/cache/cachefiltersession.hh +++ b/server/modules/filter/cache/cachefiltersession.hh @@ -102,8 +102,6 @@ private: void reset_response_state(); - cache_result_t get_cached_response(const GWBUF *pQuery, GWBUF **ppResponse); - bool log_decisions() const { return m_pCache->config().debug & CACHE_DEBUG_DECISIONS ? 
true : false; From 94c066ff65768ba48619ad00959656f825231652 Mon Sep 17 00:00:00 2001 From: MassimilianoPinto Date: Tue, 24 Oct 2017 10:05:27 +0200 Subject: [PATCH 095/101] Update 2.2.1 release notes Update 2.2.1 release notes, added changed features for Binlog server --- Documentation/Release-Notes/MaxScale-2.2.1-Release-Notes.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/Documentation/Release-Notes/MaxScale-2.2.1-Release-Notes.md b/Documentation/Release-Notes/MaxScale-2.2.1-Release-Notes.md index 61a903cd3..7745e2d1f 100644 --- a/Documentation/Release-Notes/MaxScale-2.2.1-Release-Notes.md +++ b/Documentation/Release-Notes/MaxScale-2.2.1-Release-Notes.md @@ -9,6 +9,12 @@ For any problems you encounter, please consider submitting a bug report at [Jira](https://jira.mariadb.org). ## Changed Features +### Binlog server + +- MariaDB 10 GTID is always enabled for slave connections. +- Automatically set binlog storage to 'tree' mode when +_mariadb10_master_gtid_ option is on. + ## Dropped Features From de800766efd6658135e7c23f42342718ddabc691 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Tue, 24 Oct 2017 23:24:39 +0300 Subject: [PATCH 096/101] Build the replicaton-manager test The test wasn't built as it is not a part of the test suite. The executable should be built but it should not be added to the test suite. Changed the management script to only add the configuration and added a call to it at the start of the test. 
--- maxscale-system-test/CMakeLists.txt | 2 +- maxscale-system-test/manage_mrm.sh | 3 --- maxscale-system-test/replication_manager.cpp | 20 +++++++++----------- 3 files changed, 10 insertions(+), 15 deletions(-) diff --git a/maxscale-system-test/CMakeLists.txt b/maxscale-system-test/CMakeLists.txt index ef082b9e5..d0cb0d0cf 100644 --- a/maxscale-system-test/CMakeLists.txt +++ b/maxscale-system-test/CMakeLists.txt @@ -563,7 +563,7 @@ add_test_executable(rwsplit_multi_stmt.cpp rwsplit_multi_stmt rwsplit_multi_stmt add_test_executable(rwsplit_read_only_trx.cpp rwsplit_read_only_trx rwsplit_read_only_trx LABELS readwritesplit REPL_BACKEND) # Test replication-manager with MaxScale -#add_test_executable(replication_manager.cpp replication_manager replication_manager LABELS maxscale REPL_BACKEND) +add_test_executable_notest(replication_manager.cpp replication_manager replication_manager LABELS maxscale REPL_BACKEND) #add_test_executable_notest(replication_manager_2nodes.cpp replication_manager_2nodes replication_manager_2nodes LABELS maxscale REPL_BACKEND) #add_test_executable_notest(replication_manager_3nodes.cpp replication_manager_3nodes replication_manager_3nodes LABELS maxscale REPL_BACKEND) diff --git a/maxscale-system-test/manage_mrm.sh b/maxscale-system-test/manage_mrm.sh index 72f1bd3db..e68fe7a79 100755 --- a/maxscale-system-test/manage_mrm.sh +++ b/maxscale-system-test/manage_mrm.sh @@ -117,9 +117,6 @@ EOF do_ssh < manage_mrm.log"); - // if (!WIFEXITED(rc) || WEXITSTATUS(rc) != 0) - // { - // test.tprintf("Failed to install replication-manager, see manage_mrm.log for more details"); - // return -1; - // } + test.tprintf("Installing replication-manager"); + int rc = system("new_replication_manager=yes ./manage_mrm.sh install > manage_mrm.log"); + if (!WIFEXITED(rc) || WEXITSTATUS(rc) != 0) + { + test.tprintf("Failed to install replication-manager, see manage_mrm.log for more details"); + return -1; + } - // // Wait a few seconds - // sleep(5); + // Wait a few 
seconds + sleep(5); test.tprintf("Creating table and inserting data"); get_input(); From f805716700800066d21542bbcaf5dcc7f9e53edc Mon Sep 17 00:00:00 2001 From: MassimilianoPinto Date: Thu, 26 Oct 2017 11:32:06 +0200 Subject: [PATCH 097/101] MXS-1497: Don't skip events with LOG_EVENT_IGNORABLE_F flag Currently binlog server doesn't send to slaves these event types: - MARIADB10_START_ENCRYPTION_EVENT - IGNORABLE_EVENT It also skips events with LOG_EVENT_IGNORABLE_F flag. This modification allows sending events with that flag. --- server/modules/routing/binlogrouter/blr_slave.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/server/modules/routing/binlogrouter/blr_slave.c b/server/modules/routing/binlogrouter/blr_slave.c index f1f6eefe5..1eb6571bc 100644 --- a/server/modules/routing/binlogrouter/blr_slave.c +++ b/server/modules/routing/binlogrouter/blr_slave.c @@ -62,8 +62,7 @@ * 11/07/2016 Massimiliano Pinto Added SSL backend support * 24/08/2016 Massimiliano Pinto Added slave notification via CS_WAIT_DATA * 16/09/2016 Massimiliano Pinto Special events created by MaxScale are not sent to slaves: - * MARIADB10_START_ENCRYPTION_EVENT or IGNORABLE_EVENT - * Events with LOG_EVENT_IGNORABLE_F are skipped as well. + * MARIADB10_START_ENCRYPTION_EVENT or IGNORABLE_EVENT. * * @endverbatim */ @@ -2384,8 +2383,7 @@ blr_slave_catchup(ROUTER_INSTANCE *router, ROUTER_SLAVE *slave, bool large) /* Don't sent special events generated by MaxScale */ if (hdr.event_type == MARIADB10_START_ENCRYPTION_EVENT || - hdr.event_type == IGNORABLE_EVENT || - (hdr.flags & LOG_EVENT_IGNORABLE_F)) + hdr.event_type == IGNORABLE_EVENT) { /* In case of file rotation or pos = 4 the events are sent from position 4. * new FDE at pos 4 is read. 
From 96c3f0dda3b5a9640c4995f46ac8efec77686269 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Thu, 26 Oct 2017 21:54:47 +0300 Subject: [PATCH 098/101] Build explicit version of Jansson in build scripts As the Avro C API depends on the Jansson library, the build scripts must build it. This is not optimal as the Jansson version needs to be updated in two places. --- BUILD/install_build_deps.sh | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/BUILD/install_build_deps.sh b/BUILD/install_build_deps.sh index e8714c64a..9fb10ecfd 100755 --- a/BUILD/install_build_deps.sh +++ b/BUILD/install_build_deps.sh @@ -109,6 +109,24 @@ cd tcl8.6.5/unix sudo make install cd ../../.. + +# Jansson +git clone https://github.com/akheron/jansson.git +if [ $? != 0 ] +then + echo "Error cloning jansson" + exit 1 +fi + +cd jansson +git checkout v2.9 +mkdir build +cd build +cmake .. -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_C_FLAGS=-fPIC -DJANSSON_INSTALL_LIB_DIR=$install_libdir +make +sudo make install +cd ../../ + # Avro C API wget -r -l1 -nH --cut-dirs=2 --no-parent -A.tar.gz --no-directories http://mirror.netinch.com/pub/apache/avro/stable/c if [ $? 
!= 0 ] From d9bd977c35ce28e58f00f7dde34950a441239022 Mon Sep 17 00:00:00 2001 From: MassimilianoPinto Date: Fri, 27 Oct 2017 14:07:53 +0200 Subject: [PATCH 099/101] MXS-1499: Add missing fields to SHOW ALL SLAVES STATUS Now SHOW ALL SLAVES STATUS reports new fields: Retried_transactions; Max_relay_log_size, Executed_log_entries, Slave_received_heartbeats, Slave_heartbeat_period, Gtid_Slave_Pos" --- .../modules/routing/binlogrouter/blr_slave.c | 87 ++++++++++++++++--- 1 file changed, 77 insertions(+), 10 deletions(-) diff --git a/server/modules/routing/binlogrouter/blr_slave.c b/server/modules/routing/binlogrouter/blr_slave.c index fd5f4ce48..95fa3e5fa 100644 --- a/server/modules/routing/binlogrouter/blr_slave.c +++ b/server/modules/routing/binlogrouter/blr_slave.c @@ -89,6 +89,7 @@ #include #include #include +#include /** * This struct is used by sqlite3_exec callback routine @@ -1168,6 +1169,20 @@ static const char *mariadb10_gtid_status_columns[] = NULL }; +/* + * Extra Columns to send in "SHOW ALL SLAVES STATUS" MariaDB 10 command + */ +static const char *mariadb10_extra_status_columns[] = +{ + "Retried_transactions", + "Max_relay_log_size", + "Executed_log_entries", + "Slave_received_heartbeats", + "Slave_heartbeat_period", + "Gtid_Slave_Pos", + NULL +}; + /** * Send the response to the SQL command "SHOW SLAVE STATUS" or * SHOW ALL SLAVES STATUS @@ -1192,19 +1207,13 @@ blr_slave_send_slave_status(ROUTER_INSTANCE *router, int gtid_cols = 0; /* Count SHOW SLAVE STATUS the columns */ - while (slave_status_columns[ncols]) - { - ncols++; - } + ncols += MXS_ARRAY_NELEMS(slave_status_columns) - 1; /* Add the new SHOW ALL SLAVES STATUS columns */ if (all_slaves) { - int k = 0; - while (all_slaves_status_columns[k++]) - { - ncols++; - } + ncols += MXS_ARRAY_NELEMS(all_slaves_status_columns) - 1; + ncols += MXS_ARRAY_NELEMS(mariadb10_extra_status_columns) - 1; } /* Get the right GTID columns array */ @@ -1257,6 +1266,20 @@ blr_slave_send_slave_status(ROUTER_INSTANCE 
*router, seqno++); } + /* Send extra columns for SHOW ALL SLAVES STATUS */ + if (all_slaves) + { + for (i = 0; mariadb10_extra_status_columns[i]; i++) + { + blr_slave_send_columndef(router, + slave, + mariadb10_extra_status_columns[i], + BLR_TYPE_STRING, + 40, + seqno++); + } + } + /* Send EOF for columns def */ blr_slave_send_eof(router, slave, seqno++); @@ -1649,6 +1672,50 @@ blr_slave_send_slave_status(ROUTER_INSTANCE *router, ptr += col_len; } + if (all_slaves) + { + // Retried_transactions + sprintf(column, "%d", 0); + col_len = strlen(column); + *ptr++ = col_len; // Length of result string + memcpy((char *)ptr, column, col_len); // Result string + ptr += col_len; + + *ptr++ = 0; // Max_relay_log_size + *ptr++ = 0; // Executed_log_entries + + // Slave_received_heartbeats + sprintf(column, "%d", router->stats.n_heartbeats); + col_len = strlen(column); + *ptr++ = col_len; // Length of result string + memcpy((char *)ptr, column, col_len); // Result string + ptr += col_len; + + // Slave_heartbeat_period + sprintf(column, "%lu", router->heartbeat); + col_len = strlen(column); + *ptr++ = col_len; // Length of result string + memcpy((char *)ptr, column, col_len); // Result string + ptr += col_len; + + //Gtid_Slave_Pos + if (!router->mariadb10_gtid) + { + // No GTID support send empty values + *ptr++ = 0; + } + else + { + sprintf(column, + "%s", + router->last_mariadb_gtid); + col_len = strlen(column); + *ptr++ = col_len; // Length of result string + memcpy(ptr, column, col_len); // Result string + ptr += col_len; + } + } + *ptr++ = 0; actual_len = ptr - (uint8_t *)GWBUF_DATA(pkt); @@ -7462,7 +7529,7 @@ static bool blr_handle_set_stmt(ROUTER_INSTANCE *router, return true; } } - else if (strstr(word, "@slave_connect_state") != NULL) + else if (strcasestr(word, "@slave_connect_state") != NULL) { /* If not mariadb an error message will be returned */ if (slave->mariadb10_compat && From 465c015005d472fc755e6e854aafa81b36d9f09d Mon Sep 17 00:00:00 2001 From: Johan Wikman 
Date: Mon, 30 Oct 2017 09:46:40 +0200 Subject: [PATCH 100/101] Update release date in release notes --- Documentation/Release-Notes/MaxScale-2.1.10-Release-Notes.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/Release-Notes/MaxScale-2.1.10-Release-Notes.md b/Documentation/Release-Notes/MaxScale-2.1.10-Release-Notes.md index b2b2bd86b..65dc05134 100644 --- a/Documentation/Release-Notes/MaxScale-2.1.10-Release-Notes.md +++ b/Documentation/Release-Notes/MaxScale-2.1.10-Release-Notes.md @@ -1,4 +1,4 @@ -# MariaDB MaxScale 2.1.10 Release Notes +# MariaDB MaxScale 2.1.10 Release Notes -- 2017-10-30 Release 2.1.10 is a GA release. From 63cbf56cb2b80642ac7b2a6b4614c066beba131d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Sat, 28 Oct 2017 16:03:23 +0300 Subject: [PATCH 101/101] MXS-1500: Fix `real_type` values The characters in the type weren't checked for correctness which caused the processing to read more characters than was intended. --- server/modules/routing/avrorouter/avro_schema.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/modules/routing/avrorouter/avro_schema.c b/server/modules/routing/avrorouter/avro_schema.c index a8698f1d9..eabcac725 100644 --- a/server/modules/routing/avrorouter/avro_schema.c +++ b/server/modules/routing/avrorouter/avro_schema.c @@ -569,7 +569,7 @@ int extract_type_length(const char* ptr, char *dest) /** Skip characters until we either hit a whitespace character or the start * of the length definition. */ - while (*ptr && !isspace(*ptr) && *ptr != '(') + while (*ptr && isalpha(*ptr)) { ptr++; }