From 9b1d1303ce223428dc875781e16da46f6f839785 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Sun, 15 Jul 2018 21:17:39 +0300 Subject: [PATCH 01/28] MXS-1977: Archive session command on completion The legacy session command implementation is still partially used and a cleanup call was missing. This should be removed in the next major release. --- server/modules/protocol/MySQL/mariadbbackend/mysql_backend.c | 1 + 1 file changed, 1 insertion(+) diff --git a/server/modules/protocol/MySQL/mariadbbackend/mysql_backend.c b/server/modules/protocol/MySQL/mariadbbackend/mysql_backend.c index 350c54d32..65b0f8371 100644 --- a/server/modules/protocol/MySQL/mariadbbackend/mysql_backend.c +++ b/server/modules/protocol/MySQL/mariadbbackend/mysql_backend.c @@ -965,6 +965,7 @@ gw_read_and_write(DCB *dcb) stmt = read_buffer; read_buffer = NULL; gwbuf_set_type(stmt, GWBUF_TYPE_RESPONSE_END | GWBUF_TYPE_SESCMD_RESPONSE); + protocol_archive_srv_command(proto); } else { From 4fb4ed416b6787cf3293bb2191d9a933f34dde3e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Mon, 16 Jul 2018 05:53:44 +0300 Subject: [PATCH 02/28] MXS-1977: Fix protocol and readwritesplit memory leaks The protocol could leak memory in rare cases where several commands were queued at the same time. Readwritesplit also didn't free the memory it acquired via qc_get_table_names. --- server/modules/protocol/MySQL/mysql_common.cc | 3 +- .../readwritesplit/rwsplit_tmp_table_multi.cc | 36 +++++++++++-------- 2 files changed, 24 insertions(+), 15 deletions(-) diff --git a/server/modules/protocol/MySQL/mysql_common.cc b/server/modules/protocol/MySQL/mysql_common.cc index d1b396504..282e4cb38 100644 --- a/server/modules/protocol/MySQL/mysql_common.cc +++ b/server/modules/protocol/MySQL/mysql_common.cc @@ -637,8 +637,9 @@ void protocol_remove_srv_command(MySQLProtocol* p) } else { - p->protocol_command = *(s->scom_next); + server_command_t tmp = *(s->scom_next); MXS_FREE(s->scom_next); + p->protocol_command = tmp; } } diff --git a/server/modules/routing/readwritesplit/rwsplit_tmp_table_multi.cc b/server/modules/routing/readwritesplit/rwsplit_tmp_table_multi.cc index 43e62701c..a9e134eeb 100644 --- a/server/modules/routing/readwritesplit/rwsplit_tmp_table_multi.cc +++ b/server/modules/routing/readwritesplit/rwsplit_tmp_table_multi.cc @@ -51,24 +51,32 @@ static bool foreach_table(RWSplitSession* rses, GWBUF* querybuf, bool (*func)(RW int n_tables; char** tables = qc_get_table_names(querybuf, &n_tables, true); - for (int i = 0; i < n_tables; i++) + if (tables) { - const char* db = mxs_mysql_get_current_db(rses->client_dcb->session); - std::string table; - - if (strchr(tables[i], '.') == NULL) + for (int i = 0; i < n_tables; i++) { - table += db; - table += "."; + if (rval) + { + const char* db = mxs_mysql_get_current_db(rses->client_dcb->session); + std::string table; + + if (strchr(tables[i], '.') == NULL) + { + table += db; + table += "."; + } + + table += tables[i]; + + if (!func(rses, table)) + { + rval = false; + } + } + MXS_FREE(tables[i]); } - table += tables[i]; - - if (!func(rses, table)) - { - rval = false; - break; - } + MXS_FREE(tables); } return rval; From f77bf24df91769a4d1355087bdbfc9352d032dc4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Mon, 16 Jul 2018 09:11:21 +0300 Subject: [PATCH 03/28] Check remaining query length in SetSqlModeParser SetSqlModeParser would read uninitialized memory if a query consisting of only comments would be executed. 
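For illustration only, the guard added in the diff below amounts to a
remaining-length check before the keyword comparison (the member names m_pI
and m_pEnd are taken from setsqlmodeparser.hh; this is a sketch, not the full
parser):

    // At least the three bytes of "SET" plus one more character must remain
    // after whitespace and comments have been skipped, otherwise is_set()
    // would read past the end of the statement.
    bool long_enough = (m_pEnd - m_pI) > 3;

    if (long_enough && is_set(m_pI))
    {
        rv = parse(pSql_mode);
    }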
--- .../modules/protocol/MySQL/mariadbclient/setsqlmodeparser.hh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/server/modules/protocol/MySQL/mariadbclient/setsqlmodeparser.hh b/server/modules/protocol/MySQL/mariadbclient/setsqlmodeparser.hh index f0400584d..4ae428e17 100644 --- a/server/modules/protocol/MySQL/mariadbclient/setsqlmodeparser.hh +++ b/server/modules/protocol/MySQL/mariadbclient/setsqlmodeparser.hh @@ -156,7 +156,10 @@ public: bypass_whitespace(); - if (is_set(m_pI)) + // Check that there's enough characters to contain a SET keyword + bool long_enough = m_pEnd - m_pI > 3 ; + + if (long_enough && is_set(m_pI)) { rv = parse(pSql_mode); } From 25e2e1b7c6d71dfd47992811c8537ce38db8384f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Tue, 17 Jul 2018 12:32:59 +0300 Subject: [PATCH 04/28] MXS-1976: Reword explanation Made the wording clearer. --- Documentation/Reference/MaxAdmin.md | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/Documentation/Reference/MaxAdmin.md b/Documentation/Reference/MaxAdmin.md index 2e7d122dc..5242f53d2 100644 --- a/Documentation/Reference/MaxAdmin.md +++ b/Documentation/Reference/MaxAdmin.md @@ -692,9 +692,10 @@ _shutdown service_ command. This will not affect the connections that are already in place for a service, but will stop any new connections from being accepted. -Stopping a service will not cause new connections to be rejected. All new -connections that were creted while the service was stopped will be processed -normally once the service is restared. +Connection requests are not processed while a service is stopped. New connection +requests will remain in a queue that is processed once the service is +restarted. A client application will see old connections work normally but new +connections are unresponsive as long as the service is stopped. ``` MaxScale> shutdown service RWSplit From 8d9ccce2dd34d226fbc71e8c4a8052cce6ad57d4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Tue, 17 Jul 2018 13:05:52 +0300 Subject: [PATCH 05/28] Fix in-source build of MaxCtrl The npm install command must generate the version.js file in order for the in-source builds to work. 
--- maxctrl/CMakeLists.txt | 2 +- maxctrl/configure_version.cmake | 2 ++ maxctrl/package.json | 3 ++- 3 files changed, 5 insertions(+), 2 deletions(-) create mode 100644 maxctrl/configure_version.cmake diff --git a/maxctrl/CMakeLists.txt b/maxctrl/CMakeLists.txt index 67fe828e0..4ab468542 100644 --- a/maxctrl/CMakeLists.txt +++ b/maxctrl/CMakeLists.txt @@ -4,7 +4,7 @@ if (BUILD_MAXCTRL) if (NPM_FOUND AND NODEJS_FOUND AND NODEJS_VERSION VERSION_GREATER "6.0.0") - configure_file(${CMAKE_CURRENT_SOURCE_DIR}/lib/version.js.in ${CMAKE_CURRENT_BINARY_DIR}/lib/version.js @ONLY) + include(configure_version.cmake) add_custom_command(OUTPUT ${CMAKE_BINARY_DIR}/maxctrl/maxctrl COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/build.sh ${CMAKE_SOURCE_DIR} diff --git a/maxctrl/configure_version.cmake b/maxctrl/configure_version.cmake new file mode 100644 index 000000000..d8d40a87c --- /dev/null +++ b/maxctrl/configure_version.cmake @@ -0,0 +1,2 @@ +include(../VERSION22.cmake) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/lib/version.js.in ${CMAKE_CURRENT_BINARY_DIR}/lib/version.js @ONLY) diff --git a/maxctrl/package.json b/maxctrl/package.json index 4531d968d..43c11bbd5 100644 --- a/maxctrl/package.json +++ b/maxctrl/package.json @@ -5,7 +5,8 @@ "repository": "https://github.com/mariadb-corporation/MaxScale", "main": "maxctrl.js", "scripts": { - "test": "nyc mocha --timeout 15000 --slow 10000" + "test": "nyc mocha --timeout 15000 --slow 10000", + "preinstall": "cmake -P configure_version.cmake" }, "keywords": [ "maxscale" From f41cbaf2fc654c290ebcafaef575613f16e480b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Fri, 15 Jun 2018 10:14:37 +0300 Subject: [PATCH 06/28] Update MaxScale-Tutorial.md Added a clarifying comment about where the users must be created. --- Documentation/Tutorials/MaxScale-Tutorial.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Documentation/Tutorials/MaxScale-Tutorial.md b/Documentation/Tutorials/MaxScale-Tutorial.md index 6cb09c4fb..81e0ec9fc 100644 --- a/Documentation/Tutorials/MaxScale-Tutorial.md +++ b/Documentation/Tutorials/MaxScale-Tutorial.md @@ -25,7 +25,8 @@ when you select the distribution you are downloading from. After installation, we need to create a database user. We do this as we need to connect to the backend databases to retrieve the user authentication -information. To create this user, execute the following SQL commands. +information. To create this user, execute the following SQL commands on +the master server of your database cluster. ``` CREATE USER 'maxscale'@'%' IDENTIFIED BY 'maxscale_pw'; From 7f1f65b411354564afe41a919ec68fb7594732d5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Fri, 15 Jun 2018 12:36:13 +0300 Subject: [PATCH 07/28] Update Configuring-MariaDB-Monitor.md Added a note to monitor creation tutorial about automated failover. --- Documentation/Tutorials/Configuring-MariaDB-Monitor.md | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/Documentation/Tutorials/Configuring-MariaDB-Monitor.md b/Documentation/Tutorials/Configuring-MariaDB-Monitor.md index 15c3a15fb..6524d8554 100644 --- a/Documentation/Tutorials/Configuring-MariaDB-Monitor.md +++ b/Documentation/Tutorials/Configuring-MariaDB-Monitor.md @@ -27,7 +27,13 @@ the monitor waits between each monitoring loop. The monitor user requires the REPLICATION CLIENT privileges to do basic monitoring. To create a user with the proper grants, execute the following SQL. 
-``` +```sql CREATE USER 'monitor_user'@'%' IDENTIFIED BY 'my_password'; GRANT REPLICATION CLIENT on *.* to 'monitor_user'@'%'; ``` + +**Note:** If the automatic failover of the MariaDB Monitor will used, the user +will require additional grants. Execute the following SQL to grant them. +```sql +GRANT SUPER on *.* to 'monitor_user'@'%'; +``` From ddcaa5603bf2d902620cfe286e2c4d4b9cd8d5dc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Fri, 15 Jun 2018 13:19:47 +0300 Subject: [PATCH 08/28] Update Configuring-MariaDB-Monitor.md Added the missing RELOAD privilege. --- Documentation/Tutorials/Configuring-MariaDB-Monitor.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/Tutorials/Configuring-MariaDB-Monitor.md b/Documentation/Tutorials/Configuring-MariaDB-Monitor.md index 6524d8554..499732b48 100644 --- a/Documentation/Tutorials/Configuring-MariaDB-Monitor.md +++ b/Documentation/Tutorials/Configuring-MariaDB-Monitor.md @@ -35,5 +35,5 @@ GRANT REPLICATION CLIENT on *.* to 'monitor_user'@'%'; **Note:** If the automatic failover of the MariaDB Monitor will used, the user will require additional grants. Execute the following SQL to grant them. ```sql -GRANT SUPER on *.* to 'monitor_user'@'%'; +GRANT SUPER, RELOAD on *.* to 'monitor_user'@'%'; ``` From bf3a68339510546282b9560070da8b7cd7eaf06b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Fri, 15 Jun 2018 13:22:51 +0300 Subject: [PATCH 09/28] Update MariaDB-Monitor.md Added missing RELOAD privilege to monitor docs. --- Documentation/Monitors/MariaDB-Monitor.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Documentation/Monitors/MariaDB-Monitor.md b/Documentation/Monitors/MariaDB-Monitor.md index 39631929e..5b2d77f20 100644 --- a/Documentation/Monitors/MariaDB-Monitor.md +++ b/Documentation/Monitors/MariaDB-Monitor.md @@ -251,7 +251,7 @@ master), _switchover_ (swapping a slave with a running master) and _rejoin_ (joining a standalone server to the cluster). The features and the parameters controlling them are presented in this section. -These features require that the monitor user (`user`) has the SUPER privilege. +These features require that the monitor user (`user`) has the SUPER and RELOAD privileges. In addition, the monitor needs to know which username and password a slave should use when starting replication. These are given in `replication_user` and `replication_password`. @@ -370,7 +370,7 @@ error is logged and automatic failover is disabled. If this happens, the cluster must be fixed manually and the failover needs to be re-enabled via the REST API or MaxAdmin. -The monitor user must have the SUPER privilege for failover to work. +The monitor user must have the SUPER and RELOAD privileges for failover to work. #### `auto_rejoin` From 609a6723a85735a81cf507f5ebb67c2f56fa5f44 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Wed, 18 Jul 2018 14:21:59 +0300 Subject: [PATCH 10/28] MXS-1950: Log error on failed COM_CHANGE_USER If a client is executing a COM_CHANGE_USER command and the reauthentication of the client fails, no error message would be logged about the failure of the reauthentication process and only a routing failure message would be logged. 
--- server/modules/protocol/MySQL/mariadbclient/mysql_client.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/server/modules/protocol/MySQL/mariadbclient/mysql_client.cc b/server/modules/protocol/MySQL/mariadbclient/mysql_client.cc index 22db11462..e633939ed 100644 --- a/server/modules/protocol/MySQL/mariadbclient/mysql_client.cc +++ b/server/modules/protocol/MySQL/mariadbclient/mysql_client.cc @@ -1716,6 +1716,9 @@ static int route_by_statement(MXS_SESSION* session, uint64_t capabilities, GWBUF rc = 0; gwbuf_free(packetbuf); packetbuf = NULL; + MXS_ERROR("User reauthentication failed for '%s'@'%s'", + session->client_dcb->user, + session->client_dcb->remote); } } From 6b8d9dc5d9f422cf85a83cb7a800110b53ed928b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Wed, 18 Jul 2018 09:29:12 +0300 Subject: [PATCH 11/28] Print an error on invalid request JSON When a request to the REST API is made with invalid JSON, it's hard to see why the request fails due to the fact that no error is sent. --- server/core/admin.cc | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/server/core/admin.cc b/server/core/admin.cc index a8e14d1ed..266226b5c 100644 --- a/server/core/admin.cc +++ b/server/core/admin.cc @@ -106,8 +106,10 @@ int Client::process(string url, string method, const char* upload_data, size_t * if (m_data.length() && (json = json_loadb(m_data.c_str(), m_data.size(), 0, &err)) == NULL) { - MHD_Response *response = - MHD_create_response_from_buffer(0, NULL, MHD_RESPMEM_PERSISTENT); + string msg = string("{\"errors\": [ { \"detail\": \"Invalid JSON in request: ") + + err.text + "\" } ] }"; + MHD_Response *response = MHD_create_response_from_buffer(msg.size(), &msg[0], + MHD_RESPMEM_MUST_COPY); MHD_queue_response(m_connection, MHD_HTTP_BAD_REQUEST, response); MHD_destroy_response(response); return MHD_YES; From b5584e3fd0e04006b5de97bfbfaff9c1df7672e4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Thu, 19 Jul 2018 20:00:32 +0300 Subject: [PATCH 12/28] Backport test utility functions The resultset processing functions are helpful in writing tests that process resultsets. --- maxscale-system-test/mariadb_func.cpp | 30 +++++++++++++++++++++++++++ maxscale-system-test/mariadb_func.h | 24 +++++++++++++++++++++ 2 files changed, 54 insertions(+) diff --git a/maxscale-system-test/mariadb_func.cpp b/maxscale-system-test/mariadb_func.cpp index e1ffda07f..1ef1842da 100644 --- a/maxscale-system-test/mariadb_func.cpp +++ b/maxscale-system-test/mariadb_func.cpp @@ -515,6 +515,36 @@ int find_field(MYSQL* conn, const char* sql, const char* field_name, char* value return ret; } +Result get_result(MYSQL* conn, std::string sql) +{ + Result rval; + MYSQL_RES* res; + + if (mysql_query(conn, sql.c_str()) == 0 && (res = mysql_store_result(conn))) + { + MYSQL_ROW row = mysql_fetch_row(res); + + while (row) + { + rval.emplace_back(&row[0], &row[mysql_num_fields(res)]); + row = mysql_fetch_row(res); + } + mysql_free_result(res); + } + else + { + printf("Error: Query failed: %s\n", mysql_error(conn)); + } + + return rval; +} + +Row get_row(MYSQL* conn, std::string sql) +{ + Result res = get_result(conn, sql); + return res.empty() ? 
Row{} : res[0]; +} + int get_int_version(std::string version) { std::istringstream str(version); diff --git a/maxscale-system-test/mariadb_func.h b/maxscale-system-test/mariadb_func.h index 34afeb42e..e7157789c 100644 --- a/maxscale-system-test/mariadb_func.h +++ b/maxscale-system-test/mariadb_func.h @@ -24,6 +24,10 @@ #include #include #include +#include + +typedef std::vector Row; +typedef std::vector Result; /** * Opens connection to DB: wropper over mysql_real_connect @@ -204,6 +208,26 @@ int get_conn_num(MYSQL* conn, std::string ip, std::string hostname, std::string */ int find_field(MYSQL* conn, const char* sql, const char* field_name, char* value); +/** + * Execute a query and return the first row + * + * @param conn The connection to use + * @param sql The query to execute + * + * @return The first row as a list of strings + */ +Row get_row(MYSQL* conn, std::string sql); + +/** + * Execute a query and return the result + * + * @param conn The connection to use + * @param sql The query to execute + * + * @return The result as a list of rows + */ +Result get_result(MYSQL* conn, std::string sql); + int get_int_version(std::string version); #endif // MARIADB_FUNC_H From ee6e2b28b2b100fc4964254f190fe71f394bab90 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Thu, 19 Jul 2018 20:25:01 +0300 Subject: [PATCH 13/28] MXS-1985: Add concurrent KILL test case The test case runs parallel KILL queries and reproduces the problem. --- maxscale-system-test/CMakeLists.txt | 4 ++ maxscale-system-test/mxs1985_kill_hang.cpp | 55 ++++++++++++++++++++++ 2 files changed, 59 insertions(+) create mode 100644 maxscale-system-test/mxs1985_kill_hang.cpp diff --git a/maxscale-system-test/CMakeLists.txt b/maxscale-system-test/CMakeLists.txt index fd16e0ffa..d18cea2ff 100644 --- a/maxscale-system-test/CMakeLists.txt +++ b/maxscale-system-test/CMakeLists.txt @@ -961,6 +961,10 @@ set_tests_properties(mxs1958_insert_priv PROPERTIES WILL_FAIL TRUE) # https://jira.mariadb.org/browse/MXS-1961 add_test_executable(mxs1961_standalone_rejoin.cpp mxs1961_standalone_rejoin mxs1961_standalone_rejoin LABELS REPL_BACKEND) +# MXS-1985: MaxScale hangs on concurrent KILL processing +# https://jira.mariadb.org/browse/MXS-1985 +add_test_executable(mxs1985_kill_hang.cpp mxs1985_kill_hang replication LABELS REPL_BACKEND) + configure_file(templates.h.in templates.h @ONLY) include(CTest) diff --git a/maxscale-system-test/mxs1985_kill_hang.cpp b/maxscale-system-test/mxs1985_kill_hang.cpp new file mode 100644 index 000000000..2ee70392e --- /dev/null +++ b/maxscale-system-test/mxs1985_kill_hang.cpp @@ -0,0 +1,55 @@ +/** + * MXS-1985: MaxScale hangs on concurrent KILL processing + */ + +#include "testconnections.h" + +#include +#include +#include + +using namespace std; + +static atomic running{true}; + +int main(int argc, char *argv[]) +{ + TestConnections test(argc, argv); + vector threads; + + for (int i = 0; i < 20; i++) + { + threads.emplace_back([&, i]() + { + while (running) + { + MYSQL* c = test.maxscales->open_rwsplit_connection(); + + // It doesn't really matter if the connection ID exists, this is just a + // handy way of generating cross-thread communication. 
+ for (auto&& a: get_result(c, "SELECT id FROM information_schema.processlist" + " WHERE user like '%skysql%'")) + { + if (execute_query_silent(c, std::string("KILL " + a[0]).c_str())) + { + break; + } + } + + mysql_close(c); + } + }); + } + + sleep(10); + running = false; + + // If MaxScale hangs, at least one thread will not return in time + test.set_timeout(30); + for (auto&& a: threads) + { + a.join(); + } + + return test.global_result; +} From 30ac15817fbd03bd264c83567c677be0b6d7a1f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Thu, 19 Jul 2018 21:28:16 +0300 Subject: [PATCH 14/28] Assert that query queue contains complete packets The query queue in readwritesplit must not contain partial packets. If it does, something is broken as only complete packets should ever be in it. --- server/modules/routing/readwritesplit/readwritesplit.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/server/modules/routing/readwritesplit/readwritesplit.cc b/server/modules/routing/readwritesplit/readwritesplit.cc index 0973693dc..db3d86012 100644 --- a/server/modules/routing/readwritesplit/readwritesplit.cc +++ b/server/modules/routing/readwritesplit/readwritesplit.cc @@ -471,6 +471,7 @@ static bool route_stored_query(RWSplitSession *rses) { GWBUF* query_queue = modutil_get_next_MySQL_packet(&rses->query_queue); query_queue = gwbuf_make_contiguous(query_queue); + ss_dassert(query_queue); /** Store the query queue locally for the duration of the routeQuery call. * This prevents recursive calls into this function. */ From 101dad74a777eec51551c85615212983fe849775 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Thu, 19 Jul 2018 22:50:12 +0300 Subject: [PATCH 15/28] MXS-1985: Add debug assertions to dcb_foreach The dcb_foreach function is not safe to use from multiple threads at the same time. This should be asserted by checking that the function is called only from the main worker. The addition of this assertion also implies that only administrative operations should use the dcb_foreach function. To accommodate this change, the KILL command iteration needs to be adjusted. --- include/maxscale/dcb.h | 2 ++ server/core/dcb.cc | 1 + 2 files changed, 3 insertions(+) diff --git a/include/maxscale/dcb.h b/include/maxscale/dcb.h index 613da8450..578a74e55 100644 --- a/include/maxscale/dcb.h +++ b/include/maxscale/dcb.h @@ -356,6 +356,8 @@ static inline void dcb_readq_set(DCB *dcb, GWBUF *buffer) * * @deprecated You should not use this function, use dcb_foreach_parallel instead * + * @warning This must only be called from the main thread, otherwise deadlocks occur + * * @param func Function to call. The function should return @c true to continue iteration * and @c false to stop iteration earlier. The first parameter is a DCB and the second * is the value of @c data that the user provided. 
diff --git a/server/core/dcb.cc b/server/core/dcb.cc index eb1012a0a..86a7e49ab 100644 --- a/server/core/dcb.cc +++ b/server/core/dcb.cc @@ -2937,6 +2937,7 @@ private: bool dcb_foreach(bool(*func)(DCB *dcb, void *data), void *data) { + ss_dassert(Worker::get_current() == Worker::get(0)); SerialDcbTask task(func, data); Worker::execute_serially(task); return task.more(); From 21eef8a670f2b5fdc3143a63f6de0c33846cc2bd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Fri, 20 Jul 2018 04:30:54 +0300 Subject: [PATCH 16/28] MXS-1985: Kill connections inside workers The LocalClient micro-client required a reference to the session that was valid at construction time. This is the reason why the previous implementation used dcb_foreach to first gather the targets and then execute queries on them. By replacing this reference with pointers to the raw data it requires, we lift the requirement of the orignating session being alive at construction time. Now that the LocalClient no longer holds a reference to the session, the killing of the connection does not have to be done on the same thread that started the process. This prevents the deadlock that occurred when concurrect dcb_foreach calls were made. Replaced the unused dcb_foreach_parallel with a version of dcb_foreach that allows iteration of DCBs local to this worker. The dcb_foreach_local is the basis upon which all DCB access outside of administrative tasks should be built on. This change will introduce a regression in functionality: The client will no longer receive an error if no connections match the KILL query criteria. This is done to avoid having to synchronize the workers after they have performed the killing of their own connections. --- include/maxscale/dcb.h | 18 +- include/maxscale/protocol/mariadb_client.hh | 8 +- server/core/dcb.cc | 32 +--- server/modules/filter/tee/teesession.cc | 4 +- .../modules/protocol/MySQL/mariadb_client.cc | 23 +-- server/modules/protocol/MySQL/mysql_common.cc | 167 +++++++++++------- 6 files changed, 126 insertions(+), 126 deletions(-) diff --git a/include/maxscale/dcb.h b/include/maxscale/dcb.h index 578a74e55..a6e08a1d0 100644 --- a/include/maxscale/dcb.h +++ b/include/maxscale/dcb.h @@ -367,21 +367,15 @@ static inline void dcb_readq_set(DCB *dcb, GWBUF *buffer) bool dcb_foreach(bool (*func)(DCB *dcb, void *data), void *data); /** - * @brief Call a function for each connected DCB + * @brief Call a function for each connected DCB on the current worker * - * @note This function can call @c func from multiple thread at one time. + * @param func Function to call. The function should return @c true to continue + * iteration and @c false to stop iteration earlier. The first parameter + * is the current DCB. * - * @param func Function to call. The function should return @c true to continue iteration - * and @c false to stop iteration earlier. The first is a DCB and - * the second is this thread's value in the @c data array that - * the user provided. - * - * @param data Array of user provided data passed as the second parameter to @c func. - * The array must have more space for pointers thann the return - * value of `config_threadcount()`. The value passed to @c func will - * be the value of the array at the index of the current thread's ID. 
+ * @param data User provided data passed as the second parameter to @c func */ -void dcb_foreach_parallel(bool (*func)(DCB *dcb, void *data), void **data); +void dcb_foreach_local(bool (*func)(DCB *dcb, void *data), void *data); /** * @brief Return the port number this DCB is connected to diff --git a/include/maxscale/protocol/mariadb_client.hh b/include/maxscale/protocol/mariadb_client.hh index 161e89a59..9e28d3b18 100644 --- a/include/maxscale/protocol/mariadb_client.hh +++ b/include/maxscale/protocol/mariadb_client.hh @@ -37,8 +37,8 @@ public: * * @return New virtual client or NULL on error */ - static LocalClient* create(MXS_SESSION* session, SERVICE* service); - static LocalClient* create(MXS_SESSION* session, SERVER* server); + static LocalClient* create(MYSQL_session* session, MySQLProtocol* proto, SERVICE* service); + static LocalClient* create(MYSQL_session* session, MySQLProtocol* proto, SERVER* server); /** * Queue a new query for execution @@ -57,8 +57,8 @@ public: void self_destruct(); private: - static LocalClient* create(MXS_SESSION* session, const char* ip, uint64_t port); - LocalClient(MXS_SESSION* session, int fd); + static LocalClient* create(MYSQL_session* session, MySQLProtocol* proto, const char* ip, uint64_t port); + LocalClient(MYSQL_session* session, MySQLProtocol* proto, int fd); static uint32_t poll_handler(struct mxs_poll_data* data, int wid, uint32_t events); void process(uint32_t events); GWBUF* read_complete_packet(); diff --git a/server/core/dcb.cc b/server/core/dcb.cc index 86a7e49ab..3036a3098 100644 --- a/server/core/dcb.cc +++ b/server/core/dcb.cc @@ -2943,39 +2943,17 @@ bool dcb_foreach(bool(*func)(DCB *dcb, void *data), void *data) return task.more(); } -/** Helper class for parallel iteration over all DCBs */ -class ParallelDcbTask : public WorkerTask +void dcb_foreach_local(bool(*func)(DCB *dcb, void *data), void *data) { -public: + int thread_id = Worker::get_current_id(); - ParallelDcbTask(bool(*func)(DCB *, void *), void **data): - m_func(func), - m_data(data) + for (DCB *dcb = this_unit.all_dcbs[thread_id]; dcb; dcb = dcb->thread.next) { - } - - void execute(Worker& worker) - { - int thread_id = worker.id(); - - for (DCB *dcb = this_unit.all_dcbs[thread_id]; dcb; dcb = dcb->thread.next) + if (!func(dcb, data)) { - if (!m_func(dcb, m_data[thread_id])) - { - break; - } + break; } } - -private: - bool(*m_func)(DCB *dcb, void *data); - void** m_data; -}; - -void dcb_foreach_parallel(bool(*func)(DCB *dcb, void *data), void **data) -{ - ParallelDcbTask task(func, data); - Worker::execute_concurrently(task); } int dcb_get_port(const DCB *dcb) diff --git a/server/modules/filter/tee/teesession.cc b/server/modules/filter/tee/teesession.cc index 1696f5af4..5f83cce19 100644 --- a/server/modules/filter/tee/teesession.cc +++ b/server/modules/filter/tee/teesession.cc @@ -101,7 +101,9 @@ TeeSession* TeeSession::create(Tee* my_instance, MXS_SESSION* session) return NULL; } - if ((client = LocalClient::create(session, my_instance->get_service())) == NULL) + if ((client = LocalClient::create((MYSQL_session*)session->client_dcb->data, + (MySQLProtocol*)session->client_dcb->protocol, + my_instance->get_service())) == NULL) { return NULL; } diff --git a/server/modules/protocol/MySQL/mariadb_client.cc b/server/modules/protocol/MySQL/mariadb_client.cc index 74c9cddc5..2fbd8180d 100644 --- a/server/modules/protocol/MySQL/mariadb_client.cc +++ b/server/modules/protocol/MySQL/mariadb_client.cc @@ -25,20 +25,15 @@ static const uint32_t poll_events = EPOLLIN | EPOLLOUT | 
EPOLLET | ERROR_EVENTS; -LocalClient::LocalClient(MXS_SESSION* session, int fd): +LocalClient::LocalClient(MYSQL_session* session, MySQLProtocol* proto, int fd): m_state(VC_WAITING_HANDSHAKE), m_sock(fd), m_expected_bytes(0), - m_client({}), - m_protocol({}), + m_client(*session), + m_protocol(*proto), m_self_destruct(false) { MXS_POLL_DATA::handler = LocalClient::poll_handler; - MySQLProtocol* client = (MySQLProtocol*)session->client_dcb->protocol; - m_protocol.charset = client->charset; - m_protocol.client_capabilities = client->client_capabilities; - m_protocol.extra_capabilities = client->extra_capabilities; - gw_get_shared_session_auth_info(session->client_dcb, &m_client); } LocalClient::~LocalClient() @@ -237,7 +232,7 @@ uint32_t LocalClient::poll_handler(struct mxs_poll_data* data, int wid, uint32_t return 0; } -LocalClient* LocalClient::create(MXS_SESSION* session, const char* ip, uint64_t port) +LocalClient* LocalClient::create(MYSQL_session* session, MySQLProtocol* proto, const char* ip, uint64_t port) { LocalClient* rval = NULL; sockaddr_storage addr; @@ -245,7 +240,7 @@ LocalClient* LocalClient::create(MXS_SESSION* session, const char* ip, uint64_t if (fd > 0 && (connect(fd, (struct sockaddr*)&addr, sizeof(addr)) == 0 || errno == EINPROGRESS)) { - LocalClient* relay = new (std::nothrow) LocalClient(session, fd); + LocalClient* relay = new (std::nothrow) LocalClient(session, proto, fd); if (relay) { @@ -271,7 +266,7 @@ LocalClient* LocalClient::create(MXS_SESSION* session, const char* ip, uint64_t return rval; } -LocalClient* LocalClient::create(MXS_SESSION* session, SERVICE* service) +LocalClient* LocalClient::create(MYSQL_session* session, MySQLProtocol* proto, SERVICE* service) { LocalClient* rval = NULL; LISTENER_ITERATOR iter; @@ -282,7 +277,7 @@ LocalClient* LocalClient::create(MXS_SESSION* session, SERVICE* service) if (listener->port > 0) { /** Pick the first network listener */ - rval = create(session, "127.0.0.1", service->ports->port); + rval = create(session, proto, "127.0.0.1", service->ports->port); break; } } @@ -290,7 +285,7 @@ LocalClient* LocalClient::create(MXS_SESSION* session, SERVICE* service) return rval; } -LocalClient* LocalClient::create(MXS_SESSION* session, SERVER* server) +LocalClient* LocalClient::create(MYSQL_session* session, MySQLProtocol* proto, SERVER* server) { - return create(session, server->name, server->port); + return create(session, proto, server->name, server->port); } diff --git a/server/modules/protocol/MySQL/mysql_common.cc b/server/modules/protocol/MySQL/mysql_common.cc index 282e4cb38..6121f2ef4 100644 --- a/server/modules/protocol/MySQL/mysql_common.cc +++ b/server/modules/protocol/MySQL/mysql_common.cc @@ -19,7 +19,7 @@ #include #include -#include +#include #include #include @@ -30,6 +30,7 @@ #include #include #include +#include uint8_t null_client_sha1[MYSQL_SCRAMBLE_LEN] = ""; @@ -1586,17 +1587,60 @@ bool mxs_mysql_command_will_respond(uint8_t cmd) cmd != MXS_COM_STMT_CLOSE; } -typedef std::vector< std::pair > TargetList; +namespace +{ + +// Servers and queries to execute on them +typedef std::map TargetList; struct KillInfo { - uint64_t target_id; + typedef bool (*DcbCallback)(DCB *dcb, void *data); + + KillInfo(std::string query, MXS_SESSION* ses, DcbCallback callback): + origin(mxs_worker_get_current_id()), + query_base(query), + protocol(*(MySQLProtocol*)ses->client_dcb->protocol), + cb(callback) + { + gw_get_shared_session_auth_info(ses->client_dcb, &session); + } + + int origin; + std::string query_base; + MYSQL_session 
session; + MySQLProtocol protocol; + DcbCallback cb; TargetList targets; }; +static bool kill_func(DCB *dcb, void *data); + +struct ConnKillInfo: public KillInfo +{ + ConnKillInfo(uint64_t id, std::string query, MXS_SESSION* ses): + KillInfo(query, ses, kill_func), + target_id(id) + {} + + uint64_t target_id; +}; + +static bool kill_user_func(DCB *dcb, void *data); + +struct UserKillInfo: public KillInfo +{ + UserKillInfo(std::string name, std::string query, MXS_SESSION* ses): + KillInfo(query, ses, kill_user_func), + user(name) + {} + + std::string user; +}; + static bool kill_func(DCB *dcb, void *data) { - KillInfo* info = (KillInfo*)data; + ConnKillInfo* info = static_cast(data); if (dcb->dcb_role == DCB_ROLE_BACKEND_HANDLER && dcb->session->ses_id == info->target_id) @@ -1606,7 +1650,9 @@ static bool kill_func(DCB *dcb, void *data) if (proto->thread_id) { // DCB is connected and we know the thread ID so we can kill it - info->targets.push_back(std::make_pair(dcb->server, proto->thread_id)); + std::stringstream ss; + ss << info->query_base << proto->thread_id; + info->targets[dcb->server] = ss.str(); } else { @@ -1619,82 +1665,29 @@ static bool kill_func(DCB *dcb, void *data) return true; } -void mxs_mysql_execute_kill(MXS_SESSION* issuer, uint64_t target_id, kill_type_t type) -{ - // Gather a list of servers and connection IDs to kill - KillInfo info = {target_id}; - dcb_foreach(kill_func, &info); - - if (info.targets.empty()) - { - // No session found, send an error - std::stringstream err; - err << "Unknown thread id: " << target_id; - mysql_send_standard_error(issuer->client_dcb, 1, 1094, err.str().c_str()); - } - else - { - // Execute the KILL on all of the servers - for (TargetList::iterator it = info.targets.begin(); - it != info.targets.end(); it++) - { - LocalClient* client = LocalClient::create(issuer, it->first); - const char* hard = (type & KT_HARD) ? "HARD " : - (type & KT_SOFT) ? "SOFT " : - ""; - const char* query = (type & KT_QUERY) ? "QUERY " : ""; - std::stringstream ss; - ss << "KILL " << hard << query << it->second; - GWBUF* buffer = modutil_create_query(ss.str().c_str()); - client->queue_query(buffer); - gwbuf_free(buffer); - - // The LocalClient needs to delete itself once the queries are done - client->self_destruct(); - } - mxs_mysql_send_ok(issuer->client_dcb, 1, 0, NULL); - } -} - -typedef std::set ServerSet; - -struct KillUserInfo -{ - std::string user; - ServerSet targets; -}; - - static bool kill_user_func(DCB *dcb, void *data) { - KillUserInfo* info = (KillUserInfo*)data; + UserKillInfo* info = (UserKillInfo*)data; if (dcb->dcb_role == DCB_ROLE_BACKEND_HANDLER && strcasecmp(dcb->session->client_dcb->user, info->user.c_str()) == 0) { - info->targets.insert(dcb->server); + info->targets[dcb->server] = info->query_base; } return true; } -void mxs_mysql_execute_kill_user(MXS_SESSION* issuer, const char* user, kill_type_t type) +static void worker_func(int thread_id, void* data) { - // Gather a list of servers and connection IDs to kill - KillUserInfo info = {user}; - dcb_foreach(kill_user_func, &info); + KillInfo* info = static_cast(data); + dcb_foreach_local(info->cb, info); - // Execute the KILL on all of the servers - for (ServerSet::iterator it = info.targets.begin(); - it != info.targets.end(); it++) + for (TargetList::iterator it = info->targets.begin(); + it != info->targets.end(); it++) { - LocalClient* client = LocalClient::create(issuer, *it); - const char* hard = (type & KT_HARD) ? "HARD " : - (type & KT_SOFT) ? 
"SOFT " : ""; - const char* query = (type & KT_QUERY) ? "QUERY " : ""; - std::stringstream ss; - ss << "KILL " << hard << query << "USER " << user; - GWBUF* buffer = modutil_create_query(ss.str().c_str()); + LocalClient* client = LocalClient::create(&info->session, &info->protocol, it->first); + GWBUF* buffer = modutil_create_query(it->second.c_str()); client->queue_query(buffer); gwbuf_free(buffer); @@ -1702,5 +1695,43 @@ void mxs_mysql_execute_kill_user(MXS_SESSION* issuer, const char* user, kill_typ client->self_destruct(); } - mxs_mysql_send_ok(issuer->client_dcb, info.targets.size(), 0, NULL); + delete info; +} + +} + +void mxs_mysql_execute_kill(MXS_SESSION* issuer, uint64_t target_id, kill_type_t type) +{ + const char* hard = (type & KT_HARD) ? "HARD " : (type & KT_SOFT) ? "SOFT " : ""; + const char* query = (type & KT_QUERY) ? "QUERY " : ""; + std::stringstream ss; + ss << "KILL " << hard << query; + + for (int i = 0; i < config_threadcount(); i++) + { + MXS_WORKER* worker = mxs_worker_get(i); + ss_dassert(worker); + mxs_worker_post_message(worker, MXS_WORKER_MSG_CALL, (intptr_t)worker_func, + (intptr_t)new ConnKillInfo(target_id, ss.str(), issuer)); + } + + mxs_mysql_send_ok(issuer->client_dcb, 1, 0, NULL); +} + +void mxs_mysql_execute_kill_user(MXS_SESSION* issuer, const char* user, kill_type_t type) +{ + const char* hard = (type & KT_HARD) ? "HARD " : (type & KT_SOFT) ? "SOFT " : ""; + const char* query = (type & KT_QUERY) ? "QUERY " : ""; + std::stringstream ss; + ss << "KILL " << hard << query << "USER " << user; + + for (int i = 0; i < config_threadcount(); i++) + { + MXS_WORKER* worker = mxs_worker_get(i); + ss_dassert(worker); + mxs_worker_post_message(worker, MXS_WORKER_MSG_CALL, (intptr_t)worker_func, + (intptr_t)new UserKillInfo(user, ss.str(), issuer)); + } + + mxs_mysql_send_ok(issuer->client_dcb, 1, 0, NULL); } From d68f20b75ba201a0da46757f91c82a337e314c79 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Fri, 20 Jul 2018 04:50:58 +0300 Subject: [PATCH 17/28] Also copy version files for MaxCtrl builds When MaxCtrl is being built, the source is copied into the build directory to prevent polluting the source tree with node_modules. This means that any relative references MaxCtrl builds make outside of the maxctrl directory must be copied to the build directory. --- maxctrl/build.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/maxctrl/build.sh b/maxctrl/build.sh index 75126b93e..8edcc4d2a 100755 --- a/maxctrl/build.sh +++ b/maxctrl/build.sh @@ -12,6 +12,7 @@ if [ "$PWD" != "$src" ] then # Copy sources to working directory cp -r -t $PWD/maxctrl $src/maxctrl/* + cp -r -t $PWD/ $src/VERSION*.cmake fi cd $PWD/maxctrl From bbe4f42935d68dd6cf487011cacdb7ba1ae37b87 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Fri, 20 Jul 2018 11:16:39 +0300 Subject: [PATCH 18/28] Add more packet splitting debug assertions Having more debug assertions in functions that split packets guarantees that they work as expected. 
--- server/core/buffer.cc | 1 + server/core/modutil.cc | 12 ++++++++++++ .../protocol/MySQL/mariadbclient/mysql_client.cc | 12 ------------ 3 files changed, 13 insertions(+), 12 deletions(-) diff --git a/server/core/buffer.cc b/server/core/buffer.cc index f49e95653..f870c838d 100644 --- a/server/core/buffer.cc +++ b/server/core/buffer.cc @@ -795,6 +795,7 @@ gwbuf_make_contiguous(GWBUF *orig) if (orig == NULL) { + ss_info_dassert(!true, "gwbuf_make_contiguous: NULL buffer"); return NULL; } if (orig->next == NULL) diff --git a/server/core/modutil.cc b/server/core/modutil.cc index 527e64ae7..ad1bdde80 100644 --- a/server/core/modutil.cc +++ b/server/core/modutil.cc @@ -485,6 +485,17 @@ int modutil_send_mysql_err_packet(DCB *dcb, return dcb->func.write(dcb, buf); } +// Helper function for debug assertions +static bool only_one_packet(GWBUF* buffer) +{ + ss_dassert(buffer); + uint8_t header[4] = {}; + gwbuf_copy_data(buffer, 0, MYSQL_HEADER_LEN, header); + size_t packet_len = gw_mysql_get_byte3(header); + size_t buffer_len = gwbuf_length(buffer); + return packet_len + MYSQL_HEADER_LEN == buffer_len; +} + /** * Return the first packet from a buffer. * @@ -531,6 +542,7 @@ GWBUF* modutil_get_next_MySQL_packet(GWBUF** p_readbuf) } } + ss_dassert(!packet || only_one_packet(packet)); return packet; } diff --git a/server/modules/protocol/MySQL/mariadbclient/mysql_client.cc b/server/modules/protocol/MySQL/mariadbclient/mysql_client.cc index e633939ed..915b37d71 100644 --- a/server/modules/protocol/MySQL/mariadbclient/mysql_client.cc +++ b/server/modules/protocol/MySQL/mariadbclient/mysql_client.cc @@ -1576,17 +1576,6 @@ static bool reauthenticate_client(MXS_SESSION* session, GWBUF* packetbuf) return rval; } -// Helper function for debug assertions -static bool only_one_packet(GWBUF* buffer) -{ - ss_dassert(buffer); - uint8_t header[4] = {}; - gwbuf_copy_data(buffer, 0, MYSQL_HEADER_LEN, header); - size_t packet_len = gw_mysql_get_byte3(header); - size_t buffer_len = gwbuf_length(buffer); - return packet_len + MYSQL_HEADER_LEN == buffer_len; -} - /** * Detect if buffer includes partial mysql packet or multiple packets. * Store partial packet to dcb_readqueue. Send complete packets one by one @@ -1616,7 +1605,6 @@ static int route_by_statement(MXS_SESSION* session, uint64_t capabilities, GWBUF if (packetbuf != NULL) { - ss_dassert(only_one_packet(packetbuf)); CHK_GWBUF(packetbuf); MySQLProtocol* proto = (MySQLProtocol*)session->client_dcb->protocol; From 37f32464f2188d5b96a86ab7470bfeb78092c5ca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Fri, 20 Jul 2018 12:22:15 +0300 Subject: [PATCH 19/28] Rewrite mxs564_big_dump Streamlined the test to perform as much testing as fast as possible. The gradual ramp up did not provide any concrete benefits compared to testing everything at once. Replaced structures with C++11 alternatives where possible and removed unused, redundant or dead code. 
--- maxscale-system-test/mxs564_big_dump.cpp | 259 ++++++----------------- maxscale-system-test/testconnections.cpp | 2 + 2 files changed, 62 insertions(+), 199 deletions(-) diff --git a/maxscale-system-test/mxs564_big_dump.cpp b/maxscale-system-test/mxs564_big_dump.cpp index f7e3a087c..c8aa07e2f 100644 --- a/maxscale-system-test/mxs564_big_dump.cpp +++ b/maxscale-system-test/mxs564_big_dump.cpp @@ -9,228 +9,89 @@ * - check Maxscale is alive */ - #include "testconnections.h" #include "sql_t1.h" -//#include "get_com_select_insert.h" -typedef struct +#include +#include +#include + +static std::atomic running{true}; + +void query_thread(TestConnections* t) { - int exit_flag; - int thread_id; - long i; - int rwsplit_only; - TestConnections * Test; - MYSQL * conn1; - MYSQL * conn2; - MYSQL * conn3; -} openclose_thread_data; -void *query_thread1( void *ptr ); + TestConnections& test = *t; // For some reason CentOS 7 doesn't like passing references to std::thread + std::string sql(1000000, '\0'); + create_insert_string(&sql[0], 1000, 2); + + MYSQL* conn1 = test.maxscales->open_rwsplit_connection(); + MYSQL* conn2 = test.maxscales->open_readconn_master_connection(); + + test.add_result(mysql_errno(conn1), "Error connecting to readwritesplit: %s", mysql_error(conn1)); + test.add_result(mysql_errno(conn2), "Error connecting to readconnroute: %s", mysql_error(conn2)); + + test.try_query(conn1, "SET SESSION SQL_LOG_BIN=0"); + test.try_query(conn2, "SET SESSION SQL_LOG_BIN=0"); + + while (running) + { + test.try_query(conn1, "%s", sql.c_str()); + test.try_query(conn2, "%s", sql.c_str()); + } + + mysql_close(conn1); + mysql_close(conn2); +} int main(int argc, char *argv[]) { - TestConnections * Test = new TestConnections(argc, argv); - Test->stop_timeout(); + TestConnections test(argc, argv); - int threads_num = 4; - openclose_thread_data data[threads_num]; + int master = test.maxscales->find_master_maxadmin(test.galera); + test.tprintf("Master: %d", master); + std::set slaves{0, 1, 2, 3}; + slaves.erase(master); - int i; - int run_time = 100; + test.maxscales->connect(); + test.try_query(test.maxscales->conn_rwsplit[0], "DROP TABLE IF EXISTS t1"); + test.try_query(test.maxscales->conn_rwsplit[0], "CREATE TABLE t1 (x1 int, fl int)"); + test.maxscales->disconnect(); - if (Test->smoke) + std::vector threads; + + for (int i = 0; i < 4; i++) { - run_time = 10; + threads.emplace_back(query_thread, &test); } - for (i = 0; i < threads_num; i++) + for (auto&& i : slaves) { - data[i].i = 0; - data[i].exit_flag = 0; - data[i].Test = Test; - data[i].rwsplit_only = 1; - data[i].thread_id = i; + test.tprintf("Blocking node %d", i); + test.galera->block_node(i); + test.maxscales->wait_for_monitor(); } + test.tprintf("Unblocking nodes\n"); - pthread_t thread1[threads_num]; - - //Test->repl->flush_hosts(); - Test->set_timeout(20); - int master = Test->maxscales->find_master_maxadmin(Test->galera); - Test->stop_timeout(); - Test->tprintf(("Master is %d\n"), master); - int k = 0; - int x = 0; - int slaves[2]; - while (k < 2 ) + for (auto&& i : slaves) { - if (x != master) - { - slaves[k] = x; - k++; - x++; - } - else - { - x++; - } - } - Test->tprintf(("Slave1 is %d\n"), slaves[0]); - Test->tprintf(("Slave2 is %d\n"), slaves[1]); - - Test->set_timeout(20); - Test->repl->connect(); - Test->maxscales->connect_maxscale(0); - Test->set_timeout(20); - create_t1(Test->maxscales->conn_rwsplit[0]); - Test->repl->execute_query_all_nodes((char *) "set global max_connections = 2000;"); - - Test->set_timeout(20); - 
Test->try_query(Test->maxscales->conn_rwsplit[0], (char *) "DROP TABLE IF EXISTS t1"); - Test->try_query(Test->maxscales->conn_rwsplit[0], (char *) "CREATE TABLE t1 (x1 int, fl int)"); - - for (i = 0; i < threads_num; i++) - { - data[i].rwsplit_only = 1; - } - /* Create independent threads each of them will execute function */ - for (i = 0; i < threads_num; i++) - { - pthread_create(&thread1[i], NULL, query_thread1, &data[i]); - } - Test->tprintf("Threads are running %d seconds \n", run_time); - - Test->set_timeout(3 * run_time + 60); - sleep(20); - sleep(run_time); - Test->tprintf("Blocking slave %d\n", slaves[0]); - Test->galera->block_node(slaves[0]); - sleep(run_time); - Test->galera->block_node(slaves[1]); - Test->tprintf("Blocking slave %d\n", slaves[1]); - sleep(run_time); - Test->tprintf("Unblocking slaves\n"); - Test->galera->unblock_node(slaves[0]); - Test->galera->unblock_node(slaves[1]); - - Test->set_timeout(120); - Test->tprintf("Waiting for all threads exit\n"); - for (i = 0; i < threads_num; i++) - { - data[i].exit_flag = 1; - pthread_join(thread1[i], NULL); - Test->tprintf("exit %d\n", i); + test.galera->unblock_node(i); } - Test->tprintf("all maxscales->routers[0] are involved, threads are running %d seconds more\n", run_time); + test.maxscales->wait_for_monitor(); - for (i = 0; i < threads_num; i++) + running = false; + test.set_timeout(120); + test.tprintf("Waiting for all threads to exit"); + + for (auto&& a : threads) { - data[i].rwsplit_only = 0; - } - for (i = 0; i < threads_num; i++) - { - pthread_create(&thread1[i], NULL, query_thread1, &data[i]); + a.join(); } - Test->set_timeout(3 * run_time + 60); - sleep(20); - sleep(run_time); - Test->tprintf("Blocking node %d\n", slaves[0]); - Test->galera->block_node(slaves[0]); - sleep(run_time); - Test->tprintf("Blocking node %d\n", slaves[1]); - Test->galera->block_node(slaves[1]); - sleep(run_time); - Test->tprintf("Unblocking nodes\n"); - Test->galera->unblock_node(slaves[0]); - Test->galera->unblock_node(slaves[1]); + test.maxscales->connect(); + execute_query(test.maxscales->conn_rwsplit[0], "DROP TABLE t1"); + test.maxscales->disconnect(); - Test->set_timeout(120); - Test->tprintf("Waiting for all threads exit\n"); - for (i = 0; i < threads_num; i++) - { - data[i].exit_flag = 1; - pthread_join(thread1[i], NULL); - } - - sleep(5); - - Test->set_timeout(60); - Test->tprintf("set global max_connections = 100 for all backends\n"); - Test->repl->execute_query_all_nodes((char *) "set global max_connections = 100;"); - Test->tprintf("Drop t1\n"); - Test->try_query(Test->maxscales->conn_rwsplit[0], (char *) "DROP TABLE IF EXISTS t1;"); - Test->maxscales->close_maxscale_connections(0); - - Test->tprintf("Checking if Maxscale alive\n"); - Test->check_maxscale_alive(0); - //Test->tprintf("Checking log for unwanted errors\n"); - //Test->check_log_err(0, (char *) "due to authentication failure", false); - //Test->check_log_err(0, (char *) "fatal signal 11", false); - //Test->check_log_err(0, (char *) "due to handshake failure", false); - //Test->check_log_err(0, (char *) "Refresh rate limit exceeded for load of users' table", false); - - int rval = Test->global_result; - delete Test; - return rval; -} - -void *query_thread1( void *ptr ) -{ - openclose_thread_data * data = (openclose_thread_data *) ptr; - char sql[1000000]; - sleep(data->thread_id); - create_insert_string(sql, 1000, 2); - - data->conn1 = data->Test->maxscales->open_rwsplit_connection(0); - if ((data->conn1 == NULL) || (mysql_errno(data->conn1) != 0 )) - { - 
data->Test->add_result(1, "Error connecting to RWSplit\n"); - return NULL; - } - - data->Test->try_query(data->conn1, (char *) "SET SESSION SQL_LOG_BIN=0;"); - - if (data->rwsplit_only == 0) - { - data->conn2 = data->Test->maxscales->open_readconn_master_connection(0); - if ((data->conn2 == NULL) || (mysql_errno(data->conn2) != 0 )) - { - data->Test->add_result(1, "Error connecting to ReadConn Master\n"); - return NULL; - } - data->Test->try_query(data->conn2, (char *) "SET SESSION SQL_LOG_BIN=0;"); - } - - while (data->exit_flag == 0) - { - if (data->Test->try_query(data->conn1, sql)) - { - data->Test->add_result(1, "Query to ReadConn Master failed\n"); - return NULL; - } - if (data->rwsplit_only == 0) - { - if (data->Test->try_query(data->conn2, sql)) - { - data->Test->add_result(1, "Query to RWSplit failed\n"); - return NULL; - } - } - data->i++; - } - if (data->conn1 != NULL) - { - mysql_close(data->conn1); - } - if (data->rwsplit_only == 0) - { - if (data->conn2 != NULL) - { - mysql_close(data->conn2); - } - } - return NULL; + return test.global_result; } diff --git a/maxscale-system-test/testconnections.cpp b/maxscale-system-test/testconnections.cpp index 194a804da..e141d6f63 100644 --- a/maxscale-system-test/testconnections.cpp +++ b/maxscale-system-test/testconnections.cpp @@ -1688,6 +1688,8 @@ void *log_copy_thread( void *ptr ) Test->tprintf("\n **** Copying all logs *** \n"); Test->copy_all_logs_periodic(); } + + return NULL; } int TestConnections::insert_select(int m, int N) From 896c7deb03a56ce3683b2de8757749545675a895 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Mon, 23 Jul 2018 09:23:17 +0300 Subject: [PATCH 20/28] Use gwbuf_make_contiguous only with non-NULL buffers A NULL buffer should never be made contiguous as that points to a flaw in program logic. --- server/modules/protocol/MySQL/mariadbclient/mysql_client.cc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/server/modules/protocol/MySQL/mariadbclient/mysql_client.cc b/server/modules/protocol/MySQL/mariadbclient/mysql_client.cc index 915b37d71..692f80bbb 100644 --- a/server/modules/protocol/MySQL/mariadbclient/mysql_client.cc +++ b/server/modules/protocol/MySQL/mariadbclient/mysql_client.cc @@ -1600,11 +1600,11 @@ static int route_by_statement(MXS_SESSION* session, uint64_t capabilities, GWBUF // Process client request one packet at a time packetbuf = modutil_get_next_MySQL_packet(p_readbuf); - // TODO: Do this only when RCAP_TYPE_CONTIGUOUS_INPUT is requested - packetbuf = gwbuf_make_contiguous(packetbuf); - if (packetbuf != NULL) { + // TODO: Do this only when RCAP_TYPE_CONTIGUOUS_INPUT is requested + packetbuf = gwbuf_make_contiguous(packetbuf); + CHK_GWBUF(packetbuf); MySQLProtocol* proto = (MySQLProtocol*)session->client_dcb->protocol; From ea5c5f3a0714e1b4d4477ff25bfb2073dc2d6e78 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Mon, 23 Jul 2018 10:19:11 +0300 Subject: [PATCH 21/28] Never return NULL from gwbuf_make_contiguous By aborting the process if memory runs out when a buffer needs to be made contiguous, we rule out other, more subtle, errors. Failing as soon as a possible when memory allocation fails gives better error messages. 
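A minimal caller-side sketch of what the new contract means (the consumer
function named here is hypothetical and only for illustration):

    GWBUF* packet = modutil_get_next_MySQL_packet(&readbuf);

    if (packet)
    {
        // Allocation failure now aborts the process, so a non-NULL input
        // always yields a non-NULL contiguous buffer and callers no longer
        // need a separate NULL check after this call.
        packet = gwbuf_make_contiguous(packet);
        handle_packet(packet);   // hypothetical consumer
    }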
--- include/maxscale/buffer.h | 11 ++++------- server/core/buffer.cc | 32 ++++++++++++++------------------ 2 files changed, 18 insertions(+), 25 deletions(-) diff --git a/include/maxscale/buffer.h b/include/maxscale/buffer.h index b5d75dc67..02a208e19 100644 --- a/include/maxscale/buffer.h +++ b/include/maxscale/buffer.h @@ -368,16 +368,13 @@ extern char *gwbuf_get_property(GWBUF *buf, char *name); /** * Convert a chain of GWBUF structures into a single GWBUF structure * - * @param orig The chain to convert + * @param orig The chain to convert, must not be used after the function call * - * @return NULL if @c buf is NULL or if a memory allocation fails, - * @c buf if @c buf already is contiguous, and otherwise - * a contigious copy of @c buf. + * @return A contiguous version of @c buf. * - * @attention If a non-NULL value is returned, the @c buf should no - * longer be used as it may have been freed. + * @attention Never returns NULL, memory allocation failures abort the process */ -extern GWBUF *gwbuf_make_contiguous(GWBUF *buf); +extern GWBUF* gwbuf_make_contiguous(GWBUF *buf); /** * Add a buffer object to GWBUF buffer. diff --git a/server/core/buffer.cc b/server/core/buffer.cc index f870c838d..a938c1f67 100644 --- a/server/core/buffer.cc +++ b/server/core/buffer.cc @@ -786,13 +786,8 @@ gwbuf_get_property(GWBUF *buf, char *name) return NULL; } -GWBUF * -gwbuf_make_contiguous(GWBUF *orig) +GWBUF* gwbuf_make_contiguous(GWBUF *orig) { - GWBUF *newbuf; - uint8_t *ptr; - int len; - if (orig == NULL) { ss_info_dassert(!true, "gwbuf_make_contiguous: NULL buffer"); @@ -803,20 +798,21 @@ gwbuf_make_contiguous(GWBUF *orig) return orig; } - if ((newbuf = gwbuf_alloc(gwbuf_length(orig))) != NULL) - { - newbuf->gwbuf_type = orig->gwbuf_type; - newbuf->hint = hint_dup(orig->hint); - ptr = GWBUF_DATA(newbuf); + GWBUF* newbuf = gwbuf_alloc(gwbuf_length(orig)); + MXS_ABORT_IF_NULL(newbuf); - while (orig) - { - len = GWBUF_LENGTH(orig); - memcpy(ptr, GWBUF_DATA(orig), len); - ptr += len; - orig = gwbuf_consume(orig, len); - } + newbuf->gwbuf_type = orig->gwbuf_type; + newbuf->hint = hint_dup(orig->hint); + uint8_t* ptr = GWBUF_DATA(newbuf); + + while (orig) + { + int len = GWBUF_LENGTH(orig); + memcpy(ptr, GWBUF_DATA(orig), len); + ptr += len; + orig = gwbuf_consume(orig, len); } + return newbuf; } From 4b7cd7a281cf8669628e428633fa4f7eabe948f5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Mon, 23 Jul 2018 13:00:02 +0300 Subject: [PATCH 22/28] Dump queue contents on unexpectedly NULL buffer When the query queue does not contain a complete packet (i.e. modutil_get_next_MySQL_packet return NULL), an informative dump of how many bytes and what is stored is logged. 
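For context, a sketch of the packet completeness check this relies on and of
how the dump is emitted (it mirrors the readwritesplit change below; LOG_ALERT
is the priority used there):

    // A buffer holds one complete MySQL packet when the 3-byte little-endian
    // payload length in the header plus the 4-byte header itself equals the
    // total buffer length.
    uint8_t header[4] = {};
    gwbuf_copy_data(buffer, 0, MYSQL_HEADER_LEN, header);
    size_t payload_len = gw_mysql_get_byte3(header);
    bool complete = (payload_len + MYSQL_HEADER_LEN == gwbuf_length(buffer));

    if (!complete)
    {
        // Log the queued bytes at ALERT priority so the unexpected state is
        // visible even when INFO logging is disabled.
        gwbuf_hexdump(buffer, LOG_ALERT);
    }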
--- include/maxscale/buffer.h | 7 ++++--- server/core/buffer.cc | 4 ++-- server/modules/routing/readwritesplit/readwritesplit.cc | 8 ++++++++ 3 files changed, 14 insertions(+), 5 deletions(-) diff --git a/include/maxscale/buffer.h b/include/maxscale/buffer.h index 02a208e19..00187b65b 100644 --- a/include/maxscale/buffer.h +++ b/include/maxscale/buffer.h @@ -403,10 +403,11 @@ extern void dprintAllBuffers(void *pdcb); #endif /** - * Debug function for dumping buffer contents to INFO log + * Debug function for dumping buffer contents to log * - * @param buffer Buffer to dump + * @param buffer Buffer to dump + * @param log_level Log priority where the message is written */ -void gwbuf_hexdump(GWBUF* buffer); +void gwbuf_hexdump(GWBUF* buffer, int log_level); MXS_END_DECLS diff --git a/server/core/buffer.cc b/server/core/buffer.cc index a938c1f67..fe52e369a 100644 --- a/server/core/buffer.cc +++ b/server/core/buffer.cc @@ -892,7 +892,7 @@ static std::string dump_one_buffer(GWBUF* buffer) return rval; } -void gwbuf_hexdump(GWBUF* buffer) +void gwbuf_hexdump(GWBUF* buffer, int log_level) { std::stringstream ss; @@ -910,5 +910,5 @@ void gwbuf_hexdump(GWBUF* buffer) n = 1024; } - MXS_INFO("%.*s", n, ss.str().c_str()); + MXS_LOG_MESSAGE(log_level, "%.*s", n, ss.str().c_str()); } diff --git a/server/modules/routing/readwritesplit/readwritesplit.cc b/server/modules/routing/readwritesplit/readwritesplit.cc index db3d86012..cb5de21bb 100644 --- a/server/modules/routing/readwritesplit/readwritesplit.cc +++ b/server/modules/routing/readwritesplit/readwritesplit.cc @@ -473,6 +473,14 @@ static bool route_stored_query(RWSplitSession *rses) query_queue = gwbuf_make_contiguous(query_queue); ss_dassert(query_queue); + if (query_queue == NULL) + { + MXS_ALERT("Queued query unexpectedly empty. Bytes queued: %d Hexdump: ", + gwbuf_length(rses->query_queue)); + gwbuf_hexdump(rses->query_queue, LOG_ALERT); + return true; + } + /** Store the query queue locally for the duration of the routeQuery call. * This prevents recursive calls into this function. */ GWBUF *temp_storage = rses->query_queue; From 10115601ca9484b1f11716e8d269710220380c55 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Wed, 25 Jul 2018 00:07:09 +0300 Subject: [PATCH 23/28] Add 2.2.12 release notes Added release notes for the 2.2.12 release. --- Documentation/Changelog.md | 1 + .../MaxScale-2.2.12-Release-Notes.md | 44 +++++++++++++++++++ 2 files changed, 45 insertions(+) create mode 100644 Documentation/Release-Notes/MaxScale-2.2.12-Release-Notes.md diff --git a/Documentation/Changelog.md b/Documentation/Changelog.md index cc7f6321d..dc17bdfef 100644 --- a/Documentation/Changelog.md +++ b/Documentation/Changelog.md @@ -28,6 +28,7 @@ the master. There is also limited capability for rejoining nodes. 
 For more details, please refer to:
 
+* [MariaDB MaxScale 2.2.12 Release Notes](Release-Notes/MaxScale-2.2.12-Release-Notes.md)
 * [MariaDB MaxScale 2.2.11 Release Notes](Release-Notes/MaxScale-2.2.11-Release-Notes.md)
 * [MariaDB MaxScale 2.2.10 Release Notes](Release-Notes/MaxScale-2.2.10-Release-Notes.md)
 * [MariaDB MaxScale 2.2.9 Release Notes](Release-Notes/MaxScale-2.2.9-Release-Notes.md)
diff --git a/Documentation/Release-Notes/MaxScale-2.2.12-Release-Notes.md b/Documentation/Release-Notes/MaxScale-2.2.12-Release-Notes.md
new file mode 100644
index 000000000..45cb7aa8c
--- /dev/null
+++ b/Documentation/Release-Notes/MaxScale-2.2.12-Release-Notes.md
@@ -0,0 +1,44 @@
+# MariaDB MaxScale 2.2.12 Release Notes
+
+Release 2.2.12 is a GA release.
+
+This document describes the changes in release 2.2.12, when compared to
+release 2.2.11.
+
+For any problems you encounter, please consider submitting a bug
+report on [our Jira](https://jira.mariadb.org/projects/MXS).
+
+## New Features
+
+### Configuration Exporting
+
+The runtime configuration can now be dumped into a file with the
+`--export-config` command line option. This allows changes done at runtime to be
+collected into a single file for easier exporting.
+
+## Bug fixes
+
+* [MXS-1985](https://jira.mariadb.org/browse/MXS-1985) Concurrent KILL commands cause deadlock
+* [MXS-1977](https://jira.mariadb.org/browse/MXS-1977) Maxscale 2.2.6 memory leak
+* [MXS-1949](https://jira.mariadb.org/browse/MXS-1949) Warning for user load failure logged even when service has no users
+* [MXS-1942](https://jira.mariadb.org/browse/MXS-1942) maxctrl --version is not helpful
+
+## Known Issues and Limitations
+
+There are some limitations and known issues within this version of MaxScale.
+For more information, please refer to the [Limitations](../About/Limitations.md) document.
+
+## Packaging
+
+RPM and Debian packages are provided for the supported Linux distributions.
+
+Packages can be downloaded [here](https://mariadb.com/downloads/mariadb-tx/maxscale).
+
+## Source Code
+
+The source code of MaxScale is tagged at GitHub with a tag, which is identical
+with the version of MaxScale. For instance, the tag of version X.Y.Z of MaxScale
+is `maxscale-X.Y.Z`. Further, the default branch is always the latest GA version
+of MaxScale.
+
+The source code is available [here](https://github.com/mariadb-corporation/MaxScale).

From e2a913013af34e5001b766f26b2013b4af22ab90 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?=
Date: Wed, 25 Jul 2018 00:11:57 +0300
Subject: [PATCH 24/28] Update release procedure documentation

The documentation contents and upgrading documents no longer have a link to
the release notes.
---
 maxscale-system-test/Documentation/RELEASE.md | 2 --
 1 file changed, 2 deletions(-)

diff --git a/maxscale-system-test/Documentation/RELEASE.md b/maxscale-system-test/Documentation/RELEASE.md
index 53cd40ab9..bca8fbbfe 100644
--- a/maxscale-system-test/Documentation/RELEASE.md
+++ b/maxscale-system-test/Documentation/RELEASE.md
@@ -3,9 +3,7 @@
 
 ## Pre-release Checklist
 
 * Create new release notes and add all fixed bugs, use a previous one as a template
-* Update the link to the latest release notes in Documentation-Contents.md
 * Add link to release notes and document major changes in Changelog.md
-* Add link to release notes in the Upgrading guide
 
 ## 1. Tag

From 3243f741a0f6f17806b646af5ebc9ada41460096 Mon Sep 17 00:00:00 2001
From: Esa Korhonen
Date: Tue, 24 Jul 2018 19:05:41 +0300
Subject: [PATCH 25/28] MXS-1961 Standalone master loses master status when an alternative master emerges

Fixes the bug by requiring that only running slaves are considered when
choosing a master.
---
 server/modules/monitor/mariadbmon/mariadbmon.cc | 16 +++++-----------
 1 file changed, 5 insertions(+), 11 deletions(-)

diff --git a/server/modules/monitor/mariadbmon/mariadbmon.cc b/server/modules/monitor/mariadbmon/mariadbmon.cc
index 8d9fe779e..79d2e292f 100644
--- a/server/modules/monitor/mariadbmon/mariadbmon.cc
+++ b/server/modules/monitor/mariadbmon/mariadbmon.cc
@@ -80,12 +80,6 @@ enum mysql_server_version
     MYSQL_SERVER_VERSION_51
 };
 
-enum slave_down_setting_t
-{
-    ACCEPT_DOWN,
-    REJECT_DOWN
-};
-
 enum print_repl_warnings_t
 {
     WARNINGS_ON,
@@ -99,7 +93,7 @@ static bool stop_monitor(MXS_MONITOR *);
 static void diagnostics(DCB *, const MXS_MONITOR *);
 static json_t* diagnostics_json(const MXS_MONITOR *);
 static MXS_MONITORED_SERVER *getServerByNodeId(MXS_MONITORED_SERVER *, long);
-static MXS_MONITORED_SERVER *getSlaveOfNodeId(MXS_MONITORED_SERVER *, long, slave_down_setting_t);
+static MXS_MONITORED_SERVER *getSlaveOfNodeId(MXS_MONITORED_SERVER *, long);
 static MXS_MONITORED_SERVER *get_replication_tree(MXS_MONITOR *, int);
 static void set_master_heartbeat(MYSQL_MONITOR *, MXS_MONITORED_SERVER *);
 static void set_slave_heartbeat(MXS_MONITOR *, MXS_MONITORED_SERVER *);
@@ -2385,7 +2379,7 @@ monitorMain(void *arg)
             ss_dassert(serv_info);
 
             if (ptr->server->node_id > 0 && ptr->server->master_id > 0 &&
-                getSlaveOfNodeId(mon->monitored_servers, ptr->server->node_id, REJECT_DOWN) &&
+                getSlaveOfNodeId(mon->monitored_servers, ptr->server->node_id) &&
                 getServerByNodeId(mon->monitored_servers, ptr->server->master_id) &&
                 (!handle->multimaster || serv_info->group == 0))
             {
@@ -2696,13 +2690,13 @@ getServerByNodeId(MXS_MONITORED_SERVER *ptr, long node_id)
  * @return The slave server of this node_id
  */
 static MXS_MONITORED_SERVER *
-getSlaveOfNodeId(MXS_MONITORED_SERVER *ptr, long node_id, slave_down_setting_t slave_down_setting)
+getSlaveOfNodeId(MXS_MONITORED_SERVER *ptr, long node_id)
 {
     SERVER *current;
     while (ptr)
     {
         current = ptr->server;
-        if (current->master_id == node_id && (slave_down_setting == ACCEPT_DOWN || !SERVER_IS_DOWN(current)))
+        if (!SERVER_IS_DOWN(current) && current->master_id == node_id)
         {
             return ptr;
         }
@@ -3009,7 +3003,7 @@ static MXS_MONITORED_SERVER *get_replication_tree(MXS_MONITOR *mon, int num_serv
             getServerByNodeId(mon->monitored_servers, node_id) == NULL)
         {
             MXS_MONITORED_SERVER *find_slave;
-            find_slave = getSlaveOfNodeId(mon->monitored_servers, current->node_id, ACCEPT_DOWN);
+            find_slave = getSlaveOfNodeId(mon->monitored_servers, current->node_id);
 
             if (find_slave == NULL)
             {

From e64e4bc34f4cd7cb2f8dd2b9f528af76d58e76ff Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?=
Date: Wed, 25 Jul 2018 22:40:09 +0300
Subject: [PATCH 26/28] Use the word REST API in documentation

The HTTP admin interface was ambiguous.
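Returning briefly to the monitor fix above (PATCH 25), the behavioural change
is easiest to read as a single predicate: a downed server no longer counts as
a slave of any node, so a standalone master is not demoted merely because a
stopped slave still points at another candidate. The sketch below restates
that rule with the names used in the diff; the standalone helper itself is
illustrative only.

```
/* Sketch of the rule enforced by the modified getSlaveOfNodeId(): only a
 * running server can qualify as the slave of node_id. */
static bool is_running_slave_of(MXS_MONITORED_SERVER* srv, long node_id)
{
    SERVER* current = srv->server;
    return !SERVER_IS_DOWN(current) && current->master_id == node_id;
}
```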
---
 Documentation/Getting-Started/Configuration-Guide.md | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/Documentation/Getting-Started/Configuration-Guide.md b/Documentation/Getting-Started/Configuration-Guide.md
index 8c69a31c9..8e0abb874 100644
--- a/Documentation/Getting-Started/Configuration-Guide.md
+++ b/Documentation/Getting-Started/Configuration-Guide.md
@@ -43,6 +43,7 @@ listener | A listener is the network endpoint that is used to listen connection
 failover | When a connection currently being used between MariaDB MaxScale and the database server fails a replacement will be automatically created to another server by MariaDB MaxScale without client intervention
 backend database | A term used to refer to a database that sits behind MariaDB MaxScale and is accessed by applications via MariaDB MaxScale.
 filter | A module that can be placed between the client and the MariaDB MaxScale router module. All client data passes through the filter module and may be examined or modified by the filter modules. Filters may be chained together to form processing pipelines.
+REST API | HTTP administrative interface
 
 ## Configuration
 
@@ -726,17 +727,16 @@ configuration file.
 
 #### `admin_host`
 
-The network interface where the HTTP admin interface listens on. The default
-value is the IPv4 address `127.0.0.1` which only listens for local connections.
+The network interface where the REST API listens on. The default value is the
+IPv4 address `127.0.0.1` which only listens for local connections.
 
 #### `admin_port`
 
-The port where the HTTP admin interface listens on. The default value is port
-8989.
+The port where the REST API listens on. The default value is port 8989.
 
 #### `admin_auth`
 
-Enable HTTP admin interface authentication using HTTP Basic Access
+Enable REST API authentication using HTTP Basic Access
 authentication. This is not a secure method of authentication without HTTPS but
 it does add a small layer of security. This option is enabled by default.

From 4dc1638f78e4777d824dd62f7e6cdec56cf4de8a Mon Sep 17 00:00:00 2001
From: Marko
Date: Tue, 5 Jun 2018 11:30:55 +0300
Subject: [PATCH 27/28] Silence the -Wunused-result warning

The -Wunused-result warning in test_logthrottling.cc was causing an error
when trying to build MaxScale from source. This warning can be silenced by
putting the function call triggering the warning in an if-clause.
---
 server/core/test/test_logthrottling.cc | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/server/core/test/test_logthrottling.cc b/server/core/test/test_logthrottling.cc
index eda19665b..09f477ab6 100644
--- a/server/core/test/test_logthrottling.cc
+++ b/server/core/test/test_logthrottling.cc
@@ -216,7 +216,7 @@ int main(int argc, char* argv[])
     // window, we should get no messages.
     if (!run(t, LOG_WARNING, 100, 0))
     {
-        rc = EXIT_FAILURE;
+        rc = EXIT_FAILURE;<< strerror(errno) <<
     }
 
     cout << "Sleeping 6 seconds." << endl;
@@ -274,7 +274,10 @@ int main(int argc, char* argv[])
     // A crude method to remove all files but it works
     string cmd = "rm -r ";
     cmd += logdir;
-    system(cmd.c_str());
+    if (system(cmd.c_str()) == -1)
+    {
+        cerr << "Could not remove all files due to " << strerror(errno) << endl;
+    }
 
     return rc;
 }

From 571d52f5576c4005d5b4cec20d641ebc4ed0a5d2 Mon Sep 17 00:00:00 2001
From: Marko
Date: Tue, 5 Jun 2018 15:49:22 +0300
Subject: [PATCH 28/28] Typo fix in logthrottling_test
---
 server/core/test/test_logthrottling.cc | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/server/core/test/test_logthrottling.cc b/server/core/test/test_logthrottling.cc
index 09f477ab6..13fdc7d31 100644
--- a/server/core/test/test_logthrottling.cc
+++ b/server/core/test/test_logthrottling.cc
@@ -216,7 +216,7 @@ int main(int argc, char* argv[])
     // window, we should get no messages.
     if (!run(t, LOG_WARNING, 100, 0))
     {
-        rc = EXIT_FAILURE;<< strerror(errno) <<
+        rc = EXIT_FAILURE;
     }
 
     cout << "Sleeping 6 seconds." << endl;
@@ -276,7 +276,7 @@ int main(int argc, char* argv[])
     cmd += logdir;
     if (system(cmd.c_str()) == -1)
     {
-        cerr << "Could not remove all files due to " << strerror(errno) << endl;
+        cerr << "Could not remove all files";
     }
 
     return rc;
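A note on the last two patches: glibc can declare system() with the
warn_unused_result attribute (for example in fortified builds), so discarding
its return value becomes a hard error once -Werror is in effect. Checking the
value in an if-statement both silences the warning and reports the failure.
The helper below is an illustrative sketch of that pattern, not code from the
test itself; note that system() returns -1 only when the child process could
not be started, so a non-zero exit status from rm would still need a separate
check.

```
#include <cerrno>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <string>

/* Sketch: remove a scratch directory and report why it failed, if it did. */
static void remove_log_directory(const std::string& logdir)
{
    std::string cmd = "rm -r " + logdir;

    if (std::system(cmd.c_str()) == -1)
    {
        std::cerr << "Could not remove " << logdir << ": "
                  << std::strerror(errno) << std::endl;
    }
}
```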