From b08d4e37b5b555299bd98de60fd099c20ec87569 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Wed, 3 Apr 2019 12:40:43 +0300 Subject: [PATCH 01/22] MXS-2416: Pass deleter to std::shared_ptr As shared_ptr doesn't automatically use std::default_delete, it needs to be explicitly passed to the constructor. --- server/core/session.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/core/session.cc b/server/core/session.cc index 618efe3ea..d61f16cd3 100644 --- a/server/core/session.cc +++ b/server/core/session.cc @@ -1490,7 +1490,7 @@ void Session::retain_statement(GWBUF* pBuffer) { mxb_assert(m_last_queries.size() <= m_retain_last_statements); - std::shared_ptr sBuffer(gwbuf_clone(pBuffer)); + std::shared_ptr sBuffer(gwbuf_clone(pBuffer), std::default_delete()); m_last_queries.push_front(QueryInfo(sBuffer)); From 556c83f83aec5a57a4303e8463e9333b1db9cd7f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Thu, 4 Apr 2019 08:59:28 +0300 Subject: [PATCH 02/22] MXS-2417: Add test case --- maxscale-system-test/CMakeLists.txt | 3 +++ ....cnf.template.mxs2417_ignore_persisted_cnf | 3 +++ .../mxs2417_ignore_persisted_cnf.cpp | 24 +++++++++++++++++++ 3 files changed, 30 insertions(+) create mode 100644 maxscale-system-test/cnf/maxscale.cnf.template.mxs2417_ignore_persisted_cnf create mode 100644 maxscale-system-test/mxs2417_ignore_persisted_cnf.cpp diff --git a/maxscale-system-test/CMakeLists.txt b/maxscale-system-test/CMakeLists.txt index b8f7eabd3..e03567ef2 100644 --- a/maxscale-system-test/CMakeLists.txt +++ b/maxscale-system-test/CMakeLists.txt @@ -939,6 +939,9 @@ add_test_executable(mxs2300_history_pruning.cpp mxs2300_history_pruning mxs2300_ # MXS-2326: Routing hints aren't cloned in gwbuf_clone add_test_executable(mxs2326_hint_clone.cpp mxs2326_hint_clone mxs2326_hint_clone LABELS readwritesplit REPL_BACKEND) +# MXS-2417: Ignore persisted configs with load_persisted_configs=false +add_test_executable(mxs2417_ignore_persisted_cnf.cpp mxs2417_ignore_persisted_cnf mxs2417_ignore_persisted_cnf LABELS REPL_BACKEND) + ############################################ # BEGIN: binlogrouter and avrorouter tests # ############################################ diff --git a/maxscale-system-test/cnf/maxscale.cnf.template.mxs2417_ignore_persisted_cnf b/maxscale-system-test/cnf/maxscale.cnf.template.mxs2417_ignore_persisted_cnf new file mode 100644 index 000000000..2e90be3d1 --- /dev/null +++ b/maxscale-system-test/cnf/maxscale.cnf.template.mxs2417_ignore_persisted_cnf @@ -0,0 +1,3 @@ +[maxscale] +threads=###threads### +load_persisted_configs=false diff --git a/maxscale-system-test/mxs2417_ignore_persisted_cnf.cpp b/maxscale-system-test/mxs2417_ignore_persisted_cnf.cpp new file mode 100644 index 000000000..75049f0d5 --- /dev/null +++ b/maxscale-system-test/mxs2417_ignore_persisted_cnf.cpp @@ -0,0 +1,24 @@ +/** + * MXS-2417: Ignore persisted configs with load_persisted_configs=false + * https://jira.mariadb.org/browse/MXS-2417 + */ + +#include "testconnections.h" + +int main(int argc, char* argv[]) +{ + TestConnections test(argc, argv); + + test.tprintf("Creating a server and verifying it exists"); + test.check_maxctrl("create server server1234 127.0.0.1 3306"); + test.check_maxctrl("show server server1234"); + + test.tprintf("Restarting MaxScale"); + test.maxscales->restart_maxscale(); + + test.tprintf("Creating the server again and verifying it is successful"); + test.check_maxctrl("create server server1234 127.0.0.1 3306"); + 
test.check_maxctrl("show server server1234"); + + return test.global_result; +} From 6421af1bb4aba209028ab8a6f1ec611c6479523a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Thu, 4 Apr 2019 16:55:32 +0300 Subject: [PATCH 03/22] Backport query queue changes to 2.3 Backported the changes that convert the query queue in readwritesplit into a proper queue. This changes combines both 5e3198f8313b7bb33df386eb35986bfae1db94a3 and 6042a53cb31046b1100743723567906c5d8208e2 into one commit. --- .../readwritesplit/rwsplit_route_stmt.cc | 2 +- .../routing/readwritesplit/rwsplitsession.cc | 59 ++++++++----------- .../routing/readwritesplit/rwsplitsession.hh | 14 ++--- 3 files changed, 31 insertions(+), 44 deletions(-) diff --git a/server/modules/routing/readwritesplit/rwsplit_route_stmt.cc b/server/modules/routing/readwritesplit/rwsplit_route_stmt.cc index a9418d9f3..91edc010c 100644 --- a/server/modules/routing/readwritesplit/rwsplit_route_stmt.cc +++ b/server/modules/routing/readwritesplit/rwsplit_route_stmt.cc @@ -320,7 +320,7 @@ bool RWSplitSession::route_single_stmt(GWBUF* querybuf) else if (target->has_session_commands()) { // We need to wait until the session commands are executed - m_query_queue = gwbuf_append(m_query_queue, gwbuf_clone(querybuf)); + m_query_queue.emplace_back(gwbuf_clone(querybuf)); MXS_INFO("Queuing query until '%s' completes session command", target->name()); } else diff --git a/server/modules/routing/readwritesplit/rwsplitsession.cc b/server/modules/routing/readwritesplit/rwsplitsession.cc index 02a9ff990..0fa0b9536 100644 --- a/server/modules/routing/readwritesplit/rwsplitsession.cc +++ b/server/modules/routing/readwritesplit/rwsplitsession.cc @@ -34,7 +34,6 @@ RWSplitSession::RWSplitSession(RWSplit* instance, , m_client(session->client_dcb) , m_sescmd_count(1) , m_expected_responses(0) - , m_query_queue(NULL) , m_router(instance) , m_sent_sescmd(0) , m_recv_sescmd(0) @@ -108,7 +107,6 @@ void close_all_connections(SRWBackendList& backends) void RWSplitSession::close() { - gwbuf_free(m_query_queue); close_all_connections(m_backends); m_current_query.reset(); @@ -138,11 +136,11 @@ int32_t RWSplitSession::routeQuery(GWBUF* querybuf) { MXS_INFO("New query received while transaction replay is active: %s", mxs::extract_sql(querybuf).c_str()); - m_query_queue = gwbuf_append(m_query_queue, querybuf); + m_query_queue.emplace_back(querybuf); return 1; } - if ((m_query_queue == NULL || GWBUF_IS_REPLAYED(querybuf)) + if ((m_query_queue.empty() || GWBUF_IS_REPLAYED(querybuf)) && (m_expected_responses == 0 || m_qc.load_data_state() == QueryClassifier::LOAD_DATA_ACTIVE || m_qc.large_query())) @@ -181,12 +179,12 @@ int32_t RWSplitSession::routeQuery(GWBUF* querybuf) * We are already processing a request from the client. Store the * new query and wait for the previous one to complete. */ - mxb_assert(m_expected_responses > 0 || m_query_queue); + mxb_assert(m_expected_responses > 0 || !m_query_queue.empty()); MXS_INFO("Storing query (len: %d cmd: %0x), expecting %d replies to current command", gwbuf_length(querybuf), GWBUF_DATA(querybuf)[4], m_expected_responses); - m_query_queue = gwbuf_append(m_query_queue, querybuf); + m_query_queue.emplace_back(querybuf); querybuf = NULL; rval = 1; @@ -221,38 +219,23 @@ bool RWSplitSession::route_stored_query() /** Loop over the stored statements as long as the routeQuery call doesn't * append more data to the queue. 
If it appends data to the queue, we need * to wait for a response before attempting another reroute */ - while (m_query_queue) + while (!m_query_queue.empty()) { MXS_INFO(">>> Routing stored queries"); - GWBUF* query_queue = modutil_get_next_MySQL_packet(&m_query_queue); - query_queue = gwbuf_make_contiguous(query_queue); - mxb_assert(query_queue); - - if (query_queue == NULL) - { - MXS_ALERT("Queued query unexpectedly empty. Bytes queued: %d Hexdump: ", - gwbuf_length(m_query_queue)); - gwbuf_hexdump(m_query_queue, LOG_ALERT); - return true; - } + auto query = std::move(m_query_queue.front()); + m_query_queue.pop_front(); /** Store the query queue locally for the duration of the routeQuery call. * This prevents recursive calls into this function. */ - GWBUF* temp_storage = m_query_queue; - m_query_queue = NULL; + decltype(m_query_queue) temp_storage; + temp_storage.swap(m_query_queue); // TODO: Move the handling of queued queries to the client protocol // TODO: module where the command tracking is done automatically. - uint8_t cmd = mxs_mysql_get_command(query_queue); + uint8_t cmd = mxs_mysql_get_command(query.get()); mysql_protocol_set_current_command(m_client, (mxs_mysql_cmd_t)cmd); - if (cmd == MXS_COM_QUERY || cmd == MXS_COM_STMT_PREPARE) - { - // The query needs to be explicitly parsed as it was processed multiple times - qc_parse(query_queue, QC_COLLECT_ALL); - } - - if (!routeQuery(query_queue)) + if (!routeQuery(query.release())) { rval = false; MXS_ERROR("Failed to route queued query."); @@ -260,17 +243,21 @@ bool RWSplitSession::route_stored_query() MXS_INFO("<<< Stored queries routed"); - if (m_query_queue == NULL) + if (m_query_queue.empty()) { /** Query successfully routed and no responses are expected */ - m_query_queue = temp_storage; + m_query_queue.swap(temp_storage); } else { - /** Routing was stopped, we need to wait for a response before retrying. - * temp_storage holds the tail end of the queue and query_queue contains the query we attempted - * to route. Append temp_storage to query_queue to keep the order of the queries correct. */ - m_query_queue = gwbuf_append(m_query_queue, temp_storage); + /** + * Routing was stopped, we need to wait for a response before retrying. + * temp_storage holds the tail end of the queue and m_query_queue contains the query we attempted + * to route. 
+ */ + mxb_assert(m_query_queue.size() == 1); + temp_storage.push_front(std::move(m_query_queue.front())); + m_query_queue = std::move(temp_storage); break; } } @@ -468,7 +455,7 @@ void RWSplitSession::trx_replay_next_stmt() MXS_INFO("Resuming execution: %s", mxs::extract_sql(m_interrupted_query.get()).c_str()); retry_query(m_interrupted_query.release(), 0); } - else if (m_query_queue) + else if (!m_query_queue.empty()) { route_stored_query(); } @@ -754,7 +741,7 @@ void RWSplitSession::clientReply(GWBUF* writebuf, DCB* backend_dcb) m_expected_responses++; } } - else if (m_expected_responses == 0 && m_query_queue + else if (m_expected_responses == 0 && !m_query_queue.empty() && (!m_is_replay_active || processed_sescmd)) { /** diff --git a/server/modules/routing/readwritesplit/rwsplitsession.hh b/server/modules/routing/readwritesplit/rwsplitsession.hh index 8472b9deb..85ef72782 100644 --- a/server/modules/routing/readwritesplit/rwsplitsession.hh +++ b/server/modules/routing/readwritesplit/rwsplitsession.hh @@ -16,6 +16,7 @@ #include "trx.hh" #include +#include #include #include @@ -140,18 +141,17 @@ public: int m_nbackends; /**< Number of backend servers (obsolete) */ DCB* m_client; /**< The client DCB */ uint64_t m_sescmd_count; /**< Number of executed session commands (starts from 1) */ - int m_expected_responses; /**< Number of expected responses to the current - * query */ - GWBUF* m_query_queue; /**< Queued commands waiting to be executed */ + int m_expected_responses; /**< Number of expected responses to the current query */ + + std::deque m_query_queue; /**< Queued commands waiting to be executed */ RWSplit* m_router; /**< The router instance */ mxs::SessionCommandList m_sescmd_list; /**< List of executed session commands */ ResponseMap m_sescmd_responses; /**< Response to each session command */ SlaveResponseList m_slave_responses; /**< Slaves that replied before the master */ uint64_t m_sent_sescmd; /**< ID of the last sent session command*/ - uint64_t m_recv_sescmd; /**< ID of the most recently completed session - * command */ - ExecMap m_exec_map; /**< Map of COM_STMT_EXECUTE statement IDs to - * Backends */ + uint64_t m_recv_sescmd; /**< ID of the most recently completed session command */ + ExecMap m_exec_map; /**< Map of COM_STMT_EXECUTE statement IDs to Backends */ + std::string m_gtid_pos; /**< Gtid position for causal read */ wait_gtid_state m_wait_gtid; /**< State of MASTER_GTID_WAIT reply */ uint32_t m_next_seq; /**< Next packet's sequence number */ From ec890b33cd689f231899b6a63a71e31bd79ac07c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Thu, 4 Apr 2019 17:20:04 +0300 Subject: [PATCH 04/22] Prevent checksum mismatch on second trx replay If a transaction replay has to be executed twice due to a failure of the original candidate master, the query queue could contain replayed queries. The replayed queries would be placed into the queue if a new connection needs to be created before the transaction replay can start. 
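For context, the fix below relies on the standard C++ erase–remove idiom to drop every queued buffer that matches a predicate. A minimal generic sketch of the pattern (illustrative only, using a plain integer deque instead of mxs::Buffer and GWBUF_IS_REPLAYED):

```
#include <algorithm>
#include <deque>

// Remove every element that matches the predicate, keeping the rest in order.
void drop_negative(std::deque<int>& queue)
{
    queue.erase(std::remove_if(queue.begin(), queue.end(),
                               [](int value) {
                                   return value < 0;    // stand-in for GWBUF_IS_REPLAYED()
                               }),
                queue.end());
}
```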
--- server/modules/routing/readwritesplit/rwsplitsession.cc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/server/modules/routing/readwritesplit/rwsplitsession.cc b/server/modules/routing/readwritesplit/rwsplitsession.cc index 0fa0b9536..138144aaf 100644 --- a/server/modules/routing/readwritesplit/rwsplitsession.cc +++ b/server/modules/routing/readwritesplit/rwsplitsession.cc @@ -813,6 +813,11 @@ bool RWSplitSession::start_trx_replay() m_trx.close(); m_trx = m_orig_trx; m_current_query.copy_from(m_orig_stmt); + + // Erase all replayed queries from the query queue to prevent checksum mismatches + m_query_queue.erase(std::remove_if(m_query_queue.begin(), m_query_queue.end(), [](mxs::Buffer b) { + return GWBUF_IS_REPLAYED(b.get()); + }), m_query_queue.end()); } if (m_trx.have_stmts() || m_current_query.get()) From 9722c0887aa01c2889eee5d01effcc8ec49d7071 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Thu, 4 Apr 2019 15:33:34 +0300 Subject: [PATCH 05/22] Log connection ID when reading server handshake By logging the connection ID for each created connection, failures can be traced back from the backend server all the way up to the client application. --- server/modules/protocol/MySQL/mysql_common.cc | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/server/modules/protocol/MySQL/mysql_common.cc b/server/modules/protocol/MySQL/mysql_common.cc index c575304f4..648ce60bc 100644 --- a/server/modules/protocol/MySQL/mysql_common.cc +++ b/server/modules/protocol/MySQL/mysql_common.cc @@ -1080,6 +1080,14 @@ int gw_decode_mysql_server_handshake(MySQLProtocol* conn, uint8_t* payload) // get ThreadID: 4 bytes uint32_t tid = gw_mysql_get_byte4(payload); + + // LocalClient also uses this code and it doesn't populate the server pointer + // TODO: fix it + if (conn->owner_dcb && conn->owner_dcb->server) + { + MXS_INFO("Connected to '%s' with thread id %u", conn->owner_dcb->server->name, tid); + } + /* TODO: Correct value of thread id could be queried later from backend if * there is any worry it might be larger than 32bit allows. */ conn->thread_id = tid; From b54e67223f665f581a80660a17bfbf46e1cd84ce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Fri, 5 Apr 2019 00:11:00 +0300 Subject: [PATCH 06/22] MXS-2423: Add missing parameters to maxscale endpoint Also updated the REST API documentation to include the newer output (automating this update would be valuable). --- Documentation/REST-API/Resources-MaxScale.md | 15 ++++++++---- include/maxscale/session.h | 10 ++++++++ server/core/config.cc | 4 ++++ server/core/session.cc | 24 ++++++++++++++++++++ 4 files changed, 48 insertions(+), 5 deletions(-) diff --git a/Documentation/REST-API/Resources-MaxScale.md b/Documentation/REST-API/Resources-MaxScale.md index 1c1b1fe66..c6e067632 100644 --- a/Documentation/REST-API/Resources-MaxScale.md +++ b/Documentation/REST-API/Resources-MaxScale.md @@ -52,12 +52,17 @@ file locations, configuration options and version information. 
"admin_ssl_key": "", "admin_ssl_cert": "", "admin_ssl_ca_cert": "", - "query_classifier": "" + "query_classifier": "", + "query_classifier_cache_size": 416215859, + "retain_last_statements": 2, + "dump_last_statements": "never", + "load_persisted_configs": false }, - "version": "2.2.0", - "commit": "aa1a413cd961d467083d1974c2a027f612201845", - "started_at": "Wed, 06 Sep 2017 06:51:54 GMT", - "uptime": 1227 + "version": "2.3.6", + "commit": "47158faf12c156775c39388652a77f8a8c542d28", + "started_at": "Thu, 04 Apr 2019 21:04:06 GMT", + "activated_at": "Thu, 04 Apr 2019 21:04:06 GMT", + "uptime": 337 }, "id": "maxscale", "type": "maxscale" diff --git a/include/maxscale/session.h b/include/maxscale/session.h index a60876384..154a730f3 100644 --- a/include/maxscale/session.h +++ b/include/maxscale/session.h @@ -620,6 +620,11 @@ char* session_set_variable_value(MXS_SESSION* session, */ void session_set_retain_last_statements(uint32_t n); +/** + * Get retain_last_statements + */ +uint32_t session_get_retain_last_statements(); + /** * @brief Retain provided statement, if configured to do so. * @@ -669,6 +674,11 @@ void session_set_dump_statements(session_dump_statements_t value); */ session_dump_statements_t session_get_dump_statements(); +/** + * String version of session_get_dump_statements + */ +const char* session_get_dump_statements_str(); + /** * @brief Route the query again after a delay * diff --git a/server/core/config.cc b/server/core/config.cc index f72a50133..8a986a385 100644 --- a/server/core/config.cc +++ b/server/core/config.cc @@ -4548,6 +4548,10 @@ json_t* config_maxscale_to_json(const char* host) CN_QUERY_CLASSIFIER_CACHE_SIZE, json_integer(cnf->qc_cache_properties.max_size)); + json_object_set_new(param, CN_RETAIN_LAST_STATEMENTS, json_integer(session_get_retain_last_statements())); + json_object_set_new(param, CN_DUMP_LAST_STATEMENTS, json_string(session_get_dump_statements_str())); + json_object_set_new(param, CN_LOAD_PERSISTED_CONFIGS, json_boolean(cnf->load_persisted_configs)); + json_t* attr = json_object(); time_t started = maxscale_started(); time_t activated = started + MXS_CLOCK_TO_SEC(cnf->promoted_at); diff --git a/server/core/session.cc b/server/core/session.cc index d61f16cd3..49120c47d 100644 --- a/server/core/session.cc +++ b/server/core/session.cc @@ -1055,6 +1055,11 @@ void session_set_retain_last_statements(uint32_t n) this_unit.retain_last_statements = n; } +uint32_t session_get_retain_last_statements() +{ + return this_unit.retain_last_statements; +} + void session_set_dump_statements(session_dump_statements_t value) { this_unit.dump_statements = value; @@ -1065,6 +1070,25 @@ session_dump_statements_t session_get_dump_statements() return this_unit.dump_statements; } +const char* session_get_dump_statements_str() +{ + switch (this_unit.dump_statements) + { + case SESSION_DUMP_STATEMENTS_NEVER: + return "never"; + + case SESSION_DUMP_STATEMENTS_ON_CLOSE: + return "on_close"; + + case SESSION_DUMP_STATEMENTS_ON_ERROR: + return "on_error"; + + default: + mxb_assert(!true); + return "unknown"; + } +} + void session_retain_statement(MXS_SESSION* pSession, GWBUF* pBuffer) { static_cast(pSession)->retain_statement(pBuffer); From aad29404c6eb02f210f86d9c22b80193e151eb04 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Fri, 5 Apr 2019 01:12:42 +0300 Subject: [PATCH 07/22] Fix parameter value error The argumets were given in the wrong order. 
--- server/core/config.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/core/config.cc b/server/core/config.cc index 8a986a385..af4ff6b79 100644 --- a/server/core/config.cc +++ b/server/core/config.cc @@ -3161,9 +3161,9 @@ static bool check_config_objects(CONFIG_CONTEXT* context) MXS_ERROR("Invalid value for parameter '%s' for object '%s' " "of type '%s': %s (was expecting %s)", params->name, - params->value, obj->object, type.c_str(), + params->value, param_type_to_str(fix_params, params->name)); rval = false; } From 05515cca1665544181b73e1dafc90a944df7af86 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Fri, 5 Apr 2019 11:04:04 +0300 Subject: [PATCH 08/22] MXS-2259: Limit size of client reads Given the assumption that queries are rarely 16MB long and that realistically the only time that happens is during a large dump of data, we can limit the size of a single read to at most one MariaDB/MySQL packet at a time. This change allows the network throttling to engage a lot sooner and reduces the maximum overshoot of throttling to 16MB. --- include/maxscale/dcb.h | 1 + server/core/dcb.cc | 4 ++-- .../protocol/MySQL/mariadbclient/mysql_client.cc | 12 +++++++++++- 3 files changed, 14 insertions(+), 3 deletions(-) diff --git a/include/maxscale/dcb.h b/include/maxscale/dcb.h index c6c7052fb..82533417b 100644 --- a/include/maxscale/dcb.h +++ b/include/maxscale/dcb.h @@ -265,6 +265,7 @@ DCB* dcb_accept(DCB* listener); DCB* dcb_alloc(dcb_role_t, struct servlistener*); DCB* dcb_connect(struct server*, struct session*, const char*); int dcb_read(DCB*, GWBUF**, int); +int dcb_bytes_readable(DCB* dcb); int dcb_drain_writeq(DCB*); void dcb_close(DCB*); diff --git a/server/core/dcb.cc b/server/core/dcb.cc index 4770e7b05..3b72d4748 100644 --- a/server/core/dcb.cc +++ b/server/core/dcb.cc @@ -100,7 +100,6 @@ static inline DCB* dcb_find_in_list(DCB* dcb); static void dcb_stop_polling_and_shutdown(DCB* dcb); static bool dcb_maybe_add_persistent(DCB*); static inline bool dcb_write_parameter_check(DCB* dcb, GWBUF* queue); -static int dcb_bytes_readable(DCB* dcb); static int dcb_read_no_bytes_available(DCB* dcb, int nreadtotal); static int dcb_create_SSL(DCB* dcb, SSL_LISTENER* ssl); static int dcb_read_SSL(DCB* dcb, GWBUF** head); @@ -638,9 +637,10 @@ int dcb_read(DCB* dcb, * Find the number of bytes available for the DCB's socket * * @param dcb The DCB to read from + * * @return -1 on error, otherwise the total number of bytes available */ -static int dcb_bytes_readable(DCB* dcb) +int dcb_bytes_readable(DCB* dcb) { int bytesavailable; diff --git a/server/modules/protocol/MySQL/mariadbclient/mysql_client.cc b/server/modules/protocol/MySQL/mariadbclient/mysql_client.cc index a4af1fc0f..68f87612f 100644 --- a/server/modules/protocol/MySQL/mariadbclient/mysql_client.cc +++ b/server/modules/protocol/MySQL/mariadbclient/mysql_client.cc @@ -489,7 +489,10 @@ int gw_read_client_event(DCB* dcb) { max_bytes = 36; } - return_code = dcb_read(dcb, &read_buffer, max_bytes); + + const uint32_t max_single_read = GW_MYSQL_MAX_PACKET_LEN + MYSQL_HEADER_LEN; + return_code = dcb_read(dcb, &read_buffer, max_bytes > 0 ? max_bytes : max_single_read); + if (return_code < 0) { dcb_close(dcb); @@ -499,6 +502,13 @@ int gw_read_client_event(DCB* dcb) return return_code; } + if (nbytes_read == max_single_read && dcb_bytes_readable(dcb) > 0) + { + // We read a maximally long packet, route it first.
This is done in case there's a lot more data + // waiting and we have to start throttling the reads. + poll_fake_read_event(dcb); + } + return_code = 0; switch (protocol->protocol_auth_state) From 7fb840ac9e4b9b915642cc09a8174cc5328cd654 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Fri, 5 Apr 2019 13:58:20 +0300 Subject: [PATCH 09/22] Sort CN_ definitions --- include/maxscale/config.h | 28 ++++++++++++++-------------- server/core/config.cc | 28 ++++++++++++++-------------- 2 files changed, 28 insertions(+), 28 deletions(-) diff --git a/include/maxscale/config.h b/include/maxscale/config.h index 5a243281f..c609255c8 100644 --- a/include/maxscale/config.h +++ b/include/maxscale/config.h @@ -90,17 +90,17 @@ MXS_BEGIN_DECLS */ extern const char CN_ACCOUNT[]; extern const char CN_ADDRESS[]; -extern const char CN_ARG_MAX[]; -extern const char CN_ARG_MIN[]; extern const char CN_ADMIN_AUTH[]; extern const char CN_ADMIN_ENABLED[]; -extern const char CN_ADMIN_LOG_AUTH_FAILURES[]; extern const char CN_ADMIN_HOST[]; +extern const char CN_ADMIN_LOG_AUTH_FAILURES[]; extern const char CN_ADMIN_PORT[]; -extern const char CN_ADMIN_SSL_KEY[]; -extern const char CN_ADMIN_SSL_CERT[]; extern const char CN_ADMIN_SSL_CA_CERT[]; +extern const char CN_ADMIN_SSL_CERT[]; +extern const char CN_ADMIN_SSL_KEY[]; extern const char CN_ARGUMENTS[]; +extern const char CN_ARG_MAX[]; +extern const char CN_ARG_MIN[]; extern const char CN_ATTRIBUTES[]; extern const char CN_AUTHENTICATOR[]; extern const char CN_AUTHENTICATOR_DIAGNOSTICS[]; @@ -113,12 +113,13 @@ extern const char CN_AUTO[]; extern const char CN_CACHE_SIZE[]; extern const char CN_CLASSIFY[]; extern const char CN_CONNECTION_TIMEOUT[]; -extern const char CN_DUMP_LAST_STATEMENTS[]; extern const char CN_DATA[]; extern const char CN_DEFAULT[]; extern const char CN_DESCRIPTION[]; extern const char CN_DISK_SPACE_THRESHOLD[]; +extern const char CN_DUMP_LAST_STATEMENTS[]; extern const char CN_ENABLE_ROOT_USER[]; +extern const char CN_EXTRA_PORT[]; extern const char CN_FIELDS[]; extern const char CN_FILTERS[]; extern const char CN_FILTER[]; @@ -128,8 +129,10 @@ extern const char CN_GATEWAY[]; extern const char CN_HAS_WHERE_CLAUSE[]; extern const char CN_ID[]; extern const char CN_INET[]; -extern const char CN_LISTENER[]; +extern const char CN_LINKS[]; extern const char CN_LISTENERS[]; +extern const char CN_LISTENER[]; +extern const char CN_LOAD_PERSISTED_CONFIGS[]; extern const char CN_LOCALHOST_MATCH_WILDCARD_HOST[]; extern const char CN_LOG_AUTH_WARNINGS[]; extern const char CN_LOG_THROTTLING[]; @@ -138,8 +141,8 @@ extern const char CN_MAX_CONNECTIONS[]; extern const char CN_MAX_RETRY_INTERVAL[]; extern const char CN_META[]; extern const char CN_METHOD[]; -extern const char CN_MODULE[]; extern const char CN_MODULES[]; +extern const char CN_MODULE[]; extern const char CN_MODULE_COMMAND[]; extern const char CN_MONITORS[]; extern const char CN_MONITOR[]; @@ -155,7 +158,6 @@ extern const char CN_PASSIVE[]; extern const char CN_PASSWORD[]; extern const char CN_POLL_SLEEP[]; extern const char CN_PORT[]; -extern const char CN_EXTRA_PORT[]; extern const char CN_PROTOCOL[]; extern const char CN_QUERY_CLASSIFIER[]; extern const char CN_QUERY_CLASSIFIER_ARGS[]; @@ -163,8 +165,6 @@ extern const char CN_QUERY_CLASSIFIER_CACHE_SIZE[]; extern const char CN_QUERY_RETRIES[]; extern const char CN_QUERY_RETRY_TIMEOUT[]; extern const char CN_RELATIONSHIPS[]; -extern const char CN_LINKS[]; -extern const char CN_LOAD_PERSISTED_CONFIGS[]; extern const char 
CN_REQUIRED[]; extern const char CN_RETAIN_LAST_STATEMENTS[]; extern const char CN_RETRY_ON_FAILURE[]; @@ -180,14 +180,14 @@ extern const char CN_SESSIONS[]; extern const char CN_SESSION_TRACK_TRX_STATE[]; extern const char CN_SKIP_PERMISSION_CHECKS[]; extern const char CN_SOCKET[]; -extern const char CN_STATE[]; extern const char CN_SSL[]; extern const char CN_SSL_CA_CERT[]; extern const char CN_SSL_CERT[]; extern const char CN_SSL_CERT_VERIFY_DEPTH[]; -extern const char CN_SSL_VERIFY_PEER_CERTIFICATE[]; extern const char CN_SSL_KEY[]; +extern const char CN_SSL_VERIFY_PEER_CERTIFICATE[]; extern const char CN_SSL_VERSION[]; +extern const char CN_STATE[]; extern const char CN_STRIP_DB_ESC[]; extern const char CN_SUBSTITUTE_VARIABLES[]; extern const char CN_THREADS[]; @@ -196,8 +196,8 @@ extern const char CN_TICKS[]; extern const char CN_TYPE[]; extern const char CN_TYPE_MASK[]; extern const char CN_UNIX[]; -extern const char CN_USER[]; extern const char CN_USERS[]; +extern const char CN_USER[]; extern const char CN_VERSION_STRING[]; extern const char CN_WEIGHTBY[]; extern const char CN_WRITEQ_HIGH_WATER[]; diff --git a/server/core/config.cc b/server/core/config.cc index af4ff6b79..bbf906559 100644 --- a/server/core/config.cc +++ b/server/core/config.cc @@ -71,17 +71,17 @@ using std::string; const char CN_ACCOUNT[] = "account"; const char CN_ADDRESS[] = "address"; -const char CN_ARG_MAX[] = "arg_max"; -const char CN_ARG_MIN[] = "arg_min"; const char CN_ADMIN_AUTH[] = "admin_auth"; const char CN_ADMIN_ENABLED[] = "admin_enabled"; -const char CN_ADMIN_LOG_AUTH_FAILURES[] = "admin_log_auth_failures"; const char CN_ADMIN_HOST[] = "admin_host"; +const char CN_ADMIN_LOG_AUTH_FAILURES[] = "admin_log_auth_failures"; const char CN_ADMIN_PORT[] = "admin_port"; -const char CN_ADMIN_SSL_KEY[] = "admin_ssl_key"; -const char CN_ADMIN_SSL_CERT[] = "admin_ssl_cert"; const char CN_ADMIN_SSL_CA_CERT[] = "admin_ssl_ca_cert"; +const char CN_ADMIN_SSL_CERT[] = "admin_ssl_cert"; +const char CN_ADMIN_SSL_KEY[] = "admin_ssl_key"; const char CN_ARGUMENTS[] = "arguments"; +const char CN_ARG_MAX[] = "arg_max"; +const char CN_ARG_MIN[] = "arg_min"; const char CN_ATTRIBUTES[] = "attributes"; const char CN_AUTHENTICATOR[] = "authenticator"; const char CN_AUTHENTICATOR_DIAGNOSTICS[] = "authenticator_diagnostics"; @@ -100,6 +100,7 @@ const char CN_DESCRIPTION[] = "description"; const char CN_DISK_SPACE_THRESHOLD[] = "disk_space_threshold"; const char CN_DUMP_LAST_STATEMENTS[] = "dump_last_statements"; const char CN_ENABLE_ROOT_USER[] = "enable_root_user"; +const char CN_EXTRA_PORT[] = "extra_port"; const char CN_FIELDS[] = "fields"; const char CN_FILTERS[] = "filters"; const char CN_FILTER[] = "filter"; @@ -109,9 +110,12 @@ const char CN_GATEWAY[] = "gateway"; const char CN_HAS_WHERE_CLAUSE[] = "has_where_clause"; const char CN_ID[] = "id"; const char CN_INET[] = "inet"; -const char CN_LISTENER[] = "listener"; +const char CN_LINKS[] = "links"; const char CN_LISTENERS[] = "listeners"; +const char CN_LISTENER[] = "listener"; +const char CN_LOAD_PERSISTED_CONFIGS[] = "load_persisted_configs"; const char CN_LOCALHOST_MATCH_WILDCARD_HOST[] = "localhost_match_wildcard_host"; +const char CN_LOCAL_ADDRESS[] = "local_address"; const char CN_LOG_AUTH_WARNINGS[] = "log_auth_warnings"; const char CN_LOG_THROTTLING[] = "log_throttling"; const char CN_MAXSCALE[] = "maxscale"; @@ -119,8 +123,8 @@ const char CN_MAX_CONNECTIONS[] = "max_connections"; const char CN_MAX_RETRY_INTERVAL[] = "max_retry_interval"; const char CN_META[] = "meta"; 
const char CN_METHOD[] = "method"; -const char CN_MODULE[] = "module"; const char CN_MODULES[] = "modules"; +const char CN_MODULE[] = "module"; const char CN_MODULE_COMMAND[] = "module_command"; const char CN_MONITORS[] = "monitors"; const char CN_MONITOR[] = "monitor"; @@ -136,7 +140,6 @@ const char CN_PASSIVE[] = "passive"; const char CN_PASSWORD[] = "password"; const char CN_POLL_SLEEP[] = "poll_sleep"; const char CN_PORT[] = "port"; -const char CN_EXTRA_PORT[] = "extra_port"; const char CN_PROTOCOL[] = "protocol"; const char CN_QUERY_CLASSIFIER[] = "query_classifier"; const char CN_QUERY_CLASSIFIER_ARGS[] = "query_classifier_args"; @@ -144,9 +147,6 @@ const char CN_QUERY_CLASSIFIER_CACHE_SIZE[] = "query_classifier_cache_size"; const char CN_QUERY_RETRIES[] = "query_retries"; const char CN_QUERY_RETRY_TIMEOUT[] = "query_retry_timeout"; const char CN_RELATIONSHIPS[] = "relationships"; -const char CN_LINKS[] = "links"; -const char CN_LOAD_PERSISTED_CONFIGS[] = "load_persisted_configs"; -const char CN_LOCAL_ADDRESS[] = "local_address"; const char CN_REQUIRED[] = "required"; const char CN_RETAIN_LAST_STATEMENTS[] = "retain_last_statements"; const char CN_RETRY_ON_FAILURE[] = "retry_on_failure"; @@ -163,14 +163,14 @@ const char CN_SESSION_TRACK_TRX_STATE[] = "session_track_trx_state"; const char CN_SKIP_PERMISSION_CHECKS[] = "skip_permission_checks"; const char CN_SOCKET[] = "socket"; const char CN_SQL_MODE[] = "sql_mode"; -const char CN_STATE[] = "state"; const char CN_SSL[] = "ssl"; const char CN_SSL_CA_CERT[] = "ssl_ca_cert"; const char CN_SSL_CERT[] = "ssl_cert"; const char CN_SSL_CERT_VERIFY_DEPTH[] = "ssl_cert_verify_depth"; -const char CN_SSL_VERIFY_PEER_CERTIFICATE[] = "ssl_verify_peer_certificate"; const char CN_SSL_KEY[] = "ssl_key"; +const char CN_SSL_VERIFY_PEER_CERTIFICATE[] = "ssl_verify_peer_certificate"; const char CN_SSL_VERSION[] = "ssl_version"; +const char CN_STATE[] = "state"; const char CN_STRIP_DB_ESC[] = "strip_db_esc"; const char CN_SUBSTITUTE_VARIABLES[] = "substitute_variables"; const char CN_THREADS[] = "threads"; @@ -179,9 +179,9 @@ const char CN_TICKS[] = "ticks"; const char CN_TYPE[] = "type"; const char CN_TYPE_MASK[] = "type_mask"; const char CN_UNIX[] = "unix"; -const char CN_USER[] = "user"; const char CN_USERS[] = "users"; const char CN_USERS_REFRESH_TIME[] = "users_refresh_time"; +const char CN_USER[] = "user"; const char CN_VERSION_STRING[] = "version_string"; const char CN_WEIGHTBY[] = "weightby"; const char CN_WRITEQ_HIGH_WATER[] = "writeq_high_water"; From 0cb15976e8aafab3d65d327d7421bec34bd15b1e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Tue, 2 Apr 2019 09:32:44 +0300 Subject: [PATCH 10/22] Backport: Add force option to set endpoint The new `force=yes` option closes all connections to the server that is being put into maintenance mode. This will immediately close all open connections to the server without allowing results to return. 
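Based on the MaxCtrl option and REST API parameter added above, typical usage looks like the following (the server name is illustrative; the REST form matches the documentation added later in this series):

```
maxctrl set server db-server-1 maintenance --force

# Equivalent REST API request
PUT /v1/servers/db-server-1/set?state=maintenance&force=yes
```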
--- include/maxscale/config.h | 2 ++ maxctrl/lib/set.js | 9 +++++++++ server/core/config.cc | 2 ++ server/core/resource.cc | 5 +++++ 4 files changed, 18 insertions(+) diff --git a/include/maxscale/config.h b/include/maxscale/config.h index c609255c8..3fee697ab 100644 --- a/include/maxscale/config.h +++ b/include/maxscale/config.h @@ -124,6 +124,7 @@ extern const char CN_FIELDS[]; extern const char CN_FILTERS[]; extern const char CN_FILTER[]; extern const char CN_FILTER_DIAGNOSTICS[]; +extern const char CN_FORCE[]; extern const char CN_FUNCTIONS[]; extern const char CN_GATEWAY[]; extern const char CN_HAS_WHERE_CLAUSE[]; @@ -202,6 +203,7 @@ extern const char CN_VERSION_STRING[]; extern const char CN_WEIGHTBY[]; extern const char CN_WRITEQ_HIGH_WATER[]; extern const char CN_WRITEQ_LOW_WATER[]; +extern const char CN_YES[]; /* * Global configuration items that are read (or pre_parsed) to be available for diff --git a/maxctrl/lib/set.js b/maxctrl/lib/set.js index af230b93f..72fc75836 100644 --- a/maxctrl/lib/set.js +++ b/maxctrl/lib/set.js @@ -17,6 +17,12 @@ exports.desc = 'Set object state' exports.handler = function() {} exports.builder = function(yargs) { yargs + .group(['force'], 'Set options:') + .option('force', { + describe: 'Forcefully close all connections to the target server', + type: 'boolean', + default: false + }) .command('server ', 'Set server state', function(yargs) { return yargs.epilog('If is monitored by a monitor, this command should ' + 'only be used to set the server into the `maintenance` state. ' + @@ -27,6 +33,9 @@ exports.builder = function(yargs) { .usage('Usage: set server ') }, function(argv) { var target = 'servers/' + argv.server + '/set?state=' + argv.state + if (argv.force) { + target += '&force=yes' + } maxctrl(argv, function(host) { return doRequest(host, target, null, {method: 'PUT'}) }) diff --git a/server/core/config.cc b/server/core/config.cc index bbf906559..0fc27870f 100644 --- a/server/core/config.cc +++ b/server/core/config.cc @@ -105,6 +105,7 @@ const char CN_FIELDS[] = "fields"; const char CN_FILTERS[] = "filters"; const char CN_FILTER[] = "filter"; const char CN_FILTER_DIAGNOSTICS[] = "filter_diagnostics"; +const char CN_FORCE[] = "force"; const char CN_FUNCTIONS[] = "functions"; const char CN_GATEWAY[] = "gateway"; const char CN_HAS_WHERE_CLAUSE[] = "has_where_clause"; @@ -186,6 +187,7 @@ const char CN_VERSION_STRING[] = "version_string"; const char CN_WEIGHTBY[] = "weightby"; const char CN_WRITEQ_HIGH_WATER[] = "writeq_high_water"; const char CN_WRITEQ_LOW_WATER[] = "writeq_low_water"; +const char CN_YES[] = "yes"; extern const char CN_LOGDIR[] = "logdir"; diff --git a/server/core/resource.cc b/server/core/resource.cc index 8e359a56c..c62215f17 100644 --- a/server/core/resource.cc +++ b/server/core/resource.cc @@ -774,6 +774,11 @@ HttpResponse cb_set_server(const HttpRequest& request) string errmsg; if (mxs::server_set_status(server, opt, &errmsg)) { + if (status_is_in_maint(opt) && request.get_option(CN_FORCE) == CN_YES) + { + dcb_hangup_foreach(server); + } + return HttpResponse(MHD_HTTP_NO_CONTENT); } else From a102efa01fbbb1fe28e9ab6c3081f6f79e9b2669 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Tue, 2 Apr 2019 09:41:55 +0300 Subject: [PATCH 11/22] Backport: Document the force option for set Added documentation for the new option and mentioned it in the release notes. 
--- Documentation/REST-API/Resources-Server.md | 12 +++++ .../MaxScale-2.3.6-Release-Notes.md | 48 +++++++++++++++++++ 2 files changed, 60 insertions(+) create mode 100644 Documentation/Release-Notes/MaxScale-2.3.6-Release-Notes.md diff --git a/Documentation/REST-API/Resources-Server.md b/Documentation/REST-API/Resources-Server.md index c9acaaff1..4ef3321ee 100644 --- a/Documentation/REST-API/Resources-Server.md +++ b/Documentation/REST-API/Resources-Server.md @@ -574,6 +574,18 @@ the following URL must be made: PUT /v1/servers/db-server-1/set?state=maintenance ``` +This endpoint also supports the `force=yes` parameter that will cause all +connections to the server to be closed if `state=maintenance` is also set. By +default setting a server into maintenance mode will cause connections to be +closed only after the next request is sent. + +The following example forcefully closes all connections to server _db-server-1_ +and sets it into maintenance mode: + +``` +PUT /v1/servers/db-server-1/set?state=maintenance&force=yes +``` + #### Response Server state modified: diff --git a/Documentation/Release-Notes/MaxScale-2.3.6-Release-Notes.md b/Documentation/Release-Notes/MaxScale-2.3.6-Release-Notes.md new file mode 100644 index 000000000..0ac6a919e --- /dev/null +++ b/Documentation/Release-Notes/MaxScale-2.3.6-Release-Notes.md @@ -0,0 +1,48 @@ +# MariaDB MaxScale 2.3.6 Release Notes + +Release 2.3.6 is a GA release. + +This document describes the changes in release 2.3.6, when compared to the +previous release in the same series. + +For any problems you encounter, please consider submitting a bug +report on [our Jira](https://jira.mariadb.org/projects/MXS). + +## New Features + +* [MXS-2417](https://jira.mariadb.org/browse/MXS-2417) MaxScale main config should take precedence over runtime config on restart + +### REST API & MaxCtrl: Hard maintenance mode + +The new `--force` option for the `set server` command in MaxCtrl allows all +connections to the server in question to be closed when it is set into +maintenance mode. This causes idle connections to be closed immediately. + +For more information, read the +[REST-API](../REST-API/Resources-Server.md#set-server-state) documentation for +the `set` endpoint. + +## Bug fixes + +* [MXS-2419](https://jira.mariadb.org/browse/MXS-2419) Hangs on query during multiple transaction replays +* [MXS-2418](https://jira.mariadb.org/browse/MXS-2418) Crash on transaction replay if log_info is on and session starts with no master + +## Known Issues and Limitations + +There are some limitations and known issues within this version of MaxScale. +For more information, please refer to the [Limitations](../About/Limitations.md) document. + +## Packaging + +RPM and Debian packages are provided for supported the Linux distributions. + +Packages can be downloaded [here](https://mariadb.com/downloads/mariadb-tx/maxscale). + +## Source Code + +The source code of MaxScale is tagged at GitHub with a tag, which is identical +with the version of MaxScale. For instance, the tag of version X.Y.Z of MaxScale +is `maxscale-X.Y.Z`. Further, the default branch is always the latest GA version +of MaxScale. + +The source code is available [here](https://github.com/mariadb-corporation/MaxScale). From 55bb3e9250efa38729466dd2b5a215a648f74ffa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Sun, 7 Apr 2019 16:21:45 +0300 Subject: [PATCH 12/22] Make tests less verbose The numeric values of the labels aren't of value when inspecting test results. 
For this reason, it makes sense to put them behind the verbose flag so that they are only printed when the test framework itself is being debugged. --- maxscale-system-test/labels_table.cpp | 13 +++++++++++-- maxscale-system-test/nodes.cpp | 5 +++++ maxscale-system-test/testconnections.cpp | 15 +++++++++++---- maxscale-system-test/testconnections.h | 2 +- 4 files changed, 28 insertions(+), 7 deletions(-) diff --git a/maxscale-system-test/labels_table.cpp b/maxscale-system-test/labels_table.cpp index 79910be7f..c4a6e3a6e 100644 --- a/maxscale-system-test/labels_table.cpp +++ b/maxscale-system-test/labels_table.cpp @@ -2,6 +2,7 @@ #include #include #include "labels_table.h" +#include "testconnections.h" std::string get_mdbci_lables(const char *labels_string) { @@ -9,12 +10,20 @@ std::string get_mdbci_lables(const char *labels_string) for (size_t i = 0; i < sizeof(labels_table) / sizeof(labels_table_t); i++) { - printf("%lu\t %s\n", i, labels_table[i].test_label); + if (TestConnections::verbose) + { + printf("%lu\t %s\n", i, labels_table[i].test_label); + } + if (strstr(labels_string, labels_table[i].test_label)) { mdbci_labels += "," + std::string(labels_table[i].mdbci_label); } } - printf("mdbci labels %s\n", mdbci_labels.c_str()); + + if (TestConnections::verbose) + { + printf("mdbci labels %s\n", mdbci_labels.c_str()); + } return mdbci_labels; } diff --git a/maxscale-system-test/nodes.cpp b/maxscale-system-test/nodes.cpp index 7431182bd..048fc97a8 100644 --- a/maxscale-system-test/nodes.cpp +++ b/maxscale-system-test/nodes.cpp @@ -168,6 +168,11 @@ int Nodes::ssh_node(int node, const char* ssh, bool sudo) verbose ? "" : " > /dev/null"); } + if (verbose) + { + std::cout << ssh << std::endl; + } + int rc = 1; FILE* in = popen(cmd, "w"); diff --git a/maxscale-system-test/testconnections.cpp b/maxscale-system-test/testconnections.cpp index 588d7a1f6..28661e152 100644 --- a/maxscale-system-test/testconnections.cpp +++ b/maxscale-system-test/testconnections.cpp @@ -107,6 +107,8 @@ void TestConnections::restart_galera(bool value) maxscale::restart_galera = value; } +bool TestConnections::verbose = false; + TestConnections::TestConnections(int argc, char* argv[]) : enable_timeouts(true) , global_result(0) @@ -114,7 +116,6 @@ TestConnections::TestConnections(int argc, char* argv[]) , local_maxscale(false) , no_backend_log_copy(false) , no_maxscale_log_copy(false) - , verbose(false) , smoke(true) , binlog_cmd_option(0) , ssl(false) @@ -288,7 +289,7 @@ TestConnections::TestConnections(int argc, char* argv[]) mdbci_call_needed = true; tprintf("Machines with label '%s' are not running, MDBCI UP call is needed", label.c_str()); } - else + else if (verbose) { tprintf("Machines with label '%s' are running, MDBCI UP call is not needed", label.c_str()); } @@ -306,13 +307,19 @@ TestConnections::TestConnections(int argc, char* argv[]) if (mdbci_labels.find(std::string("REPL_BACKEND")) == std::string::npos) { no_repl = true; - tprintf("No need to use Master/Slave"); + if (verbose) + { + tprintf("No need to use Master/Slave"); + } } if (mdbci_labels.find(std::string("GALERA_BACKEND")) == std::string::npos) { no_galera = true; - tprintf("No need to use Galera"); + if (verbose) + { + tprintf("No need to use Galera"); + } } get_logs_command = (char *) malloc(strlen(test_dir) + 14); diff --git a/maxscale-system-test/testconnections.h b/maxscale-system-test/testconnections.h index 369f7b34e..2f5160d61 100644 --- a/maxscale-system-test/testconnections.h +++ b/maxscale-system-test/testconnections.h @@ -186,7 +186,7 @@ public: /** * @brief verbose
if true more printing activated */ - bool verbose; + static bool verbose; /** * @brief smoke if true all tests are executed in quick mode From 5e3af05d48870f25b1ca1c48bff0424b3ddf16b3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Sun, 7 Apr 2019 19:41:38 +0300 Subject: [PATCH 13/22] Speed up test startup The VM connectivity and log truncation is now done in parallel. --- maxscale-system-test/mariadb_nodes.cpp | 19 +++++++++------ maxscale-system-test/nodes.cpp | 30 ++++++++++-------------- maxscale-system-test/nodes.h | 6 ++--- maxscale-system-test/testconnections.cpp | 6 ++--- 4 files changed, 31 insertions(+), 30 deletions(-) diff --git a/maxscale-system-test/mariadb_nodes.cpp b/maxscale-system-test/mariadb_nodes.cpp index 0fa2b7ee6..9bf8efdd1 100644 --- a/maxscale-system-test/mariadb_nodes.cpp +++ b/maxscale-system-test/mariadb_nodes.cpp @@ -18,6 +18,8 @@ #include #include #include +#include +#include #include "envv.h" using std::cout; @@ -1084,19 +1086,22 @@ std::string Mariadb_nodes::get_lowest_version() int Mariadb_nodes::truncate_mariadb_logs() { - int local_result = 0; + std::vector> results; + for (int node = 0; node < N; node++) { if (strcmp(IP[node], "127.0.0.1") != 0) { - local_result += ssh_node_f(node, true, - "truncate -s 0 /var/lib/mysql/*.err;" - "truncate -s 0 /var/log/syslog;" - "truncate -s 0 /var/log/messages;" - "rm -f /etc/my.cnf.d/binlog_enc*;"); + auto f = std::async(std::launch::async, &Nodes::ssh_node_f, this, node, true, + "truncate -s 0 /var/lib/mysql/*.err;" + "truncate -s 0 /var/log/syslog;" + "truncate -s 0 /var/log/messages;" + "rm -f /etc/my.cnf.d/binlog_enc*;"); + results.push_back(std::move(f)); } } - return local_result; + + return std::count_if(results.begin(), results.end(), std::mem_fn(&std::future::get)); } int Mariadb_nodes::configure_ssl(bool require) diff --git a/maxscale-system-test/nodes.cpp b/maxscale-system-test/nodes.cpp index 048fc97a8..412fcafee 100644 --- a/maxscale-system-test/nodes.cpp +++ b/maxscale-system-test/nodes.cpp @@ -2,6 +2,9 @@ #include #include #include +#include +#include +#include #include "envv.h" @@ -9,36 +12,29 @@ Nodes::Nodes() { } -int Nodes::check_node_ssh(int node) +bool Nodes::check_node_ssh(int node) { - int res = 0; + bool res = true; - if (ssh_node(node, (char*) "ls > /dev/null", false) != 0) + if (ssh_node(node, "ls > /dev/null", false) != 0) { - printf("Node %d is not available\n", node); - fflush(stdout); - res = 1; - } - else - { - fflush(stdout); + std::cout << "Node " << node << " is not available" << std::endl; + res = false; } + return res; } -int Nodes::check_nodes() +bool Nodes::check_nodes() { - std::cout << "Checking nodes..." 
<< std::endl; + std::vector> f; for (int i = 0; i < N; i++) { - if (check_node_ssh(i) != 0) - { - return 1; - } + f.push_back(std::async(std::launch::async, &Nodes::check_node_ssh, this, i)); } - return 0; + return std::all_of(f.begin(), f.end(), std::mem_fn(&std::future::get)); } void Nodes::generate_ssh_cmd(char* cmd, int node, const char* ssh, bool sudo) diff --git a/maxscale-system-test/nodes.h b/maxscale-system-test/nodes.h index 615fee934..ba2b26192 100644 --- a/maxscale-system-test/nodes.h +++ b/maxscale-system-test/nodes.h @@ -168,9 +168,9 @@ public: /** * @brief Check node via ssh and restart it if it is not resposible * @param node Node index - * @return 0 if node is ok, 1 if start failed + * @return True if node is ok, false if start failed */ - int check_nodes(); + bool check_nodes(); /** * @brief read_basic_env Read IP, sshkey, etc - common parameters for all kinds of nodes @@ -206,5 +206,5 @@ public: int stop_vm(int node); private: - int check_node_ssh(int node); + bool check_node_ssh(int node); }; diff --git a/maxscale-system-test/testconnections.cpp b/maxscale-system-test/testconnections.cpp index 28661e152..a57126054 100644 --- a/maxscale-system-test/testconnections.cpp +++ b/maxscale-system-test/testconnections.cpp @@ -343,7 +343,7 @@ TestConnections::TestConnections(int argc, char* argv[]) repl->use_ipv6 = use_ipv6; repl->take_snapshot_command = take_snapshot_command; repl->revert_snapshot_command = revert_snapshot_command; - if (repl->check_nodes()) + if (!repl->check_nodes()) { if (call_mdbci("--recreate")) { @@ -363,7 +363,7 @@ TestConnections::TestConnections(int argc, char* argv[]) galera->use_ipv6 = false; galera->take_snapshot_command = take_snapshot_command; galera->revert_snapshot_command = revert_snapshot_command; - if (galera->check_nodes()) + if (!galera->check_nodes()) { if (call_mdbci("--recreate")) { @@ -377,7 +377,7 @@ TestConnections::TestConnections(int argc, char* argv[]) } maxscales = new Maxscales("maxscale", test_dir, verbose, use_valgrind, network_config); - if (maxscales->check_nodes() || + if (!maxscales->check_nodes() || ((maxscales->N < 2) && (mdbci_labels.find(std::string("SECOND_MAXSCALE")) != std::string::npos)) ) { From 0932d1016951192ea9696c88d38eee1f13e8eb9a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Sun, 7 Apr 2019 20:11:07 +0300 Subject: [PATCH 14/22] Do node checks in parallel The checking of MariaDB and Galera nodes is now done asynchronously while the MaxScale check is done which leads to faster testing. Rough measurements show that doing all the work in parallel reduces test startup time by two seconds. Most of the time appears to still be in the MaxScale startup which takes on average three to four seconds per test. 
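The pattern used by this change, reduced to a self-contained sketch (the function names below are placeholders, not the real test framework API):

```
#include <algorithm>
#include <functional>
#include <future>
#include <vector>

// Hypothetical per-node check standing in for Nodes::check_node_ssh().
static bool check_one(int node)
{
    return node >= 0;
}

static bool check_all(int n)
{
    std::vector<std::future<bool>> futures;

    for (int i = 0; i < n; i++)
    {
        // Launch every check on its own thread instead of running them back to back.
        futures.push_back(std::async(std::launch::async, check_one, i));
    }

    // future::get() blocks until the corresponding check has finished.
    return std::all_of(futures.begin(), futures.end(),
                       std::mem_fn(&std::future<bool>::get));
}
```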
--- maxscale-system-test/testconnections.cpp | 102 ++++++++++------------- maxscale-system-test/testconnections.h | 10 ++- 2 files changed, 50 insertions(+), 62 deletions(-) diff --git a/maxscale-system-test/testconnections.cpp b/maxscale-system-test/testconnections.cpp index a57126054..b733d6fbd 100644 --- a/maxscale-system-test/testconnections.cpp +++ b/maxscale-system-test/testconnections.cpp @@ -12,6 +12,7 @@ #include #include #include +#include #include #include "mariadb_func.h" @@ -337,19 +338,16 @@ TestConnections::TestConnections(int argc, char* argv[]) exit(0); } + std::future repl_future; + std::future galera_future; + if (!no_repl) { repl = new Mariadb_nodes("node", test_dir, verbose, network_config); repl->use_ipv6 = use_ipv6; repl->take_snapshot_command = take_snapshot_command; repl->revert_snapshot_command = revert_snapshot_command; - if (!repl->check_nodes()) - { - if (call_mdbci("--recreate")) - { - exit(MDBCI_FAUILT); - } - } + repl_future = std::async(std::launch::async, &Mariadb_nodes::check_nodes, repl); } else { @@ -363,13 +361,7 @@ TestConnections::TestConnections(int argc, char* argv[]) galera->use_ipv6 = false; galera->take_snapshot_command = take_snapshot_command; galera->revert_snapshot_command = revert_snapshot_command; - if (!galera->check_nodes()) - { - if (call_mdbci("--recreate")) - { - exit(MDBCI_FAUILT); - } - } + galera_future = std::async(std::launch::async, &Galera_nodes::check_nodes, galera); } else { @@ -377,23 +369,26 @@ TestConnections::TestConnections(int argc, char* argv[]) } maxscales = new Maxscales("maxscale", test_dir, verbose, use_valgrind, network_config); - if (!maxscales->check_nodes() || - ((maxscales->N < 2) && (mdbci_labels.find(std::string("SECOND_MAXSCALE")) != std::string::npos)) - ) + + bool maxscale_ok = maxscales->check_nodes(); + bool repl_ok = no_repl || repl_future.get(); + bool galera_ok = no_galera || galera_future.get(); + bool node_error = !maxscale_ok || !repl_ok || !galera_ok; + + if (node_error || too_many_maxscales()) { + tprintf("Recreating VMs: %s", node_error ? 
"node check failed" : "too many maxscales"); + if (call_mdbci("--recreate")) { exit(MDBCI_FAUILT); } } - if (reinstall_maxscale) + if (reinstall_maxscale && reinstall_maxscales()) { - if (reinstall_maxscales()) - { - tprintf("Failed to install Maxscale: target is %s", target); - exit(MDBCI_FAUILT); - } + tprintf("Failed to install Maxscale: target is %s", target); + exit(MDBCI_FAUILT); } std::string src = std::string(test_dir) + "/mdbci/add_core_cnf.sh"; @@ -414,39 +409,33 @@ TestConnections::TestConnections(int argc, char* argv[]) } } - if (repl) + if (repl && maxscale::required_repl_version.length()) { - if (maxscale::required_repl_version.length()) - { - int ver_repl_required = get_int_version(maxscale::required_repl_version); - std::string ver_repl = repl->get_lowest_version(); - int int_ver_repl = get_int_version(ver_repl); + int ver_repl_required = get_int_version(maxscale::required_repl_version); + std::string ver_repl = repl->get_lowest_version(); + int int_ver_repl = get_int_version(ver_repl); - if (int_ver_repl < ver_repl_required) - { - tprintf("Test requires a higher version of backend servers, skipping test."); - tprintf("Required version: %s", maxscale::required_repl_version.c_str()); - tprintf("Master-slave version: %s", ver_repl.c_str()); - exit(0); - } + if (int_ver_repl < ver_repl_required) + { + tprintf("Test requires a higher version of backend servers, skipping test."); + tprintf("Required version: %s", maxscale::required_repl_version.c_str()); + tprintf("Master-slave version: %s", ver_repl.c_str()); + exit(0); } } - if (galera) + if (galera && maxscale::required_galera_version.length()) { - if (maxscale::required_galera_version.length()) - { - int ver_galera_required = get_int_version(maxscale::required_galera_version); - std::string ver_galera = galera->get_lowest_version(); - int int_ver_galera = get_int_version(ver_galera); + int ver_galera_required = get_int_version(maxscale::required_galera_version); + std::string ver_galera = galera->get_lowest_version(); + int int_ver_galera = get_int_version(ver_galera); - if (int_ver_galera < ver_galera_required) - { - tprintf("Test requires a higher version of backend servers, skipping test."); - tprintf("Required version: %s", maxscale::required_galera_version.c_str()); - tprintf("Galera version: %s", ver_galera.c_str()); - exit(0); - } + if (int_ver_galera < ver_galera_required) + { + tprintf("Test requires a higher version of backend servers, skipping test."); + tprintf("Required version: %s", maxscale::required_galera_version.c_str()); + tprintf("Galera version: %s", ver_galera.c_str()); + exit(0); } } @@ -456,22 +445,15 @@ TestConnections::TestConnections(int argc, char* argv[]) galera->start_replication(); } - if (maxscale::check_nodes) { - if (repl) + if (repl && !repl->fix_replication()) { - if (!repl->fix_replication() ) - { - exit(BROKEN_VM_FAUILT); - } + exit(BROKEN_VM_FAUILT); } - if (galera) + if (galera && !galera->fix_replication()) { - if (!galera->fix_replication()) - { - exit(BROKEN_VM_FAUILT); - } + exit(BROKEN_VM_FAUILT); } } diff --git a/maxscale-system-test/testconnections.h b/maxscale-system-test/testconnections.h index 2f5160d61..aebcb86aa 100644 --- a/maxscale-system-test/testconnections.h +++ b/maxscale-system-test/testconnections.h @@ -700,9 +700,15 @@ public: private: void report_result(const char* format, va_list argp); - void copy_one_mariadb_log(Mariadb_nodes *nrepl, int i, std::string filename); + void copy_one_mariadb_log(Mariadb_nodes* nrepl, int i, std::string filename); - std::vector> 
m_on_destroy; + bool too_many_maxscales() const + { + return maxscales->N < 2 + && mdbci_labels.find("SECOND_MAXSCALE") != std::string::npos; + } + + std::vector> m_on_destroy; }; /** From 9a5b60a0711016a7554ff06c384e28515b0f5b5a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Tue, 9 Apr 2019 10:00:50 +0300 Subject: [PATCH 15/22] Add forced maintenance mode tests Tested that the force option works and is accepted. --- maxctrl/test/states.js | 14 ++++++++++++++ server/core/test/rest-api/test/server.js | 12 ++++++++++++ 2 files changed, 26 insertions(+) diff --git a/maxctrl/test/states.js b/maxctrl/test/states.js index f104df737..502276bd7 100644 --- a/maxctrl/test/states.js +++ b/maxctrl/test/states.js @@ -22,6 +22,20 @@ describe("Set/Clear Commands", function() { }) }) + it('force maintenance mode', function() { + return verifyCommand('set server server1 maintenance --force', 'servers/server1') + .then(function(res) { + res.data.attributes.state.should.match(/Maintenance/) + }) + }) + + it('clear maintenance mode', function() { + return verifyCommand('clear server server1 maintenance', 'servers/server1') + .then(function(res) { + res.data.attributes.state.should.not.match(/Maintenance/) + }) + }) + it('reject set incorrect state', function() { return doCommand('set server server2 something') .should.be.rejected diff --git a/server/core/test/rest-api/test/server.js b/server/core/test/rest-api/test/server.js index 36bc95ec7..ca6cd7da2 100644 --- a/server/core/test/rest-api/test/server.js +++ b/server/core/test/rest-api/test/server.js @@ -156,6 +156,18 @@ describe("Server State", function() { }) }); + it("force server into maintenance", function() { + return request.put(base_url + "/servers/" + server.data.id + "/set?state=maintenance&force=yes") + .then(function(resp) { + return request.get(base_url + "/servers/" + server.data.id) + }) + .then(function(resp) { + var srv = JSON.parse(resp) + srv.data.attributes.state.should.match(/Maintenance/) + srv.data.attributes.statistics.connections.should.be.equal(0) + }) + }); + it("clear maintenance", function() { return request.put(base_url + "/servers/" + server.data.id + "/clear?state=maintenance") .then(function(resp) { From bc5f9da6c44befef20aa4af1d9662153d0bbb7cd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Tue, 9 Apr 2019 10:56:38 +0300 Subject: [PATCH 16/22] Add classification test case Also removed the dead code that was never used to get coverage to 100%. 
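For reference, the command exercised by the new test can also be run by hand; the statements here are arbitrary examples, not required syntax:

```
maxctrl classify "SELECT 1"
maxctrl --tsv classify "This-should-fail"
```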
--- maxctrl/lib/classify.js | 20 +++++++------------- maxctrl/test/classify.js | 27 +++++++++++++++++++++++++++ 2 files changed, 34 insertions(+), 13 deletions(-) create mode 100644 maxctrl/test/classify.js diff --git a/maxctrl/lib/classify.js b/maxctrl/lib/classify.js index 0f50f6fe6..06d1928de 100644 --- a/maxctrl/lib/classify.js +++ b/maxctrl/lib/classify.js @@ -28,11 +28,13 @@ exports.handler = function (argv) { return doRequest(host, 'maxscale/query_classifier/classify?sql=' + argv.statement, (res) => { - var a = res.data.attributes.parameters.functions.map((f) => { - return f.name + ': (' + f.arguments.join(', ') + ')' - }); + if (res.data.attributes.parameters.functions) { + var a = res.data.attributes.parameters.functions.map((f) => { + return f.name + ': (' + f.arguments.join(', ') + ')' + }); - res.data.attributes.parameters.functions = a; + res.data.attributes.parameters.functions = a; + } return formatResource(classify_fields, res.data) }) @@ -44,14 +46,6 @@ exports.builder = function(yargs) { .epilog('Classify the statement using MaxScale and display the result. ' + 'The possible values for "Parse result", "Type mask" and "Operation" ' + 'can be looked up in ' + - 'https://github.com/mariadb-corporation/MaxScale/blob/' + - '2.3/include/maxscale/query_classifier.h') + 'https://github.com/mariadb-corporation/MaxScale/blob/2.3/include/maxscale/query_classifier.h') .help() - .command('*', 'the default command', {}, function(argv) { - console.log("*"); - maxctrl(argv, function(host) { - console.log(argv.statement); - return error('Unknown command. See output of `help stop` for a list of commands.') - }) - }) } diff --git a/maxctrl/test/classify.js b/maxctrl/test/classify.js new file mode 100644 index 000000000..d50892182 --- /dev/null +++ b/maxctrl/test/classify.js @@ -0,0 +1,27 @@ +require('../test_utils.js')() + +describe("Classify Commands", function() { + before(startMaxScale) + + it('classifies query', function() { + return doCommand('--tsv classify SELECT\t1') + .should.eventually.match(/QC_QUERY_PARSED/) + }) + + it('classifies query with function', function() { + return doCommand('--tsv classify SELECT\tspecial_function("hello",5)') + .should.eventually.match(/special_function/) + }) + + it('classifies invalid query', function() { + return doCommand('--tsv classify This-should-fail') + .should.eventually.match(/QC_QUERY_INVALID/) + }) + + it('rejects no query', function() { + return doCommand('classify') + .should.be.rejected + }) + + after(stopMaxScale) +}); From e6526dd9eaf76a3090d6a229efa6740fb37d8121 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Fri, 29 Mar 2019 14:14:26 +0200 Subject: [PATCH 17/22] Add extra info logging to readwritesplit Added logging into RWBackend reply state processing code to know more. 
--- include/maxscale/protocol/rwbackend.hh | 21 +++++++++++++++++++++ server/modules/protocol/MySQL/rwbackend.cc | 3 +++ 2 files changed, 24 insertions(+) diff --git a/include/maxscale/protocol/rwbackend.hh b/include/maxscale/protocol/rwbackend.hh index 3dcbfdef8..d8b1841d5 100644 --- a/include/maxscale/protocol/rwbackend.hh +++ b/include/maxscale/protocol/rwbackend.hh @@ -54,6 +54,27 @@ public: return m_reply_state; } + const char* reply_state_str() const + { + switch (m_reply_state) + { + case REPLY_STATE_START: + return "START"; + + case REPLY_STATE_DONE: + return "DONE"; + + case REPLY_STATE_RSET_COLDEF: + return "COLDEF"; + + case REPLY_STATE_RSET_ROWS: + return "ROWS"; + + default: + return "UNKNOWN"; + } + } + void add_ps_handle(uint32_t id, uint32_t handle); uint32_t get_ps_handle(uint32_t id) const; diff --git a/server/modules/protocol/MySQL/rwbackend.cc b/server/modules/protocol/MySQL/rwbackend.cc index 5924d60b0..e95b6406b 100644 --- a/server/modules/protocol/MySQL/rwbackend.cc +++ b/server/modules/protocol/MySQL/rwbackend.cc @@ -254,6 +254,9 @@ void RWBackend::process_reply(GWBUF* buffer) } } + MXS_DEBUG("cmd: %02hhx bytes: %u packets: %d state: %s", mxs_mysql_get_command(buffer), + gwbuf_length(buffer), modutil_count_packets(buffer), reply_state_str()); + if (get_reply_state() == REPLY_STATE_DONE) { ack_write(); From 746bd53668fd31ecc1d022d0256b6b9f14c26f80 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Tue, 9 Apr 2019 13:59:38 +0300 Subject: [PATCH 18/22] Simplify RWBackend result handling By processing the packets one at a time, the reply state is updated correctly regardless of how many packets are received. This removes the need for the clunky code that used modutil_count_signal_packets to detect the end of the result set. 
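The reason one-packet-at-a-time processing works regardless of how the network splits the response: every MySQL packet carries a 3-byte little-endian payload length plus a 1-byte sequence id, so a buffer of complete packets can always be walked header by header. Below is a minimal standalone sketch of that walk over a plain byte vector; the Packet struct and function name are illustrative only, and the actual patch iterates an mxs::Buffer in place instead of copying offsets out.

    #include <cstdint>
    #include <cstddef>
    #include <vector>

    struct Packet
    {
        uint32_t payload_len;   // 3-byte little-endian length from the header
        uint8_t  sequence;      // packet sequence id
        size_t   payload_start; // offset of the first payload byte (the command/marker)
    };

    // Walk a buffer that contains zero or more complete MySQL packets.
    static std::vector<Packet> split_packets(const std::vector<uint8_t>& buf)
    {
        std::vector<Packet> packets;
        size_t i = 0;

        while (i + 4 <= buf.size())
        {
            uint32_t len = buf[i] | (buf[i + 1] << 8) | (buf[i + 2] << 16);
            Packet p{len, buf[i + 3], i + 4};

            if (p.payload_start + len > buf.size())
            {
                break;  // partial packet, wait for more data
            }

            packets.push_back(p);
            i = p.payload_start + len;  // jump to the next packet header
        }

        return packets;
    }

The early break on a partial packet is what lets the caller simply keep the tail and resume once more data arrives, which is exactly why per-packet state tracking stays correct for arbitrarily fragmented replies.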
--- include/maxscale/protocol/rwbackend.hh | 7 + server/modules/protocol/MySQL/rwbackend.cc | 305 ++++++++++++++------- 2 files changed, 213 insertions(+), 99 deletions(-) diff --git a/include/maxscale/protocol/rwbackend.hh b/include/maxscale/protocol/rwbackend.hh index d8b1841d5..64c8fd91f 100644 --- a/include/maxscale/protocol/rwbackend.hh +++ b/include/maxscale/protocol/rwbackend.hh @@ -28,6 +28,7 @@ enum reply_state_t REPLY_STATE_START, /**< Query sent to backend */ REPLY_STATE_DONE, /**< Complete reply received */ REPLY_STATE_RSET_COLDEF, /**< Resultset response, waiting for column definitions */ + REPLY_STATE_RSET_COLDEF_EOF,/**< Resultset response, waiting for EOF for column definitions */ REPLY_STATE_RSET_ROWS /**< Resultset response, waiting for rows */ }; @@ -67,6 +68,9 @@ public: case REPLY_STATE_RSET_COLDEF: return "COLDEF"; + case REPLY_STATE_RSET_COLDEF_EOF: + return "COLDEF_EOF"; + case REPLY_STATE_RSET_ROWS: return "ROWS"; @@ -137,6 +141,8 @@ public: return m_reply_state == REPLY_STATE_DONE; } + void process_packets(GWBUF* buffer); + // Controlled by the session ResponseStat& response_stat(); private: @@ -148,6 +154,7 @@ private: uint32_t m_expected_rows; /**< Number of rows a COM_STMT_FETCH is retrieving */ bool m_local_infile_requested; /**< Whether a LOCAL INFILE was requested */ ResponseStat m_response_stat; + uint64_t m_num_coldefs = 0; inline bool is_opening_cursor() const { diff --git a/server/modules/protocol/MySQL/rwbackend.cc b/server/modules/protocol/MySQL/rwbackend.cc index e95b6406b..abeb0db82 100644 --- a/server/modules/protocol/MySQL/rwbackend.cc +++ b/server/modules/protocol/MySQL/rwbackend.cc @@ -129,9 +129,23 @@ void RWBackend::close(close_type type) bool RWBackend::consume_fetched_rows(GWBUF* buffer) { - m_expected_rows -= modutil_count_packets(buffer); - mxb_assert(m_expected_rows >= 0); - return m_expected_rows == 0; + bool rval = false; + bool more = false; + int n_eof = modutil_count_signal_packets(buffer, 0, &more, &m_modutil_state); + + // If the server responded with an error, n_eof > 0 + if (n_eof > 0) + { + rval = true; + } + else + { + m_expected_rows -= modutil_count_packets(buffer); + mxb_assert(m_expected_rows >= 0); + rval = m_expected_rows == 0; + } + + return rval; } static inline bool have_next_packet(GWBUF* buffer) @@ -140,6 +154,189 @@ static inline bool have_next_packet(GWBUF* buffer) return gwbuf_length(buffer) > len; } +template +uint64_t get_encoded_int(Iter it) +{ + uint64_t len = *it++; + + switch (len) + { + case 0xfc: + len = *it++; + len |= ((uint64_t)*it++) << 8; + break; + + case 0xfd: + len = *it++; + len |= ((uint64_t)*it++) << 8; + len |= ((uint64_t)*it++) << 16; + break; + + case 0xfe: + len = *it++; + len |= ((uint64_t)*it++) << 8; + len |= ((uint64_t)*it++) << 16; + len |= ((uint64_t)*it++) << 24; + len |= ((uint64_t)*it++) << 32; + len |= ((uint64_t)*it++) << 40; + len |= ((uint64_t)*it++) << 48; + len |= ((uint64_t)*it++) << 56; + break; + + default: + break; + } + + return len; +} + +template +Iter skip_encoded_int(Iter it) +{ + switch (*it) + { + case 0xfc: + return std::next(it, 3); + + case 0xfd: + return std::next(it, 4); + + case 0xfe: + return std::next(it, 9); + + default: + return std::next(it); + } +} + +template +uint64_t is_last_ok(Iter it) +{ + ++it; // Skip the command byte + it = skip_encoded_int(it); // Affected rows + it = skip_encoded_int(it); // Last insert ID + uint16_t status = *it++; + status |= (*it++) << 8; + return (status & SERVER_MORE_RESULTS_EXIST) == 0; +} + +template +uint64_t 
is_last_eof(Iter it) +{ + std::advance(it, 3); // Skip the command byte and warning count + uint16_t status = *it++; + status |= (*it++) << 8; + return (status & SERVER_MORE_RESULTS_EXIST) == 0; +} + +void RWBackend::process_packets(GWBUF* result) +{ + mxs::Buffer buffer(result); + auto it = buffer.begin(); + + while (it != buffer.end()) + { + // Extract packet length and command byte + uint32_t len = *it++; + len |= (*it++) << 8; + len |= (*it++) << 16; + ++it; // Skip the sequence + mxb_assert(it != buffer.end()); + auto end = std::next(it, len); + uint8_t cmd = *it; + + switch (m_reply_state) + { + case REPLY_STATE_START: + m_local_infile_requested = false; + + switch (cmd) + { + case MYSQL_REPLY_OK: + if (is_last_ok(it)) + { + // No more results + set_reply_state(REPLY_STATE_DONE); + } + break; + + case MYSQL_REPLY_LOCAL_INFILE: + m_local_infile_requested = true; + set_reply_state(REPLY_STATE_DONE); + break; + + case MYSQL_REPLY_ERR: + // Nothing ever follows an error packet + set_reply_state(REPLY_STATE_DONE); + break; + + case MYSQL_REPLY_EOF: + // EOF packets are never expected as the first response + mxb_assert(!true); + break; + + default: + + if (current_command() == MXS_COM_FIELD_LIST) + { + // COM_FIELD_LIST sends a strange kind of a result set + set_reply_state(REPLY_STATE_RSET_ROWS); + } + else + { + // Start of a result set + m_num_coldefs = get_encoded_int(it); + set_reply_state(REPLY_STATE_RSET_COLDEF); + } + + break; + } + break; + + case REPLY_STATE_DONE: + // This should never happen + mxb_assert(!true); + MXS_ERROR("Unexpected result state. cmd: 0x%02hhx, len: %u", cmd, len); + break; + + case REPLY_STATE_RSET_COLDEF: + mxb_assert(m_num_coldefs > 0); + --m_num_coldefs; + + if (m_num_coldefs == 0) + { + set_reply_state(REPLY_STATE_RSET_COLDEF_EOF); + // Skip this state when DEPRECATE_EOF capability is supported + } + break; + + case REPLY_STATE_RSET_COLDEF_EOF: + mxb_assert(cmd == MYSQL_REPLY_EOF && len == MYSQL_EOF_PACKET_LEN - MYSQL_HEADER_LEN); + set_reply_state(REPLY_STATE_RSET_ROWS); + + if (is_opening_cursor()) + { + set_cursor_opened(); + MXS_INFO("Cursor successfully opened"); + set_reply_state(REPLY_STATE_DONE); + } + break; + + case REPLY_STATE_RSET_ROWS: + if ((cmd == MYSQL_REPLY_EOF && len == MYSQL_EOF_PACKET_LEN - MYSQL_HEADER_LEN) + || cmd == MYSQL_REPLY_ERR) + { + set_reply_state(is_last_eof(it) ? REPLY_STATE_DONE : REPLY_STATE_START); + } + break; + } + + it = end; + } + + buffer.release(); +} + /** * @brief Process a possibly partial response from the backend * @@ -149,114 +346,24 @@ void RWBackend::process_reply(GWBUF* buffer) { if (current_command() == MXS_COM_STMT_FETCH) { - bool more = false; - int n_eof = modutil_count_signal_packets(buffer, 0, &more, &m_modutil_state); - // If the server responded with an error, n_eof > 0 - if (n_eof > 0 || consume_fetched_rows(buffer)) + if (consume_fetched_rows(buffer)) { set_reply_state(REPLY_STATE_DONE); } } - else if (current_command() == MXS_COM_STATISTICS) + else if (current_command() == MXS_COM_STATISTICS || GWBUF_IS_COLLECTED_RESULT(buffer)) { - // COM_STATISTICS returns a single string and thus requires special handling + // COM_STATISTICS returns a single string and thus requires special handling. + // Collected result are all in one buffer and need no processing. 
set_reply_state(REPLY_STATE_DONE); } - else if (get_reply_state() == REPLY_STATE_START - && (!mxs_mysql_is_result_set(buffer) || GWBUF_IS_COLLECTED_RESULT(buffer))) - { - m_local_infile_requested = false; - - if (GWBUF_IS_COLLECTED_RESULT(buffer) - || current_command() == MXS_COM_STMT_PREPARE - || !mxs_mysql_is_ok_packet(buffer) - || !mxs_mysql_more_results_after_ok(buffer)) - { - /** Not a result set, we have the complete response */ - set_reply_state(REPLY_STATE_DONE); - - if (mxs_mysql_is_local_infile(buffer)) - { - m_local_infile_requested = true; - } - } - else - { - // This is an OK packet and more results will follow - mxb_assert(mxs_mysql_is_ok_packet(buffer) - && mxs_mysql_more_results_after_ok(buffer)); - - if (have_next_packet(buffer)) - { - // TODO: Don't clone the buffer - GWBUF* tmp = gwbuf_clone(buffer); - tmp = gwbuf_consume(tmp, mxs_mysql_get_packet_len(tmp)); - - // Consume repeating OK packets - while (mxs_mysql_more_results_after_ok(buffer) && have_next_packet(tmp)) - { - tmp = gwbuf_consume(tmp, mxs_mysql_get_packet_len(tmp)); - mxb_assert(tmp); - } - - process_reply(tmp); - gwbuf_free(tmp); - return; - } - } - } else { - bool more = false; - int n_old_eof = get_reply_state() == REPLY_STATE_RSET_ROWS ? 1 : 0; - int n_eof = modutil_count_signal_packets(buffer, n_old_eof, &more, &m_modutil_state); - - if (n_eof > 2) - { - /** - * We have multiple results in the buffer, we only care about - * the state of the last one. Skip the complete result sets and act - * like we're processing a single result set. - */ - n_eof = n_eof % 2 ? 1 : 2; - } - - if (n_eof == 0) - { - /** Waiting for the EOF packet after the column definitions */ - set_reply_state(REPLY_STATE_RSET_COLDEF); - } - else if (n_eof == 1 && current_command() != MXS_COM_FIELD_LIST) - { - /** Waiting for the EOF packet after the rows */ - set_reply_state(REPLY_STATE_RSET_ROWS); - - if (is_opening_cursor()) - { - set_cursor_opened(); - MXS_INFO("Cursor successfully opened"); - set_reply_state(REPLY_STATE_DONE); - } - } - else - { - /** We either have a complete result set or a response to - * a COM_FIELD_LIST command */ - mxb_assert(n_eof == 2 || (n_eof == 1 && current_command() == MXS_COM_FIELD_LIST)); - set_reply_state(REPLY_STATE_DONE); - - if (more) - { - /** The server will send more resultsets */ - set_reply_state(REPLY_STATE_START); - } - } + // Normal result, process it one packet at a time + process_packets(buffer); } - MXS_DEBUG("cmd: %02hhx bytes: %u packets: %d state: %s", mxs_mysql_get_command(buffer), - gwbuf_length(buffer), modutil_count_packets(buffer), reply_state_str()); - if (get_reply_state() == REPLY_STATE_DONE) { ack_write(); From d2ecaa83a6e3bf54de9619b150a8219cba83175b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Wed, 10 Apr 2019 16:25:03 +0300 Subject: [PATCH 19/22] Move result start handling into separate function The largest part of the code deals with the start of a response. Moving this into a subfunction makes the function clearer as the switch statement inside a switch statement is removed. 
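The process_reply_start() helper extracted below keeps relying on the SERVER_MORE_RESULTS_EXIST status flag to decide whether an OK or EOF packet really ends the reply. As a standalone reminder of where that flag lives, assuming the standard 4.1 wire layout (the helper names here are illustrative):

    #include <cstdint>

    // OK payload : 0x00, lenenc affected_rows, lenenc last_insert_id,
    //              2-byte status flags, 2-byte warning count.
    // EOF payload: 0xfe, 2-byte warning count, 2-byte status flags.
    constexpr uint16_t SERVER_MORE_RESULTS_EXIST = 0x0008;

    // Extract the status flags from an EOF payload.
    inline uint16_t eof_status(const uint8_t* payload)
    {
        return payload[3] | (payload[4] << 8);
    }

    // If the flag is set, another result set follows and the reply is not done.
    inline bool more_results(uint16_t status_flags)
    {
        return (status_flags & SERVER_MORE_RESULTS_EXIST) != 0;
    }

This is the same flag the is_last_ok()/is_last_eof() templates test; the refactoring below only moves the surrounding dispatch, not this logic.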
--- include/maxscale/protocol/rwbackend.hh | 1 + server/modules/protocol/MySQL/rwbackend.cc | 98 ++++++++++++---------- 2 files changed, 54 insertions(+), 45 deletions(-) diff --git a/include/maxscale/protocol/rwbackend.hh b/include/maxscale/protocol/rwbackend.hh index 64c8fd91f..a13a3cb03 100644 --- a/include/maxscale/protocol/rwbackend.hh +++ b/include/maxscale/protocol/rwbackend.hh @@ -142,6 +142,7 @@ public: } void process_packets(GWBUF* buffer); + void process_reply_start(mxs::Buffer::iterator it); // Controlled by the session ResponseStat& response_stat(); diff --git a/server/modules/protocol/MySQL/rwbackend.cc b/server/modules/protocol/MySQL/rwbackend.cc index abeb0db82..cf4be4a88 100644 --- a/server/modules/protocol/MySQL/rwbackend.cc +++ b/server/modules/protocol/MySQL/rwbackend.cc @@ -229,6 +229,53 @@ uint64_t is_last_eof(Iter it) return (status & SERVER_MORE_RESULTS_EXIST) == 0; } +void RWBackend::process_reply_start(mxs::Buffer::iterator it) +{ + uint8_t cmd = *it; + m_local_infile_requested = false; + + switch (cmd) + { + case MYSQL_REPLY_OK: + if (is_last_ok(it)) + { + // No more results + set_reply_state(REPLY_STATE_DONE); + } + break; + + case MYSQL_REPLY_LOCAL_INFILE: + m_local_infile_requested = true; + set_reply_state(REPLY_STATE_DONE); + break; + + case MYSQL_REPLY_ERR: + // Nothing ever follows an error packet + set_reply_state(REPLY_STATE_DONE); + break; + + case MYSQL_REPLY_EOF: + // EOF packets are never expected as the first response + mxb_assert(!true); + break; + + default: + if (current_command() == MXS_COM_FIELD_LIST) + { + // COM_FIELD_LIST sends a strange kind of a result set + set_reply_state(REPLY_STATE_RSET_ROWS); + } + else + { + // Start of a result set + m_num_coldefs = get_encoded_int(it); + set_reply_state(REPLY_STATE_RSET_COLDEF); + } + + break; + } +} + void RWBackend::process_packets(GWBUF* result) { mxs::Buffer buffer(result); @@ -248,49 +295,7 @@ void RWBackend::process_packets(GWBUF* result) switch (m_reply_state) { case REPLY_STATE_START: - m_local_infile_requested = false; - - switch (cmd) - { - case MYSQL_REPLY_OK: - if (is_last_ok(it)) - { - // No more results - set_reply_state(REPLY_STATE_DONE); - } - break; - - case MYSQL_REPLY_LOCAL_INFILE: - m_local_infile_requested = true; - set_reply_state(REPLY_STATE_DONE); - break; - - case MYSQL_REPLY_ERR: - // Nothing ever follows an error packet - set_reply_state(REPLY_STATE_DONE); - break; - - case MYSQL_REPLY_EOF: - // EOF packets are never expected as the first response - mxb_assert(!true); - break; - - default: - - if (current_command() == MXS_COM_FIELD_LIST) - { - // COM_FIELD_LIST sends a strange kind of a result set - set_reply_state(REPLY_STATE_RSET_ROWS); - } - else - { - // Start of a result set - m_num_coldefs = get_encoded_int(it); - set_reply_state(REPLY_STATE_RSET_COLDEF); - } - - break; - } + process_reply_start(it); break; case REPLY_STATE_DONE: @@ -323,11 +328,14 @@ void RWBackend::process_packets(GWBUF* result) break; case REPLY_STATE_RSET_ROWS: - if ((cmd == MYSQL_REPLY_EOF && len == MYSQL_EOF_PACKET_LEN - MYSQL_HEADER_LEN) - || cmd == MYSQL_REPLY_ERR) + if (cmd == MYSQL_REPLY_EOF && len == MYSQL_EOF_PACKET_LEN - MYSQL_HEADER_LEN) { set_reply_state(is_last_eof(it) ? 
REPLY_STATE_DONE : REPLY_STATE_START); } + else if (cmd == MYSQL_REPLY_ERR) + { + set_reply_state(REPLY_STATE_DONE); + } break; } From 1652b18a7bb042759c045a8a4036b6857e47edc6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Thu, 11 Apr 2019 10:21:41 +0300 Subject: [PATCH 20/22] Fix whitespace in canonicalized queries Trailing whitespace was not removed and whitespace wasn't normalized to spaces. --- server/core/modutil.cc | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/server/core/modutil.cc b/server/core/modutil.cc index 3c2b285a7..30acb0708 100644 --- a/server/core/modutil.cc +++ b/server/core/modutil.cc @@ -1459,9 +1459,16 @@ std::string get_canonical(GWBUF* querybuf) break; } } - else if (is_space(*it) && (i == 0 || is_space(rval[i - 1]))) + else if (is_space(*it)) { - // Repeating space, skip it + if (i == 0 || is_space(rval[i - 1])) + { + // Leading or repeating whitespace, skip it + } + else + { + rval[i++] = ' '; + } } else if (*it == '/' && is_next(it, buf.end(), "/*")) { @@ -1565,6 +1572,12 @@ std::string get_canonical(GWBUF* querybuf) mxb_assert(it != buf.end()); } + // Remove trailing whitespace + while (i > 0 && is_space(rval[i - 1])) + { + --i; + } + // Shrink the buffer so that the internal bookkeeping of std::string remains up to date rval.resize(i); From 62f2a86a5f5500688bb8cd3689a2a4d6e444da62 Mon Sep 17 00:00:00 2001 From: Johan Wikman Date: Fri, 12 Apr 2019 10:30:36 +0300 Subject: [PATCH 21/22] MXS-2431 Add test that reveals the problem --- query_classifier/test/maxscale.test | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/query_classifier/test/maxscale.test b/query_classifier/test/maxscale.test index 9c8e9825e..2a5758211 100644 --- a/query_classifier/test/maxscale.test +++ b/query_classifier/test/maxscale.test @@ -121,3 +121,11 @@ SELECT X(coordinates), Y(coordinates), ST_X(coordinates), ST_Y(coordinates) FROM # MXS-2248 SELECT curdate() + interval '60' day; + +# MXS-2431 +XA BEGIN 'xid'; +XA END 'xid'; +XA PREPARE 'xid'; +XA COMMIT 'xid'; +XA ROLLBACK 'xid' +XA RECOVER 'xid'; From 4131f09c16f8058a6b9e9fcf242776ec6e74dcd2 Mon Sep 17 00:00:00 2001 From: Johan Wikman Date: Fri, 12 Apr 2019 10:33:53 +0300 Subject: [PATCH 22/22] MXS-2431 Recognize the XA keyword Recognize the XA keyword and classify the statement as write. Needs to be dealt with explicitly as sqlite3 assumes there are no keywords starting with the letter X. 
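The tokenizer change that follows hinges on a short lookahead: a leading 'X'/'x' is only the XA keyword when it is followed by 'A'/'a' and then whitespace or the start of a comment; anything else keeps falling through to the identifier path, and once TK_XA is produced the statement is classified as a write. A simplified standalone version of that check (the function name is illustrative, and it assumes a NUL-terminated input as the tokenizer provides):

    #include <cctype>

    static bool looks_like_xa(const unsigned char* z)
    {
        if ((z[0] != 'x' && z[0] != 'X') || (z[1] != 'a' && z[1] != 'A'))
        {
            return false;   // not "XA", treat as an ordinary identifier
        }

        return std::isspace(z[2])                                     // XA BEGIN 'xid'
               || (z[2] == '/' && z[3] == '*')                        // XA/*comment*/...
               || z[2] == '#'                                         // XA# comment
               || (z[2] == '-' && z[3] == '-' && std::isspace(z[4])); // XA-- comment
    }

The short-circuit order matters: a plain identifier such as "xavier" fails the whitespace/comment test and is tokenized as before, so only genuine XA statements pick up the new token.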
--- query_classifier/qc_sqlite/qc_sqlite.cc | 27 ++++++++++--------- .../qc_sqlite/sqlite-src-3110100/src/parse.y | 1 + .../sqlite-src-3110100/src/tokenize.c | 16 +++++++++++ .../sqlite-src-3110100/tool/mkkeywordhash.c | 1 + 4 files changed, 33 insertions(+), 12 deletions(-) diff --git a/query_classifier/qc_sqlite/qc_sqlite.cc b/query_classifier/qc_sqlite/qc_sqlite.cc index bfc5c524a..0ae6e2023 100644 --- a/query_classifier/qc_sqlite/qc_sqlite.cc +++ b/query_classifier/qc_sqlite/qc_sqlite.cc @@ -1575,18 +1575,16 @@ public: } } - void mxs_sqlite3BeginTrigger(Parse* pParse, /* The parse context of the CREATE TRIGGER statement - * */ - Token* pName1, /* The name of the trigger */ - Token* pName2, /* The name of the trigger */ - int tr_tm, /* One of TK_BEFORE, TK_AFTER, TK_INSTEAD */ - int op, /* One of TK_INSERT, TK_UPDATE, TK_DELETE */ - IdList* pColumns, /* column list if this is an UPDATE OF trigger */ - SrcList* pTableName, /* The name of the table/view the trigger applies to - * */ - Expr* pWhen, /* WHEN clause */ - int isTemp, /* True if the TEMPORARY keyword is present */ - int noErr) /* Suppress errors if the trigger already exists */ + void mxs_sqlite3BeginTrigger(Parse* pParse, /* The parse context of the CREATE TRIGGER statement */ + Token* pName1, /* The name of the trigger */ + Token* pName2, /* The name of the trigger */ + int tr_tm, /* One of TK_BEFORE, TK_AFTER, TK_INSTEAD */ + int op, /* One of TK_INSERT, TK_UPDATE, TK_DELETE */ + IdList* pColumns, /* column list if this is an UPDATE OF trigger */ + SrcList* pTableName, /* The name of the table/view the trigger applies to */ + Expr* pWhen, /* WHEN clause */ + int isTemp, /* True if the TEMPORARY keyword is present */ + int noErr) /* Suppress errors if the trigger already exists */ { mxb_assert(this_thread.initialized); @@ -2632,6 +2630,11 @@ public: m_type_mask = (QUERY_TYPE_WRITE | QUERY_TYPE_COMMIT); break; + case TK_XA: + m_status = QC_QUERY_TOKENIZED; + m_type_mask = QUERY_TYPE_WRITE; + break; + default: ; } diff --git a/query_classifier/qc_sqlite/sqlite-src-3110100/src/parse.y b/query_classifier/qc_sqlite/sqlite-src-3110100/src/parse.y index 67088e537..917b68f68 100644 --- a/query_classifier/qc_sqlite/sqlite-src-3110100/src/parse.y +++ b/query_classifier/qc_sqlite/sqlite-src-3110100/src/parse.y @@ -639,6 +639,7 @@ columnid(A) ::= nm(X). { VALUE VIEW /*VIRTUAL*/ /*WITH*/ WORK + XA %endif . %wildcard ANY. diff --git a/query_classifier/qc_sqlite/sqlite-src-3110100/src/tokenize.c b/query_classifier/qc_sqlite/sqlite-src-3110100/src/tokenize.c index 84e4a4538..a698d29ba 100644 --- a/query_classifier/qc_sqlite/sqlite-src-3110100/src/tokenize.c +++ b/query_classifier/qc_sqlite/sqlite-src-3110100/src/tokenize.c @@ -626,6 +626,22 @@ int sqlite3GetToken(const unsigned char *z, int *tokenType){ /* If it is not a BLOB literal, then it must be an ID, since no ** SQL keywords start with the letter 'x'. Fall through */ } +#endif +#ifdef MAXSCALE + // It may be the "XA" keyword. + // If the next character is 'a' or 'A', followed by whitespace or a + // comment, then we are indeed dealing with the "XA" keyword. 
+ if (( z[1]=='a' || z[1]=='A' ) && + (sqlite3Isspace(z[2]) || // Whitespace + (z[2]=='/' && z[3]=='*') || // Beginning of /* comment + (z[2]=='#') || // # eol comment + (z[2]=='-' && z[3]=='-' && sqlite3Isspace(z[4])))) { // -- eol comment + extern int maxscaleKeyword(int); + + *tokenType = TK_XA; + maxscaleKeyword(*tokenType); + return 2; + } #endif case CC_ID: { i = 1; diff --git a/query_classifier/qc_sqlite/sqlite-src-3110100/tool/mkkeywordhash.c b/query_classifier/qc_sqlite/sqlite-src-3110100/tool/mkkeywordhash.c index 675c543c4..a8746a7df 100644 --- a/query_classifier/qc_sqlite/sqlite-src-3110100/tool/mkkeywordhash.c +++ b/query_classifier/qc_sqlite/sqlite-src-3110100/tool/mkkeywordhash.c @@ -500,6 +500,7 @@ static Keyword aKeywordTable[] = { #ifdef MAXSCALE { "WORK", "TK_WORK", ALWAYS }, { "WRITE", "TK_WRITE", ALWAYS }, + { "XA", "TK_XA", ALWAYS }, #endif { "ZEROFILL", "TK_ZEROFILL", ALWAYS }, };
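Circling back to the get_canonical() whitespace fix earlier in the series ([PATCH 20/22]): the intended behaviour is that runs of whitespace collapse to a single space and leading or trailing whitespace disappears. A minimal standalone illustration of just that rule; the real implementation additionally handles comments and literals, which this sketch leaves out, and the function name is illustrative.

    #include <cctype>
    #include <string>

    static std::string normalize_ws(const std::string& sql)
    {
        std::string out;

        for (char c : sql)
        {
            if (std::isspace(static_cast<unsigned char>(c)))
            {
                if (!out.empty() && out.back() != ' ')
                {
                    out += ' ';   // first whitespace in a run becomes a single space
                }
            }
            else
            {
                out += c;
            }
        }

        if (!out.empty() && out.back() == ' ')
        {
            out.pop_back();       // drop trailing whitespace
        }

        return out;
    }

    // Example: normalize_ws("SELECT\t1  +  2 \n") == "SELECT 1 + 2"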