diff --git a/include/maxscale/query_classifier.h b/include/maxscale/query_classifier.h
index fce557878..116e59a1b 100644
--- a/include/maxscale/query_classifier.h
+++ b/include/maxscale/query_classifier.h
@@ -104,6 +104,7 @@ typedef enum qc_query_op
     QUERY_OP_EXPLAIN,
     QUERY_OP_GRANT,
     QUERY_OP_INSERT,
+    QUERY_OP_LOAD_LOCAL,
     QUERY_OP_LOAD,
     QUERY_OP_REVOKE,
     QUERY_OP_SELECT,
diff --git a/maxscale-system-test/CMakeLists.txt b/maxscale-system-test/CMakeLists.txt
index f044ac787..1e3009b7a 100644
--- a/maxscale-system-test/CMakeLists.txt
+++ b/maxscale-system-test/CMakeLists.txt
@@ -713,6 +713,10 @@ add_test_executable(mxs1831_unknown_param.cpp mxs1831_unknown_param replication
 # https://jira.mariadb.org/browse/MXS-1873
 add_test_executable(mxs1873_large_sescmd.cpp mxs1873_large_sescmd replication LABELS readwritesplit REPL_BACKEND)
 
+# MXS-1896: LOAD DATA INFILE is mistaken for LOAD DATA LOCAL INFILE
+# https://jira.mariadb.org/browse/MXS-1896
+add_test_executable(mxs1896_load_data_infile.cpp mxs1896_load_data_infile replication LABELS readwritesplit REPL_BACKEND)
+
 # 'namedserverfilter' test
 add_test_executable(namedserverfilter.cpp namedserverfilter namedserverfilter LABELS namedserverfilter LIGHT REPL_BACKEND)
 
diff --git a/maxscale-system-test/mxs1743_rconn_bitmask.cpp b/maxscale-system-test/mxs1743_rconn_bitmask.cpp
index 3d3bba969..4543e37cd 100644
--- a/maxscale-system-test/mxs1743_rconn_bitmask.cpp
+++ b/maxscale-system-test/mxs1743_rconn_bitmask.cpp
@@ -4,12 +4,12 @@
  * https://jira.mariadb.org/browse/MXS-1743
  */
 #include "testconnections.h"
+#include <vector>
 
 int main(int argc, char** argv)
 {
     TestConnections test(argc, argv);
 
-    test.tprintf("Testing with both master and slave up");
     test.maxscales->connect();
     test.try_query(test.maxscales->conn_master[0], "SELECT 1");
 
@@ -31,6 +31,46 @@ int main(int argc, char** argv)
     test.try_query(test.maxscales->conn_master[0], "SELECT 1");
     test.maxscales->disconnect();
     test.repl->unblock_node(1);
+    sleep(5);
+
+    test.tprintf("Checking that both the master and slave are used");
+    std::vector<MYSQL*> connections;
+
+    test.repl->connect();
+    execute_query_silent(test.repl->nodes[0], "DROP USER IF EXISTS 'mxs1743'@'%'");
+    test.try_query(test.repl->nodes[0], "%s", "CREATE USER 'mxs1743'@'%' IDENTIFIED BY 'mxs1743'");
+    test.try_query(test.repl->nodes[0], "%s", "GRANT ALL ON *.* TO 'mxs1743'@'%'");
+    test.repl->sync_slaves();
+
+    for (int i = 0; i < 20; i++)
+    {
+        // Open a connection and make sure it works
+        MYSQL* conn = open_conn(test.maxscales->readconn_master_port[0], test.maxscales->IP[0],
+                                "mxs1743", "mxs1743", false);
+        test.try_query(conn, "SELECT 1");
+        connections.push_back(conn);
+    }
+
+    // Give the connections a few seconds to establish
+    sleep(5);
+
+    std::string query = "SELECT COUNT(*) AS connections FROM information_schema.processlist WHERE user = 'mxs1743'";
+    char master_connections[1024];
+    char slave_connections[1024];
+    find_field(test.repl->nodes[0], query.c_str(), "connections", master_connections);
+    find_field(test.repl->nodes[1], query.c_str(), "connections", slave_connections);
+
+    test.assert(strcmp(master_connections, slave_connections) == 0,
+                "Master and slave should have the same amount of connections: %s != %s",
+                master_connections, slave_connections);
+
+    for (auto a: connections)
+    {
+        mysql_close(a);
+    }
+
+    execute_query_silent(test.repl->nodes[0], "DROP USER 'mxs1743'@'%'");
+    test.repl->disconnect();
 
     return test.global_result;
 }
diff --git a/maxscale-system-test/mxs1896_load_data_infile.cpp b/maxscale-system-test/mxs1896_load_data_infile.cpp
new file mode 100644
index 000000000..31bf863b5
--- /dev/null
+++ b/maxscale-system-test/mxs1896_load_data_infile.cpp
@@ -0,0 +1,32 @@
+/**
+ * MXS-1896: LOAD DATA INFILE is mistaken for LOAD DATA LOCAL INFILE
+ *
+ * https://jira.mariadb.org/browse/MXS-1896
+ */
+
+#include "testconnections.h"
+
+int main(int argc, char** argv)
+{
+    TestConnections test(argc, argv);
+
+    test.set_timeout(30);
+    test.maxscales->connect();
+
+    test.try_query(test.maxscales->conn_rwsplit[0], "DROP TABLE IF EXISTS test.t1");
+    test.try_query(test.maxscales->conn_rwsplit[0], "CREATE TABLE test.t1(id INT)");
+    test.try_query(test.maxscales->conn_rwsplit[0], "INSERT INTO test.t1 VALUES (1), (2), (3)");
+    test.try_query(test.maxscales->conn_rwsplit[0], "SELECT * FROM test.t1 INTO OUTFILE '/tmp/test.csv'");
+    test.try_query(test.maxscales->conn_rwsplit[0], "LOAD DATA INFILE '/tmp/test.csv' INTO TABLE test.t1");
+    test.try_query(test.maxscales->conn_rwsplit[0], "DROP TABLE test.t1");
+
+    test.maxscales->disconnect();
+
+    // Clean up the generated files
+    for (int i = 0; i < 4; i++)
+    {
+        test.repl->ssh_node_f(i, true, "rm -f /tmp/test.csv");
+    }
+
+    return test.global_result;
+}
diff --git a/query_classifier/qc_mysqlembedded/qc_mysqlembedded.cc b/query_classifier/qc_mysqlembedded/qc_mysqlembedded.cc
index 8ea9ce177..07c813cf2 100644
--- a/query_classifier/qc_mysqlembedded/qc_mysqlembedded.cc
+++ b/query_classifier/qc_mysqlembedded/qc_mysqlembedded.cc
@@ -2001,7 +2001,7 @@ int32_t qc_mysql_get_operation(GWBUF* querybuf, int32_t* operation)
         break;
 
     case SQLCOM_LOAD:
-        *operation = QUERY_OP_LOAD;
+        *operation = QUERY_OP_LOAD_LOCAL;
         break;
 
     case SQLCOM_GRANT:
diff --git a/query_classifier/qc_sqlite/qc_sqlite.cc b/query_classifier/qc_sqlite/qc_sqlite.cc
index af49d29d8..a71e501e2 100644
--- a/query_classifier/qc_sqlite/qc_sqlite.cc
+++ b/query_classifier/qc_sqlite/qc_sqlite.cc
@@ -2327,13 +2327,13 @@ public:
         exposed_sqlite3SrcListDelete(pParse->db, pFullName);
     }
 
-    void maxscaleLoadData(Parse* pParse, SrcList* pFullName)
+    void maxscaleLoadData(Parse* pParse, SrcList* pFullName, int local)
     {
         ss_dassert(this_thread.initialized);
 
         m_status = QC_QUERY_PARSED;
         m_type_mask = QUERY_TYPE_WRITE;
-        m_operation = QUERY_OP_LOAD;
+        m_operation = local ? QUERY_OP_LOAD_LOCAL : QUERY_OP_LOAD;
 
         if (pFullName)
         {
@@ -3299,7 +3299,7 @@ extern void maxscaleExecuteImmediate(Parse*, Token* pName, ExprSpan* pExprSpan,
 extern void maxscaleExplain(Parse*, Token* pNext);
 extern void maxscaleFlush(Parse*, Token* pWhat);
 extern void maxscaleHandler(Parse*, mxs_handler_t, SrcList* pFullName, Token* pName);
-extern void maxscaleLoadData(Parse*, SrcList* pFullName);
+extern void maxscaleLoadData(Parse*, SrcList* pFullName, int local);
 extern void maxscaleLock(Parse*, mxs_lock_t, SrcList*);
 extern void maxscalePrepare(Parse*, Token* pName, Expr* pStmt);
 extern void maxscalePrivileges(Parse*, int kind);
@@ -4201,14 +4201,14 @@ void maxscaleHandler(Parse* pParse, mxs_handler_t type, SrcList* pFullName, Toke
     QC_EXCEPTION_GUARD(pInfo->maxscaleHandler(pParse, type, pFullName, pName));
 }
 
-void maxscaleLoadData(Parse* pParse, SrcList* pFullName)
+void maxscaleLoadData(Parse* pParse, SrcList* pFullName, int local)
 {
     QC_TRACE();
 
     QcSqliteInfo* pInfo = this_thread.pInfo;
     ss_dassert(pInfo);
 
-    QC_EXCEPTION_GUARD(pInfo->maxscaleLoadData(pParse, pFullName));
+    QC_EXCEPTION_GUARD(pInfo->maxscaleLoadData(pParse, pFullName, local));
 }
 
 void maxscaleLock(Parse* pParse, mxs_lock_t type, SrcList* pTables)
diff --git a/query_classifier/qc_sqlite/sqlite-src-3110100/src/parse.y b/query_classifier/qc_sqlite/sqlite-src-3110100/src/parse.y
index 21707bee2..bd08e3219 100644
--- a/query_classifier/qc_sqlite/sqlite-src-3110100/src/parse.y
+++ b/query_classifier/qc_sqlite/sqlite-src-3110100/src/parse.y
@@ -120,7 +120,7 @@ extern void maxscaleExecuteImmediate(Parse*, Token* pName, ExprSpan* pExprSpan,
 extern void maxscaleExplain(Parse*, Token* pNext);
 extern void maxscaleFlush(Parse*, Token* pWhat);
 extern void maxscaleHandler(Parse*, mxs_handler_t, SrcList* pFullName, Token* pName);
-extern void maxscaleLoadData(Parse*, SrcList* pFullName);
+extern void maxscaleLoadData(Parse*, SrcList* pFullName, int local);
 extern void maxscaleLock(Parse*, mxs_lock_t, SrcList*);
 extern void maxscalePrepare(Parse*, Token* pName, Expr* pStmt);
 extern void maxscalePrivileges(Parse*, int kind);
@@ -2913,19 +2913,21 @@ handler ::= HANDLER nm(X) CLOSE. {
 
 //////////////////////// The LOAD DATA INFILE statement ////////////////////////////////////
 //
+%type ld_local_opt {int}
+
 cmd ::= load_data.
 
 ld_priority_opt ::= .
 ld_priority_opt ::= LOW_PRIORITY.
 ld_priority_opt ::= CONCURRENT.
 
-ld_local_opt ::= .
-ld_local_opt ::= LOCAL.
+ld_local_opt(A) ::= . {A = 0;}
+ld_local_opt(A) ::= LOCAL. {A = 1;}
 
 ld_charset_opt ::= .
 ld_charset_opt ::= CHARACTER SET ids.
 
-load_data ::= LOAD DATA ld_priority_opt ld_local_opt
+load_data ::= LOAD DATA ld_priority_opt ld_local_opt(Y)
     INFILE STRING ignore_or_replace_opt
     INTO TABLE fullname(X)
     /* ld_partition_opt */
@@ -2935,7 +2937,7 @@ load_data ::= LOAD DATA ld_priority_opt ld_local_opt
     /* ld_col_name_or_user_var_opt */
     /* ld_set */.
 {
-  maxscaleLoadData(pParse, X);
+  maxscaleLoadData(pParse, X, Y);
 }
 
 //////////////////////// The LOCK/UNLOCK statement ////////////////////////////////////
diff --git a/server/core/config.cc b/server/core/config.cc
index 6d9244e9a..931617841 100644
--- a/server/core/config.cc
+++ b/server/core/config.cc
@@ -579,7 +579,7 @@ static int ini_handler(void *userdata, const char *section, const char *name, co
 
     if (strcmp(section, CN_GATEWAY) == 0 || strcasecmp(section, CN_MAXSCALE) == 0)
     {
-        if (is_root_config_file)
+        if (is_root_config_file || is_persisted_config)
         {
             return handle_global_item(name, value);
         }
diff --git a/server/core/query_classifier.cc b/server/core/query_classifier.cc
index 2cf3dfa96..787abd21b 100644
--- a/server/core/query_classifier.cc
+++ b/server/core/query_classifier.cc
@@ -389,6 +389,9 @@ const char* qc_op_to_string(qc_query_op_t op)
     case QUERY_OP_LOAD:
         return "QUERY_OP_LOAD";
 
+    case QUERY_OP_LOAD_LOCAL:
+        return "QUERY_OP_LOAD_LOCAL";
+
     case QUERY_OP_REVOKE:
         return "QUERY_OP_REVOKE";
 
diff --git a/server/modules/filter/dbfwfilter/dbfwfilter.hh b/server/modules/filter/dbfwfilter/dbfwfilter.hh
index 738ef128d..1cc3dec9a 100644
--- a/server/modules/filter/dbfwfilter/dbfwfilter.hh
+++ b/server/modules/filter/dbfwfilter/dbfwfilter.hh
@@ -85,6 +85,7 @@ static inline fw_op_t qc_op_to_fw_op(qc_query_op_t op)
     case QUERY_OP_INSERT:
         return FW_OP_INSERT;
 
+    case QUERY_OP_LOAD_LOCAL:
     case QUERY_OP_LOAD:
         return FW_OP_LOAD;
 
@@ -288,4 +289,4 @@ char* create_error(const char* format, ...);
  */
 bool rule_matches(Dbfw* my_instance, DbfwSession* my_session, GWBUF *queue,
                   SRule rule, char* query);
-bool rule_is_active(SRule rule);
\ No newline at end of file
+bool rule_is_active(SRule rule);
diff --git a/server/modules/routing/avrorouter/avro_file.cc b/server/modules/routing/avrorouter/avro_file.cc
index 5d31c6bf6..903d10e68 100644
--- a/server/modules/routing/avrorouter/avro_file.cc
+++ b/server/modules/routing/avrorouter/avro_file.cc
@@ -588,6 +588,10 @@ avro_binlog_end_t avro_read_all_events(Avro *router)
             int n_events = hdr.event_size - event_header_length - BLRM_FDE_EVENT_TYPES_OFFSET - FDE_EXTRA_BYTES;
             uint8_t* checksum = ptr + hdr.event_size - event_header_length - FDE_EXTRA_BYTES;
 
+            // Precaution to prevent writing too much in case new events are added
+            int real_len = MXS_MIN(n_events, (int)sizeof(router->event_type_hdr_lens));
+            memcpy(router->event_type_hdr_lens, ptr + BLRM_FDE_EVENT_TYPES_OFFSET, real_len);
+
             router->event_types = n_events;
             router->binlog_checksum = checksum[0];
         }
diff --git a/server/modules/routing/readconnroute/readconnroute.cc b/server/modules/routing/readconnroute/readconnroute.cc
index f2560e44d..d14a5af4f 100644
--- a/server/modules/routing/readconnroute/readconnroute.cc
+++ b/server/modules/routing/readconnroute/readconnroute.cc
@@ -344,7 +344,7 @@ newSession(MXS_ROUTER *instance, MXS_SESSION *session)
             continue;
         }
 
-        if (ref == master_host && (inst->bitvalue & SERVER_MASTER))
+        if (ref == master_host && inst->bitvalue == SERVER_MASTER)
        {
             /* If option is "master" return only the root Master as there could be
              * intermediate masters (Relay Servers) and they must not be selected.
diff --git a/server/modules/routing/readwritesplit/rwsplitsession.cc b/server/modules/routing/readwritesplit/rwsplitsession.cc
index 3011a1313..5ba1ee918 100644
--- a/server/modules/routing/readwritesplit/rwsplitsession.cc
+++ b/server/modules/routing/readwritesplit/rwsplitsession.cc
@@ -321,7 +321,7 @@ void RWSplitSession::correct_packet_sequence(GWBUF *buffer)
     }
 }
 
-static void log_unexpected_response(DCB* dcb, GWBUF* buffer)
+static void log_unexpected_response(SRWBackend& backend, GWBUF* buffer, GWBUF* current_query)
 {
     if (mxs_mysql_is_err_packet(buffer))
     {
@@ -336,21 +336,23 @@ static void log_unexpected_response(DCB* dcb, GWBUF* buffer)
         if (errcode == ER_CONNECTION_KILLED)
         {
             MXS_INFO("Connection from '%s'@'%s' to '%s' was killed",
-                     dcb->session->client_dcb->user,
-                     dcb->session->client_dcb->remote,
-                     dcb->server->name);
+                     backend->dcb()->session->client_dcb->user,
+                     backend->dcb()->session->client_dcb->remote,
+                     backend->name());
         }
         else
         {
             MXS_WARNING("Server '%s' sent an unexpected error: %hu, %s",
-                        dcb->server->name, errcode, errstr.c_str());
+                        backend->name(), errcode, errstr.c_str());
         }
     }
     else
     {
+        std::string sql = current_query ? mxs::extract_sql(current_query, 1024) : "";
         MXS_ERROR("Unexpected internal state: received response 0x%02hhx from "
-                  "server '%s' when no response was expected",
-                  mxs_mysql_get_command(buffer), dcb->server->name);
+                  "server '%s' when no response was expected. Command: 0x%02hhx "
+                  "Query: %s", mxs_mysql_get_command(buffer), backend->name(),
+                  backend->current_command(), sql.c_str());
         ss_dassert(false);
     }
 }
@@ -431,7 +433,7 @@ void RWSplitSession::clientReply(GWBUF *writebuf, DCB *backend_dcb)
         /** If we receive an unexpected response from the server, the internal
          * logic cannot handle this situation. Routing the reply straight to
          * the client should be the safest thing to do at this point. */
-        log_unexpected_response(backend_dcb, writebuf);
+        log_unexpected_response(backend, writebuf, m_current_query.get());
         MXS_SESSION_ROUTE_REPLY(backend_dcb->session, writebuf);
         return;
     }
diff --git a/server/modules/routing/schemarouter/schemaroutersession.cc b/server/modules/routing/schemarouter/schemaroutersession.cc
index 4004016e0..31e8c6980 100644
--- a/server/modules/routing/schemarouter/schemaroutersession.cc
+++ b/server/modules/routing/schemarouter/schemaroutersession.cc
@@ -424,7 +424,7 @@ int32_t SchemaRouterSession::routeQuery(GWBUF* pPacket)
         /** We know where to route this query */
         SSRBackend bref = get_bref_from_dcb(target_dcb);
 
-        if (op == QUERY_OP_LOAD)
+        if (op == QUERY_OP_LOAD_LOCAL)
        {
             m_load_target = bref->backend()->server;
         }