diff --git a/maxscale-system-test/CMakeLists.txt b/maxscale-system-test/CMakeLists.txt
index 38f6be498..be5e203f0 100644
--- a/maxscale-system-test/CMakeLists.txt
+++ b/maxscale-system-test/CMakeLists.txt
@@ -202,6 +202,9 @@ add_test_executable(mxs2167_extra_port.cpp mxs2167_extra_port mxs2167_extra_port
 # Test KILL QUERY functionality
 add_test_executable(kill_query.cpp kill_query replication LABELS REPL_BACKEND)
 
+# MXS-2250: DESCRIBE on temporary table should work.
+add_test_executable(mxs2250_describe_temp_table.cpp mxs2250_describe_temp_table mxs2250_describe_temp_table LABELS REPL_BACKEND)
+
 ############################################
 # BEGIN: Tests that require GTID           #
 ############################################
diff --git a/maxscale-system-test/cnf/maxscale.cnf.template.mxs2250_describe_temp_table b/maxscale-system-test/cnf/maxscale.cnf.template.mxs2250_describe_temp_table
new file mode 100644
index 000000000..5e81561ef
--- /dev/null
+++ b/maxscale-system-test/cnf/maxscale.cnf.template.mxs2250_describe_temp_table
@@ -0,0 +1,61 @@
+[maxscale]
+threads=###threads###
+log_warning=1
+
+[server1]
+type=server
+address=###node_server_IP_1###
+port=###node_server_port_1###
+protocol=mariadbbackend
+
+[server2]
+type=server
+address=###node_server_IP_2###
+port=###node_server_port_2###
+protocol=mariadbbackend
+
+[server3]
+type=server
+address=###node_server_IP_3###
+port=###node_server_port_3###
+protocol=mariadbbackend
+
+[server4]
+type=server
+address=###node_server_IP_4###
+port=###node_server_port_4###
+protocol=mariadbbackend
+
+[Monitor]
+type=monitor
+module=mariadbmon
+servers=server1,server2,server3,server4
+user=maxskysql
+password=skysql
+monitor_interval=1000
+
+[RWS]
+type=service
+router=readwritesplit
+servers=server1,server2,server3,server4
+user=maxskysql
+password=skysql
+slave_selection_criteria=LEAST_ROUTER_CONNECTIONS
+
+[RWS-Listener]
+type=listener
+service=RWS
+protocol=mariadbclient
+port=4006
+#socket=/tmp/rwsplit.sock
+
+[CLI]
+type=service
+router=cli
+
+[CLI Listener]
+type=listener
+service=CLI
+protocol=maxscaled
+#address=localhost
+socket=default
diff --git a/maxscale-system-test/mxs2250_describe_temp_table.cpp b/maxscale-system-test/mxs2250_describe_temp_table.cpp
new file mode 100644
index 000000000..2e89f913d
--- /dev/null
+++ b/maxscale-system-test/mxs2250_describe_temp_table.cpp
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2019 MariaDB Corporation Ab
+ *
+ * Use of this software is governed by the Business Source License included
+ * in the LICENSE.TXT file and at www.mariadb.com/bsl11.
+ *
+ * Change Date: 2022-01-01
+ *
+ * On the date above, in accordance with the Business Source License, use
+ * of this software will be governed by version 2 or later of the General
+ * Public License.
+ */
+
+#include "mariadb_func.h"
+#include "testconnections.h"
+
+int main(int argc, char* argv[])
+{
+    TestConnections test(argc, argv);
+
+    Connection rwsplit = test.maxscales->rwsplit();
+
+    test.expect(rwsplit.connect(),
+                "Could not connect to rwsplit.");
+    test.expect(rwsplit.query("CREATE TEMPORARY TABLE mxs2250 (a int)"),
+                "Could not create temporary table.");
+    test.expect(rwsplit.query("DESCRIBE mxs2250"),
+                "Could not describe temporary table.");
+
+    return test.global_result;
+}
diff --git a/query_classifier/qc_mysqlembedded/qc_mysqlembedded.cc b/query_classifier/qc_mysqlembedded/qc_mysqlembedded.cc
index 1153ce429..3fed16c1a 100644
--- a/query_classifier/qc_mysqlembedded/qc_mysqlembedded.cc
+++ b/query_classifier/qc_mysqlembedded/qc_mysqlembedded.cc
@@ -1654,7 +1654,7 @@ int32_t qc_mysql_get_table_names(GWBUF* querybuf, int32_t fullnames, char*** tab
         goto retblock;
     }
 
-    if (lex->describe || is_show_command(lex->sql_command))
+    if (lex->describe || (is_show_command(lex->sql_command) && !(lex->sql_command == SQLCOM_SHOW_FIELDS)))
     {
         goto retblock;
     }
@@ -1992,7 +1992,9 @@ int32_t qc_mysql_get_database_names(GWBUF* querybuf, char*** databasesp, int* si
         goto retblock;
     }
 
-    if (lex->describe || (is_show_command(lex->sql_command) && !(lex->sql_command == SQLCOM_SHOW_TABLES)))
+    if (lex->describe || (is_show_command(lex->sql_command)
+                          && !(lex->sql_command == SQLCOM_SHOW_TABLES)
+                          && !(lex->sql_command == SQLCOM_SHOW_FIELDS)))
     {
         goto retblock;
     }
diff --git a/query_classifier/qc_sqlite/qc_sqlite.cc b/query_classifier/qc_sqlite/qc_sqlite.cc
index d3b14b93f..d99330d5a 100644
--- a/query_classifier/qc_sqlite/qc_sqlite.cc
+++ b/query_classifier/qc_sqlite/qc_sqlite.cc
@@ -749,7 +749,7 @@ public:
 
             if (should_collect_database)
             {
-                zCollected_database = update_database_names(database);
+                zCollected_database = update_database_names(database, nDatabase);
             }
 
             if (pAliases && zCollected_table && zAlias)
@@ -2281,11 +2281,7 @@ public:
                 // TODO: certain what a returned database actually refers to
                 // TODO: so better not to provide a name until there is a
                 // TODO: specific op.
-                char database[pDatabase->n + 1];
-                strncpy(database, pDatabase->z, pDatabase->n);
-                database[pDatabase->n] = 0;
-
-                update_database_names(database);
+                update_database_names(pDatabase->z, pDatabase->n);
 #endif
             }
             break;
@@ -2369,7 +2365,7 @@ public:
         exposed_sqlite3ExprDelete(pParse->db, pExprSpan->pExpr);
     }
 
-    void maxscaleExplain(Parse* pParse, Token* pNext)
+    void maxscaleExplainTable(Parse* pParse, SrcList* pList)
     {
         mxb_assert(this_thread.initialized);
 
@@ -2377,28 +2373,24 @@ public:
         m_type_mask = QUERY_TYPE_READ;
         m_operation = QUERY_OP_SHOW;
 
-        if (pNext)
+        for (int i = 0; i < pList->nSrc; ++i)
         {
-            if (pNext->z)
+            if (pList->a[i].zName)
             {
-                const char EXTENDED[] = "EXTENDED";
-                const char PARTITIONS[] = "PARTITIONS";
-                const char FORMAT[] = "FORMAT";
-                const char FOR[] = "FOR";
-
-#define MATCHES_KEYWORD(t, k) ((t->n == sizeof(k) - 1) && (strncasecmp(t->z, k, t->n) == 0))
-
-                if (MATCHES_KEYWORD(pNext, EXTENDED)
-                    || MATCHES_KEYWORD(pNext, PARTITIONS)
-                    || MATCHES_KEYWORD(pNext, FORMAT)
-                    || MATCHES_KEYWORD(pNext, FOR))
-                {
-                    m_operation = QUERY_OP_EXPLAIN;
-                }
+                update_names(pList->a[i].zDatabase, pList->a[i].zName, pList->a[i].zAlias, nullptr);
             }
         }
     }
 
+    void maxscaleExplain(Parse* pParse)
+    {
+        mxb_assert(this_thread.initialized);
+
+        m_status = QC_QUERY_PARSED;
+        m_type_mask = QUERY_TYPE_READ;
+        m_operation = QUERY_OP_EXPLAIN;
+    }
+
     void maxscaleFlush(Parse* pParse, Token* pWhat)
     {
         mxb_assert(this_thread.initialized);
@@ -3053,7 +3045,21 @@ public:
         switch (pShow->what)
         {
         case MXS_SHOW_COLUMNS:
-            m_type_mask = QUERY_TYPE_READ;
+            {
+                m_type_mask = QUERY_TYPE_READ;
+                const char* zDatabase = nullptr;
+                size_t nDatabase = 0;
+
+                if (pShow->pDatabase)
+                {
+                    zDatabase = pShow->pDatabase->z;
+                    nDatabase = pShow->pDatabase->n;
+
+                    update_database_names(zDatabase, nDatabase);
+                }
+
+                update_table_names(zDatabase, nDatabase, pShow->pName->z, pShow->pName->n);
+            }
             break;
 
         case MXS_SHOW_CREATE_SEQUENCE:
@@ -3113,11 +3119,7 @@ public:
             m_type_mask = QUERY_TYPE_SHOW_TABLES;
 
             if (pShow->pDatabase->z)
             {
-                char db[pShow->pDatabase->n + 1];
-                strncpy(db, pShow->pDatabase->z, pShow->pDatabase->n);
-                db[pShow->pDatabase->n] = 0;
-
-                update_database_names(db);
+                update_database_names(pShow->pDatabase->z, pShow->pDatabase->n);
             }
             break;
@@ -3302,11 +3304,13 @@ private:
         return pz;
     }
 
-    const char* table_name_collected(const char* zTable)
+    const char* table_name_collected(const char* zTable, size_t nTable)
     {
         size_t i = 0;
 
-        while ((i < m_table_names.size()) && (strcmp(m_table_names[i], zTable) != 0))
+        while ((i < m_table_names.size())
+               && (strlen(m_table_names[i]) != nTable
+                   || (strncmp(m_table_names[i], zTable, nTable) != 0)))
         {
             ++i;
         }
@@ -3326,11 +3330,13 @@ private:
         return (i != m_table_fullnames.size()) ? m_table_fullnames[i] : NULL;
     }
 
-    const char* database_name_collected(const char* zDatabase)
+    const char* database_name_collected(const char* zDatabase, size_t nDatabase)
     {
         size_t i = 0;
 
-        while ((i < m_database_names.size()) && (strcmp(m_database_names[i], zDatabase) != 0))
+        while ((i < m_database_names.size())
+               && (strlen(m_database_names[i]) != nDatabase
+                   || (strncmp(m_database_names[i], zDatabase, nDatabase) != 0)))
        {
             ++i;
         }
@@ -3345,11 +3351,11 @@ private:
     {
         mxb_assert(zTable && nTable);
 
-        const char* zCollected_table = table_name_collected(zTable);
+        const char* zCollected_table = table_name_collected(zTable, nTable);
 
         if (!zCollected_table)
        {
-            char* zCopy = MXS_STRDUP_A(zTable);
+            char* zCopy = MXS_STRNDUP_A(zTable, nTable);
 
             m_table_names.push_back(zCopy);
 
@@ -3360,7 +3366,8 @@ private:
 
             if (nDatabase)
             {
-                strcpy(fullname, zDatabase);
+                strncpy(fullname, zDatabase, nDatabase);
+                fullname[nDatabase] = 0;
                 strcat(fullname, ".");
             }
             else
@@ -3368,7 +3375,7 @@ private:
                 fullname[0] = 0;
             }
 
-            strcat(fullname, zTable);
+            strncat(fullname, zTable, nTable);
 
             if (!table_fullname_collected(fullname))
             {
@@ -3380,16 +3387,16 @@ private:
 
         return zCollected_table;
     }
 
-    const char* update_database_names(const char* zDatabase)
+    const char* update_database_names(const char* zDatabase, size_t nDatabase)
     {
         mxb_assert(zDatabase);
         mxb_assert(strlen(zDatabase) != 0);
 
-        const char* zCollected_database = database_name_collected(zDatabase);
+        const char* zCollected_database = database_name_collected(zDatabase, nDatabase);
 
         if (!zCollected_database)
         {
-            char* zCopy = MXS_STRDUP_A(zDatabase);
+            char* zCopy = MXS_STRNDUP_A(zDatabase, nDatabase);
 
             m_database_names.push_back(zCopy);
@@ -3486,7 +3493,8 @@ extern void maxscaleDo(Parse*, ExprList* pEList);
 extern void maxscaleDrop(Parse*, int what, Token* pDatabase, Token* pName);
 extern void maxscaleExecute(Parse*, Token* pName, int type_mask);
 extern void maxscaleExecuteImmediate(Parse*, Token* pName, ExprSpan* pExprSpan, int type_mask);
-extern void maxscaleExplain(Parse*, Token* pNext);
+extern void maxscaleExplainTable(Parse*, SrcList* pList);
+extern void maxscaleExplain(Parse*);
 extern void maxscaleFlush(Parse*, Token* pWhat);
 extern void maxscaleHandler(Parse*, mxs_handler_t, SrcList* pFullName, Token* pName);
 extern void maxscaleLoadData(Parse*, SrcList* pFullName, int local);
@@ -4412,14 +4420,24 @@ void maxscaleExecuteImmediate(Parse* pParse, Token* pName, ExprSpan* pExprSpan,
     QC_EXCEPTION_GUARD(pInfo->maxscaleExecuteImmediate(pParse, pName, pExprSpan, type_mask));
 }
 
-void maxscaleExplain(Parse* pParse, Token* pNext)
+void maxscaleExplainTable(Parse* pParse, SrcList* pList)
 {
     QC_TRACE();
 
     QcSqliteInfo* pInfo = this_thread.pInfo;
     mxb_assert(pInfo);
 
-    QC_EXCEPTION_GUARD(pInfo->maxscaleExplain(pParse, pNext));
+    QC_EXCEPTION_GUARD(pInfo->maxscaleExplainTable(pParse, pList));
+}
+
+void maxscaleExplain(Parse* pParse)
+{
+    QC_TRACE();
+
+    QcSqliteInfo* pInfo = this_thread.pInfo;
+    mxb_assert(pInfo);
+
+    QC_EXCEPTION_GUARD(pInfo->maxscaleExplain(pParse));
 }
 
 void maxscaleFlush(Parse* pParse, Token* pWhat)
diff --git a/query_classifier/qc_sqlite/sqlite-src-3110100/src/parse.y b/query_classifier/qc_sqlite/sqlite-src-3110100/src/parse.y
index de0acfc91..08318ba8d 100644
--- a/query_classifier/qc_sqlite/sqlite-src-3110100/src/parse.y
+++ b/query_classifier/qc_sqlite/sqlite-src-3110100/src/parse.y
@@ -117,7 +117,8 @@ extern void maxscaleDo(Parse*, ExprList* pEList);
 extern void maxscaleDrop(Parse*, int what, Token* pDatabase, Token* pName);
 extern void maxscaleExecute(Parse*, Token* pName, int type_mask);
 extern void maxscaleExecuteImmediate(Parse*, Token* pName, ExprSpan* pExprSpan, int type_mask);
-extern void maxscaleExplain(Parse*, Token* pNext);
+extern void maxscaleExplainTable(Parse*, SrcList* pList);
+extern void maxscaleExplain(Parse*);
 extern void maxscaleFlush(Parse*, Token* pWhat);
 extern void maxscaleHandler(Parse*, mxs_handler_t, SrcList* pFullName, Token* pName);
 extern void maxscaleLoadData(Parse*, SrcList* pFullName, int local);
@@ -303,13 +304,16 @@ ecmd ::= oracle_assignment SEMI.
 
 explain_kw ::= EXPLAIN. // Also covers DESCRIBE
 explain_kw ::= DESC.
-explain ::= explain_kw. { pParse->explain = 1; }
+explain ::= explain_kw tbl_name(A). { pParse->explain = 1; maxscaleExplainTable(pParse, A); }
+explain_type ::= .
+explain_type ::= EXTENDED.
+explain_type ::= PARTITIONS.
 // deferred_id is defined later, after the id token_class has been defined.
-explain ::= explain_kw deferred_id(A). { maxscaleExplain(pParse, &A); }
-explain ::= explain_kw deferred_id(A) DOT deferred_id. { maxscaleExplain(pParse, &A); }
-ecmd ::= explain FOR(A) deferred_id INTEGER SEMI. { // FOR CONNECTION connection_id
+explain_type ::= FORMAT EQ deferred_id. // FORMAT = {TRADITIONAL|JSON}
+
+explain ::= explain_kw explain_type FOR CONNECTION INTEGER. { // FOR CONNECTION connection_id
   pParse->explain = 1;
-  maxscaleExplain(pParse, &A);
+  maxscaleExplain(pParse);
 }
 %endif
 %ifndef SQLITE_OMIT_EXPLAIN
@@ -615,10 +619,10 @@ columnid(A) ::= nm(X). {
 // TODO: BINARY is a reserved word and should not automatically convert into an identifer.
 // TODO: However, if not here then rules such as CAST need to be modified.
   BINARY
-  CACHE /*CASCADE*/ CAST CLOSE COLUMNKW COLUMNS COMMENT CONCURRENT /*CONFLICT*/
+  CACHE /*CASCADE*/ CAST CLOSE COLUMNKW COLUMNS COMMENT CONCURRENT /*CONFLICT*/ CONNECTION
   DATA DATABASE DEALLOCATE DEFERRED /*DESC*/ /*DETACH*/ DUMPFILE
-  /*EACH*/ END ENGINE ENUM EXCLUSIVE /*EXPLAIN*/
-  FIRST FLUSH /*FOR*/ FORMAT
+  /*EACH*/ END ENGINE ENUM EXCLUSIVE /*EXPLAIN*/ EXTENDED
+  FIELDS FIRST FLUSH /*FOR*/ FORMAT
   GLOBAL
 // TODO: IF is a reserved word and should not automatically convert into an identifer.
   IF IMMEDIATE INITIALLY INSTEAD
@@ -628,7 +632,7 @@ columnid(A) ::= nm(X). {
   NAMES NEXT NO
   OF OFFSET OPEN
-  PREVIOUS
+  PARTITIONS PREVIOUS
   QUERY QUICK
   RAISE RECURSIVE /*REINDEX*/ RELEASE /*RENAME*/ /*REPLACE*/ RESET RESTRICT
   ROLLBACK ROLLUP ROW
   SAVEPOINT SELECT_OPTIONS_KW /*SEQUENCE*/ SLAVE /*START*/ STATEMENT STATUS
@@ -3248,7 +3252,10 @@ like_or_where_opt ::= WHERE expr.
 
 %type show {MxsShow}
 
-show(A) ::= SHOW full_opt(X) COLUMNS from_or_in nm(Y) dbnm(Z) from_or_in_db_opt(W) like_or_where_opt . {
+columns_or_fields ::= COLUMNS.
+columns_or_fields ::= FIELDS.
+
+show(A) ::= SHOW full_opt(X) columns_or_fields from_or_in nm(Y) dbnm(Z) from_or_in_db_opt(W) like_or_where_opt . {
   A.what = MXS_SHOW_COLUMNS;
   A.data = X;
   if (Z.z) {
diff --git a/query_classifier/qc_sqlite/sqlite-src-3110100/tool/mkkeywordhash.c b/query_classifier/qc_sqlite/sqlite-src-3110100/tool/mkkeywordhash.c
index b733f4a7e..d0e14c40a 100644
--- a/query_classifier/qc_sqlite/sqlite-src-3110100/tool/mkkeywordhash.c
+++ b/query_classifier/qc_sqlite/sqlite-src-3110100/tool/mkkeywordhash.c
@@ -204,6 +204,7 @@ static Keyword aKeywordTable[] = {
   { "CONFLICT",         "TK_CONFLICT",     CONFLICT          },
 #endif
 #ifdef MAXSCALE
+  { "CONNECTION",       "TK_CONNECTION",   ALWAYS            },
   { "CONCURRENT",       "TK_CONCURRENT",   ALWAYS            },
 #endif
   { "CONSTRAINT",       "TK_CONSTRAINT",   ALWAYS            },
@@ -261,6 +262,7 @@ static Keyword aKeywordTable[] = {
 #ifdef MAXSCALE
   { "EXECUTE",          "TK_EXECUTE",      ALWAYS            },
   { "EXCLUDE",          "TK_EXCLUDE",      ALWAYS            },
+  { "EXTENDED",         "TK_EXTENDED",     ALWAYS            },
 #endif
   { "EXISTS",           "TK_EXISTS",       ALWAYS            },
   { "EXPLAIN",          "TK_EXPLAIN",      EXPLAIN           },
@@ -268,6 +270,7 @@ static Keyword aKeywordTable[] = {
   { "FAIL",             "TK_FAIL",         CONFLICT|TRIGGER  },
 #endif
 #ifdef MAXSCALE
+  { "FIELDS",           "TK_FIELDS",       ALWAYS            },
   { "FIRST",            "TK_FIRST",        ALWAYS            },
   { "FLUSH",            "TK_FLUSH",        ALWAYS            },
   { "FOLLOWING",        "TK_FOLLOWING",    ALWAYS            },
@@ -377,6 +380,7 @@ static Keyword aKeywordTable[] = {
 #endif
 #ifdef MAXSCALE
   { "PARTITION",        "TK_PARTITION",    ALWAYS            },
+  { "PARTITIONS",       "TK_PARTITIONS",   ALWAYS            },
   { "PERSISTENT",       "TK_PERSISTENT",   ALWAYS            },
 #endif
 #ifndef MAXSCALE
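
Note on coverage (illustrative only): the grammar and classifier changes above mean that SHOW COLUMNS, SHOW FIELDS and EXPLAIN <table> are now treated like DESCRIBE and report their target table, so readwritesplit can route them to the node that holds a temporary table. The following is a minimal sketch of how those additional forms could be exercised with the same TestConnections/Connection helpers used by mxs2250_describe_temp_table.cpp; the extra queries and messages are assumptions for illustration, not part of the patch.

    // Hypothetical companion test; assumes the same test framework headers
    // and helpers as mxs2250_describe_temp_table.cpp above.
    #include "mariadb_func.h"
    #include "testconnections.h"

    int main(int argc, char* argv[])
    {
        TestConnections test(argc, argv);

        Connection rwsplit = test.maxscales->rwsplit();

        test.expect(rwsplit.connect(), "Could not connect to rwsplit.");
        test.expect(rwsplit.query("CREATE TEMPORARY TABLE mxs2250 (a int)"),
                    "Could not create temporary table.");

        // These are synonyms of DESCRIBE; with the classifier now reporting
        // their target table, they should also reach the node that holds the
        // temporary table instead of failing on a slave.
        test.expect(rwsplit.query("SHOW COLUMNS FROM mxs2250"),
                    "SHOW COLUMNS on temporary table failed.");
        test.expect(rwsplit.query("SHOW FIELDS FROM mxs2250"),
                    "SHOW FIELDS on temporary table failed.");
        test.expect(rwsplit.query("EXPLAIN mxs2250"),
                    "EXPLAIN on temporary table failed.");

        return test.global_result;
    }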