Merge branch '2.3' into develop

Johan Wikman
2019-06-05 14:48:43 +03:00
7 changed files with 182 additions and 56 deletions

View File

@@ -202,6 +202,9 @@ add_test_executable(mxs2167_extra_port.cpp mxs2167_extra_port mxs2167_extra_port
 # Test KILL QUERY functionality
 add_test_executable(kill_query.cpp kill_query replication LABELS REPL_BACKEND)
 
+# MXS-2250: DESCRIBE on temporary table should work.
+add_test_executable(mxs2250_describe_temp_table.cpp mxs2250_describe_temp_table mxs2250_describe_temp_table LABELS REPL_BACKEND)
+
 ############################################
 # BEGIN: Tests that require GTID #
 ############################################

View File

@@ -0,0 +1,61 @@
[maxscale]
threads=###threads###
log_warning=1

[server1]
type=server
address=###node_server_IP_1###
port=###node_server_port_1###
protocol=mariadbbackend

[server2]
type=server
address=###node_server_IP_2###
port=###node_server_port_2###
protocol=mariadbbackend

[server3]
type=server
address=###node_server_IP_3###
port=###node_server_port_3###
protocol=mariadbbackend

[server4]
type=server
address=###node_server_IP_4###
port=###node_server_port_4###
protocol=mariadbbackend

[Monitor]
type=monitor
module=mariadbmon
servers=server1,server2,server3,server4
user=maxskysql
password=skysql
monitor_interval=1000

[RWS]
type=service
router=readwritesplit
servers=server1,server2,server3,server4
user=maxskysql
password=skysql
slave_selection_criteria=LEAST_ROUTER_CONNECTIONS

[RWS-Listener]
type=listener
service=RWS
protocol=mariadbclient
port=4006
#socket=/tmp/rwsplit.sock

[CLI]
type=service
router=cli

[CLI Listener]
type=listener
service=CLI
protocol=maxscaled
#address=localhost
socket=default

View File

@@ -0,0 +1,31 @@
/*
 * Copyright (c) 2019 MariaDB Corporation Ab
 *
 * Use of this software is governed by the Business Source License included
 * in the LICENSE.TXT file and at www.mariadb.com/bsl11.
 *
 * Change Date: 2022-01-01
 *
 * On the date above, in accordance with the Business Source License, use
 * of this software will be governed by version 2 or later of the General
 * Public License.
 */

#include "mariadb_func.h"
#include "testconnections.h"

int main(int argc, char* argv[])
{
    TestConnections test(argc, argv);

    Connection rwsplit = test.maxscales->rwsplit();

    test.expect(rwsplit.connect(), "Could not connect to rwsplit.");
    test.expect(rwsplit.query("CREATE TEMPORARY TABLE mxs2250 (a int)"),
                "Could not create temporary table.");
    test.expect(rwsplit.query("DESCRIBE mxs2250"),
                "Could not describe temporary table.");

    return test.global_result;
}
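Note: the test above relies on the maxscale-system-test framework (TestConnections, Connection) and on the configuration template in the previous file. As a rough standalone sketch only, the same MXS-2250 scenario can be exercised with the plain MariaDB C API against the readwritesplit listener defined in that template; the host, credentials and port below are assumptions taken from the template, not part of this commit.

// Hypothetical standalone reproduction of the MXS-2250 scenario against the
// RWS listener from the template above; connection details are assumptions.
#include <mysql.h>
#include <cstdio>

int main()
{
    MYSQL* conn = mysql_init(nullptr);

    // Assumed host and credentials; in the test they come from the framework.
    if (!conn || !mysql_real_connect(conn, "127.0.0.1", "maxskysql", "skysql",
                                     nullptr, 4006, nullptr, 0))
    {
        fprintf(stderr, "Connect failed: %s\n", conn ? mysql_error(conn) : "out of memory");
        return 1;
    }

    // A temporary table exists only on the server where it was created, so the
    // subsequent DESCRIBE must be routed to that same server.
    if (mysql_query(conn, "CREATE TEMPORARY TABLE mxs2250 (a int)")
        || mysql_query(conn, "DESCRIBE mxs2250"))
    {
        fprintf(stderr, "Query failed: %s\n", mysql_error(conn));
        mysql_close(conn);
        return 1;
    }

    // Drain the DESCRIBE result set before closing.
    if (MYSQL_RES* res = mysql_store_result(conn))
    {
        mysql_free_result(res);
    }

    mysql_close(conn);
    return 0;
}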

View File

@@ -1654,7 +1654,7 @@ int32_t qc_mysql_get_table_names(GWBUF* querybuf, int32_t fullnames, char*** tab
         goto retblock;
     }
 
-    if (lex->describe || is_show_command(lex->sql_command))
+    if (lex->describe || (is_show_command(lex->sql_command) && !(lex->sql_command == SQLCOM_SHOW_FIELDS)))
     {
         goto retblock;
     }
@@ -1992,7 +1992,9 @@ int32_t qc_mysql_get_database_names(GWBUF* querybuf, char*** databasesp, int* si
         goto retblock;
     }
 
-    if (lex->describe || (is_show_command(lex->sql_command) && !(lex->sql_command == SQLCOM_SHOW_TABLES)))
+    if (lex->describe || (is_show_command(lex->sql_command)
+                          && !(lex->sql_command == SQLCOM_SHOW_TABLES)
+                          && !(lex->sql_command == SQLCOM_SHOW_FIELDS)))
    {
        goto retblock;
    }
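The two hunks above relax the early-exit conditions in the embedded-parser classifier: a DESCRIBE (reported by the parser as SQLCOM_SHOW_FIELDS) no longer suppresses table and database name collection, so readwritesplit can see which table is being described. The following is a minimal standalone sketch of that predicate change; the enum and is_show_command() are simplified stand-ins, not the real embedded parser.

// Standalone illustration of the predicate change above; simplified stand-ins only.
#include <cassert>

enum sql_command_t
{
    SQLCOM_SELECT,
    SQLCOM_SHOW_DATABASES,
    SQLCOM_SHOW_TABLES,
    SQLCOM_SHOW_FIELDS      // what DESCRIBE / SHOW COLUMNS parses to
};

static bool is_show_command(sql_command_t cmd)
{
    return cmd == SQLCOM_SHOW_DATABASES
           || cmd == SQLCOM_SHOW_TABLES
           || cmd == SQLCOM_SHOW_FIELDS;
}

// Before: any SHOW command (and thus DESCRIBE) skipped table name collection.
static bool skips_table_names_before(bool describe, sql_command_t cmd)
{
    return describe || is_show_command(cmd);
}

// After: SHOW FIELDS still has its table name collected.
static bool skips_table_names_after(bool describe, sql_command_t cmd)
{
    return describe || (is_show_command(cmd) && !(cmd == SQLCOM_SHOW_FIELDS));
}

int main()
{
    assert(skips_table_names_before(false, SQLCOM_SHOW_FIELDS));   // old: skipped
    assert(!skips_table_names_after(false, SQLCOM_SHOW_FIELDS));   // new: collected
    assert(skips_table_names_after(false, SQLCOM_SHOW_TABLES));    // still skipped
    return 0;
}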

View File

@@ -749,7 +749,7 @@ public:
         if (should_collect_database)
         {
-            zCollected_database = update_database_names(database);
+            zCollected_database = update_database_names(database, nDatabase);
         }
 
         if (pAliases && zCollected_table && zAlias)
@@ -2281,11 +2281,7 @@ public:
             // TODO: certain what a returned database actually refers to
             // TODO: so better not to provide a name until there is a
             // TODO: specific op.
-            char database[pDatabase->n + 1];
-            strncpy(database, pDatabase->z, pDatabase->n);
-            database[pDatabase->n] = 0;
-            update_database_names(database);
+            update_database_names(pDatabase->z, pDatabase->n);
 #endif
         }
         break;
@@ -2369,7 +2365,7 @@ public:
         exposed_sqlite3ExprDelete(pParse->db, pExprSpan->pExpr);
     }
 
-    void maxscaleExplain(Parse* pParse, Token* pNext)
+    void maxscaleExplainTable(Parse* pParse, SrcList* pList)
     {
         mxb_assert(this_thread.initialized);
@@ -2377,28 +2373,24 @@ public:
         m_type_mask = QUERY_TYPE_READ;
         m_operation = QUERY_OP_SHOW;
 
-        if (pNext)
+        for (int i = 0; i < pList->nSrc; ++i)
         {
-            if (pNext->z)
+            if (pList->a[i].zName)
             {
-                const char EXTENDED[] = "EXTENDED";
-                const char PARTITIONS[] = "PARTITIONS";
-                const char FORMAT[] = "FORMAT";
-                const char FOR[] = "FOR";
-#define MATCHES_KEYWORD(t, k) ((t->n == sizeof(k) - 1) && (strncasecmp(t->z, k, t->n) == 0))
-                if (MATCHES_KEYWORD(pNext, EXTENDED)
-                    || MATCHES_KEYWORD(pNext, PARTITIONS)
-                    || MATCHES_KEYWORD(pNext, FORMAT)
-                    || MATCHES_KEYWORD(pNext, FOR))
-                {
-                    m_operation = QUERY_OP_EXPLAIN;
-                }
+                update_names(pList->a[i].zDatabase, pList->a[i].zName, pList->a[i].zAlias, nullptr);
             }
         }
     }
 
+    void maxscaleExplain(Parse* pParse)
+    {
+        mxb_assert(this_thread.initialized);
+        m_status = QC_QUERY_PARSED;
+        m_type_mask = QUERY_TYPE_READ;
+        m_operation = QUERY_OP_EXPLAIN;
+    }
+
     void maxscaleFlush(Parse* pParse, Token* pWhat)
     {
         mxb_assert(this_thread.initialized);
@@ -3053,7 +3045,21 @@ public:
         switch (pShow->what)
         {
        case MXS_SHOW_COLUMNS:
-            m_type_mask = QUERY_TYPE_READ;
+            {
+                m_type_mask = QUERY_TYPE_READ;
+
+                const char* zDatabase = nullptr;
+                size_t nDatabase = 0;
+
+                if (pShow->pDatabase)
+                {
+                    zDatabase = pShow->pDatabase->z;
+                    nDatabase = pShow->pDatabase->n;
+
+                    update_database_names(zDatabase, nDatabase);
+                }
+
+                update_table_names(zDatabase, nDatabase, pShow->pName->z, pShow->pName->n);
+            }
             break;
 
        case MXS_SHOW_CREATE_SEQUENCE:
@@ -3113,11 +3119,7 @@ public:
            m_type_mask = QUERY_TYPE_SHOW_TABLES;
 
            if (pShow->pDatabase->z)
            {
-                char db[pShow->pDatabase->n + 1];
-                strncpy(db, pShow->pDatabase->z, pShow->pDatabase->n);
-                db[pShow->pDatabase->n] = 0;
-                update_database_names(db);
+                update_database_names(pShow->pDatabase->z, pShow->pDatabase->n);
            }
            break;
@@ -3302,11 +3304,13 @@ private:
         return pz;
     }
 
-    const char* table_name_collected(const char* zTable)
+    const char* table_name_collected(const char* zTable, size_t nTable)
     {
         size_t i = 0;
 
-        while ((i < m_table_names.size()) && (strcmp(m_table_names[i], zTable) != 0))
+        while ((i < m_table_names.size())
+               && (strlen(m_table_names[i]) != nTable
+                   || (strncmp(m_table_names[i], zTable, nTable) != 0)))
         {
             ++i;
         }
@@ -3326,11 +3330,13 @@ private:
         return (i != m_table_fullnames.size()) ? m_table_fullnames[i] : NULL;
     }
 
-    const char* database_name_collected(const char* zDatabase)
+    const char* database_name_collected(const char* zDatabase, size_t nDatabase)
     {
         size_t i = 0;
 
-        while ((i < m_database_names.size()) && (strcmp(m_database_names[i], zDatabase) != 0))
+        while ((i < m_database_names.size())
+               && (strlen(m_database_names[i]) != nDatabase
+                   || (strncmp(m_database_names[i], zDatabase, nDatabase) != 0)))
         {
             ++i;
         }
@@ -3345,11 +3351,11 @@ private:
     {
         mxb_assert(zTable && nTable);
 
-        const char* zCollected_table = table_name_collected(zTable);
+        const char* zCollected_table = table_name_collected(zTable, nTable);
 
         if (!zCollected_table)
         {
-            char* zCopy = MXS_STRDUP_A(zTable);
+            char* zCopy = MXS_STRNDUP_A(zTable, nTable);
 
             m_table_names.push_back(zCopy);
@@ -3360,7 +3366,8 @@ private:
            if (nDatabase)
            {
-                strcpy(fullname, zDatabase);
+                strncpy(fullname, zDatabase, nDatabase);
+                fullname[nDatabase] = 0;
                strcat(fullname, ".");
            }
            else
@@ -3368,7 +3375,7 @@ private:
                fullname[0] = 0;
            }
 
-            strcat(fullname, zTable);
+            strncat(fullname, zTable, nTable);
 
            if (!table_fullname_collected(fullname))
            {
@@ -3380,16 +3387,16 @@ private:
         return zCollected_table;
     }
 
-    const char* update_database_names(const char* zDatabase)
+    const char* update_database_names(const char* zDatabase, size_t nDatabase)
     {
         mxb_assert(zDatabase);
         mxb_assert(strlen(zDatabase) != 0);
 
-        const char* zCollected_database = database_name_collected(zDatabase);
+        const char* zCollected_database = database_name_collected(zDatabase, nDatabase);
 
         if (!zCollected_database)
         {
-            char* zCopy = MXS_STRDUP_A(zDatabase);
+            char* zCopy = MXS_STRNDUP_A(zDatabase, nDatabase);
 
             m_database_names.push_back(zCopy);
@@ -3486,7 +3493,8 @@ extern void maxscaleDo(Parse*, ExprList* pEList);
 extern void maxscaleDrop(Parse*, int what, Token* pDatabase, Token* pName);
 extern void maxscaleExecute(Parse*, Token* pName, int type_mask);
 extern void maxscaleExecuteImmediate(Parse*, Token* pName, ExprSpan* pExprSpan, int type_mask);
-extern void maxscaleExplain(Parse*, Token* pNext);
+extern void maxscaleExplainTable(Parse*, SrcList* pList);
+extern void maxscaleExplain(Parse*);
 extern void maxscaleFlush(Parse*, Token* pWhat);
 extern void maxscaleHandler(Parse*, mxs_handler_t, SrcList* pFullName, Token* pName);
 extern void maxscaleLoadData(Parse*, SrcList* pFullName, int local);
@@ -4412,14 +4420,24 @@ void maxscaleExecuteImmediate(Parse* pParse, Token* pName, ExprSpan* pExprSpan,
     QC_EXCEPTION_GUARD(pInfo->maxscaleExecuteImmediate(pParse, pName, pExprSpan, type_mask));
 }
 
-void maxscaleExplain(Parse* pParse, Token* pNext)
+void maxscaleExplainTable(Parse* pParse, SrcList* pList)
 {
     QC_TRACE();
 
     QcSqliteInfo* pInfo = this_thread.pInfo;
     mxb_assert(pInfo);
 
-    QC_EXCEPTION_GUARD(pInfo->maxscaleExplain(pParse, pNext));
+    QC_EXCEPTION_GUARD(pInfo->maxscaleExplainTable(pParse, pList));
+}
+
+void maxscaleExplain(Parse* pParse)
+{
+    QC_TRACE();
+
+    QcSqliteInfo* pInfo = this_thread.pInfo;
+    mxb_assert(pInfo);
+
+    QC_EXCEPTION_GUARD(pInfo->maxscaleExplain(pParse));
 }
 
 void maxscaleFlush(Parse* pParse, Token* pWhat)
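Most of the qc_sqlite changes above switch the collected-name bookkeeping from NUL-terminated copies (strncpy into a stack buffer, then update_*_names(zName)) to passing the parser token's pointer and length directly, duplicating with MXS_STRNDUP_A only when the name has not been seen before. A minimal standalone sketch of that pattern, using standard containers as a stand-in for the QcSqliteInfo helpers rather than the actual MaxScale code, looks like this:

// Minimal sketch of pointer+length name collection (simplified stand-in for
// the update_*_names()/ *_name_collected() helpers changed above).
#include <cstring>
#include <deque>
#include <string>

class NameCollector
{
public:
    // Returns the stored copy, inserting (zName, nName) on first sight.
    // Parser tokens are not NUL-terminated, so the lookup is length-aware.
    const char* update_names(const char* zName, size_t nName)
    {
        for (const std::string& name : m_names)
        {
            if (name.size() == nName && memcmp(name.data(), zName, nName) == 0)
            {
                return name.c_str();
            }
        }

        // Bounded copy, analogous to MXS_STRNDUP_A in the real code. A deque
        // is used so pointers returned earlier stay valid after insertions.
        m_names.emplace_back(zName, nName);
        return m_names.back().c_str();
    }

private:
    std::deque<std::string> m_names;
};

int main()
{
    NameCollector names;
    const char* token = "mxs2250 (a int)";           // name is not NUL-terminated
    const char* first = names.update_names(token, 7);
    const char* again = names.update_names("mxs2250", 7);
    return first == again ? 0 : 1;                   // duplicate lookup hits the same copy
}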

View File

@@ -117,7 +117,8 @@ extern void maxscaleDo(Parse*, ExprList* pEList);
 extern void maxscaleDrop(Parse*, int what, Token* pDatabase, Token* pName);
 extern void maxscaleExecute(Parse*, Token* pName, int type_mask);
 extern void maxscaleExecuteImmediate(Parse*, Token* pName, ExprSpan* pExprSpan, int type_mask);
-extern void maxscaleExplain(Parse*, Token* pNext);
+extern void maxscaleExplainTable(Parse*, SrcList* pList);
+extern void maxscaleExplain(Parse*);
 extern void maxscaleFlush(Parse*, Token* pWhat);
 extern void maxscaleHandler(Parse*, mxs_handler_t, SrcList* pFullName, Token* pName);
 extern void maxscaleLoadData(Parse*, SrcList* pFullName, int local);
@@ -303,13 +304,16 @@ ecmd ::= oracle_assignment SEMI.
 explain_kw ::= EXPLAIN. // Also covers DESCRIBE
 explain_kw ::= DESC.
 
-explain ::= explain_kw. { pParse->explain = 1; }
+explain ::= explain_kw tbl_name(A). { pParse->explain = 1; maxscaleExplainTable(pParse, A); }
+
+explain_type ::= .
+explain_type ::= EXTENDED.
+explain_type ::= PARTITIONS.
 // deferred_id is defined later, after the id token_class has been defined.
-explain ::= explain_kw deferred_id(A). { maxscaleExplain(pParse, &A); }
-explain ::= explain_kw deferred_id(A) DOT deferred_id. { maxscaleExplain(pParse, &A); }
-ecmd ::= explain FOR(A) deferred_id INTEGER SEMI. { // FOR CONNECTION connection_id
+explain_type ::= FORMAT TK_EQ deferred_id. // FORMAT = {TRADITIONAL|JSON}
+
+explain ::= explain_kw explain_type FOR CONNECTION INTEGER. { // FOR CONNECTION connection_id
     pParse->explain = 1;
-    maxscaleExplain(pParse, &A);
+    maxscaleExplain(pParse);
 }
 %endif
 %ifndef SQLITE_OMIT_EXPLAIN
@@ -615,10 +619,10 @@ columnid(A) ::= nm(X). {
   // TODO: BINARY is a reserved word and should not automatically convert into an identifer.
   // TODO: However, if not here then rules such as CAST need to be modified.
   BINARY
-  CACHE /*CASCADE*/ CAST CLOSE COLUMNKW COLUMNS COMMENT CONCURRENT /*CONFLICT*/
+  CACHE /*CASCADE*/ CAST CLOSE COLUMNKW COLUMNS COMMENT CONCURRENT /*CONFLICT*/ CONNECTION
   DATA DATABASE DEALLOCATE DEFERRED /*DESC*/ /*DETACH*/ DUMPFILE
-  /*EACH*/ END ENGINE ENUM EXCLUSIVE /*EXPLAIN*/
-  FIRST FLUSH /*FOR*/ FORMAT
+  /*EACH*/ END ENGINE ENUM EXCLUSIVE /*EXPLAIN*/ EXTENDED
+  FIELDS FIRST FLUSH /*FOR*/ FORMAT
   GLOBAL
   // TODO: IF is a reserved word and should not automatically convert into an identifer.
   IF IMMEDIATE INITIALLY INSTEAD
@@ -628,7 +632,7 @@ columnid(A) ::= nm(X). {
   NAMES NEXT
   NO
   OF OFFSET OPEN
-  PREVIOUS
+  PARTITIONS PREVIOUS
   QUERY QUICK
   RAISE RECURSIVE /*REINDEX*/ RELEASE /*RENAME*/ /*REPLACE*/ RESET RESTRICT ROLLBACK ROLLUP ROW
   SAVEPOINT SELECT_OPTIONS_KW /*SEQUENCE*/ SLAVE /*START*/ STATEMENT STATUS
@@ -3248,7 +3252,10 @@ like_or_where_opt ::= WHERE expr.
 %type show {MxsShow}
 
-show(A) ::= SHOW full_opt(X) COLUMNS from_or_in nm(Y) dbnm(Z) from_or_in_db_opt(W) like_or_where_opt . {
+columns_or_fields ::= COLUMNS.
+columns_or_fields ::= FIELDS.
+
+show(A) ::= SHOW full_opt(X) columns_or_fields from_or_in nm(Y) dbnm(Z) from_or_in_db_opt(W) like_or_where_opt . {
     A.what = MXS_SHOW_COLUMNS;
     A.data = X;
     if (Z.z) {
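The grammar changes above split the old catch-all maxscaleExplain(Parse*, Token*) into two paths: EXPLAIN/DESCRIBE/DESC of a table goes to maxscaleExplainTable() (and SHOW COLUMNS/FIELDS to the MXS_SHOW_COLUMNS case), while EXPLAIN ... FOR CONNECTION goes to the new argument-less maxscaleExplain(). The table below is an illustrative summary only (a hand-written expectation list, not a MaxScale API), pairing statements the new rules match with what the handlers in qc_sqlite.cc record for them.

// Illustrative only: statements matched by the rules above and the expected
// effect according to the qc_sqlite.cc changes in this commit.
#include <cstdio>

struct GrammarExample
{
    const char* sql;
    const char* effect;
};

static const GrammarExample examples[] =
{
    {"DESCRIBE mxs2250",
     "explain_kw tbl_name -> maxscaleExplainTable(): QUERY_OP_SHOW, table name collected"},
    {"SHOW FIELDS FROM mxs2250",
     "columns_or_fields rule -> MXS_SHOW_COLUMNS: table (and database) names collected"},
    {"EXPLAIN FOR CONNECTION 4",
     "explain_kw explain_type FOR CONNECTION INTEGER -> maxscaleExplain(): QUERY_OP_EXPLAIN"},
    {"EXPLAIN FORMAT = JSON FOR CONNECTION 4",
     "explain_type ::= FORMAT TK_EQ deferred_id -> maxscaleExplain(): QUERY_OP_EXPLAIN"},
};

int main()
{
    for (const GrammarExample& e : examples)
    {
        printf("%-42s %s\n", e.sql, e.effect);
    }
    return 0;
}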

View File

@@ -204,6 +204,7 @@ static Keyword aKeywordTable[] = {
   { "CONFLICT", "TK_CONFLICT", CONFLICT },
 #endif
 #ifdef MAXSCALE
+  { "CONNECTION", "TK_CONNECTION", ALWAYS },
   { "CONCURRENT", "TK_CONCURRENT", ALWAYS },
 #endif
   { "CONSTRAINT", "TK_CONSTRAINT", ALWAYS },
@@ -261,6 +262,7 @@ static Keyword aKeywordTable[] = {
 #ifdef MAXSCALE
   { "EXECUTE", "TK_EXECUTE", ALWAYS },
   { "EXCLUDE", "TK_EXCLUDE", ALWAYS },
+  { "EXTENDED", "TK_EXTENDED", ALWAYS },
 #endif
   { "EXISTS", "TK_EXISTS", ALWAYS },
   { "EXPLAIN", "TK_EXPLAIN", EXPLAIN },
@@ -268,6 +270,7 @@ static Keyword aKeywordTable[] = {
   { "FAIL", "TK_FAIL", CONFLICT|TRIGGER },
 #endif
 #ifdef MAXSCALE
+  { "FIELDS", "TK_FIELDS", ALWAYS },
   { "FIRST", "TK_FIRST", ALWAYS },
   { "FLUSH", "TK_FLUSH", ALWAYS },
   { "FOLLOWING", "TK_FOLLOWING", ALWAYS },
@@ -377,6 +380,7 @@ static Keyword aKeywordTable[] = {
 #endif
 #ifdef MAXSCALE
   { "PARTITION", "TK_PARTITION", ALWAYS },
+  { "PARTITIONS", "TK_PARTITIONS", ALWAYS },
   { "PERSISTENT", "TK_PERSISTENT", ALWAYS },
 #endif
 #ifndef MAXSCALE