Merge branch '2.3' into develop

Esa Korhonen
2019-03-28 14:28:48 +02:00
9 changed files with 234 additions and 63 deletions

View File

@@ -222,18 +222,23 @@ undefined. To work around this limitation, the query must be executed in separat
query will be routed to the first available server. This may return an error about
database access rights instead of an error about a missing database.
* The preparation of a prepared statement is routed to all servers. The
execution of a prepared statement is routed to the first available server or to
the server pointed by a routing hint attached to the query. In practice this
means that prepared statements aren't supported by the SchemaRouter.
* Prepared statement support is limited. PREPARE, EXECUTE and DEALLOCATE are routed to the
correct backend if the statement is known and only requires one backend server. EXECUTE
IMMEDIATE is not supported; it is routed to the first available backend and may give
wrong results. Similarly, preparing a statement from a variable (e.g. `PREPARE stmt FROM
@a`) is not supported and may be routed to the wrong server.
* `SHOW DATABASES` is handled by the router itself instead of being routed to a server. Only
the basic form of the query is answered correctly; any modifiers such as `LIKE` are
ignored.
* `SHOW TABLES` is routed to the server with the current database. If using table-level
sharding, the results will be incomplete. Use `SHOW SHARDS` to get results from the router
itself.
sharding, the results will be incomplete. Similarly, `SHOW TABLES FROM db1` is routed to
the server with database `db1`, ignoring table sharding. Use `SHOW SHARDS` to get results
from the router itself.
* `USE db1` is routed to the server with `db1`. If the database is divided across multiple
servers, only one of them will receive the command.
## Examples

View File

@@ -52,6 +52,13 @@
*/
void write(DCB* dcb);
/**
* Write the result set to a DCB as JSON
*
* @param dcb DCB where the result set is written
*/
void write_as_json(DCB* dcb);
private:
std::vector<std::string> m_columns;
std::vector<std::vector<std::string>> m_rows;
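A minimal usage sketch of the new JSON output path, assuming the ResultSet::create() and add_row() helpers declared elsewhere in this header; the column names, values and the DCB are purely illustrative:

    // Sketch only: build a small two-column result and emit it both ways.
    std::unique_ptr<ResultSet> set = ResultSet::create({"Server", "Status"});
    set->add_row({"server1", "Running"});
    set->add_row({"server2", "Down"});

    set->write(dcb);          // classic MySQL resultset protocol
    set->write_as_json(dcb);  // the same rows as a JSON array of objects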

View File

@@ -1668,19 +1668,23 @@ const char* gw_dcb_state2string(dcb_state_t state)
*/
void dcb_printf(DCB* dcb, const char* fmt, ...)
{
GWBUF* buf;
va_list args;
if ((buf = gwbuf_alloc(10240)) == NULL)
{
return;
}
va_start(args, fmt);
vsnprintf((char*)GWBUF_DATA(buf), 10240, fmt, args);
int n = vsnprintf(nullptr, 0, fmt, args);
va_end(args);
buf->end = (void*)((char*)GWBUF_DATA(buf) + strlen((char*)GWBUF_DATA(buf)));
dcb->func.write(dcb, buf);
GWBUF* buf = gwbuf_alloc(n + 1);
if (buf)
{
va_start(args, fmt);
vsnprintf((char*)GWBUF_DATA(buf), n + 1, fmt, args);
va_end(args);
// Remove the trailing null character
GWBUF_RTRIM(buf, 1);
dcb->func.write(dcb, buf);
}
}
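The rewritten dcb_printf() drops the fixed 10240-byte buffer in favour of a two-pass approach: the first vsnprintf() call with a null destination only measures the message, and the second formats it into an exactly-sized GWBUF. The same sizing pattern in standalone form (a hypothetical helper, not part of this commit):

    #include <cstdarg>
    #include <cstdio>
    #include <string>

    // Format a printf-style message into a std::string of exactly the right length.
    std::string format_message(const char* fmt, ...)
    {
        va_list args;
        va_start(args, fmt);
        int n = vsnprintf(nullptr, 0, fmt, args);    // first pass: measure only
        va_end(args);

        if (n < 0)
        {
            return std::string();                    // formatting error
        }

        std::string out(n + 1, '\0');
        va_start(args, fmt);                         // the va_list must be restarted for the second pass
        vsnprintf(&out[0], n + 1, fmt, args);        // second pass: write the message and trailing null
        va_end(args);

        out.resize(n);                               // drop the trailing null, like GWBUF_RTRIM above
        return out;
    }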
/**

View File

@@ -16,6 +16,7 @@
#include <numeric>
#include <maxscale/alloc.h>
#include <maxscale/resultset.hh>
#include <maxscale/buffer.hh>
#include <maxscale/dcb.hh>
@@ -206,3 +207,24 @@ void ResultSet::write(DCB* dcb)
mysql_send_eof(dcb, seqno);
}
void ResultSet::write_as_json(DCB* dcb)
{
json_t* arr = json_array();
for (const auto& row : m_rows)
{
json_t* obj = json_object();
for (size_t i = 0; i < row.size(); i++)
{
json_object_set_new(obj, m_columns[i].c_str(), json_string(row[i].c_str()));
}
json_array_append_new(arr, obj);
}
char* js = json_dumps(arr, JSON_INDENT(4));
dcb_printf(dcb, "%s", js);
MXS_FREE(js);
json_decref(arr);   // json_dumps() does not consume the array, release it here
}
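The serialization above produces a JSON array with one object per row, keyed by column name and indented by four spaces (JSON_INDENT(4)). With the hypothetical two-row result from the earlier sketch, the output written to the DCB would look roughly like this:

    [
        {
            "Server": "server1",
            "Status": "Running"
        },
        {
            "Server": "server2",
            "Status": "Down"
        }
    ]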

View File

@@ -1,4 +1,4 @@
add_library(maxinfo SHARED maxinfo.cc maxinfo_parse.cc maxinfo_error.cc maxinfo_exec.cc)
add_library(maxinfo SHARED maxinfo.cc maxinfo_parse.cc maxinfo_error.cc maxinfo_exec.cc maxinfo_http.cc)
set_target_properties(maxinfo PROPERTIES INSTALL_RPATH ${CMAKE_INSTALL_RPATH}:${MAXSCALE_LIBDIR} VERSION "1.0.0" LINK_FLAGS -Wl,-z,defs)
target_link_libraries(maxinfo maxscale-common)
install_module(maxinfo core)

View File

@@ -303,6 +303,7 @@ static int execute(MXS_ROUTER* rinstance, MXS_ROUTER_SESSION* router_session, GW
if (GWBUF_TYPE(queue) == GWBUF_TYPE_HTTP)
{
handle_url(instance, session, queue);
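// handle_url() only reads the request; the router still owns the buffer, so free it to avoid a leak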
gwbuf_free(queue);
return 0;
}

View File

@@ -137,5 +137,6 @@ extern void maxinfo_send_error(DCB*, int, const char*);
extern void maxinfo_send_parse_error(DCB*, char*, PARSE_ERROR);
extern std::unique_ptr<ResultSet> maxinfo_variables();
extern std::unique_ptr<ResultSet> maxinfo_status();
extern int handle_url(INFO_INSTANCE* instance, INFO_SESSION* session, GWBUF* queue);
#endif

View File

@@ -0,0 +1,108 @@
/*
* Copyright (c) 2016 MariaDB Corporation Ab
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file and at www.mariadb.com/bsl11.
*
* Change Date: 2022-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2 or later of the General
* Public License.
*/
#include "maxinfo.hh"
#include <unordered_map>
#include <string>
#include <functional>
#include <maxscale/utils.hh>
#include "../../../core/internal/poll.hh"
#include "../../../core/internal/monitor.hh"
#include "../../../core/internal/server.hh"
#include "../../../core/internal/service.hh"
#include "../../../core/internal/modules.hh"
#include "../../../core/internal/session.hh"
void serviceGetList_http(INFO_INSTANCE* instance, INFO_SESSION* session, DCB* dcb)
{
serviceGetList()->write_as_json(dcb);
}
void serviceGetListenerList_http(INFO_INSTANCE* instance, INFO_SESSION* session, DCB* dcb)
{
serviceGetListenerList()->write_as_json(dcb);
}
void moduleGetList_http(INFO_INSTANCE* instance, INFO_SESSION* session, DCB* dcb)
{
moduleGetList()->write_as_json(dcb);
}
void monitorGetList_http(INFO_INSTANCE* instance, INFO_SESSION* session, DCB* dcb)
{
MonitorManager::monitor_get_list()->write_as_json(dcb);
}
void maxinfoSessionsAll_http(INFO_INSTANCE* instance, INFO_SESSION* session, DCB* dcb)
{
sessionGetList()->write_as_json(dcb);
}
void maxinfoClientSessions_http(INFO_INSTANCE* instance, INFO_SESSION* session, DCB* dcb)
{
sessionGetList()->write_as_json(dcb);
}
void serverGetList_http(INFO_INSTANCE* instance, INFO_SESSION* session, DCB* dcb)
{
Server::getList()->write_as_json(dcb);
}
void eventTimesGetList_http(INFO_INSTANCE* instance, INFO_SESSION* session, DCB* dcb)
{
eventTimesGetList()->write_as_json(dcb);
}
void maxinfo_variables_http(INFO_INSTANCE* instance, INFO_SESSION* session, DCB* dcb)
{
maxinfo_variables()->write_as_json(dcb);
}
void maxinfo_status_http(INFO_INSTANCE* instance, INFO_SESSION* session, DCB* dcb)
{
maxinfo_status()->write_as_json(dcb);
}
/**
* Table that maps a URI to a function to call
* to obtain the result set related to that URI
*/
static std::unordered_map<std::string, void (*)(INFO_INSTANCE*, INFO_SESSION*, DCB*)> supported_uri
{
{"/services", serviceGetList_http},
{"/listeners", serviceGetListenerList_http},
{"/modules", moduleGetList_http},
{"/monitors", monitorGetList_http},
{"/sessions", maxinfoSessionsAll_http},
{"/clients", maxinfoClientSessions_http},
{"/servers", serverGetList_http},
{"/variables", maxinfo_variables_http},
{"/status", maxinfo_status_http},
{"/event/times", eventTimesGetList_http}
};
int handle_url(INFO_INSTANCE* instance, INFO_SESSION* session, GWBUF* queue)
{
std::string uri((char*)GWBUF_DATA(queue));
auto it = supported_uri.find(uri);
if (it != supported_uri.end())
{
it->second(instance, session, session->dcb);
}
return 1;
}
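The unordered_map keeps handle_url() a simple constant-time dispatch: each supported URI maps to a handler that builds a ResultSet and serializes it with write_as_json(). An unknown URI falls through without writing a response, and handle_url() still returns 1. Adding an endpoint needs only one handler and one table entry; a hedged sketch (the /threads URI and the maxinfo_threads() helper are hypothetical):

    // Hypothetical extension; maxinfo_threads() is assumed to return a std::unique_ptr<ResultSet>.
    void maxinfo_threads_http(INFO_INSTANCE* instance, INFO_SESSION* session, DCB* dcb)
    {
        maxinfo_threads()->write_as_json(dcb);
    }

    // ...plus one extra entry in supported_uri:
    //     {"/threads", maxinfo_threads_http},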

View File

@@ -1630,41 +1630,59 @@ SERVER* SchemaRouterSession::get_query_target(GWBUF* buffer)
int n_databases = 0;
char** databases = qc_get_database_names(buffer, &n_databases);
for (int i = 0; i < n_databases; i++)
if (n_databases > 0)
{
for (int j = 0; j < n_tables; j++)
// Prefer to select the route target by table. If no tables, route by database.
if (n_tables)
{
SERVER* target = m_shard.get_location(tables[j]);
if (target)
for (int i = 0; i < n_tables; i++)
{
if (rval && target != rval)
SERVER* target = m_shard.get_location(tables[i]);
if (target)
{
MXS_ERROR("Query targets tables on servers '%s' and '%s'. "
"Cross server queries are not supported.",
rval->name(),
target->name());
}
else if (rval == NULL)
{
rval = target;
MXS_INFO("Query targets table '%s' on server '%s'",
tables[j],
rval->name());
if (rval && target != rval)
{
MXS_ERROR("Query targets tables on servers '%s' and '%s'. "
"Cross server queries are not supported.",
rval->name(), target->name());
}
else if (rval == NULL)
{
rval = target;
MXS_INFO("Query targets table '%s' on server '%s'", tables[i], rval->name());
}
}
}
}
else if (rval == nullptr)
{
// Queries which target a database but no tables can have multiple targets. Select first one.
for (int i = 0; i < n_databases; i++)
{
SERVER* target = m_shard.get_location(databases[i]);
if (target)
{
rval = target;
break;
}
}
}
}
// Free the databases and tables arrays.
for (int i = 0; i < n_databases; i++)
{
MXS_FREE(databases[i]);
}
MXS_FREE(databases);
for (int i = 0; i < n_tables; i++)
{
MXS_FREE(tables[i]);
}
MXS_FREE(tables);
MXS_FREE(databases);
return rval;
}
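The restructured selection logic gives table locations precedence over database locations: only when the query names no tables does the router fall back to the first server that holds one of the referenced databases, and cross-server table references are reported as an error while the first located server is kept. A hedged illustration of the intended outcomes (the shard layout and names are hypothetical):

    // Hypothetical shard map: db1.t1 -> server1, db2.t2 -> server2, db3 -> server3
    //
    //   SELECT * FROM db1.t1;           -> server1 (routed by table location)
    //   SELECT * FROM db1.t1, db2.t2;   -> cross-server error logged, server1 is kept
    //   query referencing only db3      -> server3 (database fallback, first server found)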
@@ -1675,52 +1693,57 @@ SERVER* SchemaRouterSession::get_ps_target(GWBUF* buffer, uint32_t qtype, qc_que
if (qc_query_is_type(qtype, QUERY_TYPE_PREPARE_NAMED_STMT))
{
// If pStmt is null, the PREPARE was malformed. In that case it can be routed to any backend to get
// a proper error response. Also returns null if preparing from a variable. This is a limitation.
GWBUF* pStmt = qc_get_preparable_stmt(buffer);
int n_tables = 0;
char** tables = qc_get_table_names(pStmt, &n_tables, true);
char* stmt = qc_get_prepare_name(buffer);
for (int i = 0; i < n_tables; i++)
if (pStmt)
{
SERVER* target = m_shard.get_location(tables[i]);
int n_tables = 0;
char** tables = qc_get_table_names(pStmt, &n_tables, true);
char* stmt = qc_get_prepare_name(buffer);
if (target)
for (int i = 0; i < n_tables; i++)
{
if (rval && target != rval)
SERVER* target = m_shard.get_location(tables[i]);
if (target)
{
MXS_ERROR("Statement targets tables on servers '%s' and '%s'. "
"Cross server queries are not supported.",
rval->name(),
target->name());
}
else if (rval == NULL)
{
rval = target;
if (rval && target != rval)
{
MXS_ERROR("Statement targets tables on servers '%s' and '%s'. "
"Cross server queries are not supported.",
rval->name(), target->name());
}
else if (rval == NULL)
{
rval = target;
}
}
MXS_FREE(tables[i]);
}
MXS_FREE(tables[i]);
}
if (rval)
{
MXS_INFO("PREPARING NAMED %s ON SERVER %s", stmt, rval->name());
m_shard.add_statement(stmt, rval);
if (rval)
{
MXS_INFO("PREPARING NAMED %s ON SERVER %s", stmt, rval->name());
m_shard.add_statement(stmt, rval);
}
MXS_FREE(tables);
MXS_FREE(stmt);
}
MXS_FREE(tables);
MXS_FREE(stmt);
}
else if (op == QUERY_OP_EXECUTE)
{
char* stmt = qc_get_prepare_name(buffer);
rval = m_shard.get_statement(stmt);
MXS_INFO("Executing named statement %s on server %s", stmt, rval->name());
SERVER* ps_target = m_shard.get_statement(stmt);
if (ps_target)
{
rval = ps_target;
MXS_INFO("Executing named statement %s on server %s", stmt, rval->name());
}
MXS_FREE(stmt);
}
else if (qc_query_is_type(qtype, QUERY_TYPE_DEALLOC_PREPARE))
{
char* stmt = qc_get_prepare_name(buffer);
if ((rval = m_shard.get_statement(stmt)))
{
MXS_INFO("Closing named statement %s on server %s", stmt, rval->name());