Merge branch '2.3' into develop
commit 6038f1f386

@@ -29,13 +29,15 @@ protocol=MariaDBBackend
authenticator=PAMBackendAuth
```

The client PAM authenticator will fetch user entries with `plugin='pam'` from
the `mysql.user` table. The entries should also have a PAM service name set in
the `authentication_string` column. The matching PAM service in the operating
system PAM config will be used for authenticating a user. If the
`authentication_string` for an entry is empty, a fallback service (e.g. `other`)
is used. If a username@host has multiple matching entries, they will all be
attempted until authentication succeeds or all fail.
The PAM authenticator fetches user entries with `plugin='pam'` from
the `mysql.user` table of a backend. The user accounts also need to have either
the global SELECT privilege or a database- or table-level privilege. The PAM
service name of a user is read from the `authentication_string` column. The
matching PAM service in the operating system PAM config is used for
authenticating the user. If the `authentication_string` for a user is empty,
the fallback service `mysql` is used. If a username@host combination matches
multiple rows, they will all be attempted until authentication succeeds or all
services fail.
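
As an illustration of the account format described above, a backend user entry that the PAM authenticator would pick up could be created as follows. The user name, host, database and the PAM service name `mariadb` are examples only, not part of this commit:

```
CREATE USER 'alice'@'%' IDENTIFIED VIA pam USING 'mariadb';
GRANT SELECT ON test.* TO 'alice'@'%';
```

This produces a `mysql.user` row with `plugin='pam'`, the service name in `authentication_string` and a database-level privilege, matching the requirements listed above.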

PAM service configuration is out of the scope of this document, see
[The Linux-PAM System Administrators' Guide

@@ -602,6 +602,9 @@ Set the directory where the data files used by MariaDB MaxScale are stored.
Modules can write to this directory and, for example, the binlogrouter uses this
folder as the default location for storing binary logs.

This is also the directory from which the password encryption key generated by
`maxkeys` is read.

```
datadir=/home/user/maxscale_data/
```

@@ -702,12 +705,6 @@ provided by this module is used by MariaDB MaxScale when deciding where a
particular statement should be sent. The default query classifier is
_qc_sqlite_.

#### `query_classifier_args`

Arguments for the query classifier. What arguments are accepted depends on the
particular query classifier being used. The default query classifier -
_qc_sqlite_ - supports the following arguments:

#### `query_classifier_cache_size`

Specifies the maximum size of the query classifier cache. The default limit is
@@ -735,6 +732,12 @@ amount of memory available for each thread, divide the cache size with the value
of `threads`. If statements are evicted from the cache (visible in the
diagnostic output), consider increasing the cache size.
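
As a worked illustration of the sizing rule above (the values are only an example, not defaults): with the settings below each of the four worker threads may use up to a quarter of the configured total, i.e. 64M of the 256M cache.

```
threads=4
query_classifier_cache_size=256M
```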

#### `query_classifier_args`

Arguments for the query classifier. What arguments are accepted depends on the
particular query classifier being used. The default query classifier -
_qc_sqlite_ - supports the following arguments:

##### `log_unrecognized_statements`

An integer argument taking the following values:

@@ -1323,6 +1326,12 @@ default. To enable them, define the timeout in seconds in the service's
configuration section. A value of zero is interpreted as no timeout, the same
as if the parameter is not defined.

**Warning:** If a connection is idle for longer than the configured connection
timeout, it will be forcefully disconnected and a warning will be logged in the
MaxScale log file. If you are performing long-running maintenance operations
(e.g. `ALTER TABLE`), either do them with a direct connection to the server or
set `connection_timeout` to zero before executing them.

Example:

```

@@ -5,43 +5,7 @@ and its intended use case scenarios. It also displays all router configuration
parameters with their descriptions. A list of current limitations of the module
is included and use examples are provided.

Table of Contents
=================

* [Overview](#overview)
* [Configuration](#configuration)
* [Parameters](#parameters)
* [max_slave_connections](#max_slave_connections)
* [max_slave_replication_lag](#max_slave_replication_lag)
* [use_sql_variables_in](#use_sql_variables_in)
* [connection_keepalive](#connection_keepalive)
* [master_reconnection](#master_reconnection)
* [slave_selection_criteria](#slave_selection_criteria)
* [Server Weights and slave_selection_criteria](#server-weights-and-slave_selection_criteria)
* [Interaction Between slave_selection_criteria and max_slave_connections](#interaction-between-slave_selection_criteria-and-max_slave_connections)
* [max_sescmd_history](#max_sescmd_history)
* [disable_sescmd_history](#disable_sescmd_history)
* [master_accept_reads](#master_accept_reads)
* [strict_multi_stmt](#strict_multi_stmt)
* [strict_sp_calls](#strict_sp_calls)
* [master_failure_mode](#master_failure_mode)
* [retry_failed_reads](#retry_failed_reads)
* [delayed_retry](#delayed_retry)
* [delayed_retry_timeout](#delayed_retry_timeout)
* [transaction_replay](#transaction_replay)
* [transaction_replay_max_size](#transaction_replay_max_size)
* [optimistic_trx](#optimistic_trx)
* [causal_reads](#causal_reads)
* [causal_reads_timeout](#causal_reads_timeout)
* [Routing hints](#routing-hints)
* [Limitations](#limitations)
* [Legacy Configuration](#legacy-configuration)
* [Examples](#examples)
* [Readwritesplit routing decisions](#readwritesplit-routing-decisions)
* [Routing to Master](#routing-to-master)
* [Routing to Slaves](#routing-to-slaves)
* [Routing to every session backend](#routing-to-every-session-backend)
[TOC]

## Overview

@@ -318,6 +282,38 @@ default of 50 session commands after which the history is disabled.
disable_sescmd_history=true
```

### `prune_sescmd_history`

This option prunes the session command history when it exceeds the value
configured in `max_sescmd_history`. When this option is enabled, only a set
number of statements are stored in the history. This limits the per-session
memory use while still allowing safe reconnections. This parameter was added in
MaxScale 2.3.4 and is disabled by default.

This parameter is intended to be used with pooled connections that remain in use
for a very long time. Most connection pool implementations do not reset the
session state and instead re-initialize it with new values. This causes the
session command history to grow at roughly a constant rate for the lifetime of
the pooled connection.

Each client-side session that uses a pooled connection only executes a finite
number of session commands. By retaining a shorter history that encompasses all
session commands the individual clients execute, the session state of a pooled
connection can be accurately recreated on another server.

If session command history pruning is enabled, there is a theoretical
possibility that upon server reconnection the session states of the connections
are inconsistent. This can only happen if the length of the stored history is
shorter than the list of relevant statements that affect the session state. In
practice the default value of 50 session commands is a fairly reasonable value
and the risk of inconsistent session state is relatively low.

If the default history length is too short for safe pruning, set the value
of `max_sescmd_history` to the total number of commands that affect the session
state plus a safety margin of 10. The safety margin reserves some extra space
for new commands that might be executed due to changes in the client-side
application.
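
As a minimal illustration, pruning is enabled by adding the two parameters to the readwritesplit service section; the limit of 10 matches the system test added later in this commit and is not a recommended value:

```
max_sescmd_history=10
prune_sescmd_history=true
```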

### `master_accept_reads`

**`master_accept_reads`** allows the master server to be used for reads. This is

@@ -3,13 +3,16 @@
There are two options for representing the password: either plain-text or
encrypted passwords may be used. In order to use encrypted passwords, a set of
keys must be generated that will be used by the encryption and decryption
process. To generate the keys use the `maxkeys` command and pass the name of the
secrets file in which the keys are stored.
process. To generate the keys, use the `maxkeys` command.

```
maxkeys /var/lib/maxscale/.secrets
maxkeys
```

By default the key file will be generated in `/var/lib/maxscale`. If a different
directory is required, it can be given as the first argument to the program. For
more information, see `maxkeys --help`.

Once the keys have been created, the `maxpasswd` command can be used to generate
the encrypted password.

@@ -21,6 +24,10 @@ maxpasswd plainpassword
The username and password, either encrypted or plain text, are stored in the
service section using the `user` and `password` parameters.

If a custom location was used for the key file, give it as the first argument to
`maxpasswd` and pass the password to be encrypted as the second argument. For
more information, see `maxkeys --help`.

Here is an example configuration that uses an encrypted password.

```
@@ -32,3 +39,7 @@ servers=dbserv1, dbserv2, dbserv3
user=maxscale
password=96F99AA1315BDC3604B006F427DD9484
```

If the key file is not in the default location, the
[`datadir`](../Getting-Started/Configuration-Guide.md#datadir) parameter must be
set to the directory that contains it.
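
A sketch of the custom-location workflow described above; the directory is only an example and reuses the `datadir` value shown earlier in this guide:

```
maxkeys /home/user/maxscale_data/
maxpasswd /home/user/maxscale_data/ plainpassword
```

The resulting hash is then placed in the service's `password` parameter, and `datadir=/home/user/maxscale_data/` is set in the `[maxscale]` section so that MaxScale finds the key file.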

@@ -23,7 +23,7 @@ set(DEFAULT_EXEC_SUBPATH "${MAXSCALE_BINDIR}" CACHE PATH "Default executable sub
set(DEFAULT_CONFIG_SUBPATH "etc" CACHE PATH "Default configuration subpath")
set(DEFAULT_CONFIG_PERSIST_SUBPATH "maxscale.cnf.d" CACHE PATH "Default persisted configuration subpath")
set(DEFAULT_MODULE_CONFIG_SUBPATH "${DEFAULT_CONFIG_SUBPATH}/maxscale.modules.d" CACHE PATH "Default configuration subpath")
set(DEFAULT_CONNECTOR_PLUGIN_SUBPATH "lib/plugin" CACHE PATH "Default connector plugin subpath")
set(DEFAULT_CONNECTOR_PLUGIN_SUBPATH "/mysql/plugin" CACHE PATH "Default connector plugin subpath")

set(DEFAULT_PIDDIR ${MAXSCALE_VARDIR}/${DEFAULT_PID_SUBPATH} CACHE PATH "Default PID file directory")
set(DEFAULT_MAXADMIN_SOCKET ${DEFAULT_PIDDIR}/${DEFAULT_MAXADMIN_SOCKET_FILE} CACHE PATH "Default MaxAdmin socket path")
@@ -36,7 +36,7 @@ set(DEFAULT_EXECDIR ${CMAKE_INSTALL_PREFIX}/${DEFAULT_EXEC_SUBPATH} CACHE PATH "
set(DEFAULT_CONFIGDIR /${DEFAULT_CONFIG_SUBPATH} CACHE PATH "Default configuration directory")
set(DEFAULT_CONFIG_PERSISTDIR ${DEFAULT_DATADIR}/${DEFAULT_CONFIG_PERSIST_SUBPATH} CACHE PATH "Default persisted configuration directory")
set(DEFAULT_MODULE_CONFIGDIR /${DEFAULT_MODULE_CONFIG_SUBPATH} CACHE PATH "Default module configuration directory")
set(DEFAULT_CONNECTOR_PLUGINDIR ${MAXSCALE_VARDIR}/${DEFAULT_CONNECTOR_PLUGIN_SUBPATH} CACHE PATH "Default connector plugin directory")
set(DEFAULT_CONNECTOR_PLUGINDIR ${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}/${DEFAULT_CONNECTOR_PLUGIN_SUBPATH} CACHE PATH "Default connector plugin directory")

# Massage TARGET_COMPONENT into a list
if (TARGET_COMPONENT)

@@ -330,6 +330,23 @@ public:
int64_t num_selects() const;
const maxbase::StopWatch& session_timer() const;
const maxbase::IntervalTimer& select_timer() const;

/**
* Get verbose status description
*
* @return A verbose description of the backend's status
*/
std::string get_verbose_status() const;

/**
* Add explanation message to latest close reason
*
* The message is printed in get_verbose_status() if the backend is closed.
*
* @param reason The human-readable message
*/
void set_close_reason(const std::string& reason);

private:
/**
* Internal state of the backend
@@ -355,8 +372,13 @@ private:
*/
void set_state(backend_state state);

// Stringification function
static std::string to_string(backend_state state);

bool m_closed; /**< True if a connection has been opened and closed */
time_t m_closed_at; /**< Timestamp when the backend was last closed */
std::string m_close_reason; /**< Why the backend was closed */
time_t m_opened_at; /**< Timestamp when the backend was last opened */
SERVER_REF* m_backend; /**< Backend server */
DCB* m_dcb; /**< Backend DCB */
mxs::Buffer m_pending_cmd; /**< Pending commands */
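
The two new `Backend` calls above are consumed by readwritesplit later in this commit; a condensed sketch of that usage pattern follows. The helper function names and surrounding context are illustrative, not part of the commit, and the usual headers (`<string>`, `<vector>`, the Backend header) are assumed:

```
// Record why a backend was closed so the reason shows up in diagnostics.
void close_with_reason(mxs::Backend* backend, const std::string& error_desc)
{
    backend->close();
    backend->set_close_reason("Connection failed: " + error_desc);
}

// When routing fails, the verbose status of every backend can be collected
// into a single diagnostic string and logged.
std::string dump_backend_states(const std::vector<mxs::Backend*>& backends)
{
    std::string status;
    for (const auto& b : backends)
    {
        status += "\n" + b->get_verbose_status();
    }
    return status;
}
```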

@@ -297,8 +297,11 @@ typedef enum
MXS_COM_STMT_RESET = 26,
MXS_COM_SET_OPTION = 27,
MXS_COM_STMT_FETCH = 28,
MXS_COM_DAEMON = 29,
MXS_COM_UNSUPPORTED = 30,
MXS_COM_RESET_CONNECTION = 31,
MXS_COM_STMT_BULK_EXECUTE = 0xfa,
MXS_COM_DAEMON,
MXS_COM_MULTI = 0xfe,
MXS_COM_END
} mxs_mysql_cmd_t;

@@ -348,7 +348,8 @@ private:
*/
bool query_type_is_read_only(uint32_t qtype) const;

uint32_t get_route_target(uint8_t command, uint32_t qtype, HINT* pHints);
void process_routing_hints(HINT* pHints, uint32_t* target);
uint32_t get_route_target(uint8_t command, uint32_t qtype);

MXS_SESSION* session() const
{

@@ -166,12 +166,12 @@ exports.builder = function(yargs) {
])
})
})
.command('users', 'List created network users', function(yargs) {
return yargs.epilog('List the users that can be used to connect to the MaxScale REST API.')
.command('users', 'List created users', function(yargs) {
return yargs.epilog('List network the users that can be used to connect to the MaxScale REST API as well as enabled local accounts.')
.usage('Usage: list users')
}, function(argv) {
maxctrl(argv, function(host) {
return getCollection(host, 'users/inet',[
return getCollection(host, 'users',[
{'Name':'id'},
{'Type':'type'},
{'Privileges':'attributes.account'},
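
For reference, the reworked subcommand is invoked the same way as before (connection options omitted); with this change it reports both network users and enabled local accounts from the `users` REST API collection instead of only `users/inet`:

```
maxctrl list users
```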

@@ -913,6 +913,12 @@ add_test_executable(mxs2115_version_string.cpp mxs2115_version_string replicatio
# MXS-2273: Introduce server state BEING_DRAINED
add_test_executable(mxs2273_being_drained.cpp mxs2273_being_drained mxs2273_being_drained LABELS REPL_BACKEND)

# MXS-2295: COM_CHANGE_USER does not clear out session command history
add_test_executable(mxs2295_change_user_loop.cpp mxs2295_change_user_loop mxs2295_change_user_loop LABELS REPL_BACKEND)

# MXS-2300: Prune session command history
add_test_executable(mxs2300_history_pruning.cpp mxs2300_history_pruning mxs2300_history_pruning LABELS REPL_BACKEND)

############################################
# BEGIN: binlogrouter and avrorouter tests #
############################################

maxscale-system-test/cnf/maxscale.cnf.template.mxs2295_change_user_loop (new executable file, 50 lines)
@@ -0,0 +1,50 @@
[maxscale]
threads=###threads###
log_info=1

[server1]
type=server
address=###node_server_IP_1###
port=###node_server_port_1###
protocol=MySQLBackend

[server2]
type=server
address=###node_server_IP_2###
port=###node_server_port_2###
protocol=MySQLBackend

[server3]
type=server
address=###node_server_IP_3###
port=###node_server_port_3###
protocol=MySQLBackend

[server4]
type=server
address=###node_server_IP_4###
port=###node_server_port_4###
protocol=MySQLBackend

[MySQL Monitor]
type=monitor
module=mysqlmon
servers=server1,server2,server3,server4
user=maxskysql
password=skysql
monitor_interval=1000

[RW Split Router]
type=service
router=readwritesplit
servers=server1,server2,server3,server4
user=maxskysql
password=skysql
max_sescmd_history=20
disable_sescmd_history=false

[RW Split Listener]
type=listener
service=RW Split Router
protocol=MySQLClient
port=4006

maxscale-system-test/cnf/maxscale.cnf.template.mxs2300_history_pruning (new executable file, 51 lines)
@@ -0,0 +1,51 @@
[maxscale]
threads=###threads###
log_info=1

[server1]
type=server
address=###node_server_IP_1###
port=###node_server_port_1###
protocol=MySQLBackend

[server2]
type=server
address=###node_server_IP_2###
port=###node_server_port_2###
protocol=MySQLBackend

[server3]
type=server
address=###node_server_IP_3###
port=###node_server_port_3###
protocol=MySQLBackend

[server4]
type=server
address=###node_server_IP_4###
port=###node_server_port_4###
protocol=MySQLBackend

[MySQL Monitor]
type=monitor
module=mysqlmon
servers=server1,server2,server3,server4
user=maxskysql
password=skysql
monitor_interval=1000

[RW Split Router]
type=service
router=readwritesplit
servers=server1,server2,server3,server4
user=maxskysql
password=skysql
max_sescmd_history=10
prune_sescmd_history=true
max_slave_connections=1

[RW Split Listener]
type=listener
service=RW Split Router
protocol=MySQLClient
port=4006

@@ -18,6 +18,7 @@ servers=server1, server2, server3, server4
user=maxskysql
password=skysql
slave_selection_criteria=LEAST_ROUTER_CONNECTIONS
max_slave_replication_lag=1

[Read-Connection-Router-Slave]
type=service

@@ -334,6 +334,16 @@ public:
return mysql_error(m_conn);
}

bool change_user(std::string user, std::string pw, std::string db = "test")
{
return mysql_change_user(m_conn, user.c_str(), pw.c_str(), db.c_str()) == 0;
}

bool reset_connection()
{
return change_user(m_user, m_pw, m_db);
}

private:
std::string m_host;
int m_port;

@@ -936,6 +936,18 @@ std::string Mariadb_nodes::get_server_id_str(int index)
return ss.str();
}

std::vector<int> Mariadb_nodes::get_all_server_ids()
{
std::vector<int> rval;

for (int i = 0; i < N; i++)
{
rval.push_back(get_server_id(i));
}

return rval;
}

bool do_flush_hosts(MYSQL* conn)
{
int local_result = 0;

@@ -316,6 +316,13 @@ public:
int get_server_id(int index);
std::string get_server_id_str(int index);

/**
* Get server IDs of all servers
*
* @return List of server IDs
*/
std::vector<int> get_all_server_ids();

/**
* @brief Execute 'mysqladmin flush-hosts' on all nodes
* @return 0 in case of success

@@ -196,7 +196,7 @@ int main(int argc, char** argv)
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'"),
},
{
bind(err, "SELECT 7"),
bind(err, "SELECT 8"),
bind(err, "COMMIT"),
},
{
@@ -252,9 +252,9 @@ int main(int argc, char** argv)

// Block and unblock the master
test.repl->block_node(0);
test.maxscales->wait_for_monitor();
test.maxscales->wait_for_monitor(2);
test.repl->unblock_node(0);
test.maxscales->wait_for_monitor();
test.maxscales->wait_for_monitor(2);

for (auto& f : a.post)
{

maxscale-system-test/mxs2295_change_user_loop.cpp (new file, 66 lines)
@@ -0,0 +1,66 @@
/**
* MXS-2295: COM_CHANGE_USER does not clear out session command history
* https://jira.mariadb.org/browse/MXS-2295
*/

#include "testconnections.h"

int main(int argc, char *argv[])
{
TestConnections test(argc, argv);


Connection conn = test.maxscales->rwsplit();
test.expect(conn.connect(), "Connection failed: %s", conn.error());

for (int i = 0; i <= 300 && test.global_result == 0; i++)
{
if (i % 50 == 0)
{
test.tprintf("Iteration: %d", i);
}

test.set_timeout(60);

// Interleaved session commands, reads and "writes" (`SELECT @@last_insert_id` is treated as a master-only read)
test.expect(conn.query("SET @a = 1"), "Query failed: %s", conn.error());
test.expect(conn.query("USE test"), "Query failed: %s", conn.error());
test.expect(conn.query("SET SQL_MODE=''"), "Query failed: %s", conn.error());
test.expect(conn.query("USE test"), "Query failed: %s", conn.error());
test.expect(conn.query("SELECT @@last_insert_id"), "Query failed: %s", conn.error());
test.expect(conn.query("SELECT 1"), "Query failed: %s", conn.error());
test.expect(conn.query("USE test"), "Query failed: %s", conn.error());
test.expect(conn.query("SELECT 1"), "Query failed: %s", conn.error());

// User variable inside transaction
test.expect(conn.query("SET @a = 123"), "Query failed: %s", conn.error());
test.expect(conn.query("BEGIN"), "Query failed: %s", conn.error());
Row row = conn.row("SELECT @a");
test.expect(!row.empty() && row[0] == "123", "Invalid contents in user variable inside RW trx");
test.expect(conn.query("COMMIT"), "Query failed: %s", conn.error());

// User variable outside transaction
test.expect(conn.query("SET @a = 321"), "Query failed: %s", conn.error());
row = conn.row("SELECT @a");
test.expect(!row.empty() && row[0] == "321", "Invalid contents in user variable outside trx");

// User variable inside read-only transaction
test.expect(conn.query("SET @a = 456"), "Query failed: %s", conn.error());
test.expect(conn.query("START TRANSACTION READ ONLY"), "Query failed: %s", conn.error());
row = conn.row("SELECT @a");
test.expect(!row.empty() && row[0] == "456", "Invalid contents in user variable inside RO trx");
test.expect(conn.query("COMMIT"), "Query failed: %s", conn.error());

test.expect(conn.query("PREPARE ps FROM 'SELECT 1'"), "PREPARE failed: %s", conn.error());
row = conn.row("EXECUTE ps");
test.expect(!row.empty() && row[0] == "1", "Invalid contents in PS result");
test.expect(conn.query("DEALLOCATE PREPARE ps"), "DEALLOCATE failed: %s", conn.error());

test.expect(conn.reset_connection(), "Connection reset failed: %s", conn.error());
}

test.log_excludes(0, "Router session exceeded session command history limit");
test.log_includes(0, "Resetting session command history");

return test.global_result;
}

maxscale-system-test/mxs2300_history_pruning.cpp (new file, 109 lines)
@@ -0,0 +1,109 @@
/**
* MXS-2300: Session command history pruning
*/

#include "testconnections.h"
#include <sstream>

std::vector<int> ids;

void block_by_id(TestConnections& test, int id)
{
for (size_t i = 0; i < ids.size(); i++)
{
if (ids[i] == id)
{
test.repl->block_node(i);
}
}
}

void unblock_by_id(TestConnections& test, int id)
{
for (size_t i = 0; i < ids.size(); i++)
{
if (ids[i] == id)
{
test.repl->unblock_node(i);
}
}
}

int main(int argc, char** argv)
{
TestConnections test(argc, argv);

test.repl->connect();
ids = test.repl->get_all_server_ids();
test.repl->disconnect();

int master_id = test.get_master_server_id();
Connection conn = test.maxscales->rwsplit();
test.expect(conn.connect(), "Connection failed: %s", conn.error());

test.tprintf("Get the ID of the server we first start with");
int first_id = std::stoi(conn.field("SELECT @@server_id"));

test.tprintf("The history size is set to 10 commands, execute five and check that they are retained");
for (int i = 0; i < 5; i++)
{
std::stringstream query;
query << "SET @a" << i << " = " << i;
conn.query(query.str());
}

block_by_id(test, first_id);
test.maxscales->wait_for_monitor();

int second_id = std::stoi(conn.field("SELECT @@server_id"));

test.tprintf("Make sure that a reconnection actually took place");
test.expect(first_id != second_id && second_id > 0, "Invalid server ID: %d", second_id);
test.expect(master_id != second_id, "SELECT should not go to the master");

test.tprintf("Check that the values were correctly set");
for (int i = 0; i < 5; i++)
{
std::string value = std::to_string(i);
std::string query = "SELECT @a" + value;
test.expect(conn.check(query, value), "Invalid value for user variable @a%s", value.c_str());
}

unblock_by_id(test, first_id);

test.tprintf("Execute 15 commands and check that we lose the first five values");
for (int i = 0; i < 15; i++)
{
std::stringstream query;
query << "SET @b" << i << " =" << i;
conn.query(query.str());
}

block_by_id(test, second_id);
test.maxscales->wait_for_monitor();

int third_id = std::stoi(conn.field("SELECT @@server_id"));

test.expect(third_id != second_id && third_id > 0, "Invalid server ID: %d", third_id);
test.expect(master_id != third_id, "SELECT should not go to the master");

for (int i = 0; i < 5; i++)
{
std::string variable = "@b" + std::to_string(i);
std::string query = "SELECT IFNULL(" + variable + ", '" + variable + " is null')";
test.expect(conn.check(query, variable + " is null"), "%s should not be set", variable.c_str());
}

test.tprintf("Check that the remaining values were correctly set");
for (int i = 5; i < 15; i++)
{
std::string value = std::to_string(i);
std::string query = "SELECT @b" + value;
std::string f = conn.field(query);
test.expect(conn.check(query, value), "Invalid value for user variable @b%s: %s", value.c_str(), f.c_str());
}

unblock_by_id(test, second_id);

return test.global_result;
}

@@ -201,8 +201,6 @@ int main(int argc, char* argv[])
test.maxscales->wait_for_monitor(2);
auto maxconn = test.maxscales->open_rwsplit_connection();
test.try_query(maxconn, "FLUSH TABLES;");
mysql_close(maxconn);

test.maxscales->wait_for_monitor(1);

check_status(test, "server1", mm_master_states);
@@ -214,6 +212,10 @@ int main(int argc, char* argv[])
check_group(test, "server3", 1);
check_group(test, "server4", 0);
check_rlag(test, "server4", 1, max_rlag);
// Need to send a read query so that rwsplit detects replication lag.
test.try_query(maxconn, "SHOW DATABASES;");
mysql_close(maxconn);
test.log_includes(0, "is excluded from query routing.");

test.tprintf("Test 2 - Set nodes 0 and 1 into read-only mode");

@@ -328,7 +330,6 @@ int main(int argc, char* argv[])
test.maxscales->wait_for_monitor(1);
maxconn = test.maxscales->open_rwsplit_connection();
test.try_query(maxconn, "FLUSH TABLES;");
mysql_close(maxconn);
test.maxscales->wait_for_monitor(1);

check_status(test, "server1", slave_states);
@@ -349,6 +350,10 @@ int main(int argc, char* argv[])

check_status(test, "server1", slave_states);
check_rlag(test, "server1", 0, 0);
// Rwsplit should detects that replication lag is 0.
test.try_query(maxconn, "SHOW DATABASES;");
mysql_close(maxconn);
test.log_includes(0, "is returned to query routing.");

// Test over, reset topology.
const char reset_with_name[] = "STOP SLAVE '%s'; RESET SLAVE '%s' ALL;";

@@ -154,7 +154,7 @@ QUERY_CLASSIFIER* load_classifier(const char* name)
{
bool loaded = false;
size_t len = strlen(name);
char libdir[len + 1];
char libdir[len + 3 + 1]; // Extra for ../

sprintf(libdir, "../%s", name);

@@ -81,6 +81,10 @@ if (HAVE_LIBDL)
target_link_libraries(maxscale-common dl)
endif()

# Using initial-exec instead of the default global-dynamic tls-model
# reduces the cost of using thread-local variables in dynamic libraries.
target_compile_options(maxscale-common PUBLIC "-ftls-model=initial-exec")

add_dependencies(maxscale-common pcre2 connector-c libmicrohttpd jansson maxbase)
set_target_properties(maxscale-common PROPERTIES VERSION "1.0.0")
install_module(maxscale-common core)

@@ -17,12 +17,15 @@

#include <maxbase/atomic.hh>

#include <maxscale/alloc.h>
#include <maxscale/protocol/mysql.hh>

using namespace maxscale;

Backend::Backend(SERVER_REF* ref)
: m_closed(false)
, m_closed_at(0)
, m_opened_at(0)
, m_backend(ref)
, m_dcb(NULL)
, m_state(0)
@@ -49,6 +52,7 @@ void Backend::close(close_type type)
if (!m_closed)
{
m_closed = true;
m_closed_at = time(NULL);

if (in_use())
{
@@ -182,6 +186,8 @@ bool Backend::connect(MXS_SESSION* session, SessionCommandList* sescmd)
if ((m_dcb = dcb_connect(m_backend->server, session, m_backend->server->protocol().c_str())))
{
m_closed = false;
m_closed_at = 0;
m_opened_at = time(NULL);
m_state = IN_USE;
mxb::atomic::add(&m_backend->connections, 1, mxb::atomic::RELAXED);
rval = true;
@@ -285,3 +291,73 @@ int64_t Backend::num_selects() const
{
return m_num_selects;
}

void Backend::set_close_reason(const std::string& reason)
{
m_close_reason = reason;
}

std::string Backend::get_verbose_status() const
{
std::stringstream ss;
char closed_at[30] = "not closed";
char opened_at[30] = "not opened";

if (m_closed_at)
{
mxb_assert(m_closed);
ctime_r(&m_closed_at, closed_at);
char* nl = strrchr(closed_at, '\n');
mxb_assert(nl);
*nl = '\0';
}

if (m_opened_at)
{
ctime_r(&m_opened_at, opened_at);
char* nl = strrchr(opened_at, '\n');
mxb_assert(nl);
*nl = '\0';
}

ss << "name: [" << name() << "] "
<< "status: [" << m_backend->server->status_string() << "] "
<< "state: [" << to_string((backend_state)m_state) << "] "
<< "last opened at: [" << opened_at << "] "
<< "last closed at: [" << closed_at << "] "
<< "last close reason: [" << m_close_reason << "] "
<< "num sescmd: [" << m_session_commands.size() << "]";

return ss.str();
}

std::string Backend::to_string(backend_state state)
{
std::string rval;

if (state == 0)
{
rval = "NOT_IN_USE";
}
else
{
if (state & IN_USE)
{
rval += "IN_USE";
}

if (state & WAITING_RESULT)
{
rval += rval.empty() ? "" : "|";
rval += "WAITING_RESULT";
}

if (state & FATAL_FAILURE)
{
rval += rval.empty() ? "" : "|";
rval += "FATAL_FAILURE";
}
}

return rval;
}

@@ -1217,6 +1217,10 @@ bool config_load_global(const char* filename)
"cache. To enable it, add '%s' to the configuration file.",
CN_QUERY_CLASSIFIER_CACHE_SIZE);
}
else if (gateway.qc_cache_properties.max_size == 0)
{
MXS_NOTICE("Query classifier cache is disabled");
}
else
{
MXS_NOTICE("Using up to %s of memory for query classifier cache",
@@ -1275,6 +1279,34 @@ const MXS_MODULE* get_module(CONFIG_CONTEXT* obj, const char* param_name, const
return module ? get_module(module, module_type) : NULL;
}

const char* get_missing_module_parameter_name(const CONFIG_CONTEXT* obj)
{
std::string type = config_get_string(obj->parameters, CN_TYPE);

if (type == CN_SERVICE && !config_get_param(obj->parameters, CN_ROUTER))
{
return CN_ROUTER;
}
else if (type == CN_LISTENER && !config_get_param(obj->parameters, CN_PROTOCOL))
{
return CN_PROTOCOL;
}
else if (type == CN_SERVER && !config_get_param(obj->parameters, CN_PROTOCOL))
{
return CN_PROTOCOL;
}
else if (type == CN_MONITOR && !config_get_param(obj->parameters, CN_MODULE))
{
return CN_MODULE;
}
else if (type == CN_FILTER && !config_get_param(obj->parameters, CN_MODULE))
{
return CN_MODULE;
}

return nullptr;
}

std::pair<const MXS_MODULE_PARAM*, const MXS_MODULE*> get_module_details(const CONFIG_CONTEXT* obj)
{
std::string type = config_get_string(obj->parameters, CN_TYPE);
@@ -3038,6 +3070,15 @@ static bool check_config_objects(CONFIG_CONTEXT* context)
continue;
}

const char* no_module_defined = get_missing_module_parameter_name(obj);

if (no_module_defined)
{
MXS_ERROR("'%s' is missing the required parameter '%s'", obj->object, no_module_defined);
rval = false;
continue;
}

const MXS_MODULE_PARAM* param_set = nullptr;
const MXS_MODULE* mod = nullptr;
std::tie(param_set, mod) = get_module_details(obj);

@@ -433,7 +433,73 @@ bool QueryClassifier::query_type_is_read_only(uint32_t qtype) const
return rval;
}

uint32_t QueryClassifier::get_route_target(uint8_t command, uint32_t qtype, HINT* pHints)
void QueryClassifier::process_routing_hints(HINT* pHints, uint32_t* target)
{
HINT* pHint = pHints;

while (pHint)
{
if (m_pHandler->supports_hint(pHint->type))
{
switch (pHint->type)
{
case HINT_ROUTE_TO_MASTER:
// This means override, so we bail out immediately.
*target = TARGET_MASTER;
MXS_DEBUG("Hint: route to master");
pHint = NULL;
break;

case HINT_ROUTE_TO_NAMED_SERVER:
// The router is expected to look up the named server.
*target |= TARGET_NAMED_SERVER;
MXS_DEBUG("Hint: route to named server: %s", (char*)pHint->data);
break;

case HINT_ROUTE_TO_UPTODATE_SERVER:
// TODO: Add generic target type, never to be seem by RWS.
mxb_assert(false);
break;

case HINT_ROUTE_TO_ALL:
// TODO: Add generic target type, never to be seem by RWS.
mxb_assert(false);
break;

case HINT_ROUTE_TO_LAST_USED:
MXS_DEBUG("Hint: route to last used");
*target = TARGET_LAST_USED;
break;

case HINT_PARAMETER:
if (strncasecmp((char*)pHint->data,
"max_slave_replication_lag",
strlen("max_slave_replication_lag")) == 0)
{
*target |= TARGET_RLAG_MAX;
}
else
{
MXS_ERROR("Unknown hint parameter '%s' when "
"'max_slave_replication_lag' was expected.",
(char*)pHint->data);
}
break;

case HINT_ROUTE_TO_SLAVE:
*target = TARGET_SLAVE;
MXS_DEBUG("Hint: route to slave.");
}
}

if (pHint)
{
pHint = pHint->next;
}
}
}

uint32_t QueryClassifier::get_route_target(uint8_t command, uint32_t qtype)
{
bool trx_active = session_trx_is_active(m_pSession);
uint32_t target = TARGET_UNDEFINED;
@@ -533,70 +599,6 @@ uint32_t QueryClassifier::get_route_target(uint8_t command, uint32_t qtype, HINT
target = TARGET_MASTER;
}

/** Process routing hints */
HINT* pHint = pHints;

while (pHint)
{
if (m_pHandler->supports_hint(pHint->type))
{
switch (pHint->type)
{
case HINT_ROUTE_TO_MASTER:
// This means override, so we bail out immediately.
target = TARGET_MASTER;
MXS_DEBUG("Hint: route to master");
pHint = NULL;
break;

case HINT_ROUTE_TO_NAMED_SERVER:
// The router is expected to look up the named server.
target |= TARGET_NAMED_SERVER;
MXS_DEBUG("Hint: route to named server: %s", (char*)pHint->data);
break;

case HINT_ROUTE_TO_UPTODATE_SERVER:
// TODO: Add generic target type, never to be seem by RWS.
mxb_assert(false);
break;

case HINT_ROUTE_TO_ALL:
// TODO: Add generic target type, never to be seem by RWS.
mxb_assert(false);
break;

case HINT_ROUTE_TO_LAST_USED:
MXS_DEBUG("Hint: route to last used");
target = TARGET_LAST_USED;
break;

case HINT_PARAMETER:
if (strncasecmp((char*)pHint->data,
"max_slave_replication_lag",
strlen("max_slave_replication_lag")) == 0)
{
target |= TARGET_RLAG_MAX;
}
else
{
MXS_ERROR("Unknown hint parameter '%s' when "
"'max_slave_replication_lag' was expected.",
(char*)pHint->data);
}
break;

case HINT_ROUTE_TO_SLAVE:
target = TARGET_SLAVE;
MXS_DEBUG("Hint: route to slave.");
}
}

if (pHint)
{
pHint = pHint->next;
}
}

return target;
}

@@ -999,9 +1001,11 @@ QueryClassifier::RouteInfo QueryClassifier::update_route_info(
type_mask = ps_get_type(stmt_id);
}

route_target = get_route_target(command, type_mask, pBuffer->hint);
route_target = get_route_target(command, type_mask);
}

process_routing_hints(pBuffer->hint, &route_target);

if (session_trx_is_ending(m_pSession)
|| qc_query_is_type(type_mask, QUERY_TYPE_BEGIN_TRX))
{

@@ -272,6 +272,9 @@ void RWSplit::diagnostics(DCB* dcb)
dcb_printf(dcb,
"\tstrict_sp_calls: %s\n",
cnf.strict_sp_calls ? "true" : "false");
dcb_printf(dcb,
"\tprune_sescmd_history: %s\n",
cnf.prune_sescmd_history ? "true" : "false");
dcb_printf(dcb,
"\tdisable_sescmd_history: %s\n",
cnf.disable_sescmd_history ? "true" : "false");
@@ -503,6 +506,7 @@ extern "C" MXS_MODULE* MXS_CREATE_MODULE()
{"max_slave_replication_lag", MXS_MODULE_PARAM_INT, "-1" },
{"max_slave_connections", MXS_MODULE_PARAM_STRING, MAX_SLAVE_COUNT},
{"retry_failed_reads", MXS_MODULE_PARAM_BOOL, "true" },
{"prune_sescmd_history", MXS_MODULE_PARAM_BOOL, "false" },
{"disable_sescmd_history", MXS_MODULE_PARAM_BOOL, "false" },
{"max_sescmd_history", MXS_MODULE_PARAM_COUNT, "50" },
{"strict_multi_stmt", MXS_MODULE_PARAM_BOOL, "false" },

@@ -141,6 +141,7 @@ struct Config
, master_failure_mode(
(enum failure_mode)params->get_enum("master_failure_mode", master_failure_mode_values))
, max_sescmd_history(params->get_integer("max_sescmd_history"))
, prune_sescmd_history(config_get_bool(params, "prune_sescmd_history"))
, disable_sescmd_history(config_get_bool(params, "disable_sescmd_history"))
, master_accept_reads(config_get_bool(params, "master_accept_reads"))
, strict_multi_stmt(config_get_bool(params, "strict_multi_stmt"))
@@ -195,6 +196,7 @@ struct Config
* master or all nodes */
failure_mode master_failure_mode; /**< Master server failure handling mode */
uint64_t max_sescmd_history; /**< Maximum amount of session commands to store */
bool prune_sescmd_history; /**< Prune session command history */
bool disable_sescmd_history;/**< Disable session command history */
bool master_accept_reads; /**< Use master for reads */
bool strict_multi_stmt; /**< Force non-multistatement queries to be routed to

@@ -341,8 +341,7 @@ bool RWSplitSession::route_single_stmt(GWBUF* querybuf)
else
{
MXS_ERROR("Could not find valid server for target type %s, closing "
"connection.",
STRTARGET(route_target));
"connection.", route_target_to_string(route_target));
}
}

@@ -391,6 +390,23 @@ void RWSplitSession::continue_large_session_write(GWBUF* querybuf, uint32_t type
}
}

void RWSplitSession::prune_to_position(uint64_t pos)
{
/** Prune all completed responses before a certain position */
ResponseMap::iterator it = m_sescmd_responses.lower_bound(pos);

if (it != m_sescmd_responses.end())
{
// Found newer responses that were returned after this position
m_sescmd_responses.erase(m_sescmd_responses.begin(), it);
}
else
{
// All responses are older than the requested position
m_sescmd_responses.clear();
}
}

/**
* Execute in backends used by current router session.
* Save session variable commands to router session property
@@ -498,7 +514,7 @@ bool RWSplitSession::route_session_write(GWBUF* querybuf, uint8_t command, uint3
}
}

if (m_config.max_sescmd_history > 0 && m_sescmd_list.size() >= m_config.max_sescmd_history)
if (m_config.max_sescmd_history > 0 && m_sescmd_list.size() > m_config.max_sescmd_history)
{
static bool warn_history_exceeded = true;
if (warn_history_exceeded)
@@ -520,20 +536,17 @@ bool RWSplitSession::route_session_write(GWBUF* querybuf, uint8_t command, uint3
m_sescmd_list.clear();
}

if (m_config.prune_sescmd_history && !m_sescmd_list.empty()
&& m_sescmd_list.size() + 1 > m_config.max_sescmd_history)
{
// Close to the history limit, remove the oldest command
prune_to_position(m_sescmd_list.front()->get_position());
m_sescmd_list.pop_front();
}

if (m_config.disable_sescmd_history)
{
/** Prune stored responses */
ResponseMap::iterator it = m_sescmd_responses.lower_bound(lowest_pos);

if (it != m_sescmd_responses.end())
{
m_sescmd_responses.erase(m_sescmd_responses.begin(), it);
}
else
{
// All responses processed
m_sescmd_responses.clear();
}
prune_to_position(lowest_pos);
}
else
{
@@ -554,8 +567,16 @@ bool RWSplitSession::route_session_write(GWBUF* querybuf, uint8_t command, uint3
}
else
{
MXS_ERROR("Could not route session command: %s", attempted_write ? "Write to all backends failed" :
"All connections have failed");
std::string status;
for (const auto& a : m_backends)
{
status += "\n";
status += a->get_verbose_status();
}

MXS_ERROR("Could not route session command: %s. Connection information: %s",
attempted_write ? "Write to all backends failed" : "All connections have failed",
status.c_str());
}

return nsucc;
@@ -695,7 +716,6 @@ RWBackend* RWSplitSession::get_target_backend(backend_type_t btype,

if (name) /*< Choose backend by name from a hint */
{
mxb_assert(btype != BE_MASTER);
btype = BE_SLAVE;
rval = get_hinted_backend(name);
}
@@ -800,10 +820,19 @@ RWBackend* RWSplitSession::handle_hinted_target(GWBUF* querybuf, route_target_t
{
if (TARGET_IS_NAMED_SERVER(route_target))
{
MXS_INFO("Was supposed to route to named server "
"%s but couldn't find the server in a "
"suitable state.",
named_server);
std::string status = "Could not find server";

for (const auto& a : m_backends)
{
if (strcmp(a->server()->name(), named_server) == 0)
{
status = a->server()->status_string();
break;
}
}

MXS_INFO("Was supposed to route to named server %s but couldn't find the server in a "
"suitable state. Server state: %s", named_server, status.c_str());
}
else if (TARGET_IS_RLAG_MAX(route_target))
{
@@ -1013,6 +1042,7 @@ bool RWSplitSession::handle_master_is_target(RWBackend** dest)
if (m_current_master && m_current_master->in_use())
{
m_current_master->close();
m_current_master->set_close_reason("The original master is not available");
}
}
else if (!m_config.delayed_retry

@@ -78,6 +78,7 @@ static void discard_if_response_differs(RWBackend* backend,
STRPACKETTYPE(cmd),
query.empty() ? "<no query>" : query.c_str());
backend->close(mxs::Backend::CLOSE_FATAL);
backend->set_close_reason("Invalid response to: " + query);
}
}

@@ -160,5 +161,32 @@ void RWSplitSession::process_sescmd_response(RWBackend* backend, GWBUF** ppPacke
gwbuf_free(*ppPacket);
*ppPacket = NULL;
}

if (m_expected_responses == 0
&& (command == MXS_COM_CHANGE_USER || command == MXS_COM_RESET_CONNECTION))
{
mxb_assert_message(m_slave_responses.empty(), "All responses should've been processed");
// This is the last session command to finish that resets the session state, reset the history
MXS_INFO("Resetting session command history (length: %lu)", m_sescmd_list.size());

/**
* Since new connections need to perform the COM_CHANGE_USER, pop it off the list along
* with the expected response to it.
*/
SSessionCommand latest = m_sescmd_list.back();
cmd = m_sescmd_responses[latest->get_position()];

m_sescmd_list.clear();
m_sescmd_responses.clear();

// Push the response back as the first executed session command
m_sescmd_list.push_back(latest);
m_sescmd_responses[latest->get_position()] = cmd;

// Adjust counters to match the number of stored session commands
m_recv_sescmd = 1;
m_sent_sescmd = 1;
m_sescmd_count = 2;
}
}
}

@@ -949,6 +949,7 @@ void RWSplitSession::handleError(GWBUF* errmsgbuf,
}

backend->close();
backend->set_close_reason("Master connection failed: " + extract_error(errmsgbuf));
}
else
{
@@ -962,6 +963,7 @@ void RWSplitSession::handleError(GWBUF* errmsgbuf,
// Try to replay the transaction on another node
can_continue = start_trx_replay();
backend->close();
backend->set_close_reason("Read-only trx failed: " + extract_error(errmsgbuf));

if (!can_continue)
{
@@ -982,6 +984,7 @@ void RWSplitSession::handleError(GWBUF* errmsgbuf,
m_otrx_state = OTRX_INACTIVE;
can_continue = start_trx_replay();
backend->close();
backend->set_close_reason("Optimistic trx failed: " + extract_error(errmsgbuf));
}
else
{
@@ -1071,6 +1074,7 @@ bool RWSplitSession::handle_error_new_connection(DCB* backend_dcb, GWBUF* errmsg
* is closed, it's possible that the routing logic will pick the failed
* server as the target. */
backend->close();
backend->set_close_reason("Slave connection failed: " + extract_error(errmsg));

if (route_stored)
{

@@ -139,6 +139,7 @@ private:
void process_sescmd_response(mxs::RWBackend* backend, GWBUF** ppPacket);
void compress_history(mxs::SSessionCommand& sescmd);

void prune_to_position(uint64_t pos);
bool route_session_write(GWBUF* querybuf, uint8_t command, uint32_t type);
void continue_large_session_write(GWBUF* querybuf, uint32_t type);
bool route_single_stmt(GWBUF* querybuf);
@@ -277,7 +278,7 @@ private:
int m_last_keepalive_check; /**< When the last ping was done */
int m_nbackends; /**< Number of backend servers (obsolete) */
DCB* m_client; /**< The client DCB */
uint64_t m_sescmd_count; /**< Number of executed session commands */
uint64_t m_sescmd_count; /**< Number of executed session commands (starts from 1) */
int m_expected_responses; /**< Number of expected responses to the current
* query */
GWBUF* m_query_queue; /**< Queued commands waiting to be executed */
@@ -322,22 +323,35 @@ private:
*/
uint32_t get_internal_ps_id(RWSplitSession* rses, GWBUF* buffer);

#define STRTARGET(t) \
(t == TARGET_ALL ? "TARGET_ALL" \
: (t == TARGET_MASTER ? "TARGET_MASTER" \
: (t == TARGET_SLAVE ? "TARGET_SLAVE" \
: (t \
== TARGET_NAMED_SERVER \
? "TARGET_NAMED_SERVER" \
: (t \
== \
TARGET_RLAG_MAX \
? "TARGET_RLAG_MAX" \
: ( \
t \
== \
TARGET_UNDEFINED \
? \
"TARGET_UNDEFINED" \
: \
"Unknown target value"))))))
static inline const char* route_target_to_string(route_target_t target)
{
if (TARGET_IS_MASTER(target))
{
return "TARGET_MASTER";
}
else if (TARGET_IS_SLAVE(target))
{
return "TARGET_SLAVE";
}
else if (TARGET_IS_NAMED_SERVER(target))
{
return "TARGET_NAMED_SERVER";
}
else if (TARGET_IS_ALL(target))
{
return "TARGET_ALL";
}
else if (TARGET_IS_RLAG_MAX(target))
{
return "TARGET_RLAG_MAX";
}
else if (TARGET_IS_LAST_USED(target))
{
return "TARGET_LAST_USED";
}
else
{
mxb_assert(!true);
return "Unknown target value";
}
}