MXS-2350: Allow lazy connection creation

Lazy connection creation reduces the burden that short sessions place
on the backend servers. It also mitigates the problems caused by early
disconnections that happen when only one server is used but multiple
connections are created. This does not solve the underlying problem
(MXS-619) but it does mitigate it to acceptable levels.

This commit also adds a change to the weighting algorithm that prefers
existing connections over unopened ones. This helps avoid the
flip-flopping that happens when the absolute scores are very similar. The
hard-coded value might need to be tuned once testing is done.
This commit is contained in:
Markus Mäkelä 2019-02-28 21:17:17 +02:00
parent 95317725ce
commit 24ea222ed6
No known key found for this signature in database
GPG Key ID: 72D48FCE664F7B19
7 changed files with 52 additions and 15 deletions

View File

@ -545,6 +545,20 @@ when the slave timed out.
The timeout for the slave synchronization done by `causal_reads`. The
default value is 10 seconds.
### `lazy_connect`
Lazy connection creation causes connections to backend servers to be opened only
when they are needed. This reduces the load that is placed on the backend
servers when the client connections are short. This parameter is a boolean type
and is disabled by default.
By default, readwritesplit opens as many connections as it can when the session
is first opened. This makes the execution of the first query faster, as all
available connections have already been created. When `lazy_connect` is enabled,
this initial connection creation is skipped. If the client executes only read
queries, no connection to the master is made; if only write queries are
executed, only the master connection is used.
## Routing hints
The readwritesplit router supports routing hints. For a detailed guide on hint

View File

@ -522,6 +522,7 @@ extern "C" MXS_MODULE* MXS_CREATE_MODULE()
{"transaction_replay", MXS_MODULE_PARAM_BOOL, "false" },
{"transaction_replay_max_size",MXS_MODULE_PARAM_SIZE, "1Mi" },
{"optimistic_trx", MXS_MODULE_PARAM_BOOL, "false" },
{"lazy_connect", MXS_MODULE_PARAM_BOOL, "false" },
{MXS_END_MODULE_PARAMS}
}
};

View File

@ -159,6 +159,7 @@ struct Config
, transaction_replay(params->get_bool("transaction_replay"))
, trx_max_size(params->get_size("transaction_replay_max_size"))
, optimistic_trx(params->get_bool("optimistic_trx"))
, lazy_connect(params->get_bool("lazy_connect"))
{
if (causal_reads)
{
@ -177,13 +178,17 @@ struct Config
transaction_replay = true;
}
if (transaction_replay)
if (transaction_replay || lazy_connect)
{
/**
* Replaying transactions requires that we are able to do delayed query
* retries and reconnect to a master.
* retries. Both transaction replay and lazy connection creation require
* fail-on-write failure mode and reconnections to masters.
*/
delayed_retry = true;
if (transaction_replay)
{
delayed_retry = true;
}
master_reconnection = true;
master_failure_mode = RW_FAIL_ON_WRITE;
}
@ -217,6 +222,7 @@ struct Config
bool transaction_replay; /**< Replay failed transactions */
size_t trx_max_size; /**< Max transaction size for replaying */
bool optimistic_trx; /**< Enable optimistic transactions */
bool lazy_connect; /**< Create connections only when needed */
};
/**

View File

@ -922,7 +922,7 @@ void RWSplitSession::log_master_routing_failure(bool found,
|| old_master->dcb()->role == DCB::Role::BACKEND);
mxb_assert(!curr_master || !curr_master->in_use()
|| curr_master->dcb()->role == DCB::Role::BACKEND);
char errmsg[SERVER::MAX_ADDRESS_LEN * 2 + 100]; // Extra space for error message
char errmsg[SERVER::MAX_ADDRESS_LEN* 2 + 100]; // Extra space for error message
if (m_config.delayed_retry && m_retry_duration >= m_config.delayed_retry_timeout)
{

View File

@ -57,6 +57,13 @@ PRWBackends::iterator best_score(PRWBackends& sBackends,
for (auto ite = sBackends.begin(); ite != sBackends.end(); ++ite)
{
double score = server_score((**ite).backend());
if (!(*ite)->in_use())
{
// To prefer servers that we are connected to, inflate the score of unconnected servers
score = (score + 5.0) * 1.5;
}
if (min > score)
{
min = score;

View File

@ -23,13 +23,14 @@ using namespace maxscale;
RWSplitSession::RWSplitSession(RWSplit* instance,
MXS_SESSION* session,
const Config& config,
mxs::SRWBackends backends,
mxs::RWBackend* master)
: mxs::RouterSession(session)
, m_backends(std::move(backends))
, m_raw_backends(sptr_vec_to_ptr_vec(m_backends))
, m_current_master(master)
, m_config(instance->config())
, m_config(config)
, m_last_keepalive_check(mxs_clock())
, m_nbackends(instance->service()->n_dbref)
, m_client(session->client_dcb)
@ -71,17 +72,18 @@ RWSplitSession* RWSplitSession::create(RWSplit* router, MXS_SESSION* session)
*/
RWBackend* master = nullptr;
const auto& config = router->config();
auto backend_ptrs = sptr_vec_to_ptr_vec(backends);
if (router->select_connect_backend_servers(session,
backend_ptrs,
&master,
NULL,
NULL,
connection_type::ALL))
if (config.lazy_connect
|| router->select_connect_backend_servers(session,
backend_ptrs,
&master,
NULL,
NULL,
connection_type::ALL))
{
if ((rses = new RWSplitSession(router, session, std::move(backends), master)))
if ((rses = new RWSplitSession(router, session, config, std::move(backends), master)))
{
router->stats().n_sessions += 1;
}
@ -1102,11 +1104,17 @@ bool RWSplitSession::handle_error_new_connection(DCB* backend_dcb, GWBUF* errmsg
}
bool succp = false;
if (m_config.lazy_connect)
{
// Lazy connect is enabled, don't care whether we have available servers
succp = true;
}
/**
* Try to get replacement slave or at least the minimum
* number of slave connections for router session.
*/
if (m_recv_sescmd > 0 && m_config.disable_sescmd_history)
else if (m_recv_sescmd > 0 && m_config.disable_sescmd_history)
{
for (const auto& a : m_raw_backends)
{

View File

@ -133,6 +133,7 @@ public:
private:
RWSplitSession(RWSplit* instance,
MXS_SESSION* session,
const Config& config,
mxs::SRWBackends backends,
mxs::RWBackend* master);
@ -290,7 +291,7 @@ private:
uint64_t m_sent_sescmd; /**< ID of the last sent session command*/
uint64_t m_recv_sescmd; /**< ID of the most recently completed session
* command */
ExecMap m_exec_map; /**< Map of COM_STMT_EXECUTE statement IDs to
ExecMap m_exec_map; /**< Map of COM_STMT_EXECUTE statement IDs to
* Backends */
std::string m_gtid_pos; /**< Gtid position for causal read */
wait_gtid_state m_wait_gtid; /**< State of MASTER_GTID_WAIT reply */