MXS-1777 Turn server weights to their inverses, and make them doubles.

The math becomes simpler when the weight is inverted, i.e. a simple multiplication
to get the (inverse) score. Inverse weights are normalized to the range [0..1] where a lower
number is a higher weight.
The enum select_criteria_t is used to provide a std::function that takes the backends
as a vector (rather than the prior pairwise compares) and returns the best backend.
This commit is contained in:
Niclas Antti
2018-08-28 11:01:11 +03:00
parent 105bd7195f
commit 91acbc0994
7 changed files with 278 additions and 289 deletions

View File

@ -62,11 +62,23 @@ typedef struct server_ref_t
{
struct server_ref_t *next; /**< Next server reference */
SERVER* server; /**< The actual server */
int weight; /**< Weight of this server */
double inv_weight; /**< Inverse of weight in the range [0..1], 0 is best. */
int connections; /**< Number of connections created through this reference */
bool active; /**< Whether this reference is valid and in use*/
} SERVER_REF;
/** Returns true if the two server "scores" are within 1% (see `div` below) of
 * each other. The epsilon needs tweaking, and might even need to be in config.
 * This function is important for some compares, where one server might be only
 * marginally better than others, in which case historical data could determine
 * the outcome.
 *
 * @param lhs first score (lower scores are better)
 * @param rhs second score
 * @return true if the scores are relatively (or exactly) equal
 */
inline bool almost_equal_server_scores(double lhs, double rhs)
{
    constexpr double div = 100;     // within 1% of each other.
    // Use '<=' rather than '<' so that two identical scores compare as almost
    // equal. In particular two zero scores - which arise for idle servers or
    // servers whose inverse weight is 0 - would otherwise evaluate
    // |0 - 0| < 0 == false, and tie-breaking logic would never run for them.
    return std::abs(lhs - rhs) <= std::abs(std::max(lhs, rhs)) * (1 / div);
}
/** Macro to check whether a SERVER_REF is active */
#define SERVER_REF_IS_ACTIVE(ref) (ref->active && server_is_active(ref->server))

View File

@ -72,9 +72,6 @@ using namespace maxscale;
using LockGuard = std::lock_guard<std::mutex>;
using UniqueLock = std::unique_lock<std::mutex>;
/** Base value for server weights */
#define SERVICE_BASE_SERVER_WEIGHT 1000
static struct
{
std::mutex lock;
@ -915,7 +912,8 @@ static SERVER_REF* server_ref_create(SERVER *server)
{
sref->next = NULL;
sref->server = server;
sref->weight = SERVICE_BASE_SERVER_WEIGHT;
// all servers have weight 1.0, when weights are not configured.
sref->inv_weight = 1.0;
sref->connections = 0;
sref->active = true;
}
@ -1628,7 +1626,6 @@ bool service_all_services_have_listeners()
return rval;
}
static void service_calculate_weights(SERVICE *service)
{
const char *weightby = serviceGetWeightingParameter(service);
@ -1637,31 +1634,25 @@ static void service_calculate_weights(SERVICE *service)
{
char buf[50]; // Enough to hold most numbers
/** Service has a weighting parameter and at least one server */
int total = 0;
double total {0};
bool weights_are_in_use = false;
/** Calculate total weight */
for (SERVER_REF *server = service->dbref; server; server = server->next)
{
server->weight = SERVICE_BASE_SERVER_WEIGHT;
if (server_get_parameter(server->server, weightby, buf, sizeof(buf)))
{
total += atoi(buf);
weights_are_in_use = true;
}
}
if (total == 0)
if (!weights_are_in_use)
{
MXS_WARNING("Weighting Parameter for service '%s' will be ignored as "
"no servers have values for the parameter '%s'.",
service->name, weightby);
}
else if (total < 0)
{
MXS_ERROR("Sum of weighting parameter '%s' for service '%s' exceeds "
"maximum value of %d. Weighting will be ignored.",
weightby, service->name, INT_MAX);
}
else
{
/** Calculate the relative weight of the servers */
@ -1669,34 +1660,27 @@ static void service_calculate_weights(SERVICE *service)
{
if (server_get_parameter(server->server, weightby, buf, sizeof(buf)))
{
int wght = atoi(buf);
int perc = (wght * SERVICE_BASE_SERVER_WEIGHT) / total;
if (perc == 0)
int config_weight = atoi(buf);
if (config_weight <= 0)
{
MXS_WARNING("Weighting parameter '%s' with a value of %d for"
" server '%s' rounds down to zero with total weight"
" of %d for service '%s'. No queries will be "
"routed to this server as long as a server with"
" positive weight is available.",
weightby, wght, server->server->name,
total, service->name);
MXS_WARNING("Weighting parameter '%s' is set to %d for server '%s'."
" The runtime weight will be set to 0, and the server"
" will only be used if no other servers are available.",
weightby,
config_weight,
server->server->name);
config_weight = 0;
}
else if (perc < 0)
{
MXS_ERROR("Weighting parameter '%s' for server '%s' is too large, "
"maximum value is %d. No weighting will be used for this "
"server.", weightby, server->server->name,
INT_MAX / SERVICE_BASE_SERVER_WEIGHT);
perc = SERVICE_BASE_SERVER_WEIGHT;
}
server->weight = perc;
server->inv_weight = 1.0 - config_weight / total;
}
else
{
MXS_WARNING("Server '%s' has no parameter '%s' used for weighting"
" for service '%s'.", server->server->name,
weightby, service->name);
MXS_WARNING("Weighting parameter '%s' is not set for server '%s'."
" The runtime weight will be set to 0, and the server"
" will only be used if no other servers are available.",
weightby,
server->server->name);
server->inv_weight = 1.0;
}
}
}

View File

@ -358,23 +358,30 @@ newSession(MXS_ROUTER *instance, MXS_SESSION *session)
{
candidate = ref;
}
else if (ref->weight == 0 || candidate->weight == 0)
else if (candidate->connections == 0)
{
candidate = ref->weight ? ref : candidate;
/* The candidate is already as good as it gets. */
}
else if (((ref->connections + 1) * 1000) / ref->weight <
((candidate->connections + 1) * 1000) / candidate->weight)
else if (ref->connections == 0)
{
/* This running server has fewer connections, set it as a new candidate */
candidate = ref;
}
else if (((ref->connections + 1) * 1000) / ref->weight ==
((candidate->connections + 1) * 1000) / candidate->weight &&
else if (ref->inv_weight * ref->connections <
candidate->inv_weight * candidate->connections)
{
/* ref has a better score. */
candidate = ref;
}
else if (almost_equal_server_scores(ref->inv_weight * ref->connections,
candidate->inv_weight * candidate->connections) &&
ref->server->stats.n_connections < candidate->server->stats.n_connections)
{
/* This running server has the same number of connections currently as the candidate
but has had fewer connections over time than candidate, set this server to
candidate*/
/* The servers are about equally good, but ref has had fewer connections over time.
* TODO: On second thought, if the servers are currently about equally good,
* should selection not favor the one that has had more connections over time,
* since load balancing has previously found it to be better? Or perhaps
* this check has very little effect anyway.
*/
candidate = ref;
}
}
@ -686,7 +693,7 @@ diagnostics(MXS_ROUTER *router, DCB *dcb)
{
dcb_printf(dcb, "\t\t%-20s %3.1f%% %d\n",
ref->server->name,
(float) ref->weight / 10,
(1.0-ref->inv_weight) * 100,
ref->connections);
}
}
@ -854,8 +861,8 @@ static SERVER_REF *get_root_master(SERVER_REF *servers)
{
if (ref->active && server_is_master(ref->server))
{
// No master found yet or this one has higher weight.
if (master_host == NULL || ref->weight > master_host->weight)
// No master found yet or this one has better weight.
if (master_host == NULL || ref->inv_weight < master_host->inv_weight)
{
master_host = ref;
}

View File

@ -307,7 +307,7 @@ void RWSplit::diagnostics(DCB *dcb)
for (SERVER_REF *ref = service()->dbref; ref; ref = ref->next)
{
dcb_printf(dcb, "\t\t%-20s %3.1f%% %-6d %-6d %d\n",
ref->server->name, (float)ref->weight / 10,
ref->server->name, (1.0 - ref->inv_weight) * 100,
ref->server->stats.n_current, ref->connections,
ref->server->stats.n_current_ops);
}

View File

@ -25,6 +25,7 @@
#include <map>
#include <string>
#include <mutex>
#include <functional>
#include <maxscale/dcb.h>
#include <maxscale/log.h>
@ -61,22 +62,13 @@ typedef uint32_t route_target_t;
*/
enum select_criteria_t
{
UNDEFINED_CRITERIA = 0,
LEAST_GLOBAL_CONNECTIONS, /**< all connections established by MaxScale */
LEAST_ROUTER_CONNECTIONS, /**< connections established by this router */
LEAST_BEHIND_MASTER,
LEAST_CURRENT_OPERATIONS,
LOWEST_RESPONSE_TIME,
LAST_CRITERIA /**< not used except for an index */
LOWEST_RESPONSE_TIME
};
#define STRCRITERIA(c) ((c) == UNDEFINED_CRITERIA ? "UNDEFINED_CRITERIA" : \
((c) == LEAST_GLOBAL_CONNECTIONS ? "LEAST_GLOBAL_CONNECTIONS" : \
((c) == LEAST_ROUTER_CONNECTIONS ? "LEAST_ROUTER_CONNECTIONS" : \
((c) == LEAST_BEHIND_MASTER ? "LEAST_BEHIND_MASTER" : \
((c) == LEAST_CURRENT_OPERATIONS ? "LEAST_CURRENT_OPERATIONS" : \
"Unknown criteria")))))
/**
* Controls how master failure is handled
*/
@ -136,12 +128,21 @@ static const char gtid_wait_stmt[] =
"SET @maxscale_secret_variable=(SELECT CASE WHEN %s('%s', %s) = 0 "
"THEN 1 ELSE (SELECT 1 FROM INFORMATION_SCHEMA.ENGINES) END);";
/** Function that returns a "score" for a server to enable comparison.
* Smaller numbers are better.
*/
using SRWBackendVector = std::vector<mxs::SRWBackend*>;
using BackendSelectFunction = std::function
<SRWBackendVector::const_iterator (const SRWBackendVector& sBackends)>;
BackendSelectFunction get_backend_select_function(select_criteria_t);
struct Config
{
Config(MXS_CONFIG_PARAMETER* params):
slave_selection_criteria(
(select_criteria_t)config_get_enum(
params, "slave_selection_criteria", slave_selection_criteria_values)),
backend_select_fct(get_backend_select_function(slave_selection_criteria)),
use_sql_variables_in(
(mxs_target_t)config_get_enum(
params, "use_sql_variables_in", use_sql_variables_in_values)),
@ -173,7 +174,9 @@ struct Config
}
}
select_criteria_t slave_selection_criteria; /**< The slave selection criteria */
select_criteria_t slave_selection_criteria; /**< The slave selection criteria */
BackendSelectFunction backend_select_fct;
mxs_target_t use_sql_variables_in; /**< Whether to send user variables to
* master or all nodes */
failure_mode master_failure_mode; /**< Master server failure handling mode */
@ -370,24 +373,20 @@ mxs::SRWBackend get_root_master(const mxs::SRWBackendList& backends);
*/
std::pair<int, int> get_slave_counts(mxs::SRWBackendList& backends, mxs::SRWBackend& master);
/* TODO, hopefully temporary */
using BackendSPtrVec = std::vector<mxs::SRWBackend*>;
/**
* Find the best backend based on categorizing the servers, and then applying
* selection criteria to the best category.
* Find the best backend by grouping the servers by priority, and then applying
* selection criteria to the best group.
*
* @param backends: vector of SRWBackend
* @param sc: which select_criteria_t to use
* @param select: selection function
* @param master_accept_reads: NOTE: even if this is false, in some cases a master can
* still be selected for reads.
*
* @return Valid iterator into argument backends, or end(backends) if empty
*/
BackendSPtrVec::const_iterator find_best_backend(const BackendSPtrVec& backends,
select_criteria_t sc,
bool masters_accept_reads);
SRWBackendVector::const_iterator find_best_backend(const SRWBackendVector& backends,
BackendSelectFunction select,
bool masters_accepts_reads);
/*
* The following are implemented in rwsplit_tmp_table_multi.c

View File

@ -36,8 +36,6 @@ using namespace maxscale;
* write split router, and not intended to be called from anywhere else.
*/
extern int (*criteria_cmpfun[LAST_CRITERIA])(const SRWBackend&, const SRWBackend&);
void RWSplitSession::handle_connection_keepalive(SRWBackend& target)
{
mxb_assert(target);
@ -543,7 +541,7 @@ SRWBackend RWSplitSession::get_slave_backend(int max_rlag)
{
// create a list of useable backends (includes masters, function name is a bit off),
// then feed that list to compare.
BackendSPtrVec candidates;
SRWBackendVector candidates;
auto counts = get_slave_counts(m_backends, m_current_master);
for (auto& backend : m_backends)
@ -566,8 +564,8 @@ SRWBackend RWSplitSession::get_slave_backend(int max_rlag)
}
}
BackendSPtrVec::const_iterator rval = find_best_backend(candidates,
m_config.slave_selection_criteria,
SRWBackendVector::const_iterator rval = find_best_backend(candidates,
m_config.backend_select_fct,
m_config.master_accept_reads);
return (rval == candidates.end()) ? SRWBackend() : **rval;

View File

@ -19,12 +19,24 @@
#include <stdlib.h>
#include <stdint.h>
#include <sstream>
#include <functional>
#include <random>
#include <iostream>
#include <maxbase/stopwatch.hh>
#include <maxscale/router.h>
using namespace maxscale;
// TODO, there should be a utility with the most common used random cases.
// FYI: rand() is about twice as fast as the below toss.

/** Draw a pseudo-random number uniformly distributed in [0.0, 1.0).
 *
 *  The engine is default-seeded, so the sequence is identical between runs.
 */
double toss()
{
    static std::mt19937 random_engine;
    static std::uniform_real_distribution<> zero_to_one(0.0, 1.0);

    return zero_to_one(random_engine);
}
/**
* The functions that implement back end selection for the read write
* split router. All of these functions are internal to that router and
@ -45,213 +57,158 @@ static bool valid_for_slave(const SRWBackend& backend, const SRWBackend& master)
(!master || backend != master);
}
/** Compare number of connections from this router in backend servers */
static int backend_cmp_router_conn(const SRWBackend& a, const SRWBackend& b)
SRWBackendVector::const_iterator best_score(const SRWBackendVector& sBackends,
std::function <double(SERVER_REF* server)> server_score)
{
SERVER_REF *first = a->backend();
SERVER_REF *second = b->backend();
if (first->weight == 0 && second->weight == 0)
double min {std::numeric_limits<double>::max()};
auto best = sBackends.end();
for (auto ite = sBackends.begin(); ite != sBackends.end(); ++ite)
{
return first->connections - second->connections;
}
else if (first->weight == 0)
{
return 1;
}
else if (second->weight == 0)
{
return -1;
}
return ((1000 + 1000 * first->connections) / first->weight) -
((1000 + 1000 * second->connections) / second->weight);
}
/** Compare number of global connections in backend servers */
static int backend_cmp_global_conn(const SRWBackend& a, const SRWBackend& b)
{
SERVER_REF *first = a->backend();
SERVER_REF *second = b->backend();
if (first->weight == 0 && second->weight == 0)
{
return first->server->stats.n_current -
second->server->stats.n_current;
}
else if (first->weight == 0)
{
return 1;
}
else if (second->weight == 0)
{
return -1;
}
return ((1000 + 1000 * first->server->stats.n_current) / first->weight) -
((1000 + 1000 * second->server->stats.n_current) / second->weight);
}
/** Compare replication lag between backend servers */
static int backend_cmp_behind_master(const SRWBackend& a, const SRWBackend& b)
{
SERVER_REF *first = a->backend();
SERVER_REF *second = b->backend();
if (first->weight == 0 && second->weight == 0)
{
return first->server->rlag -
second->server->rlag;
}
else if (first->weight == 0)
{
return 1;
}
else if (second->weight == 0)
{
return -1;
}
return ((1000 + 1000 * first->server->rlag) / first->weight) -
((1000 + 1000 * second->server->rlag) / second->weight);
}
/** Compare number of current operations in backend servers */
static int backend_cmp_current_load(const SRWBackend& a, const SRWBackend& b)
{
SERVER_REF *first = a->backend();
SERVER_REF *second = b->backend();
if (first->weight == 0 && second->weight == 0)
{
return first->server->stats.n_current_ops - second->server->stats.n_current_ops;
}
else if (first->weight == 0)
{
return 1;
}
else if (second->weight == 0)
{
return -1;
}
return ((1000 + 1000 * first->server->stats.n_current_ops) / first->weight) -
((1000 + 1000 * second->server->stats.n_current_ops) / second->weight);
}
/** nantti. TODO. TEMP, this needs to see all eligible servers at the same time.
*/
static int backend_cmp_response_time(const SRWBackend& a, const SRWBackend& b)
{
// Minimum average response time for use in selection. Avoids special cases (zero),
// and new servers immediately get some traffic.
constexpr double min_average = 100.0/1000000000; // 100 nano seconds
// Invert the response times.
double lhs = 1/std::max(min_average, a->backend()->server->response_time->average());
double rhs = 1/std::max(min_average, b->backend()->server->response_time->average());
// Clamp values to a range where the slowest is at least some fraction of the speed of the
// fastest. This allows sampling of slaves that have experienced anomalies. Also, if one
// slave is really slow compared to another, something is wrong and perhaps we should
// log something informational.
constexpr int clamp = 20;
double fastest = std::max(lhs, rhs);
lhs = std::max(lhs, fastest / clamp);
rhs = std::max(rhs, fastest / clamp);
// If random numbers are too slow to generate, an array of, say 500'000
// random numbers in the range [0.0, 1.0] could be generated during startup.
double r = rand() / static_cast<double>(RAND_MAX);
return (r < (lhs / (lhs + rhs))) ? -1 : 1;
}
/**
* The order of functions _must_ match with the order the select criteria are
* listed in select_criteria_t definition in readwritesplit.h
*/
int (*criteria_cmpfun[LAST_CRITERIA])(const SRWBackend&, const SRWBackend&) =
{
NULL,
backend_cmp_global_conn,
backend_cmp_router_conn,
backend_cmp_behind_master,
backend_cmp_current_load,
backend_cmp_response_time
};
// This is still the current compare method. The response-time compare, along with anything
// using weights, have to change to use the whole array at once to be correct. Id est, everything
// will change to use the whole array in the next iteration.
static BackendSPtrVec::const_iterator run_comparison(const BackendSPtrVec& candidates,
select_criteria_t sc)
{
if (candidates.empty()) return candidates.end();
auto best = candidates.begin();
for (auto rival = std::next(best);
rival != candidates.end();
rival = std::next(rival))
{
if (criteria_cmpfun[sc](**best, **rival) > 0)
double score = server_score((***ite).backend());
if (min > score)
{
best = rival;
min = score;
best = ite;
}
}
return best;
}
/**
* @brief Find the best slave candidate for a new connection.
*
* @param bends backends
* @param master the master server
* @param sc which select_criteria_t to use
*
* @return The best slave backend reference or null if no candidates could be found
*/
static SRWBackend get_slave_candidate(const SRWBackendList& bends,
const SRWBackend& master,
select_criteria_t sc)
/** Compare number of connections from this router in backend servers */
SRWBackendVector::const_iterator backend_cmp_router_conn(const SRWBackendVector& sBackends)
{
// TODO, nantti, see if this and get_slave_backend can be combined to a single function
BackendSPtrVec backends;
for (auto& b : bends) // match intefaces. TODO, should go away in the future.
static auto server_score = [](SERVER_REF * server)
{
backends.push_back(const_cast<SRWBackend*>(&b));
}
BackendSPtrVec candidates;
return server->inv_weight * server->connections;
};
for (auto& backend : backends)
return best_score(sBackends, server_score);
}
/** Compare number of global connections in backend servers */
SRWBackendVector::const_iterator backend_cmp_global_conn(const SRWBackendVector& sBackends)
{
static auto server_score = [](SERVER_REF * server)
{
if (!(*backend)->in_use()
&& (*backend)->can_connect()
&& valid_for_slave(*backend, master))
return server->inv_weight * server->server->stats.n_current;
};
return best_score(sBackends, server_score);
}
/** Compare replication lag between backend servers */
SRWBackendVector::const_iterator backend_cmp_behind_master(const SRWBackendVector& sBackends)
{
    // Weighted inverse score: smaller is better, replication lag scaled
    // by the server's inverse weight.
    auto rlag_score = [](SERVER_REF* pRef) {
        return pRef->inv_weight * pRef->server->rlag;
    };

    return best_score(sBackends, rlag_score);
}
/** Compare number of current operations in backend servers */
SRWBackendVector::const_iterator backend_cmp_current_load(const SRWBackendVector& sBackends)
{
    // Weighted inverse score: smaller is better, current operation count
    // scaled by the server's inverse weight.
    auto ops_score = [](SERVER_REF* pRef) {
        return pRef->inv_weight * pRef->server->stats.n_current_ops;
    };

    return best_score(sBackends, ops_score);
}
SRWBackendVector::const_iterator backend_cmp_response_time(const SRWBackendVector& sBackends)
{
const int SZ = sBackends.size();
double slot[SZ];
// fill slots with inverses of averages
double total {0};
for (int i = 0; i < SZ; ++i)
{
SERVER_REF* server = (**sBackends[i]).backend();
auto ave = server->server->response_time->average();
if (ave==0)
{
candidates.push_back(backend);
constexpr double very_quick = 1.0/10000000; // arbitrary very short duration (0.1 microseconds)
slot[i] = 1 / very_quick; // will be used and updated (almost) immediately.
}
else
{
slot[i] = 1 / ave;
}
slot[i] = slot[i]*slot[i]; // favor faster servers even more
total += slot[i];
}
// turn slots into a roulette wheel, where sum of slots is 1.0
for (int i = 0; i < SZ; ++i)
{
slot[i] = slot[i] / total;
}
// Find the winner, roll the ball:
double ball = toss();
double slot_walk {0};
int winner {0};
for (; winner < SZ; ++winner)
{
slot_walk += slot[winner];
if (ball < slot_walk)
{
break;
}
}
return !candidates.empty() ? **run_comparison(candidates, sc) : SRWBackend();
return sBackends.begin() + winner;
}
BackendSPtrVec::const_iterator find_best_backend(const BackendSPtrVec& backends,
select_criteria_t sc,
bool masters_accept_reads)
BackendSelectFunction get_backend_select_function(select_criteria_t sc)
{
// Divide backends to priorities. The set of highest priority backends will then compete.
std::map<int, BackendSPtrVec> priority_map;;
switch (sc)
{
case LEAST_GLOBAL_CONNECTIONS:
return backend_cmp_global_conn;
case LEAST_ROUTER_CONNECTIONS:
return backend_cmp_router_conn;
case LEAST_BEHIND_MASTER:
return backend_cmp_behind_master;
case LEAST_CURRENT_OPERATIONS:
return backend_cmp_current_load;
case LOWEST_RESPONSE_TIME:
return backend_cmp_response_time;
}
assert(false && "incorrect use of select_criteria_t");
return backend_cmp_current_load;
}
/**
* @brief Find the best slave candidate for routing reads.
*
* @param backends All backends
* @param select Server selection function
* @param masters_accepts_reads
*
* @return iterator to the best slave or backends.end() if none found
*/
SRWBackendVector::const_iterator find_best_backend(const SRWBackendVector& backends,
BackendSelectFunction select,
bool masters_accepts_reads)
{
// Group backends by priority. The set of highest priority backends will then compete.
std::map<int, SRWBackendVector> priority_map;
int best_priority {INT_MAX}; // low numbers are high priority
for (auto& pSBackend : backends)
for (auto& psBackend : backends)
{
auto& backend = **pSBackend;
auto& backend = **psBackend;
bool is_busy = backend.in_use() && backend.has_session_commands();
bool acts_slave = backend.is_slave() || (backend.is_master() && masters_accept_reads);
bool acts_slave = backend.is_slave() || (backend.is_master() && masters_accepts_reads);
int priority;
if (acts_slave)
@ -270,11 +227,11 @@ BackendSPtrVec::const_iterator find_best_backend(const BackendSPtrVec& backends,
priority = 2; // idle masters with masters_accept_reads==false
}
priority_map[priority].push_back(pSBackend);
priority_map[priority].push_back(psBackend);
best_priority = std::min(best_priority, priority);
}
auto best = run_comparison(priority_map[best_priority], sc);
auto best = select(priority_map[best_priority]);
return std::find(backends.begin(), backends.end(), *best);
}
@ -399,7 +356,7 @@ bool RWSplit::select_connect_backend_servers(MXS_SESSION *session,
connection_type type)
{
SRWBackend master = get_root_master(backends);
Config cnf(config());
const Config& cnf {config()};
if (!master && cnf.master_failure_mode == RW_FAIL_INSTANTLY)
{
@ -439,34 +396,66 @@ bool RWSplit::select_connect_backend_servers(MXS_SESSION *session,
mxb_assert(slaves_connected <= max_nslaves || max_nslaves == 0);
if (slaves_connected < max_nslaves)
SRWBackendVector candidates;
for (auto& sBackend : backends)
{
/** Connect to all possible slaves */
for (SRWBackend backend(get_slave_candidate(backends, master, select_criteria));
backend && slaves_connected < max_nslaves;
backend = get_slave_candidate(backends, master, select_criteria))
if (!sBackend->in_use()
&& sBackend->can_connect()
&& valid_for_slave(sBackend, master))
{
if (backend->can_connect() && backend->connect(session, sescmd_list))
{
MXS_INFO("Selected Slave: %s", backend->name());
if (sescmd_list && sescmd_list->size() && expected_responses)
{
(*expected_responses)++;
}
slaves_connected++;
}
candidates.push_back(&sBackend);
}
}
else
{
/**
* We are already connected to all possible slaves. Currently this can
* only happen if this function is called by handle_error_new_connection
* and the routing of queued queries created new connections.
*/
}
while (slaves_connected < max_nslaves && candidates.size())
{
auto ite = m_config->backend_select_fct(candidates);
if (ite == candidates.end()) break;
auto& backend = **ite;
if (backend->connect(session, sescmd_list))
{
MXS_INFO("Selected Slave: %s", backend->name());
if (sescmd_list && sescmd_list->size() && expected_responses)
{
(*expected_responses)++;
}
++slaves_connected;
}
candidates.erase(ite);
}
return true;
}
/** Documenting ideas and tests. This will be removed before merging to develop.
 * The strategy with least operations performs very well.
 * Lowest response time (should rename to fastest server) beats all other methods,
 * but least operations comes close. There are probably a whole set of rules for adaptive
 * load balancing. For example,
 * 1. If there is low traffic (few operations), pick the fastest machine. But due to its nature,
 * other machines need to be tested once in a while.
 * 2. Favour the fast machines more than the distribution would suggest. Squaring the normalized
 * fitness (goodness) is clearly right, but maybe not the optimal choice.
 * 3. The parameters of EMAverage do not seem to weigh in as much as my intuition suggested.
 * The tests with machines with very different speeds still give great results.
 * The important thing is that it responds fast enough to changes in speed, some of which is
 * caused by the load balancing itself (favoring a fast machine makes it slower).
 * 4. My tests have used a single and simple sql query. In the face of very divergent queries,
 * maybe a standard query could be used to assess a server's speed, initially and once in a while
 * if traffic slows down.
 * 5. Alternatively to 4), the EMAverage could take the time between queries into account, so that
 * it converges faster to new measurements if they are far apart in time. I have a feeling
 * this could make a real difference.
 * 6. It might make sense to do a little math to see how to best use slower machines. It is clear
 * some queries should be offloaded to them when volume is high, and the random method I use
 * could be made better.
 * 7. Another idea is to favor faster machines even more, but at the same time increase the rating
 * of slower machines as time goes by. In that way slower machines are not used unnecessarily,
 * but in time they still get some traffic, which might show them to now be faster, or immediately
 * be downgraded again.
 * 8. Canonicals could be used, but I don't really see how...
 * 9. Are all those preconditions needed (like rlag)?
 */