Format routers and monitors

Markus Mäkelä
2019-05-09 11:05:34 +03:00
parent 6625c1296b
commit 418ccf861d
42 changed files with 358 additions and 340 deletions

View File

@ -21,11 +21,10 @@ namespace
{
const char CN_DYNAMIC[] = "dynamic";
const char CN_NORMAL[] = "normal";
const char CN_QUORUM[] = "quorum";
const char CN_STATIC[] = "static";
const char CN_UNKNOWN[] = "unknown";
}
std::string Clustrix::to_string(Clustrix::Status status)

View File

@ -120,5 +120,4 @@ inline bool ping_or_connect_to_hub(const char* zName,
{
return ping_or_connect_to_hub(zName, settings, softfailed, *ms.server, &ms.con);
}
}

View File

@ -76,7 +76,7 @@ private:
int m_instance;
};
inline std::ostream& operator << (std::ostream& out, const ClustrixMembership& x)
inline std::ostream& operator<<(std::ostream& out, const ClustrixMembership& x)
{
x.print(out);
return out;

View File

@ -42,7 +42,6 @@ bool handle_unsoftfail(const MODULECMD_ARG* args, json_t** error_out)
return pMon->unsoftfail(pServer, error_out);
}
}
/**

View File

@ -17,18 +17,18 @@
#include <maxscale/ccdefs.hh>
#include <maxbase/log.hh>
#define CLUSTER_MONITOR_INTERVAL_NAME "cluster_monitor_interval"
const long DEFAULT_CLUSTER_MONITOR_INTERVAL_VALUE = 60000;
#define DEFAULT_CLUSTER_MONITOR_INTERVAL_ZVALUE "60000"
#define HEALTH_CHECK_THRESHOLD_NAME "health_check_threshold"
const long DEFAULT_HEALTH_CHECK_THRESHOLD_VALUE = 2;
#define DEFAULT_HEALTH_CHECK_THRESHOLD_ZVALUE "2"
#define DYNAMIC_NODE_DETECTION_NAME "dynamic_node_detection"
const bool DEFAULT_DYNAMIC_NODE_DETECTION_VALUE = true;
#define DEFAULT_DYNAMIC_NODE_DETECTION_ZVALUE "true"
#define HEALTH_CHECK_PORT_NAME "health_check_port"
const long DEFAULT_HEALTH_CHECK_PORT_VALUE = 3581;
#define DEFAULT_HEALTH_CHECK_PORT_ZVALUE "3581"

View File

@ -76,7 +76,7 @@ static const char SQL_DN_SELECT[] =
"SELECT ip, mysql_port FROM dynamic_nodes";
using HostPortPair = std::pair<std::string, int>;
using HostPortPairs = std::vector<HostPortPair>;
// sqlite3 callback.
@ -93,7 +93,6 @@ int select_cb(void* pData, int nColumns, char** ppColumn, char** ppNames)
return 0;
}
}
namespace
@ -150,7 +149,6 @@ sqlite3* open_or_create_db(const std::string& path)
return pDb;
}
}
ClustrixMonitor::ClustrixMonitor(const string& name, const string& module, sqlite3* pDb)
@ -164,7 +162,7 @@ ClustrixMonitor::~ClustrixMonitor()
sqlite3_close_v2(m_pDb);
}
//static
// static
ClustrixMonitor* ClustrixMonitor::create(const string& name, const string& module)
{
string path = get_datadir();
@ -224,9 +222,9 @@ bool ClustrixMonitor::softfail(SERVER* pServer, json_t** ppError)
if (is_running())
{
call([this, pServer, ppError, &rv]() {
rv = perform_softfail(pServer, ppError);
},
EXECUTE_QUEUED);
}
else
{
@ -246,9 +244,9 @@ bool ClustrixMonitor::unsoftfail(SERVER* pServer, json_t** ppError)
if (is_running())
{
call([this, pServer, ppError, &rv]() {
rv = perform_unsoftfail(pServer, ppError);
},
EXECUTE_QUEUED);
}
else
{
@ -317,17 +315,17 @@ void ClustrixMonitor::tick()
case http::Async::ERROR:
MXS_WARNING("%s: Health check round ended with general error.", name());
make_health_check();
break;
case http::Async::READY:
update_server_statuses();
if (!m_health_urls.empty())
{
make_health_check();
}
break;
}
flush_server_status();
@ -903,7 +901,7 @@ bool ClustrixMonitor::check_cluster_membership(MYSQL* pHub_con,
else
{
MXS_ERROR("%s: Could not execute '%s' on %s: %s",
name(), ZQUERY, mysql_get_host_info(pHub_con), mysql_error(pHub_con));
}
return rv;
@ -949,7 +947,7 @@ void ClustrixMonitor::update_server_statuses()
pMs->stash_current_status();
auto it = find_if(m_nodes_by_id.begin(), m_nodes_by_id.end(),
[pMs](const std::pair<int,ClustrixNode>& element) -> bool {
[pMs](const std::pair<int, ClustrixNode>& element) -> bool {
const ClustrixNode& info = element.second;
return pMs->server->address == info.ip();
});
@ -983,16 +981,16 @@ void ClustrixMonitor::make_health_check()
switch (m_http.status())
{
case http::Async::PENDING:
initiate_delayed_http_check();
break;
case http::Async::ERROR:
MXS_ERROR("%s: Could not initiate health check.", name());
break;
MXS_ERROR("%s: Could not initiate health check.", name());
break;
case http::Async::READY:
MXS_INFO("%s: Health check available immediately.", name());
break;
}
}
@ -1035,7 +1033,7 @@ bool ClustrixMonitor::check_http(Call::action_t action)
for (const auto& result : results)
{
bool running = (result.code == 200); // HTTP OK
ClustrixNode& node = it->second;
@ -1126,7 +1124,7 @@ bool ClustrixMonitor::perform_operation(Operation operation,
if (m_pHub_con)
{
auto it = find_if(m_nodes_by_id.begin(), m_nodes_by_id.end(),
[pServer] (const std::pair<int, ClustrixNode>& element) {
[pServer](const std::pair<int, ClustrixNode>& element) {
return element.second.server() == pServer;
});
@ -1137,7 +1135,8 @@ bool ClustrixMonitor::perform_operation(Operation operation,
const char ZQUERY_FORMAT[] = "ALTER CLUSTER %s %d";
int id = node.id();
char zQuery[sizeof(ZQUERY_FORMAT) + sizeof(ZUNSOFTFAIL) + UINTLEN(id)]; // ZUNSOFTFAIL is longer
// ZUNSOFTFAIL is longer
char zQuery[sizeof(ZQUERY_FORMAT) + sizeof(ZUNSOFTFAIL) + UINTLEN(id)];
sprintf(zQuery, ZQUERY_FORMAT, zOperation, id);

View File

@ -21,8 +21,8 @@
#include "clustrixmembership.hh"
#include "clustrixnode.hh"
class ClustrixMonitor : public maxscale::MonitorWorker,
private ClustrixNode::Persister
class ClustrixMonitor : public maxscale::MonitorWorker
, private ClustrixNode::Persister
{
ClustrixMonitor(const ClustrixMonitor&) = delete;
ClustrixMonitor& operator=(const ClustrixMonitor&) = delete;
@ -36,7 +36,7 @@ public:
, m_dynamic_node_detection(DEFAULT_DYNAMIC_NODE_DETECTION_VALUE)
, m_health_check_port(DEFAULT_HEALTH_CHECK_PORT_VALUE)
{
};
}
long cluster_monitor_interval() const
{
@ -179,9 +179,9 @@ private:
std::map<int, ClustrixNode> m_nodes_by_id;
std::vector<std::string> m_health_urls;
mxb::http::Async m_http;
uint32_t m_delayed_http_check_id { 0 };
long m_last_cluster_check { 0 };
SERVER* m_pHub_server { nullptr };
MYSQL* m_pHub_con { nullptr };
sqlite3* m_pDb { nullptr };
uint32_t m_delayed_http_check_id {0};
long m_last_cluster_check {0};
SERVER* m_pHub_server {nullptr};
MYSQL* m_pHub_con {nullptr};
sqlite3* m_pDb {nullptr};
};

View File

@ -110,7 +110,7 @@ public:
bool is_running() const
{
return m_nRunning > 0;
}
void set_running(bool running, approach_t approach = APPROACH_DEFAULT)
@ -231,15 +231,15 @@ private:
Clustrix::SubState m_substate;
int m_instance;
std::string m_ip;
int m_mysql_port { DEFAULT_MYSQL_PORT };
int m_health_port { DEFAULT_HEALTH_PORT };
int m_health_check_threshold { DEFAULT_HEALTH_CHECK_THRESHOLD_VALUE };
int m_nRunning { 0 };
SERVER* m_pServer { nullptr };
MYSQL* m_pCon { nullptr };
int m_mysql_port {DEFAULT_MYSQL_PORT};
int m_health_port {DEFAULT_HEALTH_PORT};
int m_health_check_threshold {DEFAULT_HEALTH_CHECK_THRESHOLD_VALUE};
int m_nRunning {0};
SERVER* m_pServer {nullptr};
MYSQL* m_pCon {nullptr};
};
inline std::ostream& operator << (std::ostream& out, const ClustrixNode& x)
inline std::ostream& operator<<(std::ostream& out, const ClustrixNode& x)
{
x.print(out);
return out;

View File

@ -145,12 +145,12 @@ extern "C" MXS_MODULE* MXS_CREATE_MODULE()
"V1.0.0",
MXS_NO_MODULE_CAPABILITIES,
&maxscale::MonitorApi<CsMonitor>::s_api,
NULL, /* Process init. */
NULL, /* Process finish. */
NULL, /* Thread init. */
NULL, /* Thread finish. */
{
{"primary", MXS_MODULE_PARAM_SERVER},
{"primary", MXS_MODULE_PARAM_SERVER},
{MXS_END_MODULE_PARAMS}
}
};

View File

@ -39,11 +39,11 @@ using maxscale::MonitorServer;
/** Log a warning when a bad 'wsrep_local_index' is found */
static bool warn_erange_on_local_index = true;
static MonitorServer* set_cluster_master(MonitorServer*, MonitorServer*, int);
static void disableMasterFailback(void*, int);
static int compare_node_index(const void*, const void*);
static int compare_node_priority(const void*, const void*);
static bool using_xtrabackup(MonitorServer* database, const char* server_string);
GaleraMonitor::GaleraMonitor(const std::string& name, const std::string& module)
: MonitorWorkerSimple(name, module)
@ -277,8 +277,8 @@ void GaleraMonitor::update_server_status(MonitorServer* monitored_server)
/* Node is in desync - lets take it offline */
if (strcmp(row[0], "wsrep_desync") == 0)
{
if (strcasecmp(row[1], "YES") == 0 || strcasecmp(row[1], "ON") == 0
|| strcasecmp(row[1], "1") == 0 || strcasecmp(row[1], "true") == 0)
if (strcasecmp(row[1], "YES") || strcasecmp(row[1], "ON")
|| strcasecmp(row[1], "1") || strcasecmp(row[1], "true"))
{
info.joined = 0;
}
@ -287,8 +287,7 @@ void GaleraMonitor::update_server_status(MonitorServer* monitored_server)
/* Node rejects queries - lets take it offline */
if (strcmp(row[0], "wsrep_reject_queries") == 0)
{
if (strcasecmp(row[1], "ALL") == 0
|| strcasecmp(row[1], "ALL_KILL") == 0)
if (strcasecmp(row[1], "ALL") || strcasecmp(row[1], "ALL_KILL"))
{
info.joined = 0;
}
@ -297,8 +296,8 @@ void GaleraMonitor::update_server_status(MonitorServer* monitored_server)
/* Node rejects queries - lets take it offline */
if (strcmp(row[0], "wsrep_sst_donor_rejects_queries") == 0)
{
if (strcasecmp(row[1], "YES") == 0 || strcasecmp(row[1], "ON") == 0
|| strcasecmp(row[1], "1") == 0 || strcasecmp(row[1], "true") == 0)
if (strcasecmp(row[1], "YES") || strcasecmp(row[1], "ON")
|| strcasecmp(row[1], "1") || strcasecmp(row[1], "true"))
{
info.joined = 0;
}
@ -307,8 +306,8 @@ void GaleraMonitor::update_server_status(MonitorServer* monitored_server)
/* Node is not ready - lets take it offline */
if (strcmp(row[0], "wsrep_ready") == 0)
{
if (strcasecmp(row[1], "NO") == 0 || strcasecmp(row[1], "OFF") == 0
|| strcasecmp(row[1], "0") == 0 || strcasecmp(row[1], "false") == 0)
if (strcasecmp(row[1], "NO") || strcasecmp(row[1], "OFF")
|| strcasecmp(row[1], "0") || strcasecmp(row[1], "false"))
{
info.joined = 0;
}
@ -861,10 +860,10 @@ extern "C" MXS_MODULE* MXS_CREATE_MODULE()
"V2.0.0",
MXS_NO_MODULE_CAPABILITIES,
&maxscale::MonitorApi<GaleraMonitor>::s_api,
NULL, /* Process init. */
NULL, /* Process finish. */
NULL, /* Thread init. */
NULL, /* Thread finish. */
{
{"disable_master_failback", MXS_MODULE_PARAM_BOOL, "false"},
{"available_when_donor", MXS_MODULE_PARAM_BOOL, "false"},

View File

@ -58,25 +58,25 @@ protected:
void post_tick();
private:
int m_disableMasterFailback; /**< Monitor flag for Galera Cluster Master failback */
int m_availableWhenDonor; /**< Monitor flag for Galera Cluster Donor availability */
bool m_disableMasterRoleSetting; /**< Monitor flag to disable setting master role */
bool m_root_node_as_master; /**< Whether we require that the Master should
* have a wsrep_local_index of 0 */
bool m_use_priority; /**< Use server priorities */
bool m_set_donor_nodes; /**< set the wrep_sst_donor variable with an
* ordered list of nodes */
std::string m_cluster_uuid; /**< The Cluster UUID */
bool m_log_no_members; /**< Should we log if no member are found. */
NodeMap m_info; /**< Contains Galera Cluster variables of all nodes */
int m_cluster_size; /**< How many nodes in the cluster */
GaleraMonitor(const std::string& name, const std::string& module);
bool detect_cluster_size(const int n_nodes,
const char* candidate_uuid,
const int candidate_size);
mxs::MonitorServer* get_candidate_master();
void set_galera_cluster();
void update_sst_donor_nodes(int is_cluster);
};

View File

@ -76,7 +76,7 @@ void topology_DFS(MariaDBServer* root, VisitorFunc& visitor)
* @param next_cycle Index of next found cycle
*/
void MariaDBMonitor::tarjan_scc_visit_node(MariaDBServer* node,
ServerArray* stack,
int* next_ind,
int* next_cycle)
{

View File

@ -520,8 +520,8 @@ int MariaDBMonitor::redirect_slaves_ex(GeneralOpData& general, OperationType typ
int conflicts = 0;
auto redirection_helper =
[this, &general, &conflicts, &successes, &fails](ServerArray& redirect_these,
const MariaDBServer* from, const MariaDBServer* to,
ServerArray* redirected) {
for (MariaDBServer* redirectable : redirect_these)
{
mxb_assert(redirected != NULL);
@ -634,7 +634,7 @@ uint32_t MariaDBMonitor::do_rejoin(const ServerArray& joinable_servers, json_t**
{
// Assume that server is an old master which was failed over. Even if this is not really
// the case, the following is unlikely to do damage.
ServerOperation demotion(joinable, true, /* treat as old master */
m_handle_event_scheduler, m_demote_sql_file);
if (joinable->demote(general, demotion))
{
@ -1101,7 +1101,7 @@ void MariaDBMonitor::wait_cluster_stabilization(GeneralOpData& op, const ServerA
* @return The selected promotion target or NULL if no valid candidates
*/
MariaDBServer* MariaDBMonitor::select_promotion_target(MariaDBServer* demotion_target,
OperationType op,
Log log_mode,
json_t** error_out)
{
@ -1696,7 +1696,7 @@ MariaDBMonitor::switchover_prepare(SERVER* promotion_server, SERVER* demotion_se
demotion_target->m_slave_status, demotion_target->m_enabled_events);
ServerOperation demotion(demotion_target, master_swap, m_handle_event_scheduler,
m_demote_sql_file, promotion_target->m_slave_status,
EventNameSet() /* unused */);
GeneralOpData general(m_replication_user, m_replication_password, m_replication_ssl,
error_out, time_limit);
rval.reset(new SwitchoverParams(promotion, demotion, general));
@ -1868,13 +1868,12 @@ void MariaDBMonitor::delay_auto_cluster_ops()
}
// + 1 because the start of next tick subtracts 1.
cluster_operation_disable_timer = m_failcount + 1;
}
bool MariaDBMonitor::can_perform_cluster_ops()
{
return (!config_get_global_options()->passive && cluster_operation_disable_timer <= 0 &&
!m_cluster_modified);
return !config_get_global_options()->passive && cluster_operation_disable_timer <= 0
&& !m_cluster_modified;
}
MariaDBMonitor::SwitchoverParams::SwitchoverParams(const ServerOperation& promotion,

View File

@ -297,7 +297,7 @@ void MariaDBMonitor::diagnostics(DCB* dcb) const
string MariaDBMonitor::diagnostics_to_string() const
{
string rval;
rval.reserve(1000); // Enough for basic output.
rval += string_printf("Automatic failover: %s\n", m_auto_failover ? "Enabled" : "Disabled");
rval += string_printf("Failcount: %d\n", m_failcount);
@ -398,8 +398,8 @@ void MariaDBMonitor::tick()
const auto& conn_settings = m_settings.conn_settings;
auto update_task = [should_update_disk_space, conn_settings](MariaDBServer* server) {
server->update_server(should_update_disk_space, conn_settings);
};
// Asynchronously query all servers for their status.
std::vector<std::future<void>> futures;
@ -975,40 +975,40 @@ extern "C" MXS_MODULE* MXS_CREATE_MODULE()
NULL, /* Thread finish. */
{
{
"detect_replication_lag", MXS_MODULE_PARAM_BOOL, "false"
"detect_replication_lag", MXS_MODULE_PARAM_BOOL, "false"
},
{
"detect_stale_master", MXS_MODULE_PARAM_BOOL, "true"
"detect_stale_master", MXS_MODULE_PARAM_BOOL, "true"
},
{
"detect_stale_slave", MXS_MODULE_PARAM_BOOL, "true"
"detect_stale_slave", MXS_MODULE_PARAM_BOOL, "true"
},
{
"mysql51_replication", MXS_MODULE_PARAM_BOOL, "false",
"mysql51_replication", MXS_MODULE_PARAM_BOOL, "false",
MXS_MODULE_OPT_DEPRECATED
},
{
"multimaster", MXS_MODULE_PARAM_BOOL, "false",
"multimaster", MXS_MODULE_PARAM_BOOL, "false",
MXS_MODULE_OPT_DEPRECATED
},
{
CN_DETECT_STANDALONE_MASTER, MXS_MODULE_PARAM_BOOL, "true"
},
{
CN_FAILCOUNT, MXS_MODULE_PARAM_COUNT, "5"
},
{
"allow_cluster_recovery", MXS_MODULE_PARAM_BOOL, "true",
"allow_cluster_recovery", MXS_MODULE_PARAM_BOOL, "true",
MXS_MODULE_OPT_DEPRECATED
},
{
"ignore_external_masters", MXS_MODULE_PARAM_BOOL, "false"
"ignore_external_masters", MXS_MODULE_PARAM_BOOL, "false"
},
{
CN_AUTO_FAILOVER, MXS_MODULE_PARAM_BOOL, "false"
},
{
CN_FAILOVER_TIMEOUT, MXS_MODULE_PARAM_DURATION, "90s",
MXS_MODULE_OPT_DURATION_S
},
{
@ -1022,20 +1022,20 @@ extern "C" MXS_MODULE* MXS_CREATE_MODULE()
CN_REPLICATION_PASSWORD, MXS_MODULE_PARAM_STRING
},
{
CN_REPLICATION_MASTER_SSL, MXS_MODULE_PARAM_BOOL, "false"
},
{
CN_VERIFY_MASTER_FAILURE, MXS_MODULE_PARAM_BOOL, "true"
},
{
CN_MASTER_FAILURE_TIMEOUT, MXS_MODULE_PARAM_DURATION, "10s",
MXS_MODULE_OPT_DURATION_S
},
{
CN_AUTO_REJOIN, MXS_MODULE_PARAM_BOOL, "false"
},
{
CN_ENFORCE_READONLY, MXS_MODULE_PARAM_BOOL, "false"
},
{
CN_NO_PROMOTE_SERVERS, MXS_MODULE_PARAM_SERVERLIST
@ -1047,16 +1047,16 @@ extern "C" MXS_MODULE* MXS_CREATE_MODULE()
CN_DEMOTION_SQL_FILE, MXS_MODULE_PARAM_PATH
},
{
CN_SWITCHOVER_ON_LOW_DISK_SPACE, MXS_MODULE_PARAM_BOOL, "false"
},
{
CN_MAINTENANCE_ON_LOW_DISK_SPACE, MXS_MODULE_PARAM_BOOL, "true"
},
{
CN_HANDLE_EVENTS, MXS_MODULE_PARAM_BOOL, "true"
},
{
CN_ASSUME_UNIQUE_HOSTNAMES, MXS_MODULE_PARAM_BOOL, "true"
},
{MXS_END_MODULE_PARAMS}
}

View File

@ -203,7 +203,7 @@ private:
bool m_ignore_external_masters = false; /* Ignore masters outside of the monitor configuration.
* TODO: requires work */
bool m_assume_unique_hostnames = true; /* Are server hostnames consistent between MaxScale and servers */
int m_failcount = 1; /* Number of ticks master must be down before it's considered
* totally down, allowing failover or master change. */
// Cluster operations activation settings
@ -291,8 +291,8 @@ private:
void handle_auto_rejoin();
const MariaDBServer* slave_receiving_events(const MariaDBServer* demotion_target,
maxbase::Duration* event_age_out,
maxbase::Duration* delay_out);
std::unique_ptr<SwitchoverParams> switchover_prepare(SERVER* new_master, SERVER* current_master,
Log log_mode, json_t** error_out);
std::unique_ptr<FailoverParams> failover_prepare(Log log_mode, json_t** error_out);

View File

@ -220,7 +220,7 @@ bool MariaDBServer::execute_cmd_time_limit(const std::string& cmd, maxbase::Dura
}
else if (errmsg_out)
{
*errmsg_out = error_msg; // The error string already has all required info.
}
}
}
@ -450,7 +450,7 @@ bool MariaDBServer::update_gtids(string* errmsg_out)
m_gtid_current_pos = GtidList();
m_gtid_binlog_pos = GtidList();
}
} // If query failed, do not update gtid:s.
return rval;
}
@ -524,7 +524,6 @@ bool MariaDBServer::read_server_variables(string* errmsg_out)
m_gtid_domain_id = domain_id_parsed;
}
}
}
return rval;
@ -682,7 +681,7 @@ string MariaDBServer::diagnostics() const
const char fmt_int64[] = "%-23s %" PRIi64 "\n";
string rval;
rval.reserve(300); // Enough for most common ouput.
rval += string_printf(fmt_string, "Server:", name());
rval += string_printf(fmt_int64, "Server ID:", m_server_id);
@ -743,7 +742,7 @@ json_t* MariaDBServer::to_json() const
bool MariaDBServer::can_replicate_from(MariaDBServer* master, string* reason_out)
{
mxb_assert(reason_out);
mxb_assert(is_usable()); // The server must be running.
bool can_replicate = false;
if (m_gtid_current_pos.empty())
@ -923,7 +922,7 @@ void MariaDBServer::update_server_version()
auto srv = m_server_base->server;
mxs_mysql_update_server_version(srv, conn);
m_srv_type = server_type::UNKNOWN; // TODO: Use type information in SERVER directly
auto base_server_type = srv->type();
MYSQL_RES* result;
if (base_server_type == SERVER::Type::CLUSTRIX)
@ -954,12 +953,12 @@ void MariaDBServer::update_server_version()
if (base_server_type == SERVER::Type::MARIADB && major >= 10)
{
// 10.0.2 or 10.1.X or greater than 10
if (((minor == 0 && patch >= 2) || minor >= 1) || major > 10)
{
m_capabilities.gtid = true;
}
// 10.1.2 (10.1.1 has limited support, not enough) or 10.2.X or greater than 10
if (((minor == 1 && patch >= 2) || minor >= 2) || major > 10)
{
m_capabilities.max_statement_time = true;
}
@ -1219,8 +1218,8 @@ const SlaveStatus* MariaDBServer::slave_connection_status(const MariaDBServer* t
int target_port = target_srv->port;
for (const SlaveStatus& ss : m_slave_status)
{
if (ss.master_host == target_host && ss.master_port == target_port &&
ss.slave_sql_running && ss.slave_io_running != SlaveStatus::SLAVE_IO_NO)
if (ss.master_host == target_host && ss.master_port == target_port
&& ss.slave_sql_running && ss.slave_io_running != SlaveStatus::SLAVE_IO_NO)
{
rval = &ss;
break;
@ -1268,17 +1267,17 @@ bool MariaDBServer::enable_events(const EventNameSet& event_names, json_t** erro
// Helper function which enables a disabled event if that event name is found in the events-set.
ManipulatorFunc enabler = [this, event_names, &found_disabled_events, &events_enabled](
const EventInfo& event, json_t** error_out) {
if (event_names.count(event.name) > 0
&& (event.status == "SLAVESIDE_DISABLED" || event.status == "DISABLED"))
{
found_disabled_events++;
if (alter_event(event, "ENABLE", error_out))
{
events_enabled++;
}
}
};
bool rval = false;
if (events_foreach(enabler, error_out))
@ -1517,7 +1516,7 @@ bool MariaDBServer::promote(GeneralOpData& general, ServerOperation& promotion,
else if (type == OperationType::FAILOVER)
{
stopped = remove_slave_conns(general, {*master_conn});
master_conn = NULL; // The connection pointed to may no longer exist.
}
if (stopped)
@ -1911,8 +1910,8 @@ bool MariaDBServer::merge_slave_conns(GeneralOpData& op, const SlaveStatusArray&
if (my_slave_conn.seen_connected && my_slave_conn.master_server_id == master_id)
{
accepted = false;
const char format[] = "its Master_Server_Id (%" PRIi64 ") matches an existing "
"slave connection on '%s'.";
const char format[] = "its Master_Server_Id (%" PRIi64
") matches an existing slave connection on '%s'.";
ignore_reason = string_printf(format, master_id, name());
}
else if (my_slave_conn.master_host == slave_conn.master_host
@ -2161,9 +2160,9 @@ bool MariaDBServer::update_enabled_events()
"Status = 'ENABLED';", &error_msg);
if (event_info.get() == NULL)
{
MXS_ERROR("Could not query events of '%s': %s Event handling can be disabled by "
"setting '%s' to false.",
name(), error_msg.c_str(), CN_HANDLE_EVENTS);
MXS_ERROR("Could not query events of '%s': %s Event handling can be disabled by "
"setting '%s' to false.",
name(), error_msg.c_str(), CN_HANDLE_EVENTS);
return false;
}
@ -2176,7 +2175,7 @@ bool MariaDBServer::update_enabled_events()
while (event_info->next_row())
{
string full_name = event_info->get_string(db_name_ind) + "." + event_info->get_string(event_name_ind);
full_names.insert(full_name); // Ignore duplicates, they shouldn't exists.
}
m_enabled_events = std::move(full_names);

View File

@ -148,8 +148,8 @@ public:
* 'update_replication_settings' before use. */
ReplicationSettings m_rpl_settings;
bool m_query_events; /* Copy of monitor->m_handle_event_scheduler. TODO: move elsewhere */
EventNameSet m_enabled_events; /* Enabled scheduled events */
bool m_print_update_errormsg = true; /* Should an update error be printed? */

View File

@ -170,7 +170,7 @@ ServerOperation::ServerOperation(MariaDBServer* target, bool was_is_master, bool
ServerOperation::ServerOperation(MariaDBServer* target, bool was_is_master, bool handle_events,
const std::string& sql_file)
: ServerOperation(target, was_is_master, handle_events, sql_file,
SlaveStatusArray() /* empty */, EventNameSet() /* empty */)
{
}

View File

@ -240,7 +240,7 @@ public:
const std::string sql_file; // Path to file with SQL commands to run during op
const SlaveStatusArray conns_to_copy; // Slave connections the target should copy/merge
const EventNameSet events_to_enable; // Scheduled event names last seen on master.
ServerOperation(MariaDBServer* target, bool was_is_master, bool handle_events,
const std::string& sql_file, const SlaveStatusArray& conns_to_copy,
@ -248,5 +248,4 @@ public:
ServerOperation(MariaDBServer* target, bool was_is_master, bool handle_events,
const std::string& sql_file);
};

View File

@ -117,26 +117,26 @@ int MariaDBMonitor::Test::run_tests()
// Test 2: 4 servers, two cycles with a connection between them
init_servers(4);
EdgeArray edges2 = { { {1, 2}, {2, 1}, {3, 2}, {3, 4}, {4, 3}}};
EdgeArray edges2 = {{{1, 2}, {2, 1}, {3, 2}, {3, 4}, {4, 3}}};
add_replication(edges2);
CycleArray expected_cycles2 = { { { {1, 2}}, { {3, 4}}}};
CycleArray expected_cycles2 = {{{{1, 2}}, {{3, 4}}}};
results.push_back(check_result_cycles(expected_cycles2));
// Test 3: 6 servers, with one cycle
init_servers(6);
EdgeArray edges3 = { { {2, 1}, {3, 2}, {4, 3}, {2, 4}, {5, 1}, {6, 5}, {6, 4}}};
EdgeArray edges3 = {{{2, 1}, {3, 2}, {4, 3}, {2, 4}, {5, 1}, {6, 5}, {6, 4}}};
add_replication(edges3);
CycleArray expected_cycles3 = { { { {2, 3, 4}}}};
CycleArray expected_cycles3 = {{{{2, 3, 4}}}};
results.push_back(check_result_cycles(expected_cycles3));
// Test 4: 10 servers, with a big cycle composed of two smaller ones plus non-cycle servers
init_servers(10);
EdgeArray edges4 =
{ { {1, 5}, {2, 1}, {2, 5}, {3, 1}, {3, 4}, {3, 10}, {4, 1}, {5, 6}, {6, 7}, {6, 4}, {7, 8},
{8, 6},
{9, 8}}};
{ {{1, 5}, {2, 1}, {2, 5}, {3, 1}, {3, 4}, {3, 10}, {4, 1}, {5, 6}, {6, 7}, {6, 4}, {7, 8},
{8, 6},
{9, 8}}};
add_replication(edges4);
CycleArray expected_cycles4 = { { { {1, 5, 6, 7, 8, 4}}}};
CycleArray expected_cycles4 = {{{{1, 5, 6, 7, 8, 4}}}};
results.push_back(check_result_cycles(expected_cycles4));
clear_servers();