MXS-2219 Update datastructures instead of recreating them

The node infos of the Clustrix servers are now kept around and
updated based upon changing conditions, instead of regularly
being re-created.
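
A minimal sketch of that bookkeeping, with NodeInfo and refresh() as stand-ins
for this illustration rather than the monitor's actual classes: known nodes live
in a map keyed on the node id, each refresh updates existing entries in place,
inserts nodes that have appeared, and erases nodes that are no longer reported.

#include <map>
#include <set>
#include <string>
#include <utility>
#include <vector>

// Illustrative stand-in for the per-node record kept by the monitor.
struct NodeInfo
{
    std::string ip;
};

// Refresh the id -> node map in place instead of rebuilding it.
void refresh(std::map<int, NodeInfo>& nodes,
             const std::vector<std::pair<int, std::string>>& reported)
{
    std::set<int> stale;
    for (const auto& kv : nodes)
    {
        stale.insert(kv.first);              // assume every known node is stale ...
    }

    for (const auto& r : reported)
    {
        auto it = nodes.find(r.first);
        if (it == nodes.end())
        {
            nodes.insert(std::make_pair(r.first, NodeInfo{r.second}));  // newly appeared node
        }
        else
        {
            it->second.ip = r.second;        // known node, updated in place
            stale.erase(r.first);            // ... unless it was reported again
        }
    }

    for (int id : stale)
    {
        nodes.erase(id);                     // node has left the cluster
    }
}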

Further, the server is now looked up by name only right after it
has been created (and even that only because runtime_create_server()
is currently used).
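
Concretely, in the new code (excerpted from the diff below) the single
name-based lookup sits immediately after the creation call and the returned
SERVER pointer is cached in the node info, so later state updates need no
further lookups; the inline comment is added here for illustration:

if (runtime_create_server(name.c_str(),
                          ip.c_str(),
                          std::to_string(mysql_port).c_str(),
                          "mariadbbackend",
                          "mysqlbackendauth",
                          false))
{
    // The only lookup by name, performed once, right after the server was created.
    SERVER* pServer = SERVER::find_by_unique_name(name);
    mxb_assert(pServer);

    ClustrixNodeInfo info(id, ip, mysql_port, health_port, health_check_threshold, pServer);
    m_node_infos.insert(make_pair(id, info));
}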

The state of the dynamically created server is now updated directly
as a result of the health-check ping, while the state of the bootstrap
servers is updated during the tick() call according to the monitor
"protocol".
commit f7c840df26
parent 0d1743c76b
Author: Johan Wikman
Date:   2019-01-15 15:47:03 +02:00

3 changed files with 100 additions and 42 deletions
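
Condensed from the diff below, the two state-update paths described above look
roughly as follows; the set_running() signature is inferred, since only its
body appears in the diff:

// Dynamically created server: state written directly from the health-check result.
void set_running(bool running)                       // signature inferred
{
    if (running)
    {
        m_nRunning = m_health_check_threshold;
        m_pServer->set_status(SERVER_RUNNING);       // direct update
    }
    else if (m_nRunning > 0)
    {
        if (--m_nRunning == 0)
        {
            m_pServer->clear_status(SERVER_RUNNING); // direct update
        }
    }
}

// Bootstrap servers: updated in update_server_statuses() during tick(), using
// the usual stash/pending-status cycle.
monitor_stash_current_status(&ms);
// ...
if (info.is_running())
{
    monitor_set_pending_status(&ms, SERVER_RUNNING);
}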


@@ -13,6 +13,7 @@
 #include "clustrixmonitor.hh"
 
 #include <algorithm>
+#include <set>
 #include "../../../core/internal/config_runtime.hh"
 
 namespace http = mxb::http;
@@ -135,11 +136,14 @@ void ClustrixMonitor::fetch_cluster_nodes_from(MXS_MONITORED_SERVER& ms)
 {
     mxb_assert(mysql_field_count(ms.con) == 4);
 
-    vector<ClustrixNodeInfo> node_infos;
-    vector<string> health_urls;
-
     MYSQL_ROW row;
 
+    set<int> nids;
+    for_each(m_node_infos.begin(), m_node_infos.end(),
+             [&nids](const pair<int, ClustrixNodeInfo>& element) {
+                 nids.insert(element.first);
+             });
+
     while ((row = mysql_fetch_row(pResult)) != nullptr)
     {
         if (row[0] && row[1])
@@ -150,20 +154,27 @@ void ClustrixMonitor::fetch_cluster_nodes_from(MXS_MONITORED_SERVER& ms)
             int health_port = row[3] ? atoi(row[3]) : DEFAULT_HEALTH_PORT;
             int health_check_threshold = m_config.health_check_threshold();
 
-            string name = "Clustrix-Server-" + std::to_string(id);
+            string name = "@Clustrix-Server-" + std::to_string(id);
 
-            if (SERVER::find_by_unique_name(name) ||
-                runtime_create_server(name.c_str(),
+            auto it = m_node_infos.find(id);
+
+            if (it == m_node_infos.end())
+            {
+                mxb_assert(!SERVER::find_by_unique_name(name));
+
+                if (runtime_create_server(name.c_str(),
                                           ip.c_str(),
                                           std::to_string(mysql_port).c_str(),
                                           "mariadbbackend",
                                           "mysqlbackendauth",
                                           false))
                {
-                node_infos.emplace_back(id, ip, mysql_port, health_port, health_check_threshold);
+                    SERVER* pServer = SERVER::find_by_unique_name(name);
+                    mxb_assert(pServer);
 
-                string health_url = "http://" + ip + ":" + std::to_string(health_port);
-                health_urls.push_back(health_url);
+                    ClustrixNodeInfo info(id, ip, mysql_port, health_port, health_check_threshold, pServer);
+
+                    m_node_infos.insert(make_pair(id, info));
                }
                else
                {
@@ -173,14 +184,40 @@ void ClustrixMonitor::fetch_cluster_nodes_from(MXS_MONITORED_SERVER& ms)
             }
             else
             {
-                MXS_WARNING("Either nodeid and/or iface_ip is missing, "
-                            "ignoring node.");
+                mxb_assert(SERVER::find_by_unique_name(name));
+
+                auto it = nids.find(id);
+                mxb_assert(it != nids.end());
+                nids.erase(it);
+            }
+        }
+        else
+        {
+            MXS_WARNING("Either nodeid and/or iface_ip is missing, ignoring node.");
         }
     }
 
     mysql_free_result(pResult);
 
-    m_node_infos.swap(node_infos);
+    for_each(nids.begin(), nids.end(),
+             [this](int nid) {
+                 auto it = m_node_infos.find(nid);
+                 mxb_assert(it != m_node_infos.end());
+
+                 ClustrixNodeInfo& info = it->second;
+                 info.deactivate_server();
+
+                 m_node_infos.erase(it);
+             });
+
+    vector<string> health_urls;
+    for_each(m_node_infos.begin(), m_node_infos.end(),
+             [&health_urls](const pair<int, ClustrixNodeInfo>& element) {
+                 const ClustrixNodeInfo& info = element.second;
+                 string url = "http://" + info.ip() + ":" + std::to_string(info.health_port());
+                 health_urls.push_back(url);
+             });
+
     m_health_urls.swap(health_urls);
 
     m_last_cluster_check = now();
@@ -233,13 +270,16 @@ void ClustrixMonitor::update_server_statuses()
     monitor_stash_current_status(&ms);
 
     auto it = find_if(m_node_infos.begin(), m_node_infos.end(),
-                      [&ms](const ClustrixNodeInfo& info) -> bool {
+                      [&ms](const std::pair<int,ClustrixNodeInfo>& element) -> bool {
+                          const ClustrixNodeInfo& info = element.second;
                          return ms.server->address == info.ip();
                       });
 
     if (it != m_node_infos.end())
     {
-        if (it->is_running())
+        const ClustrixNodeInfo& info = it->second;
+
+        if (info.is_running())
         {
             monitor_set_pending_status(&ms, SERVER_RUNNING);
         }
@@ -310,10 +350,10 @@ bool ClustrixMonitor::check_http(Call::action_t action)
 {
     const vector<http::Result>& results = m_http.results();
 
-    for (size_t i = 0; i < results.size(); ++i)
-    {
-        const auto& result = results[i];
-
+    auto it = m_node_infos.begin();
+
+    for_each(results.begin(), results.end(),
+             [&it](const http::Result& result) {
                 bool running = false;
 
                 if (result.code == 200)
@@ -321,10 +361,12 @@ bool ClustrixMonitor::check_http(Call::action_t action)
                     running = true;
                 }
 
-        auto& node_info = m_node_infos[i];
+                auto& node_info = it->second;
                 node_info.set_running(running);
-    }
+
+                ++it;
+             });
 
 }
 
 break;


@@ -13,6 +13,7 @@
 #pragma once
 
 #include "clustrixmon.hh"
+#include <map>
 #include <maxscale/monitor.hh>
 #include <maxbase/http.hh>
 #include "clustrixnodeinfo.hh"
@@ -87,7 +88,7 @@ private:
 private:
     Config m_config;
     std::vector<std::string> m_config_servers;
-    std::vector<ClustrixNodeInfo> m_node_infos;
+    std::map<int, ClustrixNodeInfo> m_node_infos;
     std::vector<std::string> m_health_urls;
     mxb::http::Async m_http;
     uint32_t m_delayed_http_check_id { 0 };


@@ -24,13 +24,15 @@ public:
                      const std::string& ip,
                      int mysql_port,
                      int health_port,
-                     int health_check_threshold)
+                     int health_check_threshold,
+                     SERVER* pServer)
         : m_id(id)
         , m_ip(ip)
         , m_mysql_port(mysql_port)
         , m_health_port(health_port)
         , m_health_check_threshold(health_check_threshold)
         , m_nRunning(m_health_check_threshold)
+        , m_pServer(pServer)
     {
     }
 
@@ -64,15 +66,27 @@ public:
         if (running)
         {
             m_nRunning = m_health_check_threshold;
+
+            m_pServer->set_status(SERVER_RUNNING);
         }
         else
         {
             if (m_nRunning > 0)
             {
                 --m_nRunning;
+
+                if (m_nRunning == 0)
+                {
+                    m_pServer->clear_status(SERVER_RUNNING);
+                }
             }
         }
     }
 
+    void deactivate_server()
+    {
+        m_pServer->is_active = false;
+    }
+
     std::string to_string() const
     {
@@ -93,6 +107,7 @@ private:
     int m_health_port;
     int m_health_check_threshold;
     int m_nRunning;
+    SERVER* m_pServer;
 };
 
 inline std::ostream& operator << (std::ostream& out, const ClustrixNodeInfo& x)
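
For reference, a self-contained illustration of the health_check_threshold
behaviour implemented in set_running() above. HealthGate is a stand-in class
written for this sketch, not part of the module: a single successful ping
immediately counts the node as running again, while it takes threshold
consecutive failures before it stops counting as running.

#include <iostream>

// Stand-in for the counter logic in ClustrixNodeInfo::set_running().
class HealthGate
{
public:
    explicit HealthGate(int threshold)
        : m_threshold(threshold)
        , m_nRunning(threshold)
    {
    }

    void report(bool ping_ok)
    {
        if (ping_ok)
        {
            m_nRunning = m_threshold;      // one success restores full credit
        }
        else if (m_nRunning > 0)
        {
            --m_nRunning;                  // failures drain credit one by one
        }
    }

    bool is_running() const
    {
        return m_nRunning > 0;
    }

private:
    int m_threshold;
    int m_nRunning;
};

int main()
{
    HealthGate gate(3);                       // e.g. health_check_threshold = 3

    gate.report(false);                       // 1st failure: still considered running
    gate.report(false);                       // 2nd failure: still considered running
    std::cout << gate.is_running() << '\n';   // 1
    gate.report(false);                       // 3rd consecutive failure: now down
    std::cout << gate.is_running() << '\n';   // 0
    gate.report(true);                        // a single success brings it back
    std::cout << gate.is_running() << '\n';   // 1
    return 0;
}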