Move wait_for_maxscale functionality inside MaxScale

By exposing a (currently undocumented) debug endpoint that waits for one
monitor interval to pass, we make reusing the monitor waiting
functionality a lot easier. With it, when the test framework starts
MaxScale, it knows that at least one monitor interval has passed for all
monitors and that the system is ready to accept queries.
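
The endpoint is an ordinary GET resource under the REST API, maxscale/debug/monitor_wait, which the test framework drives with "maxctrl api get maxscale/debug/monitor_wait" (see the diffs below). As a rough sketch of the test-side usage, with the node index 0 chosen only for illustration, a test that wants the monitors to have seen the current server states keeps calling the existing helper, which now simply loops over the endpoint:

    // Sketch only: block until every running monitor has completed two passes.
    // Maxscales::wait_for_monitor() now issues the debug GET once per requested interval.
    maxscales->wait_for_monitor(2, 0);
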
Author: Markus Mäkelä
Date:   2018-11-06 13:58:44 +02:00
parent  a53dbeec57
commit  00d0ec5f8e
5 changed files with 43 additions and 53 deletions

@@ -423,4 +423,7 @@ int mon_config_get_servers(const MXS_CONFIG_PARAMETER* params,
 */
bool monitor_set_disk_space_threshold(MXS_MONITOR* monitor, const char* disk_space_threshold);

// Function for waiting one monitor interval
void monitor_debug_wait();

MXS_END_DECLS

@@ -447,56 +447,5 @@ int Maxscales::port(enum service type, int m) const
void Maxscales::wait_for_monitor(int intervals, int m)
{
    // Helper for getting number of monitor ticks
    auto get_ticks = [&](std::string name) {
            int rc;
            char* ticks = ssh_node_output_f(m,
                                            false,
                                            &rc,
                                            "maxctrl api get monitors/%s data.attributes.ticks",
                                            name.c_str());
            char* ptr;
            int rval = strtol(ticks, &ptr, 10);

            if (ptr == ticks || (*ptr != '\0' && !isspace(*ptr)))
            {
                printf("ERROR, invalid monitor tick value: %s\n", ticks);
                rval = -1;
            }

            free(ticks);
            return rval;
        };

    int rc = 0;

    // Get a list of monitor names that are running
    char* monitors = ssh_node_output_f(m, false, &rc, "maxctrl --tsv list monitors|grep Running|cut -f 1");
    std::istringstream is;
    is.str(monitors);
    free(monitors);

    std::string name;
    std::unordered_map<std::string, int> ticks;

    // For each monitor, store the current monitor tick
    while (std::getline(is, name))
    {
        ticks[name] = get_ticks(name);
    }

    for (auto a : ticks)
    {
        // Wait a maximum of 60 seconds for a single monitor interval
        for (int i = 0; i < 60; i++)
        {
            int start = a.second;
            int end = get_ticks(a.first);

            if (start == -1 || end == -1 || end - start >= intervals)
            {
                break;
            }

            sleep(1);
        }
    }

    ssh_node_f(m, false, "for ((i=0;i<%d;i++)); do maxctrl api get maxscale/debug/monitor_wait; done", intervals);
}

@@ -724,7 +724,8 @@ void TestConnections::init_maxscale(int m)
                 "cp maxscale.cnf %s;"
                 "iptables -F INPUT;"
                 "rm -rf %s/*.log /tmp/core* /dev/shm/* /var/lib/maxscale/maxscale.cnf.d/ /var/lib/maxscale/*;"
                 "%s",
                 "%s"
                 "maxctrl api get maxscale/debug/monitor_wait",
                 maxscales->maxscale_cnf[m],
                 maxscales->maxscale_log_dir[m],
                 maxscale::start ? "service maxscale restart;" : "");

@@ -20,6 +20,7 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <chrono>
#include <string>
#include <sstream>
#include <set>
@@ -2487,6 +2488,34 @@ bool monitor_set_disk_space_threshold(MXS_MONITOR* monitor, const char* disk_space_threshold)
    return rv;
}

void monitor_debug_wait()
{
    using namespace std::chrono;
    std::lock_guard<std::mutex> guard(monLock);
    std::map<MXS_MONITOR*, uint64_t> ticks;

    // Get tick values for all monitors
    for (MXS_MONITOR* mon = allMonitors; mon; mon = mon->next)
    {
        ticks[mon] = mxb::atomic::load(&mon->ticks);
    }

    // Wait for all running monitors to advance at least one tick
    for (MXS_MONITOR* mon = allMonitors; mon; mon = mon->next)
    {
        if (mon->state == MONITOR_STATE_RUNNING)
        {
            auto start = steady_clock::now();

            while (ticks[mon] == mxb::atomic::load(&mon->ticks)
                   && steady_clock::now() - start < seconds(60))
            {
                std::this_thread::sleep_for(milliseconds(100));
            }
        }
    }
}
namespace maxscale
{

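A single call to monitor_debug_wait() returns once every running monitor has advanced at least one tick, or after its internal 60 second per-monitor timeout. A caller that needs several monitor intervals therefore invokes it once per interval, which is what the test framework's loop over the endpoint amounts to. A minimal sketch, where the wrapper name debug_wait_n_intervals is hypothetical and not part of this commit:

    // Hypothetical convenience wrapper: wait for n monitor passes by calling
    // the new helper once per interval.
    void debug_wait_n_intervals(int n)
    {
        for (int i = 0; i < n; i++)
        {
            monitor_debug_wait();   // blocks until all running monitors tick once
        }
    }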

@@ -733,6 +733,11 @@ HttpResponse cb_unix_user(const HttpRequest& request)
    return HttpResponse(MHD_HTTP_OK, admin_user_to_json(request.host(), user.c_str(), USER_TYPE_UNIX));
}

HttpResponse cb_monitor_wait(const HttpRequest& request)
{
    monitor_debug_wait();
    return HttpResponse(MHD_HTTP_OK);
}

HttpResponse cb_create_user(const HttpRequest& request)
{
    mxb_assert(request.get_json());
@@ -976,6 +981,9 @@ public:
        m_get.push_back(SResource(new Resource(cb_inet_user, 3, "users", "inet", ":inetuser")));
        m_get.push_back(SResource(new Resource(cb_unix_user, 3, "users", "unix", ":unixuser")));

        /** Debug utility endpoints */
        m_get.push_back(SResource(new Resource(cb_monitor_wait, 3, "maxscale", "debug", "monitor_wait")));

        /** Create new resources */
        m_post.push_back(SResource(new Resource(cb_create_server, 1, "servers")));
        m_post.push_back(SResource(new Resource(cb_create_monitor, 1, "monitors")));