MXS-2652 Add test

Test case added to the existing mysqlmon_failover_no_slaves test: verify that a server's Maintenance flag persists when the server goes down and comes back up (MXS-2652).
Esa Korhonen
2019-09-05 09:57:44 +03:00
parent 00feb61b23
commit ff2048625b
2 changed files with 75 additions and 8 deletions

@@ -246,7 +246,7 @@ add_test_executable(mysqlmon_switchover.cpp mysqlmon_switchover mysqlmon_switcho
# MySQL Monitor switchover with bad master
add_test_executable(mysqlmon_switchover_bad_master.cpp mysqlmon_switchover_bad_master mysqlmon_switchover_bad_master LABELS mysqlmon REPL_BACKEND)
# MySQL Monitor manual failover with no valid slaves, uses config of mysqlmon_failover_auto
# MySQL Monitor manual failover with no valid slaves, uses config of mysqlmon_failover_auto. Also MXS-2652.
add_test_executable(mysqlmon_failover_no_slaves.cpp mysqlmon_failover_no_slaves mysqlmon_failover_auto LABELS mysqlmon REPL_BACKEND)
# MySQL Monitor Rejoin (good) Test

@@ -11,8 +11,21 @@
 * Public License.
 */

// MXS-2652: https://jira.mariadb.org/browse/MXS-2652
#include "testconnections.h"
#include "fail_switch_rejoin_common.cpp"

using std::string;

namespace
{
void expect_maintenance(TestConnections& test, std::string server_name, bool value);
void expect_running(TestConnections& test, std::string server_name, bool value);

const string running = "Running";
const string down = "Down";
const string maint = "Maintenance";
}

int main(int argc, char** argv)
{
@@ -38,13 +51,33 @@ int main(int argc, char** argv)
    test.repl->stash_server_settings(2);
    test.repl->disable_server_setting(2, "log-bin");
    test.repl->start_node(2, (char*) "");

    // Slave 3. Stop this slave as well. The monitor is quick to turn failover off if a slave has another
    // slave connection or if a slave connection is not using gtid, so those situations are hard to test.
    // This may change later when failover support for such situations is added, so add a more interesting
    // test for node 3 then.
    test.try_query(nodes[3], "STOP SLAVE");
    test.maxscales->wait_for_monitor();

    // Slave 3. Set node to maintenance, then shut it down. Simultaneously check issue
    // MXS-2652: Maintenance flag should persist when server goes down & comes back up.
    int server_ind = 3;
    int server_num = server_ind + 1;
    string server_name = "server" + std::to_string(server_num);
    expect_maintenance(test, server_name, false);

    if (test.ok())
    {
        test.maxscales->ssh_node_f(0, true, "maxadmin set server %s maintenance", server_name.c_str());
        test.maxscales->wait_for_monitor();
        expect_running(test, server_name, true);
        expect_maintenance(test, server_name, true);

        test.repl->stop_node(server_ind);
        test.maxscales->wait_for_monitor();
        expect_running(test, server_name, false);
        expect_maintenance(test, server_name, true);

        test.repl->start_node(server_ind);
        test.maxscales->wait_for_monitor();
        expect_running(test, server_name, true);
        expect_maintenance(test, server_name, true);
    }

    get_output(test);
    test.tprintf(LINE);
@@ -62,7 +95,41 @@ int main(int argc, char** argv)
    test.repl->stop_node(2);
    test.repl->restore_server_settings(2);
    test.repl->start_node(2, (char*) "");
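
    // Restore slave 3: restart replication and clear the maintenance flag set earlier.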
    test.try_query(nodes[3], "START SLAVE;");
    test.maxscales->ssh_node_f(0, true, "maxadmin clear server %s maintenance", server_name.c_str());

    test.repl->fix_replication();
    return test.global_result;
}

namespace
{
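// Check via MaxScale whether the server is Running (value == true) or Down (value == false).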
void expect_running(TestConnections& test, std::string server_name, bool value)
{
    auto states = test.get_server_status(server_name.c_str());
    if (value)
    {
        test.expect(states.count(running) == 1, "'%s' is not running when it should be.",
                    server_name.c_str());
    }
    else
    {
        test.expect(states.count(down) == 1, "'%s' is not down when it should be.",
                    server_name.c_str());
    }
}
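
// Check via MaxScale whether the server's Maintenance flag matches 'value'.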
void expect_maintenance(TestConnections& test, std::string server_name, bool value)
{
    auto states = test.get_server_status(server_name.c_str());
    if (value)
    {
        test.expect(states.count(maint) == 1, "'%s' is not in maintenance when it should be.",
                    server_name.c_str());
    }
    else
    {
        test.expect(states.count(maint) == 0, "'%s' is in maintenance when it should not be.",
                    server_name.c_str());
    }
}
}