From 2440b48ccca70bdd9f3594714e42d6b194e85d30 Mon Sep 17 00:00:00 2001 From: Timofey Turenko Date: Fri, 22 Feb 2019 16:12:57 +0200 Subject: [PATCH 01/15] Mxs 2236 own longtest (#189) MXS-2236 Add own long test and the possibility to run tests under Valgrind The long test executes INSERT queries, transactions and prepared statements in parallel to create a mixed load on MaxScale in order to catch crashes and leaks. The test is not included in the ctest scope and should be executed manually. For BuildBot (and also for run_test.sh) 'test_set' should be set to 'NAME# ./long_test'. The time to run the test is defined by the 'long_test_time' variable (in seconds). The possibility to run MaxScale under Valgrind is also added. To run MaxScale under Valgrind, the 'use_valgrind=yes' variable has to be defined. --- maxscale-system-test/CMakeLists.txt | 6 +- maxscale-system-test/change_user.cpp | 1 - .../cnf/maxscale.cnf.template.replication | 33 +- maxscale-system-test/long_test.cpp | 352 ++++++++++++++++++ maxscale-system-test/maxscales.cpp | 53 ++- maxscale-system-test/maxscales.h | 13 +- maxscale-system-test/mdbci/run_test.sh | 12 +- .../mdbci/templates/nogalera.json.template | 4 +- maxscale-system-test/testconnections.cpp | 97 +++-- maxscale-system-test/testconnections.h | 7 +- 10 files changed, 515 insertions(+), 63 deletions(-) create mode 100644 maxscale-system-test/long_test.cpp diff --git a/maxscale-system-test/CMakeLists.txt b/maxscale-system-test/CMakeLists.txt index 89d4c0805..a1d63a75c 100644 --- a/maxscale-system-test/CMakeLists.txt +++ b/maxscale-system-test/CMakeLists.txt @@ -926,9 +926,13 @@ add_test_executable_notest(delete_rds.cpp delete_rds replication LABELS EXTERN_B # a tool to create RDS Aurora cluster add_test_executable_notest(create_rds.cpp create_rds replication LABELS EXTERN_BACKEND) -# start sysbench ageints RWSplit for infinite execution +# start sysbench against RWSplit for infinite execution add_test_executable_notest(long_sysbench.cpp long_sysbench replication LABELS readwritesplit REPL_BACKEND) +# own long test +# 'long_test_time' variable defines time of execution (in seconds) +add_test_executable_notest(long_test.cpp long_test replication LABELS readwritesplit REPL_BACKEND) + # test effect of local_address in configuration file add_test_executable(local_address.cpp local_address local_address LABELS REPL_BACKEND) diff --git a/maxscale-system-test/change_user.cpp b/maxscale-system-test/change_user.cpp index d3a1277ce..e4c48595f 100644 --- a/maxscale-system-test/change_user.cpp +++ b/maxscale-system-test/change_user.cpp @@ -62,7 +62,6 @@ int main(int argc, char *argv[]) execute_query_silent(test.repl->nodes[0], "DROP USER user@'%%';"); execute_query_silent(test.repl->nodes[0], "DROP TABLE test.t1"); test.repl->disconnect(); - return test.global_result; } diff --git a/maxscale-system-test/cnf/maxscale.cnf.template.replication b/maxscale-system-test/cnf/maxscale.cnf.template.replication index e94023239..0ffc5e537 100755 --- a/maxscale-system-test/cnf/maxscale.cnf.template.replication +++ b/maxscale-system-test/cnf/maxscale.cnf.template.replication @@ -1,57 +1,58 @@ [maxscale] threads=###threads### +#log_info=1 -[MySQL Monitor] +[MySQL-Monitor] type=monitor module=mysqlmon ###repl51### servers=server1,server2,server3,server4 user=maxskysql -passwd=skysql +password=skysql monitor_interval=1000 detect_stale_master=false detect_standalone_master=false -[RW Split Router] +[RW-Split-Router] type=service router=readwritesplit servers=server1,server2,server3,server4 user=maxskysql -passwd=skysql 
-router_options=slave_selection_criteria=LEAST_GLOBAL_CONNECTIONS +password=skysql +slave_selection_criteria=LEAST_GLOBAL_CONNECTIONS max_slave_connections=1 -[Read Connection Router Slave] +[Read-Connection-Router-Slave] type=service router=readconnroute router_options=slave servers=server1,server2,server3,server4 user=maxskysql -passwd=skysql +password=skysql -[Read Connection Router Master] +[Read-Connection-Router-Master] type=service router=readconnroute router_options=master servers=server1,server2,server3,server4 user=maxskysql -passwd=skysql +password=skysql -[RW Split Listener] +[RW-Split-Listener] type=listener -service=RW Split Router +service=RW-Split-Router protocol=MySQLClient port=4006 -[Read Connection Listener Slave] +[Read-Connection-Listener-Slave] type=listener -service=Read Connection Router Slave +service=Read-Connection-Router-Slave protocol=MySQLClient port=4009 -[Read Connection Listener Master] +[Read-Connection-Listener-Master] type=listener -service=Read Connection Router Master +service=Read-Connection-Router-Master protocol=MySQLClient port=4008 @@ -59,7 +60,7 @@ port=4008 type=service router=cli -[CLI Listener] +[CLI-Listener] type=listener service=CLI protocol=maxscaled diff --git a/maxscale-system-test/long_test.cpp b/maxscale-system-test/long_test.cpp new file mode 100644 index 000000000..ce1c9b111 --- /dev/null +++ b/maxscale-system-test/long_test.cpp @@ -0,0 +1,352 @@ +/** + * @file long_test.cpp Run different load for long long execution (long load test) + * + * time to execute test is defined by 'long_test_time' environmental variable + * e.g. 'long_test_time=3600 ./long_test' + */ + + +#include "testconnections.h" +#include "big_transaction.h" + +typedef void * FUNC(void * ptr); + +FUNC query_thread; +FUNC prepared_stmt_thread; +FUNC transaction_thread; +FUNC short_session_thread; +FUNC read_thread; + +TestConnections * Test; + +const int threads_type_num = 4; +int threads_num[threads_type_num]; +const int max_threads_num = 32; +int port; +char * IP; + +typedef struct +{ + int id; + bool exit_flag; + char * sql; +} t_data; + +t_data data[threads_type_num][max_threads_num]; + +int main(int argc, char *argv[]) +{ + Test = new TestConnections(argc, argv); + int i, j; + + Test->tprintf("***************************************************\n" + "This is long running test to catch memory leaks and crashes\n" + "please define 'long_test_time' variable to set running time (seconds)\n" + "***************************************************\n"); + + pthread_t thread_id[threads_type_num][max_threads_num]; + FUNC * thread[threads_type_num]; + thread[0] = query_thread; + threads_num[0] = 1; + thread[1] = transaction_thread; + threads_num[1] = 1; + thread[2] = prepared_stmt_thread; + threads_num[2] = 1; + thread[3] = read_thread; + threads_num[3] = 1; + + //thread[4] = short_session_thread; + //threads_num[4] = 4; + + + port = Test->maxscales->rwsplit_port[0]; + IP = Test->maxscales->IP[0]; + + //port = 3306; + //IP = Test->repl->IP[0]; + + + Test->set_timeout(60); + Test->tprintf("Set big maximums\n"); + + Test->repl->execute_query_all_nodes((char *) "set global max_connections = 300000;"); + Test->repl->execute_query_all_nodes((char *) "set global max_connect_errors = 10000000;"); + Test->repl->execute_query_all_nodes((char *) "set global expire_logs_days = 1;"); + + + + Test->maxscales->connect_rwsplit(0); + + Test->repl->execute_query_all_nodes( (char *) "set global max_allowed_packet=100000000"); + + Test->tprintf("create t1 in `test` DB\n"); + 
create_t1(Test->maxscales->conn_rwsplit[0]); + + execute_query(Test->maxscales->conn_rwsplit[0], "DROP DATABASE test1"); + execute_query(Test->maxscales->conn_rwsplit[0], "DROP DATABASE test2"); + Test->tprintf("create`test1` DB\n"); + Test->try_query(Test->maxscales->conn_rwsplit[0], "CREATE DATABASE test1"); + + Test->tprintf("create`test2` DB\n"); + Test->try_query(Test->maxscales->conn_rwsplit[0], "CREATE DATABASE test2"); + + Test->tprintf("Waiting for slaves after DB creation\n"); + Test->repl->sync_slaves(0); + //sleep(15); + Test->tprintf("...ok\n"); + + Test->tprintf("create t1 in `test1` DB\n"); + Test->tprintf("... use\n"); + Test->try_query(Test->maxscales->conn_rwsplit[0], "USE test1"); + Test->tprintf("... create\n"); + create_t1(Test->maxscales->conn_rwsplit[0]); + + Test->tprintf("create t1 in `test2` DB\n"); + Test->tprintf("... use\n"); + Test->try_query(Test->maxscales->conn_rwsplit[0], "USE test2"); + Test->tprintf("... create\n"); + create_t1(Test->maxscales->conn_rwsplit[0]); + + Test->tprintf("Waiting for slaves after tables creation\n"); + Test->repl->sync_slaves(0); + + Test->tprintf("...ok\n"); + + Test->set_timeout(60); + // Create threads + Test->tprintf("Starting threads\n"); + + for (j = 0; j < threads_type_num; j++) + { + for (i = 0; i < threads_num[j]; i++) + { + data[j][i].sql = (char*) malloc((i +1) * 32 * 14 + 32); + create_insert_string(data[j][i].sql, (i + 1) * 32 , i); + Test->tprintf("sqL %d: %d\n", i, strlen(data[j][i].sql)); + data[j][i].exit_flag = false; + data[j][i].id = i; + pthread_create(&thread_id[j][i], NULL, thread[j], &data[j][i]); + } + } + + Test->set_log_copy_interval(100); + + Test->stop_timeout(); + + char * env = getenv("long_test_time"); + int test_time = 0; + if (env != NULL) + { + sscanf(env, "%d", &test_time); + } + if (test_time <= 0) + { + test_time = 3600; + Test->tprintf("´long_test_time´ variable is not defined, set test_time to %d\n", test_time); + } + Test->tprintf("´test_time´ is %d\n", test_time); + sleep(test_time); + + Test->set_timeout(180); + + Test->tprintf("Stopping threads\n"); + + for (j = 0; j < threads_type_num; j++) + { + for (i = 0; i < threads_num[j]; i++) + { + data[j][i].exit_flag = true; + pthread_join(thread_id[j][i], NULL); + } + } + + //Test->tprintf("Checking if MaxScale is still alive!\n"); + //fflush(stdout); + //Test->check_maxscale_alive(0); + + Test->maxscales->stop_maxscale(0); + + int rval = Test->global_result; + delete Test; + return rval; +} + +void try_and_reconnect(MYSQL * conn, char * db, char * sql) +{ + if (execute_query(conn, sql)) + { + Test->tprintf("reconnect"); + mysql_close(conn); + conn = open_conn_db_timeout(port, + IP, + db, + Test->repl->user_name, + Test->repl->password, + 20, + Test->ssl); + } +} + +void *query_thread(void *ptr ) +{ + MYSQL * conn; + t_data * data = (t_data *) ptr; + int inserts_until_optimize = 100000; + int tn = 0; + conn = open_conn_db_timeout(port, + IP, + (char *) "test", + Test->repl->user_name, + Test->repl->password, + 20, + Test->ssl); + while (!data->exit_flag) + { + + //Test->try_query(conn, data->sql); + try_and_reconnect(conn, (char *) "test", data->sql); + + if (tn >= inserts_until_optimize) + { + tn = 0; + Test->tprintf("Removing everything from table in the queries thread"); + try_and_reconnect(conn, (char *) "test", (char *) "DELETE FROM t1"); + Test->tprintf("Optimizing table in the queries thread"); + try_and_reconnect(conn, (char *) "test", (char *) "OPTIMIZE TABLE t1"); + } + tn++; + } + mysql_close(conn); + return NULL; +} + +void 
*read_thread(void *ptr ) +{ + MYSQL * conn; + t_data * data = (t_data *) ptr; + int i = 0; + char sql[256]; + conn = open_conn_db_timeout(port, + IP, + (char *) "test", + Test->repl->user_name, + Test->repl->password, + 20, + Test->ssl); + while (!data->exit_flag) + { + sprintf(sql, "SELECT * FROM t1 WHERE fl=%d", data->id); + try_and_reconnect(conn, (char *) "test", sql); + i++; + } + mysql_close(conn); + return NULL; +} + +void *transaction_thread(void *ptr ) +{ + MYSQL * conn; + int transactions_until_optimize = 10; + int tn = 0; + t_data * data = (t_data *) ptr; + conn = open_conn_db_timeout(port, + IP, + (char *) "test1", + Test->repl->user_name, + Test->repl->password, + 20, + Test->ssl); + while (!data->exit_flag) + { + + try_and_reconnect(conn, (char *) "test1", (char *) "START TRANSACTION"); + try_and_reconnect(conn, (char *) "test1", (char *) "SET autocommit = 0"); + + int stmt_num = 200000 / strlen(data->sql); + for (int i = 0; i < stmt_num; i++) + { + try_and_reconnect(conn, (char *) "test1", data->sql); + } + Test->try_query(conn, (char *) "COMMIT"); + if (tn >= transactions_until_optimize) + { + tn = 0; + Test->tprintf("Removing everything from table in the transactions thread"); + try_and_reconnect(conn, (char *) "test1", (char *) "DELETE FROM t1"); + Test->tprintf("Optimizing table in the transactions thread"); + try_and_reconnect(conn, (char *) "test1", (char *) "OPTIMIZE TABLE t1"); + } + tn++; + } + mysql_close(conn); + + conn = open_conn_db_timeout(port, + IP, + (char *) "", + Test->maxscales->user_name, + Test->maxscales->password, + 20, + Test->ssl); + Test->try_query(conn, "DROP DATABASE test1"); + mysql_close(conn); + return NULL; +} + +void *short_session_thread(void *ptr ) +{ + MYSQL * conn; + t_data * data = (t_data *) ptr; + while (!data->exit_flag) + { + conn = open_conn_db_timeout(port, + IP, + (char *) "test", + Test->repl->user_name, + Test->repl->password, + 20, + Test->ssl); + mysql_close(conn); + } + return NULL; +} + + +void *prepared_stmt_thread(void *ptr ) +{ + MYSQL * conn; + t_data * data = (t_data *) ptr; + char sql[256]; + conn = open_conn_db_timeout(port, + IP, + (char *) "test2", + Test->repl->user_name, + Test->repl->password, + 20, + Test->ssl); + while (!data->exit_flag) + { + sprintf(sql, "PREPARE stmt%d FROM 'SELECT * FROM t1 WHERE fl=@x;';", data->id); + try_and_reconnect(conn, (char *) "test2", sql); + try_and_reconnect(conn, (char *) "test2", (char *) "SET @x = 3;"); + sprintf(sql, "EXECUTE stmt%d", data->id); + try_and_reconnect(conn, (char *) "test2", sql); + try_and_reconnect(conn, (char *) "test2", (char *) "SET @x = 4;"); + try_and_reconnect(conn, (char *) "test2", sql); + try_and_reconnect(conn, (char *) "test2", (char *) "SET @x = 400;"); + try_and_reconnect(conn, (char *) "test2", sql); + sprintf(sql, "DEALLOCATE PREPARE stmt%d", data->id); + try_and_reconnect(conn, (char *) "test2", sql); + } + mysql_close(conn); + + conn = open_conn_db_timeout(port, + IP, + (char *) "", + Test->maxscales->user_name, + Test->maxscales->password, + 20, + Test->ssl); + Test->try_query(conn, "DROP DATABASE test2"); + mysql_close(conn); + return NULL; +} diff --git a/maxscale-system-test/maxscales.cpp b/maxscale-system-test/maxscales.cpp index 475e6800f..9c890c37d 100644 --- a/maxscale-system-test/maxscales.cpp +++ b/maxscale-system-test/maxscales.cpp @@ -2,12 +2,24 @@ #include #include -Maxscales::Maxscales(const char *pref, const char *test_cwd, bool verbose) +Maxscales::Maxscales(const char *pref, const char *test_cwd, bool verbose, bool 
use_valgrind) { strcpy(prefix, pref); this->verbose = verbose; + this->use_valgrind = use_valgrind; + valgring_log_num = 0; strcpy(test_dir, test_cwd); read_env(); + if (use_valgrind) + { + for (int i = 0; i < N; i++) + { + ssh_node_f(i, true, "yum install -y valgrind gdb 2>&1", maxscale_log_dir[i]); + ssh_node_f(i, true, "apt install -y --force-yes valgrind gdb 2>&1", maxscale_log_dir[i]); + ssh_node_f(i, true, "zypper -n install valgrind gdb 2>&1", maxscale_log_dir[i]); + ssh_node_f(i, true, "rm -rf /var/cache/maxscale/maxscale.lock"); + } + } } int Maxscales::read_env() @@ -208,21 +220,54 @@ int Maxscales::close_maxscale_connections(int m) int Maxscales::restart_maxscale(int m) { - int res = ssh_node(m, "service maxscale restart", true); + int res; + if (use_valgrind) + { + res = stop_maxscale(m); + res += start_maxscale(m); + } + else + { + res =ssh_node(m, "service maxscale restart", true); + } fflush(stdout); return res; } int Maxscales::start_maxscale(int m) { - int res = ssh_node(m, "service maxscale start", true); + int res; + if (use_valgrind) + { + res = ssh_node_f(m, false, + "sudo --user=maxscale valgrind --leak-check=full --show-leak-kinds=all " + "--log-file=/%s/valgrind%02d.log --trace-children=yes " + "--track-origins=yes /usr/bin/maxscale", maxscale_log_dir[m], valgring_log_num); + valgring_log_num++; + } + else + { + res = ssh_node(m, "service maxscale start", true); + } fflush(stdout); return res; } int Maxscales::stop_maxscale(int m) { - int res = ssh_node(m, "service maxscale stop", true); + int res; + if (use_valgrind) + { + res = ssh_node_f(m, true, "sudo kill $(pidof valgrind) 2>&1 > /dev/null"); + if ((res != 0) || atoi(ssh_node_output(m, "pidof valgrind", true, &res)) > 0) + { + res = ssh_node_f(m, true, "sudo kill -9 $(pidof valgrind) 2>&1 > /dev/null"); + } + } + else + { + res = ssh_node(m, "service maxscale stop", true); + } fflush(stdout); return res; } diff --git a/maxscale-system-test/maxscales.h b/maxscale-system-test/maxscales.h index 5d9a9518f..815d5ef2a 100644 --- a/maxscale-system-test/maxscales.h +++ b/maxscale-system-test/maxscales.h @@ -15,7 +15,7 @@ public: READCONN_SLAVE }; - Maxscales(const char *pref, const char *test_cwd, bool verbose); + Maxscales(const char *pref, const char *test_cwd, bool verbose, bool use_valgrind); int read_env(); /** @@ -272,6 +272,17 @@ public: */ void wait_for_monitor(int intervals = 1, int m = 0); + /** + * @brief use_valrind if true Maxscale will be executed under Valgrind + */ + bool use_valgrind; + + /** + * @brief valgring_log_num Counter for Maxscale restarts to avoid Valgrind log overwriting + */ + int valgring_log_num; + + }; #endif // MAXSCALES_H diff --git a/maxscale-system-test/mdbci/run_test.sh b/maxscale-system-test/mdbci/run_test.sh index dfb35d523..95ce814f5 100755 --- a/maxscale-system-test/mdbci/run_test.sh +++ b/maxscale-system-test/mdbci/run_test.sh @@ -40,6 +40,10 @@ # $test_set - parameters to be send to 'ctest' (e.g. '-I 1,100', # '-LE UNSTABLE' +# if $test_set starts from 'NAME#' ctest will not be executed, +# the value of $test_set after 'NAME#' is used as bash command +# line +# example: '#NAME long_test_time=3600 ./long_test' export vm_memory=${vm_memory:-"2048"} export dir=`pwd` @@ -71,11 +75,15 @@ if [ $res == 0 ] ; then set -x echo ${test_set} | grep "NAME#" if [ $? == 0 ] ; then - named_test=`echo ${test_set} | sed "s/NAME#//" | sed "s/ //g"` + named_test=`echo ${test_set} | sed "s/NAME#//"` + echo ${named_test} | grep "\./" + if [ $? 
!= 0 ] ; then + named_test="./"${named_test} + fi fi if [ ! -z "${named_test}" ] ; then - ./${named_test} + eval ${named_test} else ./check_backend if [ $? != 0 ]; then diff --git a/maxscale-system-test/mdbci/templates/nogalera.json.template b/maxscale-system-test/mdbci/templates/nogalera.json.template index 14b654a61..e8b11d30b 100644 --- a/maxscale-system-test/mdbci/templates/nogalera.json.template +++ b/maxscale-system-test/mdbci/templates/nogalera.json.template @@ -52,9 +52,9 @@ } }, - "maxscale" : + "maxscale_000" : { - "hostname" : "maxscale", + "hostname" : "maxscale1", "box" : "${box}", "memory_size" : "${vm_memory}", "product" : { diff --git a/maxscale-system-test/testconnections.cpp b/maxscale-system-test/testconnections.cpp index 0891af940..e837d108e 100644 --- a/maxscale-system-test/testconnections.cpp +++ b/maxscale-system-test/testconnections.cpp @@ -87,7 +87,8 @@ TestConnections::TestConnections(int argc, char *argv[]): no_galera(false), no_vm_revert(true), threads(4), - use_ipv6(false) + use_ipv6(false), + use_valgrind(false) { signal_set(SIGSEGV, sigfatal_handler); signal_set(SIGABRT, sigfatal_handler); @@ -239,7 +240,7 @@ TestConnections::TestConnections(int argc, char *argv[]): repl->take_snapshot_command = take_snapshot_command; repl->revert_snapshot_command = revert_snapshot_command; - maxscales = new Maxscales("maxscale", test_dir, verbose); + maxscales = new Maxscales("maxscale", test_dir, verbose, use_valgrind); maxscales->use_ipv6 = use_ipv6; maxscales->ssl = ssl; @@ -347,6 +348,16 @@ TestConnections::~TestConnections() //galera->disable_ssl(); } + if (use_valgrind) + { + // stop all Maxscales to get proper Valgrind logs + for (int i = 0; i < maxscales->N; i++) + { + stop_maxscale(i); + } + sleep(15); // sleep to let logs be written do disks + } + copy_all_logs(); /* Temporary disable snapshot revert due to Galera failures @@ -420,7 +431,6 @@ void TestConnections::expect(bool result, const char *format, ...) void TestConnections::read_env() { - char *env; if (verbose) @@ -428,7 +438,6 @@ void TestConnections::read_env() printf("Reading test setup configuration from environmental variables\n"); } - //env = getenv("get_logs_command"); if (env != NULL) {sprintf(get_logs_command, "%s", env);} //env = getenv("test_dir"); if (env != NULL) {sprintf(test_dir, "%s", env);} @@ -531,6 +540,12 @@ void TestConnections::read_env() { no_vm_revert = false; } + + env = getenv("use_valgrind"); + if ((env != NULL) && ((strcasecmp(env, "yes") == 0) || (strcasecmp(env, "true") == 0) )) + { + use_valgrind = true; + } } void TestConnections::print_env() @@ -683,22 +698,18 @@ void TestConnections::init_maxscale(int m) "chmod a+x %s;" "%s" "iptables -F INPUT;" - "rm -f %s/maxscale.log;" - "rm -f %s/maxscale1.log;" - "rm -rf /tmp/core* /dev/shm/* /var/lib/maxscale/maxscale.cnf.d/ /var/lib/maxscale/*;" - "%s", + "rm -f %s/*;" + "rm -rf /tmp/core* /dev/shm/* /var/lib/maxscale/maxscale.cnf.d/ /var/lib/maxscale/*", maxscales->access_homedir[m], maxscales->access_homedir[m], maxscales->access_homedir[m], maxscale::start ? "killall -9 maxscale;" : "", - maxscales->maxscale_log_dir[m], - maxscales->maxscale_log_dir[m], - maxscale::start ? 
"service maxscale restart" : ""); - + maxscales->maxscale_log_dir[m]); fflush(stdout); - if (maxscale::start) { + maxscales->restart_maxscale(m); + fflush(stdout); int waits; for (waits = 0; waits < 15; waits++) @@ -723,10 +734,17 @@ int TestConnections::copy_mariadb_logs(Mariadb_nodes * repl, char * prefix) int local_result = 0; char * mariadb_log; FILE * f; - int i; + int i, j; int exit_code; char str[4096]; + const int log_retrive_command_num = 3; + const char * log_retrive_command[log_retrive_command_num] = { + "cat /var/lib/mysql/*.err", + "cat /var/log/syslog | grep mysql", + "cat /var/log/messages | grep mysql" + }; + if (repl == NULL) return local_result; sprintf(str, "mkdir -p LOGS/%s", test_name); @@ -735,20 +753,23 @@ int TestConnections::copy_mariadb_logs(Mariadb_nodes * repl, char * prefix) { if (strcmp(repl->IP[i], "127.0.0.1") != 0) // Do not copy MariaDB logs in case of local backend { - mariadb_log = repl->ssh_node_output(i, (char *) "cat /var/lib/mysql/*.err", true, &exit_code); - sprintf(str, "LOGS/%s/%s%d_mariadb_log", test_name, prefix, i); - f = fopen(str, "w"); - if (f != NULL) + for (j = 0; j < log_retrive_command_num; j++) { - fwrite(mariadb_log, sizeof(char), strlen(mariadb_log), f); - fclose(f); + mariadb_log = repl->ssh_node_output(i, log_retrive_command[j], true, &exit_code); + sprintf(str, "LOGS/%s/%s%d_mariadb_log_%d", test_name, prefix, i, j); + f = fopen(str, "w"); + if (f != NULL) + { + fwrite(mariadb_log, sizeof(char), strlen(mariadb_log), f); + fclose(f); + } + else + { + printf("Error writing MariaDB log"); + local_result = 1; + } + free(mariadb_log); } - else - { - printf("Error writing MariaDB log"); - local_result = 1; - } - free(mariadb_log); } } return local_result; @@ -1054,9 +1075,9 @@ bool TestConnections::replicate_from_master(int m) repl->execute_query_all_nodes("STOP SLAVE"); /** Clean up MaxScale directories */ - maxscales->ssh_node(m, "service maxscale stop", true); + maxscales->stop_maxscale(m); prepare_binlog(m); - maxscales->ssh_node(m, "service maxscale start", true); + maxscales->start_maxscale(m); char log_file[256] = ""; char log_pos[256] = "4"; @@ -1308,7 +1329,16 @@ int TestConnections::find_connected_slave1(int m) int TestConnections::check_maxscale_processes(int m, int expected) { int exit_code; - char* maxscale_num = maxscales->ssh_node_output(m, "ps -C maxscale | grep maxscale | wc -l", false, + const char * ps_cmd; + if (use_valgrind) + { + ps_cmd = "ps ax | grep valgrind | grep maxscale | grep -v grep | wc -l"; + } + else + { + ps_cmd = "ps -C maxscale | grep maxscale | wc -l"; + } + char* maxscale_num = maxscales->ssh_node_output(m, ps_cmd, false, &exit_code); if ((maxscale_num == NULL) || (exit_code != 0)) { @@ -1322,9 +1352,9 @@ int TestConnections::check_maxscale_processes(int m, int expected) if (atoi(maxscale_num) != expected) { - tprintf("%s maxscale processes detected, trying agin in 5 seconds\n", maxscale_num); + tprintf("%s maxscale processes detected, trying again in 5 seconds\n", maxscale_num); sleep(5); - maxscale_num = maxscales->ssh_node_output(m, "ps -C maxscale | grep maxscale | wc -l", false, &exit_code); + maxscale_num = maxscales->ssh_node_output(m, ps_cmd, false, &exit_code); if (atoi(maxscale_num) != expected) { add_result(1, "Number of MaxScale processes is not %d, it is %s\n", expected, maxscale_num); @@ -1336,7 +1366,7 @@ int TestConnections::check_maxscale_processes(int m, int expected) int TestConnections::stop_maxscale(int m) { - int res = maxscales->ssh_node(m, "service maxscale stop", true); + 
int res = maxscales->stop_maxscale(m); check_maxscale_processes(m, 0); fflush(stdout); return res; @@ -1344,7 +1374,7 @@ int TestConnections::stop_maxscale(int m) int TestConnections::start_maxscale(int m) { - int res = maxscales->ssh_node(m, "service maxscale start", true); + int res = maxscales->start_maxscale(m); check_maxscale_processes(m, 1); fflush(stdout); return res; @@ -1370,7 +1400,6 @@ int TestConnections::check_maxscale_alive(int m) maxscales->close_maxscale_connections(m); add_result(global_result - gr, "Maxscale is not alive\n"); stop_timeout(); - check_maxscale_processes(m, 1); return global_result - gr; diff --git a/maxscale-system-test/testconnections.h b/maxscale-system-test/testconnections.h index 312607c1f..46dbc4a65 100644 --- a/maxscale-system-test/testconnections.h +++ b/maxscale-system-test/testconnections.h @@ -477,8 +477,6 @@ public: */ int list_dirs(int m = 0); - - /** * @brief make_snapshot Makes a snapshot for all running VMs * @param snapshot_name name of created snapshot @@ -514,6 +512,11 @@ public: int start_maxscale(int m = 0); void process_template(const char *src, const char *dest = "/etc/maxscale.cnf"); + /** + * @brief use_valrind if true Maxscale will be executed under Valgrind + */ + bool use_valgrind; + private: void report_result(const char *format, va_list argp); }; From b0efcea3f6149388c37f96e48f1b982a0629f710 Mon Sep 17 00:00:00 2001 From: Timofey Turenko Date: Mon, 25 Feb 2019 14:22:32 +0200 Subject: [PATCH 02/15] Remove vagrant lock from build.sh and run_test.sh (#190) vagrant_lock was created in order to prevent parallel execution of two 'Vagrant up' operations in parallel. Previously it was necessary due to Vagrant internal Chef issue. Now several bugs in Vagrant are fixed and MDBCI uses own Chef to provision nodes for builds and tests. There is no need to wait for previous Vagrant run finished, removing all waiting from all scripts. 'rm vagrant_lock is still present to remove locks created by previous versions of build and test scripts- --- BUILD/mdbci/build.sh | 11 ----------- BUILD/mdbci/upgrade_test.sh | 11 ++--------- .../Documentation/LOCAL_DEPLOYMENT.md | 7 ------- maxscale-system-test/JENKINS.md | 4 ---- maxscale-system-test/mdbci/create_config.sh | 13 ++----------- maxscale-system-test/mdbci/run_test.sh | 4 ++-- 6 files changed, 6 insertions(+), 44 deletions(-) diff --git a/BUILD/mdbci/build.sh b/BUILD/mdbci/build.sh index cbeb7d992..02cdcc629 100755 --- a/BUILD/mdbci/build.sh +++ b/BUILD/mdbci/build.sh @@ -63,13 +63,6 @@ if [ "$already_running" != "ok" ]; then $(<${script_dir}/templates/build.json.template) " 2> /dev/null > $MDBCI_VM_PATH/${name}.json - while [ -f ~/vagrant_lock ] - do - sleep 5 - done - touch ~/vagrant_lock - echo $JOB_NAME-$BUILD_NUMBER >> ~/vagrant_lock - # starting VM for build echo "Generating build VM template" ${mdbci_dir}/mdbci --override --template $MDBCI_VM_PATH/$name.json generate $name @@ -77,7 +70,6 @@ $(<${script_dir}/templates/build.json.template) ${mdbci_dir}/mdbci up --attempts=1 $name if [ $? != 0 ] ; then echo "Error starting VM" - rm ~/vagrant_lock exit 1 fi echo "copying public keys to VM" @@ -92,9 +84,6 @@ export sshkey=`${mdbci_dir}/mdbci show keyfile $name/build --silent 2> /dev/null export scpopt="-i $sshkey -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ConnectTimeout=120 " export sshopt="$scpopt $sshuser@$IP" -echo "Release Vagrant lock" -rm ~/vagrant_lock - echo "Starting build" ${script_dir}/remote_build.sh export build_result=$? 
diff --git a/BUILD/mdbci/upgrade_test.sh b/BUILD/mdbci/upgrade_test.sh index fbac44d70..24695b5b0 100755 --- a/BUILD/mdbci/upgrade_test.sh +++ b/BUILD/mdbci/upgrade_test.sh @@ -21,13 +21,6 @@ eval "cat < /dev/null > $MDBCI_VM_PATH/${name}.json -while [ -f ~/vagrant_lock ] -do - sleep 5 -done -touch ~/vagrant_lock -echo $JOB_NAME-$BUILD_NUMBER >> ~/vagrant_lock - # destroying existing box if [ -d "install_$box" ]; then ${mdbci_dir}/mdbci destroy $name @@ -42,12 +35,12 @@ if [ $? != 0 ] ; then if [ "x$do_not_destroy_vm" != "xyes" ] ; then ${mdbci_dir}/mdbci destroy $name fi - rm ~/vagrant_lock + rm -f ~/vagrant_lock exit 1 fi fi -rm ~/vagrant_lock +rm -f ~/vagrant_lock # get VM info export sshuser=`${mdbci_dir}/mdbci ssh --command 'whoami' --silent $name/maxscale 2> /dev/null | tr -d '\r'` diff --git a/maxscale-system-test/Documentation/LOCAL_DEPLOYMENT.md b/maxscale-system-test/Documentation/LOCAL_DEPLOYMENT.md index 39c1ba337..6beb5c3d8 100644 --- a/maxscale-system-test/Documentation/LOCAL_DEPLOYMENT.md +++ b/maxscale-system-test/Documentation/LOCAL_DEPLOYMENT.md @@ -141,13 +141,6 @@ https://help.ubuntu.com/lts/serverguide/libvirt.html https://github.com/vagrant-libvirt/vagrant-libvirt#installation -### vagrant is locked, waiting ... - -```bash -rm ~/vagrant_lock -``` - - ### Random VM creation failures Plese check the amount of free memory and amount of running VMs diff --git a/maxscale-system-test/JENKINS.md b/maxscale-system-test/JENKINS.md index d43344bff..44d4956dd 100644 --- a/maxscale-system-test/JENKINS.md +++ b/maxscale-system-test/JENKINS.md @@ -121,7 +121,3 @@ If test run was executed with parameter 'do_not_destroy' set yo 'yes' please do [destroy](http://max-tst-01.mariadb.com:8089/view/axilary/job/destroy/) against your 'target' This job also have to be executed if test run job crashed or it was interrupted. - -In case of build or test job crash, interruption, Jenkins crash during Vagrant operation it is possible that Vagrant lock -stays in locked state and no other job can progress (job can be started, but it is waiting for Vagrant lock - -'/home/vagrant/vagrant_lock' can be seen in the job log). In this case lock can be removed by [remove_lock](http://max-tst-01.mariadb.com:8089/view/axilary/job/remove_lock/) job. diff --git a/maxscale-system-test/mdbci/create_config.sh b/maxscale-system-test/mdbci/create_config.sh index 1e2795486..697ff4cac 100755 --- a/maxscale-system-test/mdbci/create_config.sh +++ b/maxscale-system-test/mdbci/create_config.sh @@ -32,26 +32,17 @@ ${mdbci_dir}/mdbci --override --template ${MDBCI_VM_PATH}/${name}.json generate mkdir ${MDBCI_VM_PATH}/$name/cnf cp -r ${script_dir}/cnf/* ${MDBCI_VM_PATH}/$name/cnf/ - -while [ -f ~/vagrant_lock ] -do - echo "vagrant is locked, waiting ..." - sleep 5 -done -touch ~/vagrant_lock -echo ${JOB_NAME}-${BUILD_NUMBER} >> ~/vagrant_lock - echo "running vagrant up $provider" ${mdbci_dir}/mdbci up $name --attempts 3 if [ $? != 0 ]; then echo "Error creating configuration" - rm ~/vagrant_lock + rm -f ~/vagrant_lock exit 1 fi #cp ~/build-scripts/team_keys . 
${mdbci_dir}/mdbci public_keys --key ${team_keys} $name -rm ~/vagrant_lock +rm -f ~/vagrant_lock exit 0 diff --git a/maxscale-system-test/mdbci/run_test.sh b/maxscale-system-test/mdbci/run_test.sh index 95ce814f5..16574e342 100755 --- a/maxscale-system-test/mdbci/run_test.sh +++ b/maxscale-system-test/mdbci/run_test.sh @@ -91,7 +91,7 @@ set -x if [ "${do_not_destroy_vm}" != "yes" ] ; then ${mdbci_dir}/mdbci destroy $name fi - rm ~/vagrant_lock + rm -f ~/vagrant_lock exit 1 fi ${mdbci_dir}/mdbci snapshot take --path-to-nodes $name --snapshot-name clean @@ -105,7 +105,7 @@ else if [ "${do_not_destroy_vm}" != "yes" ] ; then ${mdbci_dir}/mdbci destroy $name fi - rm ~/vagrant_lock + rm -f ~/vagrant_lock exit 1 fi From 9de26b6df0878e4db5830d6be5481c62c7c54788 Mon Sep 17 00:00:00 2001 From: Timofey Turenko Date: Wed, 27 Feb 2019 22:22:26 +0200 Subject: [PATCH 03/15] add -DCMAKE_BUILD_TYPE=Debug to run_test_snapshot.sh --- maxscale-system-test/mdbci/run_test_snapshot.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/maxscale-system-test/mdbci/run_test_snapshot.sh b/maxscale-system-test/mdbci/run_test_snapshot.sh index 577163cf0..485cbc871 100755 --- a/maxscale-system-test/mdbci/run_test_snapshot.sh +++ b/maxscale-system-test/mdbci/run_test_snapshot.sh @@ -80,7 +80,7 @@ cd ${script_dir}/.. rm -rf build mkdir build && cd build -cmake .. -DBUILDNAME=$JOB_NAME-$BUILD_NUMBER-$target +cmake .. -DBUILDNAME=$JOB_NAME-$BUILD_NUMBER-$target -DBUILD_SYSTEM_TESTS=Y -DCMAKE_BUILD_TYPE=Debug make ./check_backend --restart-galera From cc3dbeeb6c5cd3cb365ea1af58617d08c1a84201 Mon Sep 17 00:00:00 2001 From: Timofey Turenko Date: Wed, 27 Feb 2019 22:34:41 +0200 Subject: [PATCH 04/15] add start and stop VM functions --- maxscale-system-test/keepalived.cpp | 8 ++++---- maxscale-system-test/nodes.cpp | 10 ++++++++++ maxscale-system-test/nodes.h | 14 ++++++++++++++ 3 files changed, 28 insertions(+), 4 deletions(-) diff --git a/maxscale-system-test/keepalived.cpp b/maxscale-system-test/keepalived.cpp index b5935e35e..16fbb50be 100644 --- a/maxscale-system-test/keepalived.cpp +++ b/maxscale-system-test/keepalived.cpp @@ -43,7 +43,7 @@ int main(int argc, char *argv[]) print_version_string(Test); Test->tprintf("Suspend Maxscale 000 machine and waiting\n"); - system(Test->maxscales->stop_vm_command[0]); + Test->add_result(Test->maxscales->start_vm(0), "Failed to stop VM maxscale_000\n"); sleep(FAILOVER_WAIT_TIME); version = print_version_string(Test); @@ -54,12 +54,12 @@ int main(int argc, char *argv[]) Test->tprintf("Resume Maxscale 000 machine and waiting\n"); - system(Test->maxscales->start_vm_command[0]); + Test->add_result(Test->maxscales->start_vm(0), "Failed to start VM maxscale_000\n"); sleep(FAILOVER_WAIT_TIME); print_version_string(Test); Test->tprintf("Suspend Maxscale 001 machine and waiting\n"); - system(Test->maxscales->stop_vm_command[1]); + Test->add_result(Test->maxscales->start_vm(1), "Failed to stop VM maxscale_001\n"); sleep(FAILOVER_WAIT_TIME); version = print_version_string(Test); @@ -70,7 +70,7 @@ int main(int argc, char *argv[]) print_version_string(Test); Test->tprintf("Resume Maxscale 001 machine and waiting\n"); - system(Test->maxscales->start_vm_command[1]); + Test->add_result(Test->maxscales->start_vm(1), "Failed to start VM maxscale_001\n"); sleep(FAILOVER_WAIT_TIME); print_version_string(Test); diff --git a/maxscale-system-test/nodes.cpp b/maxscale-system-test/nodes.cpp index 6b14162ba..8cfea8bef 100644 --- a/maxscale-system-test/nodes.cpp +++ 
b/maxscale-system-test/nodes.cpp @@ -472,3 +472,13 @@ const char* Nodes::ip(int i) const { return use_ipv6 ? IP6[i] : IP[i]; } + +int Nodes::start_vm(int node) +{ + return(system(start_vm_command[node])); +} + +int Nodes::stop_vm(int node) +{ + return(system(stop_vm_command[node])); +} diff --git a/maxscale-system-test/nodes.h b/maxscale-system-test/nodes.h index c19ac5575..4ab25e223 100644 --- a/maxscale-system-test/nodes.h +++ b/maxscale-system-test/nodes.h @@ -158,6 +158,20 @@ public: */ int read_basic_env(); + /** + * @brief start_vm Start virtual machine + * @param node Node number + * @return 0 in case of success + */ + int start_vm(int node); + + /** + * @brief stop_vm Stop virtual machine + * @param node Node number + * @return 0 in case of success + */ + int stop_vm(int node); + private: int check_node_ssh(int node); From 21fc01563515de5ef57fe0e4feb2a2a27dd02429 Mon Sep 17 00:00:00 2001 From: Timofey Turenko Date: Wed, 27 Feb 2019 23:15:36 +0200 Subject: [PATCH 05/15] Fix compiler warnings in system tests --- maxscale-system-test/fwf_syntax.cpp | 8 ++++---- maxscale-system-test/rds_vpc.cpp | 5 +++-- maxscale-system-test/script.cpp | 2 +- maxscale-system-test/testconnections.cpp | 4 ++-- 4 files changed, 10 insertions(+), 9 deletions(-) diff --git a/maxscale-system-test/fwf_syntax.cpp b/maxscale-system-test/fwf_syntax.cpp index 04df1a428..b50c42d9f 100644 --- a/maxscale-system-test/fwf_syntax.cpp +++ b/maxscale-system-test/fwf_syntax.cpp @@ -41,9 +41,9 @@ const char *rules_failure[] = NULL }; -void truncate_maxscale_logs(TestConnections& test) +int truncate_maxscale_logs(TestConnections& test) { - test.maxscales->ssh_node(0, "truncate -s 0 /var/log/maxscale/*", true); + return test.maxscales->ssh_node(0, "truncate -s 0 /var/log/maxscale/max*", true); } void create_rule(const char *rule, const char* user) @@ -63,7 +63,7 @@ int main(int argc, char** argv) for (int i = 0; rules_failure[i]; i++) { /** Create rule file with syntax error */ - truncate(temp_rules, 0); + test.add_result(truncate(temp_rules, 0), "Failed to truncate"); create_rule(rules_failure[i], users_ok[0]); copy_rules(&test, (char*)temp_rules, (char*)test_dir); @@ -74,7 +74,7 @@ int main(int argc, char** argv) * a message about the syntax error. 
*/ test.check_maxscale_processes(0, 0); test.check_log_err(0, "syntax error", true); - truncate_maxscale_logs(test); + test.add_result(truncate_maxscale_logs(test), "Failed to truncate Maxscale logs"); } return test.global_result; diff --git a/maxscale-system-test/rds_vpc.cpp b/maxscale-system-test/rds_vpc.cpp index a7c6a188e..1a8923595 100644 --- a/maxscale-system-test/rds_vpc.cpp +++ b/maxscale-system-test/rds_vpc.cpp @@ -168,6 +168,7 @@ int RDS::destroy_route_tables() json_t *root; char cmd[1024]; char * json; + int res = 0; sprintf(cmd, "aws ec2 describe-vpcs --vpc-ids=%s", vpc_id_intern); if (execute_cmd(cmd, &json)) @@ -196,11 +197,11 @@ int RDS::destroy_route_tables() if (strcmp(vpc_id_intern, vpc_id) == 0) { sprintf(cmd, "aws ec2 delete-route-table --route-table-id %s", rt_id); - system(cmd); + res += system(cmd); } } - return 0; + return res; } int RDS::detach_and_destroy_gw() diff --git a/maxscale-system-test/script.cpp b/maxscale-system-test/script.cpp index 50e3e07f4..2d1a1e7d5 100644 --- a/maxscale-system-test/script.cpp +++ b/maxscale-system-test/script.cpp @@ -146,7 +146,7 @@ int main(int argc, char *argv[]) "scp -i %s -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o LogLevel=quiet script_output_expected* %s@%s:%s/", Test->maxscales->sshkey[0], Test->maxscales->access_user[0], Test->maxscales->IP[0], Test->maxscales->access_homedir[0]); - system(str); + Test->add_result(system(str), "Error copying script to VM"); sprintf(str, "%s/script_output_expected", Test->maxscales->access_homedir[0]); test_script_monitor(Test, Test->repl, str); diff --git a/maxscale-system-test/testconnections.cpp b/maxscale-system-test/testconnections.cpp index e837d108e..7a6864540 100644 --- a/maxscale-system-test/testconnections.cpp +++ b/maxscale-system-test/testconnections.cpp @@ -1964,14 +1964,14 @@ void TestConnections::check_current_connections(int m, int value) int TestConnections::take_snapshot(char * snapshot_name) { - char str[4096]; + char str[strlen(take_snapshot_command) + strlen(snapshot_name) + 2]; sprintf(str, "%s %s", take_snapshot_command, snapshot_name); return system(str); } int TestConnections::revert_snapshot(char * snapshot_name) { - char str[4096]; + char str[strlen(revert_snapshot_command) + strlen(snapshot_name) + 2]; sprintf(str, "%s %s", revert_snapshot_command, snapshot_name); return system(str); } From 0f4e485a1cd0a9a7863e63e3bdba1f89a978b585 Mon Sep 17 00:00:00 2001 From: Timofey Turenko Date: Wed, 27 Feb 2019 23:42:15 +0200 Subject: [PATCH 06/15] fix compiler warnings in rds_vpc test --- maxscale-system-test/rds_vpc.cpp | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/maxscale-system-test/rds_vpc.cpp b/maxscale-system-test/rds_vpc.cpp index 1a8923595..9a06bbb71 100644 --- a/maxscale-system-test/rds_vpc.cpp +++ b/maxscale-system-test/rds_vpc.cpp @@ -463,6 +463,7 @@ int RDS::create_cluster() char * result; json_error_t error; size_t i; + int res = 0; sprintf(cmd, "aws rds create-db-cluster --database-name=test --engine=aurora --master-username=skysql --master-user-password=skysqlrds --db-cluster-identifier=%s --db-subnet-group-name=%s", @@ -487,7 +488,7 @@ int RDS::create_cluster() printf("Security group %s\n", sg_id); sprintf(cmd, "aws ec2 authorize-security-group-ingress --group-id %s --protocol tcp --port 3306 --cidr 0.0.0.0/0", sg_id); - system(cmd); + res += system(cmd); } sg_intern = sg_id; @@ -497,9 +498,9 @@ int RDS::create_cluster() "aws rds create-db-instance --db-cluster-identifier=%s --engine=aurora 
--db-instance-class=db.t2.medium --publicly-accessible --db-instance-identifier=node%03lu", cluster_name_intern, i); printf("%s\n", cmd); - system(cmd); + res += system(cmd); } - return 0; + return res; } int RDS::get_writer(const char ** writer_name) From 9ec4557075def0044383fc007ee5788269cea7fb Mon Sep 17 00:00:00 2001 From: Timofey Turenko Date: Fri, 1 Mar 2019 15:13:54 +0200 Subject: [PATCH 07/15] create symlink rhel -> centos and do not use real rhel build --- BUILD/mdbci/copy_repos.sh | 37 +++++++++++++++++++++++++------------ 1 file changed, 25 insertions(+), 12 deletions(-) diff --git a/BUILD/mdbci/copy_repos.sh b/BUILD/mdbci/copy_repos.sh index 26fb51ab6..851050b7e 100755 --- a/BUILD/mdbci/copy_repos.sh +++ b/BUILD/mdbci/copy_repos.sh @@ -4,25 +4,38 @@ dir=`pwd` if [ "$box_type" == "RPM" ] ; then - export arch=`ssh $sshopt "arch"` - . ${script_dir}/generate_build_info_path.sh + # For RHEL packages are not going to the repo + # Build can be executed to check if it is possible to build + # and to run install and upgrade tests + # with thre real RHEL, but we use CentOS packages for production + if [ "$platform" != "rhel" ] ; then + export arch=`ssh $sshopt "arch"` + . ${script_dir}/generate_build_info_path.sh - rm -rf $path_prefix/$platform/$platform_version/$arch/ - mkdir -p $path_prefix/$platform/$platform_version/$arch/ - rsync -avz --progress ${unsorted_repo_dir}/$repo_name/$box/* $path_prefix/$platform/$platform_version/$arch/ - env > $build_info_path - find $path_prefix/.. -type d -exec chmod 755 {} \; - find $path_prefix/.. -type f -exec chmod 644 {} \; - cd $path_prefix/$platform - ln -s $platform_version "$platform_version"server - ln -s $platform_version "$platform_version"Server + rm -rf $path_prefix/$platform/$platform_version/$arch/ + mkdir -p $path_prefix/$platform/$platform_version/$arch/ + rsync -avz --progress ${unsorted_repo_dir}/$repo_name/$box/* $path_prefix/$platform/$platform_version/$arch/ + env > $build_info_path + find $path_prefix/.. -type d -exec chmod 755 {} \; + find $path_prefix/.. -type f -exec chmod 644 {} \; + cd $path_prefix/$platform + ln -s $platform_version "$platform_version"server + ln -s $platform_version "$platform_version"Server + if [ "$platform" == "centos" ] ; then + cd .. + ln -s centos rhel + fi eval "cat < /dev/null > ${path_prefix}/${platform}_${platform_version}.json - echo "copying done" + echo "copying done" + else + echo "RHEL! Not copying packages to the repo" + fi + else export arch=`ssh $sshopt "dpkg --print-architecture"` . ${script_dir}/generate_build_info_path.sh From 7fb2ae571ba7e2d278f2af6d3a55dacec00d2b6c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Sat, 2 Mar 2019 04:20:09 +0200 Subject: [PATCH 08/15] Improve blocked host error message The error now explains how the problem can be mitigated by increasing max_connect_errors on the backend server. --- .../protocol/MySQL/mariadbbackend/mysql_backend.cc | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/server/modules/protocol/MySQL/mariadbbackend/mysql_backend.cc b/server/modules/protocol/MySQL/mariadbbackend/mysql_backend.cc index 497edba56..30fc9eeab 100644 --- a/server/modules/protocol/MySQL/mariadbbackend/mysql_backend.cc +++ b/server/modules/protocol/MySQL/mariadbbackend/mysql_backend.cc @@ -333,11 +333,10 @@ static void handle_error_response(DCB* dcb, GWBUF* buffer) * This will prevent repeated authentication failures. 
*/ if (errcode == ER_HOST_IS_BLOCKED) { - MXS_ERROR("Server %s has been put into maintenance mode due " - "to the server blocking connections from MaxScale. " - "Run 'mysqladmin -h %s -P %d flush-hosts' on this " - "server before taking this server out of maintenance " - "mode.", + MXS_ERROR("Server %s has been put into maintenance mode due to the server blocking connections " + "from MaxScale. Run 'mysqladmin -h %s -P %d flush-hosts' on this server before taking " + "this server out of maintenance mode. To avoid this problem in the future, set " + "'max_connect_errors' to a larger value in the backend server.", dcb->server->name, dcb->server->address, dcb->server->port); From 14557c7455508a498acd7c08e536947604f49bfe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Sat, 2 Mar 2019 17:27:47 +0200 Subject: [PATCH 09/15] MXS-2357: Explain runtime changes in `alter service` The help output now states that a subset of the routers support runtime configuration changes to all parameters. --- maxctrl/lib/alter.js | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/maxctrl/lib/alter.js b/maxctrl/lib/alter.js index 32a952774..2c6294e26 100644 --- a/maxctrl/lib/alter.js +++ b/maxctrl/lib/alter.js @@ -93,7 +93,10 @@ exports.builder = function(yargs) { }) .command('service ', 'Alter service parameters', function(yargs) { return yargs.epilog('To display the service parameters, execute `show service `. ' + - 'The following list of parameters can be altered at runtime:\n\n' + JSON.stringify(service_params, null, 4)) + 'Some routers support runtime configuration changes to all parameters. ' + + 'Currently all readconnroute, readwritesplit and schemarouter parameters ' + + 'can be changed at runtime. In addition to module specific parameters, ' + + 'the following list of common service parameters can be altered at runtime:\n\n' + JSON.stringify(service_params, null, 4)) .usage('Usage: alter service ') }, function(argv) { maxctrl(argv, function(host) { From 7904cdaefb6ab4d79f03fe64a92b8a14eb4c4d11 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Sat, 2 Mar 2019 23:52:44 +0200 Subject: [PATCH 10/15] Fix assume_unique_hostnames It was always set to true when the servers were created. --- server/modules/monitor/mariadbmon/mariadbmon.cc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/server/modules/monitor/mariadbmon/mariadbmon.cc b/server/modules/monitor/mariadbmon/mariadbmon.cc index ee190bedf..31b16c2cd 100644 --- a/server/modules/monitor/mariadbmon/mariadbmon.cc +++ b/server/modules/monitor/mariadbmon/mariadbmon.cc @@ -185,10 +185,6 @@ MariaDBMonitor* MariaDBMonitor::create(MXS_MONITOR* monitor) */ bool MariaDBMonitor::configure(const MXS_CONFIG_PARAMETER* params) { - /* Reset all monitored state info. The server dependent values must be reset as servers could have been - * added, removed and modified. */ - reset_server_info(); - m_detect_stale_master = config_get_bool(params, "detect_stale_master"); m_detect_stale_slave = config_get_bool(params, "detect_stale_slave"); m_ignore_external_masters = config_get_bool(params, "ignore_external_masters"); @@ -208,6 +204,10 @@ bool MariaDBMonitor::configure(const MXS_CONFIG_PARAMETER* params) m_maintenance_on_low_disk_space = config_get_bool(params, CN_MAINTENANCE_ON_LOW_DISK_SPACE); m_handle_event_scheduler = config_get_bool(params, CN_HANDLE_EVENTS); + /* Reset all monitored state info. 
The server dependent values must be reset as servers could have been + * added, removed and modified. */ + reset_server_info(); + m_excluded_servers.clear(); MXS_MONITORED_SERVER** excluded_array = NULL; int n_excluded = mon_config_get_servers(params, CN_NO_PROMOTE_SERVERS, m_monitor, &excluded_array); From 6436d959e3cc74c30051f61a6c2817f80e3f45ea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Sun, 3 Mar 2019 22:04:49 +0200 Subject: [PATCH 11/15] Fix avrorouter file rotation The avro filenames weren't processed properly which caused them to not work correctly. --- server/modules/routing/avrorouter/avro_client.cc | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/server/modules/routing/avrorouter/avro_client.cc b/server/modules/routing/avrorouter/avro_client.cc index 1329e3634..e80d171d0 100644 --- a/server/modules/routing/avrorouter/avro_client.cc +++ b/server/modules/routing/avrorouter/avro_client.cc @@ -668,11 +668,12 @@ static std::string get_next_filename(std::string file, std::string dir) { // Find the last and second to last dot auto last = file.find_last_of('.'); - auto almost_last = file.find_last_of('.', last); + auto part = file.substr(0, last); + auto almost_last = part.find_last_of('.'); mxb_assert(last != std::string::npos && almost_last != std::string::npos); // Extract the number between the dots - std::string number_part = file.substr(almost_last + 1, last); + std::string number_part = part.substr(almost_last + 1, std::string::npos); int filenum = strtol(number_part.c_str(), NULL, 10); std::string file_part = file.substr(0, almost_last); From af33d19703660c9083a7cbf55d20056291a85529 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Mon, 4 Mar 2019 10:13:06 +0200 Subject: [PATCH 12/15] Add short version of --tls-verify-server-cert It is frequently used when using self-signed certificates so making it shorther makes life easier. Also added the missing --tls-passphrase into the TLS options group. --- maxctrl/lib/core.js | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/maxctrl/lib/core.js b/maxctrl/lib/core.js index 7ce2909c4..603d0459c 100644 --- a/maxctrl/lib/core.js +++ b/maxctrl/lib/core.js @@ -65,7 +65,7 @@ program default: false, type: 'boolean' }) - .group(['s', 'tls-key', 'tls-cert', 'tls-ca-cert', 'tls-verify-server-cert'], 'HTTPS/TLS Options:') + .group(['s', 'tls-key', 'tls-passphrase', 'tls-cert', 'tls-ca-cert', 'n'], 'HTTPS/TLS Options:') .option('s', { alias: 'secure', describe: 'Enable HTTPS requests', @@ -88,7 +88,8 @@ program describe: 'Path to TLS CA certificate', type: 'string' }) - .option('tls-verify-server-cert', { + .option('n', { + alias: 'tls-verify-server-cert', describe: 'Whether to verify server TLS certificates', default: true, type: 'boolean' From 966787e11966b9dfe38040594f7dedffd5d56f38 Mon Sep 17 00:00:00 2001 From: Timofey Turenko Date: Mon, 4 Mar 2019 11:29:38 +0200 Subject: [PATCH 13/15] do not create or copy repos if build failed --- BUILD/mdbci/build.sh | 12 +++++++++--- BUILD/mdbci/copy_repos.sh | 8 ++++++++ 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/BUILD/mdbci/build.sh b/BUILD/mdbci/build.sh index 02cdcc629..0b45af572 100755 --- a/BUILD/mdbci/build.sh +++ b/BUILD/mdbci/build.sh @@ -91,11 +91,17 @@ export build_result=$? shellcheck `find . | grep "\.sh"` | grep -i "POSIX sh" if [ $? 
-eq 0 ] ; then echo "POSIX sh error are found in the scripts" -# exit 1 fi -${script_dir}/create_remote_repo.sh -${script_dir}/copy_repos.sh +if [ ${build_result} -eq 0 ]; then + ${script_dir}/create_remote_repo.sh + export build_result=$? +fi + +if [ ${build_result} -eq 0 ]; then + ${script_dir}/copy_repos.sh + export build_result=$? +fi echo "Removing locks and destroying VM" diff --git a/BUILD/mdbci/copy_repos.sh b/BUILD/mdbci/copy_repos.sh index 851050b7e..60063fe1c 100755 --- a/BUILD/mdbci/copy_repos.sh +++ b/BUILD/mdbci/copy_repos.sh @@ -15,6 +15,10 @@ if [ "$box_type" == "RPM" ] ; then rm -rf $path_prefix/$platform/$platform_version/$arch/ mkdir -p $path_prefix/$platform/$platform_version/$arch/ rsync -avz --progress ${unsorted_repo_dir}/$repo_name/$box/* $path_prefix/$platform/$platform_version/$arch/ + if [ $? !=0 ] ; then + echo "Error copying repos" + exit 1 + fi env > $build_info_path find $path_prefix/.. -type d -exec chmod 755 {} \; find $path_prefix/.. -type f -exec chmod 644 {} \; @@ -43,6 +47,10 @@ else rm -rf $path_prefix/$platform_family/dists/$platform_version/main/binary-i386 mkdir -p $path_prefix/$platform_family/ rsync -avz --progress ${unsorted_repo_dir}/$repo_name/$box/* $path_prefix/$platform_family/ + if [ $? !=0 ] ; then + echo "Error copying repos" + exit 1 + fi env > $build_info_path find $path_prefix/.. -type d -exec chmod 755 {} \; find $path_prefix/.. -type f -exec chmod 644 {} \; From 42b3f970c59ad77454568f68b9af950f8b6e9c30 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Mon, 4 Mar 2019 16:40:17 +0200 Subject: [PATCH 14/15] MXS-2260: Add REST API tutorial The tutorial shows how to configure the MaxScale REST API and communicate with it using the `curl` command line client. --- Documentation/Documentation-Contents.md | 1 + Documentation/Tutorials/REST-API-Tutorial.md | 355 +++++++++++++++++++ 2 files changed, 356 insertions(+) create mode 100644 Documentation/Tutorials/REST-API-Tutorial.md diff --git a/Documentation/Documentation-Contents.md b/Documentation/Documentation-Contents.md index ce733ee30..d14b34f5b 100644 --- a/Documentation/Documentation-Contents.md +++ b/Documentation/Documentation-Contents.md @@ -53,6 +53,7 @@ Here are tutorials on monitoring and managing MariaDB MaxScale in cluster enviro - [MariaDB MaxScale HA with Lsyncd](Tutorials/MaxScale-HA-with-lsyncd.md) - [Nagios Plugins for MariaDB MaxScale Tutorial](Tutorials/Nagios-Plugins.md) + - [REST API Tutorial](Tutorials/REST-API-Tutorial.md) ## Routers diff --git a/Documentation/Tutorials/REST-API-Tutorial.md b/Documentation/Tutorials/REST-API-Tutorial.md new file mode 100644 index 000000000..397b42f77 --- /dev/null +++ b/Documentation/Tutorials/REST-API-Tutorial.md @@ -0,0 +1,355 @@ +# REST API Tutorial + +This tutorial is a quick overview of what the MaxScale REST API offers, how it +can be used to inspect the state of MaxScale and how to use it to modify the +runtime configuration of MaxScale. The tutorial uses the `curl` command line +client to demonstrate how the API is used. + +## Configuration and Hardening + +The MaxScale REST API listens on port 8989 on the local host. The `admin_port` +and `admin_host` parameters control which port and address the REST API listens +on. Note that for security reasons the API only listens for local connections +with the default configuration. It is critical that the default credentials are +changed and TLS/SSL encryption is configured before exposing the REST API to a +network. 
+ +The default user for the REST API is `admin` and the password is `mariadb`. The +easiest way to secure the REST API is to use the `maxctrl` command line client +to create a new admin user and delete the default one. To do this, run the +following commands: + +``` +maxctrl create user my_user my_password --type=admin +maxctrl destroy user admin +``` + +This will create the user `my_user` with the password `my_password` that is an +administrative account. After this account is created, the default `admin` +account is removed with the next command. + +The next step is to enable TLS encryption. To do this, you need a CA +certificate, a private key and a public certificate file all in PEM format. Add +the following three parameters under the `[maxscale]` section of the MaxScale +configuration file and restart MaxScale. + +``` +admin_ssl_key=/certs/server-key.pem +admin_ssl_cert=/certs/server-cert.pem +admin_ssl_ca_cert=/certs/ca-cert.pem +``` + +Use `maxctrl` to verify that the TLS encryption is enabled. In this tutorial our +server certificates are self-signed so the `--tls-verify-server-cert=false` +option is required. + +``` +maxctrl --user=my_user --password=my_password --secure --tls-ca-cert=/certs/ca-cert.pem --tls-verify-server-cert=false show maxscale +``` + +If no errors are raised, this means that the communication via the REST API is +now secure and can be used across networks. + +## Requesting Data + +**Note:** For the sake of brevity, the rest of this tutorial will omit the +TLS/SSL options for the `curl` command line. For more information, refer to the +`curl` manpage. + +The most basic task to do with the REST API is to see whether MaxScale is up and +running. To do this, we do a HTTP request on the root resource (the `-i` option +shows the HTTP headers). + +`curl -i 127.0.0.1:8989/v1/` +``` +HTTP/1.1 200 OK +Connection: Keep-Alive +Content-Length: 0 +Last-Modified: Mon, 04 Mar 2019 08:23:09 GMT +ETag: "0" +Date: Mon, 04 Mar 19 08:29:41 GMT +``` + +To query a resource collection endpoint, append it to the URL. The `/v1/filters/` +endpoint shows the list of filters configured in MaxScale. This is a _resource +collection_ endpoint: it contains the list of all resources of a particular +type. + +`curl 127.0.0.1:8989/v1/filters` +``` +{ + "links": { + "self": "http://127.0.0.1:8989/v1/filters/" + }, + "data": [ + { + "id": "Hint", + "type": "filters", + "relationships": { + "services": { + "links": { + "self": "http://127.0.0.1:8989/v1/services/" + }, + "data": [ + { + "id": "RW-Split-Hint-Router", + "type": "services" + } + ] + } + }, + "attributes": { + "module": "hintfilter", + "parameters": {} + }, + "links": { + "self": "http://127.0.0.1:8989/v1/filters/Hint" + } + }, + { + "id": "Logger", + "type": "filters", + "relationships": { + "services": { + "links": { + "self": "http://127.0.0.1:8989/v1/services/" + }, + "data": [] + } + }, + "attributes": { + "module": "qlafilter", + "parameters": { + "match": null, + "exclude": null, + "user": null, + "source": null, + "filebase": "/tmp/log", + "options": "ignorecase", + "log_type": "session", + "log_data": "date,user,query", + "newline_replacement": "\" \"", + "separator": ",", + "flush": false, + "append": false + }, + "filter_diagnostics": { + "separator": ",", + "newline_replacement": "\" \"" + } + }, + "links": { + "self": "http://127.0.0.1:8989/v1/filters/Logger" + } + } + ] +} +``` + +The `data` holds the actual list of resources: the `Hint` and `Logger` +filters. 

Each resource in the list has a `relationships` object. This shows the
relationship links between resources. In our example, the `Hint` filter is
used by a service named `RW-Split-Hint-Router` and the `Logger` filter is not
currently in use.

To request an individual resource, we add the object name to the resource
collection URL. For example, if we want to get only the `Logger` filter we
execute the following command.

`curl 127.0.0.1:8989/v1/filters/Logger`
```
{
    "links": {
        "self": "http://127.0.0.1:8989/v1/filters/Logger"
    },
    "data": {
        "id": "Logger",
        "type": "filters",
        "relationships": {
            "services": {
                "links": {
                    "self": "http://127.0.0.1:8989/v1/services/"
                },
                "data": []
            }
        },
        "attributes": {
            "module": "qlafilter",
            "parameters": {
                "match": null,
                "exclude": null,
                "user": null,
                "source": null,
                "filebase": "/tmp/log",
                "options": "ignorecase",
                "log_type": "session",
                "log_data": "date,user,query",
                "newline_replacement": "\" \"",
                "separator": ",",
                "flush": false,
                "append": false
            },
            "filter_diagnostics": {
                "separator": ",",
                "newline_replacement": "\" \""
            }
        },
        "links": {
            "self": "http://127.0.0.1:8989/v1/filters/Logger"
        }
    }
}
```

Note that this time the `data` member holds an object instead of an array of
objects. All other parts of the response are similar to what was shown in the
previous example.

## Creating Objects

One of the uses of the REST API is to create new objects in MaxScale at
runtime. This allows new servers, services, filters, monitors and listeners to
be created without restarting MaxScale.

For example, to create a new server in MaxScale the JSON definition of a
server must be sent to the REST API at the `/v1/servers/` endpoint. The
request body defines the server name as well as the parameters for it.

To create objects with `curl`, first write the JSON definition into a file.

```
{
    "data": {
        "id": "server1",
        "type": "servers",
        "attributes": {
            "parameters": {
                "address": "127.0.0.1",
                "port": 3003
            }
        }
    }
}
```

To send the data, use the following command.

```
curl -X POST -d @new_server.txt 127.0.0.1:8989/v1/servers
```

The `-d` option takes a file name prefixed with a `@` as an argument. Here we
have `@new_server.txt`, which is the name of the file where the JSON
definition was stored. The `-X` option defines the HTTP verb to use; to create
a new object we must use the POST verb.

To verify the data, request the newly created object.

```
curl 127.0.0.1:8989/v1/servers/server1
```

## Modifying Data

The easiest way to modify an object is to first request it, store the result
in a file, edit it and then send the updated object back to the REST API.

Let's say we want to modify the port that the server we created earlier
listens on. First we request the current object and store the result in a
file.

```
curl 127.0.0.1:8989/v1/servers/server1 > server1.txt
```

After that we edit the file and change the port from 3003 to 3306. Next, the
modified JSON object is sent to the REST API with a PATCH request. To do this,
execute the following command.

```
curl -X PATCH -d @server1.txt 127.0.0.1:8989/v1/servers/server1
```

To verify that the data was updated correctly, request the object again.

```
curl 127.0.0.1:8989/v1/servers/server1
```

## Object Relationships

To continue with our previous example, we add the updated server to a
service. To do this, the `relationships` object of the server must be modified
to include the service we want to add the server to.

To define a relationship between a server and a service, the `data` member
must have the `relationships` field and it must contain an object with the
`services` field (some fields omitted for brevity).

```
{
    "data": {
        "id": "server1",
        "type": "servers",
        "relationships": {
            "services": {
                "data": [
                    {
                        "id": "RW-Split-Router",
                        "type": "services"
                    }
                ]
            }
        },
        "attributes": ...
    }
}
```

The `data.relationships.services.data` field contains a list of objects that
define the `id` and `type` fields. The `id` is the name of the object (a
service or a monitor, in the case of servers) and the `type` tells what kind
of object it is. Only `services` type objects should be present in the
`services` object.

In our example we are linking the `server1` server to the `RW-Split-Router`
service. As was seen in the previous example, the easiest way to do this is to
store the result, edit it and then send it back with an HTTP PATCH.

If we want to remove a server from _all_ services, we can set the
`relationships` field to `{}`. The REST API interprets this as an instruction
to remove the server from all services and monitors. This is useful if you
want to delete the server, which can only be done if it has no relationships
to other objects.

## Deleting Objects

To delete an object, simply execute an HTTP DELETE request on the resource you
want to delete. For example, to delete the `server1` server, execute the
following command.

```
curl -X DELETE 127.0.0.1:8989/v1/servers/server1
```

## Further Reading

The full list of all available endpoints in MaxScale can be found in the
[REST API documentation](../REST-API/API.md).

The `maxctrl` command line client is self-documenting and the `maxctrl help`
command is a good tool for exploring the commands that are available in it.
The `maxctrl api get` command can be a useful way to explore the REST API, as
it provides an easy way to extract values out of the JSON data generated by
the REST API.

There is a multitude of REST API clients readily available and most of them
are far more convenient to use than `curl`. We recommend investigating your
requirements and how you intend to integrate with or use the MaxScale REST
API. Most modern languages either have a built-in HTTP library or a de facto
standard HTTP library exists for them.

The MaxScale REST API follows the JSON API specification, and there are
libraries that are built specifically for these sorts of APIs.

From 8a0b6005a4cadcdfcbbeea9e3a2c8cc10bc5c53c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?=
Date: Mon, 18 Feb 2019 13:03:44 +0200
Subject: [PATCH 15/15] MXS-2335: Fix lower_case_table_names

The database check always used case-sensitive SQL to check that the
database exists.
--- server/modules/authenticator/MySQLAuth/dbusers.c | 11 +++++++---- server/modules/authenticator/MySQLAuth/mysql_auth.h | 2 ++ 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/server/modules/authenticator/MySQLAuth/dbusers.c b/server/modules/authenticator/MySQLAuth/dbusers.c index bda31ba8c..baa4893e8 100644 --- a/server/modules/authenticator/MySQLAuth/dbusers.c +++ b/server/modules/authenticator/MySQLAuth/dbusers.c @@ -244,17 +244,20 @@ static int database_cb(void *data, int columns, char** rows, char** row_names) return 0; } -static bool check_database(sqlite3 *handle, const char *database) +static bool check_database(MYSQL_AUTH* instance, sqlite3 *handle, const char *database) { bool rval = true; if (*database) { rval = false; - size_t len = sizeof(mysqlauth_validate_database_query) + strlen(database) + 1; + const char* query = instance->lower_case_table_names ? + mysqlauth_validate_database_query_lower : + mysqlauth_validate_database_query; + size_t len = strlen(query) + strlen(database) + 1; char sql[len]; - sprintf(sql, mysqlauth_validate_database_query, database); + sprintf(sql, query, database); char *err; @@ -363,7 +366,7 @@ int validate_mysql_user(MYSQL_AUTH* instance, DCB *dcb, MYSQL_session *session, scramble, scramble_len, session->client_sha1)) { /** Password is OK, check that the database exists */ - if (check_database(handle, session->db)) + if (check_database(instance, handle, session->db)) { rval = MXS_AUTH_SUCCEEDED; } diff --git a/server/modules/authenticator/MySQLAuth/mysql_auth.h b/server/modules/authenticator/MySQLAuth/mysql_auth.h index 73c3304e5..7baff4de8 100644 --- a/server/modules/authenticator/MySQLAuth/mysql_auth.h +++ b/server/modules/authenticator/MySQLAuth/mysql_auth.h @@ -81,6 +81,8 @@ static const char mysqlauth_skip_auth_query[] = /** Query that checks that the database exists */ static const char mysqlauth_validate_database_query[] = "SELECT * FROM " MYSQLAUTH_DATABASES_TABLE_NAME " WHERE db = '%s' LIMIT 1"; +static const char mysqlauth_validate_database_query_lower[] = + "SELECT * FROM " MYSQLAUTH_DATABASES_TABLE_NAME " WHERE LOWER(db) = LOWER('%s') LIMIT 1"; /** Delete query used to clean up the database before loading new users */ static const char delete_users_query[] = "DELETE FROM " MYSQLAUTH_USERS_TABLE_NAME;