Merge branch '2.2' into 2.3
@@ -63,13 +6,6 @@ if [ "$already_running" != "ok" ]; then
 $(<${script_dir}/templates/build.json.template)
 " 2> /dev/null > $MDBCI_VM_PATH/${name}.json
 
-while [ -f ~/vagrant_lock ]
-do
-sleep 5
-done
-touch ~/vagrant_lock
-echo $JOB_NAME-$BUILD_NUMBER >> ~/vagrant_lock
-
 # starting VM for build
 echo "Generating build VM template"
 ${mdbci_dir}/mdbci --override --template $MDBCI_VM_PATH/$name.json generate $name
@@ -77,7 +70,6 @@ $(<${script_dir}/templates/build.json.template)
 ${mdbci_dir}/mdbci up --attempts=1 $name
 if [ $? != 0 ] ; then
 echo "Error starting VM"
-rm ~/vagrant_lock
 exit 1
 fi
 echo "copying public keys to VM"
@@ -92,9 +84,6 @@ export sshkey=`${mdbci_dir}/mdbci show keyfile $name/build --silent 2> /dev/null
 export scpopt="-i $sshkey -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ConnectTimeout=120 "
 export sshopt="$scpopt $sshuser@$IP"
 
-echo "Release Vagrant lock"
-rm ~/vagrant_lock
-
 echo "Starting build"
 ${script_dir}/remote_build.sh
 export build_result=$?
@@ -102,11 +91,17 @@ export build_result=$?
 shellcheck `find . | grep "\.sh"` | grep -i "POSIX sh"
 if [ $? -eq 0 ] ; then
 echo "POSIX sh error are found in the scripts"
-# exit 1
 fi
 
-${script_dir}/create_remote_repo.sh
-${script_dir}/copy_repos.sh
+if [ ${build_result} -eq 0 ]; then
+${script_dir}/create_remote_repo.sh
+export build_result=$?
+fi
+
+if [ ${build_result} -eq 0 ]; then
+${script_dir}/copy_repos.sh
+export build_result=$?
+fi
 
 echo "Removing locks and destroying VM"
 
@@ -4,25 +4,42 @@
 
 dir=`pwd`
 if [ "$box_type" == "RPM" ] ; then
-export arch=`ssh $sshopt "arch"`
-. ${script_dir}/generate_build_info_path.sh
+# For RHEL packages are not going to the repo
+# Build can be executed to check if it is possible to build
+# and to run install and upgrade tests
+# with thre real RHEL, but we use CentOS packages for production
+if [ "$platform" != "rhel" ] ; then
+export arch=`ssh $sshopt "arch"`
+. ${script_dir}/generate_build_info_path.sh
 
 rm -rf $path_prefix/$platform/$platform_version/$arch/
 mkdir -p $path_prefix/$platform/$platform_version/$arch/
 rsync -avz --progress ${unsorted_repo_dir}/$repo_name/$box/* $path_prefix/$platform/$platform_version/$arch/
-env > $build_info_path
-find $path_prefix/.. -type d -exec chmod 755 {} \;
-find $path_prefix/.. -type f -exec chmod 644 {} \;
-cd $path_prefix/$platform
-ln -s $platform_version "$platform_version"server
-ln -s $platform_version "$platform_version"Server
+if [ $? !=0 ] ; then
+echo "Error copying repos"
+exit 1
+fi
+env > $build_info_path
+find $path_prefix/.. -type d -exec chmod 755 {} \;
+find $path_prefix/.. -type f -exec chmod 644 {} \;
+cd $path_prefix/$platform
+ln -s $platform_version "$platform_version"server
+ln -s $platform_version "$platform_version"Server
+if [ "$platform" == "centos" ] ; then
+cd ..
+ln -s centos rhel
+fi
 
 eval "cat <<EOF
 $(<${script_dir}/templates/repository-config/rpm.json.template)
 " 2> /dev/null > ${path_prefix}/${platform}_${platform_version}.json
 
 
 echo "copying done"
+else
+echo "RHEL! Not copying packages to the repo"
+fi
 
 else
 export arch=`ssh $sshopt "dpkg --print-architecture"`
 . ${script_dir}/generate_build_info_path.sh
@@ -30,6 +47,10 @@ else
 rm -rf $path_prefix/$platform_family/dists/$platform_version/main/binary-i386
 mkdir -p $path_prefix/$platform_family/
 rsync -avz --progress ${unsorted_repo_dir}/$repo_name/$box/* $path_prefix/$platform_family/
+if [ $? !=0 ] ; then
+echo "Error copying repos"
+exit 1
+fi
 env > $build_info_path
 find $path_prefix/.. -type d -exec chmod 755 {} \;
 find $path_prefix/.. -type f -exec chmod 644 {} \;
@@ -21,13 +21,6 @@ eval "cat <<EOF
 $(<${script_dir}/templates/install.json.template)
 " 2> /dev/null > $MDBCI_VM_PATH/${name}.json
 
-while [ -f ~/vagrant_lock ]
-do
-sleep 5
-done
-touch ~/vagrant_lock
-echo $JOB_NAME-$BUILD_NUMBER >> ~/vagrant_lock
-
 # destroying existing box
 if [ -d "install_$box" ]; then
 ${mdbci_dir}/mdbci destroy $name
@@ -42,12 +35,12 @@ if [ $? != 0 ] ; then
 if [ "x$do_not_destroy_vm" != "xyes" ] ; then
 ${mdbci_dir}/mdbci destroy $name
 fi
-rm ~/vagrant_lock
+rm -f ~/vagrant_lock
 exit 1
 fi
 fi
 
-rm ~/vagrant_lock
+rm -f ~/vagrant_lock
 
 # get VM info
 export sshuser=`${mdbci_dir}/mdbci ssh --command 'whoami' --silent $name/maxscale 2> /dev/null | tr -d '\r'`
@@ -836,9 +836,13 @@ add_test_executable_notest(delete_rds.cpp delete_rds replication LABELS EXTERN_B
 # a tool to create RDS Aurora cluster
 add_test_executable_notest(create_rds.cpp create_rds replication LABELS EXTERN_BACKEND)
 
-# start sysbench ageints RWSplit for infinite execution
+# start sysbench against RWSplit for infinite execution
 add_test_executable_notest(long_sysbench.cpp long_sysbench replication LABELS readwritesplit REPL_BACKEND)
 
+# own long test
+# 'long_test_time' variable defines time of execution (in seconds)
+add_test_executable_notest(long_test.cpp long_test replication LABELS readwritesplit REPL_BACKEND)
+
 # test effect of local_address in configuration file
 add_test_executable(local_address.cpp local_address local_address LABELS REPL_BACKEND)
 
@@ -141,13 +141,6 @@ https://help.ubuntu.com/lts/serverguide/libvirt.html
 https://github.com/vagrant-libvirt/vagrant-libvirt#installation
 
 
-### vagrant is locked, waiting ...
-
-```bash
-rm ~/vagrant_lock
-```
-
-
 ### Random VM creation failures
 
 Plese check the amount of free memory and amount of running VMs
@@ -121,7 +121,3 @@ If test run was executed with parameter 'do_not_destroy' set yo 'yes' please do
 [destroy](http://max-tst-01.mariadb.com:8089/view/axilary/job/destroy/) against your 'target'
 
 This job also have to be executed if test run job crashed or it was interrupted.
-
-In case of build or test job crash, interruption, Jenkins crash during Vagrant operation it is possible that Vagrant lock
-stays in locked state and no other job can progress (job can be started, but it is waiting for Vagrant lock -
-'/home/vagrant/vagrant_lock' can be seen in the job log). In this case lock can be removed by [remove_lock](http://max-tst-01.mariadb.com:8089/view/axilary/job/remove_lock/) job.
@@ -64,6 +64,5 @@ int main(int argc, char *argv[])
 execute_query_silent(test.repl->nodes[0], "DROP USER user@'%%';");
 execute_query_silent(test.repl->nodes[0], "DROP TABLE test.t1");
 test.repl->disconnect();
-
 return test.global_result;
 }
@@ -1,7 +1,8 @@
 [maxscale]
 threads=###threads###
+#log_info=1
 
-[MySQL Monitor]
+[MySQL-Monitor]
 type=monitor
 module=mysqlmon
 servers=server1,server2,server3,server4
@@ -11,7 +12,7 @@ monitor_interval=1000
 detect_stale_master=false
 detect_standalone_master=false
 
-[RW Split Router]
+[RW-Split-Router]
 type=service
 router=readwritesplit
 servers=server1,server2,server3,server4
@@ -20,7 +21,7 @@ password=skysql
 slave_selection_criteria=LEAST_GLOBAL_CONNECTIONS
 max_slave_connections=1
 
-[Read Connection Router Slave]
+[Read-Connection-Router-Slave]
 type=service
 router=readconnroute
 router_options=slave
@@ -28,7 +29,7 @@ servers=server1,server2,server3,server4
 user=maxskysql
 password=skysql
 
-[Read Connection Router Master]
+[Read-Connection-Router-Master]
 type=service
 router=readconnroute
 router_options=master
@@ -36,21 +37,21 @@ servers=server1,server2,server3,server4
 user=maxskysql
 password=skysql
 
-[RW Split Listener]
+[RW-Split-Listener]
 type=listener
-service=RW Split Router
+service=RW-Split-Router
 protocol=MySQLClient
 port=4006
 
-[Read Connection Listener Slave]
+[Read-Connection-Listener-Slave]
 type=listener
-service=Read Connection Router Slave
+service=Read-Connection-Router-Slave
 protocol=MySQLClient
 port=4009
 
-[Read Connection Listener Master]
+[Read-Connection-Listener-Master]
 type=listener
-service=Read Connection Router Master
+service=Read-Connection-Router-Master
 protocol=MySQLClient
 port=4008
 
@@ -58,7 +59,7 @@ port=4008
 type=service
 router=cli
 
-[CLI Listener]
+[CLI-Listener]
 type=listener
 service=CLI
 protocol=maxscaled
@@ -42,9 +42,9 @@ const char* rules_failure[] =
 NULL
 };
 
-void truncate_maxscale_logs(TestConnections& test)
+int truncate_maxscale_logs(TestConnections& test)
 {
-test.maxscales->ssh_node(0, "truncate -s 0 /var/log/maxscale/*", true);
+return test.maxscales->ssh_node(0, "truncate -s 0 /var/log/maxscale/max*", true);
 }
 
 void create_rule(const char* rule, const char* user)
@@ -64,7 +64,7 @@ int main(int argc, char** argv)
 for (int i = 0; rules_failure[i]; i++)
 {
 /** Create rule file with syntax error */
-truncate(temp_rules, 0);
+test.add_result(truncate(temp_rules, 0), "Failed to truncate");
 create_rule(rules_failure[i], users_ok[0]);
 char buf[PATH_MAX + 1];
 copy_rules(&test, (char*)temp_rules, (char*)getcwd(buf, sizeof(buf)));
@@ -77,7 +77,7 @@ int main(int argc, char** argv)
 * a message about the syntax error. */
 test.check_maxscale_processes(0, 0);
 test.log_includes(0, "syntax error");
-truncate_maxscale_logs(test);
+test.add_result(truncate_maxscale_logs(test), "Failed to truncate Maxscale logs");
 }
 
 return test.global_result;
@@ -48,7 +48,7 @@ int main(int argc, char* argv[])
 print_version_string(Test);
 
 Test->tprintf("Suspend Maxscale 000 machine and waiting\n");
-system(Test->maxscales->stop_vm_command[0]);
+Test->add_result(Test->maxscales->start_vm(0), "Failed to stop VM maxscale_000\n");
 sleep(FAILOVER_WAIT_TIME);
 
 version = print_version_string(Test);
@@ -59,12 +59,12 @@ int main(int argc, char* argv[])
 
 
 Test->tprintf("Resume Maxscale 000 machine and waiting\n");
-system(Test->maxscales->start_vm_command[0]);
+Test->add_result(Test->maxscales->start_vm(0), "Failed to start VM maxscale_000\n");
 sleep(FAILOVER_WAIT_TIME);
 print_version_string(Test);
 
 Test->tprintf("Suspend Maxscale 001 machine and waiting\n");
-system(Test->maxscales->stop_vm_command[1]);
+Test->add_result(Test->maxscales->start_vm(1), "Failed to stop VM maxscale_001\n");
 sleep(FAILOVER_WAIT_TIME);
 
 version = print_version_string(Test);
@@ -75,7 +75,7 @@ int main(int argc, char* argv[])
 
 print_version_string(Test);
 Test->tprintf("Resume Maxscale 001 machine and waiting\n");
-system(Test->maxscales->start_vm_command[1]);
+Test->add_result(Test->maxscales->start_vm(1), "Failed to start VM maxscale_001\n");
 sleep(FAILOVER_WAIT_TIME);
 print_version_string(Test);
 
maxscale-system-test/long_test.cpp (new file, 352 lines)
@@ -0,0 +1,352 @@
+/**
+* @file long_test.cpp Run different load for long long execution (long load test)
+*
+* time to execute test is defined by 'long_test_time' environmental variable
+* e.g. 'long_test_time=3600 ./long_test'
+*/
+
+
+#include "testconnections.h"
+#include "big_transaction.h"
+
+typedef void * FUNC(void * ptr);
+
+FUNC query_thread;
+FUNC prepared_stmt_thread;
+FUNC transaction_thread;
+FUNC short_session_thread;
+FUNC read_thread;
+
+TestConnections * Test;
+
+const int threads_type_num = 4;
+int threads_num[threads_type_num];
+const int max_threads_num = 32;
+int port;
+char * IP;
+
+typedef struct
+{
+int id;
+bool exit_flag;
+char * sql;
+} t_data;
+
+t_data data[threads_type_num][max_threads_num];
+
+int main(int argc, char *argv[])
+{
+Test = new TestConnections(argc, argv);
+int i, j;
+
+Test->tprintf("***************************************************\n"
+"This is long running test to catch memory leaks and crashes\n"
+"please define 'long_test_time' variable to set running time (seconds)\n"
+"***************************************************\n");
+
+pthread_t thread_id[threads_type_num][max_threads_num];
+FUNC * thread[threads_type_num];
+thread[0] = query_thread;
+threads_num[0] = 1;
+thread[1] = transaction_thread;
+threads_num[1] = 1;
+thread[2] = prepared_stmt_thread;
+threads_num[2] = 1;
+thread[3] = read_thread;
+threads_num[3] = 1;
+
+//thread[4] = short_session_thread;
+//threads_num[4] = 4;
+
+
+port = Test->maxscales->rwsplit_port[0];
+IP = Test->maxscales->IP[0];
+
+//port = 3306;
+//IP = Test->repl->IP[0];
+
+
+Test->set_timeout(60);
+Test->tprintf("Set big maximums\n");
+
+Test->repl->execute_query_all_nodes((char *) "set global max_connections = 300000;");
+Test->repl->execute_query_all_nodes((char *) "set global max_connect_errors = 10000000;");
+Test->repl->execute_query_all_nodes((char *) "set global expire_logs_days = 1;");
+
+
+
+Test->maxscales->connect_rwsplit(0);
+
+Test->repl->execute_query_all_nodes( (char *) "set global max_allowed_packet=100000000");
+
+Test->tprintf("create t1 in `test` DB\n");
+create_t1(Test->maxscales->conn_rwsplit[0]);
+
+execute_query(Test->maxscales->conn_rwsplit[0], "DROP DATABASE test1");
+execute_query(Test->maxscales->conn_rwsplit[0], "DROP DATABASE test2");
+Test->tprintf("create`test1` DB\n");
+Test->try_query(Test->maxscales->conn_rwsplit[0], "CREATE DATABASE test1");
+
+Test->tprintf("create`test2` DB\n");
+Test->try_query(Test->maxscales->conn_rwsplit[0], "CREATE DATABASE test2");
+
+Test->tprintf("Waiting for slaves after DB creation\n");
+Test->repl->sync_slaves(0);
+//sleep(15);
+Test->tprintf("...ok\n");
+
+Test->tprintf("create t1 in `test1` DB\n");
+Test->tprintf("... use\n");
+Test->try_query(Test->maxscales->conn_rwsplit[0], "USE test1");
+Test->tprintf("... create\n");
+create_t1(Test->maxscales->conn_rwsplit[0]);
+
+Test->tprintf("create t1 in `test2` DB\n");
+Test->tprintf("... use\n");
+Test->try_query(Test->maxscales->conn_rwsplit[0], "USE test2");
+Test->tprintf("... create\n");
+create_t1(Test->maxscales->conn_rwsplit[0]);
+
+Test->tprintf("Waiting for slaves after tables creation\n");
+Test->repl->sync_slaves(0);
+
+Test->tprintf("...ok\n");
+
+Test->set_timeout(60);
+// Create threads
+Test->tprintf("Starting threads\n");
+
+for (j = 0; j < threads_type_num; j++)
+{
+for (i = 0; i < threads_num[j]; i++)
+{
+data[j][i].sql = (char*) malloc((i +1) * 32 * 14 + 32);
+create_insert_string(data[j][i].sql, (i + 1) * 32 , i);
+Test->tprintf("sqL %d: %d\n", i, strlen(data[j][i].sql));
+data[j][i].exit_flag = false;
+data[j][i].id = i;
+pthread_create(&thread_id[j][i], NULL, thread[j], &data[j][i]);
+}
+}
+
+Test->set_log_copy_interval(100);
+
+Test->stop_timeout();
+
+char * env = getenv("long_test_time");
+int test_time = 0;
+if (env != NULL)
+{
+sscanf(env, "%d", &test_time);
+}
+if (test_time <= 0)
+{
+test_time = 3600;
+Test->tprintf("´long_test_time´ variable is not defined, set test_time to %d\n", test_time);
+}
+Test->tprintf("´test_time´ is %d\n", test_time);
+sleep(test_time);
+
+Test->set_timeout(180);
+
+Test->tprintf("Stopping threads\n");
+
+for (j = 0; j < threads_type_num; j++)
+{
+for (i = 0; i < threads_num[j]; i++)
+{
+data[j][i].exit_flag = true;
+pthread_join(thread_id[j][i], NULL);
+}
+}
+
+//Test->tprintf("Checking if MaxScale is still alive!\n");
+//fflush(stdout);
+//Test->check_maxscale_alive(0);
+
+Test->maxscales->stop_maxscale(0);
+
+int rval = Test->global_result;
+delete Test;
+return rval;
+}
+
+void try_and_reconnect(MYSQL * conn, char * db, char * sql)
+{
+if (execute_query(conn, "%s", sql))
+{
+Test->tprintf("reconnect");
+mysql_close(conn);
+conn = open_conn_db_timeout(port,
+IP,
+db,
+Test->repl->user_name,
+Test->repl->password,
+20,
+Test->ssl);
+}
+}
+
+void *query_thread(void *ptr )
+{
+MYSQL * conn;
+t_data * data = (t_data *) ptr;
+int inserts_until_optimize = 100000;
+int tn = 0;
+conn = open_conn_db_timeout(port,
+IP,
+(char *) "test",
+Test->repl->user_name,
+Test->repl->password,
+20,
+Test->ssl);
+while (!data->exit_flag)
+{
+
+//Test->try_query(conn, data->sql);
+try_and_reconnect(conn, (char *) "test", data->sql);
+
+if (tn >= inserts_until_optimize)
+{
+tn = 0;
+Test->tprintf("Removing everything from table in the queries thread");
+try_and_reconnect(conn, (char *) "test", (char *) "DELETE FROM t1");
+Test->tprintf("Optimizing table in the queries thread");
+try_and_reconnect(conn, (char *) "test", (char *) "OPTIMIZE TABLE t1");
+}
+tn++;
+}
+mysql_close(conn);
+return NULL;
+}
+
+void *read_thread(void *ptr )
+{
+MYSQL * conn;
+t_data * data = (t_data *) ptr;
+int i = 0;
+char sql[256];
+conn = open_conn_db_timeout(port,
+IP,
+(char *) "test",
+Test->repl->user_name,
+Test->repl->password,
+20,
+Test->ssl);
+while (!data->exit_flag)
+{
+sprintf(sql, "SELECT * FROM t1 WHERE fl=%d", data->id);
+try_and_reconnect(conn, (char *) "test", sql);
+i++;
+}
+mysql_close(conn);
+return NULL;
+}
+
+void *transaction_thread(void *ptr )
+{
+MYSQL * conn;
+int transactions_until_optimize = 10;
+int tn = 0;
+t_data * data = (t_data *) ptr;
+conn = open_conn_db_timeout(port,
+IP,
+(char *) "test1",
+Test->repl->user_name,
+Test->repl->password,
+20,
+Test->ssl);
+while (!data->exit_flag)
+{
+
+try_and_reconnect(conn, (char *) "test1", (char *) "START TRANSACTION");
+try_and_reconnect(conn, (char *) "test1", (char *) "SET autocommit = 0");
+
+int stmt_num = 200000 / strlen(data->sql);
+for (int i = 0; i < stmt_num; i++)
+{
+try_and_reconnect(conn, (char *) "test1", data->sql);
+}
+Test->try_query(conn, (char *) "COMMIT");
+if (tn >= transactions_until_optimize)
+{
+tn = 0;
+Test->tprintf("Removing everything from table in the transactions thread");
+try_and_reconnect(conn, (char *) "test1", (char *) "DELETE FROM t1");
+Test->tprintf("Optimizing table in the transactions thread");
+try_and_reconnect(conn, (char *) "test1", (char *) "OPTIMIZE TABLE t1");
+}
+tn++;
+}
+mysql_close(conn);
+
+conn = open_conn_db_timeout(port,
+IP,
+(char *) "",
+Test->maxscales->user_name,
+Test->maxscales->password,
+20,
+Test->ssl);
+Test->try_query(conn, "DROP DATABASE test1");
+mysql_close(conn);
+return NULL;
+}
+
+void *short_session_thread(void *ptr )
+{
+MYSQL * conn;
+t_data * data = (t_data *) ptr;
+while (!data->exit_flag)
+{
+conn = open_conn_db_timeout(port,
+IP,
+(char *) "test",
+Test->repl->user_name,
+Test->repl->password,
+20,
+Test->ssl);
+mysql_close(conn);
+}
+return NULL;
+}
+
+
+void *prepared_stmt_thread(void *ptr )
+{
+MYSQL * conn;
+t_data * data = (t_data *) ptr;
+char sql[256];
+conn = open_conn_db_timeout(port,
+IP,
+(char *) "test2",
+Test->repl->user_name,
+Test->repl->password,
+20,
+Test->ssl);
+while (!data->exit_flag)
+{
+sprintf(sql, "PREPARE stmt%d FROM 'SELECT * FROM t1 WHERE fl=@x;';", data->id);
+try_and_reconnect(conn, (char *) "test2", sql);
+try_and_reconnect(conn, (char *) "test2", (char *) "SET @x = 3;");
+sprintf(sql, "EXECUTE stmt%d", data->id);
+try_and_reconnect(conn, (char *) "test2", sql);
+try_and_reconnect(conn, (char *) "test2", (char *) "SET @x = 4;");
+try_and_reconnect(conn, (char *) "test2", sql);
+try_and_reconnect(conn, (char *) "test2", (char *) "SET @x = 400;");
+try_and_reconnect(conn, (char *) "test2", sql);
+sprintf(sql, "DEALLOCATE PREPARE stmt%d", data->id);
+try_and_reconnect(conn, (char *) "test2", sql);
+}
+mysql_close(conn);
+
+conn = open_conn_db_timeout(port,
+IP,
+(char *) "",
+Test->maxscales->user_name,
+Test->maxscales->password,
+20,
+Test->ssl);
+Test->try_query(conn, "DROP DATABASE test2");
+mysql_close(conn);
+return NULL;
+}
@@ -2,12 +2,24 @@
 #include <sstream>
 #include <unordered_map>
 
-Maxscales::Maxscales(const char* pref, const char* test_cwd, bool verbose)
+Maxscales::Maxscales(const char* pref, const char* test_cwd, bool verbose, bool use_valgrind)
 {
 strcpy(prefix, pref);
 this->verbose = verbose;
+this->use_valgrind = use_valgrind;
+valgring_log_num = 0;
 strcpy(test_dir, test_cwd);
 read_env();
+if (use_valgrind)
+{
+for (int i = 0; i < N; i++)
+{
+ssh_node_f(i, true, "yum install -y valgrind gdb 2>&1");
+ssh_node_f(i, true, "apt install -y --force-yes valgrind gdb 2>&1");
+ssh_node_f(i, true, "zypper -n install valgrind gdb 2>&1");
+ssh_node_f(i, true, "rm -rf /var/cache/maxscale/maxscale.lock");
+}
+}
 }
 
 int Maxscales::read_env()
@@ -231,14 +243,35 @@ int Maxscales::close_maxscale_connections(int m)
 
 int Maxscales::restart_maxscale(int m)
 {
-int res = ssh_node(m, "service maxscale restart", true);
+int res;
+if (use_valgrind)
+{
+res = stop_maxscale(m);
+res += start_maxscale(m);
+}
+else
+{
+res =ssh_node(m, "service maxscale restart", true);
+}
 fflush(stdout);
 return res;
 }
 
 int Maxscales::stop_maxscale(int m)
 {
-int res = ssh_node(m, "service maxscale stop", true);
+int res;
+if (use_valgrind)
+{
+res = ssh_node_f(m, true, "sudo kill $(pidof valgrind) 2>&1 > /dev/null");
+if ((res != 0) || atoi(ssh_node_output(m, "pidof valgrind", true, &res)) > 0)
+{
+res = ssh_node_f(m, true, "sudo kill -9 $(pidof valgrind) 2>&1 > /dev/null");
+}
+}
+else
+{
+res = ssh_node(m, "service maxscale stop", true);
+}
 fflush(stdout);
 return res;
 }
@@ -15,7 +15,7 @@ public:
 READCONN_SLAVE
 };
 
-Maxscales(const char* pref, const char* test_cwd, bool verbose);
+Maxscales(const char* pref, const char* test_cwd, bool verbose, bool use_valgrind);
 int read_env();
 
 /**
@@ -319,6 +319,18 @@ public:
 * @param m Number of Maxscale node
 */
 void wait_for_monitor(int intervals = 1, int m = 0);
 
+/**
+* @brief use_valrind if true Maxscale will be executed under Valgrind
+*/
+bool use_valgrind;
+
+/**
+* @brief valgring_log_num Counter for Maxscale restarts to avoid Valgrind log overwriting
+*/
+int valgring_log_num;
+
+
 };
 
 #endif // MAXSCALES_H
@@ -32,26 +32,17 @@ ${mdbci_dir}/mdbci --override --template ${MDBCI_VM_PATH}/${name}.json generate
 mkdir ${MDBCI_VM_PATH}/$name/cnf
 cp -r ${script_dir}/cnf/* ${MDBCI_VM_PATH}/$name/cnf/
 
-
-while [ -f ~/vagrant_lock ]
-do
-echo "vagrant is locked, waiting ..."
-sleep 5
-done
-touch ~/vagrant_lock
-echo ${JOB_NAME}-${BUILD_NUMBER} >> ~/vagrant_lock
-
 echo "running vagrant up $provider"
 
 ${mdbci_dir}/mdbci up $name --attempts 3
 if [ $? != 0 ]; then
 echo "Error creating configuration"
-rm ~/vagrant_lock
+rm -f ~/vagrant_lock
 exit 1
 fi
 
 #cp ~/build-scripts/team_keys .
 ${mdbci_dir}/mdbci public_keys --key ${team_keys} $name
 
-rm ~/vagrant_lock
+rm -f ~/vagrant_lock
 exit 0
@@ -40,6 +40,10 @@
 
 # $test_set - parameters to be send to 'ctest' (e.g. '-I 1,100',
 # '-LE UNSTABLE'
+# if $test_set starts from 'NAME#' ctest will not be executed,
+# the value of $test_set after 'NAME#' is used as bash command
+# line
+# example: '#NAME long_test_time=3600 ./long_test'
 
 export vm_memory=${vm_memory:-"2048"}
 export dir=`pwd`
@@ -73,11 +77,15 @@ if [ $res == 0 ] ; then
 set -x
 echo ${test_set} | grep "NAME#"
 if [ $? == 0 ] ; then
-named_test=`echo ${test_set} | sed "s/NAME#//" | sed "s/ //g"`
+named_test=`echo ${test_set} | sed "s/NAME#//"`
+echo ${named_test} | grep "\./"
+if [ $? != 0 ] ; then
+named_test="./"${named_test}
+fi
 fi
 
 if [ ! -z "${named_test}" ] ; then
-./${named_test}
+eval ${named_test}
 else
 ./check_backend
 if [ $? != 0 ]; then
@@ -85,7 +93,7 @@ set -x
 if [ "${do_not_destroy_vm}" != "yes" ] ; then
 ${mdbci_dir}/mdbci destroy $name
 fi
-rm ~/vagrant_lock
+rm -f ~/vagrant_lock
 exit 1
 fi
 ${mdbci_dir}/mdbci snapshot take --path-to-nodes $name --snapshot-name clean
@@ -99,7 +107,7 @@ else
 if [ "${do_not_destroy_vm}" != "yes" ] ; then
 ${mdbci_dir}/mdbci destroy $name
 fi
-rm ~/vagrant_lock
+rm -f ~/vagrant_lock
 exit 1
 fi
 
@@ -81,7 +81,7 @@ cd ${script_dir}/../../
 rm -rf build
 
 mkdir build && cd build
-cmake .. -DBUILDNAME=$JOB_NAME-$BUILD_NUMBER-$target -DBUILD_SYSTEM_TESTS=Y
+cmake .. -DBUILDNAME=$JOB_NAME-$BUILD_NUMBER-$target -DBUILD_SYSTEM_TESTS=Y -DCMAKE_BUILD_TYPE=Debug
 cd maxscale-system-test
 make
 
@@ -52,9 +52,9 @@
 }
 },
 
-"maxscale" :
+"maxscale_000" :
 {
-"hostname" : "maxscale",
+"hostname" : "maxscale1",
 "box" : "${box}",
 "memory_size" : "${vm_memory}",
 "product" : {
@@ -495,3 +495,13 @@ const char* Nodes::ip(int i) const
 {
 return use_ipv6 ? IP6[i] : IP[i];
 }
+
+int Nodes::start_vm(int node)
+{
+return(system(start_vm_command[node]));
+}
+
+int Nodes::stop_vm(int node)
+{
+return(system(stop_vm_command[node]));
+}
@@ -174,6 +174,20 @@ public:
 */
 int read_basic_env();
 
+/**
+* @brief start_vm Start virtual machine
+* @param node Node number
+* @return 0 in case of success
+*/
+int start_vm(int node);
+
+/**
+* @brief stop_vm Stop virtual machine
+* @param node Node number
+* @return 0 in case of success
+*/
+int stop_vm(int node);
+
 private:
 int check_node_ssh(int node);
 };
@@ -169,6 +169,7 @@ int RDS::destroy_route_tables()
 json_t* root;
 char cmd[1024];
 char* json;
+int res = 0;
 
 sprintf(cmd, "aws ec2 describe-vpcs --vpc-ids=%s", vpc_id_intern);
 if (execute_cmd(cmd, &json))
@@ -197,11 +198,11 @@ int RDS::destroy_route_tables()
 if (strcmp(vpc_id_intern, vpc_id) == 0)
 {
 sprintf(cmd, "aws ec2 delete-route-table --route-table-id %s", rt_id);
-system(cmd);
+res += system(cmd);
 }
 }
 
-return 0;
+return res;
 }
 
 int RDS::detach_and_destroy_gw()
@@ -477,6 +478,7 @@ int RDS::create_cluster()
 char* result;
 json_error_t error;
 size_t i;
+int res = 0;
 
 sprintf(cmd,
 "aws rds create-db-cluster --database-name=test --engine=aurora --master-username=skysql --master-user-password=skysqlrds --db-cluster-identifier=%s --db-subnet-group-name=%s",
@@ -503,7 +505,7 @@ int RDS::create_cluster()
 sprintf(cmd,
 "aws ec2 authorize-security-group-ingress --group-id %s --protocol tcp --port 3306 --cidr 0.0.0.0/0",
 sg_id);
-system(cmd);
+res += system(cmd);
 }
 sg_intern = sg_id;
 
@@ -514,9 +516,9 @@ int RDS::create_cluster()
 cluster_name_intern,
 i);
 printf("%s\n", cmd);
-system(cmd);
+res += system(cmd);
 }
-return 0;
+return res;
 }
 
 int RDS::get_writer(const char** writer_name)
@@ -206,7 +206,7 @@ int main(int argc, char* argv[])
 Test->maxscales->access_user[0],
 Test->maxscales->IP[0],
 Test->maxscales->access_homedir[0]);
-system(str);
+Test->add_result(system(str), "Error copying script to VM");
 
 sprintf(str, "%s/script_output_expected", Test->maxscales->access_homedir[0]);
 test_script_monitor(Test, Test->repl, str);
@@ -9,6 +9,7 @@
 #include <sys/stat.h>
 #include <iostream>
 #include <sstream>
+#include <fstream>
 #include <maxbase/stacktrace.hh>
 
 #include "mariadb_func.h"
@@ -120,6 +121,7 @@ TestConnections::TestConnections(int argc, char* argv[])
 , no_vm_revert(true)
 , threads(4)
 , use_ipv6(false)
+, use_valgrind(false)
 {
 std::ios::sync_with_stdio(true);
 signal_set(SIGSEGV, sigfatal_handler);
@@ -293,7 +295,7 @@ TestConnections::TestConnections(int argc, char* argv[])
 repl->take_snapshot_command = take_snapshot_command;
 repl->revert_snapshot_command = revert_snapshot_command;
 
-maxscales = new Maxscales("maxscale", test_dir, verbose);
+maxscales = new Maxscales("maxscale", test_dir, verbose, use_valgrind);
 
 maxscales->use_ipv6 = use_ipv6;
 maxscales->ssl = ssl;
@@ -404,6 +406,16 @@ TestConnections::~TestConnections()
 // galera->disable_ssl();
 }
 
+if (use_valgrind)
+{
+// stop all Maxscales to get proper Valgrind logs
+for (int i = 0; i < maxscales->N; i++)
+{
+stop_maxscale(i);
+}
+sleep(15); // sleep to let logs be written do disks
+}
+
 copy_all_logs();
 
 /* Temporary disable snapshot revert due to Galera failures
@@ -482,7 +494,6 @@ void TestConnections::expect(bool result, const char* format, ...)
 
 void TestConnections::read_env()
 {
-
 char* env;
 
 if (verbose)
@@ -490,7 +501,6 @@ void TestConnections::read_env()
 printf("Reading test setup configuration from environmental variables\n");
 }
 
-
 // env = getenv("get_logs_command"); if (env != NULL) {sprintf(get_logs_command, "%s", env);}
 
 env = getenv("sysbench_dir");
@@ -599,6 +609,12 @@ void TestConnections::read_env()
 {
 no_vm_revert = false;
 }
 
+env = getenv("use_valgrind");
+if ((env != NULL) && ((strcasecmp(env, "yes") == 0) || (strcasecmp(env, "true") == 0)))
+{
+use_valgrind = true;
+}
 }
 
 void TestConnections::print_env()
@@ -770,17 +786,24 @@ void TestConnections::init_maxscale(int m)
 
 void TestConnections::copy_one_mariadb_log(int i, std::string filename)
 {
-int exit_code;
-char* mariadb_log = repl->ssh_node_output(i, "cat /var/lib/mysql/*.err 2>/dev/null", true, &exit_code);
-FILE* f = fopen(filename.c_str(), "w");
-
-if (f != NULL)
+auto log_retrive_commands =
 {
-fwrite(mariadb_log, sizeof(char), strlen(mariadb_log), f);
-fclose(f);
-}
+"cat /var/lib/mysql/*.err",
+"cat /var/log/syslog | grep mysql",
+"cat /var/log/messages | grep mysql"
+};
 
-free(mariadb_log);
+int j = 1;
+
+for (auto cmd : log_retrive_commands)
+{
+std::ofstream outfile(filename + std::to_string(j++));
+
+if (outfile)
+{
+outfile << repl->ssh_output(cmd, i).second;
+}
+}
 }
 
 int TestConnections::copy_mariadb_logs(Mariadb_nodes* repl,
@@ -793,8 +816,8 @@ int TestConnections::copy_mariadb_logs(Mariadb_nodes* repl,
 {
 for (int i = 0; i < repl->N; i++)
 {
-if (strcmp(repl->IP[i], "127.0.0.1") != 0) // Do not copy MariaDB logs in case of local
-// backend
+// Do not copy MariaDB logs in case of local backend
+if (strcmp(repl->IP[i], "127.0.0.1") != 0)
 {
 char str[4096];
 sprintf(str, "LOGS/%s/%s%d_mariadb_log", test_name, prefix, i);
@@ -857,21 +880,21 @@ int TestConnections::copy_maxscale_logs(double timestamp)
 if (strcmp(maxscales->IP[i], "127.0.0.1") != 0)
 {
 int rc = maxscales->ssh_node_f(i, true,
 "rm -rf %s/logs;"
 "mkdir %s/logs;"
 "cp %s/*.log %s/logs/;"
 "cp /tmp/core* %s/logs/;"
 "cp %s %s/logs/;"
 "chmod 777 -R %s/logs;"
 "ls /tmp/core* && exit 42;",
 maxscales->access_homedir[i],
 maxscales->access_homedir[i],
 maxscales->maxscale_log_dir[i],
 maxscales->access_homedir[i],
 maxscales->access_homedir[i],
 maxscales->maxscale_cnf[i],
 maxscales->access_homedir[i],
 maxscales->access_homedir[i]);
 sprintf(sys, "%s/logs/*", maxscales->access_homedir[i]);
 maxscales->copy_from_node(i, sys, log_dir_i);
 expect(rc != 42, "Test should not generate core files");
@@ -1158,9 +1181,9 @@ bool TestConnections::replicate_from_master(int m)
 repl->execute_query_all_nodes("STOP SLAVE");
 
 /** Clean up MaxScale directories */
-maxscales->ssh_node(m, "service maxscale stop", true);
+maxscales->stop_maxscale(m);
 prepare_binlog(m);
-maxscales->ssh_node(m, "service maxscale start", true);
+maxscales->start_maxscale(m);
 
 char log_file[256] = "";
 char log_pos[256] = "4";
@@ -1368,11 +1391,13 @@ int TestConnections::find_connected_slave1(int m)
 
 int TestConnections::check_maxscale_processes(int m, int expected)
 {
+const char* ps_cmd = use_valgrind ?
+"ps ax | grep valgrind | grep maxscale | grep -v grep | wc -l" :
+"ps -C maxscale | grep maxscale | wc -l";
+
 int exit_code;
-char* maxscale_num = maxscales->ssh_node_output(m,
-"ps -C maxscale | grep maxscale | wc -l",
-false,
-&exit_code);
+char* maxscale_num = maxscales->ssh_node_output(m, ps_cmd, false, &exit_code);
 if ((maxscale_num == NULL) || (exit_code != 0))
 {
 return -1;
@@ -1385,12 +1410,10 @@ int TestConnections::check_maxscale_processes(int m, int expected)
 
 if (atoi(maxscale_num) != expected)
 {
-tprintf("%s maxscale processes detected, trying agin in 5 seconds\n", maxscale_num);
+tprintf("%s maxscale processes detected, trying again in 5 seconds\n", maxscale_num);
 sleep(5);
-maxscale_num = maxscales->ssh_node_output(m,
-"ps -C maxscale | grep maxscale | wc -l",
-false,
-&exit_code);
+maxscale_num = maxscales->ssh_node_output(m, ps_cmd, false, &exit_code);
 if (atoi(maxscale_num) != expected)
 {
 add_result(1, "Number of MaxScale processes is not %d, it is %s\n", expected, maxscale_num);
@@ -1402,7 +1425,7 @@ int TestConnections::check_maxscale_processes(int m, int expected)
 
 int TestConnections::stop_maxscale(int m)
 {
-int res = maxscales->ssh_node(m, "service maxscale stop", true);
+int res = maxscales->stop_maxscale(m);
 check_maxscale_processes(m, 0);
 fflush(stdout);
 return res;
@@ -1410,7 +1433,7 @@ int TestConnections::stop_maxscale(int m)
 
 int TestConnections::start_maxscale(int m)
 {
-int res = maxscales->ssh_node(m, "service maxscale start", true);
+int res = maxscales->start_maxscale(m);
 check_maxscale_processes(m, 1);
 fflush(stdout);
 return res;
@@ -1436,7 +1459,6 @@ int TestConnections::check_maxscale_alive(int m)
 maxscales->close_maxscale_connections(m);
 add_result(global_result - gr, "Maxscale is not alive\n");
 stop_timeout();
-
 check_maxscale_processes(m, 1);
 
 return global_result - gr;
@@ -2040,14 +2062,14 @@ void TestConnections::check_current_connections(int m, int value)
 
 int TestConnections::take_snapshot(char* snapshot_name)
 {
-char str[4096];
+char str[strlen(take_snapshot_command) + strlen(snapshot_name) + 2];
 sprintf(str, "%s %s", take_snapshot_command, snapshot_name);
 return system(str);
 }
 
 int TestConnections::revert_snapshot(char* snapshot_name)
 {
-char str[4096];
+char str[strlen(revert_snapshot_command) + strlen(snapshot_name) + 2];
 sprintf(str, "%s %s", revert_snapshot_command, snapshot_name);
 return system(str);
 }
@@ -531,8 +531,6 @@ public:
 */
 int list_dirs(int m = 0);
-
-
 
 /**
 * @brief make_snapshot Makes a snapshot for all running VMs
 * @param snapshot_name name of created snapshot
@@ -605,6 +603,11 @@ public:
 m_on_destroy.push_back(func);
 }
 
+/**
+* @brief use_valrind if true Maxscale will be executed under Valgrind
+*/
+bool use_valgrind;
+
 private:
 void report_result(const char* format, va_list argp);
 void copy_one_mariadb_log(int i, std::string filename);
@@ -250,17 +250,20 @@ static int database_cb(void* data, int columns, char** rows, char** row_names)
 return 0;
 }
 
-static bool check_database(sqlite3* handle, const char* database)
+static bool check_database(MYSQL_AUTH* instance, sqlite3* handle, const char* database)
 {
 bool rval = true;
 
 if (*database)
 {
 rval = false;
-size_t len = sizeof(mysqlauth_validate_database_query) + strlen(database) + 1;
+const char* query = instance->lower_case_table_names ?
+mysqlauth_validate_database_query_lower :
+mysqlauth_validate_database_query;
+size_t len = strlen(query) + strlen(database) + 1;
 char sql[len];
 
-sprintf(sql, mysqlauth_validate_database_query, database);
+sprintf(sql, query, database);
 
 char* err;
 
@@ -391,7 +394,7 @@ int validate_mysql_user(MYSQL_AUTH* instance,
 session->client_sha1))
 {
 /** Password is OK, check that the database exists */
-if (check_database(handle, session->db))
+if (check_database(instance, handle, session->db))
 {
 rval = MXS_AUTH_SUCCEEDED;
 }
@@ -81,6 +81,8 @@ static const char mysqlauth_skip_auth_query[] =
 /** Query that checks that the database exists */
 static const char mysqlauth_validate_database_query[] =
 "SELECT * FROM " MYSQLAUTH_DATABASES_TABLE_NAME " WHERE db = '%s' LIMIT 1";
+static const char mysqlauth_validate_database_query_lower[] =
+"SELECT * FROM " MYSQLAUTH_DATABASES_TABLE_NAME " WHERE LOWER(db) = LOWER('%s') LIMIT 1";
 
 /** Delete query used to clean up the database before loading new users */
 static const char delete_users_query[] = "DELETE FROM " MYSQLAUTH_USERS_TABLE_NAME;