Merge branch '2.3' into develop
commit 9572ff84ea
@@ -63,13 +6,6 @@ if [ "$already_running" != "ok" ]; then
$(<${script_dir}/templates/build.json.template)
" 2> /dev/null > $MDBCI_VM_PATH/${name}.json

while [ -f ~/vagrant_lock ]
do
sleep 5
done
touch ~/vagrant_lock
echo $JOB_NAME-$BUILD_NUMBER >> ~/vagrant_lock

# starting VM for build
echo "Generating build VM template"
${mdbci_dir}/mdbci --override --template $MDBCI_VM_PATH/$name.json generate $name
@@ -77,7 +70,6 @@ $(<${script_dir}/templates/build.json.template)
${mdbci_dir}/mdbci up --attempts=1 $name
if [ $? != 0 ] ; then
echo "Error starting VM"
rm ~/vagrant_lock
exit 1
fi
echo "copying public keys to VM"
@@ -92,9 +84,6 @@ export sshkey=`${mdbci_dir}/mdbci show keyfile $name/build --silent 2> /dev/null
export scpopt="-i $sshkey -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ConnectTimeout=120 "
export sshopt="$scpopt $sshuser@$IP"

echo "Release Vagrant lock"
rm ~/vagrant_lock

echo "Starting build"
${script_dir}/remote_build.sh
export build_result=$?
@@ -102,11 +91,17 @@ export build_result=$?
shellcheck `find . | grep "\.sh"` | grep -i "POSIX sh"
if [ $? -eq 0 ] ; then
echo "POSIX sh errors are found in the scripts"
# exit 1
fi

${script_dir}/create_remote_repo.sh
${script_dir}/copy_repos.sh
if [ ${build_result} -eq 0 ]; then
${script_dir}/create_remote_repo.sh
export build_result=$?
fi

if [ ${build_result} -eq 0 ]; then
${script_dir}/copy_repos.sh
export build_result=$?
fi

echo "Removing locks and destroying VM"
@@ -4,25 +4,42 @@

dir=`pwd`
if [ "$box_type" == "RPM" ] ; then
export arch=`ssh $sshopt "arch"`
. ${script_dir}/generate_build_info_path.sh
# For RHEL, packages are not going to the repo
# Build can be executed to check if it is possible to build
# and to run install and upgrade tests
# with the real RHEL, but we use CentOS packages for production
if [ "$platform" != "rhel" ] ; then
export arch=`ssh $sshopt "arch"`
. ${script_dir}/generate_build_info_path.sh

rm -rf $path_prefix/$platform/$platform_version/$arch/
mkdir -p $path_prefix/$platform/$platform_version/$arch/
rsync -avz --progress ${unsorted_repo_dir}/$repo_name/$box/* $path_prefix/$platform/$platform_version/$arch/
env > $build_info_path
find $path_prefix/.. -type d -exec chmod 755 {} \;
find $path_prefix/.. -type f -exec chmod 644 {} \;
cd $path_prefix/$platform
ln -s $platform_version "$platform_version"server
ln -s $platform_version "$platform_version"Server
rm -rf $path_prefix/$platform/$platform_version/$arch/
mkdir -p $path_prefix/$platform/$platform_version/$arch/
rsync -avz --progress ${unsorted_repo_dir}/$repo_name/$box/* $path_prefix/$platform/$platform_version/$arch/
if [ $? != 0 ] ; then
echo "Error copying repos"
exit 1
fi
env > $build_info_path
find $path_prefix/.. -type d -exec chmod 755 {} \;
find $path_prefix/.. -type f -exec chmod 644 {} \;
cd $path_prefix/$platform
ln -s $platform_version "$platform_version"server
ln -s $platform_version "$platform_version"Server
if [ "$platform" == "centos" ] ; then
cd ..
ln -s centos rhel
fi

eval "cat <<EOF
$(<${script_dir}/templates/repository-config/rpm.json.template)
" 2> /dev/null > ${path_prefix}/${platform}_${platform_version}.json


echo "copying done"
echo "copying done"
else
echo "RHEL! Not copying packages to the repo"
fi

else
export arch=`ssh $sshopt "dpkg --print-architecture"`
. ${script_dir}/generate_build_info_path.sh
@@ -30,6 +47,10 @@ else
rm -rf $path_prefix/$platform_family/dists/$platform_version/main/binary-i386
mkdir -p $path_prefix/$platform_family/
rsync -avz --progress ${unsorted_repo_dir}/$repo_name/$box/* $path_prefix/$platform_family/
if [ $? != 0 ] ; then
echo "Error copying repos"
exit 1
fi
env > $build_info_path
find $path_prefix/.. -type d -exec chmod 755 {} \;
find $path_prefix/.. -type f -exec chmod 644 {} \;
@@ -21,13 +21,6 @@ eval "cat <<EOF
$(<${script_dir}/templates/install.json.template)
" 2> /dev/null > $MDBCI_VM_PATH/${name}.json

while [ -f ~/vagrant_lock ]
do
sleep 5
done
touch ~/vagrant_lock
echo $JOB_NAME-$BUILD_NUMBER >> ~/vagrant_lock

# destroying existing box
if [ -d "install_$box" ]; then
${mdbci_dir}/mdbci destroy $name
@@ -42,12 +35,12 @@ if [ $? != 0 ] ; then
if [ "x$do_not_destroy_vm" != "xyes" ] ; then
${mdbci_dir}/mdbci destroy $name
fi
rm ~/vagrant_lock
rm -f ~/vagrant_lock
exit 1
fi
fi

rm ~/vagrant_lock
rm -f ~/vagrant_lock

# get VM info
export sshuser=`${mdbci_dir}/mdbci ssh --command 'whoami' --silent $name/maxscale 2> /dev/null | tr -d '\r'`
@@ -53,6 +53,7 @@ Here are tutorials on monitoring and managing MariaDB MaxScale in cluster enviro

- [MariaDB MaxScale HA with Lsyncd](Tutorials/MaxScale-HA-with-lsyncd.md)
- [Nagios Plugins for MariaDB MaxScale Tutorial](Tutorials/Nagios-Plugins.md)
- [REST API Tutorial](Tutorials/REST-API-Tutorial.md)

## Routers

Documentation/Tutorials/REST-API-Tutorial.md (new file, 355 lines)
@@ -0,0 +1,355 @@
# REST API Tutorial

This tutorial is a quick overview of what the MaxScale REST API offers, how it
can be used to inspect the state of MaxScale and how to use it to modify the
runtime configuration of MaxScale. The tutorial uses the `curl` command line
client to demonstrate how the API is used.

## Configuration and Hardening

The MaxScale REST API listens on port 8989 on the local host. The `admin_port`
and `admin_host` parameters control which port and address the REST API listens
on. Note that for security reasons the API only listens for local connections
with the default configuration. It is critical that the default credentials are
changed and TLS/SSL encryption is configured before exposing the REST API to a
network.
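
For example, if the API should later be reachable from other hosts, the listen
address and port are set under the `[maxscale]` section. This is a minimal
sketch and the values are illustrative; only expose the API like this after
the credential and TLS steps below are done.

```
[maxscale]
admin_host=0.0.0.0
admin_port=8989
```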

The default user for the REST API is `admin` and the password is `mariadb`. The
easiest way to secure the REST API is to use the `maxctrl` command line client
to create a new admin user and delete the default one. To do this, run the
following commands:

```
maxctrl create user my_user my_password --type=admin
maxctrl destroy user admin
```

This will create the user `my_user` with the password `my_password` as an
administrative account. After this account is created, the default `admin`
account is removed with the next command.

The next step is to enable TLS encryption. To do this, you need a CA
certificate, a private key and a public certificate file, all in PEM format. Add
the following three parameters under the `[maxscale]` section of the MaxScale
configuration file and restart MaxScale.

```
admin_ssl_key=/certs/server-key.pem
admin_ssl_cert=/certs/server-cert.pem
admin_ssl_ca_cert=/certs/ca-cert.pem
```

Use `maxctrl` to verify that TLS encryption is enabled. In this tutorial our
server certificates are self-signed, so the `--tls-verify-server-cert=false`
option is required.

```
maxctrl --user=my_user --password=my_password --secure --tls-ca-cert=/certs/ca-cert.pem --tls-verify-server-cert=false show maxscale
```

If no errors are raised, communication via the REST API is now secure and can
be used across networks.
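
The same check can also be done with `curl`. This is a sketch assuming the
self-signed certificates from above: `--cacert` tells curl to trust our CA and
`-u` passes the credentials.

```
curl --cacert /certs/ca-cert.pem -u my_user:my_password https://127.0.0.1:8989/v1/maxscale
```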

## Requesting Data

**Note:** For the sake of brevity, the rest of this tutorial will omit the
TLS/SSL options for the `curl` command line. For more information, refer to the
`curl` manpage.

The most basic task to do with the REST API is to see whether MaxScale is up and
running. To do this, we do an HTTP request on the root resource (the `-i` option
shows the HTTP headers).

`curl -i 127.0.0.1:8989/v1/`
```
HTTP/1.1 200 OK
Connection: Keep-Alive
Content-Length: 0
Last-Modified: Mon, 04 Mar 2019 08:23:09 GMT
ETag: "0"
Date: Mon, 04 Mar 19 08:29:41 GMT
```
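
Since the root resource answers with HTTP 200 when MaxScale is up, this check
is easy to script. A minimal sketch: `-s` silences the output and `-f` makes
`curl` return a non-zero exit code on HTTP errors.

```
if curl -sf 127.0.0.1:8989/v1/ > /dev/null; then
    echo "MaxScale is up"
else
    echo "MaxScale is not responding"
fi
```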

To query a resource collection endpoint, append it to the URL. The `/v1/filters/`
endpoint shows the list of filters configured in MaxScale. This is a _resource
collection_ endpoint: it contains the list of all resources of a particular
type.

`curl 127.0.0.1:8989/v1/filters`
```
{
    "links": {
        "self": "http://127.0.0.1:8989/v1/filters/"
    },
    "data": [
        {
            "id": "Hint",
            "type": "filters",
            "relationships": {
                "services": {
                    "links": {
                        "self": "http://127.0.0.1:8989/v1/services/"
                    },
                    "data": [
                        {
                            "id": "RW-Split-Hint-Router",
                            "type": "services"
                        }
                    ]
                }
            },
            "attributes": {
                "module": "hintfilter",
                "parameters": {}
            },
            "links": {
                "self": "http://127.0.0.1:8989/v1/filters/Hint"
            }
        },
        {
            "id": "Logger",
            "type": "filters",
            "relationships": {
                "services": {
                    "links": {
                        "self": "http://127.0.0.1:8989/v1/services/"
                    },
                    "data": []
                }
            },
            "attributes": {
                "module": "qlafilter",
                "parameters": {
                    "match": null,
                    "exclude": null,
                    "user": null,
                    "source": null,
                    "filebase": "/tmp/log",
                    "options": "ignorecase",
                    "log_type": "session",
                    "log_data": "date,user,query",
                    "newline_replacement": "\" \"",
                    "separator": ",",
                    "flush": false,
                    "append": false
                },
                "filter_diagnostics": {
                    "separator": ",",
                    "newline_replacement": "\" \""
                }
            },
            "links": {
                "self": "http://127.0.0.1:8989/v1/filters/Logger"
            }
        }
    ]
}
```

The `data` member holds the actual list of resources: the `Hint` and `Logger`
filters. Each object has an `id` field, which is the unique name of that
object. It is the same as the section name in `maxscale.cnf`.
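
For reference, the `Logger` filter above would correspond to a `maxscale.cnf`
section along these lines (a sketch reconstructed from the JSON output; only
the non-null parameters are shown):

```
[Logger]
type=filter
module=qlafilter
filebase=/tmp/log
options=ignorecase
log_type=session
log_data=date,user,query
```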

Each resource in the list has a `relationships` object. This shows the
relationship links between resources. In our example, the `Hint` filter is used
by a service named `RW-Split-Hint-Router` and the `Logger` is not currently in
use.

To request an individual resource, we add the object name to the resource
collection URL. For example, if we want to get only the `Logger` filter, we
execute the following command.

`curl 127.0.0.1:8989/v1/filters/Logger`
```
{
    "links": {
        "self": "http://127.0.0.1:8989/v1/filters/Logger"
    },
    "data": {
        "id": "Logger",
        "type": "filters",
        "relationships": {
            "services": {
                "links": {
                    "self": "http://127.0.0.1:8989/v1/services/"
                },
                "data": []
            }
        },
        "attributes": {
            "module": "qlafilter",
            "parameters": {
                "match": null,
                "exclude": null,
                "user": null,
                "source": null,
                "filebase": "/tmp/log",
                "options": "ignorecase",
                "log_type": "session",
                "log_data": "date,user,query",
                "newline_replacement": "\" \"",
                "separator": ",",
                "flush": false,
                "append": false
            },
            "filter_diagnostics": {
                "separator": ",",
                "newline_replacement": "\" \""
            }
        },
        "links": {
            "self": "http://127.0.0.1:8989/v1/filters/Logger"
        }
    }
}
```

Note that this time the `data` member holds an object instead of an array of
objects. All other parts of the response are similar to what was shown in the
previous example.

## Creating Objects

One of the uses of the REST API is to create new objects in MaxScale at
runtime. This allows new servers, services, filters, monitors and listeners to
be created without restarting MaxScale.

For example, to create a new server in MaxScale, the JSON definition of a
server must be sent to the REST API at the `/v1/servers/` endpoint. The request
body defines the server name as well as the parameters for it.

To create objects with `curl`, first write the JSON definition into a file.

```
{
    "data": {
        "id": "server1",
        "type": "servers",
        "attributes": {
            "parameters": {
                "address": "127.0.0.1",
                "port": 3003
            }
        }
    }
}
```

To send the data, use the following command.

```
curl -X POST -d @new_server.txt 127.0.0.1:8989/v1/servers
```

The `-d` option takes a file name prefixed with a `@` as an argument. Here we
have `@new_server.txt`, which is the name of the file where the JSON definition
was stored. The `-X` option defines the HTTP verb to use; to create a new
object, we must use the POST verb.

To verify the data, request the newly created object.

```
curl 127.0.0.1:8989/v1/servers/server1
```

## Modifying Data

The easiest way to modify an object is to first request it, store the result in
a file, edit it and then send the updated object back to the REST API.

Let's say we want to modify the port that the server we created earlier listens
on. First we request the current object and store the result in a file.

```
curl 127.0.0.1:8989/v1/servers/server1 > server1.txt
```

After that, we edit the file and change the port from 3003 to 3306. Next, the
modified JSON object is sent to the REST API with a PATCH request. To do this,
execute the following command.

```
curl -X PATCH -d @server1.txt 127.0.0.1:8989/v1/servers/server1
```
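
If the `jq` tool is available, the request-edit-send cycle can be collapsed
into a single pipeline. This is a sketch, not part of the standard workflow;
`-d @-` makes `curl` read the request body from standard input.

```
curl -s 127.0.0.1:8989/v1/servers/server1 \
  | jq '.data.attributes.parameters.port = 3306' \
  | curl -X PATCH -d @- 127.0.0.1:8989/v1/servers/server1
```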

To verify that the data was updated correctly, request the updated object.

```
curl 127.0.0.1:8989/v1/servers/server1
```

## Object Relationships

To continue with our previous example, we add the updated server to a
service. To do this, the `relationships` object of the server must be modified
to include the service we want to add the server to.

To define a relationship between a server and a service, the `data` member must
have the `relationships` field and it must contain an object with the `services`
field (some fields omitted for brevity).

```
{
    "data": {
        "id": "server1",
        "type": "servers",
        "relationships": {
            "services": {
                "data": [
                    {
                        "id": "RW-Split-Router",
                        "type": "services"
                    }
                ]
            }
        },
        "attributes": ...
    }
}
```

The `data.relationships.services.data` field contains a list of objects that
define the `id` and `type` fields. The id is the name of the object (a service,
or a monitor in the case of servers) and the type tells which type it is. Only
`services` type objects should be present in the `services` object.

In our example we are linking the `server1` server to the `RW-Split-Router`
service. As was seen with the previous example, the easiest way to do this is to
store the result, edit it and then send it back with an HTTP PATCH.

If we want to remove a server from _all_ services, we can set the
`relationships` field to `{}`. The REST API interprets this as an instruction
to remove the server from all services and monitors. This is useful if you want
to delete the server, which can only be done if it has no relationships to other
objects.
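
A sketch of such a request body, assuming the same `server1` object:

```
{
    "data": {
        "id": "server1",
        "type": "servers",
        "relationships": {}
    }
}
```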

## Deleting Objects

To delete an object, simply execute an HTTP DELETE request on the resource you
want to delete. For example, to delete the `server1` server, execute the
following command.

```
curl -X DELETE 127.0.0.1:8989/v1/servers/server1
```

## Further Reading

The full list of all available endpoints in MaxScale can be found in the
[REST API documentation](../REST-API/API.md).

The `maxctrl` command line client is self-documenting and the `maxctrl help`
command is a good tool for exploring the various commands that are available in
it. The `maxctrl api get` command can be a useful way to explore the REST API,
as it provides an easy way to extract values out of the JSON data generated by
the REST API.
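
For example, a sketch of extracting a single value with it, assuming the
`server1` object from earlier still exists:

```
maxctrl api get servers/server1 data.attributes.parameters.port
```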

There is a multitude of REST API clients readily available, and most of them
are far more convenient to use than `curl`. We recommend investigating what you
need and how you intend to integrate or use the MaxScale REST API. Most modern
languages either have a built-in HTTP library or a de facto standard library
exists.

The MaxScale REST API follows the JSON API specification, and there exist
libraries built specifically for these sorts of APIs.
@@ -93,7 +93,10 @@ exports.builder = function(yargs) {
})
.command('service <service> <key> <value>', 'Alter service parameters', function(yargs) {
return yargs.epilog('To display the service parameters, execute `show service <service>`. ' +
'The following list of parameters can be altered at runtime:\n\n' + JSON.stringify(service_params, null, 4))
'Some routers support runtime configuration changes to all parameters. ' +
'Currently all readconnroute, readwritesplit and schemarouter parameters ' +
'can be changed at runtime. In addition to module specific parameters, ' +
'the following list of common service parameters can be altered at runtime:\n\n' + JSON.stringify(service_params, null, 4))
.usage('Usage: alter service <service> <key> <value>')
}, function(argv) {
maxctrl(argv, function(host) {
@@ -65,7 +65,7 @@ program
default: false,
type: 'boolean'
})
.group(['s', 'tls-key', 'tls-cert', 'tls-ca-cert', 'tls-verify-server-cert'], 'HTTPS/TLS Options:')
.group(['s', 'tls-key', 'tls-passphrase', 'tls-cert', 'tls-ca-cert', 'n'], 'HTTPS/TLS Options:')
.option('s', {
alias: 'secure',
describe: 'Enable HTTPS requests',
@@ -88,7 +88,8 @@ program
describe: 'Path to TLS CA certificate',
type: 'string'
})
.option('tls-verify-server-cert', {
.option('n', {
alias: 'tls-verify-server-cert',
describe: 'Whether to verify server TLS certificates',
default: true,
type: 'boolean'
@@ -831,9 +831,13 @@ add_test_executable_notest(delete_rds.cpp delete_rds replication LABELS EXTERN_B
# a tool to create RDS Aurora cluster
add_test_executable_notest(create_rds.cpp create_rds replication LABELS EXTERN_BACKEND)

# start sysbench ageints RWSplit for infinite execution
# start sysbench against RWSplit for infinite execution
add_test_executable_notest(long_sysbench.cpp long_sysbench replication LABELS readwritesplit REPL_BACKEND)

# own long test
# 'long_test_time' variable defines time of execution (in seconds)
add_test_executable_notest(long_test.cpp long_test replication LABELS readwritesplit REPL_BACKEND)

# test effect of local_address in configuration file
add_test_executable(local_address.cpp local_address local_address LABELS REPL_BACKEND)

@@ -141,13 +141,6 @@ https://help.ubuntu.com/lts/serverguide/libvirt.html
https://github.com/vagrant-libvirt/vagrant-libvirt#installation


### vagrant is locked, waiting ...

```bash
rm ~/vagrant_lock
```


### Random VM creation failures

Please check the amount of free memory and the number of running VMs.
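
A quick way to check both (a sketch; the second command assumes the libvirt
provider mentioned above, adjust it for other providers):

```bash
free -h
virsh list --state-running | wc -l
```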
@@ -121,7 +121,3 @@ If test run was executed with parameter 'do_not_destroy' set to 'yes' please do
[destroy](http://max-tst-01.mariadb.com:8089/view/axilary/job/destroy/) against your 'target'

This job also has to be executed if the test run job crashed or was interrupted.

If a build or test job crashes or is interrupted, or if Jenkins crashes during a Vagrant operation, it is possible that the Vagrant lock
stays in the locked state and no other job can progress (a job can be started, but it waits for the Vagrant lock;
'/home/vagrant/vagrant_lock' can be seen in the job log). In this case the lock can be removed by the [remove_lock](http://max-tst-01.mariadb.com:8089/view/axilary/job/remove_lock/) job.
@@ -64,6 +64,5 @@ int main(int argc, char *argv[])
    execute_query_silent(test.repl->nodes[0], "DROP USER user@'%%';");
    execute_query_silent(test.repl->nodes[0], "DROP TABLE test.t1");
    test.repl->disconnect();

    return test.global_result;
}
@@ -1,5 +1,6 @@
[maxscale]
threads=###threads###
#log_info=1

[MySQL-Monitor]
type=monitor
@@ -42,9 +42,9 @@ const char* rules_failure[] =
    NULL
};

void truncate_maxscale_logs(TestConnections& test)
int truncate_maxscale_logs(TestConnections& test)
{
    test.maxscales->ssh_node(0, "truncate -s 0 /var/log/maxscale/*", true);
    return test.maxscales->ssh_node(0, "truncate -s 0 /var/log/maxscale/max*", true);
}

void create_rule(const char* rule, const char* user)
@@ -64,7 +64,7 @@ int main(int argc, char** argv)
    for (int i = 0; rules_failure[i]; i++)
    {
        /** Create rule file with syntax error */
        truncate(temp_rules, 0);
        test.add_result(truncate(temp_rules, 0), "Failed to truncate");
        create_rule(rules_failure[i], users_ok[0]);
        char buf[PATH_MAX + 1];
        copy_rules(&test, (char*)temp_rules, (char*)getcwd(buf, sizeof(buf)));
@@ -77,7 +77,7 @@ int main(int argc, char** argv)
     * a message about the syntax error. */
    test.check_maxscale_processes(0, 0);
    test.log_includes(0, "syntax error");
    truncate_maxscale_logs(test);
    test.add_result(truncate_maxscale_logs(test), "Failed to truncate Maxscale logs");
}

return test.global_result;
@@ -48,7 +48,7 @@ int main(int argc, char* argv[])
    print_version_string(Test);

    Test->tprintf("Suspend Maxscale 000 machine and waiting\n");
    system(Test->maxscales->stop_vm_command[0]);
    Test->add_result(Test->maxscales->stop_vm(0), "Failed to stop VM maxscale_000\n");
    sleep(FAILOVER_WAIT_TIME);

    version = print_version_string(Test);
@@ -59,12 +59,12 @@ int main(int argc, char* argv[])


    Test->tprintf("Resume Maxscale 000 machine and waiting\n");
    system(Test->maxscales->start_vm_command[0]);
    Test->add_result(Test->maxscales->start_vm(0), "Failed to start VM maxscale_000\n");
    sleep(FAILOVER_WAIT_TIME);
    print_version_string(Test);

    Test->tprintf("Suspend Maxscale 001 machine and waiting\n");
    system(Test->maxscales->stop_vm_command[1]);
    Test->add_result(Test->maxscales->stop_vm(1), "Failed to stop VM maxscale_001\n");
    sleep(FAILOVER_WAIT_TIME);

    version = print_version_string(Test);
@@ -75,7 +75,7 @@ int main(int argc, char* argv[])

    print_version_string(Test);
    Test->tprintf("Resume Maxscale 001 machine and waiting\n");
    system(Test->maxscales->start_vm_command[1]);
    Test->add_result(Test->maxscales->start_vm(1), "Failed to start VM maxscale_001\n");
    sleep(FAILOVER_WAIT_TIME);
    print_version_string(Test);

maxscale-system-test/long_test.cpp (new file, 352 lines)
@@ -0,0 +1,352 @@
/**
 * @file long_test.cpp Runs different types of load for a long time (long load test)
 *
 * The time to execute the test is defined by the 'long_test_time' environmental variable,
 * e.g. 'long_test_time=3600 ./long_test'
 */


#include "testconnections.h"
#include "big_transaction.h"

typedef void* FUNC(void* ptr);

FUNC query_thread;
FUNC prepared_stmt_thread;
FUNC transaction_thread;
FUNC short_session_thread;
FUNC read_thread;

TestConnections* Test;

const int threads_type_num = 4;
int threads_num[threads_type_num];
const int max_threads_num = 32;
int port;
char* IP;

typedef struct
{
    int id;
    bool exit_flag;
    char* sql;
} t_data;

t_data data[threads_type_num][max_threads_num];

int main(int argc, char *argv[])
{
    Test = new TestConnections(argc, argv);
    int i, j;

    Test->tprintf("***************************************************\n"
                  "This is a long-running test to catch memory leaks and crashes;\n"
                  "please define the 'long_test_time' variable to set the running time (seconds)\n"
                  "***************************************************\n");

    pthread_t thread_id[threads_type_num][max_threads_num];
    FUNC* thread[threads_type_num];
    thread[0] = query_thread;
    threads_num[0] = 1;
    thread[1] = transaction_thread;
    threads_num[1] = 1;
    thread[2] = prepared_stmt_thread;
    threads_num[2] = 1;
    thread[3] = read_thread;
    threads_num[3] = 1;

    // thread[4] = short_session_thread;
    // threads_num[4] = 4;


    port = Test->maxscales->rwsplit_port[0];
    IP = Test->maxscales->IP[0];

    // port = 3306;
    // IP = Test->repl->IP[0];


    Test->set_timeout(60);
    Test->tprintf("Set big maximums\n");

    Test->repl->execute_query_all_nodes((char *) "set global max_connections = 300000;");
    Test->repl->execute_query_all_nodes((char *) "set global max_connect_errors = 10000000;");
    Test->repl->execute_query_all_nodes((char *) "set global expire_logs_days = 1;");


    Test->maxscales->connect_rwsplit(0);

    Test->repl->execute_query_all_nodes((char *) "set global max_allowed_packet=100000000");

    Test->tprintf("create t1 in `test` DB\n");
    create_t1(Test->maxscales->conn_rwsplit[0]);

    execute_query(Test->maxscales->conn_rwsplit[0], "DROP DATABASE test1");
    execute_query(Test->maxscales->conn_rwsplit[0], "DROP DATABASE test2");
    Test->tprintf("create `test1` DB\n");
    Test->try_query(Test->maxscales->conn_rwsplit[0], "CREATE DATABASE test1");

    Test->tprintf("create `test2` DB\n");
    Test->try_query(Test->maxscales->conn_rwsplit[0], "CREATE DATABASE test2");

    Test->tprintf("Waiting for slaves after DB creation\n");
    Test->repl->sync_slaves(0);
    // sleep(15);
    Test->tprintf("...ok\n");

    Test->tprintf("create t1 in `test1` DB\n");
    Test->tprintf("... use\n");
    Test->try_query(Test->maxscales->conn_rwsplit[0], "USE test1");
    Test->tprintf("... create\n");
    create_t1(Test->maxscales->conn_rwsplit[0]);

    Test->tprintf("create t1 in `test2` DB\n");
    Test->tprintf("... use\n");
    Test->try_query(Test->maxscales->conn_rwsplit[0], "USE test2");
    Test->tprintf("... create\n");
    create_t1(Test->maxscales->conn_rwsplit[0]);

    Test->tprintf("Waiting for slaves after tables creation\n");
    Test->repl->sync_slaves(0);

    Test->tprintf("...ok\n");

    Test->set_timeout(60);
    // Create threads
    Test->tprintf("Starting threads\n");

    for (j = 0; j < threads_type_num; j++)
    {
        for (i = 0; i < threads_num[j]; i++)
        {
            data[j][i].sql = (char*) malloc((i + 1) * 32 * 14 + 32);
            create_insert_string(data[j][i].sql, (i + 1) * 32, i);
            Test->tprintf("sql %d: %d\n", i, (int) strlen(data[j][i].sql));
            data[j][i].exit_flag = false;
            data[j][i].id = i;
            pthread_create(&thread_id[j][i], NULL, thread[j], &data[j][i]);
        }
    }

    Test->set_log_copy_interval(100);

    Test->stop_timeout();

    char* env = getenv("long_test_time");
    int test_time = 0;
    if (env != NULL)
    {
        sscanf(env, "%d", &test_time);
    }
    if (test_time <= 0)
    {
        test_time = 3600;
        Test->tprintf("'long_test_time' variable is not defined, set test_time to %d\n", test_time);
    }
    Test->tprintf("'test_time' is %d\n", test_time);
    sleep(test_time);

    Test->set_timeout(180);

    Test->tprintf("Stopping threads\n");

    for (j = 0; j < threads_type_num; j++)
    {
        for (i = 0; i < threads_num[j]; i++)
        {
            data[j][i].exit_flag = true;
            pthread_join(thread_id[j][i], NULL);
        }
    }

    // Test->tprintf("Checking if MaxScale is still alive!\n");
    // fflush(stdout);
    // Test->check_maxscale_alive(0);

    Test->maxscales->stop_maxscale(0);

    int rval = Test->global_result;
    delete Test;
    return rval;
}

void try_and_reconnect(MYSQL* conn, char* db, char* sql)
{
    if (execute_query(conn, "%s", sql))
    {
        Test->tprintf("reconnect");
        mysql_close(conn);
        conn = open_conn_db_timeout(port,
                                    IP,
                                    db,
                                    Test->repl->user_name,
                                    Test->repl->password,
                                    20,
                                    Test->ssl);
    }
}

void* query_thread(void* ptr)
{
    MYSQL* conn;
    t_data* data = (t_data*) ptr;
    int inserts_until_optimize = 100000;
    int tn = 0;
    conn = open_conn_db_timeout(port,
                                IP,
                                (char*) "test",
                                Test->repl->user_name,
                                Test->repl->password,
                                20,
                                Test->ssl);
    while (!data->exit_flag)
    {
        // Test->try_query(conn, data->sql);
        try_and_reconnect(conn, (char*) "test", data->sql);

        if (tn >= inserts_until_optimize)
        {
            tn = 0;
            Test->tprintf("Removing everything from table in the queries thread");
            try_and_reconnect(conn, (char*) "test", (char*) "DELETE FROM t1");
            Test->tprintf("Optimizing table in the queries thread");
            try_and_reconnect(conn, (char*) "test", (char*) "OPTIMIZE TABLE t1");
        }
        tn++;
    }
    mysql_close(conn);
    return NULL;
}

void* read_thread(void* ptr)
{
    MYSQL* conn;
    t_data* data = (t_data*) ptr;
    int i = 0;
    char sql[256];
    conn = open_conn_db_timeout(port,
                                IP,
                                (char*) "test",
                                Test->repl->user_name,
                                Test->repl->password,
                                20,
                                Test->ssl);
    while (!data->exit_flag)
    {
        sprintf(sql, "SELECT * FROM t1 WHERE fl=%d", data->id);
        try_and_reconnect(conn, (char*) "test", sql);
        i++;
    }
    mysql_close(conn);
    return NULL;
}

void* transaction_thread(void* ptr)
{
    MYSQL* conn;
    int transactions_until_optimize = 10;
    int tn = 0;
    t_data* data = (t_data*) ptr;
    conn = open_conn_db_timeout(port,
                                IP,
                                (char*) "test1",
                                Test->repl->user_name,
                                Test->repl->password,
                                20,
                                Test->ssl);
    while (!data->exit_flag)
    {
        try_and_reconnect(conn, (char*) "test1", (char*) "START TRANSACTION");
        try_and_reconnect(conn, (char*) "test1", (char*) "SET autocommit = 0");

        int stmt_num = 200000 / strlen(data->sql);
        for (int i = 0; i < stmt_num; i++)
        {
            try_and_reconnect(conn, (char*) "test1", data->sql);
        }
        Test->try_query(conn, (char*) "COMMIT");
        if (tn >= transactions_until_optimize)
        {
            tn = 0;
            Test->tprintf("Removing everything from table in the transactions thread");
            try_and_reconnect(conn, (char*) "test1", (char*) "DELETE FROM t1");
            Test->tprintf("Optimizing table in the transactions thread");
            try_and_reconnect(conn, (char*) "test1", (char*) "OPTIMIZE TABLE t1");
        }
        tn++;
    }
    mysql_close(conn);

    conn = open_conn_db_timeout(port,
                                IP,
                                (char*) "",
                                Test->maxscales->user_name,
                                Test->maxscales->password,
                                20,
                                Test->ssl);
    Test->try_query(conn, "DROP DATABASE test1");
    mysql_close(conn);
    return NULL;
}

void* short_session_thread(void* ptr)
{
    MYSQL* conn;
    t_data* data = (t_data*) ptr;
    while (!data->exit_flag)
    {
        conn = open_conn_db_timeout(port,
                                    IP,
                                    (char*) "test",
                                    Test->repl->user_name,
                                    Test->repl->password,
                                    20,
                                    Test->ssl);
        mysql_close(conn);
    }
    return NULL;
}


void* prepared_stmt_thread(void* ptr)
{
    MYSQL* conn;
    t_data* data = (t_data*) ptr;
    char sql[256];
    conn = open_conn_db_timeout(port,
                                IP,
                                (char*) "test2",
                                Test->repl->user_name,
                                Test->repl->password,
                                20,
                                Test->ssl);
    while (!data->exit_flag)
    {
        sprintf(sql, "PREPARE stmt%d FROM 'SELECT * FROM t1 WHERE fl=@x;';", data->id);
        try_and_reconnect(conn, (char*) "test2", sql);
        try_and_reconnect(conn, (char*) "test2", (char*) "SET @x = 3;");
        sprintf(sql, "EXECUTE stmt%d", data->id);
        try_and_reconnect(conn, (char*) "test2", sql);
        try_and_reconnect(conn, (char*) "test2", (char*) "SET @x = 4;");
        try_and_reconnect(conn, (char*) "test2", sql);
        try_and_reconnect(conn, (char*) "test2", (char*) "SET @x = 400;");
        try_and_reconnect(conn, (char*) "test2", sql);
        sprintf(sql, "DEALLOCATE PREPARE stmt%d", data->id);
        try_and_reconnect(conn, (char*) "test2", sql);
    }
    mysql_close(conn);

    conn = open_conn_db_timeout(port,
                                IP,
                                (char*) "",
                                Test->maxscales->user_name,
                                Test->maxscales->password,
                                20,
                                Test->ssl);
    Test->try_query(conn, "DROP DATABASE test2");
    mysql_close(conn);
    return NULL;
}
@@ -2,12 +2,24 @@
#include <sstream>
#include <unordered_map>

Maxscales::Maxscales(const char* pref, const char* test_cwd, bool verbose)
Maxscales::Maxscales(const char* pref, const char* test_cwd, bool verbose, bool use_valgrind)
{
    strcpy(prefix, pref);
    this->verbose = verbose;
    this->use_valgrind = use_valgrind;
    valgring_log_num = 0;
    strcpy(test_dir, test_cwd);
    read_env();
    if (use_valgrind)
    {
        for (int i = 0; i < N; i++)
        {
            ssh_node_f(i, true, "yum install -y valgrind gdb 2>&1");
            ssh_node_f(i, true, "apt install -y --force-yes valgrind gdb 2>&1");
            ssh_node_f(i, true, "zypper -n install valgrind gdb 2>&1");
            ssh_node_f(i, true, "rm -rf /var/cache/maxscale/maxscale.lock");
        }
    }
}

int Maxscales::read_env()
@@ -231,14 +243,35 @@ int Maxscales::close_maxscale_connections(int m)

int Maxscales::restart_maxscale(int m)
{
    int res = ssh_node(m, "service maxscale restart", true);
    int res;
    if (use_valgrind)
    {
        res = stop_maxscale(m);
        res += start_maxscale(m);
    }
    else
    {
        res = ssh_node(m, "service maxscale restart", true);
    }
    fflush(stdout);
    return res;
}

int Maxscales::stop_maxscale(int m)
{
    int res = ssh_node(m, "service maxscale stop", true);
    int res;
    if (use_valgrind)
    {
        res = ssh_node_f(m, true, "sudo kill $(pidof valgrind) 2>&1 > /dev/null");
        if ((res != 0) || atoi(ssh_node_output(m, "pidof valgrind", true, &res)) > 0)
        {
            res = ssh_node_f(m, true, "sudo kill -9 $(pidof valgrind) 2>&1 > /dev/null");
        }
    }
    else
    {
        res = ssh_node(m, "service maxscale stop", true);
    }
    fflush(stdout);
    return res;
}
@@ -15,7 +15,7 @@ public:
    READCONN_SLAVE
};

Maxscales(const char* pref, const char* test_cwd, bool verbose);
Maxscales(const char* pref, const char* test_cwd, bool verbose, bool use_valgrind);
int read_env();

/**
@@ -319,6 +319,18 @@ public:
 * @param m Number of Maxscale node
 */
void wait_for_monitor(int intervals = 1, int m = 0);

/**
 * @brief use_valgrind If true, Maxscale will be executed under Valgrind
 */
bool use_valgrind;

/**
 * @brief valgring_log_num Counter for Maxscale restarts to avoid Valgrind log overwriting
 */
int valgring_log_num;


};

#endif // MAXSCALES_H
@@ -32,26 +32,17 @@ ${mdbci_dir}/mdbci --override --template ${MDBCI_VM_PATH}/${name}.json generate
mkdir ${MDBCI_VM_PATH}/$name/cnf
cp -r ${script_dir}/cnf/* ${MDBCI_VM_PATH}/$name/cnf/


while [ -f ~/vagrant_lock ]
do
echo "vagrant is locked, waiting ..."
sleep 5
done
touch ~/vagrant_lock
echo ${JOB_NAME}-${BUILD_NUMBER} >> ~/vagrant_lock

echo "running vagrant up $provider"

${mdbci_dir}/mdbci up $name --attempts 3
if [ $? != 0 ]; then
echo "Error creating configuration"
rm ~/vagrant_lock
rm -f ~/vagrant_lock
exit 1
fi

#cp ~/build-scripts/team_keys .
${mdbci_dir}/mdbci public_keys --key ${team_keys} $name

rm ~/vagrant_lock
rm -f ~/vagrant_lock
exit 0
@@ -40,6 +40,10 @@

# $test_set - parameters to be sent to 'ctest' (e.g. '-I 1,100',
# '-LE UNSTABLE')
# if $test_set starts from 'NAME#' ctest will not be executed,
# the value of $test_set after 'NAME#' is used as bash command
# line
# example: 'NAME# long_test_time=3600 ./long_test'

export vm_memory=${vm_memory:-"2048"}
export dir=`pwd`
@@ -73,11 +77,15 @@ if [ $res == 0 ] ; then
set -x
echo ${test_set} | grep "NAME#"
if [ $? == 0 ] ; then
named_test=`echo ${test_set} | sed "s/NAME#//" | sed "s/ //g"`
named_test=`echo ${test_set} | sed "s/NAME#//"`
echo ${named_test} | grep "\./"
if [ $? != 0 ] ; then
named_test="./"${named_test}
fi
fi

if [ ! -z "${named_test}" ] ; then
./${named_test}
eval ${named_test}
else
./check_backend
if [ $? != 0 ]; then
@@ -85,7 +93,7 @@ set -x
if [ "${do_not_destroy_vm}" != "yes" ] ; then
${mdbci_dir}/mdbci destroy $name
fi
rm ~/vagrant_lock
rm -f ~/vagrant_lock
exit 1
fi
${mdbci_dir}/mdbci snapshot take --path-to-nodes $name --snapshot-name clean
@@ -99,7 +107,7 @@ else
if [ "${do_not_destroy_vm}" != "yes" ] ; then
${mdbci_dir}/mdbci destroy $name
fi
rm ~/vagrant_lock
rm -f ~/vagrant_lock
exit 1
fi

@@ -81,7 +81,7 @@ cd ${script_dir}/../../
rm -rf build

mkdir build && cd build
cmake .. -DBUILDNAME=$JOB_NAME-$BUILD_NUMBER-$target -DBUILD_SYSTEM_TESTS=Y
cmake .. -DBUILDNAME=$JOB_NAME-$BUILD_NUMBER-$target -DBUILD_SYSTEM_TESTS=Y -DCMAKE_BUILD_TYPE=Debug
cd maxscale-system-test
make

@@ -52,9 +52,9 @@
}
},

"maxscale" :
"maxscale_000" :
{
"hostname" : "maxscale",
"hostname" : "maxscale1",
"box" : "${box}",
"memory_size" : "${vm_memory}",
"product" : {
@@ -495,3 +495,13 @@ const char* Nodes::ip(int i) const
{
    return use_ipv6 ? IP6[i] : IP[i];
}

int Nodes::start_vm(int node)
{
    return (system(start_vm_command[node]));
}

int Nodes::stop_vm(int node)
{
    return (system(stop_vm_command[node]));
}
@@ -174,6 +174,20 @@ public:
     */
    int read_basic_env();

    /**
     * @brief start_vm Start virtual machine
     * @param node Node number
     * @return 0 in case of success
     */
    int start_vm(int node);

    /**
     * @brief stop_vm Stop virtual machine
     * @param node Node number
     * @return 0 in case of success
     */
    int stop_vm(int node);

private:
    int check_node_ssh(int node);
};
@@ -169,6 +169,7 @@ int RDS::destroy_route_tables()
    json_t* root;
    char cmd[1024];
    char* json;
    int res = 0;

    sprintf(cmd, "aws ec2 describe-vpcs --vpc-ids=%s", vpc_id_intern);
    if (execute_cmd(cmd, &json))
@@ -197,11 +198,11 @@ int RDS::destroy_route_tables()
        if (strcmp(vpc_id_intern, vpc_id) == 0)
        {
            sprintf(cmd, "aws ec2 delete-route-table --route-table-id %s", rt_id);
            system(cmd);
            res += system(cmd);
        }
    }

    return 0;
    return res;
}

int RDS::detach_and_destroy_gw()
@@ -477,6 +478,7 @@ int RDS::create_cluster()
    char* result;
    json_error_t error;
    size_t i;
    int res = 0;

    sprintf(cmd,
            "aws rds create-db-cluster --database-name=test --engine=aurora --master-username=skysql --master-user-password=skysqlrds --db-cluster-identifier=%s --db-subnet-group-name=%s",
@@ -503,7 +505,7 @@ int RDS::create_cluster()
        sprintf(cmd,
                "aws ec2 authorize-security-group-ingress --group-id %s --protocol tcp --port 3306 --cidr 0.0.0.0/0",
                sg_id);
        system(cmd);
        res += system(cmd);
    }
    sg_intern = sg_id;

@@ -514,9 +516,9 @@ int RDS::create_cluster()
                cluster_name_intern,
                i);
        printf("%s\n", cmd);
        system(cmd);
        res += system(cmd);
    }
    return 0;
    return res;
}

int RDS::get_writer(const char** writer_name)
@@ -206,7 +206,7 @@ int main(int argc, char* argv[])
            Test->maxscales->access_user[0],
            Test->maxscales->IP[0],
            Test->maxscales->access_homedir[0]);
    system(str);
    Test->add_result(system(str), "Error copying script to VM");

    sprintf(str, "%s/script_output_expected", Test->maxscales->access_homedir[0]);
    test_script_monitor(Test, Test->repl, str);
@@ -9,6 +9,7 @@
#include <sys/stat.h>
#include <iostream>
#include <sstream>
#include <fstream>
#include <maxbase/stacktrace.hh>

#include "mariadb_func.h"
@@ -132,6 +133,7 @@ TestConnections::TestConnections(int argc, char* argv[])
    , no_vm_revert(true)
    , threads(4)
    , use_ipv6(false)
    , use_valgrind(false)
{
    std::ios::sync_with_stdio(true);
    signal_set(SIGSEGV, sigfatal_handler);
@@ -308,7 +310,7 @@ TestConnections::TestConnections(int argc, char* argv[])
    repl->take_snapshot_command = take_snapshot_command;
    repl->revert_snapshot_command = revert_snapshot_command;

    maxscales = new Maxscales("maxscale", test_dir, verbose);
    maxscales = new Maxscales("maxscale", test_dir, verbose, use_valgrind);

    maxscales->use_ipv6 = use_ipv6;
    maxscales->ssl = ssl;
@@ -419,6 +421,16 @@ TestConnections::~TestConnections()
    // galera->disable_ssl();
}

if (use_valgrind)
{
    // stop all Maxscales to get proper Valgrind logs
    for (int i = 0; i < maxscales->N; i++)
    {
        stop_maxscale(i);
    }
    sleep(15);      // sleep to let logs be written to disk
}

copy_all_logs();

/* Temporary disable snapshot revert due to Galera failures
@@ -497,7 +509,6 @@ void TestConnections::expect(bool result, const char* format, ...)

void TestConnections::read_env()
{

    char* env;

    if (verbose)
@@ -505,7 +516,6 @@ void TestConnections::read_env()
    printf("Reading test setup configuration from environmental variables\n");
}


// env = getenv("get_logs_command"); if (env != NULL) {sprintf(get_logs_command, "%s", env);}

env = getenv("sysbench_dir");
@@ -614,6 +624,12 @@ void TestConnections::read_env()
{
    no_vm_revert = false;
}

env = getenv("use_valgrind");
if ((env != NULL) && ((strcasecmp(env, "yes") == 0) || (strcasecmp(env, "true") == 0)))
{
    use_valgrind = true;
}
}

void TestConnections::print_env()
@@ -785,17 +801,24 @@ void TestConnections::init_maxscale(int m)

void TestConnections::copy_one_mariadb_log(int i, std::string filename)
{
    int exit_code;
    char* mariadb_log = repl->ssh_node_output(i, "cat /var/lib/mysql/*.err 2>/dev/null", true, &exit_code);
    FILE* f = fopen(filename.c_str(), "w");

    if (f != NULL)
    auto log_retrive_commands =
    {
        fwrite(mariadb_log, sizeof(char), strlen(mariadb_log), f);
        fclose(f);
    }
        "cat /var/lib/mysql/*.err",
        "cat /var/log/syslog | grep mysql",
        "cat /var/log/messages | grep mysql"
    };

    free(mariadb_log);
    int j = 1;

    for (auto cmd : log_retrive_commands)
    {
        std::ofstream outfile(filename + std::to_string(j++));

        if (outfile)
        {
            outfile << repl->ssh_output(cmd, i).second;
        }
    }
}

int TestConnections::copy_mariadb_logs(Mariadb_nodes* repl,
@@ -808,8 +831,8 @@ int TestConnections::copy_mariadb_logs(Mariadb_nodes* repl,
{
    for (int i = 0; i < repl->N; i++)
    {
        if (strcmp(repl->IP[i], "127.0.0.1") != 0)      // Do not copy MariaDB logs in case of local
                                                        // backend
        // Do not copy MariaDB logs in case of local backend
        if (strcmp(repl->IP[i], "127.0.0.1") != 0)
        {
            char str[4096];
            sprintf(str, "LOGS/%s/%s%d_mariadb_log", test_name, prefix, i);
@@ -1173,9 +1196,9 @@ bool TestConnections::replicate_from_master(int m)
    repl->execute_query_all_nodes("STOP SLAVE");

    /** Clean up MaxScale directories */
    maxscales->ssh_node(m, "service maxscale stop", true);
    maxscales->stop_maxscale(m);
    prepare_binlog(m);
    maxscales->ssh_node(m, "service maxscale start", true);
    maxscales->start_maxscale(m);

    char log_file[256] = "";
    char log_pos[256] = "4";
@@ -1383,11 +1406,13 @@ int TestConnections::find_connected_slave1(int m)

int TestConnections::check_maxscale_processes(int m, int expected)
{
    const char* ps_cmd = use_valgrind ?
        "ps ax | grep valgrind | grep maxscale | grep -v grep | wc -l" :
        "ps -C maxscale | grep maxscale | wc -l";

    int exit_code;
    char* maxscale_num = maxscales->ssh_node_output(m,
                                                    "ps -C maxscale | grep maxscale | wc -l",
                                                    false,
                                                    &exit_code);
    char* maxscale_num = maxscales->ssh_node_output(m, ps_cmd, false, &exit_code);

    if ((maxscale_num == NULL) || (exit_code != 0))
    {
        return -1;
@@ -1400,12 +1425,10 @@ int TestConnections::check_maxscale_processes(int m, int expected)

    if (atoi(maxscale_num) != expected)
    {
        tprintf("%s maxscale processes detected, trying agin in 5 seconds\n", maxscale_num);
        tprintf("%s maxscale processes detected, trying again in 5 seconds\n", maxscale_num);
        sleep(5);
        maxscale_num = maxscales->ssh_node_output(m,
                                                  "ps -C maxscale | grep maxscale | wc -l",
                                                  false,
                                                  &exit_code);
        maxscale_num = maxscales->ssh_node_output(m, ps_cmd, false, &exit_code);

        if (atoi(maxscale_num) != expected)
        {
            add_result(1, "Number of MaxScale processes is not %d, it is %s\n", expected, maxscale_num);
@@ -1417,7 +1440,7 @@ int TestConnections::check_maxscale_processes(int m, int expected)

int TestConnections::stop_maxscale(int m)
{
    int res = maxscales->ssh_node(m, "service maxscale stop", true);
    int res = maxscales->stop_maxscale(m);
    check_maxscale_processes(m, 0);
    fflush(stdout);
    return res;
@@ -1425,7 +1448,7 @@ int TestConnections::stop_maxscale(int m)

int TestConnections::start_maxscale(int m)
{
    int res = maxscales->ssh_node(m, "service maxscale start", true);
    int res = maxscales->start_maxscale(m);
    check_maxscale_processes(m, 1);
    fflush(stdout);
    return res;
@@ -1451,7 +1474,6 @@ int TestConnections::check_maxscale_alive(int m)
    maxscales->close_maxscale_connections(m);
    add_result(global_result - gr, "Maxscale is not alive\n");
    stop_timeout();

    check_maxscale_processes(m, 1);

    return global_result - gr;
@@ -2055,14 +2077,14 @@ void TestConnections::check_current_connections(int m, int value)

int TestConnections::take_snapshot(char* snapshot_name)
{
    char str[PATH_MAX + 4096];
    char str[strlen(take_snapshot_command) + strlen(snapshot_name) + 2];
    sprintf(str, "%s %s", take_snapshot_command, snapshot_name);
    return call_system(str);
}

int TestConnections::revert_snapshot(char* snapshot_name)
{
    char str[PATH_MAX + 4096];
    char str[strlen(revert_snapshot_command) + strlen(snapshot_name) + 2];
    sprintf(str, "%s %s", revert_snapshot_command, snapshot_name);
    return call_system(str);
}
@@ -531,8 +531,6 @@ public:
     */
    int list_dirs(int m = 0);


    /**
     * @brief make_snapshot Makes a snapshot for all running VMs
     * @param snapshot_name name of created snapshot
@@ -605,6 +603,11 @@ public:
    m_on_destroy.push_back(func);
}

/**
 * @brief use_valgrind If true, Maxscale will be executed under Valgrind
 */
bool use_valgrind;

private:
    void report_result(const char* format, va_list argp);
    void copy_one_mariadb_log(int i, std::string filename);
@@ -312,17 +312,20 @@ static int database_cb(void* data, int columns, char** rows, char** row_names)
    return 0;
}

static bool check_database(sqlite3* handle, const char* database)
static bool check_database(MYSQL_AUTH* instance, sqlite3* handle, const char* database)
{
    bool rval = true;

    if (*database)
    {
        rval = false;
        size_t len = sizeof(mysqlauth_validate_database_query) + strlen(database) + 1;
        const char* query = instance->lower_case_table_names ?
            mysqlauth_validate_database_query_lower :
            mysqlauth_validate_database_query;
        size_t len = strlen(query) + strlen(database) + 1;
        char sql[len];

        sprintf(sql, mysqlauth_validate_database_query, database);
        sprintf(sql, query, database);

        char* err;

@@ -453,7 +456,7 @@ int validate_mysql_user(MYSQL_AUTH* instance,
                        session->client_sha1))
{
    /** Password is OK, check that the database exists */
    if (check_database(handle, session->db))
    if (check_database(instance, handle, session->db))
    {
        rval = MXS_AUTH_SUCCEEDED;
    }

@@ -71,6 +71,8 @@ static const char mysqlauth_skip_auth_query[] =
/** Query that checks that the database exists */
static const char mysqlauth_validate_database_query[] =
    "SELECT * FROM " MYSQLAUTH_DATABASES_TABLE_NAME " WHERE db = '%s' LIMIT 1";
static const char mysqlauth_validate_database_query_lower[] =
    "SELECT * FROM " MYSQLAUTH_DATABASES_TABLE_NAME " WHERE LOWER(db) = LOWER('%s') LIMIT 1";

/** Delete query used to clean up the database before loading new users */
static const char delete_users_query[] = "DELETE FROM " MYSQLAUTH_USERS_TABLE_NAME;
@@ -187,10 +187,6 @@ MariaDBMonitor* MariaDBMonitor::create(const string& name, const string& module)
 */
bool MariaDBMonitor::configure(const MXS_CONFIG_PARAMETER* params)
{
    /* Reset all monitored state info. The server dependent values must be reset as servers could have been
     * added, removed and modified. */
    reset_server_info();

    m_detect_stale_master = params->get_bool("detect_stale_master");
    m_detect_stale_slave = params->get_bool("detect_stale_slave");
    m_ignore_external_masters = params->get_bool("ignore_external_masters");
@@ -210,6 +206,10 @@ bool MariaDBMonitor::configure(const MXS_CONFIG_PARAMETER* params)
    m_maintenance_on_low_disk_space = params->get_bool(CN_MAINTENANCE_ON_LOW_DISK_SPACE);
    m_handle_event_scheduler = params->get_bool(CN_HANDLE_EVENTS);

    /* Reset all monitored state info. The server dependent values must be reset as servers could have been
     * added, removed and modified. */
    reset_server_info();

    m_excluded_servers.clear();
    bool settings_ok = true;
    bool list_error = false;
@@ -332,11 +332,10 @@ static void handle_error_response(DCB* dcb, GWBUF* buffer)
     * This will prevent repeated authentication failures. */
    if (errcode == ER_HOST_IS_BLOCKED)
    {
        MXS_ERROR("Server %s has been put into maintenance mode due "
                  "to the server blocking connections from MaxScale. "
                  "Run 'mysqladmin -h %s -P %d flush-hosts' on this "
                  "server before taking this server out of maintenance "
                  "mode.",
        MXS_ERROR("Server %s has been put into maintenance mode due to the server blocking connections "
                  "from MaxScale. Run 'mysqladmin -h %s -P %d flush-hosts' on this server before taking "
                  "this server out of maintenance mode. To avoid this problem in the future, set "
                  "'max_connect_errors' to a larger value in the backend server.",
                  dcb->server->name(),
                  dcb->server->address,
                  dcb->server->port);
@@ -667,11 +667,12 @@ static std::string get_next_filename(std::string file, std::string dir)
{
    // Find the last and second to last dot
    auto last = file.find_last_of('.');
    auto almost_last = file.find_last_of('.', last);
    auto part = file.substr(0, last);
    auto almost_last = part.find_last_of('.');
    mxb_assert(last != std::string::npos && almost_last != std::string::npos);

    // Extract the number between the dots
    std::string number_part = file.substr(almost_last + 1, last);
    std::string number_part = part.substr(almost_last + 1, std::string::npos);
    int filenum = strtol(number_part.c_str(), NULL, 10);

    std::string file_part = file.substr(0, almost_last);