Merge branch 'develop' into MXS-1266

Author: MassimilianoPinto
Date:   2017-05-25 08:43:10 +02:00

682 changed files with 51954 additions and 1257 deletions

.gitignore

@@ -32,6 +32,7 @@ depend.mk
.#*
._*
# Vi swap files
.*.swp
/build/

@@ -51,3 +52,11 @@ CMakeFiles/*
*/*/*/*/CMakeFiles/*
Makefile
/.DS_Store

# Netbeans Project files
nbproject/
/build/

# RBCommons
.reviewboardrc


@ -23,7 +23,8 @@ addons:
- pandoc - pandoc
- uuid - uuid
- uuid-dev - uuid-dev
- libmicrohttpd-dev - libgnutls30
- libgcrypt20
coverity_scan: coverity_scan:
project: project:
name: "mariadb-corporation/MaxScale" name: "mariadb-corporation/MaxScale"

BUILD/build_deb_local.sh (new executable file)

@@ -0,0 +1,84 @@
#!/bin/bash
# do the real building work
# this script is executed on build VM
set -x
cd ./MaxScale
mkdir _build
cd _build
cmake .. $cmake_flags
export LD_LIBRARY_PATH=$PWD/log_manager:$PWD/query_classifier
make
export LD_LIBRARY_PATH=$(for i in `find $PWD/ -name '*.so*'`; do echo $(dirname $i); done|sort|uniq|xargs|sed -e 's/[[:space:]]/:/g')
make package
res=$?
if [ $res != 0 ] ; then
echo "Make package failed"
exit $res
fi
sudo rm ../CMakeCache.txt
sudo rm CMakeCache.txt
echo "Building tarball..."
cmake .. $cmake_flags -DTARBALL=Y
sudo make package
cp _CPack_Packages/Linux/DEB/*.deb ../
rm ../CMakeCache.txt
rm CMakeCache.txt
cd ..
cp _build/*.deb .
cp *.deb ..
cp _build/*.gz .
set -x
if [ "$build_experimental" == "yes" ] ; then
rm -rf _build
mkdir _build
cd _build
export LD_LIBRARY_PATH=""
cmake .. $cmake_flags -DTARGET_COMPONENT=experimental
export LD_LIBRARY_PATH=$(for i in `find $PWD/ -name '*.so*'`; do echo $(dirname $i); done|sort|uniq|xargs|sed -e 's/[[:space:]]/:/g')
make package
cp _CPack_Packages/Linux/DEB/*.deb ../
cd ..
cp _build/*.deb .
cp *.deb ..
cp _build/*.gz .
rm -rf _build
mkdir _build
cd _build
export LD_LIBRARY_PATH=""
cmake .. $cmake_flags -DTARGET_COMPONENT=devel
export LD_LIBRARY_PATH=$(for i in `find $PWD/ -name '*.so*'`; do echo $(dirname $i); done|sort|uniq|xargs|sed -e 's/[[:space:]]/:/g')
make package
cp _CPack_Packages/Linux/DEB/*.deb ../
cd ..
cp _build/*.deb .
cp *.deb ..
cp _build/*.gz .
fi
if [ "$BUILD_RABBITMQ" == "yes" ] ; then
cmake ../rabbitmq_consumer/ $cmake_flags
sudo make package
res=$?
if [ $res != 0 ] ; then
exit $res
fi
cp _CPack_Packages/Linux/DEB/*.deb ../
cd ..
cp _build/*.deb .
cp *.deb ..
fi
sudo dpkg -i ../maxscale*.deb
set +x
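The script reads its knobs from environment variables rather than arguments. A minimal sketch of an invocation on a build VM, assuming hypothetical flag values (`cmake_flags`, `build_experimental` and `BUILD_RABBITMQ` are the variables the script actually reads):

```bash
# Run from the directory containing the MaxScale checkout.
export cmake_flags="-DCMAKE_BUILD_TYPE=Release"   # hypothetical flags
export build_experimental="yes"                   # also build experimental/devel packages
export BUILD_RABBITMQ="no"
./BUILD/build_deb_local.sh
```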

BUILD/build_rpm_local.sh (new executable file)

@@ -0,0 +1,70 @@
#!/bin/bash
# do the real building work
# this script is executed on build VM
set -x
cd ./MaxScale
mkdir _build
cd _build
cmake .. $cmake_flags
make
if [ "$remove_strip" == "yes" ] ; then
sudo rm -rf /usr/bin/strip
sudo touch /usr/bin/strip
sudo chmod a+x /usr/bin/strip
fi
sudo make package
res=$?
if [ $res != 0 ] ; then
echo "Make package failed"
exit $res
fi
sudo rm ../CMakeCache.txt
sudo rm CMakeCache.txt
echo "Building tarball..."
cmake .. $cmake_flags -DTARBALL=Y
sudo make package
cd ..
cp _build/*.rpm .
cp _build/*.gz .
if [ "$build_experimental" == "yes" ] ; then
sudo rm -rf _build
mkdir _build
cd _build
cmake .. $cmake_flags -DTARGET_COMPONENT=experimental
sudo make package
cd ..
cp _build/*.rpm .
cp _build/*.gz .
sudo rm -rf _build
mkdir _build
cd _build
cmake .. $cmake_flags -DTARGET_COMPONENT=devel
sudo make package
cd ..
cp _build/*.rpm .
cp _build/*.gz .
fi
if [ "$BUILD_RABBITMQ" == "yes" ] ; then
cmake ../rabbitmq_consumer/ $cmake_flags
sudo make package
res=$?
if [ $res != 0 ] ; then
exit $res
fi
cd ..
cp _build/*.rpm .
cp _build/*.gz .
fi
sudo rpm -i maxscale*.rpm
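The RPM script is driven the same way; `remove_strip` works by replacing `/usr/bin/strip` with an empty executable so that packaging leaves debug symbols in place. A hedged sketch of an invocation:

```bash
export cmake_flags="-DCMAKE_BUILD_TYPE=RelWithDebInfo"   # hypothetical flags
export remove_strip="yes"                                # keep debug symbols in the RPMs
./BUILD/build_rpm_local.sh
```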

BUILD/install_build_deps.sh (new executable file)

@@ -0,0 +1,130 @@
#!/bin/bash
# Do the real building work. This script is executed on build VM and
# requires a working installation of CMake.
# Check if CMake needs to be installed
command -v cmake || install_cmake="cmake"
command -v apt-get
if [ $? -eq 0 ]
then
# DEB-based distro
sudo apt-get update
sudo apt-get install -y --force-yes dpkg-dev git gcc g++ ncurses-dev bison \
build-essential libssl-dev libaio-dev perl make libtool libcurl4-openssl-dev \
libpcre3-dev flex tcl libeditline-dev uuid-dev liblzma-dev libsqlite3-dev \
sqlite3 liblua5.1 liblua5.1-dev libgnutls30 libgcrypt20 $install_cmake
else
## RPM-based distro
command -v yum
if [ $? -ne 0 ]
then
# We need zypper here
sudo zypper -n install gcc gcc-c++ ncurses-devel bison glibc-devel libgcc_s1 perl \
make libtool libopenssl-devel libaio libaio-devel flex libcurl-devel \
pcre-devel git wget tcl libuuid-devel \
xz-devel sqlite3 sqlite3-devel pkg-config lua lua-devel \
gnutls gcrypt $install_cmake
sudo zypper -n install rpm-build
cat /etc/*-release | grep "SUSE Linux Enterprise Server 11"
if [ $? -ne 0 ]
then
sudo zypper -n install libedit-devel
fi
else
# YUM!
sudo yum clean all
sudo yum install -y --nogpgcheck gcc gcc-c++ ncurses-devel bison glibc-devel \
libgcc perl make libtool openssl-devel libaio libaio-devel libedit-devel \
libedit-devel libcurl-devel curl-devel systemtap-sdt-devel rpm-sign \
gnupg pcre-devel flex rpmdevtools git wget tcl openssl libuuid-devel xz-devel \
sqlite sqlite-devel pkgconfig lua lua-devel rpm-build createrepo yum-utils \
gnutls gcrypt $install_cmake
cat /etc/redhat-release | grep "release 5"
if [ $? -eq 0 ]
then
sudo yum remove -y libedit-devel libedit
fi
fi
fi
# Flex
wget http://maxscale-jenkins.mariadb.com/x/flex-2.5.35-0.8.el5.rfb.x86_64.rpm
sudo yum install flex-2.5.35-0.8.el5.rfb.x86_64.rpm -y --nogpgcheck
rm flex-2.5.35-0.8.el5.rfb.x86_64*
# RabbitMQ C client
mkdir rabbit
cd rabbit
git clone https://github.com/alanxz/rabbitmq-c.git
if [ $? -ne 0 ]
then
echo "Error cloning rabbitmq-c"
exit 1
fi
cd rabbitmq-c
git checkout v0.7.1
cmake . -DCMAKE_C_FLAGS=-fPIC -DBUILD_SHARED_LIBS=N -DCMAKE_INSTALL_PREFIX=/usr
sudo make install
cd ../../
# TCL
mkdir tcl
cd tcl
wget --no-check-certificate http://prdownloads.sourceforge.net/tcl/tcl8.6.5-src.tar.gz
if [ $? -ne 0 ]
then
echo "Error getting tcl"
exit 1
fi
tar xzvf tcl8.6.5-src.tar.gz
cd tcl8.6.5/unix
./configure
sudo make install
cd ../../..
# Jansson
git clone https://github.com/akheron/jansson.git
if [ $? != 0 ]
then
echo "Error cloning jansson"
exit 1
fi
mkdir -p jansson/build
pushd jansson/build
cmake .. -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_C_FLAGS=-fPIC -DJANSSON_INSTALL_LIB_DIR=/usr/lib64
make
sudo make install
popd
# Avro C API
wget -r -l1 -nH --cut-dirs=2 --no-parent -A.tar.gz --no-directories http://mirror.netinch.com/pub/apache/avro/stable/c
if [ $? != 0 ]
then
echo "Error getting avro-c"
exit 1
fi
avro_filename=`ls -1 *.tar.gz`
avro_dir=`echo "$avro_filename" | sed "s/.tar.gz//"`
tar -axf $avro_filename
mkdir $avro_dir/build
pushd $avro_dir/build
cmake .. -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_C_FLAGS=-fPIC -DCMAKE_CXX_FLAGS=-fPIC
make
sudo make install
popd

BUILD/run_test.sh (new file)

@@ -0,0 +1,6 @@
cd ~/Maxscale/maxscale-system-test
cmake .
make
ctest -LE HEAVY -VV
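`ctest -LE HEAVY` runs every test that does *not* carry the `HEAVY` label, with `-VV` enabling extra-verbose output. To run only the heavy tests instead, the inverse filter would be:

```bash
# -L selects by label, -LE excludes by label
ctest -L HEAVY -VV
```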


@@ -60,6 +60,10 @@ include_directories(${PCRE2_INCLUDE_DIRS})
if(NOT MARIADB_CONNECTOR_FOUND)
  message(STATUS "Building MariaDB Connector-C from source.")
  include(cmake/BuildMariaDBConnector.cmake)
else()
  # This is required as the core depends on the `connector-c` target
  add_custom_target(connector-c)
  message(STATUS "Using system Connector-C")
endif()

if(NOT JANSSON_FOUND)

@@ -67,6 +71,8 @@ if(NOT JANSSON_FOUND)
  include(cmake/BuildJansson.cmake)
endif()

include(cmake/BuildMicroHttpd.cmake)

include_directories(${JANSSON_INCLUDE_DIR})

# You can find the variables set by this in the FindCURL.cmake file


@@ -1,8 +1,8 @@
# Database Firewall filter

## Overview

The Database Firewall filter is used to block queries that match a set of
rules. It can be used to prevent harmful queries from reaching the backend
database instances or to limit access to the database based on a more flexible
set of rules compared to the traditional GRANT-based privilege system. Currently

@@ -10,9 +10,9 @@ the filter does not support multi-statements.

## Configuration

The Database Firewall filter only requires minimal configuration in the
maxscale.cnf file. The actual rules of the Database Firewall filter are located
in a separate text file. The following is an example of a Database Firewall
filter configuration in maxscale.cnf.

```

@@ -32,7 +32,7 @@ filters=DatabaseFirewall

### Filter Parameters

The Database Firewall filter has one mandatory parameter, `rules`.

#### `rules`

@@ -128,7 +128,7 @@ parameter (_allow_, _block_ or _ignore_).

### Mandatory rule parameters

The Database Firewall filter's rules expect a single mandatory parameter for a
rule. You can define multiple rules to cover situations where you would like to
apply multiple mandatory rules to a query.

@@ -220,7 +220,7 @@ the network address. You can use the `%` character as the wildcard to enable
user name matching from any address or network matching for all users. After the
list of users and networks the keyword match is expected.

After this, either the keyword `any`, `all` or `strict_all` is expected. This
defines how the rules are matched. If `any` is used, the query is considered
matched as soon as the first rule matches and the rest of the rules are
skipped. If instead the `all` keyword is used, all rules must match for the query

@@ -260,7 +260,7 @@ Shows the current statistics of the rules.

To prevent the excessive use of a database we want to set a limit on the rate of
queries. We only want to apply this limit to certain queries that cause unwanted
behaviour. To achieve this we can use a regular expression.

First we define the limit on the rate of queries. The first parameter for the
rule sets the number of allowed queries to 10 queries and the second parameter
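The hunk ends mid-sentence, but the rule being described can be sketched. A hedged example of such a rules file, with illustrative names and the third parameter (the block duration in seconds) assumed to be 60:

```
rule limit_rate deny limit_queries 10 5 60
users %@% match any rules limit_rate
```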


@@ -35,6 +35,8 @@ the _ssn_ would be masked, as in
...
```

## Security

Note that the masking filter alone is *not* sufficient for preventing
access to a particular column. As the masking filter works on the column
name alone, a query like

@@ -49,8 +51,11 @@ a sufficient number of times with different _ssn_ values, will, eventually,
reveal the social security number of all persons in the database.

For a secure solution, the masking filter *must* be combined with the
firewall filter to prevent the use of functions by which the masking
can be bypassed.

In a future release, the combined use of the masking filter and the
database firewall filter will be simplified.
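The probing attack alluded to above can be made concrete with a hedged sketch (table and column names are hypothetical): the _ssn_ in the result set is masked, but the predicate still leaks it.

```sql
-- Repeating this query with different candidate values eventually
-- reveals the real ssn, even though the returned column is masked.
SELECT name FROM persons WHERE ssn = '123-45-6789';
```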
## Limitations


@@ -3,7 +3,7 @@
This filter was introduced in MariaDB MaxScale 2.1.

## Overview

The Maxrows filter is capable of restricting the amount of rows that a SELECT,
a prepared statement or stored procedure could return to the client application.

If a resultset from a backend server has more rows than the configured limit

@@ -12,7 +12,7 @@ or the resultset size exceeds the configured size,

## Configuration

The Maxrows filter is easy to configure and to add to any existing service.

```
[MaxRows]

@@ -22,12 +22,12 @@ module=maxrows

[MaxRows Routing Service]
type=service
...
filters=MaxRows
```

### Filter Parameters

The Maxrows filter has no mandatory parameters.
Optional parameters are:

#### `max_resultset_rows`

@@ -81,7 +81,7 @@ ERROR 1415 (0A000): Row limit/size exceeded for query: select * from test.t4

#### `debug`

An integer value, using which the level of debug logging made by the Maxrows
filter can be controlled. The value is actually a bitfield with different bits
denoting different logging.

@@ -97,8 +97,8 @@ debug=2

## Example Configuration

Here is an example of filter configuration where the maximum number of returned
rows is 10000 and the maximum allowed resultset size is 256KB.

```
[MaxRows]


@@ -6,7 +6,7 @@ The Query Log All (QLA) filter is a filter module for MariaDB MaxScale that is a

## Configuration

The configuration block for the QLA filter requires the minimal filter options in its section within the maxscale.cnf file, stored in /etc/maxscale.cnf.

```
[MyLogFilter]
type=filter

@@ -31,7 +31,7 @@ The QLA filter accepts the following options.

case     | Use case-sensitive matching
extended | Use extended regular expression syntax (ERE)

To use multiple filter options, list them in a comma-separated list. If no options are given, defaults will be used. Multiple options can be enabled simultaneously.

```
options=case,extended

@@ -53,7 +53,7 @@ The basename of the output file created for each session. A session index is add

```
filebase=/tmp/SqlQueryLog
```

The filebase may also be set as the filter option; the mechanism to set the filebase via the filter option is superseded by the parameter. If both are set, the parameter setting will be used and the filter option ignored.

### `match`

@@ -99,8 +99,7 @@ user=john

### `log_type`

The type of log file to use. The default value is _session_.

|Value   | Description                    |
|--------|--------------------------------|

@@ -108,7 +107,7 @@ following values. The default value is _session_.
|unified |Use one file for all sessions   |

```
log_type=session
```

### `log_data`


@@ -2,7 +2,7 @@

## Overview

The Regex filter is a filter module for MariaDB MaxScale that is able to rewrite query content using regular expression matches and text substitution. It uses the PCRE2 syntax, which differs from the POSIX regular expressions used in MariaDB MaxScale versions prior to 1.3.0.

For all details about the PCRE2 syntax, please read the [PCRE2 syntax documentation](http://www.pcre.org/current/doc/html/pcre2syntax.html).

@@ -10,7 +10,7 @@ Please note that the PCRE2 library uses a different syntax to refer to capture g

## Configuration

The configuration block for the Regex filter requires the minimal filter options in its section within the maxscale.cnf file, stored in /etc/maxscale.cnf.

```
[MyRegexFilter]

@@ -30,7 +30,7 @@ filters=MyRegexfilter

## Filter Options

The Regex filter accepts the options ignorecase or case. These define whether the pattern should take the case of the string it is matching against into consideration.

## Filter Parameters


@@ -104,7 +104,7 @@ user=john

You have an order system and believe the updates of the PRODUCTS table are causing some performance issues for the rest of your application. You would like to know which of the many updates in your application is causing the issue.

Add a filter with the following definition:

```
[ProductsUpdateTop20]

@@ -120,9 +120,9 @@ Note the exclude entry, this is to prevent updates to the PRODUCTS_STOCK table f

### Example 2 - One Application Server is Slow

One of your application servers is slower than the rest. You believe it is related to database access but you are not sure what is taking the time.

Add a filter with the following definition:

```
[SlowAppServer]


@@ -24,6 +24,7 @@ other packages in addition to these.

```
git gcc gcc-c++ ncurses-devel bison flex glibc-devel cmake libgcc perl make \
libtool openssl openssl-devel libcurl-devel pcre-devel tcl tcl-devel \
systemtap-sdt-devel libuuid libuuid-devel sqlite sqlite-devel \
gnutls gcrypt
```

You can install the packages with the following commands.

@@ -31,8 +32,8 @@ You can install the packages with the following commands.

```
sudo yum install git gcc gcc-c++ ncurses-devel bison flex glibc-devel cmake \
libgcc perl make libtool openssl openssl-devel libcurl-devel pcre-devel \
tcl tcl-devel systemtap-sdt-devel libuuid libuuid-devel sqlite sqlite-devel \
gnutls gcrypt
```

### Required packages on Ubuntu and Debian systems

@@ -42,7 +43,8 @@ require other packages in addition to these.

```
git build-essential libssl-dev ncurses-dev bison flex cmake perl libtool \
libcurl4-openssl-dev libpcre3-dev tcl tcl-dev uuid uuid-dev sqlite3-dev \
libgnutls30 libgcrypt20
```

You can install the packages with the following command.

@@ -50,7 +52,7 @@ You can install the packages with the following command.

```
sudo apt-get install git build-essential libssl-dev ncurses-dev bison flex \
cmake perl libtool libcurl4-openssl-dev libpcre3-dev tcl tcl-dev uuid \
uuid-dev libsqlite3-dev libgnutls30 libgcrypt20
```

## Preparing the MariaDB MaxScale build


@@ -544,17 +544,9 @@ Enable HTTP admin interface authentication using HTTP Basic Access
authentication. This is not a secure method of authentication but it does add a
small layer of security. This option is disabled by default.

The admin interface authentication uses the same users as the MaxAdmin network
interface. This means that new users can be added with both MaxAdmin and the
REST API. The default credentials for the interface are `admin:mariadb`.

#### `admin_ssl_key`

@@ -743,9 +735,10 @@ In versions of MySQL 5.7.6 and later, the `Password` column was replaced by

**Note**: If authentication fails, MaxScale will try to refresh the list of
database users used by the service up to 4 times every 30 seconds.

<a id="passwd"></a>
#### `password`

The password parameter provides the password information for the above user and
may be either a plain text password or it may be an encrypted password. See the
section on encrypting passwords for use in the maxscale.cnf file. This user must
be capable of connecting to the backend database and executing these SQL


@@ -74,7 +74,7 @@ same node for writes.

If the `root_node_as_master` option is disabled for galeramon, the node with the
lowest index will always be chosen as the master. If it is enabled, only the
node with a _wsrep_local_index_ value of 0 can be chosen as the master.

### `set_donor_nodes`

@@ -107,7 +107,7 @@ set_donor_nodes=true

If the `use_priority` option is set and a server is configured with the
`priority=<int>` parameter, galeramon will use that as the basis on which the
master node is chosen. This requires the `disable_master_role_setting` to be
undefined or disabled. The server with the lowest positive value of _priority_
will be chosen as the master node when a replacement Galera node is promoted to
a master server inside MaxScale.

@@ -115,7 +115,7 @@ Nodes with a non-positive value (_priority_ <= 0) will never be chosen as the master
you to mark some servers as permanent slaves by assigning a non-positive value
into _priority_.

Here is an example.

```
[node-1]

@@ -147,9 +147,9 @@ In this example `node-1` is always used as the master if available. If `node-1`
is not available, then the next node with the highest priority rank is used. In
this case it would be `node-3`. If both `node-1` and `node-3` were down, then
`node-2` would be used. Because `node-4` has a value of 0 in _priority_, it will
never be the master. Nodes without the _priority_ parameter are considered as
having the lowest priority rank and will be used only if all nodes
with the _priority_ parameter are unavailable.

With priority ranks you can control the order in which MaxScale chooses the
master node. This will allow for a controlled failure and replacement of nodes.


@@ -4,6 +4,16 @@ This document lists optional parameters that all current monitors support.

## Parameters

### `user`

Username used by the monitor to connect to the backend servers. If a server defines
the `monitoruser` parameter, that value will be used instead.

### `password`

Password for the user defined with the `user` parameter. If a server defines
the `monitorpw` parameter, that value will be used instead.

### `monitor_interval`

This is the time the monitor waits between each cycle of monitoring. The default value of 10000 milliseconds (10 seconds) should be lowered if you want a faster response to changes in the server states. The value is defined in milliseconds and the smallest possible value is 100 milliseconds.
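Putting the common parameters together, a minimal monitor section might look like this sketch (module, server and credential names are illustrative):

```
[MySQL-Monitor]
type=monitor
module=mysqlmon
servers=server1,server2
user=maxuser
password=maxpwd
monitor_interval=2500
```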


@@ -35,9 +35,9 @@ These are optional parameters specific to the MySQL Monitor.

### `detect_replication_lag`

A boolean value which controls if replication lag between the master and the
slaves is monitored. This allows the routers to route read queries only to
slaves that are up to date. The default value for this parameter is _false_.

To detect the replication lag, MaxScale uses the _maxscale_schema.replication_heartbeat_
table. This table is created on the master server and it is updated at every heartbeat

@@ -87,7 +87,8 @@ detect_stale_slave=true

### `mysql51_replication`

Enable support for MySQL 5.1 replication monitoring. This is needed if a MySQL
server older than 5.5 is used as a slave in replication.

```
mysql51_replication=true

@@ -112,7 +113,7 @@ the master status.

By setting the servers into read-only mode, the user can control which
server receives the master status. To do this (see the sketch below):

- Enable `@@read_only` on all servers (preferably through the configuration file)
- Manually disable `@@read_only` on the server which should be the master
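A hedged sketch of the corresponding statements, run against each backend directly rather than through MaxScale:

```sql
-- On every server (or set read_only=1 in the server's configuration file):
SET GLOBAL read_only=ON;

-- Then, only on the server that should become the master:
SET GLOBAL read_only=OFF;
```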
This functionality is similar to the [Multi-Master Monitor](MM-Monitor.md)

@@ -146,7 +147,7 @@ This mode in mysqlmon is completely passive in the sense that it does not modify
the cluster or any of the servers in it. It only labels the last remaining
server in a cluster as the master server.

Before a server is labelled as a standalone master, the following conditions must
have been met:

- Previous attempts to connect to other servers in the cluster have failed,

@@ -173,7 +174,7 @@ been set up.

### `failcount`

Number of failures that must occur on all failed servers before a standalone
server is labelled as a master. The default value is 5 failures.

The monitor will attempt to contact all servers once per monitoring cycle. When
`detect_standalone_master` is enabled, all of the failed servers must fail

@@ -181,7 +182,7 @@ _failcount_ number of connection attempts before the last server is labeled as
the master.

The formula for calculating the actual number of milliseconds before the server
is labelled as the master is `monitor_interval * failcount`.

### `allow_cluster_recovery`

@@ -190,7 +191,7 @@ takes a boolean parameter is enabled by default. This parameter requires that
`detect_standalone_master` is set to true. In MaxScale 2.1.0, this parameter was
called `failover_recovery`.

When this parameter is disabled, if the last remaining server is labelled as the
master, the monitor will set all of the failed servers into maintenance
mode. When this option is enabled, the failed servers are allowed to rejoin the
cluster.

@@ -228,7 +229,8 @@ starting MaxScale.

## Example 1 - Monitor script

Here is an example shell script which sends an email to admin@my.org
when a server goes down.

```
#!/usr/bin/env bash


@@ -26,13 +26,13 @@ of the comment and extend to the end of the current line.

The MaxScale REST API provides the following resources. All resources conform to
the [JSON API](http://jsonapi.org/format/) specification.

- [maxscale](Resources-MaxScale.md)
- [services](Resources-Service.md)
- [servers](Resources-Server.md)
- [filters](Resources-Filter.md)
- [monitors](Resources-Monitor.md)
- [sessions](Resources-Session.md)
- [users](Resources-User.md)

### Resource Relationships

@@ -81,8 +81,7 @@ in addition to the _self_ link.

## Common Request Parameters

All the resources that return JSON content also support the following
parameters.

- `pretty`

@@ -159,10 +158,14 @@ The value of this header must be a date value in the

#### X-HTTP-Method-Override

Some clients only support GET and PUT requests. By providing the string value of
the intended method in the `X-HTTP-Method-Override` header, a client can, for
example, perform a POST, PATCH or DELETE request with the PUT method
(e.g. `X-HTTP-Method-Override: PATCH`).

If this header is defined in the request, the current method of the request is
replaced with the one in the header. The HTTP method must be in uppercase and it
must be one of the methods that the requested resource supports.
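A hedged illustration with `curl`; the endpoint and request body are assumptions made for the example:

```bash
# Perform a PATCH through a PUT request by overriding the method
curl -X PUT -H "X-HTTP-Method-Override: PATCH" \
     --data '{"data":{"attributes":{"parameters":{"port":3307}}}}' \
     http://localhost:8989/v1/servers/server1
```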
### Response Headers

#### Allow

@@ -264,13 +267,14 @@ complete the request.

### 4xx Client Error

The 4xx class of status code is used when the client seems to have erred. Except when
responding to a HEAD request, the body of the response *MAY* contain a JSON
representation of the error.

```javascript
{
    "error": {
        "detail" : "The new `/server/` resource is missing the `port` parameter"
    }
}
```


@@ -8,144 +8,80 @@ services can use the same filter and a single service can use multiple filters.

### Get a filter

Get a single filter. The _:name_ in the URI must be a valid filter name with all
whitespace replaced with hyphens. The filter names are case-sensitive.

```
GET /v1/filters/:name
```

#### Response

`Status: 200 OK`

```javascript
{
    "links": {
        "self": "http://localhost:8989/v1/filters/Hint-Filter"
    },
    "data": {
        "id": "Hint-Filter",
        "type": "filters",
        "relationships": {
            "services": { // All services that use this filter
                "links": {
                    "self": "http://localhost:8989/v1/services/"
                },
                "data": [] // No service is using this filter
            }
        },
        "attributes": {
            "module": "hintfilter",
            "parameters": {} // Filter parameters
        },
        "links": {
            "self": "http://localhost:8989/v1/filters/Hint-Filter"
        }
    }
}
```

### Get all filters

Get all filters.

```
GET /v1/filters
```

#### Response

`Status: 200 OK`

```javascript
{
    "links": {
        "self": "http://localhost:8989/v1/filters/"
    },
    "data": [ // Array of filter resources
        {
            "id": "Hint-Filter",
            "type": "filters",
            "relationships": {
                "services": {
                    "links": {
                        "self": "http://localhost:8989/v1/services/"
                    },
                    "data": []
                }
            },
            "attributes": {
                "module": "hintfilter",
                "parameters": {}
            },
            "links": {
                "self": "http://localhost:8989/v1/filters/Hint-Filter"
            }
        }
    ]
}
```


@@ -11,129 +11,251 @@ Retrieve global information about a MaxScale instance. This includes various
file locations, configuration options and version information.

```
GET /v1/maxscale
```

#### Response

`Status: 200 OK`

```javascript
{
    "links": {
        "self": "http://localhost:8989/v1/maxscale/"
    },
    "data": {
        "attributes": {
            "parameters": {
                "libdir": "/usr/lib64/maxscale",
                "datadir": "/var/lib/maxscale",
                "process_datadir": "/var/lib/maxscale/data16218",
                "cachedir": "/var/cache/maxscale",
                "configdir": "/etc",
                "config_persistdir": "/var/lib/maxscale/maxscale.cnf.d",
                "module_configdir": "/etc/maxscale.modules.d",
                "piddir": "/var/run/maxscale",
                "logdir": "/var/log/maxscale",
                "langdir": "/var/lib/maxscale",
                "execdir": "/usr/bin",
                "connector_plugindir": "/var/lib/plugin",
                "threads": 4,
                "auth_connect_timeout": 3,
                "auth_read_timeout": 1,
                "auth_write_timeout": 2,
                "skip_permission_checks": false,
                "syslog": true,
                "maxlog": true,
                "log_to_shm": false,
                "query_classifier": ""
            },
            "version": "2.1.3",
            "commit": "a32aa6c16236d2d8830e1286ea3aa4dba19174ec",
            "started_at": "Wed, 17 May 2017 05:33:46 GMT",
            "uptime": 19
        },
        "id": "maxscale",
        "type": "maxscale"
    }
}
```
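Assuming a default installation, the resource above could be fetched with something like the following (`-u` is only needed when `admin_auth` is enabled; `admin:mariadb` are the documented default credentials):

```bash
curl -u admin:mariadb http://localhost:8989/v1/maxscale
```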
## Get thread information

Get the information and statistics of a particular thread. The _:id_ in
the URI must map to a valid thread number between 0 and the configured
value of `threads`.

```
GET /v1/maxscale/threads/:id
```

#### Response

`Status: 200 OK`

```javascript
{
    "links": {
        "self": "http://localhost:8989/v1/maxscale/threads/0"
    },
    "data": {
        "id": "0",
        "type": "threads",
        "attributes": {
            "stats": {
                "reads": 2,
                "writes": 0,
                "errors": 0,
                "hangups": 0,
                "accepts": 0,
                "blocking_polls": 180,
                "event_queue_length": 1,
                "max_event_queue_length": 1,
                "max_exec_time": 0,
                "max_queue_time": 0
            }
        },
        "links": {
            "self": "http://localhost:8989/v1/threads/0"
        }
    }
}
```
## Get information for all threads

Get the information for all threads. Returns a collection of thread resources.

```
GET /v1/maxscale/threads
```

#### Response

`Status: 200 OK`

```javascript
{
    "links": {
        "self": "http://localhost:8989/v1/maxscale/threads/"
    },
    "data": [
        {
            "id": "0",
            "type": "threads",
            "attributes": {
                "stats": {
                    "reads": 1,
                    "writes": 0,
                    "errors": 0,
                    "hangups": 0,
                    "accepts": 0,
                    "blocking_polls": 116,
                    "event_queue_length": 1,
                    "max_event_queue_length": 1,
                    "max_exec_time": 0,
                    "max_queue_time": 0
                }
            },
            "links": {
                "self": "http://localhost:8989/v1/threads/0"
            }
        },
        {
            "id": "1",
            "type": "threads",
            "attributes": {
                "stats": {
                    "reads": 1,
                    "writes": 0,
                    "errors": 0,
                    "hangups": 0,
                    "accepts": 0,
                    "blocking_polls": 116,
                    "event_queue_length": 1,
                    "max_event_queue_length": 1,
                    "max_exec_time": 0,
                    "max_queue_time": 0
                }
            },
            "links": {
                "self": "http://localhost:8989/v1/threads/1"
            }
        },
        {
            "id": "2",
            "type": "threads",
            "attributes": {
                "stats": {
                    "reads": 1,
                    "writes": 0,
                    "errors": 0,
                    "hangups": 0,
                    "accepts": 0,
                    "blocking_polls": 116,
                    "event_queue_length": 1,
                    "max_event_queue_length": 1,
                    "max_exec_time": 0,
                    "max_queue_time": 0
                }
            },
            "links": {
                "self": "http://localhost:8989/v1/threads/2"
            }
        },
        {
            "id": "3",
            "type": "threads",
            "attributes": {
                "stats": {
                    "reads": 1,
                    "writes": 0,
                    "errors": 0,
                    "hangups": 0,
                    "accepts": 0,
                    "blocking_polls": 116,
                    "event_queue_length": 1,
                    "max_event_queue_length": 1,
                    "max_exec_time": 0,
                    "max_queue_time": 0
                }
            },
            "links": {
                "self": "http://localhost:8989/v1/threads/3"
            }
        }
    ]
}
```
## Get logging information

Get information about the current state of logging, enabled log files and the
location where the log files are stored.

```
GET /v1/maxscale/logs
```

#### Response

`Status: 200 OK`

```javascript
{
    "links": {
        "self": "http://localhost:8989/v1/maxscale/logs/"
    },
    "data": {
        "attributes": {
            "parameters": {
                "highprecision": false,
                "maxlog": true,
                "syslog": true,
                "throttling": {
                    "count": 10,
                    "suppress_ms": 10000,
                    "window_ms": 1000
                },
                "log_warning": true,
                "log_notice": true,
                "log_info": false,
                "log_debug": false
            }
        },
        "id": "logs",
        "type": "logs"
    }
}
```
## Flush and rotate log files

Flushes any pending messages to disk and reopens the log files. The body of the
message is ignored.

```
POST /v1/maxscale/logs/flush
```

#### Response

`Status: 204 No Content`

@@ -147,70 +269,177 @@
Retrieve all pending tasks that are queued for execution.

```
GET /v1/maxscale/tasks
```

#### Response

`Status: 200 OK`

```javascript
{
    "links": {
        "self": "http://localhost:8989/v1/maxscale/tasks/"
    },
    "data": [] // No tasks active
}
```
## Get loaded modules

Retrieve information about a loaded module. This includes version, API and
maturity information as well as all the parameters that the module defines.

```
GET /v1/maxscale/modules
```

#### Response

`Status: 200 OK`

```javascript
{
    "links": {
        "self": "http://localhost:8989/v1/maxscale/modules/"
    },
    "data": {
        "id": "readwritesplit",
        "type": "module",
        "attributes": {
            "module_type": "Router",
            "version": "V1.1.0",
            "description": "A Read/Write splitting router for enhancement read scalability",
            "api": "router",
            "status": "GA",
            "parameters": [
                {
                    "name": "use_sql_variables_in",
                    "type": "enum",
                    "default_value": "all",
                    "enum_values": [
                        "all",
                        "master"
                    ]
                },
                {
                    "name": "slave_selection_criteria",
                    "type": "enum",
                    "default_value": "LEAST_CURRENT_OPERATIONS",
                    "enum_values": [
                        "LEAST_GLOBAL_CONNECTIONS",
                        "LEAST_ROUTER_CONNECTIONS",
                        "LEAST_BEHIND_MASTER",
                        "LEAST_CURRENT_OPERATIONS"
                    ]
                },
                {
                    "name": "master_failure_mode",
                    "type": "enum",
                    "default_value": "fail_instantly",
                    "enum_values": [
                        "fail_instantly",
                        "fail_on_write",
                        "error_on_write"
                    ]
                },
                {
                    "name": "max_slave_replication_lag",
                    "type": "int",
                    "default_value": "-1"
                },
                {
                    "name": "max_slave_connections",
                    "type": "string",
                    "default_value": "255"
                },
                {
                    "name": "retry_failed_reads",
                    "type": "bool",
                    "default_value": "true"
                },
                {
                    "name": "disable_sescmd_history",
                    "type": "bool",
                    "default_value": "true"
                },
                {
                    "name": "max_sescmd_history",
                    "type": "count",
                    "default_value": "0"
                },
                {
                    "name": "strict_multi_stmt",
                    "type": "bool",
                    "default_value": "true"
                },
                {
                    "name": "master_accept_reads",
                    "type": "bool",
                    "default_value": "false"
                },
                {
                    "name": "connection_keepalive",
                    "type": "count",
                    "default_value": "0"
                }
            ]
        },
        "links": {
            "self": "http://localhost:8989/v1/modules/readwritesplit"
        }
    }
}
```
## Get all loaded modules

Retrieve information about all loaded modules.

```
GET /v1/maxscale/modules
```

#### Response

`Status: 200 OK`

```javascript
{
    "links": {
        "self": "http://localhost:8989/v1/maxscale/modules/"
    },
    "data": [
        {
            "id": "qc_sqlite",
            "type": "module",
            "attributes": {
                "module_type": "QueryClassifier",
                "version": "V1.0.0",
                "description": "Query classifier using sqlite.",
                "api": "query_classifier",
                "status": "Beta",
                "parameters": []
            },
            "links": {
                "self": "http://localhost:8989/v1/modules/qc_sqlite"
            }
        },
        {
            "id": "MySQLAuth",
            "type": "module",
            "attributes": {
                "module_type": "Authenticator",
                "version": "V1.1.0",
                "description": "The MySQL client to MaxScale authenticator implementation",
                "api": "authenticator",
                "status": "GA",
                "parameters": []
            },
            "links": {
                "self": "http://localhost:8989/v1/modules/MySQLAuth"
            }
        }
    ]
}
```


@@ -8,169 +8,318 @@ more servers.

### Get a monitor

Get a single monitor. The _:name_ in the URI must be a valid monitor name with
all whitespace replaced with hyphens. The monitor names are case-sensitive.

```
GET /v1/monitors/:name
```

#### Response

`Status: 200 OK`

```javascript
{
    "links": {
        "self": "http://localhost:8989/v1/monitors/MySQL-Monitor"
    },
    "data": {
        "id": "MySQL-Monitor",
        "type": "monitors",
        "relationships": {
            "servers": {
                "links": {
                    "self": "http://localhost:8989/v1/servers/"
                },
                "data": [
                    {
                        "id": "server1",
                        "type": "servers"
                    },
                    {
                        "id": "server2",
                        "type": "servers"
                    }
                ]
            }
        },
        "attributes": {
            "module": "mysqlmon",
            "state": "Running",
            "parameters": {
                "user": "maxuser",
                "password": "maxpwd",
                "monitor_interval": 10000,
                "backend_connect_timeout": 3,
                "backend_read_timeout": 1,
                "backend_write_timeout": 2,
                "backend_connect_attempts": 1,
                "detect_replication_lag": false,
                "detect_stale_master": true,
                "detect_stale_slave": true,
                "mysql51_replication": false,
                "multimaster": false,
                "detect_standalone_master": false,
                "failcount": 5,
                "allow_cluster_recovery": true,
                "journal_max_age": 28800
            },
            "monitor_diagnostics": {
                "monitor_id": 0,
                "detect_stale_master": true,
                "detect_stale_slave": true,
                "detect_replication_lag": false,
                "multimaster": false,
                "detect_standalone_master": false,
                "failcount": 5,
                "allow_cluster_recovery": true,
                "mysql51_replication": false,
                "journal_max_age": 28800,
                "server_info": [
                    {
                        "name": "server1",
                        "server_id": 0,
                        "master_id": 0,
                        "read_only": false,
                        "slave_configured": false,
                        "slave_io_running": false,
                        "slave_sql_running": false,
                        "master_binlog_file": "",
                        "master_binlog_position": 0
                    },
                    {
                        "name": "server2",
                        "server_id": 0,
                        "master_id": 0,
                        "read_only": false,
                        "slave_configured": false,
                        "slave_io_running": false,
                        "slave_sql_running": false,
                        "master_binlog_file": "",
                        "master_binlog_position": 0
                    }
                ]
            }
        },
        "links": {
            "self": "http://localhost:8989/v1/monitors/MySQL-Monitor"
        }
    }
}
```
### Get all monitors

Get all monitors.

```
GET /v1/monitors
```

#### Response

`Status: 200 OK`

```javascript
{
    "links": {
        "self": "http://localhost:8989/v1/monitors/"
    },
    "data": [
        {
            "id": "MySQL-Monitor",
            "type": "monitors",
            "relationships": {
                "servers": {
                    "links": {
                        "self": "http://localhost:8989/v1/servers/"
                    },
                    "data": [
                        {
                            "id": "server1",
                            "type": "servers"
                        },
                        {
                            "id": "server2",
                            "type": "servers"
                        }
                    ]
                }
            },
            "attributes": {
                "module": "mysqlmon",
                "state": "Running",
                "parameters": {
                    "user": "maxuser",
                    "password": "maxpwd",
                    "monitor_interval": 10000,
                    "backend_connect_timeout": 3,
                    "backend_read_timeout": 1,
                    "backend_write_timeout": 2,
                    "backend_connect_attempts": 1,
                    "detect_replication_lag": false,
                    "detect_stale_master": true,
                    "detect_stale_slave": true,
                    "mysql51_replication": false,
                    "multimaster": false,
                    "detect_standalone_master": false,
                    "failcount": 5,
                    "allow_cluster_recovery": true,
                    "journal_max_age": 28800
                },
                "monitor_diagnostics": {
                    "monitor_id": 0,
                    "detect_stale_master": true,
                    "detect_stale_slave": true,
                    "detect_replication_lag": false,
                    "multimaster": false,
                    "detect_standalone_master": false,
                    "failcount": 5,
                    "allow_cluster_recovery": true,
                    "mysql51_replication": false,
                    "journal_max_age": 28800,
                    "server_info": [
                        {
                            "name": "server1",
                            "server_id": 0,
                            "master_id": 0,
                            "read_only": false,
                            "slave_configured": false,
                            "slave_io_running": false,
                            "slave_sql_running": false,
                            "master_binlog_file": "",
                            "master_binlog_position": 0
                        },
                        {
                            "name": "server2",
                            "server_id": 0,
                            "master_id": 0,
                            "read_only": false,
                            "slave_configured": false,
                            "slave_io_running": false,
                            "slave_sql_running": false,
                            "master_binlog_file": "",
                            "master_binlog_position": 0
                        }
                    ]
                }
            },
            "links": {
                "self": "http://localhost:8989/v1/monitors/MySQL-Monitor"
            }
        }
    ]
}
```
### Create a monitor

Create a new monitor. The request body must define the `/data/id`
field with the name of the monitor, the `/data/type` field with the
value of `monitors` and the `/data/attributes/module` field with the
monitor module for this monitor. All of the monitor parameters can
be defined at creation time.

`POST /v1/monitors`

The following example defines a request body which creates the new monitor,
_test-monitor_, and assigns two servers to be monitored by it. It also defines
a custom value for the _monitor_interval_ parameter.

```javascript
{
    "data": {
        "id": "test-monitor", // Name of the monitor
        "type": "monitors",
        "attributes": {
            "module": "mysqlmon", // The monitor uses the mysqlmon module
            "parameters": { // Monitor parameters
                "monitor_interval": 1000
            }
        },
        "relationships": { // List of server relationships that this monitor uses
            "servers": {
                "data": [ // This monitor uses two servers
                    {
                        "id": "server1",
                        "type": "servers"
                    },
                    {
                        "id": "server2",
                        "type": "servers"
                    }
                ]
            }
        }
    }
}
```
#### Response
Monitor is created.
`Status: 204 No Content`
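A hedged way to send the request body above, saved in a local file with an illustrative name:

```bash
curl -X POST --data @new-monitor.json http://localhost:8989/v1/monitors
```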
### Update a monitor
The :name in the URI must map to a monitor name with all whitespace replaced with
hyphens. The request body must be a valid JSON document representing the modified monitor.
```
PUT /v1/monitor/:name
```
### Modifiable Fields
The following standard monitor parameters can be modified.
- [user](../Monitors/Monitor-Common.md#user)
- [password](../Monitors/Monitor-Common.md#password)
- [monitor_interval](../Monitors/Monitor-Common.md#monitor_interval)
- [backend_connect_timeout](../Monitors/Monitor-Common.md#backend_connect_timeout)
- [backend_write_timeout](../Monitors/Monitor-Common.md#backend_write_timeout)
- [backend_read_timeout](../Monitors/Monitor-Common.md#backend_read_timeout)
- [backend_connect_attempts](../Monitors/Monitor-Common.md#backend_connect_attempts)
Refer to the documentation on these parameters for valid values.
In addition to these standard parameters, the monitor specific parameters can also be
modified. Refer to the monitor module documentation for details on these parameters.
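
As an illustration, a request body along the following lines would adjust two of the parameters listed above (the monitor name and the values are examples only):

```javascript
{
"data": {
"id": "MySQL-Monitor",
"type": "monitors",
"attributes": {
"parameters": {
"monitor_interval": 2000,
"backend_connect_timeout": 5
}
}
}
}
```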
#### Response
Monitor is modified.
`Status: 204 No Content`
Invalid request body.
`Status: 403 Forbidden`
### Stop a monitor

Stops a started monitor.

```
PUT /v1/monitor/:name/stop
```

#### Response

Monitor is stopped.

`Status: 204 No Content`

### Start a monitor

Starts a stopped monitor.

```
PUT /v1/monitor/:name/start
```

#### Response

Monitor is started.

`Status: 204 No Content`
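
For example, assuming the default host, port and credentials, the _test-monitor_ created earlier could be stopped and started like this:

```
curl -u admin:mariadb -X PUT http://localhost:8989/v1/monitor/test-monitor/stop
curl -u admin:mariadb -X PUT http://localhost:8989/v1/monitor/test-monitor/start
```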
@ -91,10 +91,6 @@ Server not found:

Status: 404 Not Found
```

### Get all servers

```

@ -223,10 +219,6 @@ Status: 200 OK

}
```

### Create a server

```

@ -330,7 +322,7 @@ response is returned.

### Modifiable Fields

The following standard server parameters can be modified.

- [address](../Getting-Started/Configuration-Guide.md#address)
- [port](../Getting-Started/Configuration-Guide.md#port)

@ -478,10 +470,6 @@ Invalid JSON body:

Status: 403 Forbidden
```

### Destroy a server

```

@ -525,8 +513,6 @@ GET /v1/servers/:name/connections

#### Response

### Close all connections to a server

Close all connections to a particular server. This will forcefully close all
@ -11,95 +11,219 @@ Get a single service. The _:name_ in the URI must be a valid service name with
all whitespace replaced with hyphens. The service names are case-insensitive.

```
GET /v1/services/:name
```
#### Response

`Status: 200 OK`

```javascript
{
"links": {
"self": "http://localhost:8989/v1/services/Read-Connection-Router"
},
"data": {
"id": "Read-Connection-Router",
"type": "services",
"attributes": {
"router": "readconnroute",
"state": "Started",
"router_diagnostics": {
"connections": 0,
"current_connections": 1,
"queries": 0
},
"started": "Mon May 22 12:54:05 2017",
"total_connections": 1,
"connections": 1,
"parameters": { // Service parameters
"router_options": "master",
"user": "maxuser",
"password": "maxpwd",
"enable_root_user": false,
"max_retry_interval": 3600,
"max_connections": 0,
"connection_timeout": 0,
"auth_all_servers": false,
"strip_db_esc": true,
"localhost_match_wildcard_host": true,
"version_string": "",
"log_auth_warnings": true,
"retry_on_failure": true
},
"listeners": [ // Listeners that point to this service
{
"attributes": {
"parameters": {
"port": 4008,
"protocol": "MySQLClient",
"authenticator": "MySQLAuth"
}
},
"id": "Read-Connection-Listener",
"type": "listeners"
}
]
},
"relationships": {
"servers": {
"links": {
"self": "http://localhost:8989/v1/servers/"
},
"data": [ // List of servers that this service uses
{
"id": "server1",
"type": "servers"
}
]
}
},
"links": {
"self": "http://localhost:8989/v1/services/Read-Connection-Router"
}
}
}
```
### Get all services

Get all services.

```
GET /v1/services
```
#### Response

`Status: 200 OK`

```javascript
{
"links": {
"self": "http://localhost:8989/v1/services/"
},
"data": [ // Collection of service resources
{
"id": "Read-Connection-Router",
"type": "services",
"attributes": {
"router": "readconnroute",
"state": "Started",
"router_diagnostics": {
"connections": 0,
"current_connections": 1,
"queries": 0
},
"started": "Mon May 22 13:00:46 2017",
"total_connections": 1,
"connections": 1,
"parameters": {
"router_options": "master",
"user": "maxuser",
"password": "maxpwd",
"enable_root_user": false,
"max_retry_interval": 3600,
"max_connections": 0,
"connection_timeout": 0,
"auth_all_servers": false,
"strip_db_esc": true,
"localhost_match_wildcard_host": true,
"version_string": "",
"log_auth_warnings": true,
"retry_on_failure": true
},
"listeners": [
{
"attributes": {
"parameters": {
"port": 4008,
"protocol": "MySQLClient",
"authenticator": "MySQLAuth"
}
},
"id": "Read-Connection-Listener",
"type": "listeners"
}
]
},
"relationships": {
"servers": {
"links": {
"self": "http://localhost:8989/v1/servers/"
},
"data": [
{
"id": "server1",
"type": "servers"
}
]
}
},
"links": {
"self": "http://localhost:8989/v1/services/Read-Connection-Router"
}
},
{
"id": "CLI",
"type": "services",
"attributes": {
"router": "cli",
"state": "Started",
"started": "Mon May 22 13:00:46 2017",
"total_connections": 2,
"connections": 2,
"parameters": {
"router_options": "",
"user": "",
"password": "",
"enable_root_user": false,
"max_retry_interval": 3600,
"max_connections": 0,
"connection_timeout": 0,
"auth_all_servers": false,
"strip_db_esc": true,
"localhost_match_wildcard_host": true,
"version_string": "",
"log_auth_warnings": true,
"retry_on_failure": true
},
"listeners": [
{
"attributes": {
"parameters": {
"address": "default",
"port": 0,
"protocol": "maxscaled",
"authenticator": "MaxAdminAuth"
}
},
"id": "CLI-Listener",
"type": "listeners"
},
{
"attributes": {
"parameters": {
"address": "0.0.0.0",
"port": 6603,
"protocol": "maxscaled",
"authenticator": "MaxAdminAuth"
}
},
"id": "CLI-Network-Listener",
"type": "listeners"
}
]
},
"relationships": {},
"links": {
"self": "http://localhost:8989/v1/services/CLI"
}
}
]
}
```
### Get service listeners

Get the listeners of a service. The _:name_ in the URI must be a valid service
@ -107,165 +231,64 @@ name with all whitespace replaced with hyphens. The service names are
case-insensitive.

```
GET /v1/services/:name/listeners
```
#### Response

`Status: 200 OK`

```javascript
{
"links": {
"self": "http://localhost:8989/v1/services/Read-Connection-Router/listeners"
},
"data": [
{
"attributes": {
"parameters": {
"port": 4008,
"protocol": "MySQLClient",
"authenticator": "MySQLAuth"
}
},
"id": "Read-Connection-Listener",
"type": "listeners"
}
]
}
```
### Update a service

The _:name_ in the URI must map to a service name and the request body must be a
valid JSON Patch document which is applied to the resource.

```
PUT /v1/services/:name
```

The following standard service parameters can be modified.

- [user](../Getting-Started/Configuration-Guide.md#user)
- [password](../Getting-Started/Configuration-Guide.md#password)
- [enable_root_user](../Getting-Started/Configuration-Guide.md#enable_root_user)
- [max_retry_interval](../Getting-Started/Configuration-Guide.md#max_retry_interval)
- [max_connections](../Getting-Started/Configuration-Guide.md#max_connections)
- [connection_timeout](../Getting-Started/Configuration-Guide.md#connection_timeout)
- [auth_all_servers](../Getting-Started/Configuration-Guide.md#auth_all_servers)
- [strip_db_esc](../Getting-Started/Configuration-Guide.md#strip_db_esc)
- [localhost_match_wildcard_host](../Getting-Started/Configuration-Guide.md#localhost_match_wildcard_host)
- [version_string](../Getting-Started/Configuration-Guide.md#version_string)
- [weightby](../Getting-Started/Configuration-Guide.md#weightby)
- [log_auth_warnings](../Getting-Started/Configuration-Guide.md#log_auth_warnings)
- [retry_on_failure](../Getting-Started/Configuration-Guide.md#retry_on_failure)

Refer to the documentation on these parameters for valid values.

#### Response

Service is modified.

```
Status: 204 No Content
```
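
For example, assuming the default host, port and credentials, and a (hypothetical) file `service_update.json` containing a request body in the format described above, the service could be updated with:

```
curl -u admin:mariadb -X PUT -d @service_update.json http://localhost:8989/v1/services/Read-Connection-Router
```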
@ -1,8 +1,8 @@
# Session Resource

A session is an abstraction of a client connection, any number of related backend
connections, a router module session and possibly filter module sessions. Each
session is created on a service and each service can have multiple sessions.

## Resource Operations
@ -11,128 +11,135 @@ session is created on a service and a service can have multiple sessions.

Get a single session. _:id_ must be a valid session ID.

```
GET /v1/sessions/:id
```
#### Response

`Status: 200 OK`

```javascript
{
"links": {
"self": "http://localhost:8989/v1/sessions/1"
},
"data": {
"id": "1",
"type": "sessions",
"relationships": {
"services": {
"links": {
"self": "http://localhost:8989/v1/services/"
},
"data": [
{
"id": "RW-Split-Router",
"type": "services"
}
]
}
},
"attributes": {
"state": "Listener Session",
"connected": "Wed May 17 10:06:35 2017"
},
"links": {
"self": "http://localhost:8989/v1/sessions/1"
}
}
}
```
### Get all sessions

Get all sessions.

```
GET /v1/sessions
```
#### Response

`Status: 200 OK`

```javascript
{
"links": {
"self": "http://localhost:8989/v1/sessions/"
},
"data": [
{
"id": "1",
"type": "sessions",
"relationships": {
"services": {
"links": {
"self": "http://localhost:8989/v1/services/"
},
"data": [
{
"id": "RW-Split-Router",
"type": "services"
}
]
}
},
"attributes": {
"state": "Listener Session",
"connected": "Wed May 17 10:06:35 2017"
},
"links": {
"self": "http://localhost:8989/v1/sessions/1"
}
},
{
"id": "2",
"type": "sessions",
"relationships": {
"services": {
"links": {
"self": "http://localhost:8989/v1/services/"
},
"data": [
{
"id": "Read-Connection-Router",
"type": "services"
}
]
}
},
"attributes": {
"state": "Listener Session",
"connected": "Wed May 17 10:06:35 2017"
},
"links": {
"self": "http://localhost:8989/v1/sessions/2"
}
},
{
"id": "3",
"type": "sessions",
"relationships": {
"services": {
"links": {
"self": "http://localhost:8989/v1/services/"
},
"data": [
{
"id": "CLI",
"type": "services"
}
]
}
},
"attributes": {
"state": "Listener Session",
"connected": "Wed May 17 10:06:35 2017"
},
"links": {
"self": "http://localhost:8989/v1/sessions/3"
} }
}
]
}
```
@ -5,57 +5,187 @@ MaxScale's configuration.

## Resource Operations

### Get network user

Get a single network user. The _:name_ in the URI must be a valid network
user name.

```
GET /v1/users/inet/:name
```

#### Response

`Status: 200 OK`

```javascript
{
"links": {
"self": "http://localhost:8989/v1/users/inet/my-user"
},
"data": {
"id": "my-user",
"type": "inet",
"relationships": {
"self": "http://localhost:8989/v1/users/inet/my-user"
}
}
}
```
### Get all network users
Get all network users.
```
GET /v1/users/inet
```
#### Response
`Status: 200 OK`
```javascript
{
"links": {
"self": "http://localhost:8989/v1/users/inet"
},
"data": [
{
"id": "my-user",
"type": "inet",
"relationships": {
"self": "http://localhost:8989/v1/users/inet/my-user"
}
}
]
}
```
### Get enabled UNIX account
Get a single enabled UNIX account. The _:name_ in the URI must be a valid
UNIX account name that has been enabled.
```
GET /v1/users/unix/:name
```
#### Response
`Status: 200 OK`
```javascript
{
"links": {
"self": "http://localhost:8989/v1/users/unix"
},
"data": [
{
"id": "maxscale",
"type": "unix",
"relationships": {
"self": "http://localhost:8989/v1/users/unix/maxscale"
}
}
]
}
```
### Get all enabled UNIX accounts
Get all enabled UNIX accounts.
```
GET /v1/users/unix
```
#### Response
`Status: 200 OK`
```javascript
{
"links": {
"self": "http://localhost:8989/v1/users/unix"
},
"data": [
{
"id": "maxscale",
"type": "unix",
"relationships": {
"self": "http://localhost:8989/v1/users/unix/maxscale"
}
}
]
}
```
### Get all users
Get all administrative users. This fetches both network users and local UNIX
accounts.
```
GET /v1/users
```
#### Response
`Status: 200 OK`
```javascript
{
"links": {
"self": "http://localhost:8989/v1/users/"
},
"data": [ // List of all users
{
"id": "my-user",
"type": "inet", // A network user
"relationships": {
"self": "http://localhost:8989/v1/users/inet/my-user"
}
},
{
"id": "maxscale",
"type": "unix", // A local UNIX account
"relationships": {
"self": "http://localhost:8989/v1/users/unix/maxscale"
}
}
]
}
```
### Create a network user
Create a new network user.
```
PUT /v1/users/inet
```
The request body must fulfill the following requirements.
- The `/data/id`, `/data/type` and `/data/attributes/password` fields must be
defined.
- The `/data/id` field defines the name of the account.
- The `/data/attributes/password` field defines the password for this user.
- The value of the `/data/type` field must always be `inet`.
Here is an example request body defining the network user _my-user_ with the
password _my-password_.
```javascript
{
"data": {
"id": "my-user",
"type": "inet",
"attributes": {
"password": "my-password"
}
}
}
```
@ -65,13 +195,58 @@ All of the following fields need to be defined in the request body.

#### Response

```
Status: 204 No Content
```
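
For example, assuming the default host, port and credentials, the request body above (saved in a hypothetical file `new_user.json`) would be sent with:

```
curl -u admin:mariadb -X PUT -d @new_user.json http://localhost:8989/v1/users/inet
```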
### Enable a UNIX account

This enables an existing UNIX account on the system for administrative
operations.

```
PUT /v1/users/unix
```
The request body must fulfill the following requirements.
- The `/data/id` and `/data/type` fields must be defined.
- The `/data/id` field defines the name of the account.
- The value of the `/data/type` field must always be `unix`.
Here is an example request body enabling the UNIX account _jdoe_.
```javascript
{
"data": {
"id": "jdoe",
"type": "unix"
}
}
```
#### Response
```
Status: 204 No Content
```
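
For example, assuming the default host, port and credentials, the account _jdoe_ could be enabled with an inline request body:

```
curl -u admin:mariadb -X PUT -d '{"data": {"id": "jdoe", "type": "unix"}}' http://localhost:8989/v1/users/unix
```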
### Delete a network user
The _:name_ part of the URI must be a valid user name.
```
DELETE /v1/users/inet/:name
```
#### Response
```
Status: 204 No Content
```
### Disable a UNIX account
The _:name_ part of the URI must be a valid user name.
```
DELETE /v1/users/unix/:name
``` ```
#### Response
@ -355,6 +355,8 @@ destroy:

alter:
alter server - Alter server parameters
alter monitor - Alter monitor parameters
alter service - Alter service parameters
alter maxscale - Alter maxscale parameters

set:
set server - Set the status of a server
@ -1583,6 +1585,64 @@ The monitor is stopped and it will be removed on the next restart of MaxScale

Example: destroy monitor my-monitor
```
## Services
### Altering Services
To alter the common service parameters, use the `alter service` command. Module
specific parameters cannot be altered with this command.
```
alter service - Alter service parameters
Usage: alter service NAME KEY=VALUE ...
Parameters:
NAME Service name
KEY=VALUE List of `key=value` pairs separated by spaces
All services support the following values for KEY:
user Username used when connecting to servers
password Password used when connecting to servers
enable_root_user Allow root user access through this service
max_retry_interval Maximum restart retry interval
max_connections Maximum connection limit
connection_timeout Client idle timeout in seconds
auth_all_servers Retrieve authentication data from all servers
strip_db_esc Strip escape characters from database names
localhost_match_wildcard_host Match wildcard host to 'localhost' address
version_string The version string given to client connections
weightby Weighting parameter name
log_auth_warnings Log authentication warnings
retry_on_failure Retry service start on failure
Example: alter service my-service user=maxuser password=maxpwd
```
## MaxScale Core
### Altering MaxScale
The core MaxScale parameters that can be modified at runtime can be altered with
the `alter maxscale` command.
```
alter maxscale - Alter maxscale parameters
Usage: alter maxscale KEY=VALUE ...
Parameters:
KEY=VALUE List of `key=value` pairs separated by spaces
The following configuration values can be altered:
auth_connect_timeout Connection timeout for permission checks
auth_read_timeout Read timeout for permission checks
auth_write_timeout Write timeout for permission checks
admin_auth Enable admin interface authentication
Example: alter maxscale auth_connect_timeout=10
```
## Other Modules

Modules can implement custom commands called _module commands_. These are
@ -18,7 +18,7 @@ report at [Jira](https://jira.mariadb.org).

MaxScale 2.1 has not been extended to understand all new features that
MariaDB 10.2 introduces. Please see
[Support for 10.2](../About/Support-for-10.2.md)
for details.

## Changed Features
@ -42,8 +42,10 @@ for details.

[Here is a list of bugs fixed since the release of MaxScale 2.1.2.](https://jira.mariadb.org/browse/MXS-1212?jql=project%20%3D%20MXS%20AND%20issuetype%20%3D%20Bug%20AND%20resolution%20in%20(Fixed%2C%20Done)%20AND%20fixVersion%20%3D%202.1.3)

* [MXS-1244](https://jira.mariadb.org/browse/MXS-1244) MySQL monitor "detect_replication_lag=true" doesn't work with "mysql51_replication=true"
* [MXS-1227](https://jira.mariadb.org/browse/MXS-1227) Nagios Plugins broken by change in output of "show monitors" in 2.1
* [MXS-1221](https://jira.mariadb.org/browse/MXS-1221) Nagios plugin scripts does not process -S option properly
* [MXS-1213](https://jira.mariadb.org/browse/MXS-1213) Improve documentation of dynamic configuration changes
* [MXS-1212](https://jira.mariadb.org/browse/MXS-1212) Excessive execution time when maxrows limit has been reached
* [MXS-1202](https://jira.mariadb.org/browse/MXS-1202) maxadmin "show service" counters overflow
* [MXS-1200](https://jira.mariadb.org/browse/MXS-1200) config file lines limited to ~1024 chars
@ -64,6 +66,6 @@ Packages can be downloaded [here](https://mariadb.com/resources/downloads).

The source code of MaxScale is tagged at GitHub with a tag, which is identical
with the version of MaxScale. For instance, the tag of version X.Y.Z of MaxScale
is X.Y.Z.

The source code is available [here](https://github.com/mariadb-corporation/MaxScale).
@ -273,5 +273,12 @@ To build the avrorouter from source, you will need the [Avro C](https://avro.apa
library, liblzma, [the Jansson library](http://www.digip.org/jansson/) and sqlite3 development headers. When
configuring MaxScale with CMake, you will need to add `-DBUILD_CDC=Y` to build the CDC module set.
The Avro C library needs to be built with position independent code enabled. You can do this by
adding the following flags to the CMake invocation when configuring the Avro C library.
```
-DCMAKE_C_FLAGS=-fPIC -DCMAKE_CXX_FLAGS=-fPIC
```
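
For example, assuming a checkout of the Avro C sources in a directory named `avro-c-src` (the path is illustrative), the library could be configured, built and installed like this:

```
mkdir avro-build && cd avro-build
cmake -DCMAKE_C_FLAGS=-fPIC -DCMAKE_CXX_FLAGS=-fPIC ../avro-c-src
make
sudo make install
```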
For more details about building MaxScale from source, please refer to the
[Building MaxScale from Source Code](../Getting-Started/Building-MaxScale-from-Source-Code.md) document.
@ -29,7 +29,8 @@ binlog_format=row
binlog_row_image=full
```

_You can find out more about replication formats from the
[MariaDB Knowledge Base](https://mariadb.com/kb/en/mariadb/binary-log-formats/)_

## Configuring MaxScale
@ -77,7 +78,8 @@ You can see that the `source` parameter in the _avro-service_ points to the
_replication-service_ we defined before. This service will be the data source
for the avrorouter. The _filestem_ is the prefix in the binlog files and the
additional _avrodir_ router_option is where the converted Avro files are stored.
For more information on the avrorouter options, read the
[Avrorouter Documentation](../Routers/Avrorouter.md).

After the services were defined, we added the listeners for the
_replication-service_ and the _avro-service_. The _CDC_ protocol is a new
@ -103,29 +105,29 @@ the following format:

```
{
"namespace": "MaxScaleChangeDataSchema.avro",
"type": "record",
"name": "ChangeRecord",
"fields":
[
{
"name": "name",
"type": "string"
},
{
"name": "address",
"type": "string"
},
{
"name": "age",
"type": "int"
}
]
}
```

The avrorouter uses the schema file to identify the columns, their names and
what type they are. The _name_ field contains the name of the column and the _type_
contains the Avro type. Read the [Avro specification](https://avro.apache.org/docs/1.8.1/spec.html)
for details on the layout of the schema files.
@ -130,13 +130,16 @@ servers=dbbubble1,dbbubble2,dbbubble3,dbbubble4,dbbubble5
user=maxscale
passwd=6628C50E07CCE1F0392EDEEB9D1203F3
```

The table you wish to store in Cassandra is called HighScore and will contain
the same columns in both the MariaDB table and the Cassandra table. The first
step is to install a MariaDB instance with the Cassandra storage engine to act
as a bridge server between the relational database and Cassandra. In this
bridge server add a table definition for the HighScore table with the engine
type set to Cassandra.

See [Cassandra Storage Engine Overview](https://mariadb.com/kb/en/mariadb/cassandra-storage-engine-overview/) for details.

Add this server into the MariaDB MaxScale configuration and create a service
that will connect to this server.
```
[CassandraDB]
type=server
address=192.168.4.28
port=3306
protocol=MySQLBackend

[Cassandra]
type=service
router=readconnrouter
@ -28,7 +28,8 @@ set up replication between the two. The only thing we need to do is to create th
users we will use for monitoring and authentication.

The process of creating monitoring and authentication users for MariaDB MaxScale is described
in the Creating Database Users section of the
[MariaDB MaxScale Tutorial](MaxScale-Tutorial.md#creating-database-users).

## Setting up RabbitMQ server
@ -301,7 +302,7 @@ router=cli

type=listener
service=MaxAdmin Service
protocol=maxscaled
socket=default
```
## Testing the setup

@ -317,7 +318,7 @@ sudo systemctl start maxscale

We can see the state of the two servers with MaxAdmin:

```
sudo maxadmin list servers

Servers.
-------------------+-----------------+-------+-------------+--------------------
@ -0,0 +1,10 @@
ExternalProject_Add(libmicrohttpd
URL http://ftpmirror.gnu.org/libmicrohttpd/libmicrohttpd-0.9.54.tar.gz
SOURCE_DIR ${CMAKE_BINARY_DIR}/libmicrohttpd/
CONFIGURE_COMMAND ${CMAKE_BINARY_DIR}/libmicrohttpd//configure --prefix=${CMAKE_BINARY_DIR}/libmicrohttpd/ --enable-shared --with-pic
BINARY_DIR ${CMAKE_BINARY_DIR}/libmicrohttpd/
BUILD_COMMAND make
INSTALL_COMMAND make install)
include_directories(${CMAKE_BINARY_DIR}/libmicrohttpd/include/)
set(MICROHTTPD_LIBRARIES ${CMAKE_BINARY_DIR}/libmicrohttpd/lib/libmicrohttpd.a)
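
# A minimal sketch of how another part of the build could consume the bundled
# library; the "rest_api_demo" target and its source file are hypothetical:
#
#   add_executable(rest_api_demo rest_api_demo.c)
#   add_dependencies(rest_api_demo libmicrohttpd)
#   target_link_libraries(rest_api_demo ${MICROHTTPD_LIBRARIES})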
@ -33,6 +33,28 @@ MXS_BEGIN_DECLS
static const char INET_DEFAULT_USERNAME[] = "admin";
static const char INET_DEFAULT_PASSWORD[] = "mariadb";
/** Return values for the functions */
static const char *ADMIN_ERR_NOMEM = "Out of memory";
static const char *ADMIN_ERR_FILEOPEN = "Unable to create password file";
static const char *ADMIN_ERR_DUPLICATE = "Duplicate username specified";
static const char *ADMIN_ERR_USERNOTFOUND = "User not found";
static const char *ADMIN_ERR_AUTHENTICATION = "Authentication failed";
static const char *ADMIN_ERR_FILEAPPEND = "Unable to append to password file";
static const char *ADMIN_ERR_PWDFILEOPEN = "Failed to open password file";
static const char *ADMIN_ERR_TMPFILEOPEN = "Failed to open temporary password file";
static const char *ADMIN_ERR_PWDFILEACCESS = "Failed to access password file";
static const char *ADMIN_ERR_DELLASTUSER = "Deleting the last user is forbidden";
static const char *ADMIN_ERR_DELROOT = "Deleting the default admin user is forbidden";
static const char *ADMIN_SUCCESS = NULL;
/** User types */
enum user_type
{
USER_TYPE_ALL, // Type that matches all users
USER_TYPE_INET, // Network users
USER_TYPE_UNIX // Linux accounts
};
/*
 * MySQL session specific data
 *
@ -49,16 +71,39 @@ typedef struct admin_session
#endif
} ADMIN_session;
void admin_users_init();

const char* admin_enable_linux_account(const char *uname);
const char* admin_disable_linux_account(const char *uname);
bool admin_linux_account_enabled(const char *uname);
const char* admin_add_inet_user(const char *uname, const char *password);
const char* admin_remove_inet_user(const char *uname, const char *password);
bool admin_inet_user_exists(const char *uname);
bool admin_verify_inet_user(const char *uname, const char *password);
/**
* @brief Convert all admin users to JSON
*
* @param host Hostname of this server
* @param type USER_TYPE_INET for networks users, USER_TYPE_UNIX for unix accounts
* or USER_TYPE_ALL for all users
*
* @return Collection of users resources
*/
json_t* admin_all_users_to_json(const char* host, enum user_type type);
/**
* @brief Convert an admin user into JSON
*
* @param host Hostname of this server
* @param user Username to convert
* @param type The type of user, either USER_TYPE_INET or USER_TYPE_UNIX
*
* @return The user converted to JSON
*/
json_t* admin_user_to_json(const char* host, const char* user, enum user_type type);
void dcb_PrintAdminUsers(DCB *dcb);
MXS_END_DECLS
@ -37,7 +37,9 @@ MXS_BEGIN_DECLS
#define MAX_ADMIN_HOST_LEN 1024

/** JSON Pointers to key parts of JSON objects */
#define MXS_JSON_PTR_DATA "/data"
#define MXS_JSON_PTR_ID "/data/id"
#define MXS_JSON_PTR_TYPE "/data/type"
#define MXS_JSON_PTR_PARAMETERS "/data/attributes/parameters"

/** Pointers to relation lists */
@ -58,7 +60,9 @@ MXS_BEGIN_DECLS
#define MXS_JSON_PTR_PARAM_SSL_VERSION MXS_JSON_PTR_PARAMETERS "/ssl_version"
#define MXS_JSON_PTR_PARAM_SSL_CERT_VERIFY_DEPTH MXS_JSON_PTR_PARAMETERS "/ssl_cert_verify_depth"

/** Non-parameter JSON pointers */
#define MXS_JSON_PTR_MODULE "/data/attributes/module"
#define MXS_JSON_PTR_PASSWORD "/data/attributes/password"
/**
 * Common configuration parameters names
@ -71,9 +75,7 @@ extern const char CN_ADDRESS[];
extern const char CN_ADMIN_AUTH[];
extern const char CN_ADMIN_ENABLED[];
extern const char CN_ADMIN_HOST[];
extern const char CN_ADMIN_PORT[];
extern const char CN_ADMIN_SSL_KEY[];
extern const char CN_ADMIN_SSL_CERT[];
extern const char CN_ADMIN_SSL_CA_CERT[];
@ -94,6 +96,7 @@ extern const char CN_FILTERS[];
extern const char CN_FILTER[];
extern const char CN_GATEWAY[];
extern const char CN_ID[];
extern const char CN_INET[];
extern const char CN_LISTENER[];
extern const char CN_LISTENERS[];
extern const char CN_LOCALHOST_MATCH_WILDCARD_HOST[];
@ -142,7 +145,9 @@ extern const char CN_SSL_VERSION[];
extern const char CN_STRIP_DB_ESC[];
extern const char CN_THREADS[];
extern const char CN_TYPE[];
extern const char CN_UNIX[];
extern const char CN_USER[];
extern const char CN_USERS[];
extern const char CN_VERSION_STRING[];
extern const char CN_WEIGHTBY[];
@ -192,8 +197,6 @@ typedef struct
bool skip_permission_checks; /**< Skip service and monitor permission checks */
char qc_name[PATH_MAX]; /**< The name of the query classifier to load */
char* qc_args; /**< Arguments for the query classifier */
char admin_host[MAX_ADMIN_HOST_LEN]; /**< Admin interface host */
uint16_t admin_port; /**< Admin interface port */
bool admin_auth; /**< Admin interface authentication */
@ -434,6 +437,6 @@ bool config_reload(void);
 * @param host Hostname of this server
 * @return JSON object representing the paths used by MaxScale
 */
json_t* config_maxscale_to_json(const char* host);

MXS_END_DECLS
@ -166,27 +166,6 @@ typedef enum skygw_chk_t
((n) == LOG_DEBUG ? "LOG_DEBUG" : \
"Unknown log priority"))))))))
#define STRDCBSTATE(s) ((s) == DCB_STATE_ALLOC ? "DCB_STATE_ALLOC" : \
((s) == DCB_STATE_POLLING ? "DCB_STATE_POLLING" : \
((s) == DCB_STATE_LISTENING ? "DCB_STATE_LISTENING" : \
@ -32,6 +32,7 @@ MXS_BEGIN_DECLS
#define MXS_JSON_API_LOGS "/maxscale/logs/"
#define MXS_JSON_API_TASKS "/maxscale/tasks/"
#define MXS_JSON_API_MODULES "/maxscale/modules/"
#define MXS_JSON_API_USERS "/users/"
/**
 * @brief Create a JSON object
@ -103,4 +103,7 @@ bool is_mysql_statement_end(const char* start, int len);
bool is_mysql_sp_end(const char* start, int len);
char* modutil_get_canonical(GWBUF* querybuf);

// TODO: Move modutil out of the core
const char* STRPACKETTYPE(int p);

MXS_END_DECLS
@ -85,7 +85,7 @@ bool column_is_decimal(uint8_t type);
bool fixed_string_is_enum(uint8_t type);

/** Value unpacking */
size_t unpack_temporal_value(uint8_t type, uint8_t *ptr, uint8_t* metadata, int length, struct tm *tm);
size_t unpack_enum(uint8_t *ptr, uint8_t *metadata, uint8_t *dest);
size_t unpack_numeric_field(uint8_t *ptr, uint8_t type, uint8_t* metadata, uint8_t* val);
size_t unpack_bit(uint8_t *ptr, uint8_t *null_mask, uint32_t col_count,
@ -14,6 +14,7 @@
#include <maxscale/cppdefs.hh>
#include <stdio.h>
#include <tr1/unordered_map>

namespace maxscale
{
@ -177,4 +178,94 @@ struct CloserTraits<FILE*>
}
};
/* Helper type for Registry. Must be specialized for each EntryType. The types
* listed below are just examples and will not compile. */
template<typename EntryType>
struct RegistryTraits
{
typedef int id_type;
typedef EntryType* entry_type;
static id_type get_id(entry_type entry)
{
static_assert(sizeof(EntryType) != sizeof(EntryType), "get_id() and the"
" surrounding struct must be specialized for every EntryType!");
return 0;
}
static entry_type null_entry()
{
return NULL;
}
};
/**
* Class Registry wraps a map, allowing only a few operations on it. The intended
* use is simple registries, such as the session registry in Worker. The owner
* can expose a reference to this class without exposing all the methods the
* underlying container implements. When instantiating with a new EntryType, the
* traits-class RegistryTraits should be specialized for the new type as well.
*/
template <typename EntryType>
class Registry
{
Registry(const Registry&);
Registry& operator = (const Registry&);
public:
typedef typename RegistryTraits<EntryType>::id_type id_type;
typedef typename RegistryTraits<EntryType>::entry_type entry_type;
Registry()
{
}
/**
* Find an entry in the registry.
*
* @param id Entry key
* @return The found entry, or NULL if not found
*/
entry_type lookup(id_type id) const
{
entry_type rval = RegistryTraits<EntryType>::null_entry();
typename ContainerType::const_iterator iter = m_registry.find(id);
if (iter != m_registry.end())
{
rval = iter->second;
}
return rval;
}
/**
* Add an entry to the registry.
*
* @param entry The entry to add
* @return True if successful, false if id was already in
*/
bool add(entry_type entry)
{
id_type id = RegistryTraits<EntryType>::get_id(entry);
typename ContainerType::value_type new_value(id, entry);
return m_registry.insert(new_value).second;
}
/**
* Remove an entry from the registry.
*
* @param id Entry id
* @return True if an entry was removed, false if not found
*/
bool remove(id_type id)
{
entry_type rval = lookup(id);
if (rval)
{
m_registry.erase(id);
}
return rval;
}
private:
typedef typename std::tr1::unordered_map<id_type, entry_type> ContainerType;
ContainerType m_registry;
};
}
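
// A minimal usage sketch (the "Widget" type is hypothetical, not part of this
// header): specializing RegistryTraits lets Registry<Widget> key entries by id.
//
//   struct Widget
//   {
//       uint64_t id;
//   };
//
//   namespace maxscale
//   {
//   template<>
//   struct RegistryTraits<Widget>
//   {
//       typedef uint64_t id_type;
//       typedef Widget* entry_type;
//       static id_type get_id(entry_type entry) { return entry->id; }
//       static entry_type null_entry() { return NULL; }
//   };
//   }
//
//   maxscale::Registry<Widget> registry;
//   Widget w = { 42 };
//   registry.add(&w);                    // keyed by w.id == 42
//   Widget* found = registry.lookup(42); // returns &w
//   registry.remove(42);                 // erases the entry, returns true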
@ -133,7 +133,7 @@ bool mxs_worker_register_session(MXS_SESSION* session);
 * @param id Which id to remove.
 * @return True if the session was found and removed, false otherwise.
 */
bool mxs_worker_deregister_session(uint64_t id);
/**
 * Find a session in the current worker's session container.
@ -0,0 +1,702 @@
# Building test package:
#
# apt-get install libssl-dev libmariadbclient-dev php5 perl \
# coreutils realpath libjansson-dev openjdk-7-jdk
# pip install JayDeBeApi
# Backend labels:
# REPL_BACKEND
# GALERA_BACKEND
# EXTERN_BACKEND
# BREAKS_REPL
# BREAKS_GALERA
project(maxscale_system_test)
cmake_minimum_required(VERSION 2.8)
include_directories("/usr/include/mysql/")
set(CTEST_BUILD_NAME "${BUILDNAME}")
set(CMAKE_BUILD_TYPE "RelWithDebInfo" CACHE STRING "Choose the type of
build, options are: None(CMAKE_CXX_FLAGS or CMAKE_C_FLAGS used) Debug
Release RelWithDebInfo MinSizeRel.")
set(CMAKE_CXX_FLAGS "-std=c++11 -ggdb")
set(CMAKE_CXX_FLAGS_DEBUG "-std=c++11 -ggdb")
set(CMAKE_CXX_FLAGS_RELEASE "-std=c++11 -ggdb")
set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-std=c++11 -ggdb")
enable_testing()
# utilities.cmake contains all helper functions and extra tools
include(utilities.cmake)
# Is this needed?
configure_file(${CMAKE_SOURCE_DIR}/cnf/maxscale.cnf.template.setup_binlog.in ${CMAKE_BINARY_DIR}/cnf/maxscale.cnf.template.setup_binlog @ONLY)
# Enable Java
find_package(Java)
if(EXISTS "${Java_JAVA_EXECUTABLE}" AND EXISTS "${Java_JAVAC_EXECUTABLE}" AND EXISTS "${Java_JAR_EXECUTABLE}")
include(UseJava)
if(Java_FOUND)
add_subdirectory(maxscale/java/)
endif()
else()
message(WARNING "Java not found, Java based tests are not run.")
endif()
# The core library
add_library(testcore SHARED testconnections.cpp mariadb_nodes.cpp
mariadb_func.cpp get_com_select_insert.cpp maxadmin_operations.cpp big_transaction.cpp
sql_t1.cpp test_binlog_fnc.cpp get_my_ip.cpp big_load.cpp get_com_select_insert.cpp
different_size.cpp fw_copy_rules.cpp maxinfo_func.cpp config_operations.cpp rds_vpc.cpp execute_cmd.cpp
blob_test.cpp cdc_connector.cpp)
target_link_libraries(testcore ${MYSQL_CLIENT} z crypt nsl m pthread ssl crypto dl rt jansson)
install(TARGETS testcore DESTINATION system-test)
add_dependencies(testcore connector-c)
# Tool used to check backend state
add_test_executable_notest(check_backend.cpp check_backend check_backend LABELS CONFIG)
# Configuration tests
add_template(bug359 bug359)
add_template(bug495 bug495)
add_template(bug526 bug526)
add_template(bug479 bug479)
add_template(bug493 bug493)
add_template(bug643_1 bug643_1)
add_template(mxs652_bad_ssl bad_ssl)
add_template(mxs710_bad_socket mxs710_bad_socket)
add_template(mxs710_bad_socket mxs711_two_ports)
add_template(mxs720_line_with_no_equal mxs720_line_with_no_equal)
add_template(mxs720_wierd_line mxs720_wierd_line)
add_template(mxs710_bad_socket mxs799)
add_test_executable(config_test.cpp config_test replication LABELS CONFIG)
add_subdirectory(cdc_datatypes)
# Repeatedly connect to maxscale while the backends reject all connections, expect no crash
add_test_executable(backend_auth_fail.cpp backend_auth_fail replication LABELS readconnroute REPL_BACKEND)
# Regression case for the bug "MaxScale ignores host in user authentication"
add_test_executable(bug143.cpp bug143 replication LABELS MySQLAuth REPL_BACKEND)
# Regression case for the bug "Executing '\s' doesn't always produce complete result set"
add_test_executable(bug422.cpp bug422 replication LABELS readwritesplit readconnroute maxscale REPL_BACKEND)
# Regression case for the bug "Wildcard in host column of mysql.user table don't work properly"
add_test_executable(bug448.cpp bug448 replication LABELS MySQLAuth LIGHT REPL_BACKEND)
# Regression case for the bug "rwsplit counts every connection twice in master - counnection counts leak"
add_test_executable(bug469.cpp bug469 replication LABELS readwritesplit LIGHT REPL_BACKEND)
# Regression case for the bug "Routing Hints route to server sometimes doesn't work"
add_test_executable(bug471.cpp bug471 bug471 LABELS readwritesplit hintfilter REPL_BACKEND)
# Regression case for the bugs "malformed hints cause crash"
add_test_executable(bug473.cpp bug473 hints LABELS readwritesplit hintfilter REPL_BACKEND)
# Regression case for the bug "The end comment tag in hints isn't properly detected"
add_test_executable(bug475.cpp bug475 hints LABELS readwritesplit hintfilter REPL_BACKEND)
# Regression case for the bug "SHOW VARIABLES randomly failing with "Lost connection to MySQL server"
add_test_executable(bug488.cpp bug488 galera LABELS readwritesplit readconnroute maxscale GALERA_BACKEND)
# Regression case for the bug "rw-split router does not send last_insert_id() to master"
add_test_executable(bug507.cpp bug507 replication LABELS readwritesplit LIGHT REPL_BACKEND)
# Regression case for the bug "Referring to a nonexisting server in servers=... doesn't even raise a warning"
add_test_executable(bug509.cpp bug509 galera LABELS readwritesplit GALERA_BACKEND)
# Checks "SELECT * INTO OUTFILE" and "LOAD DATA LOCAL INFILE"
add_test_executable(bug519.cpp bug519 replication LABELS readwritesplit HEAVY REPL_BACKEND)
# Regression case for the bug "'Current no. of conns' not going down"
add_test_executable(bug529.cpp bug529 replication LABELS readwritesplit readconnroute maxscale REPL_BACKEND)
# Regression case for the bugs "get_dcb fails if slaves are not available" and "Maxscale fails to start without anything in the logs if there is no slave available"
add_test_executable(bug547.cpp bug547 replication LABELS readwritesplit REPL_BACKEND)
# Regression case for the bug "crash if max_slave_connections=10% and 4 or less backends are configured"
add_test_executable(bug681.cpp bug681 galera.bug681 LABELS readwritesplit GALERA_BACKEND)
# Regression case for the bug "crash with tee filter"
add_test_executable(bug643.cpp bug643 bug643 LABELS tee REPL_BACKEND)
# Regression case for the bug ""Different error messages from MariaDB and Maxscale"
add_test_script(bug561.sh bug561.sh replication LABELS MySQLAuth REPL_BACKEND)
# Regression case for the bug "Wrong error message for Access denied error"
add_test_script(bug562.sh bug562.sh replication LABELS MySQLAuth REPL_BACKEND)
# Regression case for the bug "Wrong charset settings"
add_test_script(bug564.sh bug564.sh replication LABELS MySQLProtocol REPL_BACKEND)
# Regression case for the bug "Clients CLIENT_FOUND_ROWS setting is ignored by maxscale"
add_test_executable(bug565.cpp bug565 replication LABELS MySQLProtocol REPL_BACKEND)
# Regression case for the bug "Crash if files from /dev/shm/ removed"
add_test_script(bug567.sh bug567.sh bug567 LABELS maxscale REPL_BACKEND)
# Regression case for the bug "Using regex filter hangs MaxScale"
add_test_executable(bug571.cpp bug571 bug571 LABELS regexfilter REPL_BACKEND)
# Attempt to use GRANT with wrong IP, expect no crash or hangs
add_test_executable(bug572.cpp bug572 replication LABELS readwritesplit REPL_BACKEND)
# Regression cases for the bug "Hint filter don't work if listed before regex filter in configuration file"
# (different filter sequence and configuration, but the same test, see .cnf for details)
add_test_script(bug585 bug587 bug585 LABELS regexfilter REPL_BACKEND)
add_test_executable(bug587.cpp bug587 bug587 LABELS regexfilter hintfilter REPL_BACKEND)
add_test_script(bug587_1 bug587 bug587_1 LABELS regexfilter hintfilter REPL_BACKEND)
# Tries to connect Maxscale when all slaves stopped
add_test_executable(bug592.cpp bug592 replication LABELS MySQLAuth readwritesplit REPL_BACKEND)
# Tries to do change user in the loop, checks that authorization is still ok
add_test_executable(bug601.cpp bug601 bug601 LABELS MySQLAuth MySQLProtocol REPL_BACKEND)
# Simple test with enable_root_user=true
add_test_executable(bug620.cpp bug620 bug620 LABELS MySQLAuth MySQLProtocol REPL_BACKEND)
# Regression case for the bug "Crash when user define with old password style (before 4.1 protocol)"
add_test_executable(bug626.cpp bug626 replication LABELS MySQLAuth MySQLProtocol REPL_BACKEND)
# Regression case for the bug 634 "SHOW SLAVE STATUS in RW SPLITTER is send to master"
add_test_executable(bug634.cpp bug634 replication LABELS readwritesplit REPL_BACKEND)
# Regression cases for several TEE filter hangs
add_test_executable(bug645.cpp bug645 bug645 LABELS tee REPL_BACKEND)
add_test_executable(bug645_1.cpp bug645_1 bug645_1 LABELS tee REPL_BACKEND)
add_test_executable(bug649.cpp bug649 bug645 LABELS tee)
add_test_executable(bug650.cpp bug650 bug650 LABELS tee REPL_BACKEND)
# Heavy test for TEE filter
add_test_script(bug648 sql_queries bug648 LABELS tee UNSTABLE HEAVY REPL_BACKEND)
# Crash when host name for some user in mysql.user is very long
add_test_executable(bug653.cpp bug653 replication LABELS MySQLAuth MySQLProtocol REPL_BACKEND)
# Crash with malformed Maxadmin command
add_test_executable(bug654.cpp bug654 replication LABELS maxscale REPL_BACKEND)
# Regression case for the bug "Tee filter: closing child session causes MaxScale to fail"
add_test_executable(bug657.cpp bug657 bug657 LABELS tee REPL_BACKEND)
# Block backends (master or all slaves) and try to connect to Maxscale
add_test_executable(bug658.cpp bug658 replication LABELS readwritesplit readconnroute maxscale REPL_BACKEND)
# Block all backends
add_test_executable(bug662.cpp bug662 replication LABELS readwritesplit readconnroute maxscale REPL_BACKEND)
# Bad TEE filter configuration
add_test_executable(bug664.cpp bug664 bug664 LABELS MySQLAuth MySQLProtocol)
# TEE filter: execute a long sequence of queries and session commands in a loop
add_test_executable(bug670.cpp bug670 bug670 LABELS tee REPL_BACKEND)
# Regression case for the bug "MaxScale crashes if "Users table data" is empty and "show dbusers" is executed in maxadmin"
add_test_executable(bug673.cpp bug673 bug673 LABELS MySQLAuth REPL_BACKEND)
# Crash in case of backend node in Galera cluster stopping and then reconnect to Maxscale
add_test_executable(bug676.cpp bug676 galera LABELS galeramon GALERA_BACKEND)
# Regression test for the bug "RWSplit: 'SELECT @a:=@a+1 as a, test.b FROM test' breaks client session"
add_test_executable(bug694.cpp bug694 bug694 LABELS readwritesplit REPL_BACKEND)
# Compare @@hostname from "select @@wsrep_node_name, @@hostname" and from "select @@hostname, @@wsrep_node_name"
add_test_executable(bug699.cpp bug699 galera LABELS readwritesplit LIGHT GALERA_BACKEND)
# Wrong processing of 'SET GLOBAL sql_mode="ANSI"'
add_test_executable(bug705.cpp bug705 bug705 LABELS MySQLAuth REPL_BACKEND)
# Try SHOW GLOBAL STATUS via Maxscale
add_test_executable(bug711.cpp bug711 bug711 LABELS readwritesplit REPL_BACKEND)
# Prepared statement from PHP application
add_test_executable(bug729.cpp bug729 replication LABELS readwritesplit LIGHT REPL_BACKEND)
# Regression case for the bug "Regex filter and shorter than original replacement queries MaxScale" (crash)
add_test_executable(bug730.cpp bug730 bug730 LABELS regexfilter REPL_BACKEND)
# Test MariaDB 10.2 bulk inserts
add_test_executable(bulk_insert.cpp bulk_insert bulk_insert LABELS MySQLProtocol REPL_BACKEND 10.2)
# Tests for the CCRFilter module
add_test_executable(ccrfilter.cpp ccrfilter ccrfilter LABELS ccrfilter LIGHT REPL_BACKEND)
# Tries to reconfigure replication setup to use another node as a Master
add_test_executable(change_master_during_session.cpp change_master_during_session replication LABELS readwritesplit mysqlmon REPL_BACKEND)
# Executes change_user command in the loop
add_test_executable(change_user.cpp change_user replication LABELS MySQLAuth MySQLProtocol LIGHT REPL_BACKEND)
# Tries to connect to a nonexistent DB, expects no crash
add_test_executable(connect_to_nonexisting_db.cpp connect_to_nonexisting_db replication LABELS MySQLAuth MySQLProtocol LIGHT REPL_BACKEND)
# check if max_connections parameter works
add_test_executable(connection_limit.cpp connection_limit connection_limit LABELS maxscale LIGHT REPL_BACKEND)
# Tries to open too many connections, expects no crash
add_test_executable(crash_out_of_files.cpp crash_out_of_files load LABELS maxscale HEAVY REPL_BACKEND)
# Tries to open too many connections, expects no crash, with Galera backend
add_test_executable(crash_out_of_files_galera.cpp crash_out_of_files_galera galera LABELS maxscale HEAVY GALERA_BACKEND)
# Tries INSERTs with size close to 0x0ffffff * N
add_test_executable(different_size_rwsplit.cpp different_size_rwsplit replication LABELS readwritesplit HEAVY REPL_BACKEND)
# Tries to use 'maxkeys' and 'maxpasswd'
add_test_executable(encrypted_passwords.cpp encrypted_passwords replication LABELS maxscale LIGHT REPL_BACKEND)
# MySQL Monitor Failover Test
add_test_executable(failover_mysqlmon.cpp failover_mysqlmon failover_mysqlmon LABELS mysqlmon REPL_BACKEND)
# Test monitor state change events when manually clearing server bits
add_test_executable(false_monitor_state_change.cpp false_monitor_state_change replication LABELS mysqlmon REPL_BACKEND)
# A set of tests for Firewall filter
add_test_executable(fwf.cpp fwf fwf LABELS dbfwfilter REPL_BACKEND)
add_test_executable(fwf2.cpp fwf2 fwf LABELS dbfwfilter REPL_BACKEND)
add_test_executable(fwf_duplicate_rules.cpp fwf_duplicate_rules fwf LABELS dbfwfilter REPL_BACKEND)
add_test_executable(fwf_prepared_stmt.cpp fwf_prepared_stmt fwf LABELS dbfwfilter REPL_BACKEND)
add_test_executable(fwf_actions.cpp fwf_actions fwf_action LABELS dbfwfilter REPL_BACKEND)
add_test_executable(fwf_logging.cpp fwf_logging fwf_logging LABELS dbfwfilter REPL_BACKEND)
add_test_executable(fwf_reload.cpp fwf_reload fwf LABELS dbfwfilter REPL_BACKEND)
add_test_executable(fwf_syntax.cpp fwf_syntax fwf_syntax LABELS dbfwfilter REPL_BACKEND)
add_test_executable(fwf_com_ping.cpp fwf_com_ping fwf_com_ping LABELS dbfwfilter REPL_BACKEND)
# Galera node priority test
add_test_executable(galera_priority.cpp galera_priority galera_priority LABELS galeramon LIGHT GALERA_BACKEND)
# Block and unblock Master and check that Maxscale survived
add_test_executable(kill_master.cpp kill_master replication LABELS readwritesplit LIGHT REPL_BACKEND)
# Test insertstream filter
add_test_script(insertstream insertstream.sh insertstream LABELS insertstream REPL_BACKEND)
# Check load balancing
add_test_executable(load_balancing.cpp load_balancing load LABELS readwritesplit LIGHT REPL_BACKEND)
# Check load balancing with Galera backend
add_test_executable(load_balancing_galera.cpp load_balancing_galera load_galera LABELS readwritesplit GALERA_BACKEND)
# Check load balancing parameters with Galera backend and 1 persistent connection
add_test_script(load_balancing_galera_pers1 load_balancing_galera load_galera_pers1 LABELS readwritesplit HEAVY GALERA_BACKEND)
# Check load balancing parameters with Galera backend and 10 persistent connections
add_test_script(load_balancing_galera_pers10 load_balancing_galera load_galera_pers10 LABELS readwritesplit HEAVY GALERA_BACKEND)
# Check load balancing parameters with 1 persistent connection
add_test_script(load_balancing_pers1 load_balancing load_pers1 LABELS readwritesplit HEAVY REPL_BACKEND)
# Check load balancing parameters with 10 persistent connections
add_test_script(load_balancing_pers10 load_balancing load_pers10 LABELS readwritesplit HEAVY REPL_BACKEND)
# Test with extremely big blob inserting
add_test_executable(longblob.cpp longblob longblob LABELS readwritesplit readconnroute UNSTABLE HEAVY REPL_BACKEND)
# Test with extremely big blob inserting/selecting with > 16 MB data blocks
add_test_executable(mxs1110_16mb.cpp mxs1110_16mb longblob_filters LABELS readwritesplit readconnroute HEAVY REPL_BACKEND)
# INSERTs an extremely big number of rows
add_test_executable(lots_of_rows.cpp lots_of_rows galera LABELS readwritesplit HEAVY GALERA_BACKEND)
# A set of MariaDB server tests executed against Maxscale RWSplit
add_test_script(mariadb_tests_hartmut mariadb_tests_hartmut.sh replication LABELS readwritesplit REPL_BACKEND)
# A set of MariaDB server tests executed against Maxscale RWSplit (Galera backend)
add_test_script(mariadb_tests_hartmut_galera mariadb_tests_hartmut.sh galera LABELS readwritesplit GALERA_BACKEND)
# Creates a number of connections > max_connections setting
add_test_executable(max_connections.cpp max_connections replication LABELS MySQLAuth MySQLProtocol UNSTABLE HEAVY REPL_BACKEND)
# Test of Maxinfo interface (http)
#add_test_executable(maxinfo.cpp maxinfocpp maxinfo LABELS maxinfo UNSTABLE HEAVY REPL_BACKEND)
# Test of Maxinfo interface (http), Python implementation
add_test_script(maxinfo.py maxinfo.py maxinfo LABELS maxinfo LIGHT REPL_BACKEND)
# Checks that the Maxscale process is running as the 'maxscale' user
add_test_executable(maxscale_process_user.cpp maxscale_process_user replication LABELS maxscale LIGHT REPL_BACKEND)
# Test of the multi-master monitor
add_test_executable(mm.cpp mm mm LABELS mmmon BREAKS_REPL)
# MySQL Monitor with Multi-master configurations
add_test_executable(mm_mysqlmon.cpp mm_mysqlmon mm_mysqlmon LABELS mysqlmon REPL_BACKEND BREAKS_REPL)
# MySQL Monitor crash safety
add_test_executable(mysqlmon_backup.cpp mysqlmon_backup mysqlmon_backup LABELS mysqlmon REPL_BACKEND)
# Regression case for the bug "Two monitors loaded at the same time result into not working installation"
add_test_executable(mxs118.cpp mxs118 mxs118 LABELS maxscale LIGHT REPL_BACKEND)
# Regression case for the bug "disable_sescmd_history causes MaxScale to crash under load"
add_test_executable(mxs127.cpp mxs127 mxs127 LABELS readwritesplit LIGHT REPL_BACKEND)
# Prepares and executes statements in a loop
add_test_executable(mxs244_prepared_stmt_loop.cpp mxs244_prepared_stmt_loop galera LABELS readwritesplit readconnroute LIGHT GALERA_BACKEND)
# Regression case for the bug "SELECT INTO OUTFILE query succeeds even if backed fails"
add_test_executable(mxs280_select_outfile.cpp mxs280_select_outfile replication LABELS readwritesplit REPL_BACKEND)
# Tries prepared stmt 'SELECT 1,1,1,1...' with a different number of '1's
add_test_executable(mxs314.cpp mxs314 galera LABELS MySQLProtocol LIGHT GALERA_BACKEND)
# Creates and closes a lot of connections, checks that 'maxadmin list servers' shows 0 connections at the end
add_test_executable(mxs321.cpp mxs321 replication LABELS maxscale REPL_BACKEND)
# Crash with Galera and backend restart when persistent connections are in use
add_test_script(mxs361 pers_02 mxs361 mxs361 LABELS maxscale GALERA_BACKEND)
# Load huge file with 'LOAD DATA LOCAL INFILE'
add_test_executable(mxs365.cpp mxs365 replication LABELS readwritesplit REPL_BACKEND)
# Connect to Maxscale with a user that has only the 'SELECT' privilege
add_test_executable(mxs37_table_privilege.cpp mxs37_table_privilege replication LABELS MySQLAuth LIGHT REPL_BACKEND)
# Connect to Maxscale with a user that has only the 'SELECT' privilege (Galera backend)
add_test_script(mxs37_table_privilege_galera mxs37_table_privilege galera LABELS MySQLAuth GALERA_BACKEND)
# Connect repeatedly to Schema router and execute simple query, check if auth is ok
add_test_executable(mxs431.cpp mxs431 sharding LABELS schemarouter REPL_BACKEND BREAKS_REPL)
# Execute SELECT REPEAT('a',i) where 'i' changes from 1 to 50000 (bug "Session freeze when small tail packet")
add_test_executable(mxs47.cpp mxs47 replication LABELS MySQLProtocol LIGHT REPL_BACKEND)
# Regression case for the bug "USE <db> hangs when Tee filter uses matching"
add_test_executable(mxs501_tee_usedb.cpp mxs501_tee_usedb mxs501 LABELS tee REPL_BACKEND)
# Open connection, execute 'change user', close connection in the loop
add_test_executable(mxs548_short_session_change_user.cpp mxs548_short_session_change_user mxs548 LABELS MySQLProtocol REPL_BACKEND)
# Playing with blocking and unblocking Master under load
add_test_executable(mxs559_block_master.cpp mxs559_block_master mxs559 LABELS readwritesplit REPL_BACKEND)
# Playing with blocking and unblocking nodes under INSERT load
add_test_executable(mxs564_big_dump.cpp mxs564_big_dump galera_mxs564 LABELS readwritesplit readconnroute GALERA_BACKEND)
# Executes simple queries from a Python script in a loop
add_test_script(mxs585.py mxs585.py replication LABELS readwritesplit readconnroute UNSTABLE HEAVY REPL_BACKEND)
# Simple transactions in a loop from a Python script with client SSL on
add_test_script(mxs598.py mxs598.py ssl LABELS MySQLProtocol UNSTABLE HEAVY REPL_BACKEND)
# Regression case for the bug "MaxScale fails to start silently if config file is not readable"
add_test_executable(mxs621_unreadable_cnf.cpp mxs621_unreadable_cnf replication LABELS maxscale REPL_BACKEND)
# Plays with 'restart service' and restarts Maxscale under load
add_test_executable(mxs657_restart.cpp mxs657_restart replication LABELS maxscale HEAVY REPL_BACKEND)
add_test_executable(mxs657_restart_service.cpp mxs657_restart_service replication LABELS maxscale REPL_BACKEND)
# Puts Cyrillic letters into a table and checks them from the backend
add_test_executable(mxs682_cyrillic.cpp mxs682_cyrillic replication LABELS maxscale LIGHT REPL_BACKEND)
# Puts Cyrillic letters into a table and checks them from the backend (Galera backend)
add_test_script(mxs682_cyrillic_galera mxs682_cyrillic galera LABELS maxscale GALERA_BACKEND)
# Connect using a different default database with a user that has database- and table-level grants
add_test_executable(mxs716.cpp mxs716 replication LABELS MySQLAuth LIGHT REPL_BACKEND)
# MaxScale configuration check functionality test (maxscale -c)
add_test_executable(mxs722.cpp mxs722 mxs722 LABELS maxscale LIGHT REPL_BACKEND)
# Test of 'maxadmin' user Unix accounts enable/disable
add_test_executable(mxs729_maxadmin.cpp mxs729_maxadmin replication LABELS MaxAdminAuth LIGHT REPL_BACKEND)
# Simple connect test in Bash, checks that the DB defined on the command line is selected
add_test_script(mxs791.sh mxs791.sh replication LABELS UNSTABLE HEAVY REPL_BACKEND)
# Simple connect test in Bash, checks that the DB defined on the command line is selected (Galera backend)
add_test_script(mxs791_galera.sh mxs791_galera.sh galera LABELS UNSTABLE HEAVY GALERA_BACKEND)
# Checks "Current no. of conns" maxadmin output after long blob inserting
add_test_executable(mxs812_1.cpp mxs812_1 longblob LABELS readwritesplit REPL_BACKEND)
# Checks "Current no. of conns" maxadmin output after long blob inserting
add_test_executable(mxs812_2.cpp mxs812_2 longblob LABELS readwritesplit REPL_BACKEND)
# Execute prepared statements while master is blocked, checks "Current no. of conns" after the test
add_test_executable(mxs822_maxpasswd.cpp mxs822_maxpasswd maxpasswd LABELS maxscale REPL_BACKEND)
# Do only SELECTs for a time > wait_timeout and then do an INSERT
# This test will fail because the functionality hasn't been implemented
add_test_executable(mxs827_write_timeout.cpp mxs827_write_timeout mxs827_write_timeout LABELS readwritesplit REPL_BACKEND)
# Block and unblock first and second slaves and check that they are recovered
add_test_executable(mxs874_slave_recovery.cpp mxs874_slave_recovery mxs874 LABELS readwritesplit REPL_BACKEND)
# A set of dynamic configuration tests
# Server removal test
add_test_executable(mxs922_bad_server.cpp mxs922_bad_server mxs922 LABELS maxscale REPL_BACKEND)
# Server creation test
add_test_executable(mxs922_server.cpp mxs922_server mxs922_base LABELS maxscale REPL_BACKEND)
# Monitor creation test
add_test_executable(mxs922_monitor.cpp mxs922_monitor mxs922_base LABELS maxscale REPL_BACKEND)
# Double creation of listeners, expect no crash
add_test_executable(mxs922_double_listener.cpp mxs922_double_listener mxs922_base LABELS maxscale REPL_BACKEND)
# Test persisting of configuration changes
add_test_executable(mxs922_restart.cpp mxs922_restart mxs922 LABELS maxscale REPL_BACKEND)
# Server scaling test
add_test_executable(mxs922_scaling.cpp mxs922_scaling mxs922_base LABELS maxscale REPL_BACKEND)
# Dynamic listener SSL test
add_test_executable(mxs922_listener_ssl.cpp mxs922_listener_ssl mxs922_base LABELS maxscale REPL_BACKEND)
# Test of MaxRows filter
add_test_executable(mxs1071_maxrows.cpp mxs1071_maxrows maxrows LABELS maxrowsfilter REPL_BACKEND)
# Test of Masking filter
add_test_script(masking_mysqltest masking_mysqltest_driver.sh masking_mysqltest LABELS maskingfilter REPL_BACKEND)
add_test_script(masking_user masking_user.sh masking_mysqltest LABELS maskingfilter REPL_BACKEND)
# Test of Cache filter
add_test_script(cache_basic cache_basic.sh cache_basic LABELS cachefilter REPL_BACKEND)
# Set utf8mb4 in the backend and restart Maxscale
add_test_executable(mxs951_utfmb4.cpp mxs951_utfmb4 replication LABELS REPL_BACKEND)
# Execute given SQL through readwritesplit (with temporary tables usage)
add_test_executable(mxs957.cpp mxs957 replication LABELS readwritesplit REPL_BACKEND)
# Regression case for the bug "Defunct processes after maxscale have executed script during failover"
add_test_executable(mxs1045.cpp mxs1045 mxs1045 LABELS maxscale REPL_BACKEND)
# MXS-1123: connect_timeout setting causes frequent disconnects
# https://jira.mariadb.org/browse/MXS-1123
add_test_executable(mxs1123.cpp mxs1123 mxs1123 LABELS maxscale REPL_BACKEND)
# 'namedserverfilter' test
add_test_executable(namedserverfilter.cpp namedserverfilter namedserverfilter LABELS namedserverfilter LIGHT REPL_BACKEND)
# Authentication error testing
add_test_executable(no_password.cpp no_password replication LABELS MySQLAuth LIGHT REPL_BACKEND)
# Open and immediately close a big number of connections
add_test_executable(open_close_connections.cpp open_close_connections replication LABELS maxscale REPL_BACKEND)
# Open and immediately close a big number of connections, SSL is in use
#
# The test is broken due to some problem in the connector. It crashes with a
# double free error somewhere deep inside the connector/SSL libraries.
#
# add_test_script(open_close_connections_ssl open_close_connections ssl LABELS maxscale REPL_BACKEND)
# Persistent connection test
add_test_executable(pers_01.cpp pers_01 pers_01 LABELS maxscale REPL_BACKEND GALERA_BACKEND)
# Test with persistent connections configured and a big number of open connections, expect no crash
add_test_executable(pers_02.cpp pers_02 pers_01 LABELS maxscale REPL_BACKEND GALERA_BACKEND)
# Check if prepared statement works via Maxscale (via RWSplit)
add_test_executable(prepared_statement.cpp prepared_statement replication LABELS readwritesplit LIGHT REPL_BACKEND)
# Connect to ReadConn in master mode and check if there is only one backend connection to master
add_test_executable(readconnrouter_master.cpp readconnrouter_master replication LABELS readconnroute LIGHT REPL_BACKEND)
# Creates 100 connections to ReadConn in slave mode and check if connections are distributed among all slaves
add_test_executable(readconnrouter_slave.cpp readconnrouter_slave replication LABELS readconnroute LIGHT REPL_BACKEND)
# Regex filter test
add_test_executable(regexfilter1.cpp regexfilter1 regexfilter1 LABELS regexfilter LIGHT REPL_BACKEND)
# Check that Maxscale reacts correctly to the Ctrl+C signal and termination does not take ages
add_test_script(run_ctrl_c.sh run_ctrl_c.sh replication LABELS maxscale LIGHT REPL_BACKEND)
# Run a set of queries in a loop (see setmix.sql) using a Perl client
add_test_script(run_session_hang.sh run_session_hang.sh replication LABELS readwritesplit REPL_BACKEND)
# Checks changes of COM_SELECT and COM_INSERT after queries to verify that RWSplit routes writes to the master and reads to the slaves
add_test_executable(rw_select_insert.cpp rw_select_insert replication LABELS readwritesplit REPL_BACKEND)
# Checks that connections are distributed equally among the backends
add_test_executable(rwsplit_conn_num.cpp rwsplit_conn_num repl_lgc LABELS readwritesplit LIGHT REPL_BACKEND)
# Check that there is one connection to Master and one connection to one of slaves
add_test_executable(rwsplit_connect.cpp rwsplit_connect replication LABELS readwritesplit LIGHT REPL_BACKEND)
# Test of the read-only mode for readwritesplit when master fails (blocked)
add_test_executable(rwsplit_readonly.cpp rwsplit_readonly rwsplit_readonly LABELS readwritesplit REPL_BACKEND)
# Test of the read-only mode for readwritesplit when master fails (blocked), under load
add_test_executable(rwsplit_readonly_stress.cpp rwsplit_readonly_stress rwsplit_readonly LABELS readwritesplit HEAVY REPL_BACKEND)
# Test readwritesplit multi-statement handling
add_test_executable(rwsplit_multi_stmt.cpp rwsplit_multi_stmt rwsplit_multi_stmt LABELS readwritesplit REPL_BACKEND)
# Test readwritesplit multi-statement handling
add_test_executable(rwsplit_read_only_trx.cpp rwsplit_read_only_trx rwsplit_read_only_trx LABELS readwritesplit REPL_BACKEND)
# Test replication-manager with MaxScale
add_test_executable(replication_manager.cpp replication_manager replication_manager LABELS maxscale REPL_BACKEND)
add_test_executable_notest(replication_manager_2nodes.cpp replication_manager_2nodes replication_manager_2nodes LABELS maxscale REPL_BACKEND)
add_test_executable_notest(replication_manager_3nodes.cpp replication_manager_3nodes replication_manager_3nodes LABELS maxscale REPL_BACKEND)
# Schemarouter duplicate database detection test: create a DB on all nodes and then try a query against the schema router
add_test_executable(schemarouter_duplicate_db.cpp schemarouter_duplicate_db schemarouter_duplicate_db LABELS schemarouter REPL_BACKEND)
# Test of external script execution
add_test_executable(script.cpp script script LABELS maxscale REPL_BACKEND)
# Check if 'weightby' parameter works
add_test_executable(server_weight.cpp server_weight galera.weight LABELS readwritesplit readconnroute LIGHT GALERA_BACKEND)
# Executes a lot of session commands with "disable_sescmd_history=true" and checks that memory consumption is not increasing
add_test_executable(ses_bigmem.cpp ses_bigmem no_ses_cmd_store LABELS readwritesplit REPL_BACKEND)
# test for 'max_sescmd_history' and 'connection_timeout' parameters
add_test_executable(session_limits.cpp session_limits session_limits LABELS readwritesplit REPL_BACKEND)
# Test of schema router
add_test_executable(sharding.cpp sharding sharding LABELS schemarouter BREAKS_REPL)
# MXS-1160: LOAD DATA LOCAL INFILE with schemarouter
add_test_executable(sharding_load_data.cpp sharding_load_data sharding LABELS schemarouter BREAKS_REPL)
# Do short sessions (open conn, short query, close conn) in the loop
add_test_executable(short_sessions.cpp short_sessions replication LABELS readwritesplit readconnroute REPL_BACKEND)
# Do short sessions (open conn, short query, close conn) in the loop, client ssl is ON
add_test_script(short_sessions_ssl short_sessions ssl LABELS readwritesplit readconnroute REPL_BACKEND)
# Regression case for a crash if the maxadmin 'show monitors' command is issued but no monitor is running
add_test_executable(show_monitor_crash.cpp show_monitor_crash show_monitor_crash LABELS maxscale)
# Check how Maxscale works in case of one slave failure, only one slave is configured
add_test_executable(slave_failover.cpp slave_failover replication.one_slave LABELS readwritesplit REPL_BACKEND)
# Execute queries of different size, check data is the same when accessing via Maxscale and directly to backend
add_test_executable(sql_queries.cpp sql_queries replication LABELS readwritesplit REPL_BACKEND)
# Execute queries of different size, check data is the same when accessing via Maxscale and directly to backend, one persistant connection configured
add_test_script(sql_queries_pers1 sql_queries sql_queries_pers1 LABELS maxscale readwritesplit HEAVY REPL_BACKEND)
# Execute queries of different size, check data is the same when accessing via Maxscale and directly to backend, 10 persistant connections configured
add_test_script(sql_queries_pers10 sql_queries sql_queries_pers10 LABELS maxscale readwritesplit HEAVY REPL_BACKEND)
# Execute queries of different size, check data is the same when accessing via Maxscale and directly to backend, client ssl is ON
add_test_script(ssl sql_queries ssl LABELS maxscale readwritesplit REPL_BACKEND)
# Check load balancing, client ssl is ON
add_test_script(ssl_load load_balancing ssl_load LABELS maxscale readwritesplit REPL_BACKEND)
# Check load balancing, client ssl is ON, Galera backend
add_test_script(ssl_load_galera load_balancing_galera ssl_load_galera LABELS maxscale readwritesplit GALERA_BACKEND)
# Testing slaves who have lost their master and how MaxScale works with them
add_test_executable(stale_slaves.cpp stale_slaves replication LABELS mysqlmon REPL_BACKEND)
# Run a sysbench test and block one slave during test execution
add_test_executable(sysbench_kill_slave.cpp sysbench_kill_slave replication LABELS UNSTABLE HEAVY REPL_BACKEND)
# Check temporary table command functionality
add_test_executable(temporal_tables.cpp temporal_tables replication LABELS readwritesplit REPL_BACKEND)
# Test routing hints
add_test_executable(test_hints.cpp test_hints hints2 LABELS hintfilter LIGHT REPL_BACKEND)
# Binlogrouter tests, these heavily alter the replication so they are run last
add_test_executable(avro.cpp avro avro LABELS avrorouter binlogrouter LIGHT BREAKS_REPL)
# Test avrorouter file compression
add_test_script(avro_compression avro avro_compression LABELS avrorouter binlogrouter LIGHT BREAKS_REPL)
# In the binlog router setup stop Master and promote one of the Slaves to be new Master
add_test_executable(binlog_change_master.cpp binlog_change_master setup_binlog_tx_safe LABELS binlogrouter BREAKS_REPL)
# Tries to start the binlog setup with an incomplete maxscale.cnf
add_test_executable(binlog_incompl.cpp binlog_incompl binlog_incompl LABELS binlogrouter BREAKS_REPL)
# configure binlog router setup, execute queries and transactions, check data; install semisync plugin, router options semisync=1,transaction_safety=1
add_test_executable(binlog_semisync.cpp binlog_semisync setup_binlog_semisync LABELS binlogrouter HEAVY BREAKS_REPL)
# configure binlog router setup, execute queries and transactions, check data; install semisync plugin, router options semisync=0,transaction_safety=0
add_test_script(binlog_semisync_txs0_ss0 binlog_semisync setup_binlog_semisync_txs0_ss0 LABELS binlogrouter HEAVY BREAKS_REPL)
# configure binlog router setup, execute queries and transactions, check data; install semisync plugin, router options semisync=0,transaction_safety=1
add_test_script(binlog_semisync_txs0_ss1 binlog_semisync setup_binlog_semisync_txs0_ss1 LABELS binlogrouter HEAVY BREAKS_REPL)
# configure binlog router setup, execute queries and transactions, check data; install semisync plugin, router options semisync=1,transaction_safety=0
add_test_script(binlog_semisync_txs1_ss0 binlog_semisync setup_binlog_semisync_txs1_ss0 LABELS binlogrouter HEAVY BREAKS_REPL)
set_tests_properties(binlog_semisync PROPERTIES TIMEOUT 3600)
set_tests_properties(binlog_semisync_txs0_ss0 PROPERTIES TIMEOUT 3600)
set_tests_properties(binlog_semisync_txs0_ss1 PROPERTIES TIMEOUT 3600)
set_tests_properties(binlog_semisync_txs1_ss0 PROPERTIES TIMEOUT 3600)
#
# The encryption tests don't work as they require the file key management plugin
#
# Binlog encryption test (aes_cbc encryption)
#add_test_executable(mxs1073_binlog_enc.cpp mxs1073_binlog_enc binlog_enc_aes_cbc LABELS binlogrouter 10.1 BREAKS_REPL)
# Binlog encryption test (aes_ctr encryption)
#add_test_script(mxs1073_binlog_enc_aes_ctr mxs1073_binlog_enc binlog_enc_aes_ctr LABELS binlogrouter 10.1 BREAKS_REPL)
# Test of CDC protocol (avro listener)
add_test_executable(cdc_client.cpp cdc_client avro LABELS avrorouter binlogrouter BREAKS_REPL)
# Tries INSERTs with size close to 0x0ffffff * N (with binlog backend)
add_test_executable(different_size_binlog.cpp different_size_binlog setup_binlog LABELS binlogrouter HEAVY BREAKS_REPL)
# Try to configure the binlog router to use a wrong password for the master and check 'slave status' on the binlog router
add_test_executable(mxs781_binlog_wrong_passwrd.cpp mxs781_binlog_wrong_passwrd setup_binlog LABELS binlogrouter BREAKS_REPL)
# Regression case for crash if long host name is used for binlog router (in 'change master to ...')
add_test_executable(mxs813_long_hostname.cpp mxs813_long_hostname setup_binlog LABELS binlogrouter BREAKS_REPL)
# configure the binlog router setup, execute queries and transactions, check data
add_test_executable(setup_binlog.cpp setup_binlog setup_binlog LABELS binlogrouter BREAKS_REPL)
# configure the binlog router setup, execute queries and transactions, check data; install semisync plugin, backends started with the --binlog-checksum=CRC32 option
# disabled because it is included in the setup_binlog test; a separate test was created for debugging
# add_test_executable(setup_binlog_crc_32.cpp setup_binlog_crc_32 setup_binlog LABELS binlogrouter BREAKS_REPL)
# configure the binlog router setup, execute queries and transactions, check data; install semisync plugin, backends started with the --binlog-checksum=NONE option
# disabled because it is included in the setup_binlog test; a separate test was created for debugging
# add_test_executable(setup_binlog_crc_none.cpp setup_binlog_crc_none setup_binlog LABELS binlogrouter LIGHT BREAKS_REPL)
# Creates a KDC and tries authorization via GSSAPI (both client and backend)
# works only with yum-based distributions
# TODO: make it work with zypper and apt, move part of the KDC setup to MDBCI
add_test_executable(kerberos_setup.cpp kerberos_setup kerberos LABELS HEAVY gssapi REPL_BACKEND)
# enable after fixing MXS-419
# add_test_executable(mxs419_lots_of_connections.cpp mxs419_lots_of_connections replication LABELS REPL_BACKEND)
# TODO: Alter the user_cache test
# add_test_executable(user_cache.cpp user_cache user_cache LABELS REPL_BACKEND)
# https://mariadb.atlassian.net/browse/MXS-576 - it is possible to set negative value for
# 'persistpoolmax' without any warning
#add_test_executable(bad_pers.cpp bad_pers bad_pers LABELS REPL_BACKEND)
# Test Aurora RDS monitor
add_test_executable(auroramon.cpp auroramon auroramon LABELS HEAVY EXTERNAL_BACKEND)
# Disabled for the time being
# add_test_executable(gatekeeper.cpp gatekeeper gatekeeper LABELS gatekeeper)
# not implemented, just template
#add_test_executable(rw_galera_select_insert.cpp rw_galera_select_insert galera LABELS readwritesplit GALERA_BACKEND)
# a tool to delete RDS Aurora cluster
add_test_executable_notest(delete_rds.cpp delete_rds replication LABELS EXTERN_BACKEND)
# a tool to create RDS Aurora cluster
add_test_executable_notest(create_rds.cpp create_rds replication LABELS EXTERN_BACKEND)
# start sysbench against RWSplit for infinite execution
add_test_executable_notest(long_sysbench.cpp long_sysbench replication LABELS readwritesplit REPL_BACKEND)
configure_file(templates.h.in templates.h @ONLY)
include(CTest)

View File

@ -0,0 +1,15 @@
## This file should be placed in the root directory of your project.
## Then modify the CMakeLists.txt file in the root directory of your
## project to incorporate the testing dashboard.
##
## # The following are required to submit to the CDash dashboard:
## ENABLE_TESTING()
## INCLUDE(CTest)
set(CTEST_PROJECT_NAME "MaxScale")
set(CTEST_NIGHTLY_START_TIME "01:00:00 UTC")
set(CTEST_DROP_METHOD "http")
set(CTEST_DROP_SITE "jenkins.engskysql.com")
set(CTEST_DROP_LOCATION "/CDash/submit.php?project=MaxScale")
set(CTEST_DROP_SITE_CDASH TRUE)

View File

@ -0,0 +1,289 @@
# Build and test environment setup
### Full build and test environment setup
<pre>
# install ruby
sudo apt-get install ruby
# install all needed libraries
sudo apt-get install libxslt-dev libxml2-dev libvirt-dev zlib1g-dev
# install vagrant
# it is also possible to install Vagrant from distribution repository, but in case of problems please use 1.7.2
wget https://dl.bintray.com/mitchellh/vagrant/vagrant_1.7.2_x86_64.deb
sudo dpkg -i vagrant_1.7.2_x86_64.deb
# install Vagrant plugins
vagrant plugin install vagrant-aws vagrant-libvirt vagrant-mutate
# get MDBCI, build scripts, descriptions of MDBCI boxes and keys from GitHub
git clone https://github.com/OSLL/mdbci.git
git clone git@github.com:mariadb-corporation/mdbci-repository-config.git
git clone git@github.com:mariadb-corporation/build-scripts-vagrant.git
git clone git@github.com:mariadb-corporation/mdbci-boxes
# Copy scripts and boxes to proper places
mv build-scripts-vagrant build-scripts
scp -r mdbci-boxes/* mdbci/
# set proper access rights for ssh keys (for ppc64 machines)
chmod 400 mdbci/KEYS/*
# install all the stuff for test package build
sudo apt-get install cmake gcc g++ libssl-dev
sudo apt-get install mariadb-client shellcheck
# install MariaDB development library
sudo apt-get install libmariadbclient-dev
# Ubuntu repos can contain the same package under a different name, 'libmariadb-client-lgpl-dev',
# but it cannot be used to build maxscale-system-test; please use the mariadb.org repositories
# https://downloads.mariadb.org/mariadb/repositories/
# Do not forget to remove all other MariaDB and MySQL packages!
# install qemu (more info https://en.wikibooks.org/wiki/QEMU/Installing_QEMU)
sudo apt-get install qemu qemu-kvm libvirt-bin
# install virt-manager (if you prefer UI)
sudo apt-get install virt-manager
# install docker (if needed) - see https://docs.docker.com/engine/installation/
# if cmake from distribution repository is too old it is recommended to build it from latest sources
wget https://cmake.org/files/v3.4/cmake-3.4.1.tar.gz # replace 3.4.1 with the latest version
tar xzvf cmake-3.4.1.tar.gz
cd cmake-3.4.1
./bootstrap
make
sudo make install
cd
# sysbench 0.5 should be in sysbench_deb7 directory; it can be built from source:
git clone https://github.com/akopytov/sysbench.git
cd sysbench
./autogen.sh
./configure
make
cd ..
mv sysbench sysbench_deb7
# on OVH servers the 'docker' and 'libvirt' working directories need to be moved to /home
# (replace 'vagrant' with your home directory name)
cd /var/lib/
sudo mv docker /home/vagrant/
sudo ln -s /home/vagrant/docker docker
cd libvirt
sudo mv images /home/vagrant/
sudo ln -s /home/vagrant/images images
cd
# (HACK) in case of problem with building sysbench:
scp -r vagrant@maxscale-jenkins.mariadb.com:/home/vagrant/sysbench_deb7 .
# (HACK) in case of problem with 'dummy' box (problem is caused by MDBCI bug):
scp -r vagrant@maxscale-jenkins.mariadb.com:/home/vagrant/.vagrant.d/boxes/dummy .vagrant.d/boxes/
# MariaDBManager-GPG* files are needed for Maxscale builds in the home directory
# put AWS keys to aws-config.yml (see https://github.com/OSLL/mdbci/blob/master/aws-config.yml.template)
# add the current user to the 'libvirtd' group (replace 'user_name' with your user name)
sudo usermod -a -G libvirtd user_name
# start libvirt default pool
virsh pool-start default
</pre>
### Setup VMs manually
#### Empty virtual machine
The following template can be used to create an empty VM (for qemu machines):
<pre>
{
"cookbook_path" : "../recipes/cookbooks/",
"build" :
{
"hostname" : "default",
"box" : "###box###",
"product" : {
"name" : "packages"
}
}
}
</pre>
for AWS machines:
<pre>
{
"cookbook_path" : "../recipes/cookbooks/",
"aws_config" : "../aws-config.yml",
"build" :
{
"hostname" : "build",
"box" : "###box###"
}
}
</pre>
The following boxes are available:
* qemu: debian_7.5_libvirt, ubuntu_trusty_libvirt, centos_7.0_libvirt, centos_6.5_libvirt
* AWS: rhel5, rhel6, rhel7, sles11, sles12, fedora20, fedora21, fedora22, ubuntu_wily, ubuntu_vivid, centos7, deb_jessie
#### Maxscale and backend machines creation
* Generation of Maxscale repository description
It is necessary to generate descriptions of the MariaDB and Maxscale repositories before bringing up the Maxscale machine with Vagrant
<pre>
export ci_url="http://my_repository_site.com/repository/"
~/mdbci-repository-config/generate_all.sh $repo_dir
~/mdbci-repository-config/maxscale-ci.sh $target $repo_dir
</pre>
where
<pre>
$repo_dir - directory where repository descriptions will be created
$target - directory with MaxScale packages in the repository
</pre>
example:
<pre>
export ci_url="http://max-tst-01.mariadb.com/ci-repository/"
~/mdbci-repository-config/generate_all.sh repo.d
~/mdbci-repository-config/maxscale-ci.sh develop repo.d
</pre>
More information can be found in the [MDBCI documentation](https://github.com/OSLL/mdbci#repod-files) and in the [mdbci-repository-config documentation](https://github.com/mariadb-corporation/mdbci-repository-config#mdbci-repository-config)
* Preparing configuration description
Virtual machines should be described in JSON format. Example template can be found in the [build-scripts package](https://github.com/mariadb-corporation/build-scripts-vagrant/blob/master/test/template.libvirt.json).
MariaDB machine description example:
<pre>
"node0" :
{
"hostname" : "node0",
"box" : "centos_7.0_libvirt",
"product" : {
"name": "mariadb",
"version": "10.0",
"cnf_template" : "server1.cnf",
"cnf_template_path": "~/build-scripts/test-setup-scripts/cnf"
}
}
</pre>
"cnf_template" defines .cnf file which will be places into MariaDB machine. [build-scripts package](https://github.com/mariadb-corporation/build-scripts-vagrant/tree/master/test-setup-scripts/cnf) contains examples of .cnf files.
MariaDB Galera machine description example:
<pre>
"galera0" :
{
"hostname" : "galera0",
"box" : "centos_7.0_libvirt",
"product" : {
"name": "galera",
"version": "10.0",
"cnf_template" : "galera_server1.cnf",
"cnf_template_path": "~/build-scripts/test-setup-scripts/cnf"
}
}
</pre>
For Galera machines MDBCI automatically puts the following information into the .cnf file:
|field|description|
|------|----|
|###NODE-ADDRESS###|IP address of the node (for AWS - private IP)|
|###NODE-NAME###|Replaced by the node name ("node0" in this example)|
|###GALERA-LIB-PATH###|Path to the Galera library file (.so file)|
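For illustration, a Galera .cnf template fragment using these placeholders might look like this (a hypothetical sketch; the real template files are in the build-scripts package referenced above):
<pre>
wsrep_node_address=###NODE-ADDRESS###
wsrep_node_name=###NODE-NAME###
wsrep_provider=###GALERA-LIB-PATH###
</pre>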
Example of Maxscale machine description:
<pre>
"maxscale" :
{
"hostname" : "maxscale",
"box" : "centos_7.0_libvirt",
"product" : {
"name": "maxscale"
}
}
</pre>
#### Generating the configuration and bringing machines up
After creating the machine description JSON, two steps are needed.
1. Generate configuration
<pre>
./mdbci --override --template $template_name.json --repo-dir $repo_dir generate $name
</pre>
where
|variable|description|
|----|----|
|$template_name|name of the machine description JSON file|
|$repo_dir|directory with the repository descriptions generated by mdbci-repository-config (repo.d)|
|$name|name of the test configuration; will be used as the directory name for the Vagrant files|
2. Bring machines up
<pre>
./mdbci up $name
</pre>
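For example, with the repository descriptions generated above in repo.d and the template.libvirt.json template mentioned earlier (adjust the names to your setup), the two steps could look like this:
<pre>
./mdbci --override --template template.libvirt.json --repo-dir repo.d generate my_setup
./mdbci up my_setup
</pre>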
#### Configuring DB users
Automatic DB user creation is not implemented yet, so it has to be done manually. See [setup_repl.sh](https://github.com/mariadb-corporation/build-scripts-vagrant/blob/master/test-setup-scripts/setup_repl.sh) and [setup_galera.sh](https://github.com/mariadb-corporation/build-scripts-vagrant/blob/master/test-setup-scripts/galera/setup_galera.sh) for details.
Any test from 'maxscale-system-test' checks the Master/Slave and Galera configurations and restores them if they are broken, but this works only if the DB users are created.
TODO: add it into 'maxscale-system-test'
### Access VMs
MDBCI provides a number of commands to get information about running virtual machines. See the [MDBCI documentation](https://github.com/OSLL/mdbci#mdbci-syntax) for details.
The [set_env_vagrant.sh script](https://github.com/mariadb-corporation/build-scripts-vagrant/blob/master/test/set_env_vagrant.sh) defines the environment variables needed by 'maxscale-system-test'. The same variables can be used to access VMs manually.
The script has to be executed from the 'mdbci' directory. Do not forget the leading '.':
<pre>
cd ~/mdbci/
. ../build-scripts/test/set_env_vagrant.sh $name
</pre>
After that, virtual machines can be accessed via ssh, for example:
<pre>
ssh -i $maxscale_sshkey $maxscale_access_user@$maxscale_IP
</pre>
Another way is to use 'vagrant ssh':
<pre>
cd ~/mdbci/$name/
vagrant ssh &lt;node_name&gt;
</pre>
MDBCI can also give the IP address and the path to the ssh key:
<pre>
./mdbci show network &lt;configuration_name&gt;/&lt;node_name&gt; --silent
./mdbci show keyfile &lt;configuration_name&gt;/&lt;node_name&gt; --silent
./mdbci ssh --command 'whoami' &lt;configuration_name&gt;/&lt;node_name&gt; --silent
</pre>
The node name for the build machine is 'build'.
Node names for a typical test setup are node0, ..., node3, galera0, ..., galera3, maxscale.
Example:
<pre>
./mdbci show network centos6_vm01/build --silent
./mdbci show keyfile centos6_vm01/build --silent
./mdbci ssh --command 'whoami' centos6_vm01/build --silent
</pre>
### Destroying configuration
<pre>
cd ~/mdbci/$name
vagrant destroy -f
</pre>

View File

@ -0,0 +1,204 @@
# Creating a test case
This document describes the basic principles of test case creation and provides a list of the most useful functions and properties.
For detailed function and property descriptions and their full list, please see the documentation generated by Doxygen.
## Test case basics
For every test case the following should be created:
- test executable
- record in the 'templates' file
- Maxscale configuration template (if test requires special Maxscale configuration)
- [CMakeLists.txt](CMakeLists.txt) record:
- add_test_executable(<source.cpp> <binary_name> <cnf_template_name>)
- 'set_tests_properties' if the test should be added to a group or a bigger timeout should be defined (default is 1800 s); see the example below
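For example, the registration of the sql_queries test in [CMakeLists.txt](CMakeLists.txt) follows this pattern (the set_tests_properties line is only needed when the default timeout is too short):
<pre>
add_test_executable(sql_queries.cpp sql_queries replication LABELS readwritesplit REPL_BACKEND)
set_tests_properties(sql_queries PROPERTIES TIMEOUT 3600)
</pre>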
## 'templates' file
The 'templates' file contains the Maxscale configuration template suffix for every test in plain text format:
\<test_executable_name\> \<suffix_of_cnf_template\>
The template itself should be:
cnf/maxscale.cnf.template.\<suffix_of_cnf_template\>
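For example, the sql_queries test (registered in CMakeLists.txt with the 'replication' template) would have the record:
<pre>
sql_queries replication
</pre>
and its template would be cnf/maxscale.cnf.template.replication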
## Maxscale configuration template
All templates are in the cnf/ directory:
cnf/maxscale.cnf.template.\<suffix_of_cnf_template\>
A template can contain the following variables:
|Variable|Meaning|
|--------|--------|
|###threads###|Number of Maxscale threads|
|###node_server_IP_N###|IP of Master/Slave node N|
|###node_server_port_N###|port of Master/Slave node N|
|###galera_server_IP_N###|IP of Galera node N|
|###galera_server_port_N###|port of Galera node N|
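A hypothetical template fragment using these variables (the section layout follows the usual maxscale.cnf format; the real templates are under cnf/):
<pre>
[maxscale]
threads=###threads###

[server1]
type=server
address=###node_server_IP_1###
port=###node_server_port_1###
protocol=MySQLBackend
</pre>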
## Test creation principles
* start by initializing an object of the TestConnections class
* set a timeout before every operation which can get stuck; do not forget to disable the timeout before a long sleep()
* use the TestConnections::tprintf function to print the test log
* use TestConnections::add_result() to indicate a test failure and print an explanatory message
* execute TestConnections::copy_all_logs at the end of the test
* return the TestConnections::global_result value
* do not leave any node blocked by the firewall
A minimal test following these principles is sketched after this list.
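The sketch below uses only functions named in this document; the exact signatures and return types are in testconnections.h, so treat the details as illustrative:
<pre>
#include "testconnections.h"

int main(int argc, char *argv[])
{
    TestConnections test(argc, argv);  // reads env variables, checks backends, starts Maxscale

    test.set_timeout(30);              // terminate the test if the next steps get stuck
    test.connect_maxscale();           // open connections to the routers
    test.tprintf("Running a simple query via RWSplit");
    test.try_query(test.conn_rwsplit, "SELECT 1");  // a failure increases global_result

    test.stop_timeout();
    test.copy_all_logs();              // copy Maxscale logs to the host
    return test.global_result;         // 0 means success
}
</pre>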
## Class TestConnections
This class contains all information about the Maxscale node and all backend nodes, as well as a set of functions
to handle Maxscale and the backends and to interact with Maxscale routers and Maxadmin.
Here is only a list of the main functions; for all details see the Doxygen comments in [testconnections.h](testconnections.h)
Currently two backend sets are supported (represented by Mariadb_nodes class objects): 'repl' and 'galera';
each contains all info and operations for the Master/Slave and Galera setups respectively
(see the Doxygen comments in [mariadb_nodes.h](mariadb_nodes.h))
It is assumed that the following routers and listeners are configured:
|Router|Port|
|------|----|
|RWSplit|4006|
|ReadConn master|4008|
|ReadConn Slave|4009|
|binlog|5306|
|test case-specific|4016|
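For a manual check, the same ports can be used directly; for example, using the environment variables described below (maxscale_IP, maxscale_user, maxscale_password), a connection to the RWSplit router could be opened as:
<pre>
mysql -h $maxscale_IP -P 4006 -u $maxscale_user -p$maxscale_password
</pre>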
### Most important functions and variables
Please check Doxygen comments for details
#### TestConnections(int argc, char *argv[]);
* reads all information from environment variables
* checks the backends; if they are broken, makes one attempt to restore them
* creates maxscale.cnf from the template and copies it to the Maxscale node
* creates the needed directories, sets access rights for them, cleans up logs and core dumps
* starts Maxscale
* initializes internal structures
#### Timeout functions
int set_timeout(int timeout_seconds)
stop_timeout()
If, after set_timeout(), no new call of set_timeout() or stop_timeout() is made, the test execution is terminated
and the logs from Maxscale are copied to the host.
#### Open connection functions
|Function|Short description|
|----|---|
| int connect_maxscale();<br> int connect_rwsplit();<br> int connect_readconn_master();<br> int connect_maxscale_slave();|store the MYSQL handler in the TestConnections object (only one connection can be created by them; a second call leads to a MYSQL handler leak)|
|MYSQL * open_rwsplit_connection() <br> MYSQL * open_readconn_master_connection() <br> MYSQL * open_readconn_slave_connection() |returns MYSQL handler (can be used to create a number of connections to each router)|
| int create_connections(int conn_N) |open and then close N connections to each router|
A number of useful wrappers for mysql_real_connect() are not included in the TestConnections class, but
they are available from [mariadb_func.h](mariadb_func.h)
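For example, the open_* variants can be used when a test needs several parallel sessions (a sketch; see testconnections.h for the exact signatures):
<pre>
MYSQL *first = test.open_rwsplit_connection();
MYSQL *second = test.open_rwsplit_connection();
// run queries on both sessions, then close them
mysql_close(first);
mysql_close(second);
</pre>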
#### Backend check and setup functions
|Function|Short description|
|----|---|
|start_replication()|Configure nodes from 'repl' object as Master/Slave|
|start_galera()|Configure nodes from 'galera'|
|start_binlog()|Configure nodes from 'repl' in the following way: node0 - master, node1 - slave of node0, all others - slaves of the Maxscale binlog router|
|start_mm()|Configure nodes from 'repl' in a multi-master setup|
#### Result reporting functions
|Function|Short description|
|----|---|
|add_result()|failure printing, increases global_result|
|tprintf()|printing with a timestamp|
|copy_logs()|copy Maxscale log, maxscale.cnf file and core dump from Maxscale machine to current directory|
#### Different checks functions
|Function|Short description|
|----|---|
|try_query()|try an SQL query and print an error message in case of failure, increases global_result|
|check_t1_table()|check if t1 is present in the given DB|
|test_maxscale_connections()|check the status of connections to the RWSplit, ReadConn master and ReadConn slave routers|
|check_maxscale_alive()|Check if the RWSplit, ReadConn master and ReadConn slave routers are alive|
|check_log_err()|Check the Maxscale log for the presence or absence of a specific string|
|find_connected_slave()|find the first slave that has connections from Maxscale|
#### Maxscale machine control functions
|Function|Short description|
|----|---|
|start_maxscale()||
|stop_maxscale()||
|restart_maxscale()||
|execute_ssh_maxscale()|execute command on Maxscale node via ssh|
#### Properties
|Name|Short description|Corresponding env variable|
|----|-----|----|
|global_result|0 if there is not a single failure during the test| - |
|repl|Mariadb_nodes object for Master/Slave nodes| - |
|galera|Mariadb_nodes object for Galera nodes| - |
|smoke|do short tests if TRUE|smoke|
|maxscale_IP|IP address of Maxscale machine|maxscale_IP|
|maxscale_user|DB user name to access via Maxscale|maxscale_user|
|maxscale_password|password for MaxscaleDB user|maxscale_password|
|maxadmin_password|password for MaxAdmin interface (user name is hard coded 'admin')|maxadmin_password|
|conn_rwsplit|MYSQL handler of connections to RWSplit router| - |
|conn_master|MYSQL handler of connections to ReadConn router in master mode| - |
|conn_slave|MYSQL handler of connections to ReadConn router in slave mode| - |
### Mariadb_nodes class
#### Master/Slave and Galera setup and check
|Function|Short description|
|----|---|
|start_replication()|Configure nodes from 'repl' object as Master/Slave|
|start_galera()|Configure nodes from 'galera'|
|set_slave()|execute CHANGE MASTER TO against the node|
|check_replication()|Check if 'repl' nodes are properly configured as Master/Slave|
|check_galera()|Check if the 'galera' nodes are properly configured as a Galera cluster|
|change_master|Make another node the master|
#### Connections functions
|Function|Short description|
|----|---|
|connect()|open connections to all nodes and store the MYSQL handlers in an internal variable; a second call leads to a handler leak|
|close_connections()|close connections to all nodes|
#### Nodes control functions
|Function|Short description|
|----|---|
|block_node()|block the MariaDB server on the node by firewall|
|unblock_node()|unblock the MariaDB server on the node by firewall|
|unblock_all_nodes()|unblock the MariaDB server on all nodes by firewall|
|stop_node()|stop MariaDB server on the node|
|start_node()|start the MariaDB server on the node|
|restart_node()|stop and restart MariaDB server on the node|
|check_node()|check if MariaDB server on the node is alive|
|check_and_restart_node()|check if MariaDB server on the node is alive and restart it if it is not alive|
|stop_nodes()|stop MariaDB server on all nodes|
|ssh_node()|Execute command on the node via ssh, return error code|
|ssh_node_output()|Same as ssh_node(), but returns the command output|
|flush_hosts()|Execute 'mysqladmin flush-hosts' on all nodes|
|execute_query_all_nodes()|Execute same query on all nodes|
#### Properties
|Name|Short description|Corresponding env variable|
|----|-----|----|
|N|Number of nodes|node_N <br> galera_N|
|user_name|DB user name|node_user <br> galera_user|
|password|password for DB user|node_password <br> galera_password|
|IP[ ]|IP address of the node|node_XXX <br> galera_XXX|
|IP_private[ ]|private IP of the node (for AWS nodes)|node_private_XXX <br> galera_private_XXX|
|port[ ]|MariaDB port for the node|node_port_XXX <br> galera_port_XXX|
|nodes[ ]|MYSQL handler| - |
### Maxadmin operations functions
[maxadmin_operations.h](maxadmin_operations.h) contains functions to communicate with Maxscale via the MaxAdmin interface
|Function|Short description|
|----|---|
|execute_maxadmin_command()|send MaxAdmin command to Maxscale|
|execute_maxadmin_command_print()|send MaxAdmin command to Maxscale and print reply|
|get_maxadmin_param()|send a MaxAdmin command to Maxscale and try to find the value of a given parameter in the output|
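A usage sketch (the parameter order and types here are illustrative; see [maxadmin_operations.h](maxadmin_operations.h) for the exact signatures):
<pre>
char result[1024];
// look up the current connection count of server1 via MaxAdmin (hypothetical call)
get_maxadmin_param(test.maxscale_IP, (char *) "admin", test.maxadmin_password,
                   (char *) "show server server1", (char *) "Current no. of conns:", result);
test.tprintf("server1 connections: %s", result);
</pre>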

View File

@ -0,0 +1,6 @@
BEGIN;
SELECT (@@server_id) INTO @a;
SELECT @a;
@a
####server_id####
COMMIT;

View File

@ -0,0 +1,8 @@
USE test;
drop table if exists t1;
create table t1 (id integer);
set autocommit=0;
begin;
insert into t1 values(1);
commit;
drop table t1;

View File

@ -0,0 +1,4 @@
USE test;
SELECT IF(@@server_id <> @TMASTER_ID,'OK (slave)','FAIL (master)') AS result;
result
OK (slave)

View File

@ -0,0 +1,9 @@
USE test;
drop table if exists t1;
create table t1 (id integer);
set autocommit=0;
insert into t1 values(1);
select count(*) from t1;
count(*)
1
drop table t1;

View File

@ -0,0 +1,9 @@
USE test;
drop table if exists t1;
create table t1 (id integer);
set autocommit=OFF;
insert into t1 values(1);
select count(*) from t1;
count(*)
1
drop table t1;

View File

@ -0,0 +1,11 @@
USE test;
drop table if exists t1;
create table t1 (id integer);
set autocommit=0;
begin;
insert into t1 values(1);
commit;
select count(*) from t1;
count(*)
1
drop table t1;

View File

@ -0,0 +1,11 @@
USE test;
drop table if exists t1;
create table t1 (id integer);
set autocommit=0;
begin;
insert into t1 values(1);
commit;
select count(*) from t1;
count(*)
1
drop table t1;

View File

@ -0,0 +1,11 @@
USE test;
DROP DATABASE If EXISTS FOO;
SET autocommit=1;
BEGIN;
CREATE DATABASE FOO;
SELECT (@@server_id) INTO @a;
SELECT IF(@a <> @TMASTER_ID,'OK (slave)','FAIL (master)') AS result;
result
OK (slave)
DROP DATABASE FOO;
COMMIT;

View File

@ -0,0 +1,17 @@
USE test;
DROP TABLE IF EXISTS T1;
DROP EVENT IF EXISTS myevent;
SET autocommit=1;
BEGIN;
CREATE TABLE T1 (id integer);
CREATE EVENT myevent
ON SCHEDULE AT CURRENT_TIMESTAMP + INTERVAL 1 HOUR
DO
UPDATE t1 SET id = id + 1;
SELECT (@@server_id) INTO @a;
SELECT IF(@a <> @TMASTER_ID,'OK (slave)','FAIL (master)') AS result;
result
OK (slave)
DROP TABLE T1;
DROP EVENT myevent;
COMMIT;

View File

@ -0,0 +1,11 @@
USE test;
DROP TABLE IF EXISTS T1;
SET autocommit=1;
BEGIN;
CREATE TABLE T1 (id integer);
SELECT (@@server_id) INTO @a;
SELECT IF(@a <> @TMASTER_ID,'OK (slave)','FAIL (master)') AS result;
result
OK (slave)
DROP TABLE T1;
COMMIT;

View File

@ -0,0 +1,14 @@
USE test;
DROP PROCEDURE IF EXISTS simpleproc;
SET autocommit=1;
BEGIN;
CREATE PROCEDURE simpleproc (OUT param1 INT)
BEGIN
SELECT COUNT(*) INTO param1 FROM t;
END //
SELECT (@@server_id) INTO @a;
SELECT IF(@a <> @TMASTER_ID,'OK (slave)','FAIL (master)') AS result;
result
OK (slave)
DROP PROCEDURE simpleproc;
COMMIT;

View File

@ -0,0 +1,13 @@
USE test;
DROP FUNCTION IF EXISTS hello;
SET autocommit=1;
BEGIN;
CREATE FUNCTION hello (s CHAR(20))
RETURNS CHAR(50) DETERMINISTIC
RETURN CONCAT('Hello, ',s,'!');
SELECT (@@server_id) INTO @a;
SELECT IF(@a <> @TMASTER_ID,'OK (slave)','FAIL (master)') AS result;
result
OK (slave)
DROP FUNCTION hello;
COMMIT;

View File

@ -0,0 +1,12 @@
USE test;
DROP TABLE IF EXISTS T1;
CREATE TABLE T1 (id integer);
SET autocommit=1;
BEGIN;
CREATE INDEX foo_t1 on T1 (id);
SELECT (@@server_id) INTO @a;
SELECT IF(@a <> @TMASTER_ID,'OK (slave)','FAIL (master)') AS result;
result
OK (slave)
DROP TABLE T1;
COMMIT;

View File

@ -0,0 +1,6 @@
use test;
set autocommit=1;
use mysql;
select count(*) from user where user='skysql';
count(*)
2

View File

@ -0,0 +1,15 @@
USE test;
DROP TABLE IF EXISTS myCity;
SET autocommit = 0;
START TRANSACTION;
CREATE TABLE myCity (a int, b char(20));
INSERT INTO myCity VALUES (1, 'Milan');
INSERT INTO myCity VALUES (2, 'London');
COMMIT;
START TRANSACTION;
DELETE FROM myCity;
SELECT COUNT(*) FROM myCity;
COUNT(*)
0
COMMIT;
DROP TABLE myCity;

View File

@ -0,0 +1,15 @@
USE test;
DROP TABLE IF EXISTS myCity;
SET autocommit = Off;
START TRANSACTION;
CREATE TABLE myCity (a int, b char(20));
INSERT INTO myCity VALUES (1, 'Milan');
INSERT INTO myCity VALUES (2, 'London');
COMMIT;
START TRANSACTION;
DELETE FROM myCity;
SELECT COUNT(*) FROM myCity;
COUNT(*)
0
COMMIT;
DROP TABLE myCity;

View File

@ -0,0 +1,13 @@
USE test;
DROP TABLE IF EXISTS myCity;
SET autocommit = 0;
CREATE TABLE myCity (a int, b char(20));
INSERT INTO myCity VALUES (1, 'Milan');
INSERT INTO myCity VALUES (2, 'London');
COMMIT;
DELETE FROM myCity;
SELECT COUNT(*) FROM myCity;
COUNT(*)
0
COMMIT;
DROP TABLE myCity;

View File

@ -0,0 +1,13 @@
USE test;
DROP TABLE IF EXISTS myCity;
SET autocommit = oFf;
CREATE TABLE myCity (a int, b char(20));
INSERT INTO myCity VALUES (1, 'Milan');
INSERT INTO myCity VALUES (2, 'London');
COMMIT;
DELETE FROM myCity;
SELECT COUNT(*) FROM myCity;
COUNT(*)
0
COMMIT;
DROP TABLE myCity;

View File

@ -0,0 +1,5 @@
--disable_query_log
--disable_result_log
SELECT SLEEP(5);
--enable_result_log
--enable_query_log

View File

@ -0,0 +1,11 @@
--source testconf.inc
USE test;
--disable_warnings
drop table if exists t1;
--enable_warnings
create table t1 (id integer);
set autocommit=0; # open transaction
begin;
insert into t1 values(1); # write to master
commit;
drop table t1;

View File

@ -0,0 +1,3 @@
--source testconf.inc
USE test;
SELECT IF(@@server_id <> @TMASTER_ID,'OK (slave)','FAIL (master)') AS result;

View File

@ -0,0 +1,11 @@
--source testconf.inc
USE test;
--disable_warnings
drop table if exists t1;
--enable_warnings
create table t1 (id integer);
set autocommit=0; # open transaction
insert into t1 values(1); # write to master
select count(*) from t1; # read from master
drop table t1;

View File

@ -0,0 +1,11 @@
--source testconf.inc
USE test;
--disable_warnings
drop table if exists t1;
--enable_warnings
create table t1 (id integer);
set autocommit=OFF; # open transaction
insert into t1 values(1); # write to master
select count(*) from t1; # read from master
drop table t1;

View File

@ -0,0 +1,13 @@
--source testconf.inc
USE test;
--disable_warnings
drop table if exists t1;
--enable_warnings
create table t1 (id integer);
set autocommit=0; # open transaction
begin;
insert into t1 values(1); # write to master
commit;
select count(*) from t1; # read from master since autocommit is disabled
drop table t1;

View File

@ -0,0 +1,13 @@
--source testconf.inc
USE test;
--disable_warnings
drop table if exists t1;
--enable_warnings
create table t1 (id integer);
set autocommit=0; # open transaction
begin;
insert into t1 values(1); # write to master
commit;
select count(*) from t1; # read from master since autocommit is disabled
drop table t1;

View File

@ -0,0 +1,4 @@
use test;
set autocommit=1;
use mysql;
select count(*) from user where user='skysql';

View File

@ -0,0 +1,16 @@
--source testconf.inc
USE test;
--disable_warnings
DROP TABLE IF EXISTS myCity;
--enable_warnings
SET autocommit = 0;
START TRANSACTION;
CREATE TABLE myCity (a int, b char(20));
INSERT INTO myCity VALUES (1, 'Milan');
INSERT INTO myCity VALUES (2, 'London');
COMMIT;
START TRANSACTION;
DELETE FROM myCity;
SELECT COUNT(*) FROM myCity; # read transaction's modifications from master
COMMIT;
DROP TABLE myCity;

View File

@ -0,0 +1,16 @@
--source testconf.inc
USE test;
--disable_warnings
DROP TABLE IF EXISTS myCity;
--enable_warnings
SET autocommit = Off;
START TRANSACTION;
CREATE TABLE myCity (a int, b char(20));
INSERT INTO myCity VALUES (1, 'Milan');
INSERT INTO myCity VALUES (2, 'London');
COMMIT;
START TRANSACTION;
DELETE FROM myCity;
SELECT COUNT(*) FROM myCity; # read transaction's modifications from master
COMMIT;
DROP TABLE myCity;

View File

@ -0,0 +1,16 @@
--source testconf.inc
USE test;
--disable_warnings
DROP TABLE IF EXISTS myCity;
--enable_warnings
SET autocommit = 0;
CREATE TABLE myCity (a int, b char(20));
INSERT INTO myCity VALUES (1, 'Milan');
INSERT INTO myCity VALUES (2, 'London');
COMMIT;
DELETE FROM myCity; # implicit transaction started
SELECT COUNT(*) FROM myCity; # read transaction's modifications from master
COMMIT;
DROP TABLE myCity;

View File

@ -0,0 +1,16 @@
--source testconf.inc
USE test;
--disable_warnings
DROP TABLE IF EXISTS myCity;
--enable_warnings
SET autocommit = oFf;
CREATE TABLE myCity (a int, b char(20));
INSERT INTO myCity VALUES (1, 'Milan');
INSERT INTO myCity VALUES (2, 'London');
COMMIT;
DELETE FROM myCity; # implicit transaction started
SELECT COUNT(*) FROM myCity; # read transaction's modifications from master
COMMIT;
DROP TABLE myCity;

View File

@ -0,0 +1,127 @@
# Jenkins
## List of Jenkins installations
| URL | Description |
|----|----|
|[max-tst-01.mariadb.com:8089](http://max-tst-01.mariadb.com:8089)|AWS, qemu; Regular testing for different MariaDB versions, different Linux distributions, Developers testing|
|[maxscale-jenkins.mariadb.com:8089/](http://maxscale-jenkins.mariadb.com:8089/)|AWS, VBox; Regular builds for all distributions, build for Coverity, regular test VBox+CentOS6+MariaDB5.5|
|[maxscale-jenkins.mariadb.com:8090](http://maxscale-jenkins.mariadb.com:8090/)|MDBCI testing and debugging, Jenkins experiments|
## Basic Jenkins jobs
### [max-tst-01.mariadb.com:8089](http://max-tst-01.mariadb.com:8089)
| Job | Description |
|----|----|
|[build_and_test](http://max-tst-01.mariadb.com:8089/view/test/job/build_and_test/)|Build Maxscale and run systems tests|
|[run_test](http://max-tst-01.mariadb.com:8089/view/test/job/run_test/)|Run system tests, Maxscale package should be in the repository|
|[build](http://max-tst-01.mariadb.com:8089/job/build/build)|Build Maxscale, create repository and publish it to [http://max-tst-01.mariadb.com/ci-repository/](http://max-tst-01.mariadb.com/ci-repository/)|
|[run_test_no_env_rebuild](http://max-tst-01.mariadb.com:8089/view/test/job/run_test_no_env_rebuild/)|Run system tests without creating a new set of VMs|
|[create_env](http://max-tst-01.mariadb.com:8089/view/env/job/create_env/)|Create VMs, install build environment to Maxscale machine, build Maxscale on Maxscale machine|
|[destroy](http://max-tst-01.mariadb.com:8089/view/axilary/job/destroy/)|Destroy VMs created by [run_test](http://max-tst-01.mariadb.com:8089/view/test/job/run_test/) or [create_env](http://max-tst-01.mariadb.com:8089/view/env/job/create_env/)|
|[remove_lock](http://max-tst-01.mariadb.com:8089/view/axilary/job/remove_lock/)|Remove Vagrant lock set by [run_test](http://max-tst-01.mariadb.com:8089/view/test/job/run_test/) or [create_env](http://max-tst-01.mariadb.com:8089/view/env/job/create_env/)|
Every test run should have a unique name (parameter 'name'). This name is used as the name of the MDBCI configuration.
If the parameter 'do_not_destroy' is set to 'yes', virtual machines (VMs) are not destroyed after test execution and can later be used
for debugging or new test runs (see [run_test_no_env_rebuild](http://max-tst-01.mariadb.com:8089/view/test/job/run_test_no_env_rebuild/)).
VMs can be accessed from the vagrant@max-tst-01.mariadb.com machine using 'mdbci ssh' or 'vagrant ssh', as well as via direct ssh
access using the environment variables provided by the
[set_env_vagrant.sh](https://github.com/mariadb-corporation/maxscale-system-test/blob/master/ENV_SETUP.md#access-vms)
script.
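For example, to log in to the first backend node of a configuration (the configuration and node names below are illustrative):
<pre>
ssh -i vagrant.pem vagrant@max-tst-01.mariadb.com
cd ~/mdbci/my_test_run
vagrant ssh node_000
</pre>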
The parameter 'box' defines the type of VM and the Linux distribution to be used for the tests.
Test results go to [CDash](http://jenkins.engskysql.com/CDash/index.php?project=MaxScale); logs and core dumps are
stored [here](http://max-tst-01.mariadb.com/LOGS/).
The [create_env](http://max-tst-01.mariadb.com:8089/view/env/job/create_env/) job creates a set of VMs
(for the backends and Maxscale) and builds Maxscale on the Maxscale VM. After this job has executed, the Maxscale machine
contains the Maxscale source and binaries. *NOTE:* to properly configure the Maxscale init scripts it is necessary to
use the rpm/dpkg tool to install the Maxscale package (the package can be found in the Maxscale build directory).
[run_test](http://max-tst-01.mariadb.com:8089/view/test/job/run_test/) and
[create_env](http://max-tst-01.mariadb.com:8089/view/env/job/create_env/)
jobs create a Vagrant lock which prevents running two Vagrant instances in parallel (such parallel execution can
cause Vagrant or VM provider failures). If a job crashes or is interrupted by the user, the Vagrant lock stays in the locked state
and prevents any new VM creation. To remove the lock, the
[remove_lock](http://max-tst-01.mariadb.com:8089/view/axilary/job/remove_lock/)
job should be used.
## Process examples
### Running regression test against a branch
Execute [build_and_test](http://max-tst-01.mariadb.com:8089/view/test/job/build_and_test/)
Recommendations regarding parameters:
* 'name' - a unique name: it can be any text string, but as a good practice 'name' should refer to the branch,
Linux distribution, date/time of testing and MariaDB version
* 'box' - the most recommended boxes are 'centos_7.0_libvirt' (QEMU box) and 'centos7' (Amazon Web Services box)
* 'source' - which type of source to use: BRANCH for a git branch, TAG for a git tag and COMMIT for a commit ID
* 'value' - the name of the branch (if 'source' is BRANCH), the name of the git tag (if 'source' is TAG) or the commit ID (if 'source' is COMMIT)
### Build MaxScale
Execute the [build](http://max-tst-01.mariadb.com:8089/job/build/build) job.
The parameter 'target' is the name of the repository into which packages are published:
e.g. if 'target' is 'develop', packages go to
[http://max-tst-01.mariadb.com/ci-repository/develop/](http://max-tst-01.mariadb.com/ci-repository/develop)
NOTE: the build is executed only for the selected distribution ('box' parameter). Be careful with other distributions: if the build is not executed for the same distribution, an old version (from some old build) can still be in the repository. Later tests have to be executed against the same distribution, otherwise they can run against an old version of MaxScale. It is recommended to use a unique name for 'target'.
To debug a failed build:
* set the 'do_not_destroy_vm' parameter to 'yes'
* after the build:
<pre>
ssh -i vagrant.pem vagrant@max-tst-01.mariadb.com
cd ~/mdbci/build-&lt;box&gt;-&lt;date&gt;&lt;time&gt;
vagrant ssh
</pre>
For example:
<pre>
ssh -i vagrant.pem vagrant@max-tst-01.mariadb.com
cd ~/mdbci/build_centos6-20160119-0935
vagrant ssh
</pre>
### Create a set of Master/Slave and Galera nodes and set up the build environment for Maxscale on one more node
Execute the [create_env](http://max-tst-01.mariadb.com:8089/view/env/job/create_env/) job.
Log in to the Maxscale machine (see the [environment documentation](ENV_SETUP.md#access-vms)).
The MaxScale source code, binaries and packages can be found in the ~/workspace/ directory.
All build tools are installed. Git can be used to go through the source code.
It is not recommended to commit anything from a virtual machine to GitHub.
Please use 'rpm' or 'dpkg' to properly install the Maxscale package (the /etc/init.d/maxscale script will not be
installed without execution of 'rpm' or 'dpkg').
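For example (the package file name is illustrative):
<pre>
sudo rpm -i ~/workspace/maxscale-*.rpm    # RPM-based distributions
sudo dpkg -i ~/workspace/maxscale*.deb    # DEB-based distributions
</pre>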
### Running tests against an existing version of Maxscale
Execute the [run_test](http://max-tst-01.mariadb.com:8089/view/test/job/run_test/) job.
Make sure the Maxscale binary repository is present on the
[http://max-tst-01.mariadb.com/ci-repository/](http://max-tst-01.mariadb.com/ci-repository/)
server. Please check that:
* there is a directory with a name equal to the 'target' parameter
* there is a sub-directory for the selected distribution ('box' parameter)
e.g. if 'target' is 'develop' and the distribution is CentOS7 (boxes 'centos7' or 'centos_7.0_libvirt'), the directory [http://max-tst-01.mariadb.com/ci-repository/develop/mariadb-maxscale/centos/7/x86_64/](http://max-tst-01.mariadb.com/ci-repository/develop/mariadb-maxscale/centos/7/x86_64/) has to contain Maxscale RPM packages.
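One way to verify this is to list the repository directory over HTTP (the URL follows the pattern above; adjust 'target' and distribution as needed):
<pre>
curl -s http://max-tst-01.mariadb.com/ci-repository/develop/mariadb-maxscale/centos/7/x86_64/ | grep maxscale
</pre>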
If the parameter 'do_not_destroy' is set to 'yes', the virtual machine is not destroyed after the test and
can be used for debugging. See the [environment documentation](ENV_SETUP.md#access-vms) to learn how to access the virtual machines.
### Maintenance operations
If a test run was executed with the parameter 'do_not_destroy' set to 'yes', please do not forget to execute
[destroy](http://max-tst-01.mariadb.com:8089/view/axilary/job/destroy/) against your 'target'.
This job also has to be executed if the test run job crashed or was interrupted.
If a build or test job crashes, is interrupted, or Jenkins crashes during a Vagrant operation, it is possible that the Vagrant lock
stays in the locked state and no other job can progress (a job can be started, but it waits for the Vagrant lock -
'/home/vagrant/vagrant_lock' can be seen in the job log). In this case the lock can be removed by the [remove_lock](http://max-tst-01.mariadb.com:8089/view/axilary/job/remove_lock/) job.
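To inspect a stale lock manually before removing it:
<pre>
ssh -i vagrant.pem vagrant@max-tst-01.mariadb.com
ls -l /home/vagrant/vagrant_lock
</pre>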

View File

@ -0,0 +1,340 @@
GNU GENERAL PUBLIC LICENSE
Version 2, June 1991
Copyright (C) 1989, 1991 Free Software Foundation, Inc., <http://fsf.org/>
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
License is intended to guarantee your freedom to share and change free
software--to make sure the software is free for all its users. This
General Public License applies to most of the Free Software
Foundation's software and to any other program whose authors commit to
using it. (Some other Free Software Foundation software is covered by
the GNU Lesser General Public License instead.) You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
this service if you wish), that you receive source code or can get it
if you want it, that you can change the software or use pieces of it
in new free programs; and that you know you can do these things.
To protect your rights, we need to make restrictions that forbid
anyone to deny you these rights or to ask you to surrender the rights.
These restrictions translate to certain responsibilities for you if you
distribute copies of the software, or if you modify it.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must give the recipients all the rights that
you have. You must make sure that they, too, receive or can get the
source code. And you must show them these terms so they know their
rights.
We protect your rights with two steps: (1) copyright the software, and
(2) offer you this license which gives you legal permission to copy,
distribute and/or modify the software.
Also, for each author's protection and ours, we want to make certain
that everyone understands that there is no warranty for this free
software. If the software is modified by someone else and passed on, we
want its recipients to know that what they have is not the original, so
that any problems introduced by others will not reflect on the original
authors' reputations.
Finally, any free program is threatened constantly by software
patents. We wish to avoid the danger that redistributors of a free
program will individually obtain patent licenses, in effect making the
program proprietary. To prevent this, we have made it clear that any
patent must be licensed for everyone's free use or not licensed at all.
The precise terms and conditions for copying, distribution and
modification follow.
GNU GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. This License applies to any program or other work which contains
a notice placed by the copyright holder saying it may be distributed
under the terms of this General Public License. The "Program", below,
refers to any such program or work, and a "work based on the Program"
means either the Program or any derivative work under copyright law:
that is to say, a work containing the Program or a portion of it,
either verbatim or with modifications and/or translated into another
language. (Hereinafter, translation is included without limitation in
the term "modification".) Each licensee is addressed as "you".
Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope. The act of
running the Program is not restricted, and the output from the Program
is covered only if its contents constitute a work based on the
Program (independent of having been made by running the Program).
Whether that is true depends on what the Program does.
1. You may copy and distribute verbatim copies of the Program's
source code as you receive it, in any medium, provided that you
conspicuously and appropriately publish on each copy an appropriate
copyright notice and disclaimer of warranty; keep intact all the
notices that refer to this License and to the absence of any warranty;
and give any other recipients of the Program a copy of this License
along with the Program.
You may charge a fee for the physical act of transferring a copy, and
you may at your option offer warranty protection in exchange for a fee.
2. You may modify your copy or copies of the Program or any portion
of it, thus forming a work based on the Program, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:
a) You must cause the modified files to carry prominent notices
stating that you changed the files and the date of any change.
b) You must cause any work that you distribute or publish, that in
whole or in part contains or is derived from the Program or any
part thereof, to be licensed as a whole at no charge to all third
parties under the terms of this License.
c) If the modified program normally reads commands interactively
when run, you must cause it, when started running for such
interactive use in the most ordinary way, to print or display an
announcement including an appropriate copyright notice and a
notice that there is no warranty (or else, saying that you provide
a warranty) and that users may redistribute the program under
these conditions, and telling the user how to view a copy of this
License. (Exception: if the Program itself is interactive but
does not normally print such an announcement, your work based on
the Program is not required to print an announcement.)
These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Program,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works. But when you
distribute the same sections as part of a whole which is a work based
on the Program, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote it.
Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Program.
In addition, mere aggregation of another work not based on the Program
with the Program (or with a work based on the Program) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.
3. You may copy and distribute the Program (or a work based on it,
under Section 2) in object code or executable form under the terms of
Sections 1 and 2 above provided that you also do one of the following:
a) Accompany it with the complete corresponding machine-readable
source code, which must be distributed under the terms of Sections
1 and 2 above on a medium customarily used for software interchange; or,
b) Accompany it with a written offer, valid for at least three
years, to give any third party, for a charge no more than your
cost of physically performing source distribution, a complete
machine-readable copy of the corresponding source code, to be
distributed under the terms of Sections 1 and 2 above on a medium
customarily used for software interchange; or,
c) Accompany it with the information you received as to the offer
to distribute corresponding source code. (This alternative is
allowed only for noncommercial distribution and only if you
received the program in object code or executable form with such
an offer, in accord with Subsection b above.)
The source code for a work means the preferred form of the work for
making modifications to it. For an executable work, complete source
code means all the source code for all modules it contains, plus any
associated interface definition files, plus the scripts used to
control compilation and installation of the executable. However, as a
special exception, the source code distributed need not include
anything that is normally distributed (in either source or binary
form) with the major components (compiler, kernel, and so on) of the
operating system on which the executable runs, unless that component
itself accompanies the executable.
If distribution of executable or object code is made by offering
access to copy from a designated place, then offering equivalent
access to copy the source code from the same place counts as
distribution of the source code, even though third parties are not
compelled to copy the source along with the object code.
4. You may not copy, modify, sublicense, or distribute the Program
except as expressly provided under this License. Any attempt
otherwise to copy, modify, sublicense or distribute the Program is
void, and will automatically terminate your rights under this License.
However, parties who have received copies, or rights, from you under
this License will not have their licenses terminated so long as such
parties remain in full compliance.
5. You are not required to accept this License, since you have not
signed it. However, nothing else grants you permission to modify or
distribute the Program or its derivative works. These actions are
prohibited by law if you do not accept this License. Therefore, by
modifying or distributing the Program (or any work based on the
Program), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Program or works based on it.
6. Each time you redistribute the Program (or any work based on the
Program), the recipient automatically receives a license from the
original licensor to copy, distribute or modify the Program subject to
these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties to
this License.
7. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Program at all. For example, if a patent
license would not permit royalty-free redistribution of the Program by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Program.
If any portion of this section is held invalid or unenforceable under
any particular circumstance, the balance of the section is intended to
apply and the section as a whole is intended to apply in other
circumstances.
It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system, which is
implemented by public license practices. Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.
This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.
8. If the distribution and/or use of the Program is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Program under this License
may add an explicit geographical distribution limitation excluding
those countries, so that distribution is permitted only in or among
countries not thus excluded. In such case, this License incorporates
the limitation as if written in the body of this License.
9. The Free Software Foundation may publish revised and/or new versions
of the General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the Program
specifies a version number of this License which applies to it and "any
later version", you have the option of following the terms and conditions
either of that version or of any later version published by the Free
Software Foundation. If the Program does not specify a version number of
this License, you may choose any version ever published by the Free Software
Foundation.
10. If you wish to incorporate parts of the Program into other free
programs whose distribution conditions are different, write to the author
to ask for permission. For software which is copyrighted by the Free
Software Foundation, write to the Free Software Foundation; we sometimes
make exceptions for this. Our decision will be guided by the two goals
of preserving the free status of all derivatives of our free software and
of promoting the sharing and reuse of software generally.
NO WARRANTY
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
REPAIR OR CORRECTION.
12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
convey the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
{description}
Copyright (C) {year} {fullname}
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
Also add information on how to contact you by electronic and paper mail.
If the program is interactive, make it output a short notice like this
when it starts in an interactive mode:
Gnomovision version 69, Copyright (C) year name of author
Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, the commands you use may
be called something other than `show w' and `show c'; they could even be
mouse-clicks or menu items--whatever suits your program.
You should also get your employer (if you work as a programmer) or your
school, if any, to sign a "copyright disclaimer" for the program, if
necessary. Here is a sample; alter the names:
Yoyodyne, Inc., hereby disclaims all copyright interest in the program
`Gnomovision' (which makes passes at compilers) written by James Hacker.
{signature of Ty Coon}, 1 April 1989
Ty Coon, President of Vice
This General Public License does not permit incorporating your program into
proprietary programs. If your program is a subroutine library, you may
consider it more useful to permit linking proprietary applications with the
library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License.

View File

View File

@ -0,0 +1,67 @@
# maxscale-system-test
System level tests for MaxScale
## Basics
- every test is a separate executable file
- backend for a test:
- 1 machine for Maxscale
- >= 4 machines for Master/Slave
- >= 4 machines for Galera cluster
- environment variables contain all information about the backend: IPs, user names, passwords, paths to tools, etc.
- the backend can be created with the help of the [MDBCI tool](https://github.com/OSLL/mdbci)
- Master/Slave and Galera configuration can be done with the help of the [build scripts package](https://github.com/mariadb-corporation/build-scripts-vagrant)
## Manuals
[How to run tests](https://github.com/mariadb-corporation/build-scripts-vagrant/blob/master/RUN_TEST.md)
[Hints: How to write a test](HOW_TO_WRITE_TEST.md)
[Build and test environment setup (if you want to play with MDBCI and Vagrant on your local machine)](ENV_SETUP.md)
[Jenkins instructions](JENKINS.md)
## Environment variables
|variable|meaning|
|--------|-------|
|node_N|Number of machines for Master/Slave|
|node_XXX_network|IP address of Master/Slave machine number XXX|
|node_XXX_private_ip|private IP address of Master/Slave machine XXX for AWS machines (for everything else - same as node_XXX_network)|
|node_XXX_port|MariaDB port of Master/Slave machine XXX|
|node_XXX_whoami|user name to access Master/Slave machine XXX via ssh|
|node_XXX_access_sudo|'sudo ' if node_XXX_whoami does not have root rights, empty string if it has root rights|
|node_XXX_keyfile|full name of secret key to access Master/Slave machine XXX via ssh|
|node_XXX_start_db_command|bash command to start DB server on Master/Slave machine XXX|
|node_XXX_stop_db_command|bash command to stop DB server on Master/Slave machine XXX|
|node_user|DB user name to access Master/Slave nodes (must have all privileges with the GRANT option)|
|node_password|password for node_user|
|galera_N|Number of machines for Galera|
|galera_XXX_network|IP address of Galera machine number XXX|
|galera_XXX_private|private IP address of Galera machine XXX for AWS machines (for everything else - same as galera_XXX_network)|
|galera_XXX_port|MariaDB port of Galera machine XXX|
|galera_XXX_whoami|user name to access Galera machine XXX via ssh|
|galera_XXX_access|'sudo ' if galera_XXX_whoami does not have root rights, empty string if it has root rights|
|galera_XXX_keyfile|full name of secret key to access Galera machine XXX via ssh|
|galera_XXX_start_db_command|bash command to start DB server on Galera machine XXX|
|galera_XXX_stop_db_command|bash command to stop DB server on Galera machine XXX|
|galera_user|DB user name to access Galera nodes (must have all privileges with the GRANT option)|
|galera_password|password for galera_user|
|maxscale_cnf|full name of Maxscale configuration file (maxscale.cnf)|
|maxscale_log_dir|directory for Maxscale log files|
|maxscale_IP|IP address of Maxscale machine|
|maxscale_sshkey|full name of secret key to access Maxscale machine via ssh|
|maxscale_access_user|user name to access Maxscale machine via ssh|
|maxscale_access_sudo|'sudo ' if maxscale_access_user does not have root rights, empty string if maxscale_access_user has root rights|
|maxscale_user|DB user to access via Maxscale|
|maxscale_password|password for maxscale_user|
|maxscale_hostname|hostname of Maxscale machine|
|sysbench_dir|directory where Sysbench is installed|
|ssl|'yes' if tests should try to use ssl to connect to Maxscale and to backends (obsolete, now should be 'yes' in all cases)|
|smoke|if 'yes' all tests are executed in 'quick' mode (less iterations, skip heavy operations)|
|backend_ssl|if 'yes' ssl config will be added to all servers definition in maxscale.cnf|
|use_snapshots|if TRUE every test is trying to revert snapshot before running the test|
|take_snapshot_command|Command line to take a snapshot of all VMs|
|revert_snapshot_command|Command line to revert a snapshot of all VMs|
|no_nodes_check|if yes backend checks are not executed (needed in case of RDS or similar backend)|
|no_backend_log_copy|if yes logs from backend nodes are not copied (needed in case of RDS or similar backend)|
|no_maxscale_start|Do not start Maxscale automatically|
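The variables follow the patterns above; a minimal, illustrative excerpt of such an environment (all values are hypothetical) might look like:
<pre>
export node_N=4
export node_000_network=192.168.100.2
export node_000_port=3306
export node_user=skysql
export node_password=skysql
export maxscale_IP=192.168.100.10
export maxscale_user=skysql
export maxscale_password=skysql
</pre>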

View File

@ -0,0 +1,32 @@
# Results locations
| Location | Description |
|----------|-------------|
|[run_test](http://max-tst-01.mariadb.com:8089/view/test/job/run_test/) Jenkins job log|Vagrant and test application outputs|
|[CDash](http://jenkins.engskysql.com/CDash/index.php?project=MaxScale)|CTest reports|
|[http://max-tst-01.mariadb.com/LOGS/](http://max-tst-01.mariadb.com/LOGS/)|MaxScale logs and core dumps|
|/home/vagrant/LOGS|Same as [http://max-tst-01.mariadb.com/LOGS/](http://max-tst-01.mariadb.com/LOGS/)|
|Maxscale VM /var/log/maxscale|MaxScale log from latest test case|
|Maxscale VM /tmp/core*|Core dump from latest test case|
|Maxscale VM home directory|QLA filter files (if enabled in the MaxScale test configuration)|
|nodeN, galeraN VMs|MariaDB/MySQL logs (see MariaDB/MySQL documentation for details)|
For access to VMs see [environment documentation](ENV_SETUP.md#access-vms)
The Jenkins job log consists of the following parts:
* Vagrant output: VM creation process, MariaDB Master/Slave and MariaDB Galera installation, MaxScale installation
* [set_env_vagrant.sh](https://github.com/mariadb-corporation/build-scripts-vagrant/blob/master/test/set_env_vagrant.sh) output: retrieval of all VM parameters
* setup scripts output: MariaDB initialisation on the backend nodes, DB users setup, enabling core dumps on the MaxScale VM
* test application output for all tests: every line starts with the test case number and ':' (can be grepped; see the example after this list)
* CTest final printing: N of M tests passed, CTest warnings, email sending logs
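For example, to extract the output of a single test case from a saved console log (the file name and test case number are illustrative):
<pre>
grep '^42:' console.log
</pre>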
To check presence of core dumps:
<pre>
find /home/vagrant/LOGS/&lt;last_test_results_dir&gt; | grep core
</pre>
where 'last_test_results_dir' is the automatically generated name of the logs directory (based on the date and time of the test run)
To understand the test case output, please see the test case description in the Doxygen comments in every test case source file.
VMs stay alive after the test run only if the run was executed with the 'do_not_destroy' parameter.

View File

@ -0,0 +1,14 @@
--style=allman
--indent=spaces=4
--indent-switches
--indent-labels
--min-conditional-indent=0
--pad-oper
--pad-header
--add-brackets
--convert-tabs
--max-code-length=110
--break-after-logical
--mode=c
--suffix=none
--max-instatement-indent=110
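# Assuming the options above are saved as a file named .astylerc (the name is
# illustrative), they can be applied with astyle's --options flag, e.g.:
#   astyle --options=.astylerc $(find . -name '*.cpp' -o -name '*.h')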

View File

@ -0,0 +1,157 @@
/**
* @file auroramon.cpp test of Aurora RDS monitor
* - create RDS cluster
* - find the 'writer' node and use 'maxadmin' to check that this node is "Master, Running"
* - do a forced failover
* - find the 'writer' again and repeat the check
* - destroy RDS cluster
*/
#include "testconnections.h"
#include "execute_cmd.h"
#include "rds_vpc.h"
int set_endpoints(RDS * cluster)
{
json_t *endpoint;
long long int port;
const char * IP;
char p[64];
size_t i;
char cmd[1024];
json_t * endpoints = cluster->get_endpoints();
if (endpoints == NULL)
{
return -1;
}
json_array_foreach(endpoints, i, endpoint)
{
port = json_integer_value(json_object_get(endpoint, "Port"));
IP = json_string_value(json_object_get(endpoint, "Address"));
printf("host: %s \t port: %lld\n", IP, port);
sprintf(cmd, "node_%03d_network", (int) i);
setenv(cmd, IP, 1);
sprintf(cmd, "node_%03d_port", (int) i);
sprintf(p, "%lld", port);
setenv(cmd, p, 1);
}
setenv("node_password", "skysqlrds", 1);
setenv("maxscale_user", "skysql", 1);
setenv("maxscale_password", "skysqlrds", 1);
setenv("no_nodes_check", "yes", 1);
setenv("no_backend_log_copy", "yes", 1);
return 0;
}
void compare_masters(TestConnections* Test, RDS * cluster)
{
const char * aurora_master;
cluster->get_writer(&aurora_master);
Test->tprintf("Aurora writer node: %s\n", aurora_master);
char maxadmin_status[1024];
int i;
char cmd[1024];
for (i = 0; i < Test->repl->N; i++)
{
sprintf(cmd, "show server server%d", i + 1);
Test->get_maxadmin_param(cmd, (char *) "Status:", &maxadmin_status[0]);
Test->tprintf("Server%d status %s\n", i + 1, maxadmin_status);
sprintf(cmd, "node%03d", i);
if (strcmp(aurora_master, cmd) == 0)
{
if (strcmp(maxadmin_status, "Master, Running"))
{
Test->tprintf("Maxadmin reports node%03d is a Master as expected", i);
}
else
{
Test->add_result(1, "Server node%03d status is not 'Master, Running'', it is '%s'", i, maxadmin_status);
}
}
else
{
if (strcmp(maxadmin_status, "Slave, Running"))
{
Test->tprintf("Maxadmin reports node%03d is a Slave as expected", i);
}
else
{
Test->add_result(1, "Server node%03d status is not 'Slave, Running'', it is '%s'", i, maxadmin_status);
}
}
}
}
int main(int argc, char *argv[])
{
RDS * cluster = new RDS((char *) "auroratest");
if (cluster->create_rds_db(4) != 0)
{
printf("Error RDS creation\n");
return 1;
}
cluster->wait_for_nodes(4);
if (set_endpoints(cluster) != 0)
{
printf("Error getting RDS endpoints\n");
return 1;
}
TestConnections * Test = new TestConnections(argc, argv);
Test->set_timeout(30);
compare_masters(Test, cluster);
Test->set_timeout(30);
Test->tprintf("Executing a query through readwritesplit before failover");
Test->connect_rwsplit();
Test->try_query(Test->conn_rwsplit, "show processlist");
char server_id[1024];
Test->tprintf("Get aurora_server_id\n");
find_field(Test->conn_rwsplit, "select @@aurora_server_id;", "server_id", &server_id[0]);
Test->close_rwsplit();
Test->tprintf("server_id before failover: %s\n", server_id);
Test->stop_timeout();
Test->tprintf("Performing cluster failover\n");
Test->add_result(cluster->do_failover(), "Failover failed\n");
Test->tprintf("Failover done\n");
// Do the failover here and wait until it is over
//sleep(10);
Test->set_timeout(30);
Test->tprintf("Executing a query through readwritesplit after failover");
Test->connect_rwsplit();
Test->try_query(Test->conn_rwsplit, "show processlist");
Test->tprintf("Get aurora_server_id\n");
find_field(Test->conn_rwsplit, "select @@aurora_server_id;", "server_id", &server_id[0]);
Test->close_rwsplit();
Test->tprintf("server_id after failover: %s\n", server_id);
compare_masters(Test, cluster);
//Test->check_maxscale_alive();
Test->stop_timeout();
cluster->delete_rds_cluster();
int rval = Test->global_result;
delete Test;
return rval;
}

View File

@ -0,0 +1,107 @@
/**
* @file avro.cpp test of avro
* - setup binlog and avro
* - put some data to t1
* - check avro file with "maxavrocheck -vv /var/lib/maxscale/avro/test.t1.000001.avro"
* - check that data in avro file is correct
*/
#include <iostream>
#include "testconnections.h"
#include "maxadmin_operations.h"
#include "sql_t1.h"
#include "test_binlog_fnc.h"
#include <jansson.h>
#include "maxinfo_func.h"
#include <sstream>
#include <iostream>
using std::cout;
using std::endl;
int main(int argc, char *argv[])
{
TestConnections * Test = new TestConnections(argc, argv);
Test->set_timeout(600);
Test->stop_maxscale();
Test->ssh_maxscale(true, (char *) "rm -rf /var/lib/maxscale/avro");
Test->repl->connect();
execute_query(Test->repl->nodes[0], "DROP TABLE IF EXISTS t1");
Test->repl->close_connections();
sleep(5);
Test->start_binlog();
Test->set_timeout(120);
Test->stop_maxscale();
Test->ssh_maxscale(true, "rm -rf /var/lib/maxscale/avro");
Test->set_timeout(120);
Test->start_maxscale();
Test->set_timeout(60);
Test->repl->connect();
create_t1(Test->repl->nodes[0]);
insert_into_t1(Test->repl->nodes[0], 3);
execute_query(Test->repl->nodes[0], "FLUSH LOGS");
Test->repl->close_connections();
Test->set_timeout(120);
sleep(10);
char * avro_check = Test->ssh_maxscale_output(true,
"maxavrocheck -vv /var/lib/maxscale/avro/test.t1.000001.avro | grep \"{\"");
char * output = Test->ssh_maxscale_output(true, "maxavrocheck -d /var/lib/maxscale/avro/test.t1.000001.avro");
std::istringstream iss;
iss.str(output);
int x1_exp = 0;
int fl_exp = 0;
int x = 16;
for (std::string line; std::getline(iss, line);)
{
long long int x1, fl;
Test->set_timeout(20);
get_x_fl_from_json((char*)line.c_str(), &x1, &fl);
if (x1 != x1_exp || fl != fl_exp)
{
Test->add_result(1, "Output:x1 %lld, fl %lld, Expected: x1 %d, fl %d",
x1, fl, x1_exp, fl_exp);
break;
}
if ((++x1_exp) >= x)
{
x1_exp = 0;
x = x * 16;
fl_exp++;
Test->tprintf("fl = %d", fl_exp);
}
}
if (fl_exp != 3)
{
Test->add_result(1, "not enough lines in avrocheck output\n");
}
Test->set_timeout(120);
int rval = Test->global_result;
delete Test;
return rval;
}

View File

@ -0,0 +1,60 @@
/**
* @file avro_long.cpp test of avro
* - setup binlog and avro
* - put some data to t1 in the loop
*/
#include <iostream>
#include "testconnections.h"
#include "maxadmin_operations.h"
#include "sql_t1.h"
#include "test_binlog_fnc.h"
int main(int argc, char *argv[])
{
TestConnections * Test = new TestConnections(argc, argv);
Test->set_timeout(600);
Test->stop_maxscale();
Test->ssh_maxscale(true, (char *) "rm -rf /var/lib/maxscale/avro");
//Test->ssh_maxscale(true, (char *) "mkdir /var/lib/maxscale/avro; chown -R maxscale:maxscale /var/lib/maxscale/avro");
Test->repl->connect();
execute_query(Test->repl->nodes[0], (char *) "DROP TABLE IF EXISTS t1;");
Test->repl->close_connections();
sleep(5);
Test->start_binlog();
Test->set_timeout(120);
Test->stop_maxscale();
Test->ssh_maxscale(true, (char *) "rm -rf /var/lib/maxscale/avro");
Test->set_timeout(120);
Test->start_maxscale();
Test->set_timeout(60);
Test->repl->connect();
create_t1(Test->repl->nodes[0]);
for (int i = 0; i < 1000000; i++)
{
Test->set_timeout(60);
insert_into_t1(Test->repl->nodes[0], 3);
Test->tprintf("i=%d\n", i);
}
Test->repl->close_connections();
int rval = Test->global_result;
delete Test;
return rval;
}

View File

@ -0,0 +1,40 @@
/**
* @file backend_auth_fail.cpp Repeatedly connect to maxscale while the backends reject all connections
*
* MaxScale should not crash
*/
#include "testconnections.h"
int main(int argc, char** argv)
{
MYSQL *mysql[1000];
TestConnections * Test = new TestConnections(argc, argv);
Test->stop_timeout();
Test->repl->execute_query_all_nodes((char *) "set global max_connections = 10;");
for (int x = 0; x < 3; x++)
{
Test->tprintf("Creating 100 connections...\n");
for (int i = 0; i < 100; i++)
{
Test->set_timeout(30);
mysql[i] = Test->open_readconn_master_connection();
execute_query_silent(mysql[i], "select 1");
}
Test->stop_timeout();
for (int i = 0; i < 100; i++)
{
Test->set_timeout(30);
mysql_close(mysql[i]);
}
}
Test->stop_timeout();
Test->check_maxscale_alive();
int rval = Test->global_result;
delete Test;
return rval;
}

View File

@ -0,0 +1,27 @@
/**
* @file bad_pres.cpp check that Maxscale prints warning if persistpoolmax=-1 for all backends (bug MXS-576)
*
* - Maxscale.cnf contains persistpoolmax=-1 for all servers
* - check log warning about it
*/
#include <iostream>
#include <unistd.h>
#include "testconnections.h"
using namespace std;
int main(int argc, char *argv[])
{
TestConnections * Test = new TestConnections(argc, argv);
Test->set_timeout(10);
Test->connect_maxscale();
Test->check_log_err((char *) "warning -1", true);
Test->check_maxscale_alive();
int rval = Test->global_result;
delete Test;
return rval;
}

View File

@ -0,0 +1,228 @@
#include "big_load.h"
#include <pthread.h>
void load(long int *new_inserts, long int *new_selects, long int *selects, long int *inserts, int threads_num,
TestConnections * Test, long int *i1, long int *i2, int rwsplit_only, bool galera, bool report_errors)
{
char sql[1000000];
thread_data data;
Mariadb_nodes * nodes;
if (galera)
{
nodes = Test->galera;
}
else
{
nodes = Test->repl;
}
int sql_l = 20000;
int run_time = 100;
if (Test->smoke)
{
sql_l = 500;
run_time = 10;
}
nodes->connect();
Test->connect_rwsplit();
data.i1 = 0;
data.i2 = 0;
data.exit_flag = 0;
data.Test = Test;
data.rwsplit_only = rwsplit_only;
// connect to the MaxScale server (rwsplit)
if (Test->conn_rwsplit == NULL )
{
if (report_errors)
{
Test->add_result(1, "Can't connect to MaxScale\n");
}
//Test->copy_all_logs();
exit(1);
}
else
{
create_t1(Test->conn_rwsplit);
create_insert_string(sql, sql_l, 1);
if ((execute_query(Test->conn_rwsplit, sql) != 0) && (report_errors))
{
Test->add_result(1, "Query %s failed\n", sql);
}
// close connections
Test->close_rwsplit();
Test->tprintf("Waiting for the table to replicate\n");
Test->repl->sync_slaves();
pthread_t thread1[threads_num];
pthread_t thread2[threads_num];
int iret1[threads_num];
int iret2[threads_num];
Test->tprintf("COM_INSERT and COM_SELECT before executing test\n");
Test->add_result(get_global_status_allnodes(&selects[0], &inserts[0], nodes, 0),
"get_global_status_allnodes failed\n");
data.exit_flag = 0;
/* Create independent threads each of them will execute function */
for (int i = 0; i < threads_num; i++)
{
iret1[i] = pthread_create(&thread1[i], NULL, query_thread1, &data);
iret2[i] = pthread_create(&thread2[i], NULL, query_thread2, &data);
}
Test->tprintf("Threads are running %d seconds \n", run_time);
sleep(run_time);
data.exit_flag = 1;
Test->tprintf("Waiting for all threads to exit\n");
Test->set_timeout(100);
for (int i = 0; i < threads_num; i++)
{
pthread_join(thread1[i], NULL);
pthread_join(thread2[i], NULL);
}
sleep(1);
Test->tprintf("COM_INSERT and COM_SELECT after executing test\n");
get_global_status_allnodes(&new_selects[0], &new_inserts[0], nodes, 0);
print_delta(&new_selects[0], &new_inserts[0], &selects[0], &inserts[0], nodes->N);
Test->tprintf("First group of threads did %d queries, second - %d \n", data.i1, data.i2);
}
nodes->close_connections();
*i1 = data.i1;
*i2 = data.i2;
}
void *query_thread1( void *ptr )
{
MYSQL * conn1;
MYSQL * conn2;
MYSQL * conn3;
int conn_err = 0;
thread_data * data = (thread_data *) ptr;
conn1 = open_conn_db_timeout(data->Test->rwsplit_port,
data->Test->maxscale_IP,
(char *) "test",
data->Test->maxscale_user,
data->Test->maxscale_password,
20,
data->Test->ssl);
//conn1 = data->Test->open_rwsplit_connection();
if (mysql_errno(conn1) != 0)
{
conn_err++;
}
if (data->rwsplit_only == 0)
{
//conn2 = data->Test->open_readconn_master_connection();
conn2 = open_conn_db_timeout(data->Test->readconn_master_port,
data->Test->maxscale_IP,
(char *) "test",
data->Test->maxscale_user,
data->Test->maxscale_password,
20,
data->Test->ssl);
if (mysql_errno(conn2) != 0)
{
conn_err++;
}
//conn3 = data->Test->open_readconn_slave_connection();
conn3 = open_conn_db_timeout(data->Test->readconn_slave_port,
data->Test->maxscale_IP,
(char *) "test",
data->Test->maxscale_user,
data->Test->maxscale_password,
20,
data->Test->ssl);
if (mysql_errno(conn3) != 0)
{
conn_err++;
}
}
if (conn_err == 0)
{
while (data->exit_flag == 0)
{
if (execute_query_silent(conn1, (char *) "SELECT * FROM t1;") == 0)
{
__sync_fetch_and_add(&data->i1, 1);
}
if (data->rwsplit_only == 0)
{
execute_query_silent(conn2, (char *) "SELECT * FROM t1;");
execute_query_silent(conn3, (char *) "SELECT * FROM t1;");
}
}
mysql_close(conn1);
if (data->rwsplit_only == 0)
{
mysql_close(conn2);
mysql_close(conn3);
}
}
return NULL;
}
void *query_thread2(void *ptr )
{
MYSQL * conn1;
MYSQL * conn2;
MYSQL * conn3;
thread_data * data = (thread_data *) ptr;
//conn1 = data->Test->open_rwsplit_connection();
conn1 = open_conn_db_timeout(data->Test->rwsplit_port,
data->Test->maxscale_IP,
(char *) "test",
data->Test->maxscale_user,
data->Test->maxscale_password,
20,
data->Test->ssl);
if (data->rwsplit_only == 0)
{
//conn2 = data->Test->open_readconn_master_connection();
//conn3 = data->Test->open_readconn_slave_connection();
conn2 = open_conn_db_timeout(data->Test->readconn_master_port,
data->Test->maxscale_IP,
(char *) "test",
data->Test->maxscale_user,
data->Test->maxscale_password,
20,
data->Test->ssl);
//if (mysql_errno(conn2) != 0) { conn_err++; }
conn3 = open_conn_db_timeout(data->Test->readconn_slave_port,
data->Test->maxscale_IP,
(char *) "test",
data->Test->maxscale_user,
data->Test->maxscale_password,
20,
data->Test->ssl);
//if (mysql_errno(conn3) != 0) { conn_err++; }
}
while (data->exit_flag == 0)
{
sleep(1);
if (execute_query_silent(conn1, (char *) "SELECT * FROM t1;") == 0)
{
__sync_fetch_and_add(&data->i2, 1);
}
if (data->rwsplit_only == 0)
{
execute_query_silent(conn2, (char *) "SELECT * FROM t1;");
execute_query_silent(conn3, (char *) "SELECT * FROM t1;");
}
}
mysql_close(conn1);
if (data->rwsplit_only == 0)
{
mysql_close(conn2);
mysql_close(conn3);
}
return NULL;
}

View File

@ -0,0 +1,39 @@
#ifndef BIG_LOAD_H
#define BIG_LOAD_H
#include "testconnections.h"
#include "sql_t1.h"
#include "get_com_select_insert.h"
//pthread_mutex_t mutex1 = PTHREAD_MUTEX_INITIALIZER;
typedef struct
{
int exit_flag;
long i1;
long i2;
int rwsplit_only;
TestConnections * Test;
} thread_data;
void *query_thread1(void *ptr );
void *query_thread2(void *ptr );
/**
* @brief load Creates load on Maxscale routers
* @param new_inserts COM_INSERT variable values array for all nodes after test
* @param new_selects COM_SELECT variable values array for all nodes after test
* @param selects COM_SELECT variable values array for all nodes before test
* @param inserts COM_INSERT variable values array for all nodes before test
* @param threads_num Number of load threads
* @param Test TestConnections object
* @param i1 Number of queries executed by "fast" threads (no waiting between queries)
* @param i2 Number of queries executed by "slow" threads (sleep 1 second between queries)
* @param rwsplit_only if 1 create load only on RWSplit router, do not load ReadConn router
* @param galera if true use Galera backend (Test->galera instead of Test->repl)
* @param report_errors if true call add_result() in case of query failure
*/
void load(long *new_inserts, long *new_selects, long *selects, long *inserts, int threads_num,
TestConnections *Test, long *i1, long *i2, int rwsplit_only, bool galera, bool report_errors);
#endif // BIG_LOAD_H

View File

@ -0,0 +1,23 @@
#include "big_transaction.h"
int big_transaction(MYSQL * conn, int N)
{
int local_result = 0;
char sql[1000000];
local_result += create_t1(conn);
local_result += execute_query(conn, (char *) "START TRANSACTION");
local_result += execute_query(conn, (char *) "SET autocommit = 0");
for (int i = 0; i < N; i++)
{
create_insert_string(sql, 10000, i);
local_result += execute_query(conn, sql);
local_result += execute_query(conn, "CREATE TABLE t2(id int);");
local_result += execute_query(conn, sql);
local_result += execute_query(conn, "DROP TABLE t2;");
local_result += execute_query(conn, sql);
}
local_result += execute_query(conn, (char *) "COMMIT");
return local_result;
}

View File

@ -0,0 +1,17 @@
#ifndef BIG_TRANSACTION_H
#define BIG_TRANSACTION_H
#include <mariadb/mysql.h>
#include <stdio.h>
#include <stdlib.h>
#include "sql_t1.h"
/**
* @brief big_transaction Executes big transaction (includes N INSERTs of 10000 rows)
* @param conn MYSQL connection handler
* @param N Number of INSERTs
* @return 0 if success
*/
int big_transaction(MYSQL * conn, int N);
#endif // BIG_TRANSACTION_H

View File

@ -0,0 +1,72 @@
/**
* @file binlog_big_transaction.cpp test of a simple binlog router setup; executes a number of big transactions
*/
#include <iostream>
#include "testconnections.h"
#include "maxadmin_operations.h"
#include "sql_t1.h"
#include "test_binlog_fnc.h"
#include "big_transaction.h"
void *disconnect_thread( void *ptr );
TestConnections * Test ;
int exit_flag;
int main(int argc, char *argv[])
{
Test = new TestConnections(argc, argv);
Test->set_timeout(3000);
Test->set_log_copy_interval(300);
Test->repl->connect();
execute_query(Test->repl->nodes[0], (char *) "DROP TABLE IF EXISTS t1;");
Test->repl->close_connections();
sleep(5);
Test->start_binlog();
pthread_t threads;
int iret;
exit_flag = 0;
iret = pthread_create( &threads, NULL, disconnect_thread, NULL);
Test->repl->connect();
for (int i = 0; i < 100000; i++)
{
Test->set_timeout(3000);
Test->tprintf("Trying transactions: %d\n", i);
Test->add_result(big_transaction(Test->repl->nodes[0], 7), "Transaction %d failed!\n", i);
}
Test->repl->close_connections();
int rval = Test->global_result;
delete Test;
return rval;
}
void *disconnect_thread( void *ptr )
{
MYSQL * conn;
char cmd[256];
int i;
conn = open_conn(Test->binlog_port, Test->maxscale_IP, Test->repl->user_name, Test->repl->password,
Test->repl->ssl);
Test->add_result(mysql_errno(conn), "Error connecting to Binlog router, error: %s\n", mysql_error(conn));
i = 3;
while (exit_flag == 0)
{
sprintf(cmd, "DISCONNECT SERVER %d", i);
execute_query(conn, cmd);
i++;
if (i > Test->repl->N)
{
i = 3;
sleep(30);
execute_query(conn, (char *) "DISCONNECT SERVER ALL");
}
sleep(5);
}
return NULL;
}

View File

@ -0,0 +1,322 @@
/**
* @file binlog_change_master.cpp In the binlog router setup stop Master and promote one of the Slaves to be new Master
* - setup binlog
* - start a thread which executes transactions
* - block the master
* - the transaction thread tries to elect a new master and continue with it
* - continue transactions with the new master
* - stop transactions
* - wait
* - check data on all nodes
*/
#include <iostream>
#include "testconnections.h"
#include "maxadmin_operations.h"
#include "sql_t1.h"
#include "test_binlog_fnc.h"
#include "big_transaction.h"
void *disconnect_thread( void *ptr );
void *transaction_thread( void *ptr );
TestConnections * Test ;
int exit_flag;
int master = 0;
int i_trans = 0;
int failed_transaction_num = 0;
/** The amount of rows each transaction inserts */
const int N_INSERTS = 100;
int transaction(MYSQL * conn, int N)
{
int local_result = 0;
char sql[1000000];
Test->tprintf("START TRANSACTION\n");
local_result += execute_query(conn, (char *) "START TRANSACTION");
if (local_result != 0)
{
Test->tprintf("START TRANSACTION Failed\n");
return local_result;
}
Test->tprintf("SET autocommit = 0\n");
local_result += execute_query(conn, (char *) "SET autocommit = 0");
if (local_result != 0)
{
Test->tprintf("SET Failed\n");
return local_result;
}
create_insert_string(sql, N_INSERTS, N);
Test->tprintf("INSERT\n");
local_result += execute_query(conn, sql);
if (local_result != 0)
{
Test->tprintf("Insert Failed\n");
return local_result;
}
Test->tprintf("COMMIT\n");
local_result += execute_query(conn, (char *) "COMMIT");
if (local_result != 0)
{
Test->tprintf("Commit Failed\n");
return local_result;
}
return local_result;
}
int main(int argc, char *argv[])
{
int j;
Test = new TestConnections(argc, argv);
Test->set_timeout(3000);
Test->repl->connect();
execute_query(Test->repl->nodes[0], (char *) "DROP TABLE IF EXISTS t1;");
Test->repl->close_connections();
sleep(5);
Test->repl->connect();
Test->repl->execute_query_all_nodes((char *) "STOP SLAVE");
Test->repl->execute_query_all_nodes((char *) "RESET SLAVE ALL");
Test->repl->execute_query_all_nodes((char *) "RESET MASTER");
Test->tprintf("Starting binlog configuration\n");
Test->start_binlog();
pthread_t disconnec_thread_t;
int disconnect_iret;
pthread_t transaction_thread_t;
int transaction_iret;
exit_flag = 0;
Test->tprintf("Starting query thread\n");
transaction_iret = pthread_create(&transaction_thread_t, NULL, transaction_thread, NULL);
Test->tprintf("Sleeping\n");
Test->stop_timeout();
Test->repl->connect();
int flushes = Test->smoke ? 2 : 5;
for (j = 0; j < flushes; j++)
{
Test->tprintf("Flush logs on master\n");
execute_query(Test->repl->nodes[0], (char *) "flush logs");
sleep(15);
}
sleep(15);
Test->tprintf("Blocking master\n");
Test->repl->block_node(0);
Test->stop_timeout();
sleep(30);
Test->tprintf("Done! Waiting for thread\n");
exit_flag = 1;
pthread_join(transaction_thread_t, NULL );
Test->tprintf("Done!\n");
Test->tprintf("Checking data on the node3 (slave)\n");
char sql[256];
char rep[256];
int rep_d;
Test->tprintf("Sleeping to let replication happen\n");
sleep(30);
Test->repl->connect();
for (int i_n = 3; i_n < Test->repl->N; i_n++)
{
for (j = 0; j < i_trans; j++)
{
sprintf(sql, "select count(*) from t1 where fl=%d;", j);
find_field(Test->repl->nodes[i_n], sql, (char *) "count(*)", rep);
Test->tprintf("Transaction %d put %s rows\n", j, rep);
sscanf(rep, "%d", &rep_d);
if ((rep_d != N_INSERTS) && (j != (failed_transaction_num - 1)))
{
Test->add_result(1, "Transaction %d did not put data into slave\n", j);
}
if ((j == (failed_transaction_num - 1)) && (rep_d != 0) && (rep_d != N_INSERTS))
{
Test->add_result(1, "Incomplete transaction detected - %d\n", j);
}
if ((j == (failed_transaction_num - 1) && rep_d == 0))
{
Test->tprintf("Transaction %d was rejected, OK\n", j);
}
}
}
Test->repl->close_connections();
int rval = Test->global_result;
delete Test;
return rval;
}
const char * setup_slave1 =
"change master to MASTER_HOST='%s',\
MASTER_USER='repl',\
MASTER_PASSWORD='repl',\
MASTER_LOG_FILE='%s',\
MASTER_LOG_POS=%s,\
MASTER_PORT=%d";
int select_new_master(TestConnections * test)
{
char log_file[256];
char log_file_new[256];
char log_pos[256];
char maxscale_log_file[256];
char maxscale_log_file_new[256];
char maxscale_log_pos[256];
// Stopping slave
test->tprintf("Connection to backend\n");
test->repl->connect();
test->tprintf("'stop slave' to node2\n");
test->try_query(Test->repl->nodes[2], (char *) "stop slave;");
test->tprintf("'reset slave all' to node2\n");
test->try_query(Test->repl->nodes[2], (char *) "RESET slave all;");
//execute_query(Test->repl->nodes[2], (char *) "reset master;");
// Get master status
test->tprintf("show master status\n");
find_field(test->repl->nodes[2], (char *) "show master status", (char *) "File", &log_file[0]);
find_field(test->repl->nodes[2], (char *) "show master status", (char *) "Position", &log_pos[0]);
test->tprintf("Real master file: %s\n", log_file);
test->tprintf("Real master pos : %s\n", log_pos);
test->tprintf("Connecting to MaxScale binlog router (with any DB)\n");
MYSQL * binlog = open_conn_no_db(test->binlog_port, test->maxscale_IP, test->repl->user_name,
test->repl->password, test->ssl);
test->add_result(mysql_errno(binlog), "Error connection to binlog router %s\n", mysql_error(binlog));
test->tprintf("show master status on maxscale\n");
find_field(binlog, (char *) "show master status", (char *) "File", &maxscale_log_file[0]);
find_field(binlog, (char *) "show master status", (char *) "Position", &maxscale_log_pos[0]);
if (!maxscale_log_file[0] || !maxscale_log_pos[0])
{
test->add_result(1, "Failed to query for master status");
return 1;
}
test->tprintf("Real master file: %s\n", maxscale_log_file);
test->tprintf("Real master pos : %s\n", maxscale_log_pos);
char * p = strchr(maxscale_log_file, '.') + 1;
test->tprintf("log file num %s\n", p);
int pd;
sscanf(p, "%d", &pd);
test->tprintf("log file num (d) %d\n", pd);
p[0] = '\0';
test->tprintf("log file name %s\n", maxscale_log_file);
sprintf(maxscale_log_file_new, "%s%06d", maxscale_log_file, pd + 1);
test->try_query(test->repl->nodes[2], (char *) "reset master");
test->tprintf("Flush logs %d times\n", pd + 1);
for (int k = 0; k < pd + 1; k++)
{
test->try_query(test->repl->nodes[2], (char *) "flush logs");
}
// Set Maxscale to new master
test->try_query(binlog, "stop slave");
test->tprintf("configuring Maxscale binlog router\n");
test->tprintf("reconnect to binlog\n");
mysql_close(binlog);
binlog = open_conn_no_db(test->binlog_port, test->maxscale_IP, test->repl->user_name, test->repl->password,
test->ssl);
test->add_result(mysql_errno(binlog), "Error connection to binlog router %s\n", mysql_error(binlog));
char str[1024];
//sprintf(str, setup_slave1, test->repl->IP[2], log_file_new, test->repl->port[2]);
sprintf(str, setup_slave1, test->repl->IP[2], maxscale_log_file_new, "4", test->repl->port[2]);
test->tprintf("change master query: %s\n", str);
test->try_query(binlog, str);
test->try_query(binlog, "start slave");
test->repl->close_connections();
return 0;
}
void *disconnect_thread( void *ptr )
{
MYSQL * conn;
char cmd[256];
int i;
conn = open_conn(Test->binlog_port, Test->maxscale_IP, Test->repl->user_name, Test->repl->password,
Test->repl->ssl);
Test->add_result(mysql_errno(conn), "Error connecting to Binlog router, error: %s\n", mysql_error(conn));
i = 3;
while (exit_flag == 0)
{
sprintf(cmd, "DISCONNECT SERVER %d", i);
execute_query(conn, cmd);
i++;
if (i > Test->repl->N)
{
i = 3;
sleep(30);
execute_query(conn, (char *) "DISCONNECT SERVER ALL");
}
sleep(5);
}
return NULL;
}
void *transaction_thread( void *ptr )
{
MYSQL * conn;
int trans_result = 0;
conn = open_conn_db_timeout(Test->repl->port[master], Test->repl->IP[master], (char *) "test",
Test->repl->user_name, Test->repl->password, 20, Test->repl->ssl);
Test->add_result(mysql_errno(conn), "Error connecting to Binlog router, error: %s\n", mysql_error(conn));
create_t1(conn);
while (exit_flag == 0)
{
Test->tprintf("Transaction %d\n", i_trans);
trans_result = transaction(conn, i_trans);
if (trans_result != 0)
{
Test->tprintf("Transaction %d failed, doing master failover\n", i_trans);
failed_transaction_num = i_trans;
Test->tprintf("Closing connection\n");
mysql_close(conn);
Test->tprintf("Calling select_new_master()\n");
select_new_master(Test);
master = 2;
conn = open_conn_db_timeout(Test->repl->port[master], Test->repl->IP[master], (char *) "test",
Test->repl->user_name, Test->repl->password, 20, Test->repl->ssl);
Test->add_result(mysql_errno(conn), "Error connecting to Binlog router, error: %s\n", mysql_error(conn));
Test->tprintf("Retrying transaction %d\n", i_trans);
i_trans--;
}
i_trans++;
}
i_trans--;
return NULL;
}

View File

@ -0,0 +1,12 @@
[mysqld]
plugin-load-add=file_key_management.so
file_key_management_encryption_algorithm=aes_cbc
file_key_management_filename = /etc/mariadb_binlog_keys.txt
encrypt-binlog=1
# Enable checksum
binlog_checksum=CRC32
# Enable large packet handling
max_allowed_packet=1042M
innodb_log_file_size=142M

View File

@ -0,0 +1,12 @@
[mysqld]
plugin-load-add=file_key_management.so
file_key_management_encryption_algorithm=aes_ctr
file_key_management_filename = /etc/mariadb_binlog_keys.txt
encrypt-binlog=1
# Enable checksum
binlog_checksum=CRC32
# Enable large packet handling
max_allowed_packet=1042M
innodb_log_file_size=142M
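
Both encryption configurations above point the file_key_management plugin at /etc/mariadb_binlog_keys.txt. For reference, that plugin expects one '<key id>;<hex-encoded key>' entry per line; the key value below is an illustrative placeholder, not the key used by these tests:

# /etc/mariadb_binlog_keys.txt (example only; a real 128-bit key can be generated with 'openssl rand -hex 16')
1;770A8A65DA156D24EE2A093277530142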

View File

@ -0,0 +1,16 @@
/**
* @file binlog_failover.cpp Test of a failover scenario for the binlog router
*
* - set up the following configuration:
* - one master
* - two maxscale binlog machines
* - two slaves connected to each maxscale binlog machine
* - put some data via the master
* - block the master
* - stop all Maxscale machines with the STOP SLAVE command
* - check which Maxscale machine contains the most recent data (let's call this machine 'most_recent_maxscale')
* - use CHANGE MASTER on the second Maxscale machine to point it to the Maxscale machine from the previous step ('most_recent_maxscale')
* - wait until the second Maxscale is in sync with 'most_recent_maxscale' (use SHOW MASTER STATUS, as sketched below)
* - select new master (HOW??)
* - set all Maxscale machines to be slaves of the new master
*/
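
/*
 * A rough sketch of the sync-wait step above, assuming 'binlog1' and 'binlog2'
 * are open MYSQL connections to the two Maxscale binlog routers and reusing the
 * find_field() helper used throughout this suite; the function name and the
 * 60-second poll budget are illustrative assumptions, not part of the test yet.
 */
static int wait_until_in_sync(TestConnections * test, MYSQL * binlog1, MYSQL * binlog2)
{
    char file1[256], pos1[256], file2[256], pos2[256];
    for (int attempt = 0; attempt < 60; attempt++)
    {
        // Poll SHOW MASTER STATUS on both routers until file and position match
        find_field(binlog1, (char *) "show master status", (char *) "File", file1);
        find_field(binlog1, (char *) "show master status", (char *) "Position", pos1);
        find_field(binlog2, (char *) "show master status", (char *) "File", file2);
        find_field(binlog2, (char *) "show master status", (char *) "Position", pos2);
        if ((strcmp(file1, file2) == 0) && (strcmp(pos1, pos2) == 0))
        {
            return 0;
        }
        sleep(1);
    }
    test->add_result(1, "Second Maxscale did not catch up with 'most_recent_maxscale'\n");
    return 1;
}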

View File

@ -0,0 +1,26 @@
/**
* @file setup_incompl Tries to start a binlog setup with an incomplete MaxScale.cnf
* and checks that MaxScale does not crash
*/
#include <iostream>
#include "testconnections.h"
#include "maxadmin_operations.h"
#include "sql_t1.h"
int main(int argc, char *argv[])
{
TestConnections * Test = new TestConnections(argc, argv);
Test->set_timeout(60);
Test->connect_maxscale();
Test->close_maxscale_connections();
sleep(10);
Test->check_log_err("fatal signal 11", false);
int rval = Test->global_result;
delete Test;
return rval;
}

View File

@ -0,0 +1,91 @@
/**
* @file binlog_semisync.cpp Same test as setup_binlog, but with semisync enabled
*/
#include <iostream>
#include "testconnections.h"
#include "maxadmin_operations.h"
#include "sql_t1.h"
#include "test_binlog_fnc.h"
int main(int argc, char *argv[])
{
TestConnections * Test = new TestConnections(argc, argv);
Test->tprintf("Test object initialized\n");
Test->set_timeout(3000);
int options_set = 3;
if (Test->smoke)
{
options_set = 1;
}
Test->tprintf("Trying to connect to backend\n");
if (Test->repl->connect() == 0)
{
Test->tprintf("DROP TABLE t1\n");
execute_query(Test->repl->nodes[0], (char *) "DROP TABLE IF EXISTS t1;");
//Test->tprintf("SET GLOBAL rpl_semi_sync_master_enabled = 1;\n");
//execute_query(Test->repl->nodes[0], (char *) "SET GLOBAL rpl_semi_sync_master_enabled = 1;");
Test->repl->close_connections();
sleep(5);
for (int option = 0; option < options_set; option++)
{
Test->binlog_cmd_option = option;
Test->start_binlog();
Test->repl->connect();
Test->tprintf("install semisync plugin\n");
execute_query(Test->repl->nodes[0],
(char *) "INSTALL PLUGIN rpl_semi_sync_master SONAME 'semisync_master.so';");
//sleep(10);
Test->tprintf("Reconnect\n");
Test->repl->close_connections();
Test->repl->connect();
Test->tprintf("SET GLOBAL rpl_semi_sync_master_enabled = 1;\n");
execute_query(Test->repl->nodes[0], (char *) "SET GLOBAL rpl_semi_sync_master_enabled = 1;");
//sleep(10);
Test->repl->close_connections();
test_binlog(Test);
Test->repl->connect();
Test->tprintf("SET GLOBAL rpl_semi_sync_master_enabled = 0;\n");
execute_query(Test->repl->nodes[0], (char *) "SET GLOBAL rpl_semi_sync_master_enabled = 0;");
//sleep(10);
Test->repl->close_connections();
test_binlog(Test);
Test->repl->connect();
Test->tprintf("uninstall semisync plugin\n");
execute_query(Test->repl->nodes[0], (char *) "UNINSTALL PLUGIN rpl_semi_sync_master;");
Test->tprintf("Reconnect\n");
Test->repl->close_connections();
Test->repl->connect();
Test->tprintf("SET GLOBAL rpl_semi_sync_master_enabled = 1;\n");
execute_query(Test->repl->nodes[0], (char *) "SET GLOBAL rpl_semi_sync_master_enabled = 1;");
//sleep(10);
Test->repl->close_connections();
test_binlog(Test);
Test->repl->connect();
Test->tprintf("SET GLOBAL rpl_semi_sync_master_enabled = 0;\n");
execute_query(Test->repl->nodes[0], (char *) "SET GLOBAL rpl_semi_sync_master_enabled = 0;");
sleep(10);
Test->repl->close_connections();
test_binlog(Test);
}
}
else
{
Test->add_result(1, "Can't connect to backend\n");
}
int rval = Test->global_result;
delete Test;
return rval;
}

View File

@ -0,0 +1,193 @@
#include "blob_test.h"
int test_longblob(TestConnections* Test, MYSQL * conn, char * blob_name, unsigned long chunk_size, int chunks,
int rows)
{
unsigned long size = chunk_size;
unsigned long * data;
unsigned long i, j;
MYSQL_BIND param[1];
char sql[256];
int global_res = Test->global_result;
//Test->tprintf("chunk size %lu chunks %d inserts %d\n", chunk_size, chunks, rows);
char *insert_stmt = (char *) "INSERT INTO long_blob_table(x, b) VALUES(1, ?)";
Test->tprintf("Creating table with %s\n", blob_name);
Test->try_query(conn, (char *) "DROP TABLE IF EXISTS long_blob_table");
sprintf(sql, "CREATE TABLE long_blob_table(id int NOT NULL AUTO_INCREMENT, x INT, b %s, PRIMARY KEY (id))",
blob_name);
Test->try_query(conn, sql);
for (int k = 0; k < rows; k++)
{
Test->tprintf("Preparintg INSERT stmt\n");
MYSQL_STMT * stmt = mysql_stmt_init(conn);
if (stmt == NULL)
{
Test->add_result(1, "stmt init error: %s\n", mysql_error(conn));
}
Test->add_result(mysql_stmt_prepare(stmt, insert_stmt, strlen(insert_stmt)), "Error preparing stmt: %s\n",
mysql_stmt_error(stmt));
memset(param, 0, sizeof(param));
param[0].buffer_type = MYSQL_TYPE_STRING;
param[0].is_null = 0;
Test->tprintf("Binding parameter\n");
Test->add_result(mysql_stmt_bind_param(stmt, param), "Error parameter binding: %s\n", mysql_stmt_error(stmt));
Test->tprintf("Filling buffer\n");
data = (unsigned long *) malloc(size * sizeof(long int));
if (data == NULL)
{
Test->add_result(1, "Memory allocation error\n");
}
Test->tprintf("Sending data in %d bytes chunks, total size is %d\n", size * sizeof(unsigned long),
(size * sizeof(unsigned long)) * chunks);
for (i = 0; i < chunks; i++)
{
for (j = 0; j < size; j++)
{
data[j] = j + i * size;
}
Test->set_timeout(300);
Test->tprintf("Chunk #%d\n", i);
if (mysql_stmt_send_long_data(stmt, 0, (char *) data, size * sizeof(unsigned long)) != 0)
{
Test->add_result(1, "Error inserting data, iteration %d, error %s\n", i, mysql_stmt_error(stmt));
return 1;
}
}
//for (int k = 0; k < rows; k++)
//{
Test->tprintf("Executing statement: %02d\n", k);
Test->set_timeout(3000);
Test->add_result(mysql_stmt_execute(stmt), "INSERT Statement with %s failed, error is %s\n", blob_name,
mysql_stmt_error(stmt));
//}
Test->add_result(mysql_stmt_close(stmt), "Error closing stmt\n");
free(data);
}
if (global_res == Test->global_result)
{
Test->tprintf("%s is OK\n", blob_name);
}
else
{
Test->tprintf("%s FAILED\n", blob_name);
}
return 0;
}
int check_longblob_data(TestConnections* Test, MYSQL * conn, unsigned long chunk_size, int chunks,
int rows)
{
//char *select_stmt = (char *) "SELECT id, x, b FROM long_blob_table WHERE id = ?";
char *select_stmt = (char *) "SELECT id, x, b FROM long_blob_table ";
MYSQL_STMT * stmt = mysql_stmt_init(conn);
if (stmt == NULL)
{
Test->add_result(1, "stmt init error: %s\n", mysql_error(conn));
}
Test->add_result(mysql_stmt_prepare(stmt, select_stmt, strlen(select_stmt)), "Error preparing stmt: %s\n",
mysql_stmt_error(stmt));
MYSQL_BIND param[1], result[3];
int id = 1;
memset(param, 0, sizeof(param));
memset(result, 0, sizeof(result));
param[0].buffer_type = MYSQL_TYPE_LONG;
param[0].buffer = &id;
unsigned long * data = (unsigned long *) malloc(chunk_size * chunks * sizeof(long int));
int r_id;
int r_x;
unsigned long l_id;
unsigned long l_x;
my_bool b_id;
my_bool b_x;
my_bool e_id;
my_bool e_x;
result[0].buffer_type = MYSQL_TYPE_LONG;
result[0].buffer = &r_id;
result[0].buffer_length = 0;
result[0].length = &l_id;
result[0].is_null = &b_id;
result[0].error = &e_id;
result[1].buffer_type = MYSQL_TYPE_LONG;
result[1].buffer = &r_x;
result[1].buffer_length = 0;
result[1].length = &l_x;
result[1].is_null = &b_x;
result[1].error = &e_x;
result[2].buffer_type = MYSQL_TYPE_LONG_BLOB;
result[2].buffer = data;
result[2].buffer_length = chunk_size * chunks * sizeof(long int);
/*
if (mysql_stmt_bind_param(stmt, param) != 0)
{
printf("Could not bind parameters\n");
return 1;
}
*/
if (mysql_stmt_bind_result(stmt, result) != 0)
{
printf("Could not bind results\n");
return 1;
}
if (mysql_stmt_execute(stmt) != 0)
{
Test->tprintf("Error executing stmt %s\n", mysql_error(Test->conn_rwsplit));
}
if (mysql_stmt_store_result(stmt) != 0)
{
printf("Could not buffer result set\n");
return 1;
}
int row = 0;
while (!mysql_stmt_fetch(stmt))
{
Test->tprintf("id=%d\tx=%d\n", r_id, r_x);
if (r_id != row + 1)
{
Test->add_result(1, "id field is wrong! Expected %d, but it is %d\n", row + 1, r_id);
}
for (unsigned long y = 0; y < chunk_size * chunks; y++)
{
if (data[y] != y)
{
Test->add_result(1, "Data is wrong!\n");
}
//printf("y = %d \t%lu\tid=%d\tx=%d\n", y, data[y], r_id, r_x);
}
row++;
}
if (row != rows)
{
Test->add_result(1, "Wrong number of rows in the table! Expected %d, but it is %d\n", rows, row);
}
mysql_stmt_free_result(stmt);
mysql_stmt_close(stmt);
free(data);
return 0;
}

View File

@ -0,0 +1,32 @@
#ifndef BLOB_TEST_H
#define BLOB_TEST_H
#include "testconnections.h"
/**
* @brief test_longblob INSERT a large amount of data into long_blob_table
* @param Test TestConnection object
* @param conn MYSQL connection handler
* @param blob_name blob type (LONGBLOB, MEDIUMBLOB or BLOB)
* @param chunk_size size of one data chunk (in sizeof(unsigned long) units)
* @param chunks number of chunks to INSERT
* @param rows number of rows to INSERT (executes the INSERT statement 'rows' times)
* @return 0 in case of success
*/
int test_longblob(TestConnections* Test, MYSQL * conn, char * blob_name, unsigned long chunk_size, int chunks,
int rows);
/**
* @brief check_longblob_data Does a SELECT against the table created by test_longblob() and checks that the data is correct
* @param Test TestConnection object
* @param conn MYSQL connection handler
* @param chunk_size size of one data chunk (in sizeof(unsigned long) units)
* @param chunks number of chunks in the table
* @param rows number of rows in the table
* @return 0 in case of success
*/
int check_longblob_data(TestConnections* Test, MYSQL * conn, unsigned long chunk_size, int chunks,
int rows);
#endif // BLOB_TEST_H
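
A minimal usage sketch, assuming the same TestConnections plumbing as the other tests in this suite; the chunk and row counts are arbitrary illustrative values:

#include "blob_test.h"

int main(int argc, char *argv[])
{
    TestConnections * Test = new TestConnections(argc, argv);
    Test->repl->connect();
    Test->connect_rwsplit();
    // INSERT one ~2 MB LONGBLOB row: 1000 chunks of 250 unsigned longs each
    test_longblob(Test, Test->repl->nodes[0], (char *) "LONGBLOB", 250, 1000, 1);
    // Read the row back through the router and verify its contents
    check_longblob_data(Test, Test->conn_rwsplit, 250, 1000, 1);
    int rval = Test->global_result;
    delete Test;
    return rval;
}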

Some files were not shown because too many files have changed in this diff