diff --git a/CMakeLists.txt b/CMakeLists.txt index 9f5634e45..a2604c114 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,4 +1,9 @@ -cmake_minimum_required(VERSION 2.6) +if(PACKAGE) + cmake_minimum_required(VERSION 2.8.12) +else() + cmake_minimum_required(VERSION 2.8) +endif() + message(STATUS "CMake version: ${CMAKE_VERSION}") include(macros.cmake) @@ -7,7 +12,7 @@ enable_testing() set_variables() set_maxscale_version() -set(CMAKE_INSTALL_PREFIX "/usr/local/skysql/maxscale" CACHE PATH "Prefix prepended to install directories.") +set(CMAKE_INSTALL_PREFIX "/usr/local/mariadb-maxscale" CACHE PATH "Prefix prepended to install directories.") set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake") @@ -23,6 +28,13 @@ find_package(MySQLClient) find_package(MySQL) find_package(Pandoc) +# You can find the variables set by this in the FindCURL.cmake file +# which is a default module in CMake. +find_package(CURL) +if(NOT CURL_FOUND) + message(FATAL_ERROR "Failed to locate dependency: libcurl") +endif() + set(CMAKE_INSTALL_RPATH ${CMAKE_INSTALL_RPATH}:${CMAKE_INSTALL_PREFIX}/lib:${CMAKE_INSTALL_PREFIX}/modules) # Make sure the release notes for this release are present if it is a stable one @@ -57,22 +69,14 @@ if(CMAKE_VERSION VERSION_GREATER 2.6) endif() endif() -if(BUILD_TYPE STREQUAL Debug) +if(CMAKE_BUILD_TYPE STREQUAL Debug) set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${DEBUG_FLAGS} -DSS_DEBUG") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${DEBUG_FLAGS} -DSS_DEBUG") message(STATUS "Generating debugging symbols and enabling debugging code") -elseif(BUILD_TYPE STREQUAL DebugSymbols) +elseif(CMAKE_BUILD_TYPE STREQUAL RelWithDebInfo) set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${DEBUG_FLAGS}") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${DEBUG_FLAGS}") message(STATUS "Generating debugging symbols") -elseif(BUILD_TYPE MATCHES Optimized) - if(NOT (DEFINED OLEVEL)) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -O2") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O2") - message(STATUS 
"Optimization level at: 2") - endif() -else() - endif() if(DEFINED OLEVEL ) @@ -106,10 +110,13 @@ include_directories(utils) include_directories(log_manager) include_directories(query_classifier) include_directories(server/include) +include_directories(server/include/maxscale) include_directories(server/inih) include_directories(server/modules/include) include_directories(${CMAKE_BINARY_DIR}/server/include) +include_directories(${CURL_INCLUDE_DIRS}) +add_subdirectory(plugins) add_subdirectory(utils) add_subdirectory(log_manager) add_subdirectory(query_classifier) @@ -119,6 +126,19 @@ if(NOT WITHOUT_MAXADMIN) endif() +execute_process(COMMAND perl ${CMAKE_SOURCE_DIR}/Documentation/format.pl +${CMAKE_SOURCE_DIR}/Documentation/Changelog.md +${CMAKE_BINARY_DIR}/Changelog.txt) +execute_process(COMMAND perl ${CMAKE_SOURCE_DIR}/Documentation/format.pl +${CMAKE_SOURCE_DIR}/Documentation/Release-Notes/MaxScale-1.1-Release-Notes.md + ${CMAKE_BINARY_DIR}/ReleaseNotes.txt) +execute_process(COMMAND perl ${CMAKE_SOURCE_DIR}/Documentation/format.pl +${CMAKE_SOURCE_DIR}/Documentation/Upgrading-To-MaxScale-1.1.0.md + ${CMAKE_BINARY_DIR}/UpgradingToMaxScale110.txt) +install(FILES ${CMAKE_BINARY_DIR}/Changelog.txt DESTINATION .) +install(FILES ${CMAKE_BINARY_DIR}/ReleaseNotes.txt DESTINATION .) +install(FILES ${CMAKE_BINARY_DIR}/UpgradingToMaxScale110.txt DESTINATION .) + message(STATUS "Installing MaxScale to: ${CMAKE_INSTALL_PREFIX}/") install(FILES server/MaxScale_template.cnf DESTINATION etc) @@ -126,11 +146,10 @@ install(FILES ${ERRMSG} DESTINATION mysql) install(FILES ${CMAKE_SOURCE_DIR}/COPYRIGHT DESTINATION .) install(FILES ${CMAKE_SOURCE_DIR}/README DESTINATION .) install(FILES ${CMAKE_SOURCE_DIR}/LICENSE DESTINATION .) -install(FILES ${CMAKE_SOURCE_DIR}/SETUP DESTINATION .) 
install(DIRECTORY DESTINATION log) # Install startup scripts and ldconfig files -if( NOT ( (DEFINED INSTALL_SYSTEM_FILES) AND ( NOT ( INSTALL_SYSTEM_FILES ) ) ) ) +if(WITH_SCRIPTS) configure_file(${CMAKE_SOURCE_DIR}/maxscale.conf.in ${CMAKE_BINARY_DIR}/maxscale.conf @ONLY) if(DEB_BASED) configure_file(${CMAKE_SOURCE_DIR}/etc/ubuntu/init.d/maxscale.in ${CMAKE_BINARY_DIR}/maxscale @ONLY) @@ -155,6 +174,10 @@ if(PACKAGE) PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE) install(FILES ${CMAKE_BINARY_DIR}/maxscale.conf DESTINATION . PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE) + install(FILES ${CMAKE_BINARY_DIR}/postinst DESTINATION . + PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE) + install(FILES ${CMAKE_BINARY_DIR}/postrm DESTINATION . + PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE) if(${CMAKE_VERSION} VERSION_LESS 2.8.12) message(WARNING "CMake version is ${CMAKE_VERSION}. 
Building of packages requires version 2.8.12 or greater.") else() @@ -186,6 +209,7 @@ if(PACKAGE) set(CPACK_PACKAGE_DESCRIPTION_FILE ${CMAKE_SOURCE_DIR}/etc/DESCRIPTION) set(CPACK_PACKAGING_INSTALL_PREFIX "${CMAKE_INSTALL_PREFIX}") set(CPACK_DEBIAN_PACKAGE_CONTROL_EXTRA "${CMAKE_BINARY_DIR}/postinst;${CMAKE_BINARY_DIR}/postrm") + set(CPACK_RPM_PACKAGE_RELEASE 1) set(CPACK_RPM_POST_INSTALL_SCRIPT_FILE ${CMAKE_BINARY_DIR}/postinst) set(CPACK_RPM_POST_UNINSTALL_SCRIPT_FILE ${CMAKE_BINARY_DIR}/postrm) set(CPACK_RPM_PACKAGE_NAME "maxscale") @@ -201,18 +225,25 @@ if(PACKAGE) endif() add_custom_target(buildtests - COMMAND ${CMAKE_COMMAND} -DDEPS_OK=Y -DBUILD_TESTS=Y -DBUILD_TYPE=Debug -DCMAKE_INSTALL_PREFIX=${CMAKE_BINARY_DIR} -DINSTALL_SYSTEM_FILES=N ${CMAKE_SOURCE_DIR} + COMMAND ${CMAKE_COMMAND} -DDEPS_OK=Y -DBUILD_TESTS=Y -DCMAKE_BUILD_TYPE=Debug -DCMAKE_INSTALL_PREFIX=${CMAKE_BINARY_DIR} -DWITH_SCRIPTS=N ${CMAKE_SOURCE_DIR} COMMAND make COMMENT "Building test suite..." VERBATIM ) add_custom_target(testall - COMMAND ${CMAKE_COMMAND} -DDEPS_OK=Y -DBUILD_TESTS=Y -DBUILD_TYPE=Debug -DCMAKE_INSTALL_PREFIX=${CMAKE_BINARY_DIR} -DINSTALL_SYSTEM_FILES=N ${CMAKE_SOURCE_DIR} + COMMAND ${CMAKE_COMMAND} -DDEPS_OK=Y -DBUILD_TESTS=Y -DCMAKE_BUILD_TYPE=Debug -DCMAKE_INSTALL_PREFIX=${CMAKE_BINARY_DIR} -DWITH_SCRIPTS=N ${CMAKE_SOURCE_DIR} COMMAND make install COMMAND ${CMAKE_COMMAND} -E copy_if_different ${CMAKE_SOURCE_DIR}/server/test/MaxScale_test.cnf ${CMAKE_BINARY_DIR}/etc/MaxScale.cnf - COMMAND ${CMAKE_COMMAND} -P ${CMAKE_SOURCE_DIR}/testall.cmake + COMMAND ${CMAKE_COMMAND} -P ${CMAKE_SOURCE_DIR}/cmake/testall.cmake COMMENT "Running full test suite..." 
VERBATIM) +add_custom_target(testcore + COMMAND ${CMAKE_COMMAND} -DDEPS_OK=Y -DBUILD_TESTS=Y -DCMAKE_BUILD_TYPE=Debug -DCMAKE_INSTALL_PREFIX=${CMAKE_BINARY_DIR} -DWITH_SCRIPTS=N ${CMAKE_SOURCE_DIR} + COMMAND make install + COMMAND ${CMAKE_COMMAND} -E copy_if_different ${CMAKE_SOURCE_DIR}/server/test/MaxScale_test.cnf ${CMAKE_BINARY_DIR}/etc/MaxScale.cnf + COMMAND ctest -R Internal + COMMENT "Running core test suite..." VERBATIM) + # uninstall target # see http://www.cmake.org/Wiki/CMake_FAQ#Can_I_do_.22make_uninstall.22_with_CMake.3F configure_file( @@ -239,7 +270,7 @@ endif() # Testall target with Valgrind if(VALGRIND_FOUND) add_custom_target(testall-valgrind - COMMAND ${CMAKE_COMMAND} -DDEPS_OK=Y -DBUILD_TESTS=Y -DBUILD_TYPE=Debug -DCMAKE_INSTALL_PREFIX=${CMAKE_BINARY_DIR} -DINSTALL_SYSTEM_FILES=N ${CMAKE_SOURCE_DIR} + COMMAND ${CMAKE_COMMAND} -DDEPS_OK=Y -DBUILD_TESTS=Y -DCMAKE_BUILD_TYPE=Debug -DCMAKE_INSTALL_PREFIX=${CMAKE_BINARY_DIR} -DWITH_SCRIPTS=N ${CMAKE_SOURCE_DIR} COMMAND make install COMMAND ${CMAKE_COMMAND} -E copy_if_different ${CMAKE_SOURCE_DIR}/server/test/MaxScale_test.cnf ${CMAKE_BINARY_DIR}/etc/MaxScale.cnf COMMAND /bin/sh -c "valgrind --track-fds=yes --leak-check=full --show-leak-kinds=all --log-file=${CMAKE_BINARY_DIR}/valgrind.log ${CMAKE_BINARY_DIR}/bin/maxscale -c ${CMAKE_BINARY_DIR} &>/dev/null" @@ -257,6 +288,14 @@ add_custom_target(generate_pdf -P generate-pdf.cmake COMMENT "Generating PDF files" VERBATIM) +add_custom_target(generate_txt_release + COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_SOURCE_DIR}/Documentation ${CMAKE_BINARY_DIR}/Documentation + COMMAND ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/Documentation ${CMAKE_COMMAND} + -DBUILD_DIR=${CMAKE_BINARY_DIR} + -DCMAKE_MODULE_PATH=${CMAKE_MODULE_PATH} + -P generate-txt-release.cmake + COMMENT "Generating TXT release notes" VERBATIM) + add_custom_target(generate_html COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_SOURCE_DIR}/Documentation 
${CMAKE_BINARY_DIR}/Documentation diff --git a/Documentation/About/SETUP.md b/Documentation/About/SETUP.md index 8dd5dd718..d391b68fd 100644 --- a/Documentation/About/SETUP.md +++ b/Documentation/About/SETUP.md @@ -1,25 +1,25 @@ Installation and startup Untar the binary distribution in the desired location, -e.g. /usr/local/skysql +e.g. /usr/local/mariadb Alternatively build from the source code using the instructions in the README file and execute make install. Simply set the environment variable MAXSCALE_HOME to point to the MaxScale directory, found inside the path into which the files have been copied, -e.g. MAXSCALE_HOME=/usr/local/skysql/maxscale/MaxScale +e.g. MAXSCALE_HOME=/usr/local/mariadb-maxscale Also you will need to optionaly set LD_LIBRARY_PATH to include the 'lib' folder, found inside the path into which the files have been copied, -e.g. LD_LIBRARY_PATH=/usr/local/skysql/maxscale/lib +e.g. LD_LIBRARY_PATH=/usr/local/mariadb-maxscale/lib Because we need the libmysqld library for parsing we must create a valid my.cnf file to enable the library to be used. Copy the my.cnf to $MAXSCALE_HOME/mysql/my.cnf. To start MaxScale execute the command 'maxscale' from the bin folder, -e.g. /usr/local/skysql/maxscale/bin/maxscale +e.g. /usr/local/mariadb-maxscale/bin/maxscale Configuration @@ -30,8 +30,8 @@ various services. In order to view the internal activity of the gateway you can telnet to the port defined for the telnet listener. Initially you may login with -the user name of "admin" and the password "skysql". Once connected type +the user name of "admin" and the password "mariadb". Once connected type help for an overview of the commands and help for the more detailed help on commands. Use the add user command to add a new user, -this will also remove the admin/skysql user. +this will also remove the admin/mariadb user. 
diff --git a/Documentation/Changelog.md b/Documentation/Changelog.md new file mode 100644 index 000000000..eb4f17395 --- /dev/null +++ b/Documentation/Changelog.md @@ -0,0 +1,22 @@ +#Changelog + +These are the changes introduced in the next MaxScale version. This is not the official change log and the latest changelog can always be found in here: [MaxScale 1.1 Release Notes](Release-Notes/MaxScale-1.1-Release-Notes.md) + +**NOTE:** MaxScale default installation directory has changed to `/usr/local/mariadb-maxscale` and the default password for MaxAdmin is now ´mariadb´. + +* New modules added + * Binlog router + * Firewall filter + * Multi-Master monitor + * RabbitMQ logging filter +* Added option to use high precision timestamps in logging. +* Readwritesplit router now returns the master server's response. +* New readwritesplit router option added. It is now possible to control the amount of memory readwritesplit sessions will consume by limiting the amount of session modifying statements they can execute. +* Minimum required CMake version is now 2.8.12 for package building. +* Session idle timeout added for services. +* Monitor API is updated to 2.0.0. Monitors with earlier versions of the API no longer work with this version of MaxScale. +* MaxScale now requires libcurl and libcurl development headers. +* Nagios plugins added. +* Notification service added. +* Readconnrouter has a new "running" router_option. This allows it to use any running server as a valid backend server. +* Database names can be stripped of escape characters with the `strip_db_esc` service parameter. diff --git a/Documentation/Design-Documents/SchemaRouter-technical.md b/Documentation/Design-Documents/SchemaRouter-technical.md new file mode 100644 index 000000000..fc6e48f81 --- /dev/null +++ b/Documentation/Design-Documents/SchemaRouter-technical.md @@ -0,0 +1,23 @@ +#SchemaRouter Router - Technical Overview + +This document is designed with a developer's point-of-view in mind. 
It explains the lifecycle of the module and details about its internal workings. It refers to the source code which can be found at [GitHub](https://github.com/mariadb-corporation/MaxScale). + +## Source Files and Data Structures + +The schemarouter router consists of the schemarouter.h header file located in the `server/modules/include/` directory and the schemarouter.c file located in the `server/modules/routing/schemarouter` directory. This router implements the router interface defined in the router.h file. The entry points and structures this router uses can be found in the header file. The two main structures in use are the router instance and router session structures. The router instance structure is defined in `struct router_instance` and the router session structure in `struct router_client_session`. + +The definitions of the external functions and all the internal functions of the router can be found in the schemarouter.c file. + +## Router Lifecycle + +When MaxScale first starts, it creates all services and thus creates the router instances of all the routers. The functions involved in this stage are ModuleInit, which is called only once when MaxScale first starts, and createInstance, called for each individual instance of this router in all the configured services. These functions read configuration values and initialize internal data. + +When a user connects to MaxScale, a new session is created and the newSession function is called. At this point the client session connects to all the backend servers and initializes the list of databases. + +After the session is created queries are routed to the router's routeQuery function. This is where most of the work regarding the resolution of query destinations is done. This router parses the incoming buffers for full SQL packets first and routes each of them individually. 
The main internal functions involved in routing the query are get_shard_route_target (detects if a query needs to be sent to all the servers or to a specific one), get_shard_target_name (parses the query and finds the name of the right server) and route_session_write (handles sending and storing session commands). After this point the client's query has been sent to the backend server and the router waits for either a response or an error signaling that the backend server is not responding. + +If a response is received the clientReply function is called and the response is simply sent to the client and the router is then ready for more queries. If there is no response from the server and the connection to it is lost the handleError function is called. This function tries to find replacement servers for the failed ones and regenerates the list of databases. This also triggers the sending of an error packet to the client that notifies that the server is not responding. + +After the session ends the closeSession is called where the session is set to a closed state after which the freeSession is called where the final freeing of memory is done. After this point the router's session has gone through all the stages of its lifecycle. 
+ +![SchemaRouter Router Lifecycle](schemarouter-lifecycle.png) diff --git a/Documentation/Design-Documents/schemarouter-lifecycle.png b/Documentation/Design-Documents/schemarouter-lifecycle.png new file mode 100644 index 000000000..61bcf699e Binary files /dev/null and b/Documentation/Design-Documents/schemarouter-lifecycle.png differ diff --git a/Documentation/Documentation-Contents.md b/Documentation/Documentation-Contents.md index fb0f50492..56c8f30f2 100644 --- a/Documentation/Documentation-Contents.md +++ b/Documentation/Documentation-Contents.md @@ -5,7 +5,8 @@ ## About MaxScale - [About MaxScale](About/About-MaxScale.md) - - [Release Notes 1.0.4](About/MaxScale-1.0.4-Release-Notes.md) + - [Release Notes 1.1](Release-Notes/MaxScale-1.1-Release-Notes.md) + - [Changelog](Changelog.md) - [Limitations](About/Limitations.md) - [COPYRIGHT](About/COPYRIGHT.md) - [LICENSE](About/LICENSE.md) @@ -30,6 +31,7 @@ - [Filter Tutorial](Tutorials/Filter-Tutorial.md) - [Galera Cluster Connection Routing Tutorial](Tutorials/Galera-Cluster-Connection-Routing-Tutorial.md) - [Galera Cluster Read-Write Splitting Tutorial](Tutorials/Galera-Cluster-Read-Write-Splitting-Tutorial.md) + - [MaxScale Information Schema Tutorial](Tutorials/MaxScale-Informaton-Schema.md) - [MySQL Replication Connection Routing Tutorial](Tutorials/MySQL-Replication-Connection-Routing-Tutorial.md) - [MySQL Replication Read-Write Splitting Tutorial](Tutorials/MySQL-Replication-Read-Write-Splitting-Tutorial.md) - [MySQL Cluster Setup](Tutorials/MySQL-Cluster-Setup.md) @@ -42,17 +44,23 @@ - [Regex Filter](filters/Regex-Filter.md) - [Tee Filter](filters/Tee-Filter.md) - [Top N Filter](filters/Top-N-Filter.md) - - [Firewall Filter](filters/Firewall-Filter.md) + - [Database Firewall Filter](filters/Database-Firewall-Filter.md) - [RabbitMQ Filter](filters/RabbitMQ-Filter.md) ## Utilities - [RabbitMQ Consumer Client](filters/RabbitMQ-Consumer-Client.md) +## Routers + + - [Simple Schema Sharding 
Router](routers/schemarouter/SchemaRouter.md) + ## Design Documents - - [Session Commands design (in development)](http://mariadb-corporation.github.io/MaxScale/Design-Documents/) + - [Core Objects Design (in development)](http://mariadb-corporation.github.io/MaxScale/Design-Documents/core-objects-html-docs) + - [Binlog Router Design (in development)](http://mariadb-corporation.github.io/MaxScale/Design-Documents/binlog-router-html-docs) - [DCB States (to be replaced in StarUML)](Design-Documents/DCB-States.pdf) + - [Schema Sharding Router Technical Documentation](Design-Documents/SchemaRouter-technical.md) ## Earlier Release Notes diff --git a/Documentation/Getting-Started/Building-MaxScale-from-Source-Code.md b/Documentation/Getting-Started/Building-MaxScale-from-Source-Code.md index 66b178fa9..b5f43a658 100644 --- a/Documentation/Getting-Started/Building-MaxScale-from-Source-Code.md +++ b/Documentation/Getting-Started/Building-MaxScale-from-Source-Code.md @@ -12,87 +12,126 @@ You will need a number of tools and libraries in order to achieve this. * libedit 2.11 or later (used by the MaxAdmin tool) -The full list of dependencies for the most common distros is provided in the next section. +## Build dependencies + +The full list of dependencies for the most common distributions is provided in this section. If your system is not listed here, MaxScale building isn't guaranteed to be compatible but might still be successful. 
+ +### All RHEL, CentOS and Fedora versions + +``` +gcc gcc-c++ ncurses-devel bison glibc-devel cmake libgcc perl make libtool +openssl-devel libaio libaio-devel librabbitmq-devel +``` -### All RHEL, CentOS and Fedora versions: - gcc gcc-c++ ncurses-devel bison glibc-devel cmake libgcc perl make libtool - openssl-devel libaio libaio-devel librabbitmq-devel In addition, if you wish to to build an RPM package include: +``` rpm-build - +``` + #### RHEL 6, 7, CentOS 6, 7, Fedora: - libedit-devel + +``` +libedit-devel +``` #### RHEL 7, CentOS 7: - mariadb-devel mariadb-embedded-devel + +``` +mariadb-devel mariadb-embedded-devel +``` #### RHEL 5, 7, CentOS 5, 6, Fedora 19, 20 - MariaDB-devel MariaDB-server +MariaDB-devel MariaDB-server #### Fedora 19, 20 - systemtap-sdt-devel -### All Ubuntu and Debian versions: +``` +systemtap-sdt-devel +``` + +### All Ubuntu and Debian versions + +``` build-essential libssl-dev libaio-dev ncurses-dev bison cmake perl libtool librabbitmq-dev +``` + If you want to build a DEB package, you will also need: +``` dpkg-dev +``` #### Ubuntu 14.04 or later, Debian 8 (Jessie) or later + +``` libmariadbclient-dev libmariadbd-dev +``` #### Earlier versions of Ubuntu or Debian For these, you will need to obtain the MariaDB embedded library. It has to be manually extracted from the tarball. But first ascertain what version of glibc is installed. Run the command: +``` dpkg -l | grep libc6 +``` + which will show the version number. If the version is less than 2.14 you should obtain the library from: [https://downloads.mariadb.org/interstitial/mariadb-5.5.41/bintar-linux-x86_64/mariadb-5.5.41-linux-x86_64.tar.gz](https://downloads.mariadb.org/interstitial/mariadb-5.5.41/bintar-linux-x86_64/mariadb-5.5.41-linux-x86_64.tar.gz). 
Otherwise, from: [https://downloads.mariadb.org/interstitial/mariadb-5.5.41/bintar-linux-glibc_214-x86_64/mariadb-5.5.41-linux-glibc_214-x86_64.tar.gz](https://downloads.mariadb.org/interstitial/mariadb-5.5.41/bintar-linux-glibc_214-x86_64/mariadb-5.5.41-linux-glibc_214-x86_64.tar.gz) -The suggested location for extracting the tarball is /usr so the operation can be done by the following commands: +The suggested location for extracting the tarball is `/usr` so the operation can be done by the following commands: +``` cd /usr tar -xzvf /path/to/mariadb.library.tar.gz +``` + where /path/to/mariadb.library.tar.gz is replaced by the actual path and name of the downloaded tarball. ### OpenSUSE -(mariadb-devel package, build fails????) +At the time this guide was written, the MariaDB development packages for OpenSUSE were broken and the build failed. The packages required are: - gcc gcc-c++ ncurses-devel bison glibc-devel cmake libgcc_s1 perl - make libtool libopenssl-devel libaio libaio-devel - libedit-devel librabbitmq-devel +``` +gcc gcc-c++ ncurses-devel bison glibc-devel cmake libgcc_s1 perl +make libtool libopenssl-devel libaio libaio-devel +libedit-devel librabbitmq-devel MariaDB-devel MariaDB-client MariaDB-server -(if zypper ask which MariaDB client should be installed 'MariaDB-client' or 'mariadb-client' - please select 'MariaDB-client') +``` +If zypper ask which MariaDB client should be installed `MariaDB-client` or `mariadb-client` + please select `MariaDB-client`. This is the package provided by the MariaDB repository. ##Obtaining the MaxScale Source Code + Now clone the GitHub project to your machine either via the web interface, your favorite graphical interface or the git command line - $ git clone https://github.com/mariadb-corporation/MaxScale - Cloning into 'MaxScale'... - remote: Counting objects: 16228, done. - ... +``` +$ git clone https://github.com/mariadb-corporation/MaxScale +Cloning into 'MaxScale'... +remote: Counting objects: 16228, done. 
+... +``` -Change directory to the MaxScale directory, create a build directory and change directory to that build directory +Change directory to the `MaxScale` directory, create a build directory and change directory to that build directory + +``` +$ cd MaxScale +$ mkdir build +$ cd build +``` - $ cd MaxScale - $ mkdir build - $ cd build - The next step is to run the cmake command to build the Makefile you need to compile Maxscale. There are a number of options you may give to configure cmake and point it to the various packages it requires. In this example we will assume the MariaDB developer packages have been installed as described above and set all the options required to locate these, along with options to build the unit tests and configure the installation target directory. If you run into any trouble while configuring CMake, you can always remove the -'CMakeCache.txt' file to clear CMake's internal cache. This resets all values to their -defaults and can be used to fix a 'stuck' configuration of CMake. This is also a good -reason why you should always build into a separate directory, because you can safely +`CMakeCache.txt` file to clear CMake's internal cache. This resets all values to their +defaults and can be used quickly force a reconfiguration of CMake variables. There is also a make target, `make rebuild_cache`, that cleans the CMake cache. +This is also a good reason why you should always build into a separate directory, because you can safely wipe the build directory clean without the danger of deleting important files when something goes wrong. Building 'out-of-source' also allows you to have multiple configurations of MaxScale at the same time. @@ -103,88 +142,96 @@ variables manually at configuration time. 
To display all CMake variables with their descriptions: - cmake -LH +``` +cmake -LH +``` When you are ready to run cmake: - $ cmake -DMYSQL\_DIR=/usr/mariadb-5.5.41-linux-x86_64/include/mysql \ - -DEMBEDDED\_LIB=/usr/mariadb-5.5.41-linux-x86\_64/lib/libmysqld.a \ - -DMYSQLCLIENT\_LIBRARIES=/usr/mariadb-5.5.41-linux-x86_64/lib/libmysqlclient.so \ - -DERRMSG=/usr/mariadb-5.5.41-linux-x86\_64/share/english/errmsg.sys \ - -DINSTALL\_DIR=/home/maxscale/MaxScale -DBUILD_TESTS=Y \ - -DINSTALL\_SYSTEM\_FILES=N \ - -DBUILD_BINLOG=Y .. -DBUILD_RABBITMQ=N/ - +``` +$ cmake -DMYSQL_DIR=/usr/mariadb-5.5.41-linux-x86_64/include/mysql \ +-DEMBEDDED_LIB=/usr/mariadb-5.5.41-linux-x86_64/lib/libmysqld.a \ +-DMYSQLCLIENT_LIBRARIES=/usr/mariadb-5.5.41-linux-x86_64/lib/libmysqlclient.so \ +-DERRMSG=/usr/mariadb-5.5.41-linux-x86_64/share/english/errmsg.sys \ +-DCMAKE_INSTALL_PREFIX=/home/maxscale/MaxScale -DBUILD_TESTS=Y \ +-DWITH_SCRIPTS=N +
-    -- CMake version: 2.8.12.2
-    -- The C compiler identification is GNU 4.4.7
-    -- The CXX compiler identification is GNU 4.4.7
-    -- Check for working C compiler: /usr/bin/cc
-    -- Check for working C compiler: /usr/bin/cc -- works
-    -- Detecting C compiler ABI info
-    -- Detecting C compiler ABI info - done
-    -- Check for working CXX compiler: /usr/bin/c++
-    -- Check for working CXX compiler: /usr/bin/c++ -- works
-    -- Detecting CXX compiler ABI info
-    -- Detecting CXX compiler ABI info - done
-    -- Library was found at: /lib64/libaio.so
-    -- Library was found at: /usr/lib64/libssl.so
-    -- Library was found at: /usr/lib64/libcrypt.so
-    -- Library was found at: /usr/lib64/libcrypto.so
-    -- Library was found at: /usr/lib64/libz.so
-    -- Library was found at: /usr/lib64/libm.so
-    -- Library was found at: /usr/lib64/libdl.so
-    -- Library was found at: /usr/lib64/librt.so
-    -- Library was found at: /usr/lib64/libpthread.so
-    -- Using errmsg.sys found at: /home/maxscale/usr/share/mysql/english/errmsg.sys
-    -- Using embedded library: /home/mpinto/usr/lib64/libmysqld.a
-    -- Valgrind found: /usr/bin/valgrind
-    -- Found dynamic MySQL client library: /home/maxscale/usr/lib64/libmysqlclient.so
-    -- Found static MySQL client library: /usr/lib/libmysqlclient.a
-    -- C Compiler supports: -Werror=format-security
-    -- Linking against: /home/mpinto/usr/lib64/libmysqlclient.so
-    -- Installing MaxScale to: /usr/local/maxscale/
-    -- Generating RPM packages
-    -- Found Doxygen: /usr/bin/doxygen (found version "1.6.1") 
-    -- Configuring done
-    -- Generating done
-    -- Build files have been written to: /home/maxscale/develop/build
+-- CMake version: 2.8.12.2
+-- The C compiler identification is GNU 4.4.7
+-- The CXX compiler identification is GNU 4.4.7
+-- Check for working C compiler: /usr/bin/cc
+-- Check for working C compiler: /usr/bin/cc -- works
+-- Detecting C compiler ABI info
+-- Detecting C compiler ABI info - done
+-- Check for working CXX compiler: /usr/bin/c++
+-- Check for working CXX compiler: /usr/bin/c++ -- works
+-- Detecting CXX compiler ABI info
+-- Detecting CXX compiler ABI info - done
+-- Library was found at: /lib64/libaio.so
+-- Library was found at: /usr/lib64/libssl.so
+-- Library was found at: /usr/lib64/libcrypt.so
+-- Library was found at: /usr/lib64/libcrypto.so
+-- Library was found at: /usr/lib64/libz.so
+-- Library was found at: /usr/lib64/libm.so
+-- Library was found at: /usr/lib64/libdl.so
+-- Library was found at: /usr/lib64/librt.so
+-- Library was found at: /usr/lib64/libpthread.so
+-- Using errmsg.sys found at: /home/maxscale/usr/share/mysql/english/errmsg.sys
+-- Using embedded library: /home/mpinto/usr/lib64/libmysqld.a
+-- Valgrind found: /usr/bin/valgrind
+-- Found dynamic MySQL client library: /home/maxscale/usr/lib64/libmysqlclient.so
+-- Found static MySQL client library: /usr/lib/libmysqlclient.a
+-- C Compiler supports: -Werror=format-security
+-- Linking against: /home/mpinto/usr/lib64/libmysqlclient.so
+-- Installing MaxScale to: /usr/local/maxscale/
+-- Generating RPM packages
+-- Found Doxygen: /usr/bin/doxygen (found version "1.6.1") 
+-- Configuring done
+-- Generating done
+-- Build files have been written to: /home/maxscale/develop/build
+```
 
 Once the cmake command is complete simply run make to build the MaxScale binaries.
 
-    $ make
-  
+```
+$ make
+
 
-    **Scanning dependencies of target utils**
-    [  1%] Building CXX object utils/CMakeFiles/utils.dir/skygw_utils.cc.o
-    **Linking CXX static library libutils.a**
-    [  1%] Built target utils
-    **Scanning dependencies of target log_manager**
-    [  2%] Building CXX object log_manager/CMakeFiles/log_manager.dir/log_manager.cc.o
-    ...
+**Scanning dependencies of target utils**
+[  1%] Building CXX object utils/CMakeFiles/utils.dir/skygw_utils.cc.o
+**Linking CXX static library libutils.a**
+[  1%] Built target utils
+**Scanning dependencies of target log_manager**
+[  2%] Building CXX object log_manager/CMakeFiles/log_manager.dir/log_manager.cc.o
+...
 
+``` After the completion of the make process the installation can be achieved by running the make install target. - $ make install - ... +``` +$ make install +... +``` -This will result in an installation being created which is identical to that which would be achieved by installing the binary package. The only difference is that init.d scripts aren't installed and the RabbitMQ components are not built. +This will result in an installation being created which is identical to that which would be achieved by installing the binary package. The only difference is that init.d scripts aren't installed. -By default, MaxScale installs to '/usr/local/skysql/maxscale' and places init.d scripts and ldconfig files into their folders. Change the CMAKE_INSTALL_PREFIX variable to your desired installation directory and set INSTALL_SYSTEM_FILES=N to prevent the init.d script and ldconfig file installation. +By default, MaxScale installs to `/usr/local/mariadb-maxscale` and places init.d scripts and ldconfig files into their folders. Change the `CMAKE_INSTALL_PREFIX` variable to your desired installation directory and set `WITH_SCRIPTS=N` to prevent the init.d script and ldconfig file installation. Other useful targets for Make are `documentation`, which generates the Doxygen documentation, and `uninstall` which uninstall MaxScale binaries after an install. ## Running the MaxScale testsuite -To run "make testall" you need to have four mysqld servers running on localhost. It assumes a master-slave replication setup with one slave and three slaves. +MaxScale has a core test suite for internal components and an extended suite of test for modules. To run the core tests, run `make testcore`. This will test the core maxscale executable. -The ports to which these servers are listening and the credentials to use for testing - can be specified in the 'macros.cmake' file. +To run `make testall`, the full test suite, you need to have four mysqld servers running on localhost. 
It assumes a master-slave replication setup with one slave and three slaves. -On the master full privileges on the databases "test" and "FOO" are needed, on the saves SELECT permissions on test.* should be sufficient. +The ports to which these servers are listening and the credentials to use for testing can be specified in the `macros.cmake` file found in the root source folder. -When you run the 'make testall' target after configuring the build with CMake a local version of MaxScale is installed into the build folder. After this a MaxScale instance is started and the test set is executed. +On the master full privileges on the databases `test` are needed, on the slaves `SELECT` permissions on `test.*` should be sufficient. -After testing has finished you can find a full testlog generated by CTest in Testing/Temporary/ directory and MaxScale's log files in the log/ directory of the build root. +When you run the `make testall` target after configuring the build with CMake a local version of MaxScale is installed into the build folder. After this a MaxScale instance is started and the test set is executed. + +After testing has finished you can find a full testlog generated by CTest in `Testing/Temporary/` directory and MaxScale's log files in the `log/` directory of the build root. diff --git a/Documentation/Getting-Started/Configuration-Guide.md b/Documentation/Getting-Started/Configuration-Guide.md index 893c937d8..350b943a0 100644 --- a/Documentation/Getting-Started/Configuration-Guide.md +++ b/Documentation/Getting-Started/Configuration-Guide.md @@ -1,30 +1,29 @@ -MaxScale -Configuration & Usage Scenarios +# MaxScale Configuration & Usage Scenarios -# Introduction +## Introduction -The purpose of this document is to describe how to configure MaxScale and to discuss some possible usage scenarios for MaxScale. 
MaxScale is designed with flexibility in mind, and consists of an event processing core with various support functions and plugin modules that tailor the behaviour of the MaxScale itself. +The purpose of this document is to describe how to configure MaxScale and to discuss some possible usage scenarios for MaxScale. MaxScale is designed with flexibility in mind, and consists of an event processing core with various support functions and plugin modules that tailor the behaviour of the MaxScale itself. -## Terms +### Terms - Term | Description --------------------|------------------ - service | A service represents a set of databases with a specific access mechanism that is offered to clients of MaxScale. The access mechanism defines the algorithm that MaxScale will use to direct particular requests to the individual databases. - server | A server represents an individual database server to which a client can be connected via MaxScale. - router | A router is a module within MaxScale that will route client requests to the various database servers which MaxScale provides a service interface to. -connection routing | Connection routing is a method of handling requests in which MaxScale will accept connections from a client and route data on that connection to a single database using a single connection. Connection based routing will not examine individual requests on a connection and it will not move that connection once it is established. -statement routing | Statement routing is a method of handling requests in which each request within a connection will be handled individually. Requests may be sent to one or more servers and connections may be dynamically added or removed from the session. - protocol | A protocol is a module of software that is used to communicate with another software entity within the system. MaxScale supports the dynamic loading of protocol modules to allow for increased flexibility. 
- module | A module is a separate code entity that may be loaded dynamically into MaxScale to increase the available functionality. Modules are implemented as run-time loadable shared objects. - monitor | A monitor is a module that can be executed within MaxScale to monitor the state of a set of database. The use of an internal monitor is optional, monitoring may be performed externally to MaxScale. - listener | A listener is the network endpoint that is used to listen for connections to MaxScale from the client applications. A listener is associated to a single service, however a service may have many listeners. -connection failover| When a connection currently being used between MaxScale and the database server fails a replacement will be automatically created to another server by MaxScale without client intervention - backend database | A term used to refer to a database that sits behind MaxScale and is accessed by applications via MaxScale. - filter | A module that can be placed between the client and the MaxScale router module. All client data passes through the filter module and may be examined or modified by the filter modules. Filters may be chained together to form processing pipelines. +| Term | Description +------------------- | ------------------ + service | A service represents a set of databases with a specific access mechanism that is offered to clients of MaxScale. The access mechanism defines the algorithm that MaxScale will use to direct particular requests to the individual databases. + server | A server represents an individual database server to which a client can be connected via MaxScale. + router | A router is a module within MaxScale that will route client requests to the various database servers which MaxScale provides a service interface to. 
+connection routing | Connection routing is a method of handling requests in which MaxScale will accept connections from a client and route data on that connection to a single database using a single connection. Connection based routing will not examine individual requests on a connection and it will not move that connection once it is established. +statement routing | Statement routing is a method of handling requests in which each request within a connection will be handled individually. Requests may be sent to one or more servers and connections may be dynamically added or removed from the session. + protocol | A protocol is a module of software that is used to communicate with another software entity within the system. MaxScale supports the dynamic loading of protocol modules to allow for increased flexibility. + module | A module is a separate code entity that may be loaded dynamically into MaxScale to increase the available functionality. Modules are implemented as run-time loadable shared objects. + monitor | A monitor is a module that can be executed within MaxScale to monitor the state of a set of database. The use of an internal monitor is optional, monitoring may be performed externally to MaxScale. + listener | A listener is the network endpoint that is used to listen for connections to MaxScale from the client applications. A listener is associated to a single service, however a service may have many listeners. +connection failover| When a connection currently being used between MaxScale and the database server fails a replacement will be automatically created to another server by MaxScale without client intervention + backend database | A term used to refer to a database that sits behind MaxScale and is accessed by applications via MaxScale. + filter | A module that can be placed between the client and the MaxScale router module. All client data passes through the filter module and may be examined or modified by the filter modules. 
Filters may be chained together to form processing pipelines. -# Configuration +## Configuration The MaxScale configuration is read from a file which can be located in a number of placing, MaxScale will search for the configuration file in a number of locations. @@ -38,11 +37,11 @@ An explicit path to a configuration file can be passed by using the `-f` option The configuration file itself is based on the ".ini" file format and consists of various sections that are used to build the configuration, these sections define services, servers, listeners, monitors and global settings. -## Global Settings +### Global Settings The global settings, in a section named `[MaxScale]`, allow various parameters that affect MaxScale as a whole to be tuned. Currently the only setting that is supported is the number of threads to use to handle the network traffic. MaxScale will also accept the section name of `[gateway]` for global settings. This is for backward compatibility with versions prior to the naming of MaxScale. -### `threads` +#### `threads` To control the number of threads that poll for network traffic set the parameter threads to a number. It is recommended that you start with a single thread and add more as you find the performance is not satisfactory. MaxScale is implemented to be very thread efficient, so a small number of threads is usually adequate to support reasonably heavy workloads. Adding more threads may not improve performance and can consume resources needlessly. @@ -56,7 +55,17 @@ threads=1 It should be noted that additional threads will be created to execute other internal services within MaxScale. This setting is used to configure the number of threads that will be used to manage the user connections. -### `log_messages` +#### `ms_timestamp` + +Enable or disable the high precision timestamps in logfiles. Enabling this adds millisecond precision to all logfile timestamps. 
+ +``` +# Valid options are: +# ms_timestamp=<0|1> +ms_timestamp=1 +``` + +#### `log_messages` Enable or disable logging of status messages. This logfile is enabled by default and contains information about the modules MaxScale is using and details about the configuration. @@ -68,7 +77,7 @@ log_messages=1 To disable the log use the value 0 and to enable it use the value 1. -### `log_trace` +#### `log_trace` Enable or disable logging of tracing messages. This logfile is disabled by default due to the verbose nature of it. It contains information about the internal logic of MaxScale and the modules it is using. The trace log can be used to find out the reasons why some actions were done e.g routing a query to a master instead of a slave. @@ -80,7 +89,7 @@ log_trace=1 To disable the log use the value 0 and to enable it use the value 1. -### `log_debug` +#### `log_debug` Enable or disable logging of debugging messages. This logfile is disabled by default since it contains information only useful to the developers. @@ -92,7 +101,7 @@ log_debug=1 To disable the log use the value 0 and to enable it use the value 1. -## Service +### Service A service represents the database service that MaxScale offers to the clients. In general a service consists of a set of backend database servers and a routing algorithm that determines how MaxScale decides to send statements or route connections to those backend servers. @@ -109,7 +118,7 @@ type=service In order for MaxScale to forward any requests it must have at least one service defined within the configuration file. The definition of a service alone is not enough to allow MaxScale to forward requests however, the service is merely present to link together the other configuration elements. -### `router` +#### `router` The router parameter of a service defines the name of the router module that will be used to implement the routing algorithm between the client of MaxScale and the backend databases. 
Additionally routers may also be passed a comma separated list of options that are used to control the behaviour of the routing algorithm. The two parameters that control the routing choice are router and router_options. The router options are specific to a particular router and are used to modify the behaviour of the router. The read connection router can be passed options of master, slave or synced, an example of configuring a service to use this router and limiting the choice of servers to those in slave state would be as follows. @@ -125,9 +134,9 @@ router=readconnroute router_options=master,slave ``` -A more complete description of router options and what is available for a given router is included with the documentation of the router itself. +A more complete description of router options and what is available for a given router is included with the documentation of the router itself. -### `filters` +#### `filters` The filters option allow a set of filters to be defined for a service; requests from the client are passed through these filters before being sent to the router for dispatch to the backend server. The filters parameter takes one or more filter names, as defined within the filter definition section of the configuration file. Multiple filters are separated using the | character. @@ -137,7 +146,7 @@ filters=counter | QLA The requests pass through the filters from left to right in the order defined in the configuration parameter. -### `servers` +#### `servers` The servers parameter in a service definition provides a comma separated list of the backend servers that comprise the service. The server names are those used in the name section of a block with a type parameter of server (see below). 
@@ -145,7 +154,7 @@ The servers parameter in a service definition provides a comma separated list of servers=server1,server2,server3 ``` -### `user` +#### `user` The user parameter, along with the passwd parameter are used to define the credentials used to connect to the backend servers to extract the list of database users from the backend database that is used for the client authentication. @@ -182,7 +191,7 @@ MariaDB [(none)]> GRANT SHOW DATABASES ON *.* TO 'username'@'maxscalehost'; Query OK, 0 rows affected (0.00 sec) ``` -### `passwd` +#### `passwd` The passwd parameter provides the password information for the above user and may be either a plain text password or it may be an encrypted password. See the section on encrypting passwords for use in the MaxScale.cnf file. This user must be capable of connecting to the backend database and executing these SQL statements to load database names and grants from the backends: @@ -191,7 +200,7 @@ The passwd parameter provides the password information for the above user and ma * `SELECT * FROM INFORMATION_SCHEMA.SCHEMATA` * `SELECT GRANTEE,PRIVILEGE_TYPE FROM INFORMATION_SCHEMA.USER_PRIVILEGES` -### `enable_root_user` +#### `enable_root_user` This parameter controls the ability of the root user to connect to MaxScale and hence onwards to the backend servers via MaxScale. @@ -209,15 +218,15 @@ Values of `on` or `true` may also be given to enable the root user and `off` or enable_root_user=true ``` -### `localhost_match_wildcard_host` +#### `localhost_match_wildcard_host` This parameter enables matching of "127.0.0.1" (localhost) against "%" wildcard host for MySQL protocol authentication. The default value is `0`, so in order to authenticate a connection from the same machine as the one on which MaxScale is running, an explicit user@localhost entry will be required in the MySQL user table. 
-### `version_string` +#### `version_string` This parameter sets a custom version string that is sent in the MySQL Handshake from MaxScale to clients. -Example: +Example: ``` version_string=5.5.37-MariaDB-RWsplit @@ -225,7 +234,7 @@ version_string=5.5.37-MariaDB-RWsplit If not set, the default value is the server version of the embedded MySQL/MariaDB library. Example: 5.5.35-MariaDB -### `weightby` +#### `weightby` The weightby parameter is used in conjunction with server parameters in order to control the load balancing applied in the router in use by the service. This allows varying weights to be applied to each server to create a non-uniform distribution of the load amongst the servers. @@ -237,15 +246,35 @@ serversize=10 The service would then have the parameter weightby set. If there are 4 servers defined in the service, serverA, serverB, serverC and serverD, with the serversize set as shown in the table below, the connections would balanced using the percentages in this table. - Server |serversize|% connections + Server |serversize|% connections ---------|----------|------------- -serverA | 10 | 18% -serverB | 15 | 27% -serverC | 10 | 18% +serverA | 10 | 18% +serverB | 15 | 27% +serverC | 10 | 18% serverD | 20 | 36% +#### `auth_all_servers` -## Server +This parameter controls whether only a single server or all of the servers are used when loading the users from the backend servers. This takes a boolean value and when enabled, creates a union of all the users and grants on all the servers. + +#### `strip_db_esc` + +The strip_db_esc parameter strips escape characters from database names of grants when loading the users from the backend server. Some visual database management tools automatically escape some characters and this might cause conflicts when MaxScale tries to authenticate users. + +This parameter takes a boolean value and when enabled, will strip all `\` characters from the database names. 
+ +#### `connection_timeout` + +The connection_timeout parameter is used to disconnect sessions to MaxScale that have been idle for too long. The session timeouts are disabled by default. To enable them, define the timeout in seconds in the service's configuration section. + +Example: + +``` +[Test Service] +connection_timeout=300 +``` + +### Server Server sections are used to define the backend database servers that can be formed into a service. A server may be a member of one or more services within MaxScale. Servers are identified by a server name which is the section name in the configuration file. Servers have a type parameter of server, plus address port and protocol parameters. @@ -257,19 +286,19 @@ port=3000 protocol=MySQLBackend ``` -### `address` +#### `address` The IP address or hostname of the machine running the database server that is being defined. MaxScale will use this address to connect to the backend database server. -### `port` +#### `port` The port on which the database listens for incoming connections. MaxScale will use this port to connect to the database server. -### `protocol` +#### `protocol` The name for the protocol module to use to connect MaxScale to the database. Currently only one backend protocol is supported, the MySQLBackend module. 
-### `monitoruser` +#### `monitoruser` The monitor has a username and password that is used to connect to all servers for monitoring purposes, this may be overridden by supplying a monitoruser statement for each individual server @@ -277,7 +306,7 @@ The monitor has a username and password that is used to connect to all servers f monitoruser=mymonitoruser ``` -### `monitorpw` +#### `monitorpw` The monitor has a username and password that is used to connect to all servers for monitoring purposes, this may be overridden by supplying a monpasswd statement for the individual servers @@ -285,45 +314,45 @@ The monitor has a username and password that is used to connect to all servers f monitorpw=mymonitorpasswd ``` -The monpasswd parameter may be either a plain text password or it may be an encrypted password. See the section on encrypting passwords for use in the MaxScale.cnf file. +The monpasswd parameter may be either a plain text password or it may be an encrypted password. See the section on encrypting passwords for use in the MaxScale.cnf file. -## Listener +### Listener The listener defines a port and protocol pair that is used to listen for connections to a service. A service may have multiple listeners associated with it, either to support multiple protocols or multiple ports. As with other elements of the configuration the section name is the listener name and it can be selected freely. A type parameter is used to identify the section as a listener definition. Address is optional and it allows the user to limit connections to certain interface only. Socket is also optional and used for Unix socket connections. ``` [] type=listener -service=] -protocol=[MySQLClient|HTTPD] -address=[IP|hostname] -port= +service=] +protocol=[MySQLClient|HTTPD] +address=[IP|hostname] +port= socket= ``` -### `service` +#### `service` The service to which the listener is associated. This is the name of a service that is defined elsewhere in the configuration file. 
-### `protocol` +#### `protocol` The name of the protocol module that is used for the communication between the client and MaxScale itself. -### `address` +#### `address` The address option sets the address that will be used to bind the listening socket. The address may be specified as an IP address in 'dot notation' or as a hostname. If the address option is not included in the listener definition the listener will bind to all network interfaces. -### `port` +#### `port` The port to use to listen for incoming connections to MaxScale from the clients. If the port is omitted from the configuration a default port for the protocol will be used. -### `socket` +#### `socket` The `socket` option may be included in a listener definition, this configures the listener to use Unix domain sockets to listen for incoming connections. The parameter value given is the name of the socket to use. If a socket option and an address option is given then the listener will listen on both the specific IP address and the Unix socket. -## Filter +### Filter Filters provide a means to manipulate or process requests as they pass through MaxScale between the client side protocol and the query router. A filter should be defined in a section with a type of filter. @@ -347,19 +376,19 @@ passwd=6628C50E07CCE1F0392EDEEB9D1203F3 filters=QLA ``` -![image alt text](images/image_10.png) +![image alt text](images/image_10.png) See the Services section for more details on how to configure the various options of a service. Note that some filters require parsing of the statement which makes them compatible with statement-based routers only, such as Read/Write Split router. -### `module` +#### `module` The module parameter defines the name of the loadable module that implements the filter. -### `options` +#### `options` The options parameter is used to pass options to the filter to control the actions the filter will perform. 
The values that can be passed differ between filter implementation, the inclusion of an options parameter is optional. -### Other Parameters +#### Other Parameters Any other parameters present in the filters section will be passed to the filter to be interpreted by the filter. An example of this is the regexfilter that requires the two parameters `match` and `replace`: @@ -371,7 +400,7 @@ match=form replace=from ``` -## Monitor +### Monitor In order for the various router modules to function correctly they require information about the state of the servers that are part of the service they provide. MaxScale has the ability to internally monitor the state of the back-end database servers or that state may be feed into MaxScale from external monitoring systems. If automated monitoring and failover of services is required this is achieved by running a monitor module that is designed for the particular database architecture that is in use. @@ -408,29 +437,29 @@ backend_write_timeout=2 disable_master_failback=0 ``` -### `module` +#### `module` -The module parameter defines the name of the loadable module that implements the monitor. This module is loaded and executed on a separate thread within MaxScale. +The module parameter defines the name of the loadable module that implements the monitor. This module is loaded and executed on a separate thread within MaxScale. -### `servers` +#### `servers` The servers parameter is a comma separated list of server names to monitor, these are the names defined elsewhere in the configuration file. The set of servers monitored by a single monitor need not be the same as the set of servers used within any particular server, a single monitor instance may monitor servers in multiple servers. -### `user` +#### `user` The user parameter defines the username that the monitor will use to connect to the monitored databases. 
Depending on the monitoring module used this user will require specific privileges in order to determine the state of the nodes, details of those privileges can be found in the sections on each of the monitor modules. Individual servers may define override values for the user and password the monitor uses by setting the monuser and monpasswd parameters in the server section. -### `passwd` +#### `passwd` -The password parameter may be either a plain text password or it may be an encrypted password. See the section on encrypting passwords for use in the `MaxScale.cnf` file. +The password parameter may be either a plain text password or it may be an encrypted password. See the section on encrypting passwords for use in the `MaxScale.cnf` file. -### `monitor_interval` +#### `monitor_interval` The monitor_interval parameter sets the sampling interval in milliseconds for each monitor, the default value is 10000 milliseconds. -### `detect_replication_lag` +#### `detect_replication_lag` This options if set to 1 will allow MySQL monitor to collect the replication lag among all configured slaves by checking the content of `maxscale_schema.replication_heartbeat` table. The master server writes in and slaves fetch a UNIX timestamp from that there. @@ -444,7 +473,7 @@ A specific grant for the monitor user might be required in order to create schem This monitor option is not enabled by default. -### `detect_stale_master` +#### `detect_stale_master` This options if set to 1 will allow MySQL monitor to select the previous selected Master for next operations even if no slaves at all are found by the monitor polling. @@ -458,7 +487,7 @@ If MaxScale or monitor is restarted and the Replication is still not configured This option is not enabled by default and should be used at the administrator risk. 
-### `disable_master_failback` +#### `disable_master_failback` This option if set to 1 will allow Galera monitor to keep the existing selected master even if another node, after joining back the cluster may be selected as candidate master. @@ -468,65 +497,65 @@ By default, if a node takes a lower index than the current master one the monito The server status field may have the `SERVER_MASTER_STICKINESS` bit, meaning the current master selection is not based on the available rules but it's the one previously selected and then kept, accordingly to option value equal 1. -Anyway, a new master will be selected in case of current master failure, regardless the option value. +Anyway, a new master will be selected in case of current master failure, regardless the option value. -### `backend_connect_timeout` +#### `backend_connect_timeout` This option, with default value of `3` sets the monitor connect timeout to backends. -### `backend_read_timeout` +#### `backend_read_timeout` Default value is `1`. Read Timeout is the timeout in seconds for each attempt to read from the server. There are retries if necessary, so the total effective timeout value is three times the option value. That's for `mysql_real_connect` C API. -### `backend_write_timeout` +#### `backend_write_timeout` Default value is `2`. Write Timeout is the timeout in seconds for each attempt to write to the server. There is a retry if necessary, so the total effective timeout value is two times the option value. That's for `mysql_real_connect` C API. -# Protocol Modules +## Protocol Modules -The protocols supported by MaxScale are implemented as external modules that are loaded dynamically into the MaxScale core. These modules reside in the directory `$MAXSCALE_HOME/modules`, if the environment variable `$MAXSCALE_HOME` is not set it defaults to `/usr/local/skysql/MaxScale`. It may also be set by passing the `-c` option on the MaxScale command line. 
+The protocols supported by MaxScale are implemented as external modules that are loaded dynamically into the MaxScale core. These modules reside in the directory `$MAXSCALE_HOME/modules`, if the environment variable `$MAXSCALE_HOME` is not set it defaults to `/usr/local/mariadb-maxscale`. It may also be set by passing the `-c` option on the MaxScale command line. -## MySQLClient +### MySQLClient This is the implementation of the MySQL protocol that is used by clients of MaxScale to connect to MaxScale. -## MySQLBackend +### MySQLBackend The MySQLBackend protocol module is the implementation of the protocol that MaxScale uses to connect to the backend MySQL, MariaDB and Percona Server databases. This implementation is tailored for the MaxScale to MySQL Database traffic and is not a general purpose implementation of the MySQL protocol. -## telnetd +### telnetd The telnetd protocol module is used for connections to MaxScale itself for the purposes of creating interactive user sessions with the MaxScale instance itself. Currently this is used in conjunction with a special router implementation, the debugcli. -## maxscaled +### maxscaled The protocol used by the maxadmin client application in order to connect to MaxScale and access the command line interface. -## HTTPD +### HTTPD This protocol module is currently still under development, it provides a means to create HTTP connections to MaxScale for use by web browsers or RESTful API clients. -# Router Modules +## Router Modules The main task of MaxScale is to accept database connections from client applications and route the connections or the statements sent over those connections to the various services supported by MaxScale. There are two flavours of routing that MaxScale can perform, connection based routing and statement based routing. These each have their own characteristics and costs associated with them. 
-## Connection Based Routing +### Connection Based Routing Connection based routing is a mechanism by which MaxScale will, for each incoming connection decide on an appropriate outbound server and will forward all statements to that server without examining the internals of the statement. Once an inbound connection is associated to a particular backend database it will remain connected to that server until the connection is closed or the server fails. The Read Connection Router is an example of connection-based routing. -## Statement Based Routing +### Statement Based Routing -Statement based routing is somewhat different, the routing modules examine every statement the client sends and determines, on a per statement basis, which of the set of backend servers in the service is best to execute the statement. This gives better dynamic balancing of the load within the cluster but comes at a cost. The query router must understand the statement that is being routed and may have to parse the statement in order to achieve this. +Statement based routing is somewhat different, the routing modules examine every statement the client sends and determines, on a per statement basis, which of the set of backend servers in the service is best to execute the statement. This gives better dynamic balancing of the load within the cluster but comes at a cost. The query router must understand the statement that is being routed and may have to parse the statement in order to achieve this. Parsing within the router adds overhead to the cost of routing and makes this type of router best suitable for loads in which the gains outweigh this added cost. The added cost from statement parsing also gives the possibility to create and use new type of filters which are based on statement processing. In contrast to the added processing cost, statement-based routing may increase the performance of the cluster by offloading statements away from the master when possible. 
-## Available Routing Modules +### Available Routing Modules Currently a small number of query routers are available, these are in different stages of completion and offer different facilities. -### Readconnroute +#### Readconnroute This is a connection based query router that was originally targeted at environments in which the clients already performed splitting of read and write queries into separate connections. @@ -534,7 +563,7 @@ Whenever a new connection is received the router will examine the state of all t The read connection router can be configured to balance the connections from the clients across all the backend servers that are running, just those backend servers that are currently replication slaves or those that are replication masters when routing to a master slave replication environment. When a Galera cluster environment is in use the servers can be filtered to just the set that are part of the cluster and in the _Synced_ state. These options are configurable via the router_options that can be set within a service. The `router_option` values supported are `master`, `slave` and `synced`. -#### Master/Slave Replication Setup +##### Master/Slave Replication Setup To set up MaxScale to route connections evenly between all the current slave servers in a replication cluster, a service entry of the form shown below is required: @@ -586,7 +615,7 @@ Connections to port 4007 would automatically be directed to the server that is t In order for MaxScale to be able to determine the state of these servers the **mysqlmon** monitor module should be run against the set of servers that comprise the service. -#### Galera Cluster Configuration for Read Connection router +##### Galera Cluster Configuration for Read Connection router Although not primarily designed for a multi-master replication setup, it is possible to use **readconnroute** in this situation. The **readconnroute** connection router can be used to balance the connections across a Galera cluster. 
A special monitor is available that detects if nodes are joined to a Galera Cluster, with the addition of a router option to only route connections to nodes marked as synced. MaxScale can ensure that users are never connected to a node that is not a full cluster member. @@ -629,7 +658,7 @@ router=readconnroute router_options=slave ``` -#### MySQL Cluster Configuration for Read Connection router +##### MySQL Cluster Configuration for Read Connection router The **readconnroute** connection router can be used to balance the connections across a MySQL cluster SQL nodes. A special monitor is available that detects if SQL nodes are connected to data nodes, with the addition of a router option to only route connections to nodes marked as NDB. MaxScale can ensure that users are never connected to a node that is not a full cluster member. @@ -642,7 +671,7 @@ servers=server1,server2 user=monitor passwd=monitor -[MySQL Cluster Service] +[MySQL Cluster Service] type=service router=readconnroute router_options=ndb @@ -657,11 +686,11 @@ port=4906 The `ndb` router option simply means: access all SQL nodes marked with NDB status, i.e. they are members of the cluster. -### Read/Write Split Router +#### Read/Write Split Router -The Read/Write Split Router is implemented in readwritesplit module. It is a statement-based router that has been designed for use within Master/Slave replication environments. It examines and optionally parses every statement to find out whether the statement can be routed to slave instead of master. +The Read/Write Split Router is implemented in readwritesplit module. It is a statement-based router that has been designed for use within Master/Slave replication environments. It examines and optionally parses every statement to find out whether the statement can be routed to slave instead of master. 
-#### Starting a readwritesplit router session +##### Starting a readwritesplit router session When client connects to readwritesplit service for the first time, client is authenticated against user data loaded from backend database. After successful authentication connection for client queries is created and followed by that, a readwritesplit router session is initialized. @@ -669,7 +698,7 @@ Router session processes its specific configuration parameters and establishes c ![image alt text](images/image_11.png) -#### Routing to *Master* +##### Routing to *Master* Routing to master is important for data consistency and because majority of writes are written to binlog and thus become replicated to slaves. @@ -685,7 +714,7 @@ The following operations are routed to master: In addition to these, if the **readwritesplit** service is configured with the `max_slave_replication_lag` parameter, and if all slaves suffer from too much replication lag, then statements will be routed to the _Master_. (There might be other similar configuration parameters in the future which limit the number of statements that will be routed to slaves.) -#### Routing to *Slave*s +##### Routing to *Slave*s The ability to route some statements to *Slave*s is important because it also decreases the load targeted to master. Moreover, it is possible to have multiple slaves to share the load in contrast to single master. @@ -696,29 +725,29 @@ Queries which can be routed to slaves must be auto committed and belong to one o * `SHOW` statements, and * system function calls. -#### Routing to every session backend +##### Routing to every session backend A third class of statements includes those which modify session data, such as session system variables, user-defined variables, the default database, etc. 
We call them session commands, and they must be replicated as they affect the future results of read and write operations, so they must be executed on all servers that could execute statements on behalf of this client. Session commands include for example: -* `SET` statements +* `SET` statements * `USE `*``* * system/user-defined variable assignments embedded in read-only statements, such as `SELECT (@myvar := 5)` -* `PREPARE` statements +* `PREPARE` statements * `QUIT`, `PING`, `STMT RESET`, `CHANGE USER`, etc. commands **NOTE: if variable assignment is embedded in a write statement it is routed to _Master_ only. For example, `INSERT INTO t1 values(@myvar:=5, 7)` would be routed to _Master_ only.** The router stores all of the executed session commands so that in case of a slave failure, a replacement slave can be chosen and the session command history can be repeated on that new slave. This means that the router stores each executed session command for the duration of the session. Applications that use long-running sessions might cause MaxScale to consume a growing amount of memory unless the sessions are closed. This can be solved by setting a connection timeout on the application side. -#### Configuring the Read/Write Split router +##### Configuring the Read/Write Split router -Read/Write Split router-specific settings are specified in the configuration file of MaxScale in its specific section. The section can be freely named but the name is used later as a reference from listener section. +Read/Write Split router-specific settings are specified in the configuration file of MaxScale in its specific section. The section can be freely named but the name is used later as a reference from listener section. The configuration consists of mandatory and optional parameters. -##### Mandatory parameters +###### Mandatory parameters `type` specifies the type of service. 
For **readwritesplit** module the type is `router`: @@ -743,7 +772,7 @@ user= passwd= ``` -##### Optional parameters +###### Optional parameters `max_slave_connections` sets the maximum number of slaves a router session uses at any moment. Default value is `1`. @@ -763,20 +792,26 @@ Please note max_slave_replication_lag must be greater than monitor interval. where ** is one of the following: * `LEAST_GLOBAL_CONNECTIONS`, the slave with least connections in total -* `LEAST_ROUTER_CONNECTIONS`, the slave with least connections from this router -* `LEAST_BEHIND_MASTER`, the slave with smallest replication lag -* `LEAST_CURRENT_OPERATIONS` (default), the slave with least active operations +* `LEAST_ROUTER_CONNECTIONS`, the slave with least connections from this router +* `LEAST_BEHIND_MASTER`, the slave with smallest replication lag +* `LEAST_CURRENT_OPERATIONS` (default), the slave with least active operations `use_sql_variables_in` specifies where should queries, which read session variable, be routed. The syntax for `use_sql_variable_in` is: use_sql_variables_in=[master|all] -When value all is used, queries reading session variables can be routed to any available slave (depending on selection criteria). Note, that queries modifying session variables are routed to all backend servers by default, excluding write queries with embedded session variable modifications, such as: +When value all is used, queries reading session variables can be routed to any available slave (depending on selection criteria). Note, that queries modifying session variables are routed to all backend servers by default, excluding write queries with embedded session variable modifications, such as: INSERT INTO test.t1 VALUES (@myid:=@myid+1) In above-mentioned case the user-defined variable would only be updated in the master where query would be routed due to `INSERT` statement. 
+`max_sescmd_history` sets a limit on how many session commands each session can execute before the connection is closed. The default is an unlimited number of session commands. + + max_sescmd_history=1500 + +When a limitation is set, it effectively creates a cap on the session's memory consumption. This might be useful if connection pooling is used and the sessions use large amounts of session commands. + An example of Read/Write Split router configuration : ``` @@ -806,9 +841,9 @@ port=4044 The client would merely connect to port 4044 on the MaxScale host and statements would be directed to the master, slave or all backends as appropriate. Determination of the master or slave status may be done via a monitor module within MaxScale or externally. In this latter case the server flags would need to be set via the MaxScale debug interface, in future versions an API will be available for this purpose. -#### Galera Cluster Configuration for Read/Write Split router +##### Galera Cluster Configuration for Read/Write Split router + - Galera monitor assigns Master and Slave roles to appropriate sync'ed Galera nodes. Using **readwritesplit** with Galera is seamless; the only change needed to the configuration above is replacing the list of MySQL replication servers with list of Galera nodes. With the same example as above: Simply configure a RWSplit Service with Galera nodes: @@ -824,11 +859,11 @@ passwd=mypass filters=qla|fetch|from ``` -### CLI +#### CLI The command line interface as used by `maxadmin`. This is a variant of the debugcli that is built slightly differently so that it may be accessed by the client application `maxadmin`. The CLI requires the use of the `maxscaled` protocol. -#### CLI Configuration +##### CLI Configuration There are two components to the definition required in order to run the command line interface to use with MaxAdmin; a service and a listener. 
@@ -849,11 +884,11 @@ port=6603 Note that this uses the default port of 6603 and confines the connections to localhost connections only. Remove the address= entry to allow connections from any machine on your network. Changing the port from 6603 will mean that you must allows pass a -p option to the MaxAdmin command. -### Debug CLI +#### Debug CLI The **debugcli** router is a special kind of statement based router. Rather than direct the statements at an external data source they are handled internally. These statements are simple text commands and the results are the output of debug commands within MaxScale. The service and listener definitions for a debug cli service only differ from other services in that they require no backend server definitions. -#### Debug CLI Configuration +##### Debug CLI Configuration The definition of the debug cli service is illustrated below @@ -869,7 +904,7 @@ protocol=telnetd port=4442 ``` -Connections using the telnet protocol to port 4442 of the MaxScale host will result in a new debug CLI session. A default username and password are used for this module, new users may be created using the add user command. As soon as any users are explicitly created the default username will no longer continue to work. The default username is admin with a password of skysql. +Connections using the telnet protocol to port 4442 of the MaxScale host will result in a new debug CLI session. A default username and password are used for this module, new users may be created using the add user command. As soon as any users are explicitly created the default username will no longer continue to work. The default username is admin with a password of mariadb. The debugcli supports two modes of operation, `developer` and `user`. The mode is set via the `router_options` parameter. The user mode is more suited to end-users and administrators, whilst the develop mode is explicitly targeted to software developing adding or maintaining the MaxScale code base. 
Details of the differences between the modes can be found in the debugging guide for MaxScale. The default is `user` mode. The following service definition would enable a developer version of the debugcli. @@ -905,7 +940,7 @@ protocol=telnetd port=4242 ``` -# Monitor Modules +## Monitor Modules Monitor modules are used by MaxScale to internally monitor the state of the backend databases in order to set the server flags for each of those servers. The router modules then use these flags to determine if the particular server is a suitable destination for routing connections for particular query classifications. The monitors are run within separate threads of MaxScale and do not affect the MaxScale performance. @@ -920,7 +955,7 @@ Parameters that apply to all monitors are: Other parameters are monitor specific. -## mysqlmon +### mysqlmon The MySQLMon monitor is a simple monitor designed for use with MySQL Master/Slave replication cluster. To execute the mysqlmon monitor an entry as shown below should be added to the MaxScale configuration file. @@ -958,7 +993,7 @@ Another option (`detect_stale_master=1`) may also allow to set a Stale Master wh Please note, those two options are not enabled by default. -## galeramon +### galeramon The Galeramon monitor is a simple router designed for use with MySQL Galera cluster. To execute the galeramon monitor an entry as shown below should be added to the MaxScale configuration file. @@ -1009,7 +1044,7 @@ Server 0x2d1b3c0 (server4) Node Id: 1 ``` -## ndbclustermon +### ndbclustermon The NDB Cluster Monitor (ndbclustermon) is a simple router designed for use with MySQL Cluster. To execute the ndclustermon monitor an entry as shown below should be added to the MaxScale configuration file. @@ -1063,7 +1098,7 @@ mysql> SHOW STATUS LIKE 'Ndb_cluster_node_id'; The value is stored in `node_id` server field. 
-# Filter Modules +## Filter Modules Currently four example filters are included in the MaxScale distribution @@ -1097,7 +1132,7 @@ Currently four example filters are included in the MaxScale distribution These filters are merely examples of what may be achieved with the filter API and are not sophisticated or consider as suitable for production use, they merely illustrate the functionality possible. -## Statement Counting Filter +### Statement Counting Filter The statement counting filter is implemented in the module names testfilter and merely keeps a count of the number of SQL statements executed. The filter requires no options to be passed and takes no parameters. The statement count can be viewed via the diagnostic and debug interface of MaxScale. @@ -1113,7 +1148,7 @@ Then add the filter to your service by including the filters= parameter in the s filters=counter -## Query Log All (QLA) Filter +### Query Log All (QLA) Filter The Query Log All Filter (qlafilter) simply writes all SQL statements to a log file along with a timestamp for the statement. An example of the file produced by the QLA filter is shown below @@ -1141,7 +1176,7 @@ Then add the filters= parameter into the service that you wish to log by adding A log file will be created for each client connection, the name of that log file will be `/tmp/QueryLog.`*``* -## Regular Expression Filter +### Regular Expression Filter The regular expression filter is a simple text based query rewriting filter. It allows a regular expression to be used to match text in a SQL query and then a string replacement to be made against that match. The filter is implemented by the regexfilter loadable module and is passed two parameters, a match string and a replacement string. @@ -1179,9 +1214,9 @@ would be replaced with before being sent to the server. Note that the text in the match string is case-insensitive. 
-## Tee Filter +### Tee Filter -The **tee** filter is a filter module for MaxScale that acts as a "plumbing" fitting in the MaxScale filter toolkit. It can be used in a filter pipeline of a service to make a copy of requests from the client and dispatch a copy of the request to another service within MaxScale. +The **tee** filter is a filter module for MaxScale that acts as a "plumbing" fitting in the MaxScale filter toolkit. It can be used in a filter pipeline of a service to make a copy of requests from the client and dispatch a copy of the request to another service within MaxScale. The configuration block for the **tee** filter requires the minimal filter parameters in its section within the `MaxScale.cnf` file that defines the filter to load and the service to send the duplicates to. @@ -1194,7 +1229,7 @@ service=Archive In addition parameters may be added to define patterns to match against to either include or exclude particular SQL statements to be duplicated. You may also define that the filter is only active for connections from a particular source or when a particular user is connected. -## Top Filter +### Top Filter The top filter is a filter module for MaxScale that monitors every SQL statement that passes through the filter. It measures the duration of that statement, the time between the statement being sent and the first result being returned. The top N times are kept, along with the SQL text itself and a list sorted on the execution times of the query is written to a file upon closure of the client session. @@ -1210,7 +1245,7 @@ count=10 In addition parameters may be added to define patterns to match against to either include or exclude particular SQL statements to be duplicated. You may also define that the filter is only active for connections from a particular source or when a particular user is connected. -# Encrypting Passwords +## Encrypting Passwords Passwords stored in the MaxScale.cnf file may optionally be encrypted for added security. 
This is done by creation of an encryption key on installation of MaxScale. Encryption keys may be created manually by executing the maxkeys utility with the argument of the filename to store the key. @@ -1218,7 +1253,7 @@ Passwords stored in the MaxScale.cnf file may optionally be encrypted for added Changing the encryption key for MaxScale will invalidate any currently encrypted keys stored in the MaxScale.cnf file. -## Creating Encrypted Passwords +### Creating Encrypted Passwords Encrypted passwords are created by executing the maxpasswd command with the password you require to encrypt as an argument. The environment variable `MAXSCALE_HOME` must be set, or MaxScale must be installed in the default location before maxpasswd can be executed. @@ -1236,13 +1271,13 @@ user=maxscale password=61DD955512C39A4A8BC4BB1E5F116705 ``` -# Reloading Configuration +## Reloading Configuration The current MaxScale configuration may be updated by editing the configuration file and then forcing MaxScale to reread the configuration file. To force MaxScale to reread the configuration file, send a SIGHUP signal to the MaxScale process or execute `reload config` in the `maxadmin` client. Some changes in configuration can not be dynamically applied and require a complete restart of MaxScale, whilst others will take some time to be applied. -## Limitations +### Limitations Services that are removed via the configuration update mechanism can not be physically removed from MaxScale until there are no longer any connections using the service. @@ -1250,7 +1285,7 @@ When the number of threads is decreased the threads will not actually be termina Monitors can not be completely removed from the running MaxScale. -# Authentication +## Authentication MySQL uses username, passwords and the client host in order to authenticate a user, so a typical user would be defined as user X at host Y and would be given a password to connect. 
MaxScale uses exactly the same rules as MySQL when users connect to the MaxScale instance, i.e. it will check the address from which the client is connecting and treat this in exactly the same way that MySQL would. MaxScale will pull the authentication data from one of the backend servers and use this to match the incoming connections, the assumption being that all the backend servers for a particular service will share the same set of user credentials. @@ -1269,7 +1304,7 @@ Username|Password|Client Host In this case the user *X* would be able to connect to MaxScale from host a giving the password of *pass1*. In addition MaxScale would be able to create connections for this user to the backend servers using the username *X* and password *pass1*, since the MaxScale host is also defined to have password *pass1*. User *X* would not however be able to connect from host *b* since they would need to provide the password *pass2* in order to connect to MaxScale, but then MaxScale would not be able to connect to the backends as it would also use the password *pass2* for these connections. -## Wildcard Hosts +### Wildcard Hosts Hostname mapping in MaxScale works in exactly the same way as for MySQL, if the wildcard is used for the host then any host other than the localhost (127.0.0.1) will match. It is important to consider that the localhost check will be performed at the MaxScale level and at the MySQL server level. @@ -1288,7 +1323,7 @@ MariaDB [mysql]> GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, DROP ON employee. Query OK, 0 rows affected (0.00 sec) ``` -## Limitations +### Limitations At the time of writing the authentication mechanism within MaxScale does not support IPV6 address matching in connections rules. This is also in line with the current protocol modules that do not support IPV6. 
@@ -1304,11 +1339,11 @@ and short notations 192.%.% 192.168.% -# Error Reporting +## Error Reporting MaxScale is designed to be executed as a service, therefore all error reports, including configuration errors, are written to the MaxScale error log file. MaxScale will log to a set of files in the directory `$MAXSCALE_HOME/log`, the only exception to this is if the log directory is not writable, in which case a message is sent to the standard error descriptor. -## Troubleshooting +### Troubleshooting MaxScale binds on TCP ports and UNIX sockets as well. @@ -1329,4 +1364,3 @@ socket=/servers/maxscale/galera.sock TCP/IP Traffic must be permitted to 192.1681.3.33 port 4408 For Unix socket, the socket file path (example: `/servers/maxscale/galera.sock`) must be writable by the Unix user MaxScale runs as. - diff --git a/Documentation/Getting-Started/Getting-Started-With-MaxScale.md b/Documentation/Getting-Started/Getting-Started-With-MaxScale.md index 48d722ff2..f94e47bb3 100644 --- a/Documentation/Getting-Started/Getting-Started-With-MaxScale.md +++ b/Documentation/Getting-Started/Getting-Started-With-MaxScale.md @@ -18,8 +18,6 @@ The simplest way to install MaxScale is to use one of the binary packages that a * Select your operating system from the drop down box -![image alt text](images/image_0.png) - * Instructions that are specific for your operating system will then appear ![image alt text](images/image_1.png) @@ -65,9 +63,9 @@ modules it will search using a predescribed search path. The rules are: 1. Look in the current directory for the module 2. Look in $MAXSCALE_HOME/modules - 3. Look in /usr/local/skysql/maxscale/modules + 3. Look in /usr/local/mariadb-maxscale/modules -Configuration is read by default from the file \$MAXSCALE_HOME/etc/MaxScale.cnf, /etc/MaxScale.cnf. An example file is included in in the installation and can be found in the etc/ folder within the MaxScale installation. 
The default value of MAXSCALE_HOME can be overriden by using the -c flag on the command line. This should be immediately followed by the path to the MaxScale home directory. The -f flag can be used on the command line to set the name and the location of the configuration file. Without path expression the file is read from \$MAXSCALE_HOME/etc directory. +Configuration is read by default from the file $MAXSCALE_HOME/etc/MaxScale.cnf, /etc/MaxScale.cnf. An example file is included in the installation and can be found in the etc/ folder within the MaxScale installation. The default value of MAXSCALE_HOME can be overridden by using the -c flag on the command line. This should be immediately followed by the path to the MaxScale home directory. The -f flag can be used on the command line to set the name and the location of the configuration file. Without path expression the file is read from \$MAXSCALE_HOME/etc directory. ## Administration Of MaxScale diff --git a/Documentation/Reference/Debug-And-Diagnostic-Support.md b/Documentation/Reference/Debug-And-Diagnostic-Support.md index 7045a42f6..8da5ec1b2 100644 --- a/Documentation/Reference/Debug-And-Diagnostic-Support.md +++ b/Documentation/Reference/Debug-And-Diagnostic-Support.md @@ -354,7 +354,7 @@ Connected to localhost. Escape character is '^]'. -Welcome the SkySQL MaxScale Debug Interface (V1.1.0). +Welcome the MariaDB MaxScale Debug Interface (V1.1.0). Type help for a list of available commands. @@ -364,7 +364,7 @@ Password: MaxScale> -As delivered MaxScale uses a default login name of admin with the password of skysql for connections to the debug interface. Users may be added to the CLI by use of the add user command. +As delivered MaxScale uses a default login name of admin with the password of mariadb for connections to the debug interface. Users may be added to the CLI by use of the add user command. 
This places you in the debug command line interface of MaxScale, there is a help system that will display the commands available to you @@ -1691,7 +1691,7 @@ Note, not all configuration elements can be changed dynamically currently. This ## Add user -The add user command is used to add new users to the debug CLI of MaxScale. The default behaviour of the CLI for MaxScale is to have a login name of admin and a fixed password of skysql. Adding new users will disable this default behaviour and limit the login access to the users that are added. +The add user command is used to add new users to the debug CLI of MaxScale. The default behaviour of the CLI for MaxScale is to have a login name of admin and a fixed password of mariadb. Adding new users will disable this default behaviour and limit the login access to the users that are added. **MaxScale>** add user admin july2013 @@ -1711,7 +1711,7 @@ User admin already exists. **MaxScale>**** ** -If you should forget or lose the the account details you may simply remove the passwd file in $MAXSCALE_HOME/etc and the system will revert to the default behaviour with admin/skysql as the account. +If you should forget or lose the account details you may simply remove the passwd file in $MAXSCALE_HOME/etc and the system will revert to the default behaviour with admin/mariadb as the account. ## Enable/disable log diff --git a/Documentation/Reference/MaxAdmin.md b/Documentation/Reference/MaxAdmin.md index a6edf890c..7b8d6316c 100644 --- a/Documentation/Reference/MaxAdmin.md +++ b/Documentation/Reference/MaxAdmin.md @@ -650,7 +650,7 @@ A monitor that has been shutdown may be restarted using the restart monitor comm # Working With Administration Interface Users -A default installation of MaxScale allows connection to the administration interface using the username of admin and the password skysql. This username and password stay in effect as long as no other users have been created for the administration interface. 
As soon as the first user is added the use of admin/skysql as login credentials will be disabled. +A default installation of MaxScale allows connection to the administration interface using the username of admin and the password mariadb. This username and password stay in effect as long as no other users have been created for the administration interface. As soon as the first user is added the use of admin/mariadb as login credentials will be disabled. ## What Users Have Been Defined? @@ -666,7 +666,7 @@ In order to see the current users that have been defined for the administration User names: vilho, root, dba, massi, mark MaxScale> -Please note that if no users have been configured the default admin/skysql user will not be shown. +Please note that if no users have been configured the default admin/mariadb user will not be shown. MaxScale> show users Administration interface users: diff --git a/Documentation/About/MaxScale-1.0.4-Release-Notes.md b/Documentation/Release-Notes/MaxScale-1.0.4-Release-Notes.md similarity index 96% rename from Documentation/About/MaxScale-1.0.4-Release-Notes.md rename to Documentation/Release-Notes/MaxScale-1.0.4-Release-Notes.md index a34a1ce0f..db341e893 100644 --- a/Documentation/About/MaxScale-1.0.4-Release-Notes.md +++ b/Documentation/Release-Notes/MaxScale-1.0.4-Release-Notes.md @@ -136,5 +136,5 @@ Both RPM and Debian packages are available for MaxScale in addition to the tar b # MaxScale Home Default Value -The installation assumes that the default value for the environment variable MAXSCALE_HOME is set to /usr/local/skysql/maxscale. This is hard coded in the service startup file that is placed in /etc/init.d/maxscale by the installation process. +The installation assumes that the default value for the environment variable MAXSCALE_HOME is set to /usr/local/mariadb-maxscale. This is hard coded in the service startup file that is placed in /etc/init.d/maxscale by the installation process. 
diff --git a/Documentation/Release-Notes/MaxScale-1.0.5-Release-Notes.md b/Documentation/Release-Notes/MaxScale-1.0.5-Release-Notes.md new file mode 100644 index 000000000..4b65fd92f --- /dev/null +++ b/Documentation/Release-Notes/MaxScale-1.0.5-Release-Notes.md @@ -0,0 +1,113 @@ +MaxScale Release Notes 1.0.5 GA + +This document details the changes in version 1.0.5 since the release of the 1.0.4 GA of the MaxScale product. + +# New Features +No new features have been introduced since the GA version was released. SuSE Enterprise 11 and 12 packages are now also supplied. + +# Bug Fixes + +A number of bug fixes have been applied between the 1.0.4 initial GA release and this GA release. The table below lists the bugs that have been resolved. The details for each of these may be found in bugs.mariadb.com. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
IDSummary
519LOAD DATA LOCAL INFILE not handled?
714Error log flooded when too many connect errors causes the MaxScale host to be blocked
711Some MySQL Workbench Management actions hang with R/W split router
710make package install files in /etc/init.d
683Check for unsupported version of MariaDB
684Use mysql_config to determine include/lib directory paths and compiler options
689cmake -DCMAKE_INSTALL_PREFIX has no effect
701set server maint fails on the command line
705Authentication fails when the user connects to a database with the SQL mode including ANSI_QUOTES
507R/W split does not send last_insert_id() to the master
700maxscale --version has no output
694RWSplit SELECT @a:=@a+1 as a, test.b from test breaks client session
685SELECT against readconnrouter fails when large volumes of data are returned and the tee filter is used
+ +# Known Issues + +There are a number bugs and known limitations within this version of MaxScale, the most serious of this are listed below. + +* The Read/Write Splitter is a little too strict when it receives errors from slave servers during execution of session commands. This can result in sessions being terminated in situation in which MaxScale could recover without terminating the sessions. + +* MaxScale can not manage authentication that uses wildcard matching in hostnames in the mysql.user table of the backend database. The only wildcards that can be used are in IP address entries. + +* When users have different passwords based on the host from which they connect MaxScale is unable to determine which password it should use to connect to the backend database. This results in failed connections and unusable usernames in MaxScale. + +# Packaging + +Both RPM and Debian packages are available for MaxScale in addition to the tar based releases previously distributed we now provide + +* CentOS/RedHat 5 + +* CentOS/RedHat 6 + +* CentOS/RedHat 7 + +* Debian 6 + +* Debian 7 + +* Ubuntu 12.04 LTS + +* Ubuntu 13.10 + +* Ubuntu 14.04 LTS + +* Fedora 19 + +* Fedora 20 + +* OpenSuSE 13 + +* SuSE Enterprise 11 + +* SuSE Enterprise 12 + +# MaxScale Home Default Value + +The installation assumes that the default value for the environment variable MAXSCALE_HOME is set to /usr/local/skysql/maxscale. This is hard coded in the service startup file that is placed in /etc/init.d/maxscale by the installation process. diff --git a/Documentation/Release-Notes/MaxScale-1.1-Release-Notes.md b/Documentation/Release-Notes/MaxScale-1.1-Release-Notes.md new file mode 100644 index 000000000..fed31ead7 --- /dev/null +++ b/Documentation/Release-Notes/MaxScale-1.1-Release-Notes.md @@ -0,0 +1,220 @@ +# MaxScale Release Notes + +## 1.1 RC + +This document details the changes in version 1.1 since the release of the 1.0.5 GA Release of the MaxScale product. 
+ +## New Features + +### High Performance Binlog Relay +Replicate Binlog from the master to slave through MaxScale as simplified relay server for reduced network load and disaster recovery + +### Database Firewall Filter +Block queries based on columns in the query, where condition, query type(select, insert, delete, update), presence of wildcard, regular expression match and time of the query + +### Schema Sharding Router +Route to databases sharded by schema without application level knowledge of shard configuration + +### Hint based routing +Pass hints in the SQL statement to influence the routing decision based on replication lag or time out + +### Named Server Routing +Routing to a named server if incoming query matches a regular expression + +### Canonical Query logging +Convert incoming queries to canonical form and push the query and response into RabbitMQ Broker- for a RabbitMQ Client to later retrieve from + +### Nagios Plugin +Plugin scripts for monitoring MaxScale status and performance from a Nagios Server + +### Notification Service +Receive notification of security update and patches tailored to your MaxScale configuration + +### MySQL NDB cluster support +Connection based routing to MySQL NDB clusters + +## Bug Fixes + +A number of bug fixes have been applied between the 1.0.5 GA and this RC release. The table below lists the bugs that have been resolved. The details for each of these may be found in https://mariadb.atlassian.net/projects/MXS or in the former http://bugs.mariadb.com Bug database + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
IDSummary
MXS-47Session freeze when small tail packet
MXS-5Possible memory leak in readwritesplit router
736Memory leak while doing read/write splitting
733Init-script deletes bin/maxscale
732Build is broken: CentOS/RHEL 5 and SLES 11
730Regex filter and shorter than original replacement queries MaxScale
729PDO prepared statements bug introduced in Maxscale 1.0.5
721Documentation suggests SIGTERM to re-read config file
716$this->getReadConnection()->query('SET @id = 0;');
709"COPYRIGHT LICENSE README SETUP" files go to /usr/local/mariadb-maxscale/ after 'make package'
704"make testall" returns success status (exit code 0) even on failures
698Using invalid parameter in many maxadmin commands causes MaxScale to fail
693Freeing tee filter's orphaned sessions causes a segfault when embedded server closes
690CPU/architecture is hardcoded into debian/rules
686TestService fails because of the modules used in it aren't meant for actual use
677Race condition in tee filter clientReply
676"Write to backend failed. Session closed." when changing default database via readwritesplit with max_slave_connections != 100%
673MaxScale crashes if "Users table data" is empty and "show dbusers" is executed in maxadmin
670Tee filter: statement router loses statements when other router gets enough ahead
665Core: accessing freed memory when session is closed
659MaxScale doesn't shutdown if none of the configured services start
648use database is sent forever with tee filter to a readwrite split service
620enable_root_user=true generates errors to error log
612Service was started although no users could be loaded from database
600RWSplit: if session command fails in some backend, it is not dropped from routing session
587Hint filter don't work if listed before regex filter in configuration file
579serviceStartProtocol test crashes
506Don't write to shm/tmpfs by default without telling and without a way to override it
503TOC in the bundled PDFs doesn't link to actual sections
457Please provide a list of build dependencies for building MaxScale
361file_exists() *modifies* the file it checks for???
338Log manager spread down feature is disabled
159Memory leak. Dbusers are loaded into memory but not unloaded
+ + +## Known Issues + +There are a number of bugs and known limitations within this version of MaxScale, the most serious of these are listed below. + +* The Read/Write Splitter is a little too strict when it receives errors from slave servers during execution of session commands. This can result in sessions being terminated in situations in which MaxScale could recover without terminating the sessions. + +* MaxScale can not manage authentication that uses wildcard matching in hostnames in the mysql.user table of the backend database. The only wildcards that can be used are in IP address entries. + +* When users have different passwords based on the host from which they connect MaxScale is unable to determine which password it should use to connect to the backend database. This results in failed connections and unusable usernames in MaxScale. + +* Service init script is missing after upgrade from 1.0 in RPM-based systems. This can be fixed by reinstalling the package ('yum reinstall maxscale' or 'rpm -i --force /maxscale-1.1.rpm'). + +## Packaging + +Both RPM and Debian packages are available for MaxScale in addition to the tar based releases previously distributed. We now provide packages for: + +* CentOS/RedHat 5 + +* CentOS/RedHat 6 + +* CentOS/RedHat 7 + +* Debian 6 + +* Debian 7 + +* Ubuntu 12.04 LTS + +* Ubuntu 13.10 + +* Ubuntu 14.04 LTS + +* Fedora 19 + +* Fedora 20 + +* OpenSuSE 13 + +* SuSE Linux Enterprise 11 + +* SuSE Linux Enterprise 12 diff --git a/Documentation/Tutorials/Administration-Tutorial.md b/Documentation/Tutorials/Administration-Tutorial.md index 67a07ac78..d212b2e17 100644 --- a/Documentation/Tutorials/Administration-Tutorial.md +++ b/Documentation/Tutorials/Administration-Tutorial.md @@ -22,7 +22,7 @@ or $ /etc/init.d/maxscale start -It is also possible to start MaxScale by executing the maxscale command itself, in this case you must ensure that the environment is correctly setup or command line options are passed. 
The major elements to consider are the correct setting of the MAXSCALE\_HOME directory and to ensure that LD\_LIBRARY\_PATH. The LD\_LIBRARY\_PATH should include the lib directory that was installed as part of the MaxScale installation, the MAXSCALE\_HOME should point to /usr/local/skysql/maxscale if a default installation has been created or to the directory this was relocated to. Running the executable $MAXSCALE\_HOME/bin/maxscale will result in MaxScale running as a daemon process, unattached to the terminal in which it was started and using configuration files that it finds in the $MAXSCALE\_HOME directory. +It is also possible to start MaxScale by executing the maxscale command itself, in this case you must ensure that the environment is correctly setup or command line options are passed. The major elements to consider are the correct setting of the MAXSCALE\_HOME directory and to ensure that LD\_LIBRARY\_PATH. The LD\_LIBRARY\_PATH should include the lib directory that was installed as part of the MaxScale installation, the MAXSCALE\_HOME should point to /usr/local/mariadb-maxscale if a default installation has been created or to the directory this was relocated to. Running the executable $MAXSCALE\_HOME/bin/maxscale will result in MaxScale running as a daemon process, unattached to the terminal in which it was started and using configuration files that it finds in the $MAXSCALE\_HOME directory. Options may be passed to the MaxScale binary that alter this default behaviour, this options are documented in the table below. @@ -83,14 +83,14 @@ MaxScale will also stop gracefully if it received a hangup signal, to find the p In order to shutdown MaxScale using the maxadmin command you may either connect with maxadmin in interactive mode or pass the "shutdown maxscale" command you wish to execute as an argument to maxadmin. 
- $ maxadmin -pskysql shutdown maxscale + $ maxadmin -pmariadb shutdown maxscale ### Checking The Status Of The MaxScale Services It is possible to use the maxadmin command to obtain statistics regarding the services that are configured within your MaxScale configuration file. The maxadmin command "list services" will give very basic information regarding the services that are define. This command may be either run in interactive mode or passed on the maxadmin command line. - $ maxadmin -pskysql + $ maxadmin -pmariadb MaxScale> list services Services. @@ -118,7 +118,7 @@ It should be noted that network listeners count as a user of the service, theref To determine what client are currently connected to MaxScale you can use the "list clients" command within maxadmin. This will give you IP address and the ID’s of the DCB and session for that connection. As with any maxadmin command this can be passed on the command line or typed interactively in maxadmin. - $ maxadmin -pskysql list clients + $ maxadmin -pmariadb list clients Client Connections @@ -141,11 +141,11 @@ MaxScale write log data into four log files with varying degrees of detail. With It is possible to rotate either a single log file or all the log files with a single command. When the logfile is rotated, the current log file is closed and a new log file, with an increased sequence number in its name, is created. Log file rotation is achieved by use of the "flush log" or “flush logs” command in maxadmin. - $ maxadmin -pskysql flush logs + $ maxadmin -pmariadb flush logs Flushes all of the logs, whereas an individual log may be flushed with the "flush log" command. - $ maxadmin -pskysql + $ maxadmin -pmariadb MaxScale> flush log error MaxScale> flush log trace MaxScale> @@ -154,7 +154,7 @@ This may be integrated into the Linux logrotate mechanism by adding a configurat - @@ -175,14 +175,14 @@ One disadvantage with this is that the password used for the maxadmin command ha
/usr/local/skysql/maxscale/log/*.log { + /usr/local/mariadb-maxscale/log/*.log { monthly rotate 5 missingok @@ -163,7 +163,7 @@ sharedscripts postrotate \# run if maxscale is running if test -n "`ps acx|grep maxscale`"; then -/usr/local/skysql/maxscale/bin/maxadmin -pskysql flush logs +/usr/local/mariadb-maxscale/bin/maxadmin -pmariadb flush logs fi endscript }
- diff --git a/Documentation/Tutorials/Galera-Cluster-Connection-Routing-Tutorial.md b/Documentation/Tutorials/Galera-Cluster-Connection-Routing-Tutorial.md index 385f34066..f26bf8bab 100644 --- a/Documentation/Tutorials/Galera-Cluster-Connection-Routing-Tutorial.md +++ b/Documentation/Tutorials/Galera-Cluster-Connection-Routing-Tutorial.md @@ -60,7 +60,7 @@ If you wish to use two different usernames for the two different roles of monito ### Creating Your MaxScale Configuration -MaxScale configuration is held in an ini file that is located in the file MaxScale.cnf in the directory $MAXSCALE_HOME/etc, if you have installed in the default location then this file is available in /usr/local/skysql/maxscale/etc/MaxScale.cnf. This is not created as part of the installation process and must be manually created. A template file does exist within this directory that may be use as a basis for your configuration. +MaxScale configuration is held in an ini file that is located in the file MaxScale.cnf in the directory $MAXSCALE_HOME/etc, if you have installed in the default location then this file is available in /usr/local/mariadb-maxscale/etc/MaxScale.cnf. This is not created as part of the installation process and must be manually created. A template file does exist within this directory that may be use as a basis for your configuration. A global, maxscale, section is included within every MaxScale configuration file; this is used to set the values of various MaxScale wide parameters, perhaps the most important of these is the number of threads that MaxScale will use to execute the code that forwards requests and handles responses for clients. @@ -89,7 +89,7 @@ In order to instruct the router to which servers it should route we must add rou The final step in the service section is to add the username and password that will be used to populate the user data from the database cluster. 
There are two options for representing the password, either plain text or encrypted passwords may be used. In order to use encrypted passwords a set of keys must be generated that will be used by the encryption and decryption process. To generate the keys use the maxkeys command and pass the name of the secrets file in which the keys are stored. - % maxkeys /usr/local/skysql/maxscale/etc/.secrets + % maxkeys /usr/local/mariadb-maxscale/etc/.secrets % Once the keys have been created the maxpasswd command can be used to generate the encrypted password. @@ -178,9 +178,9 @@ or % service maxscale start -Check the error log in /usr/local/skysql/maxscale/log to see if any errors are detected in the configuration file and to confirm MaxScale has been started. Also the maxadmin command may be used to confirm that MaxScale is running and the services, listeners etc have been correctly configured. +Check the error log in /usr/local/mariadb-maxscale/log to see if any errors are detected in the configuration file and to confirm MaxScale has been started. Also the maxadmin command may be used to confirm that MaxScale is running and the services, listeners etc have been correctly configured. - % maxadmin -pskysql list services + % maxadmin -pmariadb list services Services. --------------------------+----------------------+--------+--------------- @@ -189,7 +189,7 @@ Check the error log in /usr/local/skysql/maxscale/log to see if any errors are d Galera Service | readconnroute | 1 | 1 CLI | cli | 2 | 2 --------------------------+----------------------+--------+--------------- - % maxadmin -pskysql list servers + % maxadmin -pmariadb list servers Servers. 
-------------------+-----------------+-------+-------------+------------------- Server | Address | Port | Connections | Status @@ -201,7 +201,7 @@ Check the error log in /usr/local/skysql/maxscale/log to see if any errors are d A Galera Cluster is a multi-master clustering technology, however the monitor is able to impose false notions of master and slave roles within a Galera Cluster in order to facilitate the use of Galera as if it were a standard MySQL Replication setup. This is merely an internal MaxScale convenience and has no impact on the behaviour of the cluster. - % maxadmin -pskysql list listeners + % maxadmin -pmariadb list listeners Listeners. ---------------------+--------------------+-----------------+-------+-------- diff --git a/Documentation/Tutorials/Galera-Cluster-Read-Write-Splitting-Tutorial.md b/Documentation/Tutorials/Galera-Cluster-Read-Write-Splitting-Tutorial.md index a2360ad34..5245c65d8 100644 --- a/Documentation/Tutorials/Galera-Cluster-Read-Write-Splitting-Tutorial.md +++ b/Documentation/Tutorials/Galera-Cluster-Read-Write-Splitting-Tutorial.md @@ -62,7 +62,7 @@ If you wish to use two different usernames for the two different roles of monito ### Creating Your MaxScale Configuration -MaxScale configuration is held in an ini file that is located in the file MaxScale.cnf in the directory $MAXSCALE_HOME/etc, if you have installed in the default location then this file is available in /usr/local/skysql/maxscale/etc/MaxScale.cnf. This is not created as part of the installation process and must be manually created. A template file does exist within this directory that may be use as a basis for your configuration. +MaxScale configuration is held in an ini file that is located in the file MaxScale.cnf in the directory $MAXSCALE_HOME/etc, if you have installed in the default location then this file is available in /usr/local/mariadb-maxscale/etc/MaxScale.cnf. This is not created as part of the installation process and must be manually created. 
A template file does exist within this directory that may be use as a basis for your configuration. A global, maxscale, section is included within every MaxScale configuration file; this is used to set the values of various MaxScale wide parameters, perhaps the most important of these is the number of threads that MaxScale will use to execute the code that forwards requests and handles responses for clients. @@ -83,7 +83,7 @@ The router for we need to use for this configuration is the readwritesplit modul The final step in the service sections is to add the username and password that will be used to populate the user data from the database cluster. There are two options for representing the password, either plain text or encrypted passwords may be used. In order to use encrypted passwords a set of keys must be generated that will be used by the encryption and decryption process. To generate the keys use the maxkeys command and pass the name of the secrets file in which the keys are stored. - % maxkeys /usr/local/skysql/maxscale/etc/.secrets + % maxkeys /usr/local/mariadb-maxscale/etc/.secrets % Once the keys have been created the maxpasswd command can be used to generate the encrypted password. @@ -183,9 +183,9 @@ or % service maxscale start -Check the error log in /usr/local/skysql/maxscale/log to see if any errors are detected in the configuration file and to confirm MaxScale has been started. Also the maxadmin command may be used to confirm that MaxScale is running and the services, listeners etc have been correctly configured. +Check the error log in /usr/local/mariadb-maxscale/log to see if any errors are detected in the configuration file and to confirm MaxScale has been started. Also the maxadmin command may be used to confirm that MaxScale is running and the services, listeners etc have been correctly configured. - % maxadmin -pskysql list services + % maxadmin -pmariadb list services Services. 
--------------------------+----------------------+--------+--------------- @@ -195,7 +195,7 @@ Check the error log in /usr/local/skysql/maxscale/log to see if any errors are d CLI | cli | 2 | 2 --------------------------+----------------------+--------+--------------- - % maxadmin -pskysql list servers + % maxadmin -pmariadb list servers Servers. -------------------+-----------------+-------+-------------+-------------------- Server | Address | Port | Connections | Status @@ -207,7 +207,7 @@ Check the error log in /usr/local/skysql/maxscale/log to see if any errors are d A Galera Cluster is a multi-master clustering technology, however the monitor is able to impose false notions of master and slave roles within a Galera Cluster in order to facilitate the use of Galera as if it were a standard MySQL Replication setup. This is merely an internal MaxScale convenience and has no impact on the behaviour of the cluster but does allow the monitor to create these pseudo roles which are utilised by the Read/Write Splitter. - % maxadmin -pskysql list listeners + % maxadmin -pmariadb list listeners Listeners. ---------------------+--------------------+-----------------+-------+-------- diff --git a/Documentation/Tutorials/MaxScale-Information-Schema.md b/Documentation/Tutorials/MaxScale-Information-Schema.md new file mode 100644 index 000000000..d4cdaa2c7 --- /dev/null +++ b/Documentation/Tutorials/MaxScale-Information-Schema.md @@ -0,0 +1,512 @@ +# MaxInfo Plugin +The maxinfo plugin is a special router plugin similar to the one used for implementing the server side component of the MaxAdmin interface. The plugin is designed to return data regarding the internals of MaxScale, it provides an information schema approach to monitoring the internals of MaxScale itself. + +The plugin is capable of returning data in one of two ways, either as MySQL result sets or as JSON encoded data. 
The choice of which mechanism used to return the data is determined by the type of the request the router receives. If a MySQL command is received then the router will return the results as a MySQL result set, if an HTTP request is received then the data will be returned as a JSON document. + +# Configuration + +The plugin is configured in the MaxScale.cnf file in much the same way as any other router service is configured, there needs to be a service section in the configuration file and also listeners defined for that service. The service does not however require any backend servers to be associated with it, or any monitors. + +The service entry needs to define the service name, the type as service and the router module to load. +The specified user, with the password (plain or encrypted via the maxpasswd utility) is allowed to connect via MySQL protocol. +Currently the user can connect to maxinfo from any remote IP and to localhost as well. + + [MaxInfo] + type=service + router=maxinfo + user=monitor + passwd=EBD2F49C3B375812A8CDEBA632ED8BBC + +The listener section defines the protocol, port and other information needed to create a listener for the service. To listen on a port using the MySQL protocol a section as shown below should be added to the configuration file. + + [MaxInfo Listener] + type=listener + service=MaxInfo + protocol=MySQLClient + port=9003 + +To listen with the HTTP protocol and hence return JSON documents a section as shown below is required. + + [MaxInfo JSON Listener] + type=listener + service=MaxInfo + protocol=HTTPD + port=8003 + +If both the MySQL and JSON responses are required then a single service can be configured with both types of listener. + +As with any other listeners within MaxScale the listeners can be bound to a particular interface by use of the address= parameter. This allows the access to the maxinfo data to be limited to the localhost by adding an address=localhost parameter in the configuration file. 
+ + [MaxInfo Listener] + type=listener + service=MaxInfo + protocol=MySQLClient + address=localhost + port=9003 + +# MySQL Interface to maxinfo + +The maxinfo supports a small subset of SQL statements in addition to the MySQL status and ping requests. These may be used for simple monitoring of MaxScale. + + % mysqladmin -hmaxscale.mariadb.com -P9003 -umonitor -pxyz ping + mysqld is alive + % mysqladmin -hmaxscale.mariadb.com -P9003 -umonitor -pxyz status + Uptime: 72 Threads: 1 Sessions: 11 + % + +The SQL command used to interact with maxinfo is the show command, a variety of show commands are available and will be described in the following sections. + +## Show variables + +The show variables command will display a set of name and value pairs for a number of MaxScale system variables. + + mysql> show variables; + +--------------------+-------------------------+ + | Variable_name | Value | + +--------------------+-------------------------+ + | version | 1.0.6-unstable | + | version_comment | MariaDB MaxScale | + | basedir | /home/mriddoch/skygate2 | + | MAXSCALE_VERSION | 1.0.6-unstable | + | MAXSCALE_THREADS | 1 | + | MAXSCALE_NBPOLLS | 3 | + | MAXSCALE_POLLSLEEP | 1000 | + | MAXSCALE_UPTIME | 223 | + | MAXSCALE_SESSIONS | 11 | + +--------------------+-------------------------+ + 9 rows in set (0.02 sec) + + mysql> + +The show variables command can also accept a limited like clause. This like clause must either be a literal string to match, a pattern starting with a %, a pattern ending with a % or a string with a % at both the start and the end. 
+ + mysql> show variables like 'version'; + +---------------+----------------+ + | Variable_name | Value | + +---------------+----------------+ + | version | 1.0.6-unstable | + +---------------+----------------+ + 1 row in set (0.02 sec) + + mysql> show variables like 'version%'; + +-----------------+------------------+ + | Variable_name | Value | + +-----------------+------------------+ + | version | 1.0.6-unstable | + | version_comment | MariaDB MaxScale | + +-----------------+------------------+ + 2 rows in set (0.02 sec) + + mysql> show variables like '%comment'; + +-----------------+------------------+ + | Variable_name | Value | + +-----------------+------------------+ + | version_comment | MariaDB MaxScale | + +-----------------+------------------+ + 1 row in set (0.02 sec) + + mysql> show variables like '%ers%'; + +------------------+------------------+ + | Variable_name | Value | + +------------------+------------------+ + | version | 1.0.6-unstable | + | version_comment | MariaDB MaxScale | + | MAXSCALE_VERSION | 1.0.6-unstable | + +------------------+------------------+ + 3 rows in set (0.02 sec) + + mysql> + +## Show status + +The show status command displays a set of status counters; as with show variables, the show status command can be passed a simplified like clause to limit the values returned. 
+ + mysql> show status; + +---------------------------+-------+ + | Variable_name | Value | + +---------------------------+-------+ + | Uptime | 156 | + | Uptime_since_flush_status | 156 | + | Threads_created | 1 | + | Threads_running | 1 | + | Threadpool_threads | 1 | + | Threads_connected | 11 | + | Connections | 11 | + | Client_connections | 2 | + | Backend_connections | 0 | + | Listeners | 9 | + | Zombie_connections | 0 | + | Internal_descriptors | 2 | + | Read_events | 22 | + | Write_events | 24 | + | Hangup_events | 0 | + | Error_events | 0 | + | Accept_events | 2 | + | Event_queue_length | 1 | + | Pending_events | 0 | + | Max_event_queue_length | 1 | + | Max_event_queue_time | 0 | + | Max_event_execution_time | 0 | + +---------------------------+-------+ + 22 rows in set (0.02 sec) + + mysql> + +## Show services + +The show services command will return a set of basic statistics regarding each of the configured services within MaxScale. + + mysql> show services; + +----------------+----------------+--------------+----------------+ + | Service Name | Router Module | No. Sessions | Total Sessions | + +----------------+----------------+--------------+----------------+ + | Test Service | readconnroute | 1 | 1 | + | Split Service | readwritesplit | 1 | 1 | + | Filter Service | readconnroute | 1 | 1 | + | Named Service | readwritesplit | 1 | 1 | + | QLA Service | readconnroute | 1 | 1 | + | Debug Service | debugcli | 1 | 1 | + | CLI | cli | 1 | 1 | + | MaxInfo | maxinfo | 4 | 4 | + +----------------+----------------+--------------+----------------+ + 8 rows in set (0.02 sec) + + mysql> + +The show services command does not accept a like clause and will ignore any like clause that is given. + +## Show listeners + +The show listeners command will return a set of status information for every listener defined within the MaxScale configuration file. 
+ + mysql> show listeners; + +----------------+-----------------+-----------+------+---------+ + | Service Name | Protocol Module | Address | Port | State | + +----------------+-----------------+-----------+------+---------+ + | Test Service | MySQLClient | * | 4006 | Running | + | Split Service | MySQLClient | * | 4007 | Running | + | Filter Service | MySQLClient | * | 4008 | Running | + | Named Service | MySQLClient | * | 4010 | Running | + | QLA Service | MySQLClient | * | 4009 | Running | + | Debug Service | telnetd | localhost | 4242 | Running | + | CLI | maxscaled | localhost | 6603 | Running | + | MaxInfo | MySQLClient | * | 9003 | Running | + | MaxInfo | HTTPD | * | 8003 | Running | + +----------------+-----------------+-----------+------+---------+ + 9 rows in set (0.02 sec) + + mysql> + +The show listeners command will ignore any like clause passed to it. + +## Show sessions + +The show sessions command returns information on every active session within MaxScale. It will ignore any like clause passed to it. 
+ + mysql> show sessions; + +-----------+---------------+----------------+---------------------------+ + | Session | Client | Service | State | + +-----------+---------------+----------------+---------------------------+ + | 0x1a92a60 | 127.0.0.1 | MaxInfo | Session ready for routing | + | 0x1a92100 | 80.240.130.35 | MaxInfo | Session ready for routing | + | 0x1a76a00 | | MaxInfo | Listener Session | + | 0x1a76020 | | MaxInfo | Listener Session | + | 0x1a75d40 | | CLI | Listener Session | + | 0x1a75220 | | Debug Service | Listener Session | + | 0x1a774b0 | | QLA Service | Listener Session | + | 0x1a78630 | | Named Service | Listener Session | + | 0x1a60270 | | Filter Service | Listener Session | + | 0x1a606f0 | | Split Service | Listener Session | + | 0x19b0380 | | Test Service | Listener Session | + +-----------+---------------+----------------+---------------------------+ + 11 rows in set (0.02 sec) + + mysql> + +## Show clients + +The show clients command reports a row for every client application connected to MaxScale. Like clauses are not available of the show clients command. + + mysql> show clients; + +-----------+---------------+---------+---------------------------+ + | Session | Client | Service | State | + +-----------+---------------+---------+---------------------------+ + | 0x1a92a60 | 127.0.0.1 | MaxInfo | Session ready for routing | + | 0x1a92100 | 80.240.130.35 | MaxInfo | Session ready for routing | + +-----------+---------------+---------+---------------------------+ + 2 rows in set (0.02 sec) + + mysql> + +## Show servers + +The show servers command returns data for each backend server configured within the MaxScale configuration file. This data includes the current number of connections MaxScale has to that server and the state of that server as monitored by MaxScale. 
+ + mysql> show servers; + +---------+-----------+------+-------------+---------+ + | Server | Address | Port | Connections | Status | + +---------+-----------+------+-------------+---------+ + | server1 | 127.0.0.1 | 3306 | 0 | Running | + | server2 | 127.0.0.1 | 3307 | 0 | Down | + | server3 | 127.0.0.1 | 3308 | 0 | Down | + | server4 | 127.0.0.1 | 3309 | 0 | Down | + +---------+-----------+------+-------------+---------+ + 4 rows in set (0.02 sec) + + mysql> + +## Show modules + +The show modules command reports the information on the modules currently loaded into MaxScale. This includes the name type and version of each module. It also includes the API version the module has been written against and the current release status of the module. + + mysql> show modules; + +----------------+-------------+---------+-------------+----------------+ + | Module Name | Module Type | Version | API Version | Status | + +----------------+-------------+---------+-------------+----------------+ + | HTTPD | Protocol | V1.0.1 | 1.0.0 | In Development | + | maxscaled | Protocol | V1.0.0 | 1.0.0 | GA | + | telnetd | Protocol | V1.0.1 | 1.0.0 | GA | + | MySQLClient | Protocol | V1.0.0 | 1.0.0 | GA | + | mysqlmon | Monitor | V1.4.0 | 1.0.0 | GA | + | readwritesplit | Router | V1.0.2 | 1.0.0 | GA | + | readconnroute | Router | V1.1.0 | 1.0.0 | GA | + | debugcli | Router | V1.1.1 | 1.0.0 | GA | + | cli | Router | V1.0.0 | 1.0.0 | GA | + | maxinfo | Router | V1.0.0 | 1.0.0 | Alpha | + +----------------+-------------+---------+-------------+----------------+ + 10 rows in set (0.02 sec) + + mysql> + + +## Show monitors + +The show monitors command reports each monitor configured within the system and the state of that monitor. 
+ + mysql> show monitors; + +---------------+---------+ + | Monitor | Status | + +---------------+---------+ + | MySQL Monitor | Running | + +---------------+---------+ + 1 row in set (0.02 sec) + + mysql> + + +## Show eventTimes + +The show eventTimes command returns a table of statistics that reflect the performance of the event queuing and execution portion of the MaxScale core. + + mysql> show eventTimes; + +---------------+-------------------+---------------------+ + | Duration | No. Events Queued | No. Events Executed | + +---------------+-------------------+---------------------+ + | < 100ms | 460 | 456 | + | 100 - 200ms | 0 | 3 | + | 200 - 300ms | 0 | 0 | + | 300 - 400ms | 0 | 0 | + | 400 - 500ms | 0 | 0 | + | 500 - 600ms | 0 | 0 | + | 600 - 700ms | 0 | 0 | + | 700 - 800ms | 0 | 0 | + | 800 - 900ms | 0 | 0 | + | 900 - 1000ms | 0 | 0 | + | 1000 - 1100ms | 0 | 0 | + | 1100 - 1200ms | 0 | 0 | + | 1200 - 1300ms | 0 | 0 | + | 1300 - 1400ms | 0 | 0 | + | 1400 - 1500ms | 0 | 0 | + | 1500 - 1600ms | 0 | 0 | + | 1600 - 1700ms | 0 | 0 | + | 1700 - 1800ms | 0 | 0 | + | 1800 - 1900ms | 0 | 0 | + | 1900 - 2000ms | 0 | 0 | + | 2000 - 2100ms | 0 | 0 | + | 2100 - 2200ms | 0 | 0 | + | 2200 - 2300ms | 0 | 0 | + | 2300 - 2400ms | 0 | 0 | + | 2400 - 2500ms | 0 | 0 | + | 2500 - 2600ms | 0 | 0 | + | 2600 - 2700ms | 0 | 0 | + | 2700 - 2800ms | 0 | 0 | + | 2800 - 2900ms | 0 | 0 | + | > 3000ms | 0 | 0 | + +---------------+-------------------+---------------------+ + 30 rows in set (0.02 sec) + + mysql> + +Each row represents a time interval, in 100ms increments, with the counts representing the number of events that were in the event queue for the length of time that row represents and the number of events that were executing of the time indicated by the row. + +# JSON Interface + +The simplified JSON interface takes the URL of the request made to maxinfo and maps that to a show command in the above section. 
+ +## Variables + +The /variables URL will return the MaxScale variables, these variables can not be filtered via this interface. + + $ curl http://maxscale.mariadb.com:8003/variables + [ { "Variable_name" : "version", "Value" : "1.0.6-unstable"}, + { "Variable_name" : "version_comment", "Value" : "MariaDB MaxScale"}, + { "Variable_name" : "basedir", "Value" : "/home/mriddoch/skygate2"}, + { "Variable_name" : "MAXSCALE_VERSION", "Value" : "1.0.6-unstable"}, + { "Variable_name" : "MAXSCALE_THREADS", "Value" : 1}, + { "Variable_name" : "MAXSCALE_NBPOLLS", "Value" : 3}, + { "Variable_name" : "MAXSCALE_POLLSLEEP", "Value" : 1000}, + { "Variable_name" : "MAXSCALE_UPTIME", "Value" : 3948}, + { "Variable_name" : "MAXSCALE_SESSIONS", "Value" : 12}] + $ + +## Status + +Use of the /status URI will return the status information that would normally be returned by the show status command. No filtering of the status information is available via this interface + + $ curl http://maxscale.mariadb.com:8003/status + [ { "Variable_name" : "Uptime", "Value" : 3831}, + { "Variable_name" : "Uptime_since_flush_status", "Value" : 3831}, + { "Variable_name" : "Threads_created", "Value" : 1}, + { "Variable_name" : "Threads_running", "Value" : 1}, + { "Variable_name" : "Threadpool_threads", "Value" : 1}, + { "Variable_name" : "Threads_connected", "Value" : 12}, + { "Variable_name" : "Connections", "Value" : 12}, + { "Variable_name" : "Client_connections", "Value" : 3}, + { "Variable_name" : "Backend_connections", "Value" : 0}, + { "Variable_name" : "Listeners", "Value" : 9}, + { "Variable_name" : "Zombie_connections", "Value" : 0}, + { "Variable_name" : "Internal_descriptors", "Value" : 3}, + { "Variable_name" : "Read_events", "Value" : 469}, + { "Variable_name" : "Write_events", "Value" : 479}, + { "Variable_name" : "Hangup_events", "Value" : 12}, + { "Variable_name" : "Error_events", "Value" : 0}, + { "Variable_name" : "Accept_events", "Value" : 15}, + { "Variable_name" : 
"Event_queue_length", "Value" : 1}, + { "Variable_name" : "Pending_events", "Value" : 0}, + { "Variable_name" : "Max_event_queue_length", "Value" : 1}, + { "Variable_name" : "Max_event_queue_time", "Value" : 0}, + { "Variable_name" : "Max_event_execution_time", "Value" : 1}] + $ + +## Services + +The /services URI returns the data regarding the services defined within the configuration of MaxScale. Two counters are returned, the current number of sessions attached to this service and the total number connected since the service started. + + $ curl http://maxscale.mariadb.com:8003/services + [ { "Service Name" : "Test Service", "Router Module" : "readconnroute", "No. Sessions" : 1, "Total Sessions" : 1}, + { "Service Name" : "Split Service", "Router Module" : "readwritesplit", "No. Sessions" : 1, "Total Sessions" : 1}, + { "Service Name" : "Filter Service", "Router Module" : "readconnroute", "No. Sessions" : 1, "Total Sessions" : 1}, + { "Service Name" : "Named Service", "Router Module" : "readwritesplit", "No. Sessions" : 1, "Total Sessions" : 1}, + { "Service Name" : "QLA Service", "Router Module" : "readconnroute", "No. Sessions" : 1, "Total Sessions" : 1}, + { "Service Name" : "Debug Service", "Router Module" : "debugcli", "No. Sessions" : 1, "Total Sessions" : 1}, + { "Service Name" : "CLI", "Router Module" : "cli", "No. Sessions" : 1, "Total Sessions" : 1}, + { "Service Name" : "MaxInfo", "Router Module" : "maxinfo", "No. Sessions" : 5, "Total Sessions" : 20}] + $ + +## Listeners + +The /listeners URI will return a JSON array with one entry per listener, each entry is a JSON object that describes the configuration and state of that listener. 
+ + $ curl http://maxscale.mariadb.com:8003/listeners + [ { "Service Name" : "Test Service", "Protocol Module" : "MySQLClient", "Address" : "*", "Port" : 4006, "State" : "Running"}, + { "Service Name" : "Split Service", "Protocol Module" : "MySQLClient", "Address" : "*", "Port" : 4007, "State" : "Running"}, + { "Service Name" : "Filter Service", "Protocol Module" : "MySQLClient", "Address" : "*", "Port" : 4008, "State" : "Running"}, + { "Service Name" : "Named Service", "Protocol Module" : "MySQLClient", "Address" : "*", "Port" : 4010, "State" : "Running"}, + { "Service Name" : "QLA Service", "Protocol Module" : "MySQLClient", "Address" : "*", "Port" : 4009, "State" : "Running"}, + { "Service Name" : "Debug Service", "Protocol Module" : "telnetd", "Address" : "localhost", "Port" : 4242, "State" : "Running"}, + { "Service Name" : "CLI", "Protocol Module" : "maxscaled", "Address" : "localhost", "Port" : 6603, "State" : "Running"}, + { "Service Name" : "MaxInfo", "Protocol Module" : "MySQLClient", "Address" : "*", "Port" : 9003, "State" : "Running"}, + { "Service Name" : "MaxInfo", "Protocol Module" : "HTTPD", "Address" : "*", "Port" : 8003, "State" : "Running"}] + $ + +## Modules + +The /modules URI returns data for each plugin that has been loaded into MaxScale. The plugin name, type and version are returned as is the version of the plugin API that the plugin was built against and the release status of the plugin. 
+ + $ curl http://maxscale.mariadb.com:8003/modules + [ { "Module Name" : "HTTPD", "Module Type" : "Protocol", "Version" : "V1.0.1", "API Version" : "1.0.0", "Status" : "In Development"}, + { "Module Name" : "maxscaled", "Module Type" : "Protocol", "Version" : "V1.0.0", "API Version" : "1.0.0", "Status" : "GA"}, + { "Module Name" : "telnetd", "Module Type" : "Protocol", "Version" : "V1.0.1", "API Version" : "1.0.0", "Status" : "GA"}, + { "Module Name" : "MySQLClient", "Module Type" : "Protocol", "Version" : "V1.0.0", "API Version" : "1.0.0", "Status" : "GA"}, + { "Module Name" : "mysqlmon", "Module Type" : "Monitor", "Version" : "V1.4.0", "API Version" : "1.0.0", "Status" : "GA"}, + { "Module Name" : "readwritesplit", "Module Type" : "Router", "Version" : "V1.0.2", "API Version" : "1.0.0", "Status" : "GA"}, + { "Module Name" : "readconnroute", "Module Type" : "Router", "Version" : "V1.1.0", "API Version" : "1.0.0", "Status" : "GA"}, + { "Module Name" : "debugcli", "Module Type" : "Router", "Version" : "V1.1.1", "API Version" : "1.0.0", "Status" : "GA"}, + { "Module Name" : "cli", "Module Type" : "Router", "Version" : "V1.0.0", "API Version" : "1.0.0", "Status" : "GA"}, + { "Module Name" : "maxinfo", "Module Type" : "Router", "Version" : "V1.0.0", "API Version" : "1.0.0", "Status" : "Alpha"}] + $ + +## Sessions + +The /sessions URI returns a JSON array with an object for each active session within MaxScale. 
+ + $ curl http://maxscale.mariadb.com:8003/sessions + [ { "Session" : "0x1a8e9a0", "Client" : "80.176.79.245", "Service" : "MaxInfo", "State" : "Session ready for routing"}, + { "Session" : "0x1a8e6d0", "Client" : "80.240.130.35", "Service" : "MaxInfo", "State" : "Session ready for routing"}, + { "Session" : "0x1a8ddd0", "Client" : , "Service" : "MaxInfo", "State" : "Listener Session"}, + { "Session" : "0x1a92da0", "Client" : , "Service" : "MaxInfo", "State" : "Listener Session"}, + { "Session" : "0x1a92ac0", "Client" : , "Service" : "CLI", "State" : "Listener Session"}, + { "Session" : "0x1a70e90", "Client" : , "Service" : "Debug Service", "State" : "Listener Session"}, + { "Session" : "0x1a758d0", "Client" : , "Service" : "QLA Service", "State" : "Listener Session"}, + { "Session" : "0x1a73a90", "Client" : , "Service" : "Named Service", "State" : "Listener Session"}, + { "Session" : "0x1a5c0b0", "Client" : , "Service" : "Filter Service", "State" : "Listener Session"}, + { "Session" : "0x1a5c530", "Client" : , "Service" : "Split Service", "State" : "Listener Session"}, + { "Session" : "0x19ac1c0", "Client" : , "Service" : "Test Service", "State" : "Listener Session"}] + $ + +## Clients + +The /clients URI is a limited version of the /sessions, in this case it only returns an entry for a session that represents a client connection. + + $ curl http://maxscale.mariadb.com:8003/clients + [ { "Session" : "0x1a90be0", "Client" : "80.176.79.245", "Service" : "MaxInfo", "State" : "Session ready for routing"}, + { "Session" : "0x1a8e9a0", "Client" : "127.0.0.1", "Service" : "MaxInfo", "State" : "Session ready for routing"}, + { "Session" : "0x1a8e6d0", "Client" : "80.240.130.35", "Service" : "MaxInfo", "State" : "Session ready for routing"}] + $ + +## Servers + +The /servers URI is used to retrieve information for each of the servers defined within the MaxScale configuration. This information includes the connection count and the current status as monitored by MaxScale. 
The connection count is only those connections made by MaxScale to those servers. + + $ curl http://maxscale.mariadb.com:8003/servers + [ { "Server" : "server1", "Address" : "127.0.0.1", "Port" : 3306, "Connections" : 0, "Status" : "Running"}, + { "Server" : "server2", "Address" : "127.0.0.1", "Port" : 3307, "Connections" : 0, "Status" : "Down"}, + { "Server" : "server3", "Address" : "127.0.0.1", "Port" : 3308, "Connections" : 0, "Status" : "Down"}, + { "Server" : "server4", "Address" : "127.0.0.1", "Port" : 3309, "Connections" : 0, "Status" : "Down"}] + $ + +## Event Times + +The /event/times URI returns an array of statistics that reflect the performance of the event queuing and execution portion of the MaxScale core. Each element is an object that represents a time bucket, in 100ms increments, with the counts representing the number of events that were in the event queue for the length of time that row represents and the number of events that were executing of the time indicated by the object. + + $ curl http://maxscale.mariadb.com:8003/event/times + [ { "Duration" : "< 100ms", "No. Events Queued" : 64, "No. Events Executed" : 63}, + { "Duration" : " 100 - 200ms", "No. Events Queued" : 0, "No. Events Executed" : 0}, + { "Duration" : " 200 - 300ms", "No. Events Queued" : 0, "No. Events Executed" : 0}, + { "Duration" : " 300 - 400ms", "No. Events Queued" : 0, "No. Events Executed" : 0}, + { "Duration" : " 400 - 500ms", "No. Events Queued" : 0, "No. Events Executed" : 0}, + { "Duration" : " 500 - 600ms", "No. Events Queued" : 0, "No. Events Executed" : 0}, + { "Duration" : " 600 - 700ms", "No. Events Queued" : 0, "No. Events Executed" : 0}, + { "Duration" : " 700 - 800ms", "No. Events Queued" : 0, "No. Events Executed" : 0}, + { "Duration" : " 800 - 900ms", "No. Events Queued" : 0, "No. Events Executed" : 0}, + { "Duration" : " 900 - 1000ms", "No. Events Queued" : 0, "No. Events Executed" : 0}, + { "Duration" : "1000 - 1100ms", "No. Events Queued" : 0, "No. 
Events Executed" : 0}, + { "Duration" : "1100 - 1200ms", "No. Events Queued" : 0, "No. Events Executed" : 0}, + { "Duration" : "1200 - 1300ms", "No. Events Queued" : 0, "No. Events Executed" : 0}, + { "Duration" : "1300 - 1400ms", "No. Events Queued" : 0, "No. Events Executed" : 0}, + { "Duration" : "1400 - 1500ms", "No. Events Queued" : 0, "No. Events Executed" : 0}, + { "Duration" : "1500 - 1600ms", "No. Events Queued" : 0, "No. Events Executed" : 0}, + { "Duration" : "1600 - 1700ms", "No. Events Queued" : 0, "No. Events Executed" : 0}, + { "Duration" : "1700 - 1800ms", "No. Events Queued" : 0, "No. Events Executed" : 0}, + { "Duration" : "1800 - 1900ms", "No. Events Queued" : 0, "No. Events Executed" : 0}, + { "Duration" : "1900 - 2000ms", "No. Events Queued" : 0, "No. Events Executed" : 0}, + { "Duration" : "2000 - 2100ms", "No. Events Queued" : 0, "No. Events Executed" : 0}, + { "Duration" : "2100 - 2200ms", "No. Events Queued" : 0, "No. Events Executed" : 0}, + { "Duration" : "2200 - 2300ms", "No. Events Queued" : 0, "No. Events Executed" : 0}, + { "Duration" : "2300 - 2400ms", "No. Events Queued" : 0, "No. Events Executed" : 0}, + { "Duration" : "2400 - 2500ms", "No. Events Queued" : 0, "No. Events Executed" : 0}, + { "Duration" : "2500 - 2600ms", "No. Events Queued" : 0, "No. Events Executed" : 0}, + { "Duration" : "2600 - 2700ms", "No. Events Queued" : 0, "No. Events Executed" : 0}, + { "Duration" : "2700 - 2800ms", "No. Events Queued" : 0, "No. Events Executed" : 0}, + { "Duration" : "2800 - 2900ms", "No. Events Queued" : 0, "No. Events Executed" : 0}, + { "Duration" : "> 3000ms", "No. Events Queued" : 0, "No. 
Events Executed" : 0}] diff --git a/Documentation/Tutorials/MySQL-Cluster-Setup.md b/Documentation/Tutorials/MySQL-Cluster-Setup.md index 45de0f2b0..0cf1209a3 100644 --- a/Documentation/Tutorials/MySQL-Cluster-Setup.md +++ b/Documentation/Tutorials/MySQL-Cluster-Setup.md @@ -249,7 +249,7 @@ Add these sections in MaxScale.cnf config file: Assuming MaxScale is installed in server1, start it - [root@server1 ~]# cd /usr/local/skysql/maxscale/bin + [root@server1 ~]# cd /usr/local/mariadb-maxscale/bin [root@server1 bin]# ./maxscale -c ../ diff --git a/Documentation/Tutorials/MySQL-Replication-Connection-Routing-Tutorial.md b/Documentation/Tutorials/MySQL-Replication-Connection-Routing-Tutorial.md index b95769402..8fd8b496a 100644 --- a/Documentation/Tutorials/MySQL-Replication-Connection-Routing-Tutorial.md +++ b/Documentation/Tutorials/MySQL-Replication-Connection-Routing-Tutorial.md @@ -70,7 +70,7 @@ If you wish to use two different usernames for the two different roles of monito ## Creating Your MaxScale Configuration -MaxScale configuration is held in an ini file that is located in the file MaxScale.cnf in the directory $MAXSCALE_HOME/etc, if you have installed in the default location then this file is available in /usr/local/skysql/maxscle/etc/MaxScale.cnf. This is not created as part of the installation process and must be manually created. A template file does exist within this directory that may be use as a basis for your configuration. +MaxScale configuration is held in an ini file that is located in the file MaxScale.cnf in the directory $MAXSCALE_HOME/etc, if you have installed in the default location then this file is available in /usr/local/mariadb-maxscale/etc/MaxScale.cnf. This is not created as part of the installation process and must be manually created. A template file does exist within this directory that may be use as a basis for your configuration. 
A global, maxscale, section is included within every MaxScale configuration file; this is used to set the values of various MaxScale wide parameters, perhaps the most important of these is the number of threads that MaxScale will use to execute the code that forwards requests and handles responses for clients. @@ -130,7 +130,7 @@ servers=dbserv1, dbserv2, dbserv3 The final step in the service sections is to add the username and password that will be used to populate the user data from the database cluster. There are two options for representing the password, either plain text or encrypted passwords may be used. In order to use encrypted passwords a set of keys must be generated that will be used by the encryption and decryption process. To generate the keys use the maxkeys command and pass the name of the secrets file in which the keys are stored. -% maxkeys /usr/local/skysql/maxscale/etc/.secrets +% maxkeys /usr/local/mariadb-maxscale/etc/.secrets % @@ -292,9 +292,9 @@ or % service maxscale start -Check the error log in /usr/local/skysql/maxscale/log to see if any errors are detected in the configuration file and to confirm MaxScale has been started. Also the maxadmin command may be used to confirm that MaxScale is running and the services, listeners etc have been correctly configured. +Check the error log in /usr/local/mariadb-maxscale/log to see if any errors are detected in the configuration file and to confirm MaxScale has been started. Also the maxadmin command may be used to confirm that MaxScale is running and the services, listeners etc have been correctly configured. -% maxadmin -pskysql list services +% maxadmin -pmariadb list services Services. @@ -312,7 +312,7 @@ CLI | cli | 2 | 2 --------------------------+----------------------+--------+--------------- -% maxadmin -pskysql list servers +% maxadmin -pmariadb list servers Servers. 
@@ -330,7 +330,7 @@ dbserv3 | 192.168.2.3 | 3306 | 0 | Running, Slave -------------------+-----------------+-------+-------------+-------------------- -% maxadmin -pskysql list listeners +% maxadmin -pmariadb list listeners Listeners. diff --git a/Documentation/Tutorials/MySQL-Replication-Read-Write-Splitting-Tutorial.md b/Documentation/Tutorials/MySQL-Replication-Read-Write-Splitting-Tutorial.md index 6a527d421..49c151dc2 100644 --- a/Documentation/Tutorials/MySQL-Replication-Read-Write-Splitting-Tutorial.md +++ b/Documentation/Tutorials/MySQL-Replication-Read-Write-Splitting-Tutorial.md @@ -70,7 +70,7 @@ If you wish to use two different usernames for the two different roles of monito ## Creating Your MaxScale Configuration -MaxScale configuration is held in an ini file that is located in the file MaxScale.cnf in the directory $MAXSCALE_HOME/etc, if you have installed in the default location then this file is available in /usr/local/skysql/maxscale/etc/MaxScale.cnf. This is not created as part of the installation process and must be manually created. A template file does exist within this directory that may be use as a basis for your configuration. +MaxScale configuration is held in an ini file that is located in the file MaxScale.cnf in the directory $MAXSCALE_HOME/etc, if you have installed in the default location then this file is available in /usr/local/mariadb-maxscale/etc/MaxScale.cnf. This is not created as part of the installation process and must be manually created. A template file does exist within this directory that may be use as a basis for your configuration. A global, maxscale, section is included within every MaxScale configuration file; this is used to set the values of various MaxScale wide parameters, perhaps the most important of these is the number of threads that MaxScale will use to execute the code that forwards requests and handles responses for clients. 
@@ -96,7 +96,7 @@ servers=dbserv1, dbserv2, dbserv3 The final step in the service sections is to add the username and password that will be used to populate the user data from the database cluster. There are two options for representing the password, either plain text or encrypted passwords may be used. In order to use encrypted passwords a set of keys must be generated that will be used by the encryption and decryption process. To generate the keys use the maxkeys command and pass the name of the secrets file in which the keys are stored. -% maxkeys /usr/local/skysql/maxscale/etc/.secrets +% maxkeys /usr/local/mariadb-maxscale/etc/.secrets % @@ -226,9 +226,9 @@ or % service maxscale start -Check the error log in /usr/local/skysql/maxscale/log to see if any errors are detected in the configuration file and to confirm MaxScale has been started. Also the maxadmin command may be used to confirm that MaxScale is running and the services, listeners etc have been correctly configured. +Check the error log in /usr/local/mariadb-maxscale/log to see if any errors are detected in the configuration file and to confirm MaxScale has been started. Also the maxadmin command may be used to confirm that MaxScale is running and the services, listeners etc have been correctly configured. -% maxadmin -pskysql list services +% maxadmin -pmariadb list services Services. @@ -244,7 +244,7 @@ CLI | cli | 2 | 2 --------------------------+----------------------+--------+--------------- -% maxadmin -pskysql list servers +% maxadmin -pmariadb list servers Servers. @@ -262,7 +262,7 @@ dbserv3 | 192.168.2.3 | 3306 | 0 | Running, Slave -------------------+-----------------+-------+-------------+-------------------- -% maxadmin -pskysql list listeners +% maxadmin -pmariadb list listeners Listeners. 
diff --git a/Documentation/Tutorials/Nagios-Plugins.md b/Documentation/Tutorials/Nagios-Plugins.md new file mode 100644 index 000000000..9ee3a87c7 --- /dev/null +++ b/Documentation/Tutorials/Nagios-Plugins.md @@ -0,0 +1,157 @@ +# MaxScale Nagios plugins, for Nagios 3.5.1 + +Massimiliano Pinto + +Last Updated: 12th March 2015 + +## Document History + +
/usr/local/skysql/maxscale/log/*.log { + /usr/local/mariadb-maxscale/log/*.log { monthly rotate 5 missingok nocompress sharedscripts postrotate -kill -USR1 `cat /usr/local/skysql/maxscale/log/maxscale.pid` +kill -USR1 `cat /usr/local/mariadb-maxscale/log/maxscale.pid` endscript }
+ + + + + + + + + + +
DateChangeWho
10th March 2015Initial versionMassimiliano Pinto
+ +# Introduction + +Nagios® Core™ is an Open Source system and network monitoring application. It watches hosts and services that you specify, alerting you when things go bad and when they get better. +Nagios plugins are compiled executables or scripts (Perl scripts, shell scripts, etc.) that can be run from a command line to check the status of a host or service. Nagios uses the results from plugins to determine the current status of hosts and services on your network. +Nagios core executes a plugin whenever there is a need to check the status of a service or host. + +While MaxScale resources and status can be monitored via CLI using maxadmin commands, Nagios Plugin provides an automated way for system administration and database administrators to monitor MaxScale. The diagram below provides a view of how Nagios and MaxScale interact. +![Nagios and MaxScale interaction](images/HowMaxScaleWorksWithNagios.png) + + +There are three nagios plugin scripts that MaxScale provides. + +1. check_maxscale_threads.pl: This command provides you the status of current running threads and events in the queue on MaxScale Server. The Performance data associated with this command includes current and historic wait time for threads and events + +2. check_maxscale_resources.pl: This command provides you status of various resources on MaxScale server. The Performance data associated provides details on respective resources. +Current resources are: modules, services, listeners, servers, sessions, filters. + +3. check_maxscale_monitor.pl: This command provides you status of the configured monitor modules on MaxScale server. + +In order to use these scripts on your Nagios Server, you need to copy them from the MaxScale binary package or download them from source tree on github. 
+ +# MaxScale Nagios Plugin Requirements + +MaxScale must be configured with 'maxscaled' protocol for the administration interface: + +Example of MaxScale.cnf file: + + [AdminInterface] + type=service + router=cli + + [AdminListener] + type=listener + service=AdminInterface + protocol=maxscaled + port=6603 + +## Prepare Nagios configuration files. + +Assuming Nagios is installed on a separate server and the plugins are in /usr/lib64/nagios/plugins and configuration files are in /etc/nagios: + +* Copy MaxScale plugin scripts (./nagios/plugins/check_maxscale_*.pl) to /usr/lib64/nagios/plugins on Nagios Server +* Copy New commands and server1 definition (./nagios/plugins/maxscale_commands.cfg, server1.cfg) to /etc/nagios/objects/ on Nagios Server +* Edit /etc/nagios/nagios.cfg on Nagios Server + +and add (just after localhost.cfg or commands.cfg) + + cfg_file=/etc/nagios/objects/maxscale_commands.cfg + cfg_file=/etc/nagios/objects/server1.cfg + +### Please note: +- modify server IP address in server1.cfg, pointing to MaxScale server +- maxadmin executable must be in the nagios server +- default MaxScale AdminInterface port is 6603 +- default maxadmin executable path is /usr/local/mariadb-maxscale/bin/maxadmin + It can be changed by -m option +- maxadmin executable could be copied from an existing maxscale installation (default location is /usr/local/mariadb-maxscale/bin/maxadmin) + +This example shows configuration that needs to be done on Nagios server in order to communicate to MaxScale server that is running on host server1. +In this example we are using the check_maxscale_resource as the check command + + #Check MaxScale sessions, on the remote machine. 
+ define service{ + use local-service + host_name server1 + service_description MaxScale_sessions + check_command check_maxscale_resource!6603!admin!mariadb!sessions!/path_to/maxadmin + notifications_enabled 0 + } + +### Check new running monitors +* Restart Nagios and check new monitors are running in HTTP Interface "Current Status -> Services" on Nagios Server +* Look for any errors in /var/log/nagios/nagios.log or nagios.debug on Nagios Server + +# Nagios Plugin command line usage + + (1) ./check_maxscale_threads.pl -h + + MaxScale monitor checker plugin for Nagios + + Usage: check_maxscale_threads.pl [-r ] [-H ] [-P ] [-u ] [-p ] [-m ] [-h] + + Options: + -r = threads + -h = provide this usage message + -H = which host to connect to + -P = port to use + -u = username to connect as + -p = password to use for at + -m = /path/to/maxadmin + + (2) ./check_maxscale_resources.pl -h + + MaxScale monitor checker plugin for Nagios + + Usage: check_maxscale_resources.pl [-r ] [-H ] [-P ] [-u ] [-p ] [-m ] [-h] + + Options: + -r = modules|services|filters|listeners|servers|sessions + -h = provide this usage message + -H = which host to connect to + -P = port to use + -u = username to connect as + -p = password to use for at + -m = /path/to/maxadmin + + (3) ./check_maxscale_monitor.pl -h + + MaxScale monitor checker plugin for Nagios + + Usage: check_maxscale_monitor.pl [-r ] [-H ] [-P ] [-u ] [-p ] [-m ] [-h] + + Options: + -r = monitors + -h = provide this usage message + -H = which host to connect to + -P = port to use + -u = username to connect as + -p = password to use for at + -m = /path/to/maxadmin + +# Output description: + +Example for 'services' + + #./check_maxscale_resources.pl -r resources + + OK: 7 services found | services1=RW_Router;readwritesplit;1;1 services2=RW_Split;readwritesplit;1;1 services3=Test Service;readconnroute;1;1 services4=Master Service;readconnroute;2;2 services5=Debug Service;debugcli;1;1 services6=CLI;cli;2;145 
services7=MaxInfo;maxinfo;2;2 + +Returns OK and the number of services + +Returns CRITICAL if no services are found + +The data after | char are so called performance data and may be collected by Nagios +output format is: + servicex=Name;router_module;NumUsers;TotalSessions diff --git a/Documentation/Tutorials/Notification-Service.md b/Documentation/Tutorials/Notification-Service.md new file mode 100644 index 000000000..03edd0378 --- /dev/null +++ b/Documentation/Tutorials/Notification-Service.md @@ -0,0 +1,94 @@ +# MaxScale Notification Service and Feedback Support + +Massimiliano Pinto + +Last Updated: 10th March 2015 + +## Contents + +## Document History + + + + + + + + + + + + +
DateChangeWho
10th March 2015Initial versionMassimiliano Pinto
+ + +## Overview + +The purpose of Notification Service in MaxScale is for a customer registered for the service to receive update notices, security bulletins, fixes and workarounds that are tailored to the database server configuration. + +## MaxScale Setup + +MaxScale may collect the installed plugins and send the information nightly, between 2:00 AM and 4:59 AM. + +It tries to send data and if there is any failure (timeout, server is down, etc), the next retry is in 1800 seconds (30 minutes). + +This feature is not enabled by default: MaxScale must be configured in [feedback] section: + + + [feedback] + feedback_enable=1 + feedback_url=https://enterprise.mariadb.com/feedback/post + feedback_user_info=x-y-z-w + +The activation code will be provided by MariaDB Corp upon request by the customer and it should be put in feedback_user_info. + +Example: +feedback_user_info=0467009f-b04d-45b1-a77b-b6b2ec9c6cf4 + + +MaxScale generates the feedback report containing the following information: + + - The activation code used to enable feedback + - MaxScale Version + - An identifier of the MaxScale installation, i.e. the HEX encoding of SHA1 digest of the first network interface MAC address + - Operating System (i.e. Linux) + - Operating System Distribution (i.e. 
CentOS release 6.5 (Final)) + - All the modules in use in MaxScale and their API and version + - MaxScale server UNIX_TIME at generation time + +MaxScale shall send the generated feedback report to a feedback server specified in feedback_url + + +## Manual Operation + +If it’s not possible to send data due to firewall or security settings the report could be generated manually (feedback_user_info is required) via MaxAdmin + + +MaxScale>show feedbackreport + + +Report could be saved to report.txt file: + + +maxadmin -uxxx -pyyy show feedbackreport > ./report.txt + +curl -F data=@./report.txt https://mariadb.org/feedback_plugin/post + + +Report Example: + + FEEDBACK_SERVER_UID 6B5C44AEA73137D049B02E6D1C7629EF431A350F + FEEDBACK_USER_INFO 0467009f-b04d-45b1-a77b-b6b2ec9c6cf4 + VERSION 1.0.6-unstable + NOW 1425914890 + PRODUCT maxscale + Uname_sysname Linux + Uname_distribution CentOS release 6.5 (Final) + module_maxscaled_type Protocol + module_maxscaled_version V1.0.0 + module_maxscaled_api 1.0.0 + module_maxscaled_releasestatus GA + module_telnetd_type Protocol + module_telnetd_version V1.0.1 + module_telnetd_api 1.0.0 + module_telnetd_releasestatus GA diff --git a/Documentation/Tutorials/Simple-Sharding-Tutorial.md b/Documentation/Tutorials/Simple-Sharding-Tutorial.md new file mode 100644 index 000000000..6419386fb --- /dev/null +++ b/Documentation/Tutorials/Simple-Sharding-Tutorial.md @@ -0,0 +1,98 @@ +#Simple Sharding with Two Servers + +![Schema Based Sharding](images/Simple-Sharding.png) + +Sharding is the method of splitting a single database server into separate parts. This tutorial describes a very simple way of sharding. Each schema is located on a different database server and MaxScale's **schemarouter** module is used to combine them into a single database server. + +MaxScale will appear to the client as a database server with the combination of all the schemas in all the configured servers. 
+ +## Environment & Solution Space + +This document is designed as a simple tutorial on schema-based sharding using MaxScale in an environment in which you have two servers. The object of this tutorial is to have a system that, to the client side, acts like a single MySQL database but actually is sharded between the two servers. + +The process of setting and configuring MaxScale will be covered within this document. The installation and configuration of the MySQL servers will not be covered in-depth. The users should be configured according to the configuration guide. + +This tutorial will assume the user is running from one of the binary distributions available and has installed this in the default location. Building from source code in GitHub is covered in guides elsewhere as is installing to non-default locations. + +## Process + +The steps involved in creating a system from the binary distribution of MaxScale are: + +* Install the package relevant to your distribution + +* Create the required users on your MariaDB or MySQL server + +* Create a MaxScale configuration file + +### Installation + +The precise installation process will vary from one distribution to another; details of what to do with the RPM and DEB packages can be found on the download site when you select the distribution you are downloading from. The process involves setting up your package manager to include the MariaDB repositories and then running the package manager for your distribution, RPM or apt-get. + +Upon successful completion of the installation command you will have MaxScale installed and ready to be run but without a configuration. You must create a configuration file before you first run MaxScale. + +### Creating Your MaxScale Configuration + +The first step in the creation of your MaxScale.cnf file is to define the global maxscale section. This section configures the number of threads MaxScale uses. A good rule of thumb is to use at most as many threads as you have CPUs. 
MaxScale uses few threads for internal operations so one or two threads less than the maximum should be enough. + +``` +[maxscale] +threads=8 +``` + +After this we configure two servers we will use to shard our database. The `accounts_east` server will hold one schema and the `accounts_west` will hold another schema. We will use these two servers to create our sharded database. + +``` +[accounts_east] +type=server +address=192.168.56.102 +port=3306 + +[accounts_west] +type=server +address=192.168.122.85 +port=3306 +``` + +The next step is to configure the service which the users connect to. This section defines which router to use, which servers to connect to and the credentials to use. The router we use in this tutorial is the `schemarouter`. + +``` +[Sharded Service] +type=service +router=schemarouter +servers=accounts_west,accounts_east +user=sharduser +passwd=YqztlYGDvZ8tVMe3GUm9XCwQi +``` + +After this we configure a listener for the service. The listener is the actual port the user connects to. We will use the port 4000. + +``` +[Sharded Service Listener] +type=listener +service=Sharded Service +protocol=MySQLClient +port=4000 +``` + +The final step is to configure a monitor which will monitor the state of the servers. The monitor will notify MaxScale if the servers are down. We add the two servers to the monitor, define the credentials to use and we set the monitoring cycle interval. + +``` +[MySQL Monitor] +type=monitor +module=mysqlmon +servers=accounts_west,accounts_east +user=monitoruser +passwd=7SP1Zcsow8TG+9EkEBVEbaKa +monitor_interval=1000 +``` + +After this we have a fully working configuration and we can move on to starting MaxScale. + +## Starting MaxScale + +Upon completion of the configuration process MaxScale is ready to be started . This may either be done manually by running the maxscale command or via the service interface. 
The service scripts are located in the `/etc/init.d/` folder and are accessible through both the `service` and `systemctl` commands. + +After starting MaxScale check the error log in /usr/local/mariadb-maxscale/log to see if any errors are detected in the configuration file. Also the maxadmin command may be used to confirm that MaxScale is running and the services, listeners etc have been correctly configured. + +MaxScale is now ready to start accepting client connections and routing them. Queries are routed to the right servers based on the database they target and switching between the shards is seamless since MaxScale keeps the session state intact between servers. + diff --git a/Documentation/Tutorials/images/HowMaxScaleWorksWithNagios.png b/Documentation/Tutorials/images/HowMaxScaleWorksWithNagios.png new file mode 100644 index 000000000..5e009b55c Binary files /dev/null and b/Documentation/Tutorials/images/HowMaxScaleWorksWithNagios.png differ diff --git a/Documentation/Tutorials/images/Simple-Sharding.png b/Documentation/Tutorials/images/Simple-Sharding.png new file mode 100644 index 000000000..6fe821c5c Binary files /dev/null and b/Documentation/Tutorials/images/Simple-Sharding.png differ diff --git a/Documentation/Upgrading-To-MaxScale-1.1.0.md b/Documentation/Upgrading-To-MaxScale-1.1.0.md new file mode 100644 index 000000000..343fc2fd7 --- /dev/null +++ b/Documentation/Upgrading-To-MaxScale-1.1.0.md @@ -0,0 +1,19 @@ +# Upgrading MaxScale from 1.0 to 1.1 + +This document describes upgrading MaxScale from version 1.0.5 to 1.1.0 and the major differences in the new version compared to the old version. The major changes can be found in the `Changelog.txt` file in the installation directory and the official release notes in the `ReleaseNotes.txt` file. + +## Installation + +If you are installing MaxScale from an RPM package, we recommend you back up your configuration and log files and that you remove the old installation of MaxScale completely. 
If you choose to upgrade MaxScale instead of removing it and re-installing it afterwards, the init scripts in `/etc/init.d` folder will be missing. This is due to the RPM packaging system but the script can be re-installed by running the `postinst` script found in the `/usr/local/mariadb-maxscale` folder. + +``` +# Re-install init scripts +cd /usr/local/mariadb-maxscale +./postinst +``` + +The 1.1.0 version of MaxScale installs into `/usr/local/mariadb-maxscale` instead of `/usr/local/skysql/maxscale`. This will cause external references to MaxScale's home directory to stop working so remember to update all paths with the new version. + +## MaxAdmin changes + +The MaxAdmin client's default password in MaxScale 1.1.0 is `mariadb` instead of `skysql`. diff --git a/Documentation/filters/Database-Firewall-Filter.md b/Documentation/filters/Database-Firewall-Filter.md new file mode 100644 index 000000000..9dc415a8a --- /dev/null +++ b/Documentation/filters/Database-Firewall-Filter.md @@ -0,0 +1,121 @@ +#Database Firewall filter + +## Overview +The database firewall filter is used to block queries that match a set of rules. It can be used to prevent harmful queries into the database or to limit the access to the database based on a more defined set of rules compared to the traditional GRANT-based rights management. + +## Configuration + +The database firewall filter only requires a minimal set of configurations in the MaxScale.cnf file. The actual rules of the database firewall filter are located in a separate text file. The following is an example of a database firewall filter configuration in the MaxScale.cnf file. + +``` +[Database Firewall] +type=filter +module=dbfwfilter +rules=/home/user/rules.txt +``` + +### Filter Options + +The database firewall filter does not support any filter options. + +### Filter Parameters + +The database firewall filter has one mandatory parameter that defines the location of the rule file. 
This is the `rules` parameter and it expects an absolute path to the rule file. + +## Rule syntax + +The rules are defined by using the following syntax. + +``` +rule NAME deny [wildcard | columns VALUE ... | + regex REGEX | limit_queries COUNT TIMEPERIOD HOLDOFF | + no_where_clause] [at_times VALUE...] [on_queries [select|update|insert|delete]]` +``` + +Rules always define a blocking action so the basic mode for the database firewall filter is to allow all queries that do not match a given set of rules. Rules are identified by their name and have a mandatory part and optional parts. + +The first step of defining a rule is to start with the keyword `rule` which identifies this line of text as a rule. The second token is identified as the name of the rule. After that the mandatory token `deny` is required to mark the start of the actual rule definition. + +### Mandatory rule parameters + +The database firewall filter's rules expect a single mandatory parameter for a rule. You can define multiple rules to cover situations where you would like to apply multiple mandatory rules to a query. + +#### Wildcard + +This rule blocks all queries that use the wildcard character *. + +#### Columns + +This rule expects a list of values after the `columns` keyword. These values are interpreted as column names and if a query targets any of these, it is blocked. + +#### Regex + +This rule blocks all queries matching a regex enclosed in single or double quotes. + +#### Limit_queries + +The limit_queries rule expects three parameters. The first parameter is the number of allowed queries during the time period. The second is the time period in seconds and the third is the amount of time for which the rule is considered active and blocking. + +#### No_where_clause + +This rule inspects the query and blocks it if it has no where clause. This way you can't do a DELETE FROM ... query without having the where clause. This does not prevent wrongful usage of the where clause e.g. 
DELETE FROM ... WHERE 1=1. + +### Optional rule parameters + +Each mandatory rule accepts one or more optional parameters. These are to be defined after the mandatory part of the rule. + +#### At_times + +This rule expects a list of time ranges that define the times when the rule in question is active. The time formats are expected to be ISO-8601 compliant and to be separated by a single dash (the - character). For example defining the active period of a rule to be 17:00 to 19:00 you would add `at_times 17:00:00-19:00:00` to the end of the rule. + +#### On_queries + +This limits the rule to be active only on certain types of queries. + +### Applying rules to users + +To apply the defined rules to users use the following syntax. + +`users NAME ... match [any|all|strict_all] rules RULE ...` + +The first keyword is users which identifies this line as a user definition line. After this a list of user names and network addresses in the format `user@0.0.0.0` is expected. The first part is the user name and the second part is the network address. You can use the `%` character as the wildcard to enable user name matching from any address or network matching for all users. After the list of users and networks the keyword match is expected. + +After this either the keyword `any` `all` or `strict_all` is expected. This defines how the rules are matched. If `any` is used when the first rule is matched the query is considered blocked and the rest of the rules are skipped. If instead the `all` keyword is used all rules must match for the query to be blocked. The `strict_all` is the same as `all` but it checks the rules from left to right in the order they were listed. If one of these does not match, the rest of the rules are not checked. This could be useful in situations where you would for example combine `limit_queries` and `regex` rules. By using `strict_all` you can have the `regex` rule first and the `limit_queries` rule second.
This way the rule only matches if the `regex` rule matches enough times for the `limit_queries` rule to match. + +After the matching part comes the rules keyword after which a list of rule names is expected. This allows reusing of the rules and enables varying levels of query restriction. + +## Use Cases + +### Use Case 1 - Prevent rapid execution of specific queries + +To prevent the excessive use of a database we want to set a limit on the rate of queries. We only want to apply this limit to certain queries that cause unwanted behavior. To achieve this we can use a regular expression. + +First we define the limit on the rate of queries. The first parameter for the rule sets the number of allowed queries to 10 queries and the second parameter sets the rate of sampling to 5 seconds. If a user executes queries faster than this, any further queries that match the regular expression are blocked for 60 seconds. + +``` +rule limit_rate_of_queries deny limit_queries 10 5 60 +rule query_regex deny regex '.*select.*from.*user_data.*' +``` + +To apply these rules we combine them into a single rule by adding a `users` line to the rule file. + +``` +users %@% match all rules limit_rate_of_queries query_regex +``` + +### Use Case 2 - Only allow deletes with a where clause + +We have a table which contains all the managers of a company. We want to prevent accidental deletes into this table where the where clause is missing. This poses a problem, we don't want to require all the delete queries to have a where clause. We only want to prevent the data in the managers table from being deleted without a where clause. + +To achieve this, we need two rules. The first rule defines that all delete operations must have a where clause. This rule alone does us no good so we need a second one. The second rule blocks all queries that match a regular expression. 
+ +``` +rule safe_delete deny no_where_clause on_queries delete +rule managers_table deny regex '.*from.*managers.*' +``` + +When we combine these two rules we get the result we want. To combine these two rules add the following line to the rule file. + +``` +users %@% match all rules safe_delete managers_table +``` diff --git a/Documentation/filters/Firewall-Filter.md b/Documentation/filters/Firewall-Filter.md deleted file mode 100644 index 9d99b359a..000000000 --- a/Documentation/filters/Firewall-Filter.md +++ /dev/null @@ -1,110 +0,0 @@ -#Firewall filter - -## Overview -The firewall filter is used to block queries that match a set of rules. It can be used to prevent harmful queries into the database or to limit the access to the database based on a more defined set of rules compared to the traditional GRANT-based rights management. - -## Configuration - -The firewall filter only requires a minimal set of configurations in the MaxScale.cnf file. The actual rules of the firewall filter are located in a separate text file. The following is an example of a firewall filter configuration in the MaxScale.cnf file. - - - [Firewall] - type=filter - module=fwfilter - rules=/home/user/rules.txt - -### Filter Options - -The firewall filter does not support any filter options. - -### Filter Parameters - -The firewall filter has one mandatory parameter that defines the location of the rule file. This is the `rules` parameter and it expects an absolute path to the rule file. - -## Rule syntax - -The rules are defined by using the following syntax. - -` rule NAME deny [wildcard | columns VALUE ... | - regex REGEX | limit_queries COUNT TIMEPERIOD HOLDOFF | - no_where_clause] [at_times VALUE...] [on_queries [select|update|insert|delete]]` - -Rules always define a blocking action so the basic mode for the firewall filter is to allow all queries that do not match a given set of rules. Rules are identified by their name and have a mandatory part and optional parts. 
- -The first step of defining a rule is to start with the keyword `rule` which identifies this line of text as a rule. The second token is identified as the name of the rule. After that the mandatory token `deny` is required to mark the start of the actual rule definition. - -### Mandatory rule parameters - -The firewall filter's rules expect a single mandatory parameter for a rule. You can define multiple rules to cover situations where you would like to apply multiple mandatory rules to a query. - -#### Wildcard - -This rule blocks all queries that use the wildcard character *. - -#### Columns - -This rule expects a list of values after the `columns` keyword. These values are interpreted as column names and if a query targets any of these, it is blocked. - -#### Regex - -This rule blocks all queries matching a regex enclosed in single or double quotes. - -#### Limit_queries - -The limit_queries rule expects three parameters. The first parameter is the number of allowed queries during the time period. The second is the time period in seconds and the third is the amount of time for which the rule is considered active and blocking. - -#### No_where_clause - -This rule inspects the query and blocks it if it has no where clause. This way you can't do a DELETE FROM ... query without having the where clause. This does not prevent wrongful usage of the where clause e.g. DELETE FROM ... WHERE 1=1. - -### Optional rule parameters - -Each mandatory rule accepts one or more optional parameters. These are to be defined after the mandatory part of the rule. - -#### At_times - -This rule expects a list of time ranges that define the times when the rule in question is active. The time formats are expected to be ISO-8601 compliant and to be separated by a single dash (the - character). For example defining the active period of a rule to be 17:00 to 19:00 you would add `at times 17:00:00-19:00:00` to the end of the rule. 
- -#### On_queries - -This limits the rule to be active only on certain types of queries. - -### Applying rules to users - -To apply the defined rules to users use the following syntax. - -`users NAME ... match [any|all|strict_all] rules RULE ...` - -The first keyword is users which identifies this line as a user definition line. After this a list of user names and network addresses in the format `user@0.0.0.0` is expected. The first part is the user name and the second part is the network address. You can use the `%` character as the wildcard to enable user name matching from any address or network matching for all users. After the list of users and networks the keyword match is expected. - -After this either the keyword `any` `all` or `strict_all` is expected. This defined how the rules are matched. If `any` is used when the first rule is matched the query is considered blocked and the rest of the rules are skipped. If instead the `all` keyword is used all rules must match for the query to be blocked. The `strict_all` is the same as `all` but it checks the rules from left to right in the order they were listed. If one of these does not match, the rest of the rules are not checked. This could be usedful in situations where you would for example combine `limit_queries` and `regex` rules. By using `strict_all` you can have the `regex` rule first and the `limit_queries` rule second. This way the rule only matches if the `regex` rule matches enough times for the `limit_queries` rule to match. - -After the matching part comes the rules keyword after which a list of rule names is expected. This allows reusing of the rules and enables varying levels of query restriction. - -## Examples - -### Example rule file - -The following is an example of a rule file which defines six rules and applies them to three sets of users. This rule file is used in all of the examples. 
- - rule block_wildcard deny wildcard at_times 8:00:00-17:00:00 - rule no_personal_info deny columns phone salary address on_queries select|delete at_times 12:00:00-18:00:00 - rule simple_regex deny regex '.*insert.*into.*select.*' - rule dos_block deny limit_queries 10000 1.0 500.0 at_times 12:00:00-18:00:00 - rule safe_delete deny no_where_clause on_queries delete - rule managers_table deny regex '.*from.*managers.*' - users John@% Jane@% match any rules no_personal_info block_wildcard - users %@80.120.% match any rules block_wildcard dos_block - users %@% match all rules safe_delete managers_table - -### Example 1 - Deny access to personal information and prevent huge queries during peak hours - -Assume that a database cluster with tables that have a large number of columns is under heavy load during certain times of the day. Now also assume that large selects and querying of personal information creates unwanted stress on the cluster. Now we wouldn't want to completely prevent all the users from accessing personal information or performing large select queries, we only want to block the users John and Jane. - -This can be achieved by creating two rules. One that blocks the usage of the wildcard and one that prevents queries that target a set of columns. To apply these rules to the users we define a users line into the rule file with both the rules and all the users we want to apply the rules to. The rules are defined in the example rule file on line 1 and 2 and the users line is defined on line 7. - -### Example 2 - Only safe deletes into the managers table - -We want to prevent accidental deletes into the managers table where the where clause is missing. This poses a problem, we don't want to require all the delete queries to have a where clause. We only want to prevent the data in the managers table from being deleted without a where clause. - -To achieve this, we need two rules. The first rule can be seen on line 5 in the example rule file. 
This defines that all delete operations must have a where clause. This rule alone does us no good so we need a second one. The second rule is defined on line 6 and it blocks all queries that match the provided regular expression. When we combine these two rules we get the result we want. You can see the application of these rules on line 9 of the example rule file. The usage of the `all` and `strict_all` matching mode requires that all the rules must match for the query to be blocked. This in effect combines the two rules into a more complex rule. diff --git a/Documentation/filters/Tee-Filter.md b/Documentation/filters/Tee-Filter.md index be77e83b8..777b16fa9 100644 --- a/Documentation/filters/Tee-Filter.md +++ b/Documentation/filters/Tee-Filter.md @@ -6,7 +6,7 @@ The tee filter is a filter module for MaxScale is a "plumbing" fitting in the Ma # Configuration -The configuration block for the TEE filter requires the minimal filter parameters in it’s section within the MaxScale.cnf file, stored in $MAXSCALE_HOME/etc/MaxScale.cnf, that defines the filter to load and the service to send the duplicates to. +The configuration block for the TEE filter requires the minimal filter parameters in it’s section within the MaxScale.cnf file, stored in $MAXSCALE_HOME/etc/MaxScale.cnf, that defines the filter to load and the service to send the duplicates to. Currently the tee filter does not support multi-statements. 
[DataMartFilter] diff --git a/Documentation/format.pl b/Documentation/format.pl new file mode 100644 index 000000000..461412fed --- /dev/null +++ b/Documentation/format.pl @@ -0,0 +1,29 @@ +open(my $in,"<",@ARGV[0]); +open(my $out,">",@ARGV[1]); +my $tbl = 0; +while(<$in>){ + if(/<table>/) + { + $tbl = 1; + } + elsif(/<\/table>/) + { + $tbl = 0; + } + else + { + if($tbl == 1) + { + s/\n//; + s/<\/tr>/\n/; + s/<td>//; + s/<\/td>/\t/; + s/^ +//; + s/ +$//; + } + s/[*]/\t/; + print $out "$_"; + } +} + diff --git a/Documentation/generate-pdf.cmake b/Documentation/generate-pdf.cmake index 982332e45..9ef8ebde8 100644 --- a/Documentation/generate-pdf.cmake +++ b/Documentation/generate-pdf.cmake @@ -7,12 +7,19 @@ find_package(Pandoc) if(PANDOC_FOUND AND BUILD_DIR) file(MAKE_DIRECTORY ${BUILD_DIR}/pdf) - file(GLOB_RECURSE MARKDOWN *.md) + file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/makepdf.sh DESTINATION ${BUILD_DIR}) + execute_process(COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_SOURCE_DIR}/Documentation ${BUILD_DIR}) + file(GLOB_RECURSE MARKDOWN ${CMAKE_CURRENT_BINARY_DIR}/*.md) + foreach(VAR ${MARKDOWN}) - string(REPLACE ".md" ".pdf" OUTPUT ${VAR}) - get_filename_component(DIR ${VAR} DIRECTORY) - string(REPLACE "${CMAKE_CURRENT_BINARY_DIR}" "${BUILD_DIR}/pdf" FILE ${OUTPUT}) - execute_process(COMMAND ${CMAKE_COMMAND} -E chdir ${DIR} ${PANDOC_EXECUTABLE} ${VAR} -o ${OUTPUT}) - execute_process(COMMAND ${CMAKE_COMMAND} -E copy ${OUTPUT} ${FILE}) + execute_process(COMMAND ${BUILD_DIR}/makepdf.sh ${VAR}) + execute_process(COMMAND ${CMAKE_COMMAND} -E echo ${VAR}) endforeach() + + file(GLOB PDF ${BUILD_DIR}/Documentation/*.pdf) + + foreach(FILE ${PDF}) + execute_process(COMMAND ${CMAKE_COMMAND} -E copy ${FILE} ${BUILD_DIR}/pdf/) + endforeach() + endif() diff --git a/Documentation/generate-txt-release.cmake b/Documentation/generate-txt-release.cmake new file mode 100644 index 000000000..f963e5b55 --- /dev/null +++ b/Documentation/generate-txt-release.cmake @@ -0,0 +1,12 @@ +# The BUILD_DIR
variable is set at runtime + +cmake_minimum_required(VERSION 2.8.12) + +set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake/") + +file(MAKE_DIRECTORY ${BUILD_DIR}/txt) +file(GLOB_RECURSE MARKDOWN Release-Notes/*.md) +foreach(VAR ${MARKDOWN}) + get_filename_component(NEWNAME ${VAR} NAME) + execute_process(COMMAND perl ${CMAKE_CURRENT_BINARY_DIR}/format.pl ${VAR} ${BUILD_DIR}/txt/${NEWNAME}.txt) +endforeach() diff --git a/Documentation/makepdf.sh b/Documentation/makepdf.sh new file mode 100755 index 000000000..879a0bcc5 --- /dev/null +++ b/Documentation/makepdf.sh @@ -0,0 +1,61 @@ +#!/usr/bin/env bash + +# This template is really important. It includes changes to make images a reasonable size, +# gracefully handle Header 4 and Header 5 translation to paragraph and subparagraph, +# stick the MariaDB logo at the top of the document, etc. +template=$PWD/maxscale.latex + +# make sure we can find LaTeX. feel free to remove this if you have it somewhere more normal + + +pwd=$PWD + +input=$1 +if ! shift; then + echo "ERROR: must specify input filename" >&2 + exit 1 +fi + +file=${input##*/} +basename=${file%%.*} +basedir=${input%/*} + +# we have to cd to the location of the file so that images with relative paths can be found +if ! cd "$basedir"; then + echo "ERROR: could not cd to $basedir" >&2 + exit 1 +fi + +# this filter function can be used for some pipline of miscellaneous stuff you want to do to the input file +# if you want to add more filters, you can just build a normal Unix pipeline. +filter(){ + # this instructs pandoc to build a titleblock + # the idea is that the first line will be something like "MariaDB MaxScale" + # and the 2nd line will be something like "Configuration & Usage Scenarios". + # put a hard linebreak between those so they're both part of the "title". + # pandoc supports another line that is the author. right now I manually make that blank. 
+ # and we add the current date to the end of the 2 lines in the titleblock + #date=$(date +"%B %e, %Y") + printf -v date "%(%B %e, %Y)T" + #awk ' /^$/ {p++} p==1{printf "%% %s\n", "'"$date"'";p++} !p{printf "%% "} {print} ' + awk ' NR==1{ printf "%% " } # put % in front of first line + NR==2{ printf " " } # put some space in front of 2nd line. pandoc requires this to continue the title + NR==3{ printf "%% %s", "'"$date"'" } # 3rd line becomes the date. + {printf "%s", $0} # now print whatever was actually on the line. (but leave off the newline) should have been blank for the 3rd line! + {printf "%s", "\n"} # newline. + ' +} + + +pandoc_vars=( + -V fontsize=12pt + -V version=1.10 + -V geometry:margin=1in + --toc + -t latex + --latex-engine=xelatex + --template="$template" +) + +pandoc "${pandoc_vars[@]}" <"$file" -o "${pwd}/${basename}.pdf" + diff --git a/Documentation/maxscale.latex b/Documentation/maxscale.latex new file mode 100644 index 000000000..03552e1e9 --- /dev/null +++ b/Documentation/maxscale.latex @@ -0,0 +1,219 @@ +\documentclass[$if(fontsize)$$fontsize$,$endif$$if(lang)$$lang$,$endif$$if(papersize)$$papersize$,$endif$$for(classoption)$$classoption$$sep$,$endfor$]{$documentclass$} + +% Package titlesec is used to support redefining paragraph and subparagraph +\usepackage[raggedright]{titlesec} +$if(fontfamily)$ +\usepackage{$fontfamily$} +$else$ +\usepackage{lmodern} +$endif$ +$if(linestretch)$ +\usepackage{setspace} +\setstretch{$linestretch$} +$endif$ +\usepackage{amssymb,amsmath} +\usepackage{ifxetex,ifluatex} +\usepackage{fixltx2e} % provides \textsubscript +\ifnum 0\ifxetex 1\fi\ifluatex 1\fi=0 % if pdftex + \usepackage[T1]{fontenc} + \usepackage[utf8]{inputenc} +$if(euro)$ + \usepackage{eurosym} +$endif$ +\else % if luatex or xelatex + \ifxetex + \usepackage{mathspec} + \usepackage{xltxtra,xunicode} + \else + \usepackage{fontspec} + \fi + \defaultfontfeatures{Mapping=tex-text,Scale=MatchLowercase} + \newcommand{\euro}{€} +$if(mainfont)$ + 
\setmainfont{$mainfont$} +$endif$ +$if(sansfont)$ + \setsansfont{$sansfont$} +$endif$ +$if(monofont)$ + \setmonofont[Mapping=tex-ansi]{$monofont$} +$endif$ +$if(mathfont)$ + \setmathfont(Digits,Latin,Greek){$mathfont$} +$endif$ +\fi +% use upquote if available, for straight quotes in verbatim environments +\IfFileExists{upquote.sty}{\usepackage{upquote}}{} +% use microtype if available +\IfFileExists{microtype.sty}{% +\usepackage{microtype} +\UseMicrotypeSet[protrusion]{basicmath} % disable protrusion for tt fonts +}{} +$if(geometry)$ +\usepackage[$for(geometry)$$geometry$$sep$,$endfor$]{geometry} +$endif$ +$if(lang)$ +\ifxetex + \usepackage{polyglossia} + \setmainlanguage{$mainlang$} +\else + \usepackage[shorthands=off,$lang$]{babel} +\fi +$endif$ +$if(natbib)$ +\usepackage{natbib} +\bibliographystyle{$if(biblio-style)$$biblio-style$$else$plainnat$endif$} +$endif$ +$if(biblatex)$ +\usepackage{biblatex} +$if(biblio-files)$ +\bibliography{$biblio-files$} +$endif$ +$endif$ +$if(listings)$ +\usepackage{listings} +$endif$ +$if(lhs)$ +\lstnewenvironment{code}{\lstset{language=Haskell,basicstyle=\small\ttfamily}}{} +$endif$ +$if(highlighting-macros)$ +$highlighting-macros$ +$endif$ +$if(verbatim-in-note)$ +\usepackage{fancyvrb} +\VerbatimFootnotes +$endif$ +$if(tables)$ +\usepackage{longtable,booktabs} +$endif$ +$if(graphics)$ +\usepackage{graphicx} +\makeatletter +\def\maxwidth{\ifdim\Gin@nat@width>\linewidth\linewidth\else\Gin@nat@width\fi} +\def\maxheight{\ifdim\Gin@nat@height>\textheight\textheight\else\Gin@nat@height\fi} +\makeatother +% Scale images if necessary, so that they will not overflow the page +% margins by default, and it is still possible to overwrite the defaults +% using explicit options in \includegraphics[width, height, ...]{} +%\setkeys{Gin}{width=.75\textwidth,keepaspectratio} +%\setkeys{Gin}{width=10cm,keepaspectratio} +%\setkeys{Gin}{width=\maxwidth,height=\maxheight,keepaspectratio} + +% we redefine the \includegraphics command so that it always 
scales with a factor of 0.5 +% for some reason, without this images are too large and grainy and make ugly PDFs +\let\ORIincludegraphics\includegraphics +\renewcommand{\includegraphics}[2][]{\ORIincludegraphics[scale=0.5,#1]{#2}} +$endif$ +\ifxetex + \usepackage[setpagesize=false, % page size defined by xetex + unicode=false, % unicode breaks when used with xetex + xetex]{hyperref} +\else + \usepackage[unicode=true]{hyperref} +\fi +\hypersetup{breaklinks=true, + bookmarks=true, + pdfauthor={$author-meta$}, + pdftitle={$title-meta$}, + colorlinks=true, + citecolor=$if(citecolor)$$citecolor$$else$blue$endif$, + urlcolor=$if(urlcolor)$$urlcolor$$else$blue$endif$, + linkcolor=$if(linkcolor)$$linkcolor$$else$magenta$endif$, + pdfborder={0 0 0}} +\urlstyle{same} % don't use monospace font for urls +$if(links-as-notes)$ +% Make links footnotes instead of hotlinks: +\renewcommand{\href}[2]{#2\footnote{\url{#1}}} +$endif$ +$if(strikeout)$ +\usepackage[normalem]{ulem} +% avoid problems with \sout in headers with hyperref: +\pdfstringdefDisableCommands{\renewcommand{\sout}{}} +$endif$ +\setlength{\parindent}{0pt} +\setlength{\parskip}{6pt plus 2pt minus 1pt} +\setlength{\emergencystretch}{3em} % prevent overfull lines +$if(numbersections)$ +\setcounter{secnumdepth}{5} +$else$ +\setcounter{secnumdepth}{0} +$endif$ +$if(verbatim-in-note)$ +\VerbatimFootnotes % allows verbatim text in footnotes +$endif$ + +% These depend on package titlesec +% These support "Header 4" and "Header 5" from markdown/pandoc. Those translate, respectively, +% to "paragraph" and "subparagraph" in LaTeX. Without these tricks, they're rendered on the same +% line as the following body text. 
+\titleformat{\paragraph}[hang]{\normalfont\normalsize\bfseries}{\theparagraph}{1em}{} +\titlespacing*{\paragraph}{0pt}{3.25ex plus 1ex minus .2ex}{0.5em} +\titleformat{\subparagraph}[hang]{\normalfont\normalsize\bfseries}{\theparagraph}{1em}{} +\titlespacing*{\subparagraph}{0pt}{3.25ex plus 1ex minus .2ex}{0.5em} + +$if(title)$ +\title{ + % Add MariaDB Corporation logo above document title + $title$$if(subtitle)$\\\vspace{0.5em}{\large $subtitle$}$endif$ +} +$endif$ +$if(author)$ +\author{$for(author)$$author$$sep$ \and $endfor$} +$endif$ +\date{$date$} +$for(header-includes)$ +$header-includes$ +$endfor$ + + +\begin{document} +$if(title)$ +\maketitle +$endif$ +$if(abstract)$ +\begin{abstract} +$abstract$ +\end{abstract} +$endif$ + +$for(include-before)$ +$include-before$ + +$endfor$ +$if(toc)$ +{ +\hypersetup{linkcolor=black} +\setcounter{tocdepth}{$toc-depth$} +\tableofcontents +} +$endif$ +$if(lot)$ +\listoftables +$endif$ +$if(lof)$ +\listoffigures +$endif$ +$body$ + +$if(natbib)$ +$if(biblio-files)$ +$if(biblio-title)$ +$if(book-class)$ +\renewcommand\bibname{$biblio-title$} +$else$ +\renewcommand\refname{$biblio-title$} +$endif$ +$endif$ +\bibliography{$biblio-files$} + +$endif$ +$endif$ +$if(biblatex)$ +\printbibliography$if(biblio-title)$[title=$biblio-title$]$endif$ + +$endif$ +$for(include-after)$ +$include-after$ + +$endfor$ +\end{document} diff --git a/Documentation/routers/schemarouter/SchemaRouter.md b/Documentation/routers/schemarouter/SchemaRouter.md new file mode 100644 index 000000000..cd6d032af --- /dev/null +++ b/Documentation/routers/schemarouter/SchemaRouter.md @@ -0,0 +1,54 @@ +#SchemaRouter Router + +The SchemaRouter router provides an easy and manageable sharding solution by building a single logical database server from multiple separate ones. Each database is shown to the client and queries targeting unique databases are routed to their respective servers. 
In addition to providing simple database-based sharding, the schemarouter router also enables cross-node session variable usage by routing all queries that modify the session to all nodes. + +## Routing Logic + +If a command line client is used, i.e. `mysql`, and a direct connection to the database is initialized without a default database, the router starts with no default server where the queries are routed. This means that each query that doesn't specify a database is routed to the first available server. + +If a `USE <database>` query is executed or a default database is defined when connecting to MaxScale, all queries without explicitly stated databases will be routed to the server which has this database. If multiple servers have the same database and the user connecting to MaxScale has rights to all of them, the database is associated to the first server that responds when the databases are mapped. In practice this means that query results will always come from a single server but the data might not always be from the same node. + +In almost all the cases these can be avoided by proper server configuration and the databases are always mapped to the same servers. More on configuration in the next chapter. + +## Configuration + +Here is an example configuration of the schemarouter router: + + [Shard Router] + type=service + router=schemarouter + servers=server1,server2 + user=myuser + passwd=mypwd + +The module generates the list of databases based on the servers parameter using the connecting client's credentials. The user and passwd parameters define the credentials that are used to fetch the authentication data from the database servers. The credentials used only require the same grants as mentioned in the configuration documentation. + +The list of databases is built by sending a SHOW DATABASES query to all the servers. This requires the user to have at least USAGE and SELECT grants on the databases that need be sharded.
+ +For example, if two servers have the database 'shard' and the following rights are granted only on one server, all queries targeting the database 'shard' would be routed to the server where the grants were given. +``` +# Execute this on both servers +CREATE USER 'john'@'%' IDENTIFIED BY 'password'; + +# Execute this only on the server where you want the queries to go +GRANT SELECT,USAGE ON shard.* TO 'john'@'%'; +``` +This would in effect allow the user 'john' to only see the database 'shard' on this server. Take notice that these grants are matched against MaxScale's hostname instead of the client's hostname. Only user authentication uses the client's hostname and all other grants use MaxScale's hostname. + +## Limitations + +The schemarouter router currently has some limitations due to the nature of the sharding implementation and the way the session variables are detected and routed. Here is a list of the current limitations. + +- Cross-database queries (e.g. SELECT column FROM database1.table UNION select column FROM database2.table) are not supported and are routed either to the first explicit database in the query, the current database in use or to the first available database, if none of the previous conditions are met. + +- Queries without explicit databases that are not session commands in them are either routed to the current or the first available database. This means that, for example when creating a new database, queries should be done directly on the node or the router should be equipped with the hint filter and a routing hint should be used. + +- Temporary tables are only created on the explicit database in the query or the current database in use. If no database is in use and no database is explicitly stated, the behavior of the router is undefined. + +- SELECT queries that modify session variables are not currently supported because uniform results can not be guaranteed. If such a query is executed, the behavior of the router is undefined. 
To work around this limitation the query must be executed in separate parts. + +- Queries targeting databases not mapped by the schemarouter router but still exist on the database server are not blocked but routed to the first available server. This possibly returns an error about database rights instead of a missing database. The behavior of the router is undefined in this case. + +## Examples + +[Here](../../Tutorials/Simple-Sharding-Tutorial.md) is a small tutorial on how to set up a sharded database. diff --git a/README b/README index d105075ae..8d3c08a13 100644 --- a/README +++ b/README @@ -29,7 +29,7 @@ issues and communicate with the MaxScale community. or use the [forum](http://groups.google.com/forum/#!forum/maxscale) interface Bugs can be reported in the MariaDB Corporation bugs database - [bug.mariadb.com](http://bugs.mariadb.com) + [https://mariadb.atlassian.net/secure/CreateIssue!default.jspa](https://mariadb.atlassian.net/secure/CreateIssue!default.jspa) under project MXS. \section Documentation diff --git a/SETUP b/SETUP deleted file mode 100644 index 4e51bef5d..000000000 --- a/SETUP +++ /dev/null @@ -1,33 +0,0 @@ -Installation and startup - -Untar the binary distribution in the desired location, -e.g. /usr/local/skysql - -Alternatively build from the source code using the instructions -in the README file and execute make install. - -Simply set the environment variable MAXSCALE_HOME to point to the -MaxScale directory, found inside the path into which the files have been copied, -e.g. MAXSCALE_HOME=/usr/local/skysql/maxscale - -Also you will need to optionaly set LD_LIBRARY_PATH to include the 'lib' folder, -found inside the path into which the files have been copied, -e.g. LD_LIBRARY_PATH=/usr/local/skysql/maxscale/lib - -To start MaxScale execute the command 'maxscale' from the bin folder, -e.g. /usr/local/skysql/maxscale/bin/maxscale or by using the -installed init.d script. 
- -Configuration - -You need to create and edit the file MaxScale.cnf in $MAXSCALE_HOME/etc. -You should define the set of server definitions you require, with the addresses -and ports of those servers. Also define the listening ports for your -various services. - -In order to view the internal activity of MaxScale you can telnet to -the port defined for the telnet listener. Initially you may login with -the user name of "admin" and the password "skysql". Once connected type -help for an overview of the commands and help for the more -detailed help on commands. Use the 'add user' command to add a new user -which will also remove the admin/skysql user. diff --git a/VERSION b/VERSION index 1464c521f..9084fa2f7 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.0.5 \ No newline at end of file +1.1.0 diff --git a/client/test/maxadmin_stress.sh b/client/test/maxadmin_stress.sh index 8d495dd4c..4d59ad50d 100644 --- a/client/test/maxadmin_stress.sh +++ b/client/test/maxadmin_stress.sh @@ -9,14 +9,14 @@ echo Running $clients parallel iterations of $cmdcnt commands for ((cnt=0; cnt<$clients; cnt++ )); do for ((i=0; i<$cmdcnt; i++ )); do - maxadmin -pskysql show services; + maxadmin -pmariadb show services; done >/dev/null & done >& /dev/null peak=0 while [ "`jobs -p`" != "" ]; do jobs >& /dev/null - zombies=`maxadmin -pskysql list dcbs | grep -ci zombies` + zombies=`maxadmin -pmariadb list dcbs | grep -ci zombies` if [ $zombies -gt $peak ] ; then peak=$zombies fi @@ -29,7 +29,7 @@ else passed=`expr $passed + 1` echo "Zombie collection ($peak): Passed" fi -zombies=`maxadmin -pskysql list dcbs | grep -ci zombies` +zombies=`maxadmin -pmariadb list dcbs | grep -ci zombies` if [ $zombies != "0" ]; then echo "Residual zombie DCBs: Failed" failure=`expr $failure + 1` @@ -37,7 +37,7 @@ else passed=`expr $passed + 1` echo "Residual zombie DCBs: Passed" fi -sessions=`maxadmin -pskysql list services | awk -F\| '/ cli/ { print $3 }'` +sessions=`maxadmin -pmariadb list services | awk 
-F\| '/ cli/ { print $3 }'` if [ $sessions -gt 3 ]; then echo "Session shutdown, $sessions: Failed" failure=`expr $failure + 1` @@ -46,7 +46,7 @@ else echo "Session shutdown: Passed" fi -sessions=`maxadmin -pskysql list services | awk -F\| '/ cli/ { print $4 }'` +sessions=`maxadmin -pmariadb list services | awk -F\| '/ cli/ { print $4 }'` echo "Test run complete. $passed passes, $failure failures" echo "$sessions CLI sessions executed" diff --git a/client/test/maxadmin_test.sh b/client/test/maxadmin_test.sh index dd3acaaa9..c58c1512e 100755 --- a/client/test/maxadmin_test.sh +++ b/client/test/maxadmin_test.sh @@ -1,7 +1,7 @@ #!/bin/sh failure=0 passed=0 -maxadmin -pskysql help >& /dev/null +maxadmin -pmariadb help >& /dev/null if [ $? -eq "1" ]; then echo "Auth test (correct password): Failed" failure=`expr $failure + 1` @@ -17,7 +17,7 @@ else passed=`expr $passed + 1` echo "Auth test (wrong password): Passed" fi -maxadmin --password=skysql help >& /dev/null +maxadmin --password=mariadb help >& /dev/null if [ $? -eq "1" ]; then echo "Auth test (long option): Failed" failure=`expr $failure + 1` @@ -33,7 +33,7 @@ for op in enable disable do for cmd in heartbeat root do - maxadmin -pskysql $op $cmd >& /dev/null + maxadmin -pmariadb $op $cmd >& /dev/null if [ $? -eq "1" ]; then echo "$op $cmd (missing arg): Failed" failure=`expr $failure + 1` @@ -42,7 +42,7 @@ do echo "$op $cmd (missing arg): Passed" fi - maxadmin -pskysql $op $cmd qwerty >& /dev/null + maxadmin -pmariadb $op $cmd qwerty >& /dev/null if [ $? 
-eq "1" ]; then echo "$op $cmd (invalid arg): Failed" failure=`expr $failure + 1` @@ -51,7 +51,7 @@ do echo "$op $cmd (invalied arg): Passed" fi - maxadmin -pskysql $op $cmd xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx >& /dev/null + maxadmin -pmariadb $op $cmd xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx >& /dev/null if [ $? -eq "1" ]; then echo "$op $cmd (long invalid arg): Failed" failure=`expr $failure + 1` @@ -65,7 +65,7 @@ done # # Test reload dbusers with short, and long garbage and without argument # -maxadmin -pskysql reload dbusers qwerty >& /dev/null +maxadmin -pmariadb reload dbusers qwerty >& /dev/null if [ $? -eq "1" ]; then echo "Reload dbusers (invalid arg): Failed" failure=`expr $failure + 1` @@ -74,7 +74,7 @@ else echo "Reload dbusers (invalid arg): Passed" fi -maxadmin -pskysql reload dbusers xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx >& /dev/null +maxadmin -pmariadb reload dbusers xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx >& /dev/null if [ $? -eq "1" ]; then echo "Reload dbusers (long invalid arg): Failed" failure=`expr $failure + 1` @@ -84,7 +84,7 @@ else fi -maxadmin -pskysql reload dbusers >& /dev/null +maxadmin -pmariadb reload dbusers >& /dev/null if [ $? -eq "1" ]; then echo "Reload dbusers (missing arg): Failed" failure=`expr $failure + 1` @@ -96,7 +96,7 @@ fi # # Test enable|disable log debug|trace|message|error # -maxadmin -pskysql enable log debug >& /dev/null +maxadmin -pmariadb enable log debug >& /dev/null if [ $? 
-eq "1" ]; then echo "Enable debug log: Failed" failure=`expr $failure + 1` @@ -105,7 +105,7 @@ else echo "Enable debug log: Passed" fi -maxadmin -pskysql enable log trace >& /dev/null +maxadmin -pmariadb enable log trace >& /dev/null if [ $? -eq "1" ]; then echo "Enable trace log: Failed" failure=`expr $failure + 1` @@ -114,7 +114,7 @@ else echo "Enable trace log: Passed" fi -maxadmin -pskysql enable log message >& /dev/null +maxadmin -pmariadb enable log message >& /dev/null if [ $? -eq "1" ]; then echo "Enable message log: Failed" failure=`expr $failure + 1` @@ -123,7 +123,7 @@ else echo "Enable message log: Passed" fi -maxadmin -pskysql enable log error >& /dev/null +maxadmin -pmariadb enable log error >& /dev/null if [ $? -eq "1" ]; then echo "Enable error log: Failed" failure=`expr $failure + 1` @@ -134,7 +134,7 @@ fi -maxadmin -pskysql disable log debug >& /dev/null +maxadmin -pmariadb disable log debug >& /dev/null if [ $? -eq "1" ]; then echo "Disable debug log: Failed" failure=`expr $failure + 1` @@ -143,7 +143,7 @@ else echo "Disable debug log: Passed" fi -maxadmin -pskysql disable log trace >& /dev/null +maxadmin -pmariadb disable log trace >& /dev/null if [ $? -eq "1" ]; then echo "Disable trace log: Failed" failure=`expr $failure + 1` @@ -157,7 +157,7 @@ fi # for cmd in monitor service do - maxadmin -pskysql restart $cmd >& /dev/null + maxadmin -pmariadb restart $cmd >& /dev/null if [ $? -eq "1" ]; then echo "Restart $cmd (missing arg): Failed" failure=`expr $failure + 1` @@ -166,7 +166,7 @@ do echo "Restart $cmd (missing arg): Passed" fi - maxadmin -pskysql restart $cmd xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx >& /dev/null + maxadmin -pmariadb restart $cmd xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx >& /dev/null if [ $? 
-eq "1" ]; then echo "Restart $cmd (long invalid arg): Failed" failure=`expr $failure + 1` @@ -175,7 +175,7 @@ do echo "Restart $cmd (long invalid arg): Passed" fi - maxadmin -pskysql restart $cmd qwerty >& /dev/null + maxadmin -pmariadb restart $cmd qwerty >& /dev/null if [ $? -eq "1" ]; then echo "Restart $cmd (invalid arg): Failed" failure=`expr $failure + 1` @@ -188,7 +188,7 @@ done # # Test set server qwerty master withaout, with invalid and with long invalid arg # -maxadmin -pskysql set server qwerty >& /dev/null +maxadmin -pmariadb set server qwerty >& /dev/null if [ $? -eq "1" ]; then echo "Set server qwerty (missing arg): Failed" failure=`expr $failure + 1` @@ -197,7 +197,7 @@ else echo "Set server (missing arg): Passed" fi -maxadmin -pskysql set server qwerty mamaster >& /dev/null +maxadmin -pmariadb set server qwerty mamaster >& /dev/null if [ $? -eq "1" ]; then echo "Set server qwerty (invalid arg): Failed" failure=`expr $failure + 1` @@ -206,7 +206,7 @@ else echo "Set server qwerty (invalid arg): Passed" fi -maxadmin -pskysql set server qwerty xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx >& /dev/null +maxadmin -pmariadb set server qwerty xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx >& /dev/null if [ $? -eq "1" ]; then echo "Set server qwerty (long invalid arg): Failed" failure=`expr $failure + 1` @@ -218,7 +218,7 @@ fi for cmd in clients dcbs filters listeners modules monitors services servers sessions threads do - maxadmin -pskysql list $cmd | grep -s '-' >& /dev/null + maxadmin -pmariadb list $cmd | grep -s '-' >& /dev/null if [ $? 
-eq "1" ]; then echo "list command ($cmd): Failed" failure=`expr $failure + 1` @@ -230,7 +230,7 @@ done for cmd in dcbs dbusers epoll filters modules monitors services servers sessions threads users do - maxadmin -pskysql show $cmd | grep -s ' ' >& /dev/null + maxadmin -pmariadb show $cmd | grep -s ' ' >& /dev/null if [ $? -eq "1" ]; then echo "show command ($cmd): Failed" failure=`expr $failure + 1` @@ -240,7 +240,7 @@ do fi done -master=`maxadmin -pskysql list servers | awk '/Master/ { print $1; }'` +master=`maxadmin -pmariadb list servers | awk '/Master/ { print $1; }'` if [ $? -eq "1" ]; then echo "Extract master server: Failed" failure=`expr $failure + 1` @@ -255,7 +255,7 @@ else passed=`expr $passed + 1` echo "Get master server: Passed" fi -maxadmin -pskysql show server $master | grep -s 'Master' >& /dev/null +maxadmin -pmariadb show server $master | grep -s 'Master' >& /dev/null if [ $? -eq "1" ]; then echo "show server master: Failed" failure=`expr $failure + 1` @@ -264,7 +264,7 @@ else echo "show server master: Passed" fi -maxadmin -pskysql set server $master maint >& /dev/null +maxadmin -pmariadb set server $master maint >& /dev/null if [ $? -eq "1" ]; then echo "set server $master maint: Failed" failure=`expr $failure + 1` @@ -273,7 +273,7 @@ else echo "set server $master maint: Passed" fi -maxadmin -pskysql list servers | grep $master | grep -s 'Maint' >& /dev/null +maxadmin -pmariadb list servers | grep $master | grep -s 'Maint' >& /dev/null if [ $? -eq "1" ]; then echo "set maintenance mode: Failed" failure=`expr $failure + 1` @@ -282,7 +282,7 @@ else echo "set maintenance mode: Passed" fi -maxadmin -pskysql clear server $master maint >& /dev/null +maxadmin -pmariadb clear server $master maint >& /dev/null if [ $? 
-eq "1" ]; then echo "clear server: Failed" failure=`expr $failure + 1` @@ -290,7 +290,7 @@ else passed=`expr $passed + 1` echo "clear server: Passed" fi -maxadmin -pskysql list servers | grep $master | grep -s 'Maint' >& /dev/null +maxadmin -pmariadb list servers | grep $master | grep -s 'Maint' >& /dev/null if [ $? -eq "0" ]; then echo "clear maintenance mode: Failed" failure=`expr $failure + 1` @@ -299,7 +299,7 @@ else echo "clear maintenance mode: Passed" fi -dcbs=`maxadmin -pskysql list dcbs | awk -F\| '/listening/ { if ( NF > 1 ) print $1 }'` +dcbs=`maxadmin -pmariadb list dcbs | awk -F\| '/listening/ { if ( NF > 1 ) print $1 }'` if [ $? -eq "1" ]; then echo "Get dcb listeners: Failed" failure=`expr $failure + 1` @@ -310,7 +310,7 @@ fi for i in $dcbs do - maxadmin -pskysql show dcb $i | grep -s 'listening' >& /dev/null + maxadmin -pmariadb show dcb $i | grep -s 'listening' >& /dev/null if [ $? -eq "1" ]; then echo "show dcb listeners: Failed" failure=`expr $failure + 1` @@ -325,7 +325,7 @@ done # for cmd in dcb eventq filter monitor server service sessions do - maxadmin -pskysql show $cmd qwerty | grep -s '-' >& /dev/null + maxadmin -pmariadb show $cmd qwerty | grep -s '-' >& /dev/null if [ $? -eq "0" ]; then echo "show $cmd (invalid arg): Failed" failure=`expr $failure + 1` @@ -340,7 +340,7 @@ done # for cmd in monitor service do - maxadmin -pskysql shutdown $cmd qwerty | grep -s '-' >& /dev/null + maxadmin -pmariadb shutdown $cmd qwerty | grep -s '-' >& /dev/null if [ $? -eq "0" ]; then echo "Shutdown $cmd (invalid extra arg):Failed" failure=`expr $failure + 1` @@ -351,7 +351,7 @@ do done -sessions=`maxadmin -pskysql list sessions | awk -F\| '/Listener/ { if ( NF > 1 ) print $1 }'` +sessions=`maxadmin -pmariadb list sessions | awk -F\| '/Listener/ { if ( NF > 1 ) print $1 }'` if [ $? 
-eq "1" ]; then echo "Get listener sessions: Failed" failure=`expr $failure + 1` @@ -362,7 +362,7 @@ fi for i in $sessions do - maxadmin -pskysql show session $i | grep -s 'Listener' >& /dev/null + maxadmin -pmariadb show session $i | grep -s 'Listener' >& /dev/null if [ $? -eq "1" ]; then echo "show session listeners: Failed" failure=`expr $failure + 1` @@ -372,7 +372,7 @@ do fi done -filters=`maxadmin -pskysql list filters | awk -F\| '{ if ( NF > 1 ) print $1 }'| grep -v Options` +filters=`maxadmin -pmariadb list filters | awk -F\| '{ if ( NF > 1 ) print $1 }'| grep -v Options` if [ $? -eq "1" ]; then echo "Get Filter list: Failed" failure=`expr $failure + 1` @@ -383,7 +383,7 @@ fi for i in $filters do - maxadmin -pskysql show filter $i | grep -s 'Filter' >& /dev/null + maxadmin -pmariadb show filter $i | grep -s 'Filter' >& /dev/null if [ $? -eq "1" ]; then echo "show filter: Failed" failure=`expr $failure + 1` @@ -393,7 +393,7 @@ do fi done -maxadmin -pskysql list services | \ +maxadmin -pmariadb list services | \ awk -F\| '{ if (NF > 1) { sub(/ +$/, "", $1); printf("show service \"%s\"\n", $1); } }' > script1.$$ grep -cs "show service" script1.$$ >/dev/null if [ $? -ne "0" ]; then @@ -403,7 +403,7 @@ else passed=`expr $passed + 1` echo "list services: Passed" fi -maxadmin -pskysql script1.$$ | grep -cs 'Service' > /dev/null +maxadmin -pmariadb script1.$$ | grep -cs 'Service' > /dev/null if [ $? -ne "0" ]; then echo "Show Service: Failed" failure=`expr $failure + 1` @@ -414,7 +414,7 @@ fi rm -f script1.$$ -maxadmin -pskysql list monitors | \ +maxadmin -pmariadb list monitors | \ awk -F\| '{ if (NF > 1) { sub(/ +$/, "", $1); printf("show monitor \"%s\"\n", $1); } }' > script1.$$ grep -cs "show monitor" script1.$$ >/dev/null if [ $? -ne "0" ]; then @@ -424,7 +424,7 @@ else passed=`expr $passed + 1` echo "list monitors: Passed" fi -maxadmin -pskysql script1.$$ | grep -cs 'Monitor' > /dev/null +maxadmin -pmariadb script1.$$ | grep -cs 'Monitor' > /dev/null if [ $? 
-ne "0" ]; then echo "Show Monitor: Failed" failure=`expr $failure + 1` @@ -435,7 +435,7 @@ fi rm -f script1.$$ -maxadmin -pskysql list sessions | \ +maxadmin -pmariadb list sessions | \ awk -F\| ' /^0x/ { if (NF > 1) { sub(/ +$/, "", $1); printf("show session \"%s\"\n", $1); } }' > script1.$$ grep -cs "show session" script1.$$ >/dev/null if [ $? -ne "0" ]; then @@ -445,7 +445,7 @@ else passed=`expr $passed + 1` echo "list sessions: Passed" fi -maxadmin -pskysql script1.$$ | grep -cs 'Session' > /dev/null +maxadmin -pmariadb script1.$$ | grep -cs 'Session' > /dev/null if [ $? -ne "0" ]; then echo "Show Session: Failed" failure=`expr $failure + 1` @@ -456,7 +456,7 @@ fi rm -f script1.$$ -maxadmin -pskysql list dcbs | \ +maxadmin -pmariadb list dcbs | \ awk -F\| ' /^ 0x/ { if (NF > 1) { sub(/ +$/, "", $1); sub(/ 0x/, "0x", $1); printf("show dcb \"%s\"\n", $1); } }' > script1.$$ grep -cs "show dcb" script1.$$ >/dev/null if [ $? -ne "0" ]; then @@ -466,7 +466,7 @@ else passed=`expr $passed + 1` echo "list dcbs: Passed" fi -maxadmin -pskysql script1.$$ | grep -cs 'DCB' > /dev/null +maxadmin -pmariadb script1.$$ | grep -cs 'DCB' > /dev/null if [ $? -ne "0" ]; then echo "Show DCB: Failed" failure=`expr $failure + 1` @@ -477,7 +477,7 @@ fi rm -f script1.$$ -maxadmin -pskysql list services | \ +maxadmin -pmariadb list services | \ awk -F\| '{ if (NF > 1) { sub(/ +$/, "", $1); printf("show dbusers \"%s\"\n", $1); } }' > script1.$$ grep -cs "show dbusers" script1.$$ >/dev/null if [ $? -ne "0" ]; then @@ -487,7 +487,7 @@ else passed=`expr $passed + 1` echo "list services: Passed" fi -maxadmin -pskysql script1.$$ | grep -cs 'Users table data' > /dev/null +maxadmin -pmariadb script1.$$ | grep -cs 'Users table data' > /dev/null if [ $? 
-ne "0" ]; then echo "Show dbusers: Failed" failure=`expr $failure + 1` diff --git a/testall.cmake b/cmake/testall.cmake similarity index 82% rename from testall.cmake rename to cmake/testall.cmake index 2c6868faa..fedbdf273 100644 --- a/testall.cmake +++ b/cmake/testall.cmake @@ -1,4 +1,4 @@ -execute_process(COMMAND /bin/sh -c "${CMAKE_BINARY_DIR}/bin/maxscale -c ${CMAKE_BINARY_DIR} &>/dev/null") +execute_process(COMMAND /bin/sh -c "${CMAKE_BINARY_DIR}/bin/maxscale -c ${CMAKE_BINARY_DIR} &>/dev/null 2> /dev/null > /dev/null") execute_process(COMMAND make test RESULT_VARIABLE RVAL) execute_process(COMMAND killall maxscale) if(NOT RVAL EQUAL 0) diff --git a/etc/postinst.in b/etc/postinst.in index 58bdaf54e..4bd193958 100755 --- a/etc/postinst.in +++ b/etc/postinst.in @@ -1,5 +1,5 @@ #!/bin/sh -install @CMAKE_INSTALL_PREFIX@/maxscale /etc/init.d/ -install @CMAKE_INSTALL_PREFIX@/maxscale.conf /etc/ld.so.conf.d/ +cp @CMAKE_INSTALL_PREFIX@/maxscale /etc/init.d/ +cp @CMAKE_INSTALL_PREFIX@/maxscale.conf /etc/ld.so.conf.d/ /sbin/ldconfig diff --git a/etc/postrm.in b/etc/postrm.in index c199f0b76..b1e86fca5 100755 --- a/etc/postrm.in +++ b/etc/postrm.in @@ -1,3 +1,6 @@ #!/bin/sh -rm /etc/init.d/maxscale -rm /etc/ld.so.conf.d/maxscale.conf +if [ "$1" -eq 0 ] +then + rm -f /etc/init.d/maxscale + rm -f /etc/ld.so.conf.d/maxscale.conf +fi diff --git a/etc/ubuntu/init.d/maxscale.in b/etc/ubuntu/init.d/maxscale.in index 269f88638..caf7d1408 100755 --- a/etc/ubuntu/init.d/maxscale.in +++ b/etc/ubuntu/init.d/maxscale.in @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash # # maxscale: The MariaDB Corporation MaxScale database proxy # @@ -58,7 +58,7 @@ RETVAL=0 start() { log_daemon_msg "Starting MaxScale" - start_daemon -p $MAXSCALE_PIDFILE $DAEMON 2> /dev/null + start_daemon -p $MAXSCALE_PIDFILE $DAEMON 2> /dev/null > /dev/null sleep 2 @@ -69,7 +69,6 @@ start() { stop() { log_daemon_msg "Stopping MaxScale" - killproc -p $PIDFILE $DAEMON 2>&1 /dev/null maxscale_wait_stop @@ -79,8 +78,6 @@ 
stop() { reload() { log_daemon_msg "Reloading MaxScale" - killproc -p $MAXSCALE_PIDFILE $DAEMON 1 - log_end_msg $? } diff --git a/log_manager/log_manager.cc b/log_manager/log_manager.cc index 11d1dbc23..b18e10e26 100644 --- a/log_manager/log_manager.cc +++ b/log_manager/log_manager.cc @@ -51,6 +51,10 @@ static int block_start_index; static int prevval; static simple_mutex_t msg_mutex; #endif +static int highprec = 0; +static int do_syslog = 1; +static int do_maxscalelog = 1; + /** * Variable holding the enabled logfiles information. * Used from log users to check enabled logs prior calling @@ -391,7 +395,13 @@ static bool logmanager_init_nomutex( fw = &lm->lm_filewriter; fn->fn_state = UNINIT; fw->fwr_state = UNINIT; - + + if(!do_syslog) + { + free(syslog_id_str); + syslog_id_str = NULL; + } + /** Initialize configuration including log file naming info */ if (!fnames_conf_init(fn, argc, argv)) { @@ -696,9 +706,11 @@ static int logmanager_write_log( else { sesid_str_len = 0; - } - timestamp_len = get_timestamp_len(); - + } + if(highprec) + timestamp_len = get_timestamp_len_hp(); + else + timestamp_len = get_timestamp_len(); cmplen = sesid_str_len > 0 ? sesid_str_len - sizeof(char) : 0; /** Find out how much can be safely written with current block size */ @@ -740,10 +752,17 @@ static int logmanager_write_log( } #endif /** Book space for log string from buffer */ + if(do_maxscalelog) + { wp = blockbuf_get_writepos(&bb, id, safe_str_len, flush); + } + else + { + wp = (char*)malloc(sizeof(char)*(timestamp_len-sizeof(char)+cmplen+str_len + 1)); + } #if defined (SS_LOG_DEBUG) @@ -758,8 +777,10 @@ static int logmanager_write_log( * to wp. * Returned timestamp_len doesn't include terminating null. 
*/ - timestamp_len = snprint_timestamp(wp, timestamp_len); - + if(highprec) + timestamp_len = snprint_timestamp_hp(wp, timestamp_len); + else + timestamp_len = snprint_timestamp(wp, timestamp_len); if (sesid_str_len != 0) { /** @@ -809,8 +830,15 @@ static int logmanager_write_log( wp[safe_str_len-2]=' '; } wp[safe_str_len-1] = '\n'; - blockbuf_unregister(bb); + if(do_maxscalelog) + { + blockbuf_unregister(bb); + } + else + { + free(wp); + } /** * disable because cross-blockbuffer locking either causes deadlock * or run out of memory blocks. @@ -1364,12 +1392,12 @@ int skygw_log_write_flush( * Find out the length of log string (to be formatted str). */ va_start(valist, str); - len = vsnprintf(NULL, 0, str, valist); + len = sizeof(char) * vsnprintf(NULL, 0, str, valist); va_end(valist); /** * Add one for line feed. */ - len += 1; + len += sizeof(char); /** * Write log string to buffer and add to file write list. */ @@ -1698,11 +1726,14 @@ static bool fnames_conf_init( case 'l': /** record list of log file ids for syslogged */ + if(do_syslog) + { if (syslog_id_str != NULL) { free (syslog_id_str); } syslog_id_str = optarg; + } break; case 'm': @@ -1715,6 +1746,7 @@ static bool fnames_conf_init( case 's': /** record list of log file ids for later use */ + if(do_syslog) shmem_id_str = optarg; break; case 'h': @@ -3077,3 +3109,31 @@ void skygw_log_sync_all(void) skygw_message_send(lm->lm_logmes); skygw_message_wait(lm->lm_clientmes); } + +/** + * Toggle high precision logging + * @param val 0 for disabled, 1 for enabled + */ +void skygw_set_highp(int val) +{ + highprec = val; +} + + +/** + * Toggle syslog logging + * @param val 0 for disabled, 1 for enabled + */ +void logmanager_enable_syslog(int val) +{ + do_syslog = val; +} + +/** + * Toggle syslog logging + * @param val 0 for disabled, 1 for enabled + */ +void logmanager_enable_maxscalelog(int val) +{ + do_maxscalelog = val; +} \ No newline at end of file diff --git a/log_manager/log_manager.h 
b/log_manager/log_manager.h index f2146d303..ed708682a 100644 --- a/log_manager/log_manager.h +++ b/log_manager/log_manager.h @@ -119,6 +119,9 @@ int skygw_log_write_flush(logfile_id_t id, const char* format, ...); int skygw_log_enable(logfile_id_t id); int skygw_log_disable(logfile_id_t id); void skygw_log_sync_all(void); +void skygw_set_highp(int); +void logmanager_enable_syslog(int); +void logmanager_enable_maxscalelog(int); EXTERN_C_BLOCK_END diff --git a/log_manager/test/CMakeLists.txt b/log_manager/test/CMakeLists.txt index 253f56f74..bbcfd5791 100644 --- a/log_manager/test/CMakeLists.txt +++ b/log_manager/test/CMakeLists.txt @@ -2,4 +2,4 @@ add_executable(testlog testlog.c) add_executable(testorder testorder.c) target_link_libraries(testlog pthread log_manager utils) target_link_libraries(testorder pthread log_manager utils) -add_test(NAME TestLogOrder COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/logorder.sh 200 0 1000 ${CMAKE_CURRENT_BINARY_DIR}/logorder.log) +add_test(NAME Internal-TestLogOrder COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/logorder.sh 200 0 1000 ${CMAKE_CURRENT_BINARY_DIR}/logorder.log) diff --git a/macros.cmake b/macros.cmake index ba81e91e5..63bbd23d3 100644 --- a/macros.cmake +++ b/macros.cmake @@ -9,17 +9,14 @@ macro(set_maxscale_version) #MaxScale version number set(MAXSCALE_VERSION_MAJOR "1") - set(MAXSCALE_VERSION_MINOR "0") - set(MAXSCALE_VERSION_PATCH "6") + set(MAXSCALE_VERSION_MINOR "1") + set(MAXSCALE_VERSION_PATCH "0") set(MAXSCALE_VERSION_NUMERIC "${MAXSCALE_VERSION_MAJOR}.${MAXSCALE_VERSION_MINOR}.${MAXSCALE_VERSION_PATCH}") - set(MAXSCALE_VERSION "${MAXSCALE_VERSION_MAJOR}.${MAXSCALE_VERSION_MINOR}.${MAXSCALE_VERSION_PATCH}-unstable") + set(MAXSCALE_VERSION "${MAXSCALE_VERSION_MAJOR}.${MAXSCALE_VERSION_MINOR}.${MAXSCALE_VERSION_PATCH}") endmacro() macro(set_variables) - - # Build type - set(BUILD_TYPE "None" CACHE STRING "Build type, possible values are:None, Debug, DebugSymbols, Optimized.") # hostname or IP address of MaxScale's host 
set(TEST_HOST "127.0.0.1" CACHE STRING "hostname or IP address of MaxScale's host") @@ -30,6 +27,9 @@ macro(set_variables) # port of read/write split router module set(TEST_PORT_RW "4006" CACHE STRING "port of read/write split router module") + # port of schemarouter router module + set(TEST_PORT_DB "4010" CACHE STRING "port of schemarouter router module") + # port of read/write split router module with hints set(TEST_PORT_RW_HINT "4009" CACHE STRING "port of read/write split router module with hints") @@ -44,12 +44,12 @@ macro(set_variables) # password of MaxScale user set(TEST_PASSWORD "maxpwd" CACHE STRING "password of MaxScale user") - + # Use static version of libmysqld set(STATIC_EMBEDDED TRUE CACHE BOOL "Use static version of libmysqld") - + # Build RabbitMQ components - set(BUILD_RABBITMQ TRUE CACHE BOOL "Build RabbitMQ components") + set(BUILD_RABBITMQ FALSE CACHE BOOL "Build RabbitMQ components") # Build the binlog router set(BUILD_BINLOG TRUE CACHE BOOL "Build binlog router") @@ -58,7 +58,7 @@ macro(set_variables) set(GCOV FALSE CACHE BOOL "Use gcov build flags") # Install init.d scripts and ldconf configuration files - set(INSTALL_SYSTEM_FILES TRUE CACHE BOOL "Install init.d scripts and ldconf configuration files") + set(WITH_SCRIPTS TRUE CACHE BOOL "Install init.d scripts and ldconf configuration files") # Build tests set(BUILD_TESTS FALSE CACHE BOOL "Build tests") diff --git a/plugins/CMakeLists.txt b/plugins/CMakeLists.txt new file mode 100644 index 000000000..3ea1eb028 --- /dev/null +++ b/plugins/CMakeLists.txt @@ -0,0 +1,5 @@ +install(FILES nagios/check_maxscale_monitors.pl DESTINATION plugins/nagios) +install(FILES nagios/check_maxscale_resources.pl DESTINATION plugins/nagios) +install(FILES nagios/check_maxscale_threads.pl DESTINATION plugins/nagios) +install(FILES nagios/maxscale_commands.cfg DESTINATION plugins/nagios) +install(FILES nagios/server1.cfg DESTINATION plugins/nagios) diff --git a/plugins/nagios/check_maxscale_monitors.pl 
b/plugins/nagios/check_maxscale_monitors.pl new file mode 100755 index 000000000..42db839ae --- /dev/null +++ b/plugins/nagios/check_maxscale_monitors.pl @@ -0,0 +1,208 @@ +#!/usr/bin/perl +# +# +# +# This file is distributed as part of the MariaDB Corporation MaxScale. It is free +# software: you can redistribute it and/or modify it under the terms of the +# GNU General Public License as published by the Free Software Foundation, +# version 2. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 51 +# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Copyright MariaDB Corporation Ab 2013-2015 +# +# + +# +# @file check_maxscale_monitors.pl - Nagios plugin for MaxScale monitors +# +# Revision History +# +# Date Who Description +# 06-03-2015 Massimiliano Pinto Initial implementation +# + +#use strict; +#use warnings; +use Getopt::Std; + +my %opts; +my $TIMEOUT = 15; # we don't want to wait long for a response +my %ERRORS = ('UNKNOWN' , '3', + 'OK', '0', + 'WARNING', '1', + 'CRITICAL', '2'); + +my $curr_script = "$0"; +$curr_script =~ s{.*/}{}; + +sub usage { + my $rc = shift; + + print <<"EOF"; +MaxScale monitor checker plugin for Nagios + +Usage: $curr_script [-r ] [-H ] [-P ] [-u ] [-p ] [-m ] [-h] + +Options: + -r = monitors + -h = provide this usage message + -H = which host to connect to + -P = port to use + -u = username to connect as + -p = password to use for at + -m = /path/to/maxadmin +EOF + exit $rc; +} + +%opts =( + 'r' => 'monitors', # default maxscale resource to show + 'h' => '', # give help + 'H' => 'localhost', # host + 'u' => 'root', # username + 'p' => '', # password + 'P' => 6603, # port + 'm' 
=> '/usr/local/mariadb-maxscale/bin/maxadmin', # maxadmin + ); + +my $MAXADMIN_DEFAULT = $opts{'m'}; + +getopts('r:hH:u:p:P:m:', \%opts) + or usage( $ERRORS{"UNKNOWN"} ); +usage( $ERRORS{'OK'} ) if $opts{'h'}; + +my $MAXADMIN_RESOURCE = $opts{'r'}; +my $MAXADMIN = $opts{'m'}; +if (!defined $MAXADMIN || length($MAXADMIN) == 0) { + $MAXADMIN = $MAXADMIN_DEFAULT; +} +-x $MAXADMIN or + die "$curr_script: Failed to find required tool: $MAXADMIN. Please install it or use the -m option to point to another location."; + +# Just in case of problems, let's not hang Nagios +$SIG{'ALRM'} = sub { + print ("UNKNOWN: No response from MaxScale server (alarm)\n"); + exit $ERRORS{"UNKNOWN"}; +}; +alarm($TIMEOUT); + +my $command = $MAXADMIN . ' -h ' . $opts{'H'} . ' -u ' . $opts{'u'} . ' -p "' . $opts{'p'} . '" -P ' . $opts{'P'} . ' ' . "show " . $MAXADMIN_RESOURCE; + +# +# print "maxadmin command: $command\n"; +# + +open (MAXSCALE, "$command 2>&1 |") + or die "can't get data out of Maxscale: $!"; + +my $hostname = qx{hostname}; chomp $hostname; +my $waiting_backend = 0; +my $start_output = 0; +my $n_monitors = 0; +my $performance_data=""; + + +my $resource_type = $MAXADMIN_RESOURCE; +chop($resource_type); + +my $resource_match = ucfirst("$resource_type Name"); + +my $this_key; +my %monitor_data; + +while ( ) { + chomp; + + if ( /(Failed|Unable) to connect to MaxScale/ ) { + printf "CRITICAL: $_\n"; + close(MAXSCALE); + exit(2); + } + + if ( /^Monitor\:/ ) { + $n_monitors++; + $this_key = 'monitor' . 
$n_monitors; + $monitor_data{$this_key} = { + '1name'=> '', + '2state' => '', + '3servers' => '', + '4interval' => '', + '5repl_lag' => '' + }; + + next; + } + + next if (/--/ || $_ eq ''); + + if ( /\s+Name/) { + + my $str; + my $perf_line; + my @data_row = split(':', $_); + my $name = $data_row[1]; + $name =~ s/^\s+|\s+$//g; + $monitor_data{$this_key}{'1name'}=$name; + + } + + if (/(\s+Monitor )(.*)/) { + $monitor_data{$this_key}{'2state'}=$2; + } + + if ( /Monitored servers\:/ ) { + my $server_list; + my @data_row = split(':', $_); + shift(@data_row); + foreach my $name (@data_row) { + $name =~ s/^\s+|\s+$//g; + $name =~ s/ //g; + $server_list .= $name . ":"; + } + chop($server_list); + $monitor_data{$this_key}{'3servers'}=$server_list; + } + + if ( /(Sampling interval\:)\s+(\d+) milliseconds/ ) { + $monitor_data{$this_key}{'4interval'}=$2; + } + + if ( /Replication lag\:/ ) { + my @data_row = split(':', $_); + my $name = $data_row[1]; + $name =~ s/^\s+|\s+$//g; + $monitor_data{$this_key}{'5repl_lag'}=$name; + } +} + + + for my $key ( sort(keys %monitor_data) ) { + my $local_hash = {}; + $performance_data .= " $key="; + $local_hash = $monitor_data{$key}; + my %new_hash = %$local_hash; + foreach my $key (sort (keys (%new_hash))) { + $performance_data .= $new_hash{$key} . ";"; + } + chop($performance_data); + } + + +if ($n_monitors) { + printf "OK: %d monitors found |%s\n", $n_monitors, $performance_data; + close(MAXSCALE); + exit 0; +} else { + printf "WARNING: 0 monitors found\n"; + close(MAXSCALE); + exit 1; +} + diff --git a/plugins/nagios/check_maxscale_resources.pl b/plugins/nagios/check_maxscale_resources.pl new file mode 100755 index 000000000..7e121374a --- /dev/null +++ b/plugins/nagios/check_maxscale_resources.pl @@ -0,0 +1,189 @@ +#!/usr/bin/perl +# +# +# +# This file is distributed as part of the MariaDB Corporation MaxScale. 
It is free +# software: you can redistribute it and/or modify it under the terms of the +# GNU General Public License as published by the Free Software Foundation, +# version 2. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 51 +# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Copyright MariaDB Corporation Ab 2013-2015 +# +# + +# +# @file check_maxscale_resources.pl - Nagios plugin for MaxScale resources +# +# Revision History +# +# Date Who Description +# 06-03-2015 Massimiliano Pinto Initial implementation +# + +#use strict; +#use warnings; +use Getopt::Std; + +my %opts; +my $TIMEOUT = 15; # we don't want to wait long for a response +my %ERRORS = ('UNKNOWN' , '3', + 'OK', '0', + 'WARNING', '1', + 'CRITICAL', '2'); + +my $curr_script = "$0"; +$curr_script =~ s{.*/}{}; + +sub usage { + my $rc = shift; + + print <<"EOF"; +MaxScale monitor checker plugin for Nagios + +Usage: $curr_script [-r ] [-H ] [-P ] [-u ] [-p ] [-m ] [-h] + +Options: + -r = modules|services|filters|listeners|servers|sessions + -h = provide this usage message + -H = which host to connect to + -P = port to use + -u = username to connect as + -p = password to use for at + -m = /path/to/maxadmin +EOF + exit $rc; +} + +%opts =( + 'r' => 'services', # default maxscale resource to show + 'h' => '', # give help + 'H' => 'localhost', # host + 'u' => 'root', # username + 'p' => '', # password + 'P' => 6603, # port + 'm' => '/usr/local/mariadb-maxscale/bin/maxadmin', # maxadmin + ); + +my $MAXADMIN_DEFAULT = $opts{'m'}; + +getopts('r:hH:u:p:P:m:', \%opts) + or usage( $ERRORS{"UNKNOWN"} ); +usage( $ERRORS{'OK'} ) if $opts{'h'}; + +my 
$MAXADMIN_RESOURCE = $opts{'r'}; +my $MAXADMIN = $opts{'m'}; +if (!defined $MAXADMIN || length($MAXADMIN) == 0) { + $MAXADMIN = $MAXADMIN_DEFAULT; +} + +-x $MAXADMIN or + die "$curr_script: Failed to find required tool: $MAXADMIN. Please install it or use the -m option to point to another location."; + +# Just in case of problems, let's not hang Nagios +$SIG{'ALRM'} = sub { + print ("UNKNOWN: No response from MaxScale server (alarm)\n"); + exit $ERRORS{"UNKNOWN"}; +}; +alarm($TIMEOUT); + +my $command = $MAXADMIN . ' -h ' . $opts{'H'} . ' -u ' . $opts{'u'} . ' -p "' . $opts{'p'} . '" -P ' . $opts{'P'} . ' ' . "list " . $MAXADMIN_RESOURCE; + +# +# print "maxadmin command: $command\n"; +# + +open (MAXSCALE, "$command 2>&1 |") or die "can't get data out of Maxscale: $!"; + +my $hostname = qx{hostname}; chomp $hostname; + +my $start_output = 0; +my $n_resources = 0; +my $performance_data=""; + + +my $resource_type = $MAXADMIN_RESOURCE; +chop($resource_type); + +my $resource_match = ucfirst("$resource_type Name"); + +if ($resource_type eq "listener") { + $resource_match = "Service Name"; +} +if ($resource_type eq "filter") { + $resource_match = "Filter"; +} +if ($resource_type eq "server") { + $resource_match = "Server"; +} +if ($resource_type eq "session") { + $resource_match = "Session"; +} + +# +# print "Matching [$resource_match]\n"; +# + +while ( ) { + chomp; + + if ( /(Failed|Unable) to connect to MaxScale/ ) { + printf "CRITICAL: $_\n"; + close(MAXSCALE); + exit(2); + } + + if ( ! /^$resource_match/ ) { + } else { + $start_output = 1; + next; + } + if ($start_output) { + next if (/--/ || $_ eq ''); + $n_resources++; + if ($resource_type ne "session") { + my $str; + my $perf_line; + my @data_row = split('\|', $_); + $performance_data .= "$MAXADMIN_RESOURCE$n_resources="; + foreach my $val (@data_row) { + $str = $val; + $str =~ s/^\s+|\s+$//g; + $perf_line .= $str . ';'; + } + chop($perf_line); + $performance_data .= $perf_line . 
' '; + } + } +} + +chop($performance_data); + +############################################### +# +# print OK or CRITICAL based on $n_resources +# +################################################ + +if ($n_resources) { + if ($performance_data eq '') { + printf "OK: %d $MAXADMIN_RESOURCE found\n", $n_resources; + } else { + printf "OK: %d $MAXADMIN_RESOURCE found | %s\n", $n_resources, $performance_data; + } + close(MAXSCALE); + exit 0; +} else { + printf "CRITICAL: 0 $MAXADMIN_RESOURCE found\n"; + close(MAXSCALE); + exit 2; +} + diff --git a/plugins/nagios/check_maxscale_threads.pl b/plugins/nagios/check_maxscale_threads.pl new file mode 100755 index 000000000..faa06e4ba --- /dev/null +++ b/plugins/nagios/check_maxscale_threads.pl @@ -0,0 +1,246 @@ +#!/usr/bin/perl +# +# +# +# This file is distributed as part of the MariaDB Corporation MaxScale. It is free +# software: you can redistribute it and/or modify it under the terms of the +# GNU General Public License as published by the Free Software Foundation, +# version 2. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 51 +# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+# +# Copyright MariaDB Corporation Ab 2013-2015 +# +# + +# +# @file check_maxscale_threads.pl - Nagios plugin for MaxScale threads and events +# +# Revision History +# +# Date Who Description +# 06-03-2015 Massimiliano Pinto Initial implementation +# + +#use strict; +#use warnings; +use Getopt::Std; + +my %opts; +my $TIMEOUT = 15; # we don't want to wait long for a response +my %ERRORS = ('UNKNOWN' , '3', + 'OK', '0', + 'WARNING', '1', + 'CRITICAL', '2'); + +my $curr_script = "$0"; +$curr_script =~ s{.*/}{}; + +sub usage { + my $rc = shift; + + print <<"EOF"; +MaxScale monitor checker plugin for Nagios + +Usage: $curr_script [-r ] [-H ] [-P ] [-u ] [-p ] [-m ] [-h] + +Options: + -r = threads + -h = provide this usage message + -H = which host to connect to + -P = port to use + -u = username to connect as + -p = password to use for at + -m = /path/to/maxadmin +EOF + exit $rc; +} + +%opts =( + 'r' => 'threads', # default maxscale resource to show + 'h' => '', # give help + 'H' => 'localhost', # host + 'u' => 'root', # username + 'p' => '', # password + 'P' => 6603, # port + 'm' => '/usr/local/mariadb-maxscale/bin/maxadmin', # maxadmin + ); + +my $MAXADMIN_DEFAULT = $opts{'m'}; + +getopts('r:hH:u:p:P:m:', \%opts) + or usage( $ERRORS{"UNKNOWN"} ); +usage( $ERRORS{'OK'} ) if $opts{'h'}; + +my $MAXADMIN_RESOURCE = $opts{'r'}; +my $MAXADMIN = $opts{'m'}; +if (!defined $MAXADMIN || length($MAXADMIN) == 0) { + $MAXADMIN = $MAXADMIN_DEFAULT; +} +-x $MAXADMIN or + die "$curr_script: Failed to find required tool: $MAXADMIN. Please install it or use the -m option to point to another location."; + +# Just in case of problems, let's not hang Nagios +$SIG{'ALRM'} = sub { + print ("UNKNOWN: No response from MaxScale server (alarm)\n"); + exit $ERRORS{"UNKNOWN"}; +}; +alarm($TIMEOUT); + +my $command = $MAXADMIN . ' -h ' . $opts{'H'} . ' -u ' . $opts{'u'} . ' -p "' . $opts{'p'} . '" -P ' . $opts{'P'} . ' ' . "show " . 
$MAXADMIN_RESOURCE; + +# +# print "maxadmin command: $command\n"; +# + +open (MAXSCALE, "$command 2>&1 |") or die "can't get data out of Maxscale: $!"; + +my $hostname = qx{hostname}; chomp $hostname; +my $start_output = 0; +my $n_threads = 0; +my $p_threads = 0; +my $performance_data=""; + + +my $resource_type = $MAXADMIN_RESOURCE; +chop($resource_type); + +my $resource_match = ucfirst("$resource_type Name"); + +my $historic_thread_load_average = 0; +my $current_thread_load_average = 0; + +my %thread_data; +my %event_data; + +my $start_queue_len = 0; + +while ( ) { + chomp; + + if ( /(Failed|Unable) to connect to MaxScale/ ) { + printf "CRITICAL: $_\n"; + close(MAXSCALE); + exit(2); + } + + if ( /Historic Thread Load Average/) { + my $str; + my @data_row = split(':', $_); + foreach my $val (@data_row) { + $str = $val; + $str =~ s/^\s+|\s+$//g; + } + chop($str); + $historic_thread_load_average = $str; + } + + if (/Current Thread Load Average/) { + my $str; + my @data_row = split(':', $_); + foreach my $val (@data_row) { + $str = $val; + $str =~ s/^\s+|\s+$//g; + } + chop($str); + $current_thread_load_average = $str; + } + + if (/Minute Average/) { + my $str; + my $in_str; + my @data_row = split(',', $_); + foreach my $val (@data_row) { + my ($i,$j)= split(':', $val); + $i =~ s/^\s+|\s+$//g; + $j =~ s/^\s+|\s+$//g; + if ($start_queue_len) { + $event_data{$i} = $j; + } else { + $thread_data{$i} = $j; + } + } + } + + if ( /Pending event queue length averages/) { + $start_queue_len = 1; + next; + } + + if (/^\s+ID/ ) { + $start_output = 1; + next; + } + + if ($start_output && /^\s+\d/) { + $n_threads++; + if (/Processing/) { + $p_threads++; + } + } +} + +close(MAXSCALE); + +$command = $MAXADMIN . ' -h ' . $opts{'H'} . ' -u ' . $opts{'u'} . ' -p "' . $opts{'p'} . '" -P ' . $opts{'P'} . ' ' . 
"show epoll"; + +open (MAXSCALE, "$command 2>&1 |") or die "can't get data out of Maxscale: $!"; + +my $queue_len = 0; + +while ( ) { + chomp; + + if ( /(Failed|Unable) to connect to MaxScale/ ) { + printf "CRITICAL: $_\n"; + close(MAXSCALE); + exit(2); + } + + if ( ! /Current event queue length/ ) { + next; + } else { + my $str; + my @data_row = split(':', $_); + foreach my $val (@data_row) { + $str = $val; + $str =~ s/^\s+|\s+$//g; + } + $queue_len = $str; + + last; + } +} + +my $performance_data_thread = ""; +my $performance_data_event = ""; + +my $in_str; +my $in_key; +my $in_val; + +my @new_thread_array = @thread_data{'15 Minute Average', '5 Minute Average', '1 Minute Average'}; +my @new_event_array = @event_data{'15 Minute Average', '5 Minute Average', '1 Minute Average'}; + +$performance_data_thread = join(';', @new_thread_array); +$performance_data_event = join(';', @new_event_array); + +$performance_data .= "threads=$historic_thread_load_average;$current_thread_load_average avg_threads=$performance_data_thread avg_events=$performance_data_event"; + +if ($p_threads < $n_threads) { + printf "OK: Processing threads: %d/%d Events: %d | $performance_data\n", $p_threads, $n_threads, $queue_len; + close(MAXSCALE); + exit 0; +} else { + printf "WARNING: Processing threads: %d/%d Events: %d | $performance_data\n", $p_threads, $n_threads, $queue_len; + close(MAXSCALE); + exit 1; +} + diff --git a/plugins/nagios/maxscale_commands.cfg b/plugins/nagios/maxscale_commands.cfg new file mode 100644 index 000000000..23bbb3594 --- /dev/null +++ b/plugins/nagios/maxscale_commands.cfg @@ -0,0 +1,32 @@ +############################################################################### +# MAXSCALE_COMMANDS.CFG - SAMPLE COMMAND DEFINITIONS FOR NAGIOS 3.5.1 +# +# Massimiliano Pinto +# Last Modified: 06-03-2015 +# +# NOTES: This config file provides you with some example command definitions +# that you can reference in host, service, and contact definitions. 
+# +# You don't need to keep commands in a separate file from your other +# object definitions. This has been done just to make things easier to +# understand. +# +############################################################################### + +# check maxscale monitors +define command{ + command_name check_maxscale_monitors + command_line $USER1$/check_maxscale_monitors.pl -H $HOSTADDRESS$ -P $ARG1$ -u $ARG2$ -p $ARG3$ -r $ARG4$ -m $ARG5$ +} + +# check maxscale threads +define command{ + command_name check_maxscale_threads + command_line $USER1$/check_maxscale_threads.pl -H $HOSTADDRESS$ -P $ARG1$ -u $ARG2$ -p $ARG3$ -r $ARG4$ -m $ARG5$ +} + +# check maxscale resource (listeners, services, etc) +define command{ + command_name check_maxscale_resource + command_line $USER1$/check_maxscale_resources.pl -H $HOSTADDRESS$ -P $ARG1$ -u $ARG2$ -p $ARG3$ -r $ARG4$ -m $ARG5$ +} diff --git a/plugins/nagios/server1.cfg b/plugins/nagios/server1.cfg new file mode 100644 index 000000000..a223be4b9 --- /dev/null +++ b/plugins/nagios/server1.cfg @@ -0,0 +1,111 @@ + +############################################################################### +############################################################################### +# +# HOST DEFINITION +# +############################################################################### +############################################################################### + +# Define a host for the remote machine + +define host{ + use linux-server ; Name of host template to use + ; This host definition will inherit all variables that are defined + ; in (or inherited by) the linux-server host template definition. 
+ host_name server1 + alias server1 + address xxx.xxx.xxx.xxx + } + + + +############################################################################### +############################################################################### +# +# HOST GROUP DEFINITION +# +############################################################################### +############################################################################### + +# Define an optional hostgroup for Linux machines + +define hostgroup{ + hostgroup_name linux-real-servers ; The name of the hostgroup + alias Linux Real Servers ; Long name of the group + members server1 ; Comma separated list of hosts that belong to this group + } + + + +# Check MaxScale modules, on the remote machine. +define service{ + use local-service ; Name of service template to use + host_name server1 + service_description MaxScale_modules + check_command check_maxscale_resource!6603!admin!mariadb!modules + notifications_enabled 0 + } + +# Check MaxScale services, on the remote machine. +define service{ + use local-service ; Name of service template to use + host_name server1 + service_description MaxScale_services + check_command check_maxscale_resource!6603!admin!mariadb!services + notifications_enabled 0 + } + +# Check MaxScale listeners, on the remote machine. +define service{ + use local-service ; Name of service template to use + host_name server1 + service_description MaxScale_listeners + check_command check_maxscale_resource!6603!admin!mariadb!listeners + notifications_enabled 0 + } + +# Check MaxScale servers, on the remote machine. +define service{ + use local-service ; Name of service template to use + host_name server1 + service_description MaxScale_servers + check_command check_maxscale_resource!6603!admin!mariadb!servers + notifications_enabled 0 + } + +# Check MaxScale sessions, on the remote machine. 
+define service{ + use local-service ; Name of service template to use + host_name server1 + service_description MaxScale_sessions + check_command check_maxscale_resource!6603!admin!mariadb!sessions + notifications_enabled 0 + } + +# Check MaxScale filters, on the remote machine. +define service{ + use local-service ; Name of service template to use + host_name server1 + service_description MaxScale_filters + check_command check_maxscale_resource!6603!admin!mariadb!filters + notifications_enabled 0 + } + +# Check MaxScale monitors, on the remote machine. +define service{ + use local-service ; Name of service template to use + host_name server1 + service_description MaxScale_monitors + check_command check_maxscale_monitors!6603!admin!mariadb!monitors + notifications_enabled 0 + } + +# Define a service to check Script on the remote machine, with maxadmin path +define service{ + use local-service ; Name of service template to use + host_name server1 + service_description MaxScale_threads + check_command check_maxscale_threads!6603!admin!mariadb!threads!/usr/local/mariadb-maxscale/bin/maxadmin + notifications_enabled 0 + } diff --git a/query_classifier/query_classifier.cc b/query_classifier/query_classifier.cc index f8da45567..46ab89729 100644 --- a/query_classifier/query_classifier.cc +++ b/query_classifier/query_classifier.cc @@ -1219,7 +1219,8 @@ inline void add_str(char** buf, int* buflen, int* bufsize, char* str) if(*buf) strcat(*buf," "); } - strcat(*buf,str); + if(*buf) + strcat(*buf,str); *buflen += isize; } @@ -1568,6 +1569,61 @@ char* skygw_get_qtype_str( return qtype_str; } +/** + * Returns an array of strings of databases that this query uses. + * If the database isn't defined in the query, it is assumed that this query + * only targets the current database. + * The value of @p size is set to the number of allocated strings. The caller is + * responsible for freeing all the allocated memory. 
+ * @param querybuf GWBUF containing the query + * @param size Size of the resulting array + * @return A new array of strings containing the database names or NULL if no + * databases were found. + */ +char** skygw_get_database_names(GWBUF* querybuf,int* size) +{ + LEX* lex; + TABLE_LIST* tbl; + char **databases = NULL, **tmp = NULL; + int currsz = 0,i = 0; + + if( (lex = get_lex(querybuf)) == NULL) + { + goto retblock; + } + + lex->current_select = lex->all_selects_list; + + while(lex->current_select) + { + tbl = lex->current_select->table_list.first; + + while(tbl) + { + if(strcmp(tbl->db,"skygw_virtual") != 0) + { + if(i>= currsz) + { + tmp = (char**)realloc(databases, + sizeof(char*)*(currsz*2 + 1)); + if(tmp == NULL) + { + goto retblock; + } + databases = tmp; + currsz = currsz*2 + 1; + } + databases[i++] = strdup(tbl->db); + } + tbl=tbl->next_local; + } + lex->current_select = lex->current_select->next_select_in_list(); + } + +retblock: + *size = i; + return databases; +} skygw_query_op_t query_classifier_get_operation(GWBUF* querybuf) { diff --git a/query_classifier/query_classifier.h b/query_classifier/query_classifier.h index 754e69478..4d6c021f9 100644 --- a/query_classifier/query_classifier.h +++ b/query_classifier/query_classifier.h @@ -112,6 +112,7 @@ bool query_is_parsed(GWBUF* buf); bool skygw_query_has_clause(GWBUF* buf); char* skygw_get_qtype_str(skygw_query_type_t qtype); char* skygw_get_affected_fields(GWBUF* buf); +char** skygw_get_database_names(GWBUF* querybuf,int* size); EXTERN_C_BLOCK_END diff --git a/query_classifier/test/CMakeLists.txt b/query_classifier/test/CMakeLists.txt index ab0154657..44b66e461 100644 --- a/query_classifier/test/CMakeLists.txt +++ b/query_classifier/test/CMakeLists.txt @@ -11,4 +11,4 @@ endif() add_subdirectory(canonical_tests) add_executable(classify classify.c) target_link_libraries(classify query_classifier fullcore) -add_test(TestQueryClassifier classify ${CMAKE_CURRENT_SOURCE_DIR}/input.sql 
${CMAKE_CURRENT_SOURCE_DIR}/expected.sql) +add_test(Internal-TestQueryClassifier classify ${CMAKE_CURRENT_SOURCE_DIR}/input.sql ${CMAKE_CURRENT_SOURCE_DIR}/expected.sql) diff --git a/query_classifier/test/canonical_tests/CMakeLists.txt b/query_classifier/test/canonical_tests/CMakeLists.txt index 26bc64602..1dafc428e 100644 --- a/query_classifier/test/canonical_tests/CMakeLists.txt +++ b/query_classifier/test/canonical_tests/CMakeLists.txt @@ -9,7 +9,7 @@ else() endif() add_executable(canonizer canonizer.c) target_link_libraries(canonizer pthread query_classifier z dl ssl aio crypt crypto rt m ${EMBEDDED_LIB} fullcore stdc++) -add_test(NAME TestCanonicalQuery COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/canontest.sh +add_test(NAME Internal-TestCanonicalQuery COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/canontest.sh ${CMAKE_CURRENT_BINARY_DIR}/test.log ${CMAKE_CURRENT_SOURCE_DIR}/input.sql ${CMAKE_CURRENT_BINARY_DIR}/output.sql diff --git a/rabbitmq_consumer/CMakeLists.txt b/rabbitmq_consumer/CMakeLists.txt index 655620a3c..2363b9e15 100644 --- a/rabbitmq_consumer/CMakeLists.txt +++ b/rabbitmq_consumer/CMakeLists.txt @@ -2,7 +2,7 @@ cmake_minimum_required(VERSION 2.6) include(../macros.cmake) enable_testing() set_variables() -set(CMAKE_INSTALL_PREFIX "/usr/local/skysql/rabbitmq-consumer" CACHE PATH "Prefix prepended to install directories.") +set(CMAKE_INSTALL_PREFIX "/usr/local/rabbitmq-consumer" CACHE PATH "Prefix prepended to install directories.") set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/../cmake") project("RabbitMQ Consumer") diff --git a/rabbitmq_consumer/rabbitmq-message-consumer.spec b/rabbitmq_consumer/rabbitmq-message-consumer.spec index bc43a7715..5e810c00f 100644 --- a/rabbitmq_consumer/rabbitmq-message-consumer.spec +++ b/rabbitmq_consumer/rabbitmq-message-consumer.spec @@ -2,7 +2,7 @@ %define name rabbitmq-message-consumer %define release beta %define version 1.0 -%define install_path /usr/local/skysql/maxscale/extra/consumer/ +%define install_path 
/usr/local/mariadb/rabbitmq-consumer/ BuildRoot: %{buildroot} Summary: rabbitmq-message-consumer diff --git a/server/MaxScale_template.cnf b/server/MaxScale_template.cnf index 90d994520..feec5695d 100644 --- a/server/MaxScale_template.cnf +++ b/server/MaxScale_template.cnf @@ -182,6 +182,12 @@ replace=select # router_options= # slave_selection_criteria=[LEAST_CURRENT_OPERATIONS|LEAST_BEHIND_MASTER] # +# router_options=max_sescmd_history specifies a limit on the number of 'session commands' +# a single session can execute. Please refer to the configuration guide for more details - optional. +# +# router_options= +# max_sescmd_history=2500 +# # max_slave_connections specifies how many slaves a router session can # connect to - optional. # diff --git a/server/core/CMakeLists.txt b/server/core/CMakeLists.txt index b60999901..6a481d370 100644 --- a/server/core/CMakeLists.txt +++ b/server/core/CMakeLists.txt @@ -1,14 +1,15 @@ if(BUILD_TESTS) file(GLOB FULLCORE_SRC *.c) add_library(fullcore STATIC ${FULLCORE_SRC}) - target_link_libraries(fullcore log_manager utils pthread ${EMBEDDED_LIB} ssl aio rt crypt dl crypto inih z m stdc++) + target_link_libraries(fullcore ${CURL_LIBRARIES} log_manager utils pthread ${EMBEDDED_LIB} ssl aio rt crypt dl crypto inih z m stdc++) endif() + add_executable(maxscale atomic.c buffer.c spinlock.c gateway.c gw_utils.c utils.c dcb.c load_utils.c session.c service.c server.c poll.c config.c users.c hashtable.c dbusers.c thread.c gwbitmask.c monitor.c adminusers.c secrets.c filter.c modutil.c hint.c - housekeeper.c memlog.c) -target_link_libraries(maxscale ${EMBEDDED_LIB} log_manager utils ssl aio pthread crypt dl crypto inih z rt m stdc++) + housekeeper.c memlog.c resultset.c) +target_link_libraries(maxscale ${EMBEDDED_LIB} ${CURL_LIBRARIES} log_manager utils ssl aio pthread crypt dl crypto inih z rt m stdc++) install(TARGETS maxscale DESTINATION bin) add_executable(maxkeys maxkeys.c secrets.c utils.c) diff --git a/server/core/adminusers.c 
b/server/core/adminusers.c index 2f1bc8fe8..64ea5f224 100644 --- a/server/core/adminusers.c +++ b/server/core/adminusers.c @@ -91,7 +91,7 @@ char *pw; initialise(); if (users == NULL) { - if (strcmp(username, "admin") == 0 && strcmp(password, "skysql") == 0) + if (strcmp(username, "admin") == 0 && strcmp(password, "mariadb") == 0) return 1; } else @@ -123,7 +123,7 @@ char uname[80], passwd[80]; sprintf(fname, "%s/etc/passwd", home); } else{ - sprintf(fname, "/usr/local/skysql/MaxScale/etc/passwd"); + sprintf(fname, "/usr/local/mariadb-maxscale/etc/passwd"); } if ((fp = fopen(fname, "r")) == NULL) return NULL; @@ -159,7 +159,7 @@ char fname[1024], *home, *cpasswd; sprintf(fname, "%s/etc/passwd", home); } else{ - sprintf(fname, "/usr/local/skysql/MaxScale/etc/passwd"); + sprintf(fname, "/usr/local/mariadb-maxscale/etc/passwd"); } if (users == NULL) @@ -253,12 +253,14 @@ char* admin_remove_user( /** * Open passwd file and remove user from the file. */ - if ((home = getenv("MAXSCALE_HOME")) != NULL && strlen(home) < 1024) { + if ((home = getenv("MAXSCALE_HOME")) != NULL && + strnlen(home,PATH_MAX) < PATH_MAX && + strnlen(home,PATH_MAX) > 0) { sprintf(fname, "%s/etc/passwd", home); sprintf(fname_tmp, "%s/etc/passwd_tmp", home); } else { - sprintf(fname, "/usr/local/skysql/MaxScale/etc/passwd"); - sprintf(fname_tmp, "/usr/local/skysql/MaxScale/etc/passwd_tmp"); + sprintf(fname, "/usr/local/mariadb-maxscale/etc/passwd"); + sprintf(fname_tmp, "/usr/local/mariadb-maxscale/etc/passwd_tmp"); } /** * Rewrite passwd file from memory. 
diff --git a/server/core/buffer.c b/server/core/buffer.c index 6f9a162be..4c788cdd2 100644 --- a/server/core/buffer.c +++ b/server/core/buffer.c @@ -336,6 +336,8 @@ gwbuf_append(GWBUF *head, GWBUF *tail) { if (!head) return tail; + if(!tail) + return head; CHK_GWBUF(head); head->tail->next = tail; head->tail = tail->tail; @@ -638,6 +640,8 @@ GWBUF *newbuf; char *ptr; int len; + if(orig == NULL) + return NULL; if (orig->next == NULL) return orig; diff --git a/server/core/config.c b/server/core/config.c index ad7c6fd6b..cc50e1b99 100644 --- a/server/core/config.c +++ b/server/core/config.c @@ -40,6 +40,8 @@ * internal router suppression of messages * 30/10/14 Massimiliano Pinto Added disable_master_failback parameter * 07/11/14 Massimiliano Pinto Addition of monitor timeouts for connect/read/write + * 20/02/15 Markus Mäkelä Added connection_timeout parameter for services + * 05/03/15 Massimiliano Pinto Added notification_feedback support * * @endverbatim */ @@ -49,14 +51,27 @@ #include #include #include -#include +#include #include #include #include #include +#include #include #include #include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + /** Defined in log_manager.cc */ extern int lm_enabled_logfiles_bitmask; @@ -69,12 +84,19 @@ static int process_config_update(CONFIG_CONTEXT *); static void free_config_context(CONFIG_CONTEXT *); static char *config_get_value(CONFIG_PARAMETER *, const char *); static int handle_global_item(const char *, const char *); +static int handle_feedback_item(const char *, const char *); static void global_defaults(); +static void feedback_defaults(); static void check_config_objects(CONFIG_CONTEXT *context); -static int internalService(char *router); - +int config_truth_value(char *str); +static int internalService(char *router); +int config_get_ifaddr(unsigned char *output); +int config_get_release_string(char* release); +FEEDBACK_CONF * config_get_feedback_data(); +void 
config_add_param(CONFIG_CONTEXT*,char*,char*); static char *config_file = NULL; static GATEWAY_CONF gateway; +static FEEDBACK_CONF feedback; char *version_string = NULL; @@ -120,6 +142,12 @@ CONFIG_PARAMETER *param, *p1; { return handle_global_item(name, value); } + + if (strcasecmp(section, "feedback") == 0) + { + return handle_feedback_item(name, value); + } + /* * If we already have some parameters for the object * add the parameters to that object. If not create @@ -191,6 +219,7 @@ int rval; } global_defaults(); + feedback_defaults(); config.object = ""; config.next = NULL; @@ -278,16 +307,41 @@ int error_count = 0; char *user; char *auth; char *enable_root_user; + char *connection_timeout; + char *auth_all_servers; + char *strip_db_esc; char *weightby; char *version_string; bool is_rwsplit = false; - + bool is_schemarouter = false; + char *allow_localhost_match_wildcard_host; + obj->element = service_alloc(obj->object, router); user = config_get_value(obj->parameters, "user"); auth = config_get_value(obj->parameters, "passwd"); enable_root_user = config_get_value( obj->parameters, "enable_root_user"); + + connection_timeout = + config_get_value( + obj->parameters, + "connection_timeout"); + + auth_all_servers = + config_get_value( + obj->parameters, + "auth_all_servers"); + + strip_db_esc = + config_get_value( + obj->parameters, + "strip_db_esc"); + + allow_localhost_match_wildcard_host = + config_get_value(obj->parameters, + "localhost_match_wildcard_host"); + weightby = config_get_value(obj->parameters, "weightby"); version_string = config_get_value(obj->parameters, @@ -298,7 +352,7 @@ int error_count = 0; is_rwsplit = true; } - char *allow_localhost_match_wildcard_host = + allow_localhost_match_wildcard_host = config_get_value(obj->parameters, "localhost_match_wildcard_host"); if (obj->element == NULL) /*< if module load failed */ @@ -332,6 +386,20 @@ int error_count = 0; serviceEnableRootUser( obj->element, config_truth_value(enable_root_user)); + + if 
(connection_timeout) + serviceSetTimeout( + obj->element, + atoi(connection_timeout)); + + if(auth_all_servers) + serviceAuthAllServers(obj->element, + config_truth_value(auth_all_servers)); + + if(strip_db_esc) + serviceStripDbEsc(obj->element, + config_truth_value(strip_db_esc)); + if (weightby) serviceWeightBy(obj->element, weightby); @@ -713,7 +781,7 @@ int error_count = 0; /* if id is not set, do it now */ if (gateway.id == 0) { setipaddress(&serv_addr.sin_addr, (address == NULL) ? "0.0.0.0" : address); - gateway.id = (unsigned long) (serv_addr.sin_addr.s_addr + port + getpid()); + gateway.id = (unsigned long) (serv_addr.sin_addr.s_addr + port != NULL ? atoi(port) : 0 + getpid()); } if (service && socket && protocol) { @@ -778,9 +846,6 @@ int error_count = 0; char *user; char *passwd; unsigned long interval = 0; - int replication_heartbeat = 0; - int detect_stale_master = 0; - int disable_master_failback = 0; int connect_timeout = 0; int read_timeout = 0; int write_timeout = 0; @@ -793,18 +858,6 @@ int error_count = 0; interval = strtoul(config_get_value(obj->parameters, "monitor_interval"), NULL, 10); } - if (config_get_value(obj->parameters, "detect_replication_lag")) { - replication_heartbeat = atoi(config_get_value(obj->parameters, "detect_replication_lag")); - } - - if (config_get_value(obj->parameters, "detect_stale_master")) { - detect_stale_master = atoi(config_get_value(obj->parameters, "detect_stale_master")); - } - - if (config_get_value(obj->parameters, "disable_master_failback")) { - disable_master_failback = atoi(config_get_value(obj->parameters, "disable_master_failback")); - } - if (config_get_value(obj->parameters, "backend_connect_timeout")) { connect_timeout = atoi(config_get_value(obj->parameters, "backend_connect_timeout")); } @@ -827,25 +880,12 @@ int error_count = 0; gateway.id = getpid(); } - /* add the maxscale-id to monitor data */ - monitorSetId(obj->element, gateway.id); + monitorStart(obj->element,obj->parameters); /* set monitor 
interval */ if (interval > 0) monitorSetInterval(obj->element, interval); - /* set replication heartbeat */ - if(replication_heartbeat == 1) - monitorSetReplicationHeartbeat(obj->element, replication_heartbeat); - - /* detect stale master */ - if(detect_stale_master == 1) - monitorDetectStaleMaster(obj->element, detect_stale_master); - - /* disable master failback */ - if(disable_master_failback == 1) - monitorDisableMasterFailback(obj->element, disable_master_failback); - /* set timeouts */ if (connect_timeout > 0) monitorSetNetworkTimeout(obj->element, MONITOR_CONNECT_TIMEOUT, connect_timeout); @@ -1176,6 +1216,16 @@ config_pollsleep() return gateway.pollsleep; } +/** + * Return the feedback config data pointer + * + * @return The feedback config data pointer + */ +FEEDBACK_CONF * +config_get_feedback_data() +{ + return &feedback; +} static struct { char *logname; @@ -1209,6 +1259,10 @@ int i; { gateway.pollsleep = atoi(value); } + else if (strcmp(name, "ms_timestamp") == 0) + { + skygw_set_highp(atoi(value)); + } else { for (i = 0; lognames[i].logname; i++) @@ -1225,12 +1279,52 @@ int i; return 1; } +/** + * Configuration handler for items in the feedback [feedback] section + * + * @param name The item name + * @param value The item value + * @return 0 on error + */ +static int +handle_feedback_item(const char *name, const char *value) +{ +int i; + if (strcmp(name, "feedback_enable") == 0) + { + feedback.feedback_enable = config_truth_value((char *)value); + } + else if (strcmp(name, "feedback_user_info") == 0) + { + feedback.feedback_user_info = strdup(value); + } + else if (strcmp(name, "feedback_url") == 0) + { + feedback.feedback_url = strdup(value); + } + if (strcmp(name, "feedback_timeout") == 0) + { + feedback.feedback_timeout = atoi(value); + } + if (strcmp(name, "feedback_connect_timeout") == 0) + { + feedback.feedback_connect_timeout = atoi(value); + } + if (strcmp(name, "feedback_frequency") == 0) + { + feedback.feedback_frequency = atoi(value); + } + 
return 1; +} + /** * Set the defaults for the global configuration options */ static void global_defaults() { + uint8_t mac_addr[6]=""; + struct utsname uname_data; gateway.n_threads = 1; gateway.n_nbpoll = DEFAULT_NBPOLLS; gateway.pollsleep = DEFAULT_POLLSLEEP; @@ -1239,6 +1333,42 @@ global_defaults() else gateway.version_string = NULL; gateway.id=0; + + /* get release string */ + if(!config_get_release_string(gateway.release_string)) + sprintf(gateway.release_string,"undefined"); + + /* get first mac_address in SHA1 */ + if(config_get_ifaddr(mac_addr)) { + gw_sha1_str(mac_addr, 6, gateway.mac_sha1); + } else { + memset(gateway.mac_sha1, '\0', sizeof(gateway.mac_sha1)); + memcpy(gateway.mac_sha1, "MAC-undef", 9); + } + + /* get uname info */ + if (uname(&uname_data)) + strcpy(gateway.sysname, "undefined"); + else + strncpy(gateway.sysname, uname_data.sysname, _SYSNAME_STR_LENGTH); +} + +/** + * Set the defaults for the feedback configuration options + */ +static void +feedback_defaults() +{ + feedback.feedback_enable = 0; + feedback.feedback_user_info = NULL; + feedback.feedback_last_action = _NOTIFICATION_SEND_PENDING; + feedback.feedback_timeout = _NOTIFICATION_OPERATION_TIMEOUT; + feedback.feedback_connect_timeout = _NOTIFICATION_CONNECT_TIMEOUT; + feedback.feedback_url = NULL; + feedback.feedback_frequency = 1800; + feedback.release_info = gateway.release_string; + feedback.sysname = gateway.sysname; + feedback.mac_sha1 = gateway.mac_sha1; } /** @@ -1281,20 +1411,27 @@ SERVER *server; char *user; char *auth; char *enable_root_user; - char* max_slave_conn_str; - char* max_slave_rlag_str; + + char *connection_timeout; + + char* auth_all_servers; + char* strip_db_esc; + char* max_slave_conn_str; + char* max_slave_rlag_str; char *version_string; char *allow_localhost_match_wildcard_host; enable_root_user = config_get_value(obj->parameters, "enable_root_user"); - user = config_get_value(obj->parameters, - "user"); + connection_timeout = 
config_get_value(obj->parameters, "connection_timeout"); + user = config_get_value(obj->parameters, + "user"); auth = config_get_value(obj->parameters, - "passwd"); - + "passwd"); + + auth_all_servers = config_get_value(obj->parameters, "auth_all_servers"); + strip_db_esc = config_get_value(obj->parameters, "strip_db_esc"); version_string = config_get_value(obj->parameters, "version_string"); - allow_localhost_match_wildcard_host = config_get_value(obj->parameters, "localhost_match_wildcard_host"); if (version_string) { @@ -1311,6 +1448,15 @@ SERVER *server; if (enable_root_user) serviceEnableRootUser(service, atoi(enable_root_user)); + if (connection_timeout) + serviceSetTimeout(service, atoi(connection_timeout)); + + + if(auth_all_servers) + serviceAuthAllServers(service, atoi(auth_all_servers)); + if(strip_db_esc) + serviceStripDbEsc(service, atoi(strip_db_esc)); + if (allow_localhost_match_wildcard_host) serviceEnableLocalhostMatchWildcardHost( service, @@ -1412,24 +1558,38 @@ SERVER *server; } else { - char *user; + char *user; char *auth; char *enable_root_user; + char *connection_timeout; char *allow_localhost_match_wildcard_host; + char *auth_all_servers; + char *strip_db_esc; enable_root_user = config_get_value(obj->parameters, "enable_root_user"); + + connection_timeout = config_get_value(obj->parameters, + "connection_timeout"); + + auth_all_servers = + config_get_value(obj->parameters, + "auth_all_servers"); + strip_db_esc = + config_get_value(obj->parameters, + "strip_db_esc"); + allow_localhost_match_wildcard_host = config_get_value(obj->parameters, "localhost_match_wildcard_host"); - - user = config_get_value(obj->parameters, - "user"); + + user = config_get_value(obj->parameters, + "user"); auth = config_get_value(obj->parameters, - "passwd"); + "passwd"); obj->element = service_alloc(obj->object, - router); - + router); + if (obj->element && user && auth) { serviceSetUser(obj->element, @@ -1438,6 +1598,9 @@ SERVER *server; if (enable_root_user) 
serviceEnableRootUser(obj->element, atoi(enable_root_user)); + if (connection_timeout) + serviceSetTimeout(obj->element, atoi(connection_timeout)); + if (allow_localhost_match_wildcard_host) serviceEnableLocalhostMatchWildcardHost( obj->element, @@ -1660,14 +1823,18 @@ static char *service_params[] = "servers", "user", "passwd", - "enable_root_user", - "localhost_match_wildcard_host", + "enable_root_user", + "connection_timeout", + "auth_all_servers", + "strip_db_esc", + "localhost_match_wildcard_host", "max_slave_connections", "max_slave_replication_lag", - "use_sql_variables_in", /*< rwsplit only */ - "version_string", - "filters", - "weightby", + "use_sql_variables_in", /*< rwsplit only */ + "subservices", + "version_string", + "filters", + "weightby", NULL }; @@ -1846,3 +2013,239 @@ int i; } return 0; } +/** + * Get the MAC address of first network interface + * + * and fill the provided allocated buffer with SHA1 encoding + * @param output Allocated 6 bytes buffer + * @return 1 on success, 0 on failure + * + */ +int +config_get_ifaddr(unsigned char *output) +{ + struct ifreq ifr; + struct ifconf ifc; + char buf[1024]; + struct ifreq* it; + struct ifreq* end; + int success = 0; + + int sock = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP); + if (sock == -1) { + return 0; + }; + + ifc.ifc_len = sizeof(buf); + ifc.ifc_buf = buf; + if (ioctl(sock, SIOCGIFCONF, &ifc) == -1) { + return 0; + } + + it = ifc.ifc_req; + end = it + (ifc.ifc_len / sizeof(struct ifreq)); + + for (; it != end; ++it) { + strcpy(ifr.ifr_name, it->ifr_name); + if (ioctl(sock, SIOCGIFFLAGS, &ifr) == 0) { + if (! (ifr.ifr_flags & IFF_LOOPBACK)) { /* don't count loopback */ + if (ioctl(sock, SIOCGIFHWADDR, &ifr) == 0) { + success = 1; + break; + } + } + } else { + return 0; + } + } + + if (success) + memcpy(output, ifr.ifr_hwaddr.sa_data, 6); + + return success; +} + +/** + * Get the linux distribution info + * + * @param release The allocated buffer where + * the found distribution is copied into. 
+ * @return 1 on success, 0 on failure + * + */ +int +config_get_release_string(char* release) +{ + const char *masks[]= { + "/etc/*-version", "/etc/*-release", + "/etc/*_version", "/etc/*_release" + }; + + bool have_distribution; + char distribution[_RELEASE_STR_LENGTH]=""; + int fd; + int i; + char *to; + + have_distribution= false; + + /* get data from lsb-release first */ + if ((fd= open("/etc/lsb-release", O_RDONLY)) != -1) + { + /* LSB-compliant distribution! */ + size_t len= read(fd, (char*)distribution, sizeof(distribution)-1); + close(fd); + if (len != (size_t)-1) + { + distribution[len]= 0; + char *found= strstr(distribution, "DISTRIB_DESCRIPTION="); + if (found) + { + have_distribution = true; + char *end = strstr(found, "\n"); + if (end == NULL) + end = distribution + len; + found += 20; + + if (*found == '"' && end[-1] == '"') + { + found++; + end--; + } + *end = 0; + + to = strcpy(distribution, "lsb: "); + memmove(to, found, end - found + 1 < INT_MAX ? end - found + 1 : INT_MAX); + + strncpy(release, to, _RELEASE_STR_LENGTH); + + return 1; + } + } + } + + /* if not an LSB-compliant distribution */ + for (i= 0; !have_distribution && i < 4; i++) + { + glob_t found; + char *new_to; + + if (glob(masks[i], GLOB_NOSORT, NULL, &found) == 0) + { + int fd; + int k = 0; + int skipindex = 0; + int startindex = 0; + + for (k = 0; k< found.gl_pathc; k++) { + if (strcmp(found.gl_pathv[k], "/etc/lsb-release") == 0) { + skipindex = k; + } + } + + if ( skipindex == 0) + startindex++; + + if ((fd= open(found.gl_pathv[startindex], O_RDONLY)) != -1) + { + /* + +5 and -8 below cut the file name part out of the + full pathname that corresponds to the mask as above. 
+ */ + new_to = strncpy(distribution, found.gl_pathv[0] + 5,_RELEASE_STR_LENGTH - 1); + new_to += 8; + *new_to++ = ':'; + *new_to++ = ' '; + + size_t to_len= distribution + sizeof(distribution) - 1 - new_to; + size_t len= read(fd, (char*)new_to, to_len); + + close(fd); + + if (len != (size_t)-1) + { + new_to[len]= 0; + char *end= strstr(new_to, "\n"); + if (end) + *end= 0; + + have_distribution= true; + strncpy(release, new_to, _RELEASE_STR_LENGTH); + } + } + } + globfree(&found); + } + + if (have_distribution) + return 1; + else + return 0; +} + +/** + * Add the 'send_feedback' task to the task list + */ +void +config_enable_feedback_task(void) { + FEEDBACK_CONF *cfg = config_get_feedback_data(); + int url_set = 0; + int user_info_set = 0; + int enable_set = cfg->feedback_enable; + + url_set = cfg->feedback_url != NULL && strlen(cfg->feedback_url); + user_info_set = cfg->feedback_user_info != NULL && strlen(cfg->feedback_user_info); + + if (enable_set && url_set && user_info_set) { + /* Add the task to the tasl list */ + if (hktask_add("send_feedback", module_feedback_send, cfg, cfg->feedback_frequency)) { + + LOGIF(LM, (skygw_log_write_flush( + LOGFILE_MESSAGE, + "Notification service feedback task started: URL=%s, User-Info=%s, Frequency %u seconds", + cfg->feedback_url, + cfg->feedback_user_info, + cfg->feedback_frequency))); + } + } else { + if (enable_set) { + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error: Notification service feedback cannot start: feedback_enable=1 but" + " some required parameters are not set: %s%s%s", + url_set == 0 ? "feedback_url is not set" : "", (user_info_set == 0 && url_set == 0) ? ", " : "", user_info_set == 0 ? 
"feedback_user_info is not set" : ""))); + } else { + LOGIF(LT, (skygw_log_write_flush( + LOGFILE_TRACE, + "Notification service feedback is not enabled"))); + } + } +} + +/** + * Remove the 'send_feedback' task + */ +void +config_disable_feedback_task(void) { + hktask_remove("send_feedback"); +} + +unsigned long config_get_gateway_id() +{ + return gateway.id; +} +void config_add_param(CONFIG_CONTEXT* obj, char* key,char* value) +{ + CONFIG_PARAMETER* nptr = malloc(sizeof(CONFIG_PARAMETER)); + + if(nptr == NULL) + { + skygw_log_write(LOGFILE_ERROR,"Memory allocation failed when adding configuration parameters"); + return; + } + + nptr->name = strdup(key); + nptr->value = strdup(value); + nptr->next = obj->parameters; + obj->parameters = nptr; +} \ No newline at end of file diff --git a/server/core/dbusers.c b/server/core/dbusers.c index 32b53b969..75715a809 100644 --- a/server/core/dbusers.c +++ b/server/core/dbusers.c @@ -272,7 +272,7 @@ int add_mysql_users_with_host_ipv4(USERS *users, char *user, char *host, char *p } else { if (strcmp(anydb, "N") == 0) { if (db != NULL) - key.resource = strdup(db); + key.resource = strdup(db); else key.resource = NULL; } else { @@ -324,6 +324,112 @@ int add_mysql_users_with_host_ipv4(USERS *users, char *user, char *host, char *p return ret; } +/** + * Add the database specific grants from mysql.db table into the service resources hashtable + * environment. 
+ * + * @param service The current service + * @param users The users table into which to load the users + * @return -1 on any error or the number of users inserted (0 means no users at all) + */ +static int +addDatabases(SERVICE *service, MYSQL *con) +{ + MYSQL_ROW row; + MYSQL_RES *result = NULL; + char *service_user = NULL; + char *service_passwd = NULL; + int ndbs = 0; + + char *get_showdbs_priv_query = LOAD_MYSQL_DATABASE_NAMES; + + serviceGetUser(service, &service_user, &service_passwd); + + if (service_user == NULL || service_passwd == NULL) + return -1; + + if (mysql_query(con, get_showdbs_priv_query)) { + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : Loading database names for service %s encountered " + "error: %s.", + service->name, + mysql_error(con)))); + return -1; + } + + result = mysql_store_result(con); + + if (result == NULL) { + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : Loading database names for service %s encountered " + "error: %s.", + service->name, + mysql_error(con)))); + return -1; + } + + /* Result has only one row */ + row = mysql_fetch_row(result); + + if (row) { + ndbs = atoi(row[0]); + } else { + ndbs = 0; + + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "%s: Unable to load database grant information, MaxScale " + "authentication will proceed without including database " + "permissions. 
To correct this GRANT select permission " + "on mysql.db to the user %s.", + service->name, service_user))); + } + + /* free resut set */ + mysql_free_result(result); + + if (!ndbs) { + /* return if no db names are available */ + return 0; + } + + if (mysql_query(con, "SHOW DATABASES")) { + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : Loading database names for service %s encountered " + "error: %s.", + service->name, + mysql_error(con)))); + + return -1; + } + + result = mysql_store_result(con); + + if (result == NULL) { + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : Loading database names for service %s encountered " + "error: %s.", + service->name, + mysql_error(con)))); + + return -1; + } + + /* insert key and value "" */ + while ((row = mysql_fetch_row(result))) { + skygw_log_write(LOGFILE_DEBUG,"%s: Adding database %s to the resouce hash.",service->name,row[0]); + resource_add(service->resources, row[0], ""); + } + + mysql_free_result(result); + + return ndbs; +} + /** * Load the database specific grants from mysql.db table into the service resources hashtable * environment. @@ -420,11 +526,12 @@ getDatabases(SERVICE *service, MYSQL *con) } /* Now populate service->resources hashatable with db names */ - service->resources = resource_alloc(); + service->resources = resource_alloc(); /* insert key and value "" */ while ((row = mysql_fetch_row(result))) { - resource_add(service->resources, row[0], ""); + skygw_log_write(LOGFILE_DEBUG,"%s: Adding database %s to the resouce hash.",service->name,row[0]); + resource_add(service->resources, row[0], ""); } mysql_free_result(result); @@ -432,6 +539,468 @@ getDatabases(SERVICE *service, MYSQL *con) return ndbs; } +/** + * Load the user/passwd from mysql.user table into the service users' hashtable + * environment from all the backend servers. 
+ * + * @param service The current service + * @param users The users table into which to load the users + * @return -1 on any error or the number of users inserted + * (0 means no users at all) + */ +static int +getAllUsers(SERVICE *service, USERS *users) +{ + MYSQL *con = NULL; + MYSQL_ROW row; + MYSQL_RES *result = NULL; + char *service_user = NULL; + char *service_passwd = NULL; + char *dpwd = NULL; + int total_users = 0; + SERVER_REF *server; + char *users_query, *tmp; + unsigned char hash[SHA_DIGEST_LENGTH]=""; + char *users_data = NULL; + char *final_data = NULL; + char dbnm[MYSQL_DATABASE_MAXLEN+1]; + int nusers = -1; + int users_data_row_len = MYSQL_USER_MAXLEN + + MYSQL_HOST_MAXLEN + + MYSQL_PASSWORD_LEN + + sizeof(char) + + MYSQL_DATABASE_MAXLEN; + int dbnames = 0; + int db_grants = 0; + + if (serviceGetUser(service, &service_user, &service_passwd) == 0) + { + ss_dassert(service_passwd == NULL || service_user == NULL); + return -1; + } + + if (service->svc_do_shutdown) + { + return -1; + } + + dpwd = decryptPassword(service_passwd); + final_data = (char*) malloc(sizeof(char)); + *final_data = '\0'; + + /** + * Attempt to connect to one of the databases database or until we run + * out of databases + * to try + */ + server = service->dbref; + + if(server == NULL) + { + goto cleanup; + } + + service->resources = resource_alloc(); + + while(server != NULL) + { + + con = mysql_init(NULL); + + if (con == NULL) + { + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : mysql_init: %s", + mysql_error(con)))); + goto cleanup; + } + + /** Set read, write and connect timeout values */ + if (gw_mysql_set_timeouts(con, + DEFAULT_READ_TIMEOUT, + DEFAULT_WRITE_TIMEOUT, + DEFAULT_CONNECT_TIMEOUT)) + { + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : failed to set timeout values for backend " + "connection."))); + mysql_close(con); + goto cleanup; + } + + if (mysql_options(con, MYSQL_OPT_USE_REMOTE_CONNECTION, NULL)) + { + LOGIF(LE, 
(skygw_log_write_flush( + LOGFILE_ERROR, + "Error : failed to set external connection. " + "It is needed for backend server connections."))); + mysql_close(con); + goto cleanup; + } + + + while(!service->svc_do_shutdown && + server != NULL && + (mysql_real_connect(con, + server->server->name, + service_user, + dpwd, + NULL, + server->server->port, + NULL, + 0) == NULL)) + { + server = server->next; + } + + + if (server == NULL) + { + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : Unable to get user data from backend database " + "for service [%s]. Missing server information.", + service->name))); + mysql_close(con); + goto cleanup; + } + + /** Count users. Start with users and db grants for users */ + if (mysql_query(con, MYSQL_USERS_WITH_DB_COUNT)) { + if (mysql_errno(con) != ER_TABLEACCESS_DENIED_ERROR) { + /* This is an error we cannot handle, return */ + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : Loading users for service [%s] encountered " + "error: [%s].", + service->name, + mysql_error(con)))); + mysql_close(con); + goto cleanup; + } else { + /* + * We have got ER_TABLEACCESS_DENIED_ERROR + * try counting users from mysql.user without DB names. 
+ */ + if (mysql_query(con, MYSQL_USERS_COUNT)) { + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : Loading users for service [%s] encountered " + "error: [%s].", + service->name, + mysql_error(con)))); + mysql_close(con); + goto cleanup; + } + } + } + + result = mysql_store_result(con); + + if (result == NULL) { + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : Loading users for service [%s] encountered " + "error: [%s].", + service->name, + mysql_error(con)))); + mysql_close(con); + goto cleanup; + } + + row = mysql_fetch_row(result); + + nusers = atoi(row[0]); + + mysql_free_result(result); + + if (!nusers) { + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : Counting users for service %s returned 0", + service->name))); + mysql_close(con); + goto cleanup; + } + + if(service->enable_root) { + /* enable_root for MySQL protocol module means load the root user credentials from backend databases */ + users_query = LOAD_MYSQL_USERS_WITH_DB_QUERY; + } else { + users_query = LOAD_MYSQL_USERS_WITH_DB_QUERY_NO_ROOT; + } + + /* send first the query that fetches users and db grants */ + if (mysql_query(con, users_query)) { + /* + * An error occurred executing the query + * + * Check mysql_errno() against ER_TABLEACCESS_DENIED_ERROR) + */ + + if (1142 != mysql_errno(con)) { + /* This is an error we cannot handle, return */ + + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : Loading users with dbnames for service [%s] encountered " + "error: [%s], MySQL errno %i", + service->name, + mysql_error(con), + mysql_errno(con)))); + + mysql_close(con); + + goto cleanup; + } else { + /* + * We have got ER_TABLEACCESS_DENIED_ERROR + * try loading users from mysql.user without DB names. + */ + + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "%s: Unable to load database grant information, MaxScale " + "authentication will proceed without including database " + "permissions. 
To correct this GRANT select permission " + "on msql.db to the user %s.", + service->name, service_user))); + + /* check for root user select */ + if(service->enable_root) { + users_query = LOAD_MYSQL_USERS_QUERY " ORDER BY HOST DESC"; + } else { + users_query = LOAD_MYSQL_USERS_QUERY USERS_QUERY_NO_ROOT " ORDER BY HOST DESC"; + } + + if (mysql_query(con, users_query)) { + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : Loading users for service [%s] encountered " + "error: [%s], code %i", + service->name, + mysql_error(con), + mysql_errno(con)))); + + mysql_close(con); + + goto cleanup; + } + + /* users successfully loaded but without db grants */ + + LOGIF(LM, (skygw_log_write_flush( + LOGFILE_MESSAGE, + "Loading users from [mysql.user] without access to [mysql.db] for " + "service [%s]. MaxScale Authentication with DBname on connect " + "will not consider database grants.", + service->name))); + } + } else { + /* + * users successfully loaded with db grants. + */ + skygw_log_write(LOGFILE_DEBUG,"[%s] Loading users with db grants.",service->name); + db_grants = 1; + } + + result = mysql_store_result(con); + + if (result == NULL) { + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : Loading users for service %s encountered " + "error: %s.", + service->name, + mysql_error(con)))); + + mysql_free_result(result); + mysql_close(con); + + goto cleanup; + } + + users_data = (char *)calloc(nusers, (users_data_row_len * sizeof(char)) + 1); + + if (users_data == NULL) { + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : Memory allocation for user data failed due to " + "%d, %s.", + errno, + strerror(errno)))); + mysql_free_result(result); + mysql_close(con); + + goto cleanup; + } + + if (db_grants) { + /* load all mysql database names */ + dbnames = addDatabases(service, con); + + LOGIF(LD, (skygw_log_write( + LOGFILE_DEBUG, + "Loaded %d MySQL Database Names for service [%s]", + dbnames, + service->name))); + } else { + 
service->resources = NULL; + } + + while ((row = mysql_fetch_row(result))) { + + /** + * Up to six fields could be returned. + * user,host,passwd,concat(),anydb,db + * passwd+1 (escaping the first byte that is '*') + */ + + int rc = 0; + char *password = NULL; + + if (row[2] != NULL) { + /* detect mysql_old_password (pre 4.1 protocol) */ + if (strlen(row[2]) == 16) { + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "%s: The user %s@%s has on old password in the " + "backend database. MaxScale does not support these " + "old passwords. This user will not be able to connect " + "via MaxScale. Update the users password to correct " + "this.", + service->name, + row[0], + row[1]))); + continue; + } + + if (strlen(row[2]) > 1) + password = row[2] +1; + else + password = row[2]; + } + + /* + * add user@host and DB global priv and specificsa grant (if possible) + */ + + if (db_grants) { + bool havedb = false; + /* we have dbgrants, store them */ + if(row[5]){ + unsigned long *rowlen = mysql_fetch_lengths(result); + memcpy(dbnm,row[5],rowlen[5]); + memset(dbnm + rowlen[5],0,1); + havedb = true; + if(service->strip_db_esc) { + strip_escape_chars(dbnm); + LOGIF(LD, (skygw_log_write( + LOGFILE_DEBUG, + "[%s]: %s -> %s", + service->name, + row[5], + dbnm))); + } + } + rc = add_mysql_users_with_host_ipv4(users, row[0], row[1], password, row[4],havedb ? dbnm : NULL); + skygw_log_write(LOGFILE_DEBUG,"%s: Adding user:%s host:%s anydb:%s db:%s.", + service->name,row[0],row[1],row[4], + havedb ? 
dbnm : NULL); + } else { + /* we don't have dbgrants, simply set ANY DB for the user */ + rc = add_mysql_users_with_host_ipv4(users, row[0], row[1], password, "Y", NULL); + } + + if (rc == 1) { + if (db_grants) { + char dbgrant[MYSQL_DATABASE_MAXLEN + 1]=""; + if (row[4] != NULL) { + if (strcmp(row[4], "Y")) + strcpy(dbgrant, "ANY"); + else { + if (row[5]) + strncpy(dbgrant, row[5], MYSQL_DATABASE_MAXLEN); + } + } + + if (!strlen(dbgrant)) + strcpy(dbgrant, "no db"); + + /* Log the user being added with its db grants */ + LOGIF(LD, (skygw_log_write_flush( + LOGFILE_DEBUG, + "%s: User %s@%s for database %s added to " + "service user table.", + service->name, + row[0], + row[1], + dbgrant))); + } else { + /* Log the user being added (without db grants) */ + LOGIF(LD, (skygw_log_write_flush( + LOGFILE_DEBUG, + "%s: User %s@%s added to service user table.", + service->name, + row[0], + row[1]))); + } + + /* Append data in the memory area for SHA1 digest */ + strncat(users_data, row[3], users_data_row_len); + + total_users++; + } else { + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Warning: Failed to add user %s@%s for service [%s]. " + "This user will be unavailable via MaxScale.", + row[0], + row[1], + service->name))); + } + } + + mysql_free_result(result); + mysql_close(con); + + if((tmp = realloc(final_data, (strlen(final_data) + strlen(users_data) + 1) * sizeof(char))) == NULL) + { + free(users_data); + goto cleanup; + } + + final_data = tmp; + + strcat(final_data,users_data); + free(users_data); + + if(service->users_from_all) + { + server = server->next; + } + else + { + server = NULL; + } + } + + /* compute SHA1 digest for users' data */ + SHA1((const unsigned char *) final_data, strlen(final_data), hash); + + memcpy(users->cksum, hash, SHA_DIGEST_LENGTH); + + cleanup: + + free(dpwd); + free(final_data); + + return total_users; +} + + /** * Load the user/passwd form mysql.user table into the service users' hashtable * environment. 
@@ -469,6 +1038,12 @@ getUsers(SERVICE *service, USERS *users) ss_dassert(service_passwd == NULL || service_user == NULL); return -1; } + + if(service->users_from_all) + { + return getAllUsers(service,users); + } + con = mysql_init(NULL); if (con == NULL) { @@ -874,6 +1449,7 @@ getUsers(SERVICE *service, USERS *users) return total_users; } + /** * Allocate a new MySQL users table for mysql specific users@host as key * diff --git a/server/core/dcb.c b/server/core/dcb.c index 3d619ff93..5c5d0a81d 100644 --- a/server/core/dcb.c +++ b/server/core/dcb.c @@ -70,6 +70,7 @@ #include #include #include +#include /** Defined in log_manager.cc */ extern int lm_enabled_logfiles_bitmask; @@ -802,7 +803,8 @@ int dcb_read( if (r <= 0 && l_errno != EAGAIN && - l_errno != EWOULDBLOCK) + l_errno != EWOULDBLOCK && + l_errno != 0) { n = -1; goto return_n; @@ -816,6 +818,9 @@ int dcb_read( n = 0; goto return_n; } + + dcb->last_read = hkheartbeat; + bufsize = MIN(b, MAX_BUFFER_SIZE); if ((buffer = gwbuf_alloc(bufsize)) == NULL) @@ -1582,8 +1587,10 @@ va_list args; int dcb_isclient(DCB *dcb) { - if(dcb->session) { - if (dcb->session->client) { + if (dcb->state != DCB_STATE_LISTENING && dcb->session) + { + if (dcb->session->client) + { return (dcb->session && dcb == dcb->session->client); } } @@ -2168,3 +2175,52 @@ dcb_null_auth(DCB *dcb, SERVER *server, SESSION *session, GWBUF *buf) { return 0; } + +/** + * Return DCB counts optionally filtered by usage + * + * @param usage The usage of the DCB + * @return A count of DCBs in the desired state + */ +int +dcb_count_by_usage(DCB_USAGE usage) +{ +int rval = 0; +DCB *ptr; + + spinlock_acquire(&dcbspin); + ptr = allDCBs; + while (ptr) + { + switch (usage) + { + case DCB_USAGE_CLIENT: + if (dcb_isclient(ptr)) + rval++; + break; + case DCB_USAGE_LISTENER: + if (ptr->state == DCB_STATE_LISTENING) + rval++; + break; + case DCB_USAGE_BACKEND: + if (dcb_isclient(ptr) == 0 + && ptr->dcb_role == DCB_ROLE_REQUEST_HANDLER) + rval++; + break; + case 
DCB_USAGE_INTERNAL: + if (ptr->dcb_role == DCB_ROLE_REQUEST_HANDLER) + rval++; + break; + case DCB_USAGE_ZOMBIE: + if (DCB_ISZOMBIE(ptr)) + rval++; + break; + case DCB_USAGE_ALL: + rval++; + break; + } + ptr = ptr->next; + } + spinlock_release(&dcbspin); + return rval; +} diff --git a/server/core/gateway.c b/server/core/gateway.c index d7ade597a..bcb5a3cfa 100644 --- a/server/core/gateway.c +++ b/server/core/gateway.c @@ -53,7 +53,7 @@ #include #include #include -#include +#include #include #include #include @@ -79,6 +79,8 @@ # define _GNU_SOURCE #endif +time_t MaxScaleStarted; + extern char *program_invocation_name; extern char *program_invocation_short_name; @@ -148,6 +150,8 @@ static struct option long_options[] = { {"config", required_argument, 0, 'f'}, {"nodaemon", no_argument, 0, 'd'}, {"log", required_argument, 0, 'l'}, + {"syslog", required_argument, 0, 's'}, + {"maxscalelog", required_argument, 0, 'S'}, {"version", no_argument, 0, 'v'}, {"help", no_argument, 0, '?'}, {0, 0, 0, 0} @@ -632,7 +636,8 @@ static bool resolve_maxscale_homedir( * 3. if /etc/MaxScale/MaxScale.cnf didn't exist or wasn't accessible, home * isn't specified. Thus, try to access $PWD/MaxScale.cnf . */ - tmp = strndup(getenv("PWD"), PATH_MAX); + char *pwd = getenv("PWD"); + tmp = strndup(pwd ? pwd : "PWD_NOT_SET", PATH_MAX); tmp2 = get_expanded_pathname(p_home_dir, tmp, default_cnf_fname); free(tmp2); /*< full path isn't needed so simply free it */ @@ -991,7 +996,11 @@ static void usage(void) " -f|--config=... relative|absolute pathname of MaxScale configuration file\n" " (default: $MAXSCALE_HOME/etc/MaxScale.cnf)\n" " -l|--log=... log to file or shared memory\n" - " -lfile or -lshm - defaults to file\n" + " -lfile or -lshm - defaults to shared memory\n" + " -s|--syslog= log messages to syslog." + " True or false - defaults to true\n" + " -S|--maxscalelog= log messages to MaxScale log." 
+ " True or false - defaults to true\n" " -v|--version print version info and exit\n" " -?|--help show this help\n" , progname); @@ -1053,7 +1062,9 @@ int main(int argc, char **argv) char* cnf_file_arg = NULL; /*< conf filename from cmd-line arg */ void* log_flush_thr = NULL; int option_index; - int logtofile = 1; /* Use shared memory or file */ + int logtofile = 0; /* Use shared memory or file */ + int syslog_enabled = 1; /** Log to syslog */ + int maxscalelog_enabled = 1; /** Log with MaxScale */ ssize_t log_flush_timeout_ms = 0; sigset_t sigset; sigset_t sigpipe_mask; @@ -1093,7 +1104,7 @@ int main(int argc, char **argv) goto return_main; } } - while ((opt = getopt_long(argc, argv, "dc:f:l:v?", + while ((opt = getopt_long(argc, argv, "dc:f:l:vs:S:?", long_options, &option_index)) != -1) { bool succp = true; @@ -1198,7 +1209,28 @@ int main(int argc, char **argv) succp = false; } break; - + case 'S': + if(strstr(optarg,"=")) + { + strtok(optarg,"= "); + maxscalelog_enabled = config_truth_value(strtok(NULL,"= ")); + } + else + { + maxscalelog_enabled = config_truth_value(optarg); + } + break; + case 's': + if(strstr(optarg,"=")) + { + strtok(optarg,"= "); + syslog_enabled = config_truth_value(strtok(NULL,"= ")); + } + else + { + syslog_enabled = config_truth_value(optarg); + } + break; case '?': usage(); rc = EXIT_SUCCESS; @@ -1560,7 +1592,19 @@ int main(int argc, char **argv) argv[0] = "MaxScale"; argv[1] = "-j"; argv[2] = buf; + + if(!syslog_enabled) + { + printf("Syslog logging is disabled.\n"); + } + if(!maxscalelog_enabled) + { + printf("MaxScale logging is disabled.\n"); + } + logmanager_enable_syslog(syslog_enabled); + logmanager_enable_maxscalelog(maxscalelog_enabled); + if (logtofile) { argv[3] = "-l"; /*< write to syslog */ @@ -1796,6 +1840,8 @@ int main(int argc, char **argv) LOGIF(LM, (skygw_log_write(LOGFILE_MESSAGE, "MaxScale started with %d server threads.", config_threadcount()))); + + MaxScaleStarted = time(0); /*< * Serve clients. 
*/ @@ -1950,3 +1996,9 @@ static int write_pid_file(char *home_dir) { /* success */ return 0; } + +int +MaxScaleUptime() +{ + return time(0) - MaxScaleStarted; +} diff --git a/server/core/housekeeper.c b/server/core/housekeeper.c index 2225e628f..584910b6d 100644 --- a/server/core/housekeeper.c +++ b/server/core/housekeeper.c @@ -116,9 +116,20 @@ HKTASK *task, *ptr; ptr = ptr->next; } if (ptr) + { + if (strcmp(ptr->name, name) == 0) + { + spinlock_release(&tasklock); + free(task->name); + free(task); + return 0; + } ptr->next = task; + } else + { tasks = task; + } spinlock_release(&tasklock); return task->nextdue; diff --git a/server/core/load_utils.c b/server/core/load_utils.c index 3fe975f5c..172f24ad0 100644 --- a/server/core/load_utils.c +++ b/server/core/load_utils.c @@ -23,12 +23,13 @@ * @verbatim * Revision History * - * Date Who Description - * 13/06/13 Mark Riddoch Initial implementation - * 14/06/13 Mark Riddoch Updated to add call to ModuleInit if one is - * defined in the loaded module. - * Also updated to call fixed GetModuleObject - * 02/06/14 Mark Riddoch Addition of module info + * Date Who Description + * 13/06/13 Mark Riddoch Initial implementation + * 14/06/13 Mark Riddoch Updated to add call to ModuleInit if one is + * defined in the loaded module. 
+ * Also updated to call fixed GetModuleObject + * 02/06/14 Mark Riddoch Addition of module info + * 26/02/15 Massimiliano Pinto Addition of module_feedback_send * * @endverbatim */ @@ -42,6 +43,11 @@ #include #include #include +#include +#include +#include +#include +#include /** Defined in log_manager.cc */ extern int lm_enabled_logfiles_bitmask; @@ -58,13 +64,52 @@ static void register_module(const char *module, void *modobj, MODULE_INFO *info); static void unregister_module(const char *module); +int module_create_feedback_report(GWBUF **buffer, MODULES *modules, FEEDBACK_CONF *cfg); +int do_http_post(GWBUF *buffer, void *cfg); + +struct MemoryStruct { + char *data; + size_t size; +}; + +/** + * Callback write routine for curl library, getting remote server reply + * + * @param contents New data to add + * @param size Data size + * @param nmemb Elements in the buffer + * @param userp Pointer to the buffer + * @return 0 on failure, memory size on success + * + */ +static size_t +WriteMemoryCallback(void *contents, size_t size, size_t nmemb, void *userp) +{ + size_t realsize = size * nmemb; + struct MemoryStruct *mem = (struct MemoryStruct *)userp; + + mem->data = realloc(mem->data, mem->size + realsize + 1); + if(mem->data == NULL) { + /* out of memory! */ + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error in module_feedback_send(), not enough memory for realloc"))); + return 0; + } + + memcpy(&(mem->data[mem->size]), contents, realsize); + mem->size += realsize; + mem->data[mem->size] = 0; + + return realsize; +} char* get_maxscale_home(void) { char* home = getenv("MAXSCALE_HOME"); if (home == NULL) { - home = "/usr/local/skysql/MaxScale"; + home = "/usr/local/mariadb-maxscale"; } return home; } @@ -73,7 +118,7 @@ char* get_maxscale_home(void) /** * Load the dynamic library related to a gateway module. The routine * will look for library files in the current directory, - * $MAXSCALE_HOME/modules and /usr/local/skysql/MaxScale/modules. 
+ * $MAXSCALE_HOME/modules and /usr/local/mariadb-maxscale/modules. * * @param module Name of the module to load * @param type Type of module, used purely for registration @@ -408,3 +453,444 @@ MODULES *ptr = registered; } dcb_printf(dcb, "----------------+-------------+---------+-------+-------------------------\n\n"); } + +/** + * Print Modules to a DCB + * + * Diagnostic routine to display all the loaded modules + */ +void +moduleShowFeedbackReport(DCB *dcb) +{ +GWBUF *buffer; +MODULES *modules_list = registered; +FEEDBACK_CONF *feedback_config = config_get_feedback_data(); + + if (!module_create_feedback_report(&buffer, modules_list, feedback_config)) { + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error in module_create_feedback_report(): gwbuf_alloc() failed to allocate memory"))); + + return; + } + dcb_printf(dcb, (char *)GWBUF_DATA(buffer)); + gwbuf_free(buffer); +} + +/** + * Provide a row to the result set that defines the set of modules + * + * @param set The result set + * @param data The index of the row to send + * @return The next row or NULL + */ +static RESULT_ROW * +moduleRowCallback(RESULTSET *set, void *data) +{ +int *rowno = (int *)data; +int i = 0;; +char *stat, buf[20]; +RESULT_ROW *row; +MODULES *ptr; + + ptr = registered; + while (i < *rowno && ptr) + { + i++; + ptr = ptr->next; + } + if (ptr == NULL) + { + free(data); + return NULL; + } + (*rowno)++; + row = resultset_make_row(set); + resultset_row_set(row, 0, ptr->module); + resultset_row_set(row, 1, ptr->type); + resultset_row_set(row, 2, ptr->version); + sprintf(buf, "%d.%d.%d", ptr->info->api_version.major, + ptr->info->api_version.minor, + ptr->info->api_version.patch); + resultset_row_set(row, 3, buf); + resultset_row_set(row, 4, ptr->info->status == MODULE_IN_DEVELOPMENT + ? "In Development" + : (ptr->info->status == MODULE_ALPHA_RELEASE + ? "Alpha" + : (ptr->info->status == MODULE_BETA_RELEASE + ? "Beta" + : (ptr->info->status == MODULE_GA + ? 
"GA" + : (ptr->info->status == MODULE_EXPERIMENTAL + ? "Experimental" : "Unknown"))))); + return row; +} + +/** + * Return a resultset that has the current set of modules in it + * + * @return A Result set + */ +RESULTSET * +moduleGetList() +{ +RESULTSET *set; +int *data; + + if ((data = (int *)malloc(sizeof(int))) == NULL) + return NULL; + *data = 0; + if ((set = resultset_create(moduleRowCallback, data)) == NULL) + { + free(data); + return NULL; + } + resultset_add_column(set, "Module Name", 18, COL_TYPE_VARCHAR); + resultset_add_column(set, "Module Type", 12, COL_TYPE_VARCHAR); + resultset_add_column(set, "Version", 10, COL_TYPE_VARCHAR); + resultset_add_column(set, "API Version", 8, COL_TYPE_VARCHAR); + resultset_add_column(set, "Status", 15, COL_TYPE_VARCHAR); + + return set; +} + +/** + * Send loaded modules info to notification service + * + * @param data The configuration details of notification service + */ +void +module_feedback_send(void* data) { + MODULES *modules_list = registered; + CURL *curl = NULL; + CURLcode res; + struct curl_httppost *formpost=NULL; + struct curl_httppost *lastptr=NULL; + GWBUF *buffer = NULL; + void *data_ptr=NULL; + long http_code = 0; + int last_action = _NOTIFICATION_SEND_PENDING; + time_t now; + struct tm *now_tm; + int hour; + int n_mod=0; + char hex_setup_info[2 * SHA_DIGEST_LENGTH + 1]=""; + int http_send = 0; + + now = time(NULL); + now_tm = localtime(&now); + hour = now_tm->tm_hour; + + FEEDBACK_CONF *feedback_config = (FEEDBACK_CONF *) data; + + /* Configuration check */ + + if (feedback_config->feedback_enable == 0 || feedback_config->feedback_url == NULL || feedback_config->feedback_user_info == NULL) { + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error in module_feedback_send(): some mandatory parameters are not set" + " feedback_enable=%u, feedback_url=%s, feedback_user_info=%s", + feedback_config->feedback_enable, + feedback_config->feedback_url == NULL ? 
"NULL" : feedback_config->feedback_url, + feedback_config->feedback_user_info == NULL ? "NULL" : feedback_config->feedback_user_info))); + + feedback_config->feedback_last_action = _NOTIFICATION_SEND_ERROR; + + return; + } + + /** + * Task runs nightly, from 2 AM to 4 AM + * + * If it's done in that time interval, it will be skipped + */ + + if (hour > 4 || hour < 2) { + /* It's not the rigt time, mark it as to be done and return */ + feedback_config->feedback_last_action = _NOTIFICATION_SEND_PENDING; + + LOGIF(LT, (skygw_log_write_flush( + LOGFILE_TRACE, + "module_feedback_send(): execution skipped, current hour [%d]" + " is not within the proper interval (from 2 AM to 4 AM)", + hour))); + + return; + } + + /* Time to run the task: if a previous run was succesfull skip next runs */ + if (feedback_config->feedback_last_action == _NOTIFICATION_SEND_OK) { + /* task was done before, return */ + + LOGIF(LT, (skygw_log_write_flush( + LOGFILE_TRACE, + "module_feedback_send(): execution skipped because of previous succesful run: hour is [%d], last_action [%d]", + hour, feedback_config->feedback_last_action))); + + return; + } + + LOGIF(LT, (skygw_log_write_flush( + LOGFILE_TRACE, + "module_feedback_send(): task now runs: hour is [%d], last_action [%d]", + hour, feedback_config->feedback_last_action))); + + if (!module_create_feedback_report(&buffer, modules_list, feedback_config)) { + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error in module_create_feedback_report(): gwbuf_alloc() failed to allocate memory"))); + + feedback_config->feedback_last_action = _NOTIFICATION_SEND_ERROR; + + return; + } + + /* try sending data via http/https post */ + http_send = do_http_post(buffer, feedback_config); + + if (http_send == 0) { + feedback_config->feedback_last_action = _NOTIFICATION_SEND_OK; + } else { + feedback_config->feedback_last_action = _NOTIFICATION_SEND_ERROR; + + LOGIF(LT, (skygw_log_write_flush( + LOGFILE_TRACE, + "Error in module_create_feedback_report(): 
do_http_post ret_code is %d", http_send))); + } + + LOGIF(LT, (skygw_log_write_flush( + LOGFILE_TRACE, + "module_feedback_send(): task completed: hour is [%d], last_action [%d]", + hour, + feedback_config->feedback_last_action))); + + gwbuf_free(buffer); + +} + +/** + * Create the feedback report as string. + * I t could be sent to notification service + * or just printed via maxadmin/telnet + * + * @param buffe The pointr for GWBUF allocation, to be freed by the caller + * @param modules The mouleds list + * @param cfg The feedback configuration + * @return 0 on failure, 1 on success + * + */ + +int +module_create_feedback_report(GWBUF **buffer, MODULES *modules, FEEDBACK_CONF *cfg) { + + MODULES *ptr = modules; + int n_mod = 0; + char *data_ptr=NULL; + char hex_setup_info[2 * SHA_DIGEST_LENGTH + 1]=""; + time_t now; + struct tm *now_tm; + int report_max_bytes=0; + if(buffer == NULL) + return 0; + + now = time(NULL); + + /* count loaded modules */ + while (ptr) + { + ptr = ptr->next; + n_mod++; + } + + /* module lists pointer is set back to the head */ + ptr = modules; + + /** + * allocate gwbuf for data to send + * + * each module gives 4 rows + * product and release rows add 7 rows + * row is _NOTIFICATION_REPORT_ROW_LEN bytes long + */ + + report_max_bytes = ((n_mod * 4) + 7) * (_NOTIFICATION_REPORT_ROW_LEN + 1); + *buffer = gwbuf_alloc(report_max_bytes); + + if (*buffer == NULL) { + return 0; + } + + /* encode MAC-sha1 to HEX */ + gw_bin2hex(hex_setup_info, cfg->mac_sha1, SHA_DIGEST_LENGTH); + + + data_ptr = (char *)GWBUF_DATA(*buffer); + + snprintf(data_ptr, _NOTIFICATION_REPORT_ROW_LEN, "FEEDBACK_SERVER_UID\t%s\n", hex_setup_info); + data_ptr+=strlen(data_ptr); + snprintf(data_ptr, _NOTIFICATION_REPORT_ROW_LEN, "FEEDBACK_USER_INFO\t%s\n", cfg->feedback_user_info == NULL ? 
"not_set" : cfg->feedback_user_info); + data_ptr+=strlen(data_ptr); + snprintf(data_ptr, _NOTIFICATION_REPORT_ROW_LEN, "VERSION\t%s\n", MAXSCALE_VERSION); + data_ptr+=strlen(data_ptr); + snprintf(data_ptr, _NOTIFICATION_REPORT_ROW_LEN * 2, "NOW\t%lu\nPRODUCT\t%s\n", now, "maxscale"); + data_ptr+=strlen(data_ptr); + snprintf(data_ptr, _NOTIFICATION_REPORT_ROW_LEN, "Uname_sysname\t%s\n", cfg->sysname); + data_ptr+=strlen(data_ptr); + snprintf(data_ptr, _NOTIFICATION_REPORT_ROW_LEN, "Uname_distribution\t%s\n", cfg->release_info); + data_ptr+=strlen(data_ptr); + + while (ptr) + { + snprintf(data_ptr, _NOTIFICATION_REPORT_ROW_LEN * 2, "module_%s_type\t%s\nmodule_%s_version\t%s\n", ptr->module, ptr->type, ptr->module, ptr->version); + data_ptr+=strlen(data_ptr); + + if (ptr->info) { + snprintf(data_ptr, _NOTIFICATION_REPORT_ROW_LEN, "module_%s_api\t%d.%d.%d\n", + ptr->module, + ptr->info->api_version.major, + ptr->info->api_version.minor, + ptr->info->api_version.patch); + + data_ptr+=strlen(data_ptr); + snprintf(data_ptr, _NOTIFICATION_REPORT_ROW_LEN, "module_%s_releasestatus\t%s\n", + ptr->module, + ptr->info->status == MODULE_IN_DEVELOPMENT + ? "In Development" + : (ptr->info->status == MODULE_ALPHA_RELEASE + ? "Alpha" + : (ptr->info->status == MODULE_BETA_RELEASE + ? "Beta" + : (ptr->info->status == MODULE_GA + ? "GA" + : (ptr->info->status == MODULE_EXPERIMENTAL + ? 
"Experimental" : "Unknown"))))); + data_ptr+=strlen(data_ptr); + } + ptr = ptr->next; + } + + return 1; +} + +/** + * Send data to notification service via http/https + * + * @param buffer The GWBUF with data to send + * @param cfg The configuration details of notification service + * @return 0 on success, != 0 on failure + */ +int +do_http_post(GWBUF *buffer, void *cfg) { + CURL *curl = NULL; + CURLcode res; + struct curl_httppost *formpost=NULL; + struct curl_httppost *lastptr=NULL; + long http_code = 0; + struct MemoryStruct chunk; + int ret_code = 1; + + FEEDBACK_CONF *feedback_config = (FEEDBACK_CONF *) cfg; + + /* allocate first memory chunck for httpd servr reply */ + chunk.data = malloc(1); /* will be grown as needed by the realloc above */ + chunk.size = 0; /* no data at this point */ + + /* Initializing curl library for data send via HTTP */ + curl_global_init(CURL_GLOBAL_DEFAULT); + + curl = curl_easy_init(); + + if (curl) { + char error_message[CURL_ERROR_SIZE]=""; + + curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, error_message); + curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1); + curl_easy_setopt(curl, CURLOPT_CONNECTTIMEOUT, feedback_config->feedback_connect_timeout); + curl_easy_setopt(curl, CURLOPT_TIMEOUT, feedback_config->feedback_timeout); + + /* curl API call for data send via HTTP POST using a "file" type input */ + curl_formadd(&formpost, + &lastptr, + CURLFORM_COPYNAME, "data", + CURLFORM_BUFFER, "report.txt", + CURLFORM_BUFFERPTR, (char *)GWBUF_DATA(buffer), + CURLFORM_BUFFERLENGTH, strlen((char *)GWBUF_DATA(buffer)), + CURLFORM_CONTENTTYPE, "text/plain", + CURLFORM_END); + + curl_easy_setopt(curl, CURLOPT_HEADER, 1); + + /* some servers don't like requests that are made without a user-agent field, so we provide one */ + curl_easy_setopt(curl, CURLOPT_USERAGENT, "MaxScale-agent/http-1.0"); + /* Force HTTP/1.0 */ + curl_easy_setopt(curl, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_1_0); + + curl_easy_setopt(curl, CURLOPT_URL, 
feedback_config->feedback_url); + curl_easy_setopt(curl, CURLOPT_HTTPPOST, formpost); + + /* send all data to this function */ + curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback); + + /* we pass our 'chunk' struct to the callback function */ + curl_easy_setopt(curl, CURLOPT_WRITEDATA, (void *)&chunk); + + /* Perform the request, res will get the return code */ + res = curl_easy_perform(curl); + + /* Check for errors */ + if(res != CURLE_OK) { + ret_code = 2; + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error: do_http_post(), curl call for [%s] failed due: %s, %s", + feedback_config->feedback_url, + curl_easy_strerror(res), + error_message))); + goto cleanup; + } else { + curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &http_code); + } + + if (http_code == 302) { + char *from = strstr(chunk.data, "

ok

"); + if (from) { + ret_code = 0; + } else { + ret_code = 3; + goto cleanup; + } + } else { + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error: do_http_post(), Bad HTTP Code from remote server: %lu", + http_code))); + ret_code = 4; + goto cleanup; + } + } else { + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error: do_http_post(), curl object not initialized"))); + ret_code = 1; + goto cleanup; + } + + LOGIF(LT, (skygw_log_write_flush( + LOGFILE_TRACE, + "do_http_post() ret_code [%d], HTTP code [%d]", + ret_code, http_code))); + cleanup: + + if (chunk.data) + free(chunk.data); + + if (curl) { + curl_easy_cleanup(curl); + curl_formfree(formpost); + } + + curl_global_cleanup(); + + return ret_code; +} + diff --git a/server/core/modutil.c b/server/core/modutil.c index 9ba57bdf6..a4209a09d 100644 --- a/server/core/modutil.c +++ b/server/core/modutil.c @@ -63,6 +63,23 @@ unsigned char *ptr; return ptr[4] == 0x03; // COM_QUERY } +/** + * Check if a GWBUF structure is a MySQL COM_STMT_PREPARE packet + * + * @param buf Buffer to check + * @return True if GWBUF is a COM_STMT_PREPARE packet + */ +int +modutil_is_SQL_prepare(GWBUF *buf) +{ +unsigned char *ptr; + + if (GWBUF_LENGTH(buf) < 5) + return 0; + ptr = GWBUF_DATA(buf); + return ptr[4] == 0x16 ; // COM_STMT_PREPARE +} + /** * Extract the SQL portion of a COM_QUERY packet * @@ -243,7 +260,7 @@ modutil_get_SQL(GWBUF *buf) unsigned int len, length; char *ptr, *dptr, *rval = NULL; - if (!modutil_is_SQL(buf)) + if (!modutil_is_SQL(buf) && !modutil_is_SQL_prepare(buf)) return rval; ptr = GWBUF_DATA(buf); length = *ptr++; @@ -515,25 +532,72 @@ return_packetbuf: /** * Parse the buffer and split complete packets into individual buffers. * Any partial packets are left in the old buffer. 
- * @param p_readbuf Buffer to split - * @return Head of the chain of complete packets + * @param p_readbuf Buffer to split, set to NULL if no partial packets are left + * @return Head of the chain of complete packets, all in a single, contiguous buffer */ GWBUF* modutil_get_complete_packets(GWBUF** p_readbuf) { - GWBUF *buff = NULL, *packet = NULL; + GWBUF *buff = NULL, *packet; + uint8_t *ptr,*end; + int len,blen,total = 0; + + if(p_readbuf == NULL || (*p_readbuf) == NULL || + gwbuf_length(*p_readbuf) < 3) + return NULL; + + packet = gwbuf_make_contiguous(*p_readbuf); + packet->next = NULL; + *p_readbuf = packet; + ptr = (uint8_t*)packet->start; + end = (uint8_t*)packet->end; + len = gw_mysql_get_byte3(ptr) + 4; + blen = gwbuf_length(packet); - while((packet = modutil_get_next_MySQL_packet(p_readbuf)) != NULL) + if(len == blen) { - buff = gwbuf_append(buff,packet); + *p_readbuf = NULL; + return packet; } - + else if(len > blen) + { + return NULL; + } + + while(total + len < blen) + { + ptr += len; + total += len; + len = gw_mysql_get_byte3(ptr) + 4; + } + + /** Full packets only, return original */ + if(total + len == blen) + { + *p_readbuf = NULL; + return packet; + } + + /** The next packet is a partial, split into complete and partial packets */ + if((buff = gwbuf_alloc(total)) == NULL) + { + skygw_log_write(LOGFILE_ERROR, + "Error: Failed to allocate new buffer " + " of %d bytes while splitting buffer" + " into complete packets.", + total); + return NULL; + } + buff->next = NULL; + gwbuf_set_type(buff,GWBUF_TYPE_MYSQL); + memcpy(buff->start,packet->start,total); + gwbuf_consume(packet,total); return buff; } /** * Count the number of EOF, OK or ERR packets in the buffer. Only complete * packets are inspected and the buffer is assumed to only contain whole packets. - * If partial packets are in the buffer, they are ingnored. The caller must handle the + * If partial packets are in the buffer, they are ignored. 
The caller must handle the * detection of partial packets in buffers. * @param reply Buffer to use * @param use_ok Whether the DEPRECATE_EOF flag is set @@ -541,7 +605,7 @@ GWBUF* modutil_get_complete_packets(GWBUF** p_readbuf) * @return Number of EOF packets */ int -modutil_count_signal_packets(GWBUF *reply, int use_ok, int n_found) +modutil_count_signal_packets(GWBUF *reply, int use_ok, int n_found, int* more) { unsigned char* ptr = (unsigned char*) reply->start; unsigned char* end = (unsigned char*) reply->end; @@ -549,6 +613,7 @@ modutil_count_signal_packets(GWBUF *reply, int use_ok, int n_found) int pktlen, eof = 0, err = 0; int errlen = 0, eoflen = 0; int iserr = 0, iseof = 0; + bool moreresults = false; while(ptr < end) { @@ -568,8 +633,9 @@ modutil_count_signal_packets(GWBUF *reply, int use_ok, int n_found) } } - if((ptr + pktlen) > end) + if((ptr + pktlen) > end || (eof + n_found) >= 2) { + moreresults = PTR_EOF_MORE_RESULTS(ptr); ptr = prev; break; } @@ -599,6 +665,8 @@ modutil_count_signal_packets(GWBUF *reply, int use_ok, int n_found) } } + *more = moreresults; + return(eof + err); } @@ -676,3 +744,97 @@ static void modutil_reply_routing_error( poll_add_epollin_event_to_dcb(backend_dcb, buf); return; } + +/** + * Find the first occurrence of a character in a string. This function ignores + * escaped characters and all characters that are enclosed in single or double quotes. + * @param ptr Pointer to area of memory to inspect + * @param c Character to search for + * @param len Size of the memory area + * @return Pointer to the first non-escaped, non-quoted occurrence of the character. + * If the character is not found, NULL is returned. 
+ */ +void* strnchr_esc(char* ptr,char c, int len) +{ + char* p = (char*)ptr; + char* start = p; + bool quoted = false, escaped = false; + char qc; + + while(p < start + len) + { + if(escaped) + { + escaped = false; + } + else if(*p == '\\') + { + escaped = true; + } + else if((*p == '\'' || *p == '"') && !quoted) + { + quoted = true; + qc = *p; + } + else if(quoted && *p == qc) + { + quoted = false; + } + else if(*p == c && !escaped && !quoted) + { + return p; + } + p++; + } + + return NULL; +} + +/** + * Create a COM_QUERY packet from a string. + * @param query Query to create. + * @return Pointer to GWBUF with the query or NULL if an error occurred. + */ +GWBUF* modutil_create_query(char* query) +{ + if(query == NULL) + return NULL; + + GWBUF* rval = gwbuf_alloc(strlen(query) + 5); + int pktlen = strlen(query) + 1; + unsigned char* ptr; + + if(rval) + { + ptr = (unsigned char*)rval->start; + *ptr++ = (pktlen); + *ptr++ = (pktlen)>>8; + *ptr++ = (pktlen)>>16; + *ptr++ = 0x0; + *ptr++ = 0x03; + memcpy(ptr,query,strlen(query)); + gwbuf_set_type(rval,GWBUF_TYPE_MYSQL); + } + + return rval; +} + +/** + * Count the number of statements in a query. + * @param buffer Buffer to analyze. + * @return Number of statements. 
+ */ +int modutil_count_statements(GWBUF* buffer) +{ + char* ptr = ((char*)(buffer)->start + 5); + char* end = ((char*)(buffer)->end); + int num = 1; + + while((ptr = strnchr_esc(ptr,';', end - ptr))) + { + num++; + ptr++; + } + + return num; +} \ No newline at end of file diff --git a/server/core/monitor.c b/server/core/monitor.c index 3584978e9..a8401e6bb 100644 --- a/server/core/monitor.c +++ b/server/core/monitor.c @@ -78,9 +78,9 @@ MONITOR *mon; free(mon); return NULL; } - mon->handle = (*mon->module->startMonitor)(NULL); - mon->state = MONITOR_STATE_RUNNING; + mon->handle = NULL; + spinlock_acquire(&monLock); mon->next = allMonitors; allMonitors = mon; @@ -125,9 +125,9 @@ MONITOR *ptr; * @param monitor The Monitor that should be started */ void -monitorStart(MONITOR *monitor) +monitorStart(MONITOR *monitor, void* params) { - monitor->handle = (*monitor->module->startMonitor)(monitor->handle); + monitor->handle = (*monitor->module->startMonitor)(monitor->handle,params); monitor->state = MONITOR_STATE_RUNNING; } @@ -279,22 +279,6 @@ MONITOR *ptr; return ptr; } - -/** - * Set the id of the monitor. - * - * @param mon The monitor instance - * @param id The id for the monitor - */ - -void -monitorSetId(MONITOR *mon, unsigned long id) -{ - if (mon->module->defaultId != NULL) { - mon->module->defaultId(mon->handle, id); - } -} - /** * Set the monitor sampling interval. * @@ -310,48 +294,6 @@ monitorSetInterval (MONITOR *mon, unsigned long interval) } } -/** - * Enable Replication Heartbeat support in monitor. - * - * @param mon The monitor instance - * @param enable The enabling value is 1, 0 turns it off - */ -void -monitorSetReplicationHeartbeat(MONITOR *mon, int enable) -{ - if (mon->module->replicationHeartbeat != NULL) { - mon->module->replicationHeartbeat(mon->handle, enable); - } -} - -/** - * Enable Stale Master assignement. 
- * - * @param mon The monitor instance - * @param enable The enabling value is 1, 0 turns it off - */ -void -monitorDetectStaleMaster(MONITOR *mon, int enable) -{ - if (mon->module->detectStaleMaster != NULL) { - mon->module->detectStaleMaster(mon->handle, enable); - } -} - -/** - * Disable Master Failback - * - * @param mon The monitor instance - * @param disable The value 1 disable the failback, 0 keeps it - */ -void -monitorDisableMasterFailback(MONITOR *mon, int disable) -{ - if (mon->module->disableMasterFailback != NULL) { - mon->module->disableMasterFailback(mon->handle, disable); - } -} - /** * Set Monitor timeouts for connect/read/write * @@ -365,3 +307,66 @@ monitorSetNetworkTimeout(MONITOR *mon, int type, int value) { mon->module->setNetworkTimeout(mon->handle, type, value); } } + +/** + * Provide a row to the result set that defines the set of monitors + * + * @param set The result set + * @param data The index of the row to send + * @return The next row or NULL + */ +static RESULT_ROW * +monitorRowCallback(RESULTSET *set, void *data) +{ +int *rowno = (int *)data; +int i = 0;; +char buf[20]; +RESULT_ROW *row; +MONITOR *ptr; + + spinlock_acquire(&monLock); + ptr = allMonitors; + while (i < *rowno && ptr) + { + i++; + ptr = ptr->next; + } + if (ptr == NULL) + { + spinlock_release(&monLock); + free(data); + return NULL; + } + (*rowno)++; + row = resultset_make_row(set); + resultset_row_set(row, 0, ptr->name); + resultset_row_set(row, 1, ptr->state & MONITOR_STATE_RUNNING + ? 
"Running" : "Stopped"); + spinlock_release(&monLock); + return row; +} + +/** + * Return a resultset that has the current set of monitors in it + * + * @return A Result set + */ +RESULTSET * +monitorGetList() +{ +RESULTSET *set; +int *data; + + if ((data = (int *)malloc(sizeof(int))) == NULL) + return NULL; + *data = 0; + if ((set = resultset_create(monitorRowCallback, data)) == NULL) + { + free(data); + return NULL; + } + resultset_add_column(set, "Monitor", 20, COL_TYPE_VARCHAR); + resultset_add_column(set, "Status", 10, COL_TYPE_VARCHAR); + + return set; +} diff --git a/server/core/poll.c b/server/core/poll.c index f357de72f..377310cb0 100644 --- a/server/core/poll.c +++ b/server/core/poll.c @@ -28,9 +28,11 @@ #include #include #include -#include +#include #include +#include #include +#include #define PROFILE_POLL 0 @@ -151,8 +153,8 @@ static struct { int n_hup; /*< Number of hangup events */ int n_accept; /*< Number of accept events */ int n_polls; /*< Number of poll cycles */ - int n_pollev; /*< Number of polls returnign events */ - int n_nbpollev; /*< Number of polls returnign events */ + int n_pollev; /*< Number of polls returning events */ + int n_nbpollev; /*< Number of polls returning events */ int n_nothreads; /*< Number of times no threads are polling */ int n_fds[MAXNFDS]; /*< Number of wakeups with particular n_fds value */ @@ -731,6 +733,7 @@ unsigned long qtime; dcb->evq.processing_events = ev; dcb->evq.pending_events = 0; pollStats.evq_pending--; + ss_dassert(pollStats.evq_pending >= 0); } spinlock_release(&pollqlock); @@ -1524,3 +1527,104 @@ int i; dcb_printf(pdcb, " > %2d00ms | %-10d | %-10d\n", N_QUEUE_TIMES, queueStats.qtimes[N_QUEUE_TIMES], queueStats.exectimes[N_QUEUE_TIMES]); } + +/** + * Return a poll statistic from the polling subsystem + * + * @param stat The required statistic + * @return The value of that statistic + */ +int +poll_get_stat(POLL_STAT stat) +{ + switch (stat) + { + case POLL_STAT_READ: + return pollStats.n_read; + case 
POLL_STAT_WRITE: + return pollStats.n_write; + case POLL_STAT_ERROR: + return pollStats.n_error; + case POLL_STAT_HANGUP: + return pollStats.n_hup; + case POLL_STAT_ACCEPT: + return pollStats.n_accept; + case POLL_STAT_EVQ_LEN: + return pollStats.evq_length; + case POLL_STAT_EVQ_PENDING: + return pollStats.evq_pending; + case POLL_STAT_EVQ_MAX: + return pollStats.evq_max; + case POLL_STAT_MAX_QTIME: + return (int)queueStats.maxqtime; + case POLL_STAT_MAX_EXECTIME: + return (int)queueStats.maxexectime; + } + return 0; +} + +/** + * Provide a row to the result set that defines the event queue statistics + * + * @param set The result set + * @param data The index of the row to send + * @return The next row or NULL + */ +static RESULT_ROW * +eventTimesRowCallback(RESULTSET *set, void *data) +{ +int *rowno = (int *)data; +char buf[40]; +RESULT_ROW *row; + + if (*rowno >= N_QUEUE_TIMES) + { + free(data); + return NULL; + } + row = resultset_make_row(set); + if (*rowno == 0) + resultset_row_set(row, 0, "< 100ms"); + else if (*rowno == N_QUEUE_TIMES - 1) + { + sprintf(buf, "> %2d00ms", N_QUEUE_TIMES); + resultset_row_set(row, 0, buf); + } + else + { + sprintf(buf, "%2d00 - %2d00ms", *rowno, (*rowno) + 1); + resultset_row_set(row, 0, buf); + } + sprintf(buf, "%d", queueStats.qtimes[*rowno]); + resultset_row_set(row, 1, buf); + sprintf(buf, "%d", queueStats.exectimes[*rowno]); + resultset_row_set(row, 2, buf); + (*rowno)++; + return row; +} + +/** + * Return a resultset that has the current set of services in it + * + * @return A Result set + */ +RESULTSET * +eventTimesGetList() +{ +RESULTSET *set; +int *data; + + if ((data = (int *)malloc(sizeof(int))) == NULL) + return NULL; + *data = 0; + if ((set = resultset_create(eventTimesRowCallback, data)) == NULL) + { + free(data); + return NULL; + } + resultset_add_column(set, "Duration", 20, COL_TYPE_VARCHAR); + resultset_add_column(set, "No. Events Queued", 12, COL_TYPE_VARCHAR); + resultset_add_column(set, "No. 
Events Executed", 12, COL_TYPE_VARCHAR); + + return set; +} diff --git a/server/core/resultset.c b/server/core/resultset.c new file mode 100644 index 000000000..7f9d88bbf --- /dev/null +++ b/server/core/resultset.c @@ -0,0 +1,468 @@ +/* + * This file is distributed as part of the MariaDB Corporation MaxScale. It is free + * software: you can redistribute it and/or modify it under the terms of the + * GNU General Public License as published by the Free Software Foundation, + * version 2. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 51 + * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Copyright MariaDB Corporation Ab 2013-2014 + */ + +/** + * @file resultset.c - Implementation of a generic result set mechanism + * + * @verbatim + * Revision History + * + * Date Who Description + * 17/02/15 Mark Riddoch Initial implementation + * + * @endverbatim + */ + +#include +#include +#include +#include +#include + + +static int mysql_send_fieldcount(DCB *, int); +static int mysql_send_columndef(DCB *, char *, int, int, uint8_t); +static int mysql_send_eof(DCB *, int); +static int mysql_send_row(DCB *, RESULT_ROW *, int); + + +/** + * Create a generic result set + * + * @param func Function to call for each row + * @param data Data to pass to the row retrieval function + * @return An empty resultset or NULL on error + */ +RESULTSET * +resultset_create(RESULT_ROW_CB func, void *data) +{ +RESULTSET *rval; + + if ((rval = (RESULTSET *)malloc(sizeof(RESULTSET))) != NULL) + { + rval->n_cols = 0; + rval->column = NULL; + rval->userdata = data; + rval->fetchrow = func; + } + return rval; +} + +/** + * Free a 
previously allocated resultset + * + * @param resultset The result set to free + */ +void +resultset_free(RESULTSET *resultset) +{ +RESULT_COLUMN *col; + + if (resultset != NULL) + { + col = resultset->column; + while (col) + { + RESULT_COLUMN *next; + + next = col->next; + resultset_column_free(col); + col = next; + } + free(resultset); + } +} + +/** + * Add a new column to a result set. Columns are added to the right + * of the result set, i.e. the existing order is maintained. + * + * @param set The result set + * @param name The column name + * @param len The column length + * @param type The column type + * @return The numebr of columns added to the result set + */ +int +resultset_add_column(RESULTSET *set, char *name, int len, RESULT_COL_TYPE type) +{ +RESULT_COLUMN *newcol, *ptr; + + if ((newcol = (RESULT_COLUMN *)malloc(sizeof(RESULT_COLUMN))) == NULL) + return 0; + if ((newcol->name = strdup(name)) == NULL) + { + free(newcol); + return 0; + } + newcol->type = type; + newcol->len = len; + newcol->next = NULL; + + if (set->column == NULL) + set->column = newcol; + else + { + ptr = set->column; + while (ptr->next) + ptr = ptr->next; + ptr->next = newcol; + } + set->n_cols++; + return 1; +} + +/** + * Free a result set column + * + * @param col Column to free + */ +void +resultset_column_free(RESULT_COLUMN *col) +{ + free(col->name); + free(col); +} + +/** + * Create a blank row, a row with all values NULL, for a result + * set. + * + * @param set The result set the row will be part of + * @return The NULL result set row + */ +RESULT_ROW * +resultset_make_row(RESULTSET *set) +{ +RESULT_ROW *row; +int i; + + if ((row = (RESULT_ROW *)malloc(sizeof(RESULT_ROW))) == NULL) + return NULL; + row->n_cols = set->n_cols; + if ((row->cols = (char **)malloc(row->n_cols * sizeof(char *))) == NULL) + { + free(row); + return NULL; + } + + for (i = 0; i < set->n_cols; i++) + row->cols[i] = NULL; + return row; +} + +/** + * Free a result set row. 
If a column in the row has a non-null values + * then the data is assumed to be a malloc'd pointer and will be free'd. + * If any value is not a malloc'd pointer it should be removed before + * making this call. + * + * @param row The row to free + */ +void +resultset_free_row(RESULT_ROW *row) +{ +int i; + + for (i = 0; i < row->n_cols; i++) + if (row->cols[i]) + free(row->cols[i]); + free(row->cols); + free(row); +} + +/** + * Add a value in a particular column of the row . The value is + * a NULL terminated string and will be copied into malloc'd + * storage by this routine. + * + * @param row The row ro add the column into + * @param col The column number (0 to n_cols - 1) + * @param value The column value, may be NULL + * @return The number of columns inserted + */ +int +resultset_row_set(RESULT_ROW *row, int col, char *value) +{ + if (col < 0 || col >= row->n_cols) + return 0; + if (value) + { + if ((row->cols[col] = strdup(value)) == NULL) + return 0; + return 1; + } + else if (row->cols[col]) + free(row->cols[col]); + row->cols[col] = NULL; + return 1; +} + +/** + * Stream a result set using the MySQL protocol for encodign the result + * set. Each row is retrieved by calling the function passed in the + * argument list. + * + * @param set The result set to stream + * @param dcb The connection to stream the result set to + */ +void +resultset_stream_mysql(RESULTSET *set, DCB *dcb) +{ +RESULT_COLUMN *col; +RESULT_ROW *row; +uint8_t seqno = 2; + + mysql_send_fieldcount(dcb, set->n_cols); + + col = set->column; + while (col) + { + mysql_send_columndef(dcb, col->name, col->type, col->len, seqno++); + col = col->next; + } + mysql_send_eof(dcb, seqno++); + while ((row = (*set->fetchrow)(set, set->userdata)) != NULL) + { + mysql_send_row(dcb, row, seqno++); + resultset_free_row(row); + } + mysql_send_eof(dcb, seqno); +} + +/** + * Send the field count packet in a response packet sequence. 
+ * + * @param dcb DCB of connection to send result set to + * @param count Number of columns in the result set + * @return Non-zero on success + */ +static int +mysql_send_fieldcount(DCB *dcb, int count) +{ +GWBUF *pkt; +uint8_t *ptr; + + if ((pkt = gwbuf_alloc(5)) == NULL) + return 0; + ptr = GWBUF_DATA(pkt); + *ptr++ = 0x01; // Payload length + *ptr++ = 0x00; + *ptr++ = 0x00; + *ptr++ = 0x01; // Sequence number in response + *ptr++ = count; // Length of result string + return dcb->func.write(dcb, pkt); +} + + +/** + * Send the column definition packet in a response packet sequence. + * + * @param dcb The DCB of the connection + * @param name Name of the column + * @param type Column type + * @param len Column length + * @param seqno Packet sequence number + * @return Non-zero on success + */ +static int +mysql_send_columndef(DCB *dcb, char *name, int type, int len, uint8_t seqno) +{ +GWBUF *pkt; +uint8_t *ptr; +int plen; + + if ((pkt = gwbuf_alloc(26 + strlen(name))) == NULL) + return 0; + ptr = GWBUF_DATA(pkt); + plen = 22 + strlen(name); + *ptr++ = plen & 0xff; + *ptr++ = (plen >> 8) & 0xff; + *ptr++ = (plen >> 16)& 0xff; + *ptr++ = seqno; // Sequence number in response + *ptr++ = 3; // Catalog is always def + *ptr++ = 'd'; + *ptr++ = 'e'; + *ptr++ = 'f'; + *ptr++ = 0; // Schema name length + *ptr++ = 0; // virtual table name length + *ptr++ = 0; // Table name length + *ptr++ = strlen(name); // Column name length; + while (*name) + *ptr++ = *name++; // Copy the column name + *ptr++ = 0; // Orginal column name + *ptr++ = 0x0c; // Length of next fields always 12 + *ptr++ = 0x3f; // Character set + *ptr++ = 0; + *ptr++ = len & 0xff; // Length of column + *ptr++ = (len >> 8) & 0xff; + *ptr++ = (len >> 16) & 0xff; + *ptr++ = (len >> 24) & 0xff; + *ptr++ = type; + *ptr++ = 0x81; // Two bytes of flags + if (type == 0xfd) + *ptr++ = 0x1f; + else + *ptr++ = 0x00; + *ptr++= 0; + *ptr++= 0; + *ptr++= 0; + return dcb->func.write(dcb, pkt); +} + + +/** + * Send an EOF 
packet in a response packet sequence. + * + * @param dcb The client connection + * @param seqno The sequence number of the EOF packet + * @return Non-zero on success + */ +static int +mysql_send_eof(DCB *dcb, int seqno) +{ +GWBUF *pkt; +uint8_t *ptr; + + if ((pkt = gwbuf_alloc(9)) == NULL) + return 0; + ptr = GWBUF_DATA(pkt); + *ptr++ = 0x05; + *ptr++ = 0x00; + *ptr++ = 0x00; + *ptr++ = seqno; // Sequence number in response + *ptr++ = 0xfe; // Length of result string + *ptr++ = 0x00; // No Errors + *ptr++ = 0x00; + *ptr++ = 0x02; // Autocommit enabled + *ptr++ = 0x00; + return dcb->func.write(dcb, pkt); +} + + + +/** + * Send a row packet in a response packet sequence. + * + * @param dcb The client connection + * @param row The row to send + * @param seqno The sequence number of the EOF packet + * @return Non-zero on success + */ +static int +mysql_send_row(DCB *dcb, RESULT_ROW *row, int seqno) +{ +GWBUF *pkt; +int i, len = 4; +uint8_t *ptr; + + for (i = 0; i < row->n_cols; i++) + { + if (row->cols[i]) + len += strlen(row->cols[i]); + len++; + } + + if ((pkt = gwbuf_alloc(len)) == NULL) + return 0; + ptr = GWBUF_DATA(pkt); + len -= 4; + *ptr++ = len & 0xff; + *ptr++ = (len >> 8) & 0xff; + *ptr++ = (len >> 16) & 0xff; + *ptr++ = seqno; + for (i = 0; i < row->n_cols; i++) + { + if (row->cols[i]) + { + len = strlen(row->cols[i]); + *ptr++ = len; + strncpy((char *)ptr, row->cols[i], len); + ptr += len; + } + else + { + *ptr++ = 0; // NULL column + } + } + + return dcb->func.write(dcb, pkt); +} + +/** + * Return true if the string only contains numerics + * + * @param value String to test + * @return Non-zero if the string is made of of numeric values + */ +static int +value_is_numeric(char *value) +{ + while (*value) + { + if (!isdigit(*value)) + return 0; + value++; + } + return 1; +} + +/** + * Stream a result set encoding it as a JSON object + * Each row is retrieved by calling the function passed in the + * argument list. 
+ * + * @param set The result set to stream + * @param dcb The connection to stream the result set to + */ +void +resultset_stream_json(RESULTSET *set, DCB *dcb) +{ +RESULT_COLUMN *col; +RESULT_ROW *row; +int rowno = 0; + + + dcb_printf(dcb, "[ "); + while ((row = (*set->fetchrow)(set, set->userdata)) != NULL) + { + int i = 0; + if (rowno++ > 0) + dcb_printf(dcb, ",\n"); + dcb_printf(dcb, "{ "); + col = set->column; + while (col) + { + + dcb_printf(dcb, "\"%s\" : ", col->name); + if (row->cols[i] && value_is_numeric(row->cols[i])) + dcb_printf(dcb, "%s", row->cols[i]); + else if (row->cols[i]) + dcb_printf(dcb, "\"%s\"", row->cols[i]); + else + dcb_printf(dcb, "NULL"); + i++; + col = col->next; + if (col) + dcb_printf(dcb, ", "); + } + resultset_free_row(row); + dcb_printf(dcb, "}"); + } + dcb_printf(dcb, "]\n"); +} diff --git a/server/core/secrets.c b/server/core/secrets.c index 32fe59467..3eae2cc48 100644 --- a/server/core/secrets.c +++ b/server/core/secrets.c @@ -73,7 +73,7 @@ static int reported = 0; home = getenv("MAXSCALE_HOME"); if (home == NULL) { - home = "/usr/local/skysql/MaxScale"; + home = "/usr/local/mariadb-maxscale"; } snprintf(secret_file, 255, "%s/etc/.secrets", home); diff --git a/server/core/server.c b/server/core/server.c index 0f39c0edc..425089d93 100644 --- a/server/core/server.c +++ b/server/core/server.c @@ -659,3 +659,75 @@ SERVER_PARAM *param = server->parameters; } return NULL; } + +/** + * Provide a row to the result set that defines the set of servers + * + * @param set The result set + * @param data The index of the row to send + * @return The next row or NULL + */ +static RESULT_ROW * +serverRowCallback(RESULTSET *set, void *data) +{ +int *rowno = (int *)data; +int i = 0;; +char *stat, buf[20]; +RESULT_ROW *row; +SERVER *ptr; + + spinlock_acquire(&server_spin); + ptr = allServers; + while (i < *rowno && ptr) + { + i++; + ptr = ptr->next; + } + if (ptr == NULL) + { + spinlock_release(&server_spin); + free(data); + return NULL; + } + 
(*rowno)++; + row = resultset_make_row(set); + resultset_row_set(row, 0, ptr->unique_name); + resultset_row_set(row, 1, ptr->name); + sprintf(buf, "%d", ptr->port); + resultset_row_set(row, 2, buf); + sprintf(buf, "%d", ptr->stats.n_current); + resultset_row_set(row, 3, buf); + stat = server_status(ptr); + resultset_row_set(row, 4, stat); + free(stat); + spinlock_release(&server_spin); + return row; +} + +/** + * Return a resultset that has the current set of servers in it + * + * @return A Result set + */ +RESULTSET * +serverGetList() +{ +RESULTSET *set; +int *data; + + if ((data = (int *)malloc(sizeof(int))) == NULL) + return NULL; + *data = 0; + if ((set = resultset_create(serverRowCallback, data)) == NULL) + { + free(data); + return NULL; + } + resultset_add_column(set, "Server", 20, COL_TYPE_VARCHAR); + resultset_add_column(set, "Address", 15, COL_TYPE_VARCHAR); + resultset_add_column(set, "Port", 5, COL_TYPE_VARCHAR); + resultset_add_column(set, "Connections", 8, COL_TYPE_VARCHAR); + resultset_add_column(set, "Status", 20, COL_TYPE_VARCHAR); + + return set; +} diff --git a/server/core/service.c b/server/core/service.c index 167f8a4e7..ad1a266b6 100644 --- a/server/core/service.c +++ b/server/core/service.c @@ -34,6 +34,8 @@ * 09/09/14 Massimiliano Pinto Added service option for localhost authentication * 13/10/14 Massimiliano Pinto Added hashtable for resources (i.e database names for MySQL services) * 06/02/15 Mark Riddoch Added caching of authentication data + * 18/02/15 Mark Riddoch Added result set management + * 03/03/15 Massimiliano Pinto Added config_enable_feedback_task() call in serviceStartAll * * @endverbatim */ @@ -57,6 +59,8 @@ #include #include #include +#include +#include /** Defined in log_manager.cc */ extern int lm_enabled_logfiles_bitmask; @@ -127,6 +131,9 @@ SERVICE *service; } service->name = strdup(servname); service->routerModule = strdup(router); + service->users_from_all = false; + service->resources = NULL; + if (service->name == 
NULL || service->routerModule == NULL) { if (service->name) @@ -222,7 +229,7 @@ GWPROTOCOL *funcs; { /* Try loading authentication data from file cache */ char *ptr, path[4097]; - strcpy(path, "/usr/local/skysql/MaxScale"); + strcpy(path, "/usr/local/mariadb-maxscale"); if ((ptr = getenv("MAXSCALE_HOME")) != NULL) { strncpy(path, ptr, 4096); @@ -250,9 +257,9 @@ GWPROTOCOL *funcs; else { /* Save authentication data to file cache */ - char *ptr, path[4096]; + char *ptr, path[4097]; int mkdir_rval = 0; - strcpy(path, "/usr/local/skysql/MaxScale"); + strcpy(path, "/usr/local/mariadb-maxscale"); if ((ptr = getenv("MAXSCALE_HOME")) != NULL) { strncpy(path, ptr, 4096); @@ -296,7 +303,8 @@ GWPROTOCOL *funcs; LOGFILE_ERROR, "Service %s: failed to load any user " "information. Authentication will " - "probably fail as a result."))); + "probably fail as a result.", + service->name))); } /* At service start last update is set to USERS_REFRESH_TIME seconds earlier. @@ -431,6 +439,12 @@ int listeners = 0; service->stats.started = time(0); } + /** Add the task that monitors session timeouts */ + if(service->conn_timeout > 0) + { + hktask_add("connection_timeout",session_close_timeouts,NULL,5); + } + return listeners; } @@ -467,6 +481,8 @@ serviceStartAll() SERVICE *ptr; int n = 0,i; + config_enable_feedback_task(); + ptr = allServices; while (ptr && !ptr->svc_do_shutdown) { @@ -803,6 +819,61 @@ serviceEnableRootUser(SERVICE *service, int action) return 1; } +/** + * Enable/Disable loading the user data from only one server or all of them + * + * @param service The service we are setting the data for + * @param action 1 for root enable, 0 for disable access + * @return 0 on failure + */ + +int +serviceAuthAllServers(SERVICE *service, int action) +{ + if (action != 0 && action != 1) + return 0; + + service->users_from_all = action; + + return 1; +} + +/** + * Whether to strip escape characters from the name of the database the client + * is connecting to. 
+ * @param service Service to configure + * @param action 0 for disabled, 1 for enabled + * @return 1 if successful, 0 on error + */ +int serviceStripDbEsc(SERVICE* service, int action) +{ + if (action != 0 && action != 1) + return 0; + + service->strip_db_esc = action; + + return 1; +} + + +/** + * Sets the session timeout for the service. + * @param service Service to configure + * @param val Timeout in seconds + * @return 1 on success, 0 when the value is invalid + */ +int +serviceSetTimeout(SERVICE *service, int val) +{ + + if(val < 0) + return 0; + service->conn_timeout = val; + + return 1; +} + + /** * Trim whitespace from the from an rear of a string * @@ -1511,3 +1582,179 @@ void service_shutdown() } spinlock_release(&service_spin); } + +/** + * Return the count of all sessions active for all services + * + * @return Count of all active sessions + */ +int +serviceSessionCountAll() +{ +SERVICE *ptr; +int rval = 0; + + spinlock_acquire(&service_spin); + ptr = allServices; + while (ptr) + { + rval += ptr->stats.n_current; + ptr = ptr->next; + } + spinlock_release(&service_spin); + return rval; +} + +/** + * Provide a row to the result set that defines the set of service + * listeners + * + * @param set The result set + * @param data The index of the row to send + * @return The next row or NULL + */ +static RESULT_ROW * +serviceListenerRowCallback(RESULTSET *set, void *data) +{ +int *rowno = (int *)data; +int i = 0;; +char buf[20]; +RESULT_ROW *row; +SERVICE *ptr; +SERV_PROTOCOL *lptr = NULL; + + spinlock_acquire(&service_spin); + ptr = allServices; + if (ptr) + lptr = ptr->ports; + while (i < *rowno && ptr) + { + lptr = ptr->ports; + while (i < *rowno && lptr) + { + if ((lptr = lptr->next) != NULL) + i++; + } + if (i < *rowno) + { + ptr = ptr->next; + if (ptr && (lptr = ptr->ports) != NULL) + i++; + } + } + if (lptr == NULL) + { + spinlock_release(&service_spin); + free(data); + return NULL; + } + (*rowno)++; + row = resultset_make_row(set); + 
resultset_row_set(row, 0, ptr->name); + resultset_row_set(row, 1, lptr->protocol); + resultset_row_set(row, 2, (lptr && lptr->address) ? lptr->address : "*"); + sprintf(buf, "%d", lptr->port); + resultset_row_set(row, 3, buf); + resultset_row_set(row, 4, + (!lptr->listener || !lptr->listener->session || + lptr->listener->session->state == SESSION_STATE_LISTENER_STOPPED) ? + "Stopped" : "Running"); + spinlock_release(&service_spin); + return row; +} + +/** + * Return a resultset that has the current set of services in it + * + * @return A Result set + */ +RESULTSET * +serviceGetListenerList() +{ +RESULTSET *set; +int *data; + + if ((data = (int *)malloc(sizeof(int))) == NULL) + return NULL; + *data = 0; + if ((set = resultset_create(serviceListenerRowCallback, data)) == NULL) + { + free(data); + return NULL; + } + resultset_add_column(set, "Service Name", 25, COL_TYPE_VARCHAR); + resultset_add_column(set, "Protocol Module", 20, COL_TYPE_VARCHAR); + resultset_add_column(set, "Address", 15, COL_TYPE_VARCHAR); + resultset_add_column(set, "Port", 5, COL_TYPE_VARCHAR); + resultset_add_column(set, "State", 8, COL_TYPE_VARCHAR); + + return set; +} + +/** + * Provide a row to the result set that defines the set of services + * + * @param set The result set + * @param data The index of the row to send + * @return The next row or NULL + */ +static RESULT_ROW * +serviceRowCallback(RESULTSET *set, void *data) +{ +int *rowno = (int *)data; +int i = 0;; +char buf[20]; +RESULT_ROW *row; +SERVICE *ptr; + + spinlock_acquire(&service_spin); + ptr = allServices; + while (i < *rowno && ptr) + { + i++; + ptr = ptr->next; + } + if (ptr == NULL) + { + spinlock_release(&service_spin); + free(data); + return NULL; + } + (*rowno)++; + row = resultset_make_row(set); + resultset_row_set(row, 0, ptr->name); + resultset_row_set(row, 1, ptr->routerModule); + sprintf(buf, "%d", ptr->stats.n_current); + resultset_row_set(row, 2, buf); + sprintf(buf, "%d", ptr->stats.n_sessions); + 
resultset_row_set(row, 3, buf); + spinlock_release(&service_spin); + return row; +} + +/** + * Return a resultset that has the current set of services in it + * + * @return A Result set + */ +RESULTSET * +serviceGetList() +{ +RESULTSET *set; +int *data; + + if ((data = (int *)malloc(sizeof(int))) == NULL) + return NULL; + *data = 0; + if ((set = resultset_create(serviceRowCallback, data)) == NULL) + { + free(data); + return NULL; + } + resultset_add_column(set, "Service Name", 25, COL_TYPE_VARCHAR); + resultset_add_column(set, "Router Module", 20, COL_TYPE_VARCHAR); + resultset_add_column(set, "No. Sessions", 10, COL_TYPE_VARCHAR); + resultset_add_column(set, "Total Sessions", 10, COL_TYPE_VARCHAR); + + return set; +} diff --git a/server/core/session.c b/server/core/session.c index fb1dda79f..561007040 100644 --- a/server/core/session.c +++ b/server/core/session.c @@ -42,6 +42,7 @@ #include #include #include +#include /** Defined in log_manager.cc */ extern int lm_enabled_logfiles_bitmask; @@ -607,14 +608,21 @@ SESSION *ptr; ptr = allSessions; while (ptr) { + double idle = (hkheartbeat - ptr->client->last_read); + idle = idle > 0 ? 
idle/10.0:0; dcb_printf(dcb, "Session %d (%p)\n",ptr->ses_id, ptr); dcb_printf(dcb, "\tState: %s\n", session_state(ptr->state)); dcb_printf(dcb, "\tService: %s (%p)\n", ptr->service->name, ptr->service); dcb_printf(dcb, "\tClient DCB: %p\n", ptr->client); if (ptr->client && ptr->client->remote) - dcb_printf(dcb, "\tClient Address: %s\n", ptr->client->remote); + dcb_printf(dcb, "\tClient Address: %s%s%s\n", + ptr->client->user?ptr->client->user:"", + ptr->client->user?"@":"", + ptr->client->remote); dcb_printf(dcb, "\tConnected: %s", asctime_r(localtime_r(&ptr->stats.connect, &result), timebuf)); + if(ptr->client->state == DCB_STATE_POLLING) + dcb_printf(dcb, "\tIdle: %.0f seconds\n",idle); ptr = ptr->next; } spinlock_release(&session_spin); @@ -636,14 +644,21 @@ struct tm result; char buf[30]; int i; + double idle = (hkheartbeat - ptr->client->last_read); + idle = idle > 0 ? idle/10.f:0; dcb_printf(dcb, "Session %d (%p)\n",ptr->ses_id, ptr); dcb_printf(dcb, "\tState: %s\n", session_state(ptr->state)); dcb_printf(dcb, "\tService: %s (%p)\n", ptr->service->name, ptr->service); dcb_printf(dcb, "\tClient DCB: %p\n", ptr->client); if (ptr->client && ptr->client->remote) - dcb_printf(dcb, "\tClient Address: %s\n", ptr->client->remote); + dcb_printf(dcb, "\tClient Address: %s%s%s\n", + ptr->client->user?ptr->client->user:"", + ptr->client->user?"@":"", + ptr->client->remote); dcb_printf(dcb, "\tConnected: %s", asctime_r(localtime_r(&ptr->stats.connect, &result), buf)); + if(ptr->client->state == DCB_STATE_POLLING) + dcb_printf(dcb, "\tIdle: %.0f seconds",idle); if (ptr->n_filters) { for (i = 0; i < ptr->n_filters; i++) @@ -909,3 +924,133 @@ SESSION *get_all_sessions() { return allSessions; } + +/** + * Close sessions that have been idle for too long. + * + * If the time since a session last sent data is grater than the set value in the + * service, it is disconnected. The default value for the timeout for a service is 0. 
+ * This means that connections are never timed out. + * @param data NULL, this is only here to satisfy the housekeeper function requirements. + */ +void session_close_timeouts(void* data) +{ + SESSION* ses; + + spinlock_acquire(&session_spin); + ses = get_all_sessions(); + spinlock_release(&session_spin); + + while(ses) + { + if(ses->client && ses->client->state == DCB_STATE_POLLING && + ses->service->conn_timeout > 0 && + hkheartbeat - ses->client->last_read > ses->service->conn_timeout * 10) + { + ses->client->func.hangup(ses->client); + } + + spinlock_acquire(&session_spin); + ses = ses->next; + spinlock_release(&session_spin); + + } +} + +/** + * Callback structure for the session list extraction + */ +typedef struct { + int index; + SESSIONLISTFILTER filter; +} SESSIONFILTER; + +/** + * Provide a row to the result set that defines the set of sessions + * + * @param set The result set + * @param data The index of the row to send + * @return The next row or NULL + */ +static RESULT_ROW * +sessionRowCallback(RESULTSET *set, void *data) +{ +SESSIONFILTER *cbdata = (SESSIONFILTER *)data; +int i = 0; +char buf[20]; +RESULT_ROW *row; +SESSION *ptr; + + spinlock_acquire(&session_spin); + ptr = allSessions; + /* Skip to the first non-listener if not showing listeners */ + while (ptr && cbdata->filter == SESSION_LIST_CONNECTION && + ptr->state == SESSION_STATE_LISTENER) + { + ptr = ptr->next; + } + while (i < cbdata->index && ptr) + { + if (cbdata->filter == SESSION_LIST_CONNECTION && + ptr->state != SESSION_STATE_LISTENER) + { + i++; + } + else if (cbdata->filter == SESSION_LIST_ALL) + { + i++; + } + ptr = ptr->next; + } + /* Skip to the next non-listener if not showing listeners */ + while (ptr && cbdata->filter == SESSION_LIST_CONNECTION && + ptr->state == SESSION_STATE_LISTENER) + { + ptr = ptr->next; + } + if (ptr == NULL) + { + spinlock_release(&session_spin); + free(data); + return NULL; + } + cbdata->index++; + row = resultset_make_row(set); + sprintf(buf, 
"%p", ptr); + resultset_row_set(row, 0, buf); + resultset_row_set(row, 1, ((ptr->client && ptr->client->remote) + ? ptr->client->remote : "")); + resultset_row_set(row, 2, (ptr->service && ptr->service->name + ? ptr->service->name : "")); + resultset_row_set(row, 3, session_state(ptr->state)); + spinlock_release(&session_spin); + return row; +} + +/** + * Return a resultset that has the current set of sessions in it + * + * @return A Result set + */ +RESULTSET * +sessionGetList(SESSIONLISTFILTER filter) +{ +RESULTSET *set; +SESSIONFILTER *data; + + if ((data = (SESSIONFILTER *)malloc(sizeof(SESSIONFILTER))) == NULL) + return NULL; + data->index = 0; + data->filter = filter; + if ((set = resultset_create(sessionRowCallback, data)) == NULL) + { + free(data); + return NULL; + } + resultset_add_column(set, "Session", 16, COL_TYPE_VARCHAR); + resultset_add_column(set, "Client", 15, COL_TYPE_VARCHAR); + resultset_add_column(set, "Service", 15, COL_TYPE_VARCHAR); + resultset_add_column(set, "State", 15, COL_TYPE_VARCHAR); + + return set; +} diff --git a/server/core/test/CMakeLists.txt b/server/core/test/CMakeLists.txt index 2a0977088..626a75f3e 100644 --- a/server/core/test/CMakeLists.txt +++ b/server/core/test/CMakeLists.txt @@ -1,3 +1,4 @@ +execute_process(COMMAND ${CMAKE_COMMAND} -E copy ${ERRMSG} ${CMAKE_CURRENT_BINARY_DIR}) add_executable(test_mysql_users test_mysql_users.c) add_executable(test_hash testhash.c) add_executable(test_hint testhint.c) @@ -12,6 +13,7 @@ add_executable(test_server testserver.c) add_executable(test_users testusers.c) add_executable(test_adminusers testadminusers.c) add_executable(testmemlog testmemlog.c) +add_executable(testfeedback testfeedback.c) target_link_libraries(test_mysql_users MySQLClient fullcore) target_link_libraries(test_hash fullcore) target_link_libraries(test_hint fullcore) @@ -26,31 +28,35 @@ target_link_libraries(test_server fullcore) target_link_libraries(test_users fullcore) target_link_libraries(test_adminusers 
fullcore) target_link_libraries(testmemlog fullcore) -add_test(testMySQLUsers test_mysql_users) -add_test(TestHash test_hash) -add_test(TestHint test_hint) -add_test(TestSpinlock test_spinlock) -add_test(TestFilter test_filter) -add_test(TestBuffer test_buffer) -add_test(TestDCB test_dcb) -add_test(TestModutil test_modutil) -add_test(TestPoll test_poll) -add_test(TestService test_service) -add_test(TestServer test_server) -add_test(TestUsers test_users) -add_test(TestAdminUsers test_adminusers) -add_test(TestMemlog testmemlog) -set_tests_properties(testMySQLUsers - TestHash - TestHint - TestSpinlock - TestFilter - TestBuffer - TestDCB - TestModutil - TestPoll - TestService - TestServer - TestUsers - TestAdminUsers - TestMemlog PROPERTIES ENVIRONMENT MAXSCALE_HOME=${CMAKE_BINARY_DIR}/) +target_link_libraries(testfeedback fullcore) +add_test(Internal-TestMySQLUsers test_mysql_users) +add_test(Internal-TestHash test_hash) +add_test(Internal-TestHint test_hint) +add_test(Internal-TestSpinlock test_spinlock) +add_test(Internal-TestFilter test_filter) +add_test(Internal-TestBuffer test_buffer) +add_test(Internal-TestDCB test_dcb) +add_test(Internal-TestModutil test_modutil) +add_test(Internal-TestPoll test_poll) +add_test(Internal-TestService test_service) +add_test(Internal-TestServer test_server) +add_test(Internal-TestUsers test_users) +add_test(Internal-TestAdminUsers test_adminusers) +add_test(Internal-TestMemlog testmemlog) +add_test(TestFeedback testfeedback) +set_tests_properties(Internal-TestMySQLUsers + Internal-TestHash + Internal-TestHint + Internal-TestSpinlock + Internal-TestFilter + Internal-TestBuffer + Internal-TestDCB + Internal-TestModutil + Internal-TestPoll + Internal-TestService + Internal-TestServer + Internal-TestUsers + Internal-TestAdminUsers + Internal-TestMemlog + TestFeedback PROPERTIES ENVIRONMENT MAXSCALE_HOME=${CMAKE_BINARY_DIR}/) +set_tests_properties(TestFeedback PROPERTIES TIMEOUT 30) diff --git a/server/core/test/testadminusers.c 
b/server/core/test/testadminusers.c index 7dfd9ef9c..ae52bd8b7 100644 --- a/server/core/test/testadminusers.c +++ b/server/core/test/testadminusers.c @@ -37,7 +37,7 @@ /** * test1 default user * - * Test that the username password admin/skysql is accepted if no users + * Test that the username password admin/mariadb is accepted if no users * have been created and that no other users are accepted * * WARNING: $MAXSCALE_HOME/etc/passwd must be removed before this test is run @@ -45,7 +45,7 @@ static int test1() { - if (admin_verify("admin", "skysql") == 0) + if (admin_verify("admin", "mariadb") == 0) { fprintf(stderr, "admin_verify: test 1.1 (default user) failed.\n"); return 1; @@ -270,7 +270,7 @@ char *home, buf[1024]; /* Unlink any existing password file before running this test */ if ((home = getenv("MAXSCALE_HOME")) == NULL || strlen(home) >= 1024) - home = "/usr/local/skysql"; + home = "/usr/local/mariadb-maxscale"; sprintf(buf, "%s/etc/passwd", home); if(!is_valid_posix_path(buf)) exit(1); @@ -284,7 +284,7 @@ char *home, buf[1024]; result += test5(); /* Add the default user back so other tests can use it */ - admin_add_user("admin", "skysql"); + admin_add_user("admin", "mariadb"); exit(result); } diff --git a/server/core/test/testfeedback.c b/server/core/test/testfeedback.c new file mode 100644 index 000000000..6549305c3 --- /dev/null +++ b/server/core/test/testfeedback.c @@ -0,0 +1,119 @@ +/* + * This file is distributed as part of MaxScale. It is free + * software: you can redistribute it and/or modify it under the terms of the + * GNU General Public License as published by the Free Software Foundation, + * version 2. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 51 + * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Copyright MariaDB Corporation Ab 2014 + */ + +/** + * + * @verbatim + * Revision History + * + * Date Who Description + * 09-03-2015 Markus Mäkelä Initial implementation + * 10-03-2015 Massimiliano Pinto Added http_check + * + * @endverbatim + */ + +#define FAILTEST(s) printf("TEST FAILED: " s "\n");return 1; +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static char* server_options[] = { + "MariaDB Corporation MaxScale", + "--no-defaults", + "--datadir=.", + "--language=.", + "--skip-innodb", + "--default-storage-engine=myisam", + NULL +}; + +const int num_elements = (sizeof(server_options) / sizeof(char *)) - 1; + +static char* server_groups[] = { + "embedded", + "server", + "server", + "embedded", + "server", + "server", + NULL +}; + + +int main(int argc, char** argv) +{ + FEEDBACK_CONF* fc; + GWBUF* buf; + regex_t re; + char* home; + char* cnf; + + hkinit(); + home = getenv("MAXSCALE_HOME"); + + if(home == NULL) + { + FAILTEST("MAXSCALE_HOME was not defined."); + } + printf("Home: %s\n",home); + + cnf = malloc(strlen(home) + strlen("/etc/MaxScale.cnf") + 1); + strcpy(cnf,home); + strcat(cnf,"/etc/MaxScale.cnf"); + + printf("Config: %s\n",cnf); + + + if(mysql_library_init(num_elements, server_options, server_groups)) + { + FAILTEST("Failed to initialize embedded library."); + } + + config_load(cnf); + + if ((fc = config_get_feedback_data()) == NULL) + { + FAILTEST("Configuration for Feedback was NULL."); + } + + + regcomp(&re,fc->feedback_user_info,0); + + module_create_feedback_report(&buf,NULL,fc); + + if(regexec(&re,(char*)buf->start,0,NULL,0)) + { + FAILTEST("Regex match of 'user_info' failed."); + } + + if (do_http_post(buf, fc) != 0) + { + FAILTEST("Http send 
failed\n"); + } + mysql_library_end(); + return 0; +} \ No newline at end of file diff --git a/server/core/test/testhash.c b/server/core/test/testhash.c index 776c8d93a..60f759839 100644 --- a/server/core/test/testhash.c +++ b/server/core/test/testhash.c @@ -33,7 +33,7 @@ #include #include -#include "../../include/hashtable.h" +#include static void read_lock(HASHTABLE *table) diff --git a/server/core/test/testservice.c b/server/core/test/testservice.c index 06ac27f6e..ca6fe6618 100644 --- a/server/core/test/testservice.c +++ b/server/core/test/testservice.c @@ -31,8 +31,18 @@ #include #include #include +#include #include -#include + + +static bool success = false; + +int hup(DCB* dcb) +{ + success = true; + return 1; +} + /** * test1 Allocate a service and do lots of other things * @@ -40,19 +50,25 @@ static int test1() { -SERVICE *service; -int result; -int argc = 3; -char buffer[1024]; -sprintf(buffer,"%s",TEST_LOG_DIR); -char* argv[] = { - "log_manager", - "-j", - buffer, - NULL -}; -skygw_logmanager_init(argc,argv); -poll_init(); +SERVICE *service; +SESSION *session; +DCB *dcb; +int result; +int argc = 3; + +init_test_env(); +/* char* argv[] = */ +/* { */ +/* "log_manager", */ +/* "-j", */ +/* TEST_LOG_DIR, */ +/* NULL */ +/* }; */ + +/* skygw_logmanager_init(argc,argv); */ +/* poll_init(); */ +/* hkinit(); */ + /* Service tests */ ss_dfprintf(stderr, "testservice : creating service called MyService with router nonexistent"); @@ -81,10 +97,33 @@ poll_init(); result = serviceStartAll(); skygw_log_sync_all(); ss_info_dassert(0 != result, "Start all should succeed"); + + ss_dfprintf(stderr, "\t..done\nTiming out a session."); + + service->conn_timeout = 1; + result = serviceStart(service); + skygw_log_sync_all(); + ss_info_dassert(0 != result, "Start should succeed"); + result = serviceStop(service); + skygw_log_sync_all(); + ss_info_dassert(0 != result, "Stop should succeed"); + + if((dcb = dcb_alloc(DCB_ROLE_REQUEST_HANDLER)) == NULL) + return 1; + 
ss_info_dassert(dcb != NULL, "DCB allocation failed"); + session = session_alloc(service,dcb); + ss_info_dassert(session != NULL, "Session allocation failed"); + session->client->state = DCB_STATE_POLLING; + session->client->func.hangup = hup; + sleep(15); + + ss_info_dassert(success, "Session timeout failed"); + ss_dfprintf(stderr, "\t..done\nStopping Service."); ss_info_dassert(0 != serviceStop(service), "Stop should succeed"); - ss_dfprintf(stderr, "\t..done\n"); + ss_dfprintf(stderr, "\t..done\n"); + /** This is never used in MaxScale and will always fail due to service's * stats.n_current value never being decremented */ /* diff --git a/server/core/utils.c b/server/core/utils.c index da9378867..a26c2b4e5 100644 --- a/server/core/utils.c +++ b/server/core/utils.c @@ -42,6 +42,7 @@ #include #include #include +#include /** Defined in log_manager.cc */ extern int lm_enabled_logfiles_bitmask; @@ -235,3 +236,30 @@ int gw_getsockerrno( return_eno: return eno; } + +/** + * Create a HEX(SHA1(SHA1(password))) + * + * @param password The password to encrypt + * @return The new allocated encrypted password, that the caller must free + * + */ +char *create_hex_sha1_sha1_passwd(char *passwd) { + uint8_t hash1[SHA_DIGEST_LENGTH]=""; + uint8_t hash2[SHA_DIGEST_LENGTH]=""; + char *hexpasswd=NULL; + + if ((hexpasswd = (char *)calloc(SHA_DIGEST_LENGTH * 2 + 1, 1)) == NULL) + return NULL; + + /* hash1 is SHA1(real_password) */ + gw_sha1_str((uint8_t *)passwd, strlen(passwd), hash1); + + /* hash2 is the SHA1(input data), where input_data = SHA1(real_password) */ + gw_sha1_str(hash1, SHA_DIGEST_LENGTH, hash2); + + /* dbpass is the HEX form of SHA1(SHA1(real_password)) */ + gw_bin2hex(hexpasswd, hash2, SHA_DIGEST_LENGTH); + + return hexpasswd; +} diff --git a/server/include/dcb.h b/server/include/dcb.h index e360a8fbb..7eedfbec3 100644 --- a/server/include/dcb.h +++ b/server/include/dcb.h @@ -261,7 +261,7 @@ typedef struct dcb { SPINLOCK polloutlock; int polloutbusy; int writecheck; 
- + unsigned long last_read; /*< Last time the DCB received data */ unsigned int high_water; /**< High water mark */ unsigned int low_water; /**< Low water mark */ struct server *server; /**< The associated backend server */ @@ -271,6 +271,18 @@ typedef struct dcb { #endif } DCB; +/** + * The DCB usage filer used for returning DCB's in use for a certain reason + */ +typedef enum { + DCB_USAGE_CLIENT, + DCB_USAGE_LISTENER, + DCB_USAGE_BACKEND, + DCB_USAGE_INTERNAL, + DCB_USAGE_ZOMBIE, + DCB_USAGE_ALL +} DCB_USAGE; + #if defined(FAKE_CODE) unsigned char dcb_fake_write_errno[10240]; __int32_t dcb_fake_write_ev[10240]; @@ -319,6 +331,7 @@ int dcb_add_callback(DCB *, DCB_REASON, int (*)(struct dcb *, DCB_REASON, void int dcb_remove_callback(DCB *, DCB_REASON, int (*)(struct dcb *, DCB_REASON, void *), void *); int dcb_isvalid(DCB *); /* Check the DCB is in the linked list */ +int dcb_count_by_usage(DCB_USAGE); /* Return counts of DCBs */ bool dcb_set_state(DCB* dcb, dcb_state_t new_state, dcb_state_t* old_state); void dcb_call_foreach (struct server* server, DCB_REASON reason); diff --git a/server/include/hk_heartbeat.h b/server/include/hk_heartbeat.h new file mode 100644 index 000000000..f1ea1e0bc --- /dev/null +++ b/server/include/hk_heartbeat.h @@ -0,0 +1,11 @@ +#ifndef _HK_HEARTBEAT_H +#define _HK_HEARTBEAT_H + +/** + * The global housekeeper heartbeat value. This value is increamente + * every 100ms and may be used for crude timing etc. + */ + +extern unsigned long hkheartbeat; + +#endif diff --git a/server/include/housekeeper.h b/server/include/housekeeper.h index 0379ff23a..0cd2d8a71 100644 --- a/server/include/housekeeper.h +++ b/server/include/housekeeper.h @@ -19,7 +19,7 @@ */ #include #include - +#include /** * @file housekeeper.h A mechanism to have task run periodically * @@ -52,12 +52,6 @@ typedef struct hktask { *next; /*< Next task in the list */ } HKTASK; -/** - * The global housekeeper heartbeat value. 
This value is increamente - * every 100ms and may be used for crude timing etc. - */ -extern unsigned long hkheartbeat; - extern void hkinit(); extern int hktask_add(char *name, void (*task)(void *), void *data, int frequency); extern int hktask_oneshot(char *name, void (*task)(void *), void *data, int when); diff --git a/server/include/config.h b/server/include/maxconfig.h similarity index 75% rename from server/include/config.h rename to server/include/maxconfig.h index 93bd095c1..12b1b98fa 100644 --- a/server/include/config.h +++ b/server/include/maxconfig.h @@ -1,5 +1,5 @@ -#ifndef _CONFIG_H -#define _CONFIG_H +#ifndef _MAXSCALE_CONFIG_H +#define _MAXSCALE_CONFIG_H /* * This file is distributed as part of the MariaDB Corporation MaxScale. It is free * software: you can redistribute it and/or modify it under the terms of the @@ -18,7 +18,8 @@ * Copyright MariaDB Corporation Ab 2013-2014 */ #include - +#include +#include /** * @file config.h The configuration handling elements * @@ -30,12 +31,15 @@ * 07/05/14 Massimiliano Pinto Added version_string to global configuration * 23/05/14 Massimiliano Pinto Added id to global configuration * 17/10/14 Mark Riddoch Added poll tuning configuration parameters + * 05/03/15 Massimiliano Pinto Added sysname, release, sha1_mac to gateway struct * * @endverbatim */ #define DEFAULT_NBPOLLS 3 /**< Default number of non block polls before we block */ #define DEFAULT_POLLSLEEP 1000 /**< Default poll wait time (milliseconds) */ +#define _SYSNAME_STR_LENGTH 256 /**< sysname len */ +#define _RELEASE_STR_LENGTH 256 /**< release len */ /** * Maximum length for configuration parameter value. 
*/ @@ -92,22 +96,25 @@ typedef struct config_context { * The gateway global configuration data */ typedef struct { - int n_threads; /**< Number of polling threads */ - char *version_string; /**< The version string of embedded database library */ - unsigned long id; /**< MaxScale ID */ + int n_threads; /**< Number of polling threads */ + char *version_string; /**< The version string of embedded database library */ + char release_string[_SYSNAME_STR_LENGTH]; /**< The release name string of the system */ + char sysname[_SYSNAME_STR_LENGTH]; /**< The release name string of the system */ + uint8_t mac_sha1[SHA_DIGEST_LENGTH]; /*< The SHA1 digest of an interface MAC address */ + unsigned long id; /**< MaxScale ID */ unsigned int n_nbpoll; /**< Tune number of non-blocking polls */ unsigned int pollsleep; /**< Wait time in blocking polls */ } GATEWAY_CONF; -extern int config_load(char *); -extern int config_reload(); -extern int config_threadcount(); -extern unsigned int config_nbpolls(); -extern unsigned int config_pollsleep(); -CONFIG_PARAMETER* config_get_param(CONFIG_PARAMETER* params, const char* name); -config_param_type_t config_get_paramtype(CONFIG_PARAMETER* param); -CONFIG_PARAMETER* config_clone_param(CONFIG_PARAMETER* param); -int config_truth_value(char *str); +extern int config_load(char *); +extern int config_reload(); +extern int config_threadcount(); +extern unsigned int config_nbpolls(); +extern unsigned int config_pollsleep(); +CONFIG_PARAMETER* config_get_param(CONFIG_PARAMETER* params, const char* name); +config_param_type_t config_get_paramtype(CONFIG_PARAMETER* param); +CONFIG_PARAMETER* config_clone_param(CONFIG_PARAMETER* param); +extern int config_truth_value(char *); bool config_set_qualified_param( CONFIG_PARAMETER* param, void* val, @@ -131,4 +138,8 @@ bool config_get_valtarget( CONFIG_PARAMETER* param, const char* name, /*< if NULL examine current param only */ config_param_type_t ptype); + +void config_enable_feedback_task(void); +void 
config_disable_feedback_task(void); +unsigned long config_get_gateway_id(void); #endif diff --git a/server/include/modules.h b/server/include/modules.h index adda2b255..51e10b29d 100644 --- a/server/include/modules.h +++ b/server/include/modules.h @@ -19,6 +19,7 @@ */ #include #include +#include /** * @file modules.h Utilities for loading modules @@ -28,12 +29,15 @@ * @verbatim * Revision History * - * Date Who Description - * 13/06/13 Mark Riddoch Initial implementation - * 08/07/13 Mark Riddoch Addition of monitor modules - * 29/05/14 Mark Riddoch Addition of filter modules - * 01/10/14 Mark Riddoch Addition of call to unload all modules on - * shutdown + * Date Who Description + * 13/06/13 Mark Riddoch Initial implementation + * 08/07/13 Mark Riddoch Addition of monitor modules + * 29/05/14 Mark Riddoch Addition of filter modules + * 01/10/14 Mark Riddoch Addition of call to unload all modules on + * shutdown + * 19/02/15 Mark Riddoch Addition of moduleGetList + * 26/02/15 Massimiliano Pinto Addition of module_feedback_send + * * @endverbatim */ @@ -63,6 +67,9 @@ extern void unload_module(const char *module); extern void unload_all_modules(); extern void printModules(); extern void dprintAllModules(DCB *); -char* get_maxscale_home(void); +extern RESULTSET *moduleGetList(); +extern char *get_maxscale_home(void); +extern void module_feedback_send(void*); +extern void moduleShowFeedbackReport(DCB *dcb); #endif diff --git a/server/include/modutil.h b/server/include/modutil.h index d40779c4c..4964b7b31 100644 --- a/server/include/modutil.h +++ b/server/include/modutil.h @@ -33,14 +33,19 @@ */ #include #include +#include #define PTR_IS_RESULTSET(b) (b[0] == 0x01 && b[1] == 0x0 && b[2] == 0x0 && b[3] == 0x01) #define PTR_IS_EOF(b) (b[0] == 0x05 && b[1] == 0x0 && b[2] == 0x0 && b[4] == 0xfe) #define PTR_IS_OK(b) (b[4] == 0x00) #define PTR_IS_ERR(b) (b[4] == 0xff) #define PTR_IS_LOCAL_INFILE(b) (b[4] == 0xfb) +#define IS_FULL_RESPONSE(buf) 
(modutil_count_signal_packets(buf,0,0) == 2) +#define PTR_EOF_MORE_RESULTS(b) ((PTR_IS_EOF(b) && ptr[7] & 0x08)) + extern int modutil_is_SQL(GWBUF *); +extern int modutil_is_SQL_prepare(GWBUF *); extern int modutil_extract_SQL(GWBUF *, char **, int *); extern int modutil_MySQL_Query(GWBUF *, char **, int *, int *); extern char *modutil_get_SQL(GWBUF *); @@ -52,6 +57,8 @@ GWBUF* modutil_get_complete_packets(GWBUF** p_readbuf); int modutil_MySQL_query_len(GWBUF* buf, int* nbytes_missing); void modutil_reply_parse_error(DCB* backend_dcb, char* errstr, uint32_t flags); void modutil_reply_auth_error(DCB* backend_dcb, char* errstr, uint32_t flags); +int modutil_count_statements(GWBUF* buffer); +GWBUF* modutil_create_query(char* query); GWBUF *modutil_create_mysql_err_msg( int packet_number, @@ -60,5 +67,5 @@ GWBUF *modutil_create_mysql_err_msg( const char *statemsg, const char *msg); -int modutil_count_signal_packets(GWBUF*,int,int); +int modutil_count_signal_packets(GWBUF*,int,int,int*); #endif diff --git a/server/include/monitor.h b/server/include/monitor.h index c08e8153e..e114f3424 100644 --- a/server/include/monitor.h +++ b/server/include/monitor.h @@ -19,6 +19,7 @@ */ #include #include +#include /** * @file monitor.h The interface to the monitor module @@ -35,6 +36,7 @@ * 28/08/14 Massimiliano Pinto Addition of detectStaleMaster * 30/10/14 Massimiliano Pinto Addition of disableMasterFailback * 07/11/14 Massimiliano Pinto Addition of setNetworkTimeout + * 19/02/15 Mark Riddoch Addition of monitorGetList * * @endverbatim */ @@ -65,7 +67,7 @@ * monitored. 
*/ typedef struct { - void *(*startMonitor)(void *); + void *(*startMonitor)(void *, void*); void (*stopMonitor)(void *); void (*registerServer)(void *, SERVER *); void (*unregisterServer)(void *, SERVER *); @@ -73,17 +75,13 @@ typedef struct { void (*diagnostics)(DCB *, void *); void (*setInterval)(void *, size_t); void (*setNetworkTimeout)(void *, int, int); - void (*defaultId)(void *, unsigned long); - void (*replicationHeartbeat)(void *, int); - void (*detectStaleMaster)(void *, int); - void (*disableMasterFailback)(void *, int); } MONITOR_OBJECT; /** * The monitor API version number. Any change to the monitor module API * must change these versions usign the rules defined in modinfo.h */ -#define MONITOR_VERSION {1, 0, 0} +#define MONITOR_VERSION {2, 0, 0} /** Monitor's poll frequency */ #define MON_BASE_INTERVAL_MS 100 @@ -132,15 +130,12 @@ extern MONITOR *monitor_find(char *); extern void monitorAddServer(MONITOR *, SERVER *); extern void monitorAddUser(MONITOR *, char *, char *); extern void monitorStop(MONITOR *); -extern void monitorStart(MONITOR *); +extern void monitorStart(MONITOR *, void*); extern void monitorStopAll(); extern void monitorShowAll(DCB *); extern void monitorShow(DCB *, MONITOR *); extern void monitorList(DCB *); -extern void monitorSetId(MONITOR *, unsigned long); extern void monitorSetInterval (MONITOR *, unsigned long); -extern void monitorSetReplicationHeartbeat(MONITOR *, int); -extern void monitorDetectStaleMaster(MONITOR *, int); -extern void monitorDisableMasterFailback(MONITOR *, int); extern void monitorSetNetworkTimeout(MONITOR *, int, int); +extern RESULTSET *monitorGetList(); #endif diff --git a/server/include/notification.h b/server/include/notification.h new file mode 100644 index 000000000..6e40f5aee --- /dev/null +++ b/server/include/notification.h @@ -0,0 +1,64 @@ +#ifndef _NOTIFICATION_SERVICE_H +#define _NOTIFICATION_SERVICE_H +/* + * This file is distributed as part of the MariaDB Corporation MaxScale. 
It is free + * software: you can redistribute it and/or modify it under the terms of the + * GNU General Public License as published by the Free Software Foundation, + * version 2. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 51 + * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Copyright MariaDB Corporation Ab 2013-2014 + */ + +/** + * @file notification.h + * + * The configuration stuct for notification/feedback service + * + * @verbatim + * Revision History + * + * Date Who Description + * 02/03/15 Massimiliano Pinto Initial implementation + * + * @endverbatim + */ + +#define _NOTIFICATION_CONNECT_TIMEOUT 30 +#define _NOTIFICATION_OPERATION_TIMEOUT 30 +#define _NOTIFICATION_SEND_PENDING 0 +#define _NOTIFICATION_SEND_OK 1 +#define _NOTIFICATION_SEND_ERROR 2 +#define _NOTIFICATION_REPORT_ROW_LEN 255 + +#include + +/** + * The configuration and usage information data for feeback service + */ + +typedef struct { + int feedback_enable; /**< Enable/Disable Notification feedback */ + char *feedback_url; /**< URL to which the data is sent */ + char *feedback_user_info; /**< User info included in the feedback data sent */ + int feedback_timeout; /**< An attempt to write/read the data times out and fails after this many seconds */ + int feedback_connect_timeout; /**< An attempt to send the data times out and fails after this many seconds */ + int feedback_last_action; /**< Holds the feedback last send action status */ + int feedback_frequency; /*< Frequency of the housekeeper task */ + char *release_info; /**< Operating system Release name */ + char *sysname; /**< Operating system name */ + 
uint8_t *mac_sha1; /**< First available MAC address*/ +} FEEDBACK_CONF; + +extern char *gw_bin2hex(char *out, const uint8_t *in, unsigned int len); +extern void gw_sha1_str(const uint8_t *in, int in_len, uint8_t *out); +extern FEEDBACK_CONF * config_get_feedback_data(); +#endif diff --git a/server/include/poll.h b/server/include/poll.h index 24bf0645d..e778c580c 100644 --- a/server/include/poll.h +++ b/server/include/poll.h @@ -19,6 +19,7 @@ */ #include #include +#include /** * @file poll.h The poll related functionality @@ -33,6 +34,22 @@ */ #define MAX_EVENTS 1000 +/** +* A statistic identifier that can be returned by poll_get_stat +*/ +typedef enum { + POLL_STAT_READ, + POLL_STAT_WRITE, + POLL_STAT_ERROR, + POLL_STAT_HANGUP, + POLL_STAT_ACCEPT, + POLL_STAT_EVQ_LEN, + POLL_STAT_EVQ_PENDING, + POLL_STAT_EVQ_MAX, + POLL_STAT_MAX_QTIME, + POLL_STAT_MAX_EXECTIME +} POLL_STAT; + extern void poll_init(); extern int poll_add_dcb(DCB *); extern int poll_remove_dcb(DCB *); @@ -46,4 +63,6 @@ extern void dShowThreads(DCB *dcb); void poll_add_epollin_event_to_dcb(DCB* dcb, GWBUF* buf); extern void dShowEventQ(DCB *dcb); extern void dShowEventStats(DCB *dcb); +extern int poll_get_stat(POLL_STAT stat); +extern RESULTSET *eventTimesGetList(); #endif diff --git a/server/include/resultset.h b/server/include/resultset.h new file mode 100644 index 000000000..bec76bb50 --- /dev/null +++ b/server/include/resultset.h @@ -0,0 +1,88 @@ +#ifndef _RESULTSET_H +#define _RESULTSET_H +/* + * This file is distributed as part of the MariaDB Corporation MaxScale. It is free + * software: you can redistribute it and/or modify it under the terms of the + * GNU General Public License as published by the Free Software Foundation, + * version 2. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 51 + * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Copyright MariaDB Corporation Ab 2013-2014 + */ + +/** + * @file resultset.h The MaxScale generic result set mechanism + * + * @verbatim + * Revision History + * + * Date Who Description + * 17/02/15 Mark Riddoch Initial implementation + * + * @endverbatim + */ +#include + + +/** + * Column types + */ +typedef enum { + COL_TYPE_VARCHAR = 0x0f, + COL_TYPE_VARSTRING = 0xfd +} RESULT_COL_TYPE; + +/** + * The result set column definition. Each result set has an order linked + * list of column definitions. + */ +typedef struct resultcolumn { + char *name; /*< Column name */ + int len; /*< Column length */ + RESULT_COL_TYPE type; /*< Column type */ + struct resultcolumn *next; /*< next column */ +} RESULT_COLUMN; + +/** + * A representation of a row within a result set. + */ +typedef struct resultrow { + int n_cols; /*< No. of columns in row */ + char **cols; /*< The columns themselves */ +} RESULT_ROW; + +struct resultset; + +/** + * Type of callback function used to supply each row + */ +typedef RESULT_ROW * (*RESULT_ROW_CB)(struct resultset *, void *); + +/** + * The representation of the result set itself. + */ +typedef struct resultset { + int n_cols; /*< No. 
of columns */ + RESULT_COLUMN *column; /*< Linked list of column definitions */ + RESULT_ROW_CB fetchrow; /*< Fetch a row for the result set */ + void *userdata; /*< User data for the fetch row call */ +} RESULTSET; + +extern RESULTSET *resultset_create(RESULT_ROW_CB, void *); +extern void resultset_free(RESULTSET *); +extern int resultset_add_column(RESULTSET *, char *, int, RESULT_COL_TYPE); +extern void resultset_column_free(RESULT_COLUMN *); +extern RESULT_ROW *resultset_make_row(RESULTSET *); +extern void resultset_free_row(RESULT_ROW *); +extern int resultset_row_set(RESULT_ROW *, int, char *); +extern void resultset_stream_mysql(RESULTSET *, DCB *); +extern void resultset_stream_json(RESULTSET *, DCB *); +#endif diff --git a/server/include/server.h b/server/include/server.h index 1eb2d3eeb..459869a0b 100644 --- a/server/include/server.h +++ b/server/include/server.h @@ -18,6 +18,7 @@ * Copyright MariaDB Corporation Ab 2013-2014 */ #include +#include /** * @file service.h @@ -41,6 +42,7 @@ * 30/07/14 Massimiliano Pinto Addition of NDB status for MySQL Cluster * 30/08/14 Massimiliano Pinto Addition of SERVER_STALE_STATUS * 27/10/14 Massimiliano Pinto Addition of SERVER_MASTER_STICKINESS + * 19/02/15 Mark Riddoch Addition of serverGetList * * @endverbatim */ @@ -187,4 +189,5 @@ extern void serverAddParameter(SERVER *, char *, char *); extern char *serverGetParameter(SERVER *, char *); extern void server_update(SERVER *, char *, char *, char *); extern void server_set_unique_name(SERVER *, char *); +extern RESULTSET *serverGetList(); #endif diff --git a/server/include/service.h b/server/include/service.h index ab18e5d29..c95a73095 100644 --- a/server/include/service.h +++ b/server/include/service.h @@ -24,7 +24,8 @@ #include #include #include -#include "config.h" +#include +#include /** * @file service.h @@ -136,12 +137,17 @@ typedef struct service { svc_config_param; /*< list of config params and values */ int svc_config_version; /*< Version number of 
configuration */ bool svc_do_shutdown; /*< tells the service to exit loops etc. */ + bool users_from_all; /*< Load users from one server or all of them */ + bool strip_db_esc; /*< Remove the '\' characters from database names + * when querying them from the server. MySQL Workbench seems + * to escape at least the underscore character. */ SPINLOCK users_table_spin; /**< The spinlock for users data refresh */ SERVICE_REFRESH_RATE rate_limit; /**< The refresh rate limit for users table */ FILTER_DEF **filters; /**< Ordered list of filters */ int n_filters; /**< Number of filters */ + int conn_timeout; /*< Session timeout in seconds */ char *weightby; struct service *next; /**< The next service in the linked list */ } SERVICE; @@ -172,9 +178,12 @@ extern int serviceSetUser(SERVICE *, char *, char *); extern int serviceGetUser(SERVICE *, char **, char **); extern void serviceSetFilters(SERVICE *, char *); extern int serviceEnableRootUser(SERVICE *, int ); +extern int serviceSetTimeout(SERVICE *, int ); extern void serviceWeightBy(SERVICE *, char *); extern char *serviceGetWeightingParameter(SERVICE *); extern int serviceEnableLocalhostMatchWildcardHost(SERVICE *, int); +int serviceStripDbEsc(SERVICE* service, int action); +int serviceAuthAllServers(SERVICE *service, int action); extern void service_update(SERVICE *, char *, char *, char *); extern int service_refresh_users(SERVICE *); extern void printService(SERVICE *); @@ -193,4 +202,7 @@ extern void dListServices(DCB *); extern void dListListeners(DCB *); char* service_get_name(SERVICE* svc); void service_shutdown(); +extern int serviceSessionCountAll(); +extern RESULTSET *serviceGetList(); +extern RESULTSET *serviceGetListenerList(); #endif diff --git a/server/include/session.h b/server/include/session.h index e008cc4ff..908277e41 100644 --- a/server/include/session.h +++ b/server/include/session.h @@ -33,6 +33,7 @@ * 02-09-2013 Massimiliano Pinto Added session ref counter * 29-05-2014 Mark Riddoch Support for 
filter mechanism * added + * 20-02-2015 Markus Mäkelä Added session timeouts * * @endverbatim */ @@ -40,6 +41,7 @@ #include #include #include +#include #include #include @@ -99,6 +101,14 @@ typedef struct { void *session; } SESSION_FILTER; +/** + * Filter type for the sessionGetList call + */ +typedef enum { + SESSION_LIST_ALL, + SESSION_LIST_CONNECTION +} SESSIONLISTFILTER; + /** * The session status block * @@ -167,5 +177,7 @@ bool session_link_dcb(SESSION *, struct dcb *); SESSION* get_session_by_router_ses(void* rses); void session_enable_log(SESSION* ses, logfile_id_t id); void session_disable_log(SESSION* ses, logfile_id_t id); +void session_close_timeouts(void* data); +RESULTSET *sessionGetList(SESSIONLISTFILTER); #endif diff --git a/server/include/test_utils.h b/server/include/test_utils.h new file mode 100644 index 000000000..538c5d101 --- /dev/null +++ b/server/include/test_utils.h @@ -0,0 +1,26 @@ +#ifndef TEST_UTILS_H +#define TEST_UTILS_H +#include +#include +#include +#include +#include + +void init_test_env() +{ + int argc = 3; + + char* argv[] = + { + "log_manager", + "-j", + TEST_LOG_DIR, + NULL + }; + + skygw_logmanager_init(argc,argv); + poll_init(); + hkinit(); +} + +#endif diff --git a/server/modules/filter/CMakeLists.txt b/server/modules/filter/CMakeLists.txt index 87520abc7..3febf4ff5 100644 --- a/server/modules/filter/CMakeLists.txt +++ b/server/modules/filter/CMakeLists.txt @@ -26,14 +26,19 @@ add_library(topfilter SHARED topfilter.c) target_link_libraries(topfilter log_manager utils) install(TARGETS topfilter DESTINATION modules) -add_library(fwfilter SHARED fwfilter.c) -target_link_libraries(fwfilter log_manager utils query_classifier) -install(TARGETS fwfilter DESTINATION modules) +add_library(dbfwfilter SHARED dbfwfilter.c) +target_link_libraries(dbfwfilter log_manager utils query_classifier) +install(TARGETS dbfwfilter DESTINATION modules) add_library(namedserverfilter SHARED namedserverfilter.c) target_link_libraries(namedserverfilter 
log_manager utils) install(TARGETS namedserverfilter DESTINATION modules) +if(BUILD_SLAVELAG) + add_library(slavelag SHARED slavelag.c) + target_link_libraries(slavelag log_manager utils query_classifier) + install(TARGETS slavelag DESTINATION modules) +endif() add_subdirectory(hint) diff --git a/server/modules/filter/fwfilter.c b/server/modules/filter/dbfwfilter.c similarity index 98% rename from server/modules/filter/fwfilter.c rename to server/modules/filter/dbfwfilter.c index 8fbfc9435..63d07b5a9 100644 --- a/server/modules/filter/fwfilter.c +++ b/server/modules/filter/dbfwfilter.c @@ -1300,6 +1300,7 @@ bool rule_matches(FW_INSTANCE* my_instance, FW_SESSION* my_session, GWBUF *queue char *ptr,*where,*msg = NULL; char emsg[512]; int qlen; + unsigned char* memptr = (unsigned char*)queue->start; bool is_sql, is_real, matches; skygw_query_op_t optype = QUERY_OP_UNDEFINED; STRLINK* strln = NULL; @@ -1312,15 +1313,15 @@ bool rule_matches(FW_INSTANCE* my_instance, FW_SESSION* my_session, GWBUF *queue tm_now = localtime(&time_now); matches = false; - is_sql = modutil_is_SQL(queue); + is_sql = modutil_is_SQL(queue) || modutil_is_SQL_prepare(queue); if(is_sql){ if(!query_is_parsed(queue)){ parse_query(queue); } optype = query_classifier_get_operation(queue); - modutil_extract_SQL(queue, &ptr, &qlen); is_real = skygw_is_real_query(queue); + qlen = gw_mysql_get_byte3(memptr) - 1; } if(rulelist->rule->on_queries == QUERY_OP_UNDEFINED || rulelist->rule->on_queries & optype){ @@ -1547,18 +1548,20 @@ bool check_match_any(FW_INSTANCE* my_instance, FW_SESSION* my_session, GWBUF *qu bool is_sql, rval = false; int qlen; char *fullquery = NULL,*ptr; - + unsigned char* memptr = (unsigned char*)queue->start; RULELIST* rulelist; - is_sql = modutil_is_SQL(queue); + is_sql = modutil_is_SQL(queue) || modutil_is_SQL_prepare(queue); if(is_sql){ if(!query_is_parsed(queue)){ parse_query(queue); } - modutil_extract_SQL(queue, &ptr, &qlen); - fullquery = malloc((qlen + 1) * sizeof(char)); - 
memcpy(fullquery,ptr,qlen); - memset(fullquery + qlen,0,1); + + qlen = gw_mysql_get_byte3(memptr); + qlen = qlen < 0xffffff ? qlen : 0xffffff; + fullquery = malloc((qlen) * sizeof(char)); + memcpy(fullquery,memptr + 5,qlen - 1); + memset(fullquery + qlen - 1,0,1); } if((rulelist = user->rules_or) == NULL) @@ -1598,21 +1601,22 @@ bool check_match_all(FW_INSTANCE* my_instance, FW_SESSION* my_session, GWBUF *qu { bool is_sql, rval = true; int qlen; + unsigned char* memptr = (unsigned char*)queue->start; char *fullquery = NULL,*ptr; RULELIST* rulelist; - is_sql = modutil_is_SQL(queue); - + is_sql = modutil_is_SQL(queue) || modutil_is_SQL_prepare(queue); + if(is_sql){ if(!query_is_parsed(queue)){ parse_query(queue); } - modutil_extract_SQL(queue, &ptr, &qlen); - fullquery = malloc((qlen + 1) * sizeof(char)); - memcpy(fullquery,ptr,qlen); - memset(fullquery + qlen,0,1); - + qlen = gw_mysql_get_byte3(memptr); + qlen = qlen < 0xffffff ? qlen : 0xffffff; + fullquery = malloc((qlen) * sizeof(char)); + memcpy(fullquery,memptr + 5,qlen - 1); + memset(fullquery + qlen - 1,0,1); } if(strict_all) diff --git a/server/modules/filter/qlafilter.c b/server/modules/filter/qlafilter.c index 74c176e02..c9af0a839 100644 --- a/server/modules/filter/qlafilter.c +++ b/server/modules/filter/qlafilter.c @@ -99,7 +99,7 @@ static FILTER_OBJECT MyObject = { */ typedef struct { int sessions; /* The count of sessions */ - char *filebase; /* The filemane base */ + char *filebase; /* The filename base */ char *source; /* The source of the client connection */ char *userName; /* The user name to filter on */ char *match; /* Optional text to match against */ @@ -424,8 +424,8 @@ struct timeval tv; "%02d:%02d:%02d.%-3d %d/%02d/%d, ", t.tm_hour, t.tm_min, t.tm_sec, (int)(tv.tv_usec / 1000), t.tm_mday, t.tm_mon + 1, 1900 + t.tm_year); - fwrite(ptr, sizeof(char), length, my_session->fp); - fwrite("\n", sizeof(char), 1, my_session->fp); + fprintf(my_session->fp,"%s\n",ptr); + } free(ptr); } diff --git 
a/server/modules/filter/slavelag.c b/server/modules/filter/slavelag.c new file mode 100644 index 000000000..c12d7a2a7 --- /dev/null +++ b/server/modules/filter/slavelag.c @@ -0,0 +1,403 @@ +/* + * This file is distributed as part of MaxScale by MariaDB Corporation. It is free + * software: you can redistribute it and/or modify it under the terms of the + * GNU General Public License as published by the Free Software Foundation, + * version 2. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 51 + * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Copyright MariaDB Corporation Ab 2014 + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** Defined in log_manager.cc */ +extern int lm_enabled_logfiles_bitmask; +extern size_t log_ses_count[]; +extern __thread log_info_t tls_log_info; + +/** + * @file slavelag.c - a very simple filter designed to send queries to the + * master server after data modification has occurred. This is done to prevent + * replication lag affecting the outcome of a select query. + * + * @verbatim + * + * Two optional parameters that define the behavior after a data modifying query + * is executed: + * + * count= Queries to route to master after data modification. + * time=
//; + s/