diff --git a/.gitignore b/.gitignore index 6e659cc6b..c761d00c4 100644 --- a/.gitignore +++ b/.gitignore @@ -30,6 +30,7 @@ depend.mk *~ *# .#* +._* # Vi swap files .*.swp diff --git a/CMakeLists.txt b/CMakeLists.txt index b3f7f9571..d50b5093d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,5 +1,5 @@ cmake_minimum_required(VERSION 2.6) - +message(STATUS "CMake version: ${CMAKE_VERSION}") include(macros.cmake) enable_testing() @@ -8,25 +8,43 @@ set_maxscale_version() set(CMAKE_INSTALL_PREFIX "${INSTALL_DIR}" CACHE INTERNAL "Prefix prepended to install directories." FORCE) + +set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/") + project(MaxScale) check_deps() check_dirs() +find_package(Valgrind) +find_package(MySQLClient) set(CMAKE_INSTALL_RPATH ${CMAKE_INSTALL_RPATH}:${CMAKE_INSTALL_PREFIX}/lib:${CMAKE_INSTALL_PREFIX}/modules) -configure_file(${CMAKE_SOURCE_DIR}/server/include/version.h.in ${CMAKE_SOURCE_DIR}/server/include/version.h) -configure_file(${CMAKE_SOURCE_DIR}/maxscale.conf.in ${CMAKE_SOURCE_DIR}/maxscale.conf.prep @ONLY) -configure_file(${CMAKE_SOURCE_DIR}/etc/init.d/maxscale.in ${CMAKE_SOURCE_DIR}/etc/init.d/maxscale.prep @ONLY) -configure_file(${CMAKE_SOURCE_DIR}/etc/ubuntu/init.d/maxscale.in ${CMAKE_SOURCE_DIR}/etc/ubuntu/init.d/maxscale.prep @ONLY) +file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/server/include) +configure_file(${CMAKE_SOURCE_DIR}/server/include/version.h.in ${CMAKE_BINARY_DIR}/server/include/version.h) +configure_file(${CMAKE_SOURCE_DIR}/maxscale.conf.in ${CMAKE_BINARY_DIR}/maxscale.conf.prep @ONLY) +configure_file(${CMAKE_SOURCE_DIR}/etc/init.d/maxscale.in ${CMAKE_BINARY_DIR}/etc/init.d/maxscale.prep @ONLY) +configure_file(${CMAKE_SOURCE_DIR}/etc/ubuntu/init.d/maxscale.in ${CMAKE_BINARY_DIR}/etc/ubuntu/init.d/maxscale.prep @ONLY) +configure_file(${CMAKE_SOURCE_DIR}/server/test/maxscale_test.h.in ${CMAKE_BINARY_DIR}/server/include/maxscale_test.h) set(CMAKE_C_FLAGS "-Wall -fPIC") set(CMAKE_CXX_FLAGS "-Wall -fPIC") 
+set(DEBUG_FLAGS "-ggdb -pthread -pipe -Wformat -fstack-protector --param=ssp-buffer-size=4") -if(BUILD_TYPE MATCHES Debug) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -ggdb -pthread -pipe -DSS_DEBUG -Wformat -Werror=format-security -fstack-protector --param=ssp-buffer-size=4") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -ggdb -pthread -pipe -DSS_DEBUG -Wformat -Werror=format-security -fstack-protector --param=ssp-buffer-size=4") +if((CMAKE_C_COMPILER_ID STREQUAL "GNU") AND (NOT (CMAKE_C_COMPILER_VERSION VERSION_LESS 4.2))) + message(STATUS "C Compiler supports: -Werror=format-security") + set(DEBUG_FLAGS "${DEBUG_FLAGS} -Werror=format-security") +endif() + + +if(BUILD_TYPE STREQUAL Debug) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${DEBUG_FLAGS} -DSS_DEBUG") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${DEBUG_FLAGS} -DSS_DEBUG") + message(STATUS "Generating debugging symbols and enabling debugging code") +elseif(BUILD_TYPE STREQUAL DebugSymbols) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${DEBUG_FLAGS}") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${DEBUG_FLAGS}") message(STATUS "Generating debugging symbols") elseif(BUILD_TYPE MATCHES Optimized) if(NOT (DEFINED OLEVEL)) @@ -54,6 +72,10 @@ if(GCOV) set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -lgcov") endif() +if(FAKE_CODE) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DFAKE_CODE") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DFAKE_CODE") +endif() subdirs(MYSQL_DIR_ALL ${MYSQL_DIR}) foreach(DIR ${MYSQL_DIR_ALL}) @@ -67,22 +89,26 @@ include_directories(query_classifier) include_directories(server/include) include_directories(server/inih) include_directories(server/modules/include) +include_directories(${CMAKE_BINARY_DIR}/server/include) add_subdirectory(utils) add_subdirectory(log_manager) add_subdirectory(query_classifier) add_subdirectory(server) add_subdirectory(client) - +if(BUILD_RABBITMQ) + find_package(RabbitMQ) + add_subdirectory(rabbitmq_consumer) +endif() # Install startup scripts and ldconfig files if( NOT ( (DEFINED 
INSTALL_SYSTEM_FILES) AND ( NOT ( INSTALL_SYSTEM_FILES ) ) ) ) - install(FILES maxscale.conf.prep RENAME maxscale.conf DESTINATION /etc/ld.so.conf.d/ PERMISSIONS WORLD_EXECUTE WORLD_READ) + install(FILES ${CMAKE_BINARY_DIR}/maxscale.conf.prep RENAME maxscale.conf DESTINATION /etc/ld.so.conf.d/ PERMISSIONS WORLD_EXECUTE WORLD_READ) if(DEB_BASED) - install(FILES etc/ubuntu/init.d/maxscale.prep RENAME maxscale DESTINATION /etc/init.d/ PERMISSIONS WORLD_EXECUTE) + install(FILES ${CMAKE_BINARY_DIR}/etc/ubuntu/init.d/maxscale.prep RENAME maxscale DESTINATION /etc/init.d/ PERMISSIONS WORLD_EXECUTE) else() - install(FILES etc/init.d/maxscale.prep RENAME maxscale DESTINATION /etc/init.d/ PERMISSIONS WORLD_EXECUTE) + install(FILES ${CMAKE_BINARY_DIR}/etc/init.d/maxscale.prep RENAME maxscale DESTINATION /etc/init.d/ PERMISSIONS WORLD_EXECUTE) endif() message(STATUS "Installing maxscale.conf to: /etc/ld.so.conf.d") message(STATUS "Installing startup scripts to: /etc/init.d") @@ -94,48 +120,102 @@ message(STATUS "Installing MaxScale to: ${CMAKE_INSTALL_PREFIX}/") install(FILES server/MaxScale_template.cnf DESTINATION etc) install(FILES ${ERRMSG} DESTINATION mysql) install(FILES ${DOCS} DESTINATION Documentation) +install(FILES ${CMAKE_SOURCE_DIR}/COPYRIGHT DESTINATION ${CMAKE_INSTALL_PREFIX}/) +install(FILES ${CMAKE_SOURCE_DIR}/README DESTINATION ${CMAKE_INSTALL_PREFIX}/) +install(FILES ${CMAKE_SOURCE_DIR}/LICENSE DESTINATION ${CMAKE_INSTALL_PREFIX}/) +install(FILES ${CMAKE_SOURCE_DIR}/SETUP DESTINATION ${CMAKE_INSTALL_PREFIX}/) +install(DIRECTORY DESTINATION log) -# See if we are on a RPM-capable or DEB-capable system -find_program(RPMBUILD rpmbuild) -find_program(DEBBUILD dpkg-buildpackage) +if(${CMAKE_VERSION} VERSION_LESS 2.8.12) + message(WARNING "CMake version is ${CMAKE_VERSION}. 
Building of packages requires version 2.8.12 or greater.") +else() + # See if we are on a RPM-capable or DEB-capable system + find_program(RPMBUILD rpmbuild) + find_program(DEBBUILD dpkg-buildpackage) + set(CPACK_GENERATOR "TGZ") + if(NOT ( ${RPMBUILD} STREQUAL "RPMBUILD-NOTFOUND" ) ) + message(STATUS "Generating RPM packages") + set(CPACK_GENERATOR "${CPACK_GENERATOR};RPM") + endif() -if(NOT ( ${RPMBUILD} STREQUAL "RPMBUILD-NOTFOUND" ) ) - message(STATUS "Generating RPM packages") - set(CPACK_GENERATOR "${CPACK_GENERATOR};RPM") + if(NOT ( ${DEBBUILD} STREQUAL "DEBBUILD-NOTFOUND" ) ) + set(CPACK_GENERATOR "${CPACK_GENERATOR};DEB") + execute_process(COMMAND dpgk --print-architecture OUTPUT_VARIABLE DEB_ARCHITECTURE) + set(CPACK_DEBIAN_PACKAGE_ARCHITECTURE ${DEB_ARCHITECTURE}) + set (CPACK_DEBIAN_PACKAGE_SHLIBDEPS ON) + message(STATUS "Generating DEB packages for ${DEB_ARCHITECTURE}") + endif() + + set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "MaxScale") + set(CPACK_PACKAGE_VERSION_MAJOR "${MAXSCALE_VERSION_MAJOR}") + set(CPACK_PACKAGE_VERSION_MINOR "${MAXSCALE_VERSION_MINOR}") + set(CPACK_PACKAGE_VERSION_PATCH "${MAXSCALE_VERSION_PATCH}") + set(CPACK_PACKAGE_CONTACT "MariaDB Corporation Ab") + set(CPACK_PACKAGE_FILE_NAME "maxscale-${MAXSCALE_VERSION}") + set(CPACK_PACKAGE_NAME "maxscale") + set(CPACK_PACKAGE_VENDOR "MariaDB Corporation Ab") + set(CPACK_PACKAGE_DESCRIPTION_FILE ${CMAKE_SOURCE_DIR}/README) + set(CPACK_PACKAGING_INSTALL_PREFIX "${CMAKE_INSTALL_PREFIX}") + set(CPACK_RPM_SPEC_INSTALL_POST "/sbin/ldconfig") + set(CPACK_RPM_PACKAGE_NAME "maxscale") + set(CPACK_RPM_PACKAGE_VENDOR "MariaDB Corporation Ab") + set(CPACK_RPM_PACKAGE_LICENSE "GPLv2") + set(CPACK_RPM_EXCLUDE_FROM_AUTO_FILELIST_ADDITION "/etc /etc/ld.so.conf.d /etc/init.d /etc/rc.d/init.d") + set(CPACK_RPM_SPEC_MORE_DEFINE "%define ignore \#") + set(CPACK_RPM_USER_FILELIST "%ignore /etc/init.d") + set(CPACK_RPM_USER_FILELIST "%ignore /etc/ld.so.conf.d") + set(CPACK_RPM_USER_FILELIST "%ignore /etc") + 
include(CPack) endif() -if(NOT ( ${DEBBUILD} STREQUAL "DEBBUILD-NOTFOUND" ) ) - set(CPACK_GENERATOR "${CPACK_GENERATOR};DEB") - execute_process(COMMAND dpgk --print-architecture OUTPUT_VARIABLE DEB_ARCHITECTURE) - set(CPACK_DEBIAN_PACKAGE_ARCHITECTURE ${DEB_ARCHITECTURE}) - message(STATUS "Generating DEB packages for ${DEB_ARCHITECTURE}") -endif() -set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "MaxScale") -set(CPACK_PACKAGE_VERSION_MAJOR "${MAXSCALE_VERSION_MAJOR}") -set(CPACK_PACKAGE_VERSION_MINOR "${MAXSCALE_VERSION_MINOR}") -set(CPACK_PACKAGE_VERSION_PATCH "${MAXSCALE_VERSION_PATCH}") -set(CPACK_PACKAGE_CONTACT "SkySQL Ab") -set(CPACK_PACKAGE_FILE_NAME "maxscale-${MAXSCALE_VERSION}") -set(CPACK_PACKAGE_NAME "maxscale") -set(CPACK_PACKAGE_VENDOR "SkySQL Ab") -set(CPACK_PACKAGE_DESCRIPTION_FILE ${CMAKE_SOURCE_DIR}/README) -set(CPACK_PACKAGING_INSTALL_PREFIX "${CMAKE_INSTALL_PREFIX}") -set(CPACK_RPM_SPEC_INSTALL_POST "/sbin/ldconfig") -set(CPACK_RPM_PACKAGE_NAME "maxscale") -set(CPACK_RPM_PACKAGE_VENDOR "SkySQL Ab") -set(CPACK_RPM_PACKAGE_LICENSE "GPLv2") -set(CPACK_RPM_PACKAGE_AUTOREQPROV " no") -set(CPACK_RPM_EXCLUDE_FROM_AUTO_FILELIST_ADDITION "/etc /etc/ld.so.conf.d /etc/init.d /etc/rc.d/init.d") -set(CPACK_RPM_SPEC_MORE_DEFINE "%define ignore \#") -set(CPACK_RPM_USER_FILELIST "%ignore /etc/init.d") -set(CPACK_RPM_USER_FILELIST "%ignore /etc/ld.so.conf.d") -set(CPACK_RPM_USER_FILELIST "%ignore /etc") -include(CPack) +add_custom_target(buildtests + COMMAND ${CMAKE_COMMAND} -DDEPS_OK=Y -DBUILD_TESTS=Y -DBUILD_TYPE=Debug -DINSTALL_DIR=${CMAKE_BINARY_DIR} -DINSTALL_SYSTEM_FILES=N ${CMAKE_SOURCE_DIR} + COMMAND make + COMMENT "Building test suite..." 
VERBATIM +) + add_custom_target(testall COMMAND ${CMAKE_COMMAND} -DDEPS_OK=Y -DBUILD_TESTS=Y -DBUILD_TYPE=Debug -DINSTALL_DIR=${CMAKE_BINARY_DIR} -DINSTALL_SYSTEM_FILES=N ${CMAKE_SOURCE_DIR} COMMAND make install + COMMAND ${CMAKE_COMMAND} -E copy_if_different ${CMAKE_SOURCE_DIR}/server/test/MaxScale_test.cnf ${CMAKE_BINARY_DIR}/etc/MaxScale.cnf COMMAND /bin/sh -c "${CMAKE_BINARY_DIR}/bin/maxscale -c ${CMAKE_BINARY_DIR} &>/dev/null" - COMMAND make test - COMMAND /bin/sh -c "killall -KILL maxscale" - COMMENT "Running full test suite") \ No newline at end of file + COMMAND /bin/sh -c "make test || echo \"Test results written to: ${CMAKE_BINARY_DIR}/Testing/Temporary/\"" + COMMAND killall maxscale + COMMENT "Running full test suite..." VERBATIM) + +# uninstall target +# see http://www.cmake.org/Wiki/CMake_FAQ#Can_I_do_.22make_uninstall.22_with_CMake.3F +configure_file( + "${CMAKE_CURRENT_SOURCE_DIR}/cmake_uninstall.cmake.in" + "${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake" + IMMEDIATE @ONLY) + +add_custom_target(uninstall + COMMAND ${CMAKE_COMMAND} -P ${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake) + +find_package(Doxygen) + +if(DOXYGEN_FOUND) + configure_file( + "${CMAKE_CURRENT_SOURCE_DIR}/doxygate.in" + "${CMAKE_CURRENT_BINARY_DIR}/doxygate" + IMMEDIATE @ONLY) + + add_custom_target(documentation + COMMAND ${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/doxygate) + +endif() + +# Testall target with Valgrind +if(VALGRIND_FOUND) +add_custom_target(testall-valgrind + COMMAND ${CMAKE_COMMAND} -DDEPS_OK=Y -DBUILD_TESTS=Y -DBUILD_TYPE=Debug -DINSTALL_DIR=${CMAKE_BINARY_DIR} -DINSTALL_SYSTEM_FILES=N ${CMAKE_SOURCE_DIR} + COMMAND make install + COMMAND ${CMAKE_COMMAND} -E copy_if_different ${CMAKE_SOURCE_DIR}/server/test/MaxScale_test.cnf ${CMAKE_BINARY_DIR}/etc/MaxScale.cnf + COMMAND /bin/sh -c "valgrind --track-fds=yes --leak-check=full --show-leak-kinds=all --log-file=${CMAKE_BINARY_DIR}/valgrind.log ${CMAKE_BINARY_DIR}/bin/maxscale -c ${CMAKE_BINARY_DIR} 
&>/dev/null" + COMMAND /bin/sh -c "make test || echo \"Test results written to: ${CMAKE_BINARY_DIR}/Testing/Temporary/\"" + COMMAND killall maxscale + COMMENT "Running full test suite with Valgrind..." VERBATIM) + +endif() diff --git a/COPYRIGHT b/COPYRIGHT index 47d6ca846..83c3e4f14 100644 --- a/COPYRIGHT +++ b/COPYRIGHT @@ -1,4 +1,4 @@ -This source code is distributed as part of SkySQL MaxScale. It is free +This source code is distributed as part of MariaDB Corporation MaxScale. It is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, version 2. @@ -12,9 +12,9 @@ You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -Copyright SkySQL Ab 2013 +Copyright MariaDB Corporation Ab 2013 -SkySQL Corporation Ab +MariaDB Corporation Corporation Ab Tekniikantie 12 02150 Espoo Finland diff --git a/Documentation/MariaDB MaxScale Administration Tutorial.pdf b/Documentation/MariaDB MaxScale Administration Tutorial.pdf new file mode 100755 index 000000000..187ac0950 Binary files /dev/null and b/Documentation/MariaDB MaxScale Administration Tutorial.pdf differ diff --git a/Documentation/MariaDB MaxScale 1.0.4 Release Notes.pdf b/Documentation/MariaDB MaxScale 1.0.4 Release Notes.pdf new file mode 100644 index 000000000..f8c47d5c0 Binary files /dev/null and b/Documentation/MariaDB MaxScale 1.0.4 Release Notes.pdf differ diff --git a/Documentation/MariaDB MaxScale Configuration Guide.pdf b/Documentation/MariaDB MaxScale Configuration Guide.pdf new file mode 100755 index 000000000..8924c85e7 Binary files /dev/null and b/Documentation/MariaDB MaxScale Configuration Guide.pdf differ diff --git a/Documentation/MariaDB MaxScale Debug-And-Diagnostic-Support.pdf b/Documentation/MariaDB MaxScale Debug-And-Diagnostic-Support.pdf new file mode 
100755 index 000000000..b4313e2ab Binary files /dev/null and b/Documentation/MariaDB MaxScale Debug-And-Diagnostic-Support.pdf differ diff --git a/Documentation/MariaDB MaxScale Filter Tutorial.pdf b/Documentation/MariaDB MaxScale Filter Tutorial.pdf new file mode 100755 index 000000000..7a3337af6 Binary files /dev/null and b/Documentation/MariaDB MaxScale Filter Tutorial.pdf differ diff --git a/Documentation/MariaDB MaxScale Galera Cluster Connection Routing Tutorial.pdf b/Documentation/MariaDB MaxScale Galera Cluster Connection Routing Tutorial.pdf new file mode 100755 index 000000000..e6a684054 Binary files /dev/null and b/Documentation/MariaDB MaxScale Galera Cluster Connection Routing Tutorial.pdf differ diff --git a/Documentation/MariaDB MaxScale Galera Cluster Read-Write Splitting Tutorial.pdf b/Documentation/MariaDB MaxScale Galera Cluster Read-Write Splitting Tutorial.pdf new file mode 100755 index 000000000..7ca94cf87 Binary files /dev/null and b/Documentation/MariaDB MaxScale Galera Cluster Read-Write Splitting Tutorial.pdf differ diff --git a/Documentation/MariaDB MaxScale HA with Corosync-Pacemaker.pdf b/Documentation/MariaDB MaxScale HA with Corosync-Pacemaker.pdf new file mode 100755 index 000000000..fe07170c2 Binary files /dev/null and b/Documentation/MariaDB MaxScale HA with Corosync-Pacemaker.pdf differ diff --git a/Documentation/MariaDB MaxScale Limitations.pdf b/Documentation/MariaDB MaxScale Limitations.pdf new file mode 100755 index 000000000..46762a18a Binary files /dev/null and b/Documentation/MariaDB MaxScale Limitations.pdf differ diff --git a/Documentation/MariaDB MaxScale MaxAdmin.pdf b/Documentation/MariaDB MaxScale MaxAdmin.pdf new file mode 100755 index 000000000..a22f785a3 Binary files /dev/null and b/Documentation/MariaDB MaxScale MaxAdmin.pdf differ diff --git a/Documentation/MariaDB MaxScale MySQL Replication Connection Routing Tutorial.pdf b/Documentation/MariaDB MaxScale MySQL Replication Connection Routing Tutorial.pdf new file 
mode 100755 index 000000000..e2e9bc7e1 Binary files /dev/null and b/Documentation/MariaDB MaxScale MySQL Replication Connection Routing Tutorial.pdf differ diff --git a/Documentation/MariaDB MaxScale MySQL Replication Read-Write Splitting Tutorial.pdf b/Documentation/MariaDB MaxScale MySQL Replication Read-Write Splitting Tutorial.pdf new file mode 100755 index 000000000..c2eb92106 Binary files /dev/null and b/Documentation/MariaDB MaxScale MySQL Replication Read-Write Splitting Tutorial.pdf differ diff --git a/Documentation/MariaDB MaxScale_ Getting Started With MaxScale.pdf b/Documentation/MariaDB MaxScale_ Getting Started With MaxScale.pdf new file mode 100755 index 000000000..3d66adabb Binary files /dev/null and b/Documentation/MariaDB MaxScale_ Getting Started With MaxScale.pdf differ diff --git a/Documentation/MaxAdmin The MaxScale Administration And Monitoring Client.pdf b/Documentation/MaxAdmin The MaxScale Administration And Monitoring Client.pdf deleted file mode 100644 index 9ccd75b42..000000000 Binary files a/Documentation/MaxAdmin The MaxScale Administration And Monitoring Client.pdf and /dev/null differ diff --git a/Documentation/MaxScale Configuration And Usage Scenarios.pdf b/Documentation/MaxScale Configuration And Usage Scenarios.pdf deleted file mode 100644 index b297e427c..000000000 Binary files a/Documentation/MaxScale Configuration And Usage Scenarios.pdf and /dev/null differ diff --git a/Documentation/MaxScale Debug And Diagnostic Support.pdf b/Documentation/MaxScale Debug And Diagnostic Support.pdf deleted file mode 100644 index 746a78583..000000000 Binary files a/Documentation/MaxScale Debug And Diagnostic Support.pdf and /dev/null differ diff --git a/Documentation/MaxScale HA with Corosync and Pacemaker.pdf b/Documentation/MaxScale HA with Corosync and Pacemaker.pdf deleted file mode 100644 index aa0daac7c..000000000 Binary files a/Documentation/MaxScale HA with Corosync and Pacemaker.pdf and /dev/null differ diff --git 
a/Documentation/MaxScale MySQL Cluster setup.pdf b/Documentation/MaxScale MySQL Cluster setup.pdf deleted file mode 100644 index 5d4490fd6..000000000 Binary files a/Documentation/MaxScale MySQL Cluster setup.pdf and /dev/null differ diff --git a/Documentation/RabbitMQ Setup And MaxScale Integration.pdf b/Documentation/RabbitMQ Setup And MaxScale Integration.pdf deleted file mode 100644 index e9c728087..000000000 Binary files a/Documentation/RabbitMQ Setup And MaxScale Integration.pdf and /dev/null differ diff --git a/Documentation/experimental/ConfigurationGuide.asciidoc b/Documentation/experimental/ConfigurationGuide.asciidoc new file mode 100644 index 000000000..2857606b1 --- /dev/null +++ b/Documentation/experimental/ConfigurationGuide.asciidoc @@ -0,0 +1,999 @@ + + +MaxScale + +Configuration & Usage Scenarios + + +Mark Riddoch + +Last Updated: 2nd July 2014 + + +== Contents + +Contents +Document History +Introduction +Terms +Configuration +Global Settings +Threads +Service +Router +Filters +Servers +User +Passwd +weightby +Server +Address +Port +Protocol +Monitoruser +MonitorPw +Listener +Service +Protocol +Address +Port +Filter +Module +Options +Other Parameters +Monitor +Module +Servers +User +Passwd +Protocol Modules +MySQLClient +MySQLBackend +Telnetd +maxscaled +HTTPD +Router Modules +Connection Based Routing +Statement Based Routing +Available Routing Modules +Readconnroute +Master/Slave Replication Setup +Galera Cluster Configuration +Readwritesplit +Master/Slave Replication Setup +Debugcli +Debug CLI Configuration +CLI +CLI Configuration +Monitor Modules +Mysqlmon +Galeramon +Filter Modules +Statement Counting Filter +Query Log All Filter +Regular Expression Filter +Tee Filter +Encrypting Passwords +Creating Encrypted Passwords +Configuration Updates +Limitations +Authentication +Wildcard Hosts +Limitations +Error Reporting + + +== +== +== Document History + +|=== +|*Date*|*Change*|*Who* + +|21st July 2013|Initial version|Mark Riddoch +|23rd July 
2013|Addition of default user and password for a monitor and discussion of monitor user requirements|Mark Riddoch +|13th November 2013|state for Galera Monitor is “synced”|Massimiliano Pinto +|2nd December 2013|Updated the description of the command line arguments to match the code updates.|Mark Riddoch +|6th February 2014|Added “enable_root_user” as a service parameter|Massimiliano Pinto +|7th February 2014|Addition of bind address information|Mark Riddoch +|3rd March 2014|MySQL authentication with hostnames|Massimiliano Pinto +|3rd March 2014|Addition of section that describes authentication requirements and the rules for creating user credentials|Mark Riddoch +|28th March 2014|Unix socket support|Massimiliano Pinto +|8th May 2014|Added “version_string” parameter in service|Massimiliano Pinto +|29th May 2014|Added troubleshooting section|Massimiliano Pinto +|2nd June 2014|Correction of some typos, clarification of the meaning of session modification statements and the default user for the CLI.|Mark Riddoch +|4th June 2014|Addition of “monitor_interval” for monitors|Massimiliano Pinto +|6th June 2014|Addition of filters sections|Mark Riddoch +|27th June 2014|Addition of server weighting, the configuration for the maxadmin client|Mark Riddoch +|2nd July 2014|Addition of new readwritesplit router options with description and examples.|Vilho Raatikka +|=== +== +== Introduction + +The purpose of this document is to describe how to configure MaxScale and to discuss some possible usage scenarios for MaxScale. MaxScale is designed with flexibility in mind, and consists of an event processing core with various support functions and plugin modules that tailor the behaviour of the MaxScale itself. + +=== Terms + +|=== +|*Term*|*Description* + +|service|A service represents a set of databases with a specific access mechanism that is offered to clients of MaxScale. 
The access mechanism defines the algorithm that MaxScale will use to direct particular requests to the individual databases. +|server|A server represents an individual database server to which a client can be connected via MaxScale. +|router|A router is a module within MaxScale that will route client requests to the various database servers which MaxScale provides a service interface to. +|connection routing|Connection routing is a method of handling requests in which MaxScale will accept connections from a client and route data on that connection to a single database using a single connection. Connection based routing will not examine individual quests on a connection and it will not move that connection once it is established. +|statement routing|Statement routing is a method of handling requests in which each request within a connection will be handled individually. Requests may be sent to one or more servers and connections may be dynamically added or removed from the session. +|protocol|A protocol is a module of software that is used to communicate with another software entity within the system. MaxScale supports the dynamic loading of protocol modules to allow for increased flexibility. +|module|A module is a separate code entity that may be loaded dynamically into MaxScale to increase the available functionality. Modules are implemented as run-time loadable shared objects. +|monitor|A monitor is a module that can be executed within MaxScale to monitor the state of a set of database. The use of an internal monitor is optional, monitoring may be performed externally to MaxScale. +|listener|A listener is the network endpoint that is used to listen for connections to MaxScale from the client applications. A listener is associated to a single service, however a service may have many listeners. 
+|connection failover|When a connection currently being used between MaxScale and the database server fails a replacement will be automatically created to another server by MaxScale without client intervention +|backend database|A term used to refer to a database that sits behind MaxScale and is accessed by applications via MaxScale. +|filter|A module that can be placed between the client and the MaxScale router module. All client data passes through the filter module and may be examined or modified by the filter modules. +|=== + + +== +== +== Configuration + +The MaxScale configuration is read from a file which can be located in a number of placing, MaxScale will search for the configuration file in a number of locations. + + . If the environment variable MAXSCALE_HOME is set then MaxScale will look for a configuration file called MaxScale.cnf in the directory $MAXSCALE_HOME/etc + . If MAXSCALE_HOME is not set or the configuration file is not in the location above MaxScale will look for a file in /etc/MaxScale.cnf + +Alternatively MaxScale can be started with the -c flag and the path of the MaxScale home directory tree. + +An explicit path to a configuration file can be passed by using the -f option to MaxScale. + +The configuration file itself is based on the “ini” file format and consists of various sections that are used to build the configuration, these sections define services, servers, listeners, monitors and global settings. + +=== Global Settings +The global settings, in a section named [MaxScale], allow various parameters that affect MaxScale as a whole to be tuned. Currently the only setting that is supported is the number of threads to use to handle the network traffic. MaxScale will also accept the section name of [gateway] for global settings. This is for backward compatibility with versions prior to the naming of MaxScale. + +==== Threads +To control the number of threads that poll for network traffic set the parameter threads to a number. 
It is recommended that you start with a single thread and add more as you find the performance is not satisfactory. MaxScale is implemented to be very thread efficient, so a small number of threads is usually adequate to support reasonably heavy workloads. Adding more threads may not improve performance and can consume resources needlessly. + +---- +# Valid options are: +# threads= +[MaxScale] +threads=1 +---- + +It should be noted that additional threads will be created to execute other internal services within MaxScale, this setting is merely used to configure the number of threads that will be used to manage the user connections. + +=== Service +A service represents the database service that MaxScale offers to the clients. In general a service consists of a set of backend database servers and a routing algorithm that determines how MaxScale decides to send statements or route connections to those backend servers. + +A service may be considered as a virtual database server that MaxScale makes available to its clients. + +Several different services may be defined using the same set of backend servers. For example a connection based routing service might be used by clients that already performed internal read/write splitting, whilst a different statement based router may be used by clients that are not written with this functionality in place. Both sets of applications could access the same data in the same databases. + +A service is identified by a service name, which is the name of the configuration file section and a type parameter of service + +---- +[Test Service] +type=service +---- + +In order for MaxScale to forward any requests it must have at least one service defined within the configuration file. The definition of a service alone is not enough to allow MaxScale to forward requests however, the service is merely present to link together the other configuration elements. 
+ +==== Router +The router parameter of a service defines the name of the router module that will be used to implement the routing algorithm between the client of MaxScale and the backend databases. Additionally routers may also be passed a comma separated list of options that are used to control the behaviour of the routing algorithm. The two parameters that control the routing choice are router and router_options. The router options are specific to a particular router and are used to modify the behaviour of the router. The read connection router can be passed options of master, slave or synced, an example of configuring a service to use this router and limiting the choice of servers to those in slave state would be as follows. + +---- +router=readconnroute +router_options=slave +---- + +To change the router to connect on to servers in the master state as well as slave servers, the router options can be modified to include the master state. + +---- +router=readconnroute +router_options=master,slave +---- + +A more complete description of router options and what is available for a given router is included with the documentation of the router itself. + +==== Filters +The filters option allow a set of filters to be defined for a service; requests from the client are passed through these filters before being sent to the router for dispatch to the backend server. The filters parameter takes one or more filter names, as defined within the filter definition section of the configuration file. Multiple filters are separated using the | character. + ++filters=counter | QLA+ + +The requests pass through the filters from left to right in the order defined in the configuration parameter. + +==== Servers +The servers parameter in a service definition provides a comma separated list of the backend servers that comprise the service. The server names are those used in the name section of a block with a type parameter of server (see below). 
+ ++servers=server1,server2,server3+ + +==== User +The user parameter, along with the passwd parameter are used to define the credentials used to connect to the backend servers to extract the list of database users from the backend database that is used for the client authentication. + +---- +user=maxscale +passwd=Mhu87p2D +---- + +Authentication of incoming connections is performed by MaxScale itself rather than by the database server to which the client is connected. The client will authenticate itself with MaxScale, using the username, hostname and password information that MaxScale has extracted from the backend database servers. For a detailed discussion of how this impacts the authentication process please see the “Authentication” section below. + +The host matching criteria is restricted to IPv4, IPv6 will be added in a future release. + +Existing user configuration in the backend databases must be checked and may be updated before successful MaxScale authentication: + + +In order for MaxScale to obtain all the data it must be given a username it can use to connect to the database and retrieve that data. This is the parameter that gives MaxScale the username to use for this purpose. + +The account used must be able to select from the mysql.user table, the following is an example showing how to create this user. + +---- +MariaDB [mysql]> create user 'maxscale'@'maxscalehost' identified by 'Mhu87p2D'; +Query OK, 0 rows affected (0.01 sec) + +MariaDB [mysql]> grant SELECT on mysql.user to 'maxscale'@'maxscalehost'; +---- +Query OK, 0 rows affected (0.00 sec) + +==== Passwd +The auth parameter provides the password information for the above user and may be either a plain text password or it may be an encrypted password. See the section on encrypting passwords for use in the MaxScale.cnf file. This user must be capable of connecting to the backend database and executing the SQL statement “SELECT user, host, password FROM mysql.user”. 
+ +*enable_root_user* + +This parameter controls the ability of the root user to connect to MaxScale and hence onwards to the backend servers via MaxScale. + +The default value is 0, disabling the ability of the root user to connect to MaxScale. + +Example for enabling root user: + +enable_root_user=1 + +Values of “on” or “true” may also be given to enable the root user and “off” or “false” may be given to disable the use of the root user. + ++enable_root_user=true+ + +*version_string* + +This parameter sets a custom version string that is sent in the MySQL Handshake from MaxScale to clients. + +Example: + +version_string=5.5.37-MariaDB-RWsplit + +If not set, the default value is the server version of the embedded MySQL/MariaDB library. Example: 5.5.35-MariaDB + +==== weightby +The weightby parameter is used in conjunction with server parameters in order to control the load balancing applied in the router in use by the service. This allows varying weights to be applied to each server to create a non-uniform distribution of the load amongst the servers. + +An example of this might be to define a parameter for each server that represents the amount of resource available on the server, we could call this serversize. Every server should then have a serversize parameter set for the server. + ++serversize=10+ + +The service would then have the parameter weightby set. If there are 4 servers defined in the service, serverA, serverB, serverC and serverD, with the serversize set as shown in the table below, the connections would balanced using the percentages in this table. + +|=== +|Server|serversize|% connections + +|serverA|10|18% +|serverB|15|27% +|serverC|10|18% +|serverD|20|36% +|=== + +=== Server + +Server sections are used to define the backend database servers that can be formed into a service. A server may be a member of one or more services within MaxScale. Servers are identified by a server name which is the section name in the configuration file. 
Servers have a type parameter of server, plus address port and protocol parameters. + +---- +[server1] +type=server +address=127.0.0.1 +port=3000 +protocol=MySQLBackend +---- +==== Address +The IP address or hostname of the machine running the database server that is being defined. MaxScale will use this address to connect to the backend database server. + +==== Port +The port on which the database listens for incoming connections. MaxScale will use this port to connect to the database server. + +==== Protocol +The name for the protocol module to use to connect MaxScale to the database. Currently only one backend protocol is supported, the MySQLBackend module. + +==== Monitoruser +The monitor has a username and password that is used to connect to all servers for monitoring purposes, this may be overridden by supplying a monitoruser statement for each individual server + ++monitoruser=mymonitoruser+ + +==== MonitorPw +The monitor has a username and password that is used to connect to all servers for monitoring purposes, this may be overridden by supplying a monpasswd statement for the individual servers + +---- +monitorpw=mymonitorpasswd + +---- +The monpasswd parameter may be either a plain text password or it may be an encrypted password. See the section on encrypting passwords for use in the MaxScale.cnf file. + +=== Listener + +The listener defines a port and protocol pair that is used to listen for connections to a service. A service may have multiple listeners associated with it, either to support multiple protocols or multiple ports. As with other elements of the configuration the section name is the listener name and a type parameter is used to identify the section as a listener definition. + +---- +[Test Listener] +type=listener +service=Test Service +protocol=MySQLClient +address=localhost +port=4008 +socket=/tmp/testlistener.sock +---- +==== Service +The service to which the listener is associated. 
This is the name of a service that is defined elsewhere in the configuration file. + +==== Protocol +The name of the protocol module that is used for the communication between the client and MaxScale itself. + +==== Address +The address option sets the address that will be used to bind the listening socket. The address may be specified as an IP address in ‘dot notation’ or as a hostname. If the address option is not included in the listener definition the listener will bind to all network interfaces. + +==== Port +The port to use to listen for incoming connections to MaxScale from the clients. If the port is omitted from the configuration a default port for the protocol will be used. + +*Socket* + +The socket option may be included in a listener definition, this configures the listener to use Unix domain sockets to listen for incoming connections. The parameter value given is the name of the socket to use. + +If a socket option and an address option is given then the listener will listen on both the specific IP address and the Unix socket. + +=== Filter +Filters provide a means to manipulate or process requests as they pass through MaxScale between the client side protocol and the query router. A filter should be defined in a section with a type of filter. + +---- +[QLA] +type=filter +module=qlafilter +options=/tmp/QueryLog +---- + +The section name may then be used in one or more services by using the filters= parameter in the service section. In order to use the above filter for a service called “QLA Service”, an entry of the following form would exist for that service. + +---- +[QLA Service] +type=service +router=readconnroute +router_options=slave +servers=server1,server2,server3,server4 +user=massi +passwd=6628C50E07CCE1F0392EDEEB9D1203F3 +filters=QLA +---- + +See the Services section for more details on how to configure the various options of a service. + +==== Module +The module parameter defines the name of the loadable module that implements the filter. 
+ +==== Options +The options parameter is used to pass options to the filter to control the actions the filter will perform. The values that can be passed differ between filter implementation, the inclusion of an options parameter is optional. + +==== Other Parameters +Any other parameters present in the filters section will be passed to the filter to be interpreted by the filter. An example of this is the regexfilter that requires the two parameters match and replace + +---- +[regex] +type=filter +module=regexfilter +match=form +replace=from +---- + +=== Monitor + +In order for the various router modules to function correctly they require information about the state of the servers that are part of the service they provide. MaxScale has the ability to internally monitor the state of the back-end database servers or that state may be fed into MaxScale from external monitoring systems. If automated monitoring and failover of services is required this is achieved by running a monitor module that is designed for the particular database architecture that is in use. + +Monitors are defined in much the same way as other elements in the configuration file, with the section name being the name of the monitor instance and the type being set to monitor. + +---- +[MySQL Monitor] +type=monitor +module=mysqlmon +servers=server1,server2,server3 +user=dbmonitoruser +passwd=dbmonitorpwd +monitor_interval=8000 + +---- +==== Module +The module parameter defines the name of the loadable module that implements the monitor. This module is loaded and executed on a separate thread within MaxScale. + +==== Servers +The servers parameter is a comma separated list of server names to monitor, these are the names defined elsewhere in the configuration file. The set of servers monitored by a single monitor need not be the same as the set of servers used within any particular service, a single monitor instance may monitor servers in multiple services. 
+ +==== User +The user parameter defines the username that the monitor will use to connect to the monitored databases. Depending on the monitoring module used this user will require specific privileges in order to determine the state of the nodes, details of those privileges can be found in the sections on each of the monitor modules. + +Individual servers may define override values for the user and password the monitor uses by setting the monuser and monpasswd parameters in the server section. + +==== Passwd +The password parameter may be either a plain text password or it may be an encrypted password. See the section on encrypting passwords for use in the MaxScale.cnf file. + +*Monitor_interval* + +The monitor_interval parameter sets the sampling interval in milliseconds for each monitor, the default value is 10000 milliseconds. +== +== +== Protocol Modules +The protocols supported by MaxScale are implemented as external modules that are loaded dynamically into the MaxScale core. These modules reside in the directory $MAXSCALE_HOME/module, if the environment variable $MAXSCALE_HOME is not set it defaults to /usr/local/skysql/MaxScale. It may also be set by passing the -c option on the MaxScale command line. + +=== MySQLClient + +This is the implementation of the MySQL protocol that is used by clients of MaxScale to connect to MaxScale. + +=== MySQLBackend + +The MySQLBackend protocol module is the implementation of the protocol that MaxScale uses to connect to the backend MySQL, MariaDB and Percona Server databases. This implementation is tailored for the MaxScale to MySQL Database traffic and is not a general purpose implementation of the MySQL protocol. + +=== Telnetd + +The telnetd protocol module is used for connections to MaxScale itself for the purposes of creating interactive user sessions with the MaxScale instance itself. Currently this is used in conjunction with a special router implementation, the debugcli. 
+ +=== maxscaled +The protocol used by the maxadmin client application in order to connect to MaxScale and access the command line interface. + +=== HTTPD + +This protocol module is currently still under development, it provides a means to create HTTP connections to MaxScale for use by web browsers or RESTful API clients. +== +== +== Router Modules +The main task of MaxScale is to accept database connections from client applications and route the connections or the statements sent over those connections to the various services supported by MaxScale. + +There are two flavours of routing that MaxScale can perform, connection based routing and statement based routing. These each have their own characteristics and costs associated with them. + +=== Connection Based Routing + +Connection based routing is a mechanism by which MaxScale will, for each incoming connection decide on an appropriate outbound server and will forward all statements to that server without examining the internals of the statement. Once an inbound connection is associated to a particular backend database it will remain connected to that server until the connection is closed or the server fails. + +=== Statement Based Routing + +Statement based routing is somewhat different, the routing modules examine every statement the client sends and determines, on a per statement basis, which of the set of backend servers in the service is best to execute the statement. This gives better dynamic balancing of the load within the cluster but comes at a cost. The query router must understand the statement that is being routed and will typically need to parse the statement in order to achieve this. This parsing within the router adds a significant overhead to the cost of routing and makes this type of router only really suitable for loads in which the gains outweigh this added cost. 
+ +=== Available Routing Modules + +Currently a small number of query routers are available, these are in different stages of completion and offer different facilities. + +==== Readconnroute +This is a connection based query router that was originally targeted at environments in which the clients already performed splitting of read and write queries into separate connections. + +Whenever a new connection is received the router will examine the state of all the servers that form part of the service and route the connection to the server with least connections currently that matches the filter constraints given in the router options. This results in a balancing of the active connections, however different connections may have different lifetimes and the connections may become unbalanced when later viewed. + +The readconnroute router can be configured to balance the connections from the clients across all the backend servers that are running, just those backend servers that are currently replication slaves or those that are replication masters when routing to a master slave replication environment. When a Galera cluster environment is in use the servers can be filtered to just the set that are part of the cluster and in the ‘synced’ state. These options are configurable via the router_options that can be set within a service. The router_option strings supported are “master”, “slave” and “synced”. + +===== Master/Slave Replication Setup + +To setup MaxScale to route connections evenly between all the current slave servers in a replication cluster, a service entry of the form shown below is required. 
+ +---- +[Read Service] +type=service +router=readconnroute +router_options=slave +servers=server1,server2,server3,server4 +user=maxscale +auth=thepasswd +---- + +With the addition of a listener for this service, which defines the port and protocol that MaxScale uses + +---- +[Read Listener] +type=listener +service=Read Service +protocol=MySQLClient +port=4006 +---- + +the client can now connect to port 4006 on the host which is running MaxScale. Statements sent using this connection will then be routed to one of the slaves in the server set defined in the Read Service. Exactly which is selected will be determined by balancing the number of connections to each of those whose current state is “slave”. + +Altering the router options to be slave, master would result in the connections being balanced between all the servers within the cluster. + +It is assumed that the client will have a separate connection to the master server, however this can be routed via MaxScale, allowing MaxScale to manage the determination of which server is master. To do this you would add a second service and listener definition for the master server. + +---- +[Write Service] +type=service +router=readconnroute +router_options=master +servers=server1,server2,server3,server4 +user=maxscale +auth=thepasswd + +[Write Listener] +type=listener +service=Write Service +protocol=MySQLClient +port=4007 +---- + +This allows the clients to direct write requests to port 4007 and read requests to port 4006 of the MaxScale host without the clients needing to understand the configuration of the Master/Slave replication cluster. + +Connections to port 4007 would automatically be directed to the server that is the master for replication at the time connection is opened. Whilst this is a simple mapping to a single server it does give the advantage that the clients have no requirement to track which server is currently the master, devolving responsibility for managing the failover to MaxScale. 
+ +In order for MaxScale to be able to determine the state of these servers the mysqlmon monitor module should be run against the set of servers that comprise the service. + +===== Galera Cluster Configuration + +Although not primarily designed for a multi-master replication setup, it is possible to use the readconnroute in this situation. The readconnroute connection router can be used to balance the connections across a Galera cluster. A special monitor is available that detects if nodes are joined to a Galera Cluster, with the addition of a router option to only route connections to nodes marked as synced. MaxScale can ensure that users are never connected to a node that is not a full cluster member. + +---- +[Galera Service] +type=service +router=readconnroute +router_options=synced +servers=server1,server2,server3,server4 +user=maxscale +auth=thepasswd + +[Galera Listener] +type=listener +service=Galera Service +protocol=MySQLClient +port=3336 +---- + +---- +[Galera Monitor] +type=monitor +module=galeramon +servers=server1,server2,server3,server4 +user=galeramon +passwd=galeramon + + +---- +The specialized Galera monitor can also select one of the node in the cluster as master, the others will be marked as slave. + +These roles are only assigned to synced nodes. + +It then possible to have services/listeners with router_options=master or slave accessing a subset of all galera nodes. + +The “synced” simply means: access all nodes. + +Examples: + +---- +[Galera Master Service] +type=service +router=readconnroute +router_options=master + +[Galera Slave Service] +type=service +router=readconnroute +router_options=slave + +---- +The Master and Slave roles are also available for the Read/Write Split router operation + +==== Readwritesplit + +The readwritesplit is a statement based router that has been designed for use within Master/Slave replication environments. 
It examines every statement, parsing it to determine if the statement falls into one of three categories; + * read only statement + * possible write statement + * session modification statement +Each of these three categories has a different action associated with it. Read only statements are sent to a slave server in the replication cluster. Possible write statements, which may include read statements that have an undeterminable side effect, are sent to the current replication master. Statements that modify the session are sent to all the servers, with the result that is generated by the master server being returned to the user. + +Session modification statements must be replicated as they affect the future results of read and write operations, so they must be executed on all servers that could execute statements on behalf of this client. + +Currently the readwritesplit router module is under development and has the following limitations: + * Connection failover support has not yet been implemented. Client connections will fail if the master server fails over. +===== Master/Slave Replication Setup + +To setup the readwritesplit connection router in a master/slave failover environment is extremely simple, a service definition is required with the router defined for the service and an associated listener. + +The router_options parameter is not required but it can be used to specify how slave(s) are selected. The available option is slave_selection_criteria and possible values are LEAST_BEHIND_MASTER and LEAST_CURRENT_OPERATIONS. + +max_slave_connections is a readwritesplit-only option, which sets the upper limit for the number of slaves a router session can use. max_slave_replication_lag is (currently) another readwritesplit-specific option, which sets maximum allowed lag for slave in seconds. The criteria is checked when router chooses slaves and only slaves having smaller lag are eligible for selection. The lag is not checked after connection phase. 
+ +---- +[Split Service] +type=service +router=readwritesplit +router_options=slave_selection_criteria=LEAST_BEHIND_MASTER +max_slave_connections=50% +max_slave_replication_lag=30 +servers=server1,server2,server3,server4 +user=maxscale +auth=thepasswd + +[Split Listener] +type=listener +service=Split Service +protocol=MySQLClient +port=3336 +---- + +The client would merely connect to port 3336 on the MaxScale host and statements would be directed to the master or slave as appropriate. Determination of the master or slave status may be done via a monitor module within MaxScale or externally. In this latter case the server flags would need to be set via the MaxScale debug interface, in future versions an API will be available for this purpose. + ++++Galera Cluster Configuration+++ + +Master and Slave roles that galera monitor assign to nodes make possible the Read Write split approach to Galera Cluster as well. + +Simply configure a Split Service with galera nodes: + +---- +[Galera Split Service] +type=service +router=readwritesplit +---- +servers=galera_node1,galera_node2,galera_node3 + + +==== Debugcli + +The debugcli is a special case of a statement based router. Rather than direct the statements at an external data source they are handled internally. These statements are simple text commands and the results are the output of debug commands within MaxScale. The service and listener definitions for a debug cli service only differ from other services in that they require no backend server definitions. + +===== Debug CLI Configuration + +The definition of the debug cli service is illustrated below + +---- +[Debug Service] +type=service +router=debugcli + +[Debug Listener] +type=listener +service=Debug Service +protocol=telnetd +port=4442 +---- + +Connections using the telnet protocol to port 4442 of the MaxScale host will result in a new debug CLI session. A default username and password are used for this module, new users may be created using the add user command. 
As soon as any users are explicitly created the default username will no longer continue to work. The default username is admin with a password of skysql. + +The debugcli supports two modes of operation, developer mode and user mode. The mode is set via the router_options parameter of the debugcli. The user mode is more suited to end-users and administrators, whilst the develop mode is explicitly targeted to software developing adding or maintaining the MaxScale code base. Details of the differences between the modes can be found in the debugging guide for MaxScale. The default mode for the debugcli is user mode. The following service definition would enable a developer version of the debugcli. + +---- +[Debug Service] +type=service +router=debugcli +---- +router_options=developer + +It should be noted that both a user and a developer version of the debugcli may be defined within the same instance of MaxScale, however they must be defined as two distinct services, each with a distinct listener. +---- + +[Debug Service] +type=service +router=debugcli +router_options=developer + +[Debug Listener] +type=listener +service=Debug Service +protocol=telnetd +port=4442 + +[Admin Service] +type=service +router=debugcli + +[Admin Listener] +type=listener +service=Debug Service +protocol=telnetd +---- +port=4242 + +==== CLI +The command line interface as used by maxadmin. This is a variant of the debugcli that is built slightly differently so that it may be accessed by the client application maxadmin. The CLI requires the use of the maxscaled protocol. + +===== CLI Configuration +There are two components to the definition required in order to run the command line interface to use with MaxAdmin; a service and a listener. + +The default entries required are shown below. 
+---- + +[CLI] +type=service +router=cli + +[CLI Listener] +type=listener +service=CLI +protocol=maxscaled +address=localhost +port=6603 +---- + +Note that this uses the default port of 6603 and confines the connections to localhost connections only. Remove the address= entry to allow connections from any machine on your network. Changing the port from 6603 will mean that you must allows pass a -p option to the MaxAdmin command. +== +== +== Monitor Modules +Monitor modules are used by MaxScale to internally monitor the state of the backend databases in order to set the server flags for each of those servers. The router modules then use these flags to determine if the particular server is a suitable destination for routing connections for particular query classifications. The monitors are run within separate threads of MaxScale and do not affect the MaxScale performance. + +The use of monitors is optional, it is possible to run MaxScale with external monitoring, in which case arrangements must be made for an external entity to set the status of each of the servers that MaxScale can route to. + +=== Mysqlmon + +The MySQLMon monitor is a simple monitor designed for use with MySQL Master/Slave replication cluster. To execute the mysqlmon monitor an entry as shown below should be added to the MaxScale configuration file. + +---- +[MySQL Monitor] +type=monitor +module=mysqlmon +servers=server1,server2,server3,server4 +---- + +This will monitor the 4 servers; server1, server2, server3 and server4. It will set the status of running or failed and master or slave for each of the servers. + +The monitor uses the username given in the monitor section or the server specific user that is given in the server section to connect to the server. This user must have sufficient permissions on the database to determine the state of replication. The roles that must be granted to this user are REPLICATION SLAVE and REPLICATION CLIENT. 
+ +To create a user that can be used to monitor the state of the cluster, the following commands could be used. + +---- +MariaDB [mysql]> create user 'maxscalemon'@'maxscalehost' identified by 'Ha79hjds'; +Query OK, 0 rows affected (0.01 sec) + +MariaDB [mysql]> grant REPLICATION SLAVE on *.* to 'maxscalemon'@'maxscalehost'; +Query OK, 0 rows affected (0.00 sec) + +MariaDB [mysql]> grant REPLICATION CLIENT on *.* to 'maxscalemon'@'maxscalehost'; +Query OK, 0 rows affected (0.00 sec) + +MariaDB [mysql]> +---- + +Assuming that MaxScale is running on the host maxscalehost. + +=== Galeramon + +The Galeramon monitor is a simple monitor designed for use with MySQL Galera cluster. To execute the galeramon monitor an entry as shown below should be added to the MaxScale configuration file. + +---- +[Galera Monitor] +type=monitor +module=galeramon +servers=server1,server2,server3,server4 +---- + +This will monitor the 4 servers; server1, server2, server3 and server4. It will set the status of running or failed and joined for those servers that reported the Galera JOINED status. + +The user that is configured for use with the Galera monitor must have sufficient privileges to select from the information_schema database and GLOBAL_STATUS table within that database. + +To create a user that can be used to monitor the state of the cluster, the following commands could be used. + +---- +MariaDB [mysql]> create user 'maxscalemon'@'maxscalehost' identified by 'Ha79hjds'; +Query OK, 0 rows affected (0.01 sec) + +MariaDB [mysql]> grant SELECT on INFORMATION_SCHEMA.GLOBAL_STATUS to 'maxscalemon'@'maxscalehost'; +Query OK, 0 rows affected (0.00 sec) + +MariaDB [mysql]> +---- + +Assuming that MaxScale is running on the host maxscalehost. + + +The Galera monitor can also assign Master and Slave roles to the configured nodes: + +among the set of synced servers, the one with the lowest value of ‘wsrep_local_index’ is selected as the current master while the others are slaves. 
+ +This way is possible to configure the node access based not only on ‘synced’ state but even on Master and Slave role enabling the use of Read Write split operation on a Galera cluster and avoiding any possible write conflict. + +Example status for a Galera server node is: + +---- +Server 0x261fe50 (server2) + Server: 192.168.1.101 +---- + Status: Master, Synced, Running + + +== +== +== Filter Modules +Currently four example filters are included in the MaxScale distribution + +|=== +|*Module*|*Description* + +|testfilter|Statement counting Filter - a simple filter that counts the number of SQL statements executed within a session. Results may be viewed via the debug interface. +|qlafilter|Query Logging Filter - a simple query logging filter that write all statements for a session into a log file for that session. +|regexfilter|Query Rewrite Filter - an example of how filters can alter the query contents. This filter allows a regular expression to be defined, along with replacement text that should be substituted for every match of that regular expression. +|tee|A filter that duplicates SQL requests and sends the duplicates to another service within MaxScale. +|=== + +These filters are merely examples of what may be achieved with the filter API and are not sophisticated or consider as suitable for production use, they merely illustrate the functionality possible. + +=== Statement Counting Filter +The statement counting filter is implemented in the module names testfilter and merely keeps a count of the number of SQL statements executed. The filter requires no options to be passed and takes no parameters. The statement count can be viewed via the diagnostic and debug interface of MaxScale. + +In order to add this filter to an existing service create a filter section to name the filter as follows + +---- +[counter] +type=filter +module=testfilter +---- + +Then add the filter to your service by including the filters= parameter in the service section. 
+ ++filters=counter+ + +=== Query Log All Filter +The QLA filter simply writes all SQL statements to a log file along with a timestamp for the statement. An example of the file produced by the QLA filter is shown below + +---- +00:36:04.922 5/06/2014, select @@version_comment limit 1 +00:36:12.663 5/06/2014, SELECT DATABASE() +00:36:12.664 5/06/2014, show databases +00:36:12.665 5/06/2014, show tables +---- + +A new file is created for each client connection, the name of the logfile can be controlled by the use of the router options. No parameters are used by the QLA filter. The filter is implemented by the loadable module qlafilter. + +To add the QLA filter to a service you must create a filter section to name the filter, associated the loadable module and define the filename option. + +---- +[QLA] +type=filter +module=qlafilter +options=/tmp/QueryLog +---- + +Then add the filters= parameter into the service that you wish to log by adding this parameter to the service section + ++filters=QLA+ + +A log file will be created for each client connection, the name of that log file will be /tmp/QueryLog. + +=== Regular Expression Filter +The regular expression filter is a simple text based query rewriting filter. It allows a regular expression to be used to match text in a SQL query and then a string replacement to be made against that match. The filter is implemented by the regexfilter loadable module and is passed two parameters, a match string and a replacement string. + +To add the filter to your service you must first create a filter section to name the filter and give the match and replacement strings. Here we define a filter that will convert to MariaDB 10 command show all slaves status to the older form of show slave status for MariaDB 5.5. 
+ +---- +[slavestatus] +type=filter +module=regexfilter +match=show *all *slaves +replace=show slave +---- + +You must then add this filter to your service by adding the filters= option + ++filters=slavestatus+ + +Another example would be a filter to convert from the MySQL 5.1 create table syntax that used the TYPE keyword to the newer ENGINE keyword. + +---- +[EnginerFilter] +type=filter +module=regexfilter +match=TYPE +replace=ENGINE +---- + +This would then change the SQL sent by a client application written to work with MySQL 5.1 into SQL that was compliant with MySQL 5.5. The statement + ++create table supplier(id integer, name varchar(80)) type=innodb+ + +would be replaced with + ++create table supplier(id integer, name varchar(80)) ENGINE=innodb+ + +before being sent to the server. Note that the text in the match string is case independent. + +=== Tee Filter +The tee filter is a filter module for MaxScale is a “plumbing” fitting in the MaxScale filter toolkit. It can be used in a filter pipeline of a service to make a copy of requests from the client and dispatch a copy of the request to another service within MaxScale. + +The configuration block for the TEE filter requires the minimal filter parameters in it’s section within the MaxScale.cnf file that defines the filter to load and the service to send the duplicates to. + +---- +[ArchieveFilter] +type=filter +module=tee +service=Archieve + +---- +In addition parameters may be added to define patterns to match against to either include or exclude particular SQL statements to be duplicated. You may also define that the filter is only active for connections from a particular source or when a particular user is connected. +== +== +== Encrypting Passwords + +Passwords stored in the MaxScale.cnf file may optionally be encrypted for added security. This is done by creation of an encryption key on installation of MaxScale. 
Encryption keys may be created manually by executing the maxkeys utility with the argument of the filename to store the key. + ++maxkeys $MAXSCALE_HOME/etc/.secrets+ + +Changing the encryption key for MaxScale will invalidate any currently encrypted keys stored in the MaxScale.cnf file. + +=== Creating Encrypted Passwords + +Encrypted passwords are created by executing the maxpasswd command with the password you require to encrypt as an argument. The environment variable MAXSCALE_HOME must be set, or MaxScale must be installed in the default location before maxpasswd can be executed. + +---- +maxpasswd MaxScalePw001 +61DD955512C39A4A8BC4BB1E5F116705 +---- + +The output of the maxpasswd command is a hexadecimal string, this should be inserted into the MaxScale.cnf file in place of the ordinary, plain text, password. MaxScale will determine this as an encrypted password and automatically decrypt it before sending it to the database server. + +---- +[Split Service] +type=service +router=readwritesplit +servers=server1,server2,server3,server4 +user=maxscale +password=61DD955512C39A4A8BC4BB1E5F116705 +---- +== +== +== Configuration Updates +The current MaxScale configuration may be updated by editing the configuration file and then forcing MaxScale to reread the configuration file. To force MaxScale to reread the configuration file a SIGTERM signal is sent to the MaxScale process. + +Some changes in configuration can not be dynamically changed and require a complete restart of MaxScale, whilst others will take some time to be applied. + +=== Limitations +Services that are removed via the configuration update mechanism can not be physically removed from MaxScale until there are no longer any connections using the service. + +When the number of threads is decreased the threads will not actually be terminated until such time as they complete the current operation of that thread. + +Monitors can not be completely removed from the running MaxScale. 
+== +== +== Authentication +MySQL uses username, passwords and the client host in order to authenticate a user, so a typical user would be defined as user X at host Y and would be given a password to connect. MaxScale uses exactly the same rules as MySQL when users connect to the MaxScale instance, i.e. it will check the address from which the client is connecting and treat this in exactly the same way that MySQL would. MaxScale will pull the authentication data from one of the backend servers and use this to match the incoming connections, the assumption being that all the backend servers for a particular service will share the same set of user credentials. + +It is important to understand, however, that when MaxScale itself makes connections to the backend servers the backend server will see all connections as originating from the host that runs MaxScale and not the original host from which the client connected to MaxScale. Therefore the backend servers should be configured to allow connections from the MaxScale host for every user that can connect from any host. Since there is only a single password within the database server for a given host, this limits the configuration such that a given user name must have the same password for every host from which they can connect. + +To clarify, if a user X is defined as using password _pass1_ from host a and _pass2_ from host b then there must be an entry in the user table for user X form the MaxScale host, say _pass1_. + +This would result in rows in the user table as follows +|=== +|*Username*|*Password*|*Client Host* + +|X|pass1|a +|X|pass2|b +|X|pass1|MaxScale +|=== + + +In this case the user X would be able to connect to MaxScale from host a giving the password of _pass1_. In addition MaxScale would be able to create connections for this user to the backend servers using the username X and password _pass1_, since the MaxScale host is also defined to have password _pass1_. 
User X would not however be able to connect from host b since they would need to provide the password _pass2_ in order to connect to MaxScale, but then MaxScale would not be able to connect to the backends as it would also use the password _pass2_ for these connections. + +=== Wildcard Hosts + +Hostname mapping in MaxScale works in exactly the same way as for MySQL, if the wildcard is used for the host then any host other than the localhost (127.0.0.1) will match. It is important to consider that the localhost check will be performed at the MaxScale level and at the MySQL server level. + +If MaxScale and the databases are on separate hosts there are two important changes in behaviour to consider: + + . Clients running on the same machine as the backend database now may access the database using the wildcard entry. The localhost check between the client and MaxScale will allow the use of the wildcard, since the client is not running on the MaxScale host. Also the wildcard entry can be used on the database host as MaxScale is making that connection and it is not running on the same host as the database. + . Clients running on the same host as MaxScale can not access the database via MaxScale using the wildcard entry since the connection to MaxScale will be from the localhost. These clients are able to access the database directly, as they will use the wildcard entry. + +If MaxScale is running on the same host as one or more of the database nodes to which it is acting as a proxy then the wildcard host entries can be used to connect to MaxScale but not to connect onwards to the database running on the same node. + +In all these cases the issue may be solved by adding an explicit entry for the localhost address that has the same password as the wildcard entry. 
This may be done using a statement as below for each of the databases that are required: + ++MariaDB [mysql]> GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, DROP ON employee.* TO 'user1'@'localhost' IDENTIFIED BY 'xxx';+ + +Query OK, 0 rows affected (0.00 sec) + +=== Limitations + +At the time of writing the authentication mechanism within MaxScale does not support IPV6 address matching in connection rules. This is also in line with the current protocol modules that do not support IPV6. + +Partial address matching, such as 10.% is also not supported in the current version of MaxScale. +== +== Error Reporting +MaxScale is designed to be executed as a service, therefore all error reports, including configuration errors, are written to the MaxScale error log file. MaxScale will log to a set of files in the directory $MAXSCALE_HOME/log, the only exception to this is if the log directory is not writable, in which case a message is sent to the standard error descriptor. + + + +Troubleshooting + +MaxScale binds on TCP ports and UNIX sockets as well. + +If there is a local firewall in the server where MaxScale is installed, the IP and port must be configured in order to receive connections from outside. + +If the firewall is a network facility among all the involved servers, a configuration update is required as well. + +Example: + +---- +[Galera Listener] +type=listener +---- + address=192.168.3.33 + +---- + port=4408 + socket=/servers/maxscale/galera.sock +---- + +TCP/IP Traffic must be permitted to 192.168.3.33 port 4408 + +For Unix socket, the socket file path (example: /servers/maxscale/galera.sock) must be writable by the Unix user MaxScale runs as.
+ + diff --git a/Documentation/experimental/ConfigurationGuide.md b/Documentation/experimental/ConfigurationGuide.md new file mode 100644 index 000000000..5787e04ac --- /dev/null +++ b/Documentation/experimental/ConfigurationGuide.md @@ -0,0 +1,1228 @@ +MaxScale + +Configuration & Usage Scenarios + +Mark Riddoch + +Last Updated: 2nd July 2014 + +# Contents + +[[TOC]] + +# Document History + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
DateChangeWho
21st July 2013Initial versionMark Riddoch
23rd July 2013Addition of default user and password for a monitor and discussion of monitor user requirements +New monitor documented for Galera clusters +Addition of example Galera cluster configurationMark Riddoch
13th November 2013state for Galera Monitor is "synced"Massimiliano Pinto
2nd December 2013Updated the description of the command line arguments to match the code updates. +Improved descriptions and general documentation. +Enhanced example configurationsMark Riddoch
6th February 2014Added “enable_root_user” as a service parameterMassimiliano Pinto
7th February 2014Addition of bind address information +Clarification of user configuration required for monitoring users and the user needed to fetch the user dataMark Riddoch
3rd March 2014MySQL authentication with hostnamesMassimiliano Pinto
3rd March 2014Addition of section that describes authentication requirements and the rules for creating user credentialsMark Riddoch
28th March 2014Unix socket supportMassimiliano Pinto
8th May 2014Added “version_string” parameter in serviceMassimiliano Pinto
29th May 2014Added troubleshooting sectionMassimiliano Pinto
2nd June 2014Correction of some typos, clarification of the meaning of session modification statements and the default user for the CLI. +Addition of debugcli configuration option for developer and user modes.Mark Riddoch
4th June 2014Addition of “monitor_interval” for monitorsMassimiliano Pinto
6th June 2014Addition of filters sectionsMark Riddoch
27th June 2014Addition of server weighting, the configuration for the maxadmin clientMark Riddoch
2nd July 2014Addition of new readwritesplit router options with description and examples.Vilho Raatikka
+ + +# Introduction + +The purpose of this document is to describe how to configure MaxScale and to discuss some possible usage scenarios for MaxScale. MaxScale is designed with flexibility in mind, and consists of an event processing core with various support functions and plugin modules that tailor the behaviour of the MaxScale itself. + +## Terms + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
TermDescription
serviceA service represents a set of databases with a specific access mechanism that is offered to clients of MaxScale. The access mechanism defines the algorithm that MaxScale will use to direct particular requests to the individual databases.
serverA server represents an individual database server to which a client can be connected via MaxScale.
routerA router is a module within MaxScale that will route client requests to the various database servers which MaxScale provides a service interface to.
connection routingConnection routing is a method of handling requests in which MaxScale will accept connections from a client and route data on that connection to a single database using a single connection. Connection based routing will not examine individual requests on a connection and it will not move that connection once it is established.
statement routingStatement routing is a method of handling requests in which each request within a connection will be handled individually. Requests may be sent to one or more servers and connections may be dynamically added or removed from the session.
protocolA protocol is a module of software that is used to communicate with another software entity within the system. MaxScale supports the dynamic loading of protocol modules to allow for increased flexibility.
moduleA module is a separate code entity that may be loaded dynamically into MaxScale to increase the available functionality. Modules are implemented as run-time loadable shared objects.
monitorA monitor is a module that can be executed within MaxScale to monitor the state of a set of databases. The use of an internal monitor is optional, monitoring may be performed externally to MaxScale.
listenerA listener is the network endpoint that is used to listen for connections to MaxScale from the client applications. A listener is associated to a single service, however a service may have many listeners.
connection failoverWhen a connection currently being used between MaxScale and the database server fails a replacement will be automatically created to another server by MaxScale without client intervention
backend databaseA term used to refer to a database that sits behind MaxScale and is accessed by applications via MaxScale.
filterA module that can be placed between the client and the MaxScale router module. All client data passes through the filter module and may be examined or modified by the filter modules. + +Filters may be chained together to form processing pipelines.
+ + +# Configuration + +The MaxScale configuration is read from a file which can be located in a number of places, MaxScale will search for the configuration file in a number of locations. + +1. If the environment variable MAXSCALE_HOME is set then MaxScale will look for a configuration file called MaxScale.cnf in the directory $MAXSCALE_HOME/etc + +2. If MAXSCALE_HOME is not set or the configuration file is not in the location above MaxScale will look for a file in /etc/MaxScale.cnf + +Alternatively MaxScale can be started with the -c flag and the path of the MaxScale home directory tree. + +An explicit path to a configuration file can be passed by using the -f option to MaxScale. + +The configuration file itself is based on the "ini" file format and consists of various sections that are used to build the configuration, these sections define services, servers, listeners, monitors and global settings. + +## Global Settings + +The global settings, in a section named [MaxScale], allow various parameters that affect MaxScale as a whole to be tuned. Currently the only setting that is supported is the number of threads to use to handle the network traffic. MaxScale will also accept the section name of [gateway] for global settings. This is for backward compatibility with versions prior to the naming of MaxScale. + +### Threads + +To control the number of threads that poll for network traffic set the parameter threads to a number. It is recommended that you start with a single thread and add more as you find the performance is not satisfactory. MaxScale is implemented to be very thread efficient, so a small number of threads is usually adequate to support reasonably heavy workloads. Adding more threads may not improve performance and can consume resources needlessly.
+ +`# Valid options are:` + +`# threads=` + +`[MaxScale]` + +`threads=1` + +It should be noted that additional threads will be created to execute other internal services within MaxScale, this setting is merely used to configure the number of threads that will be used to manage the user connections. + +## Service + +A service represents the database service that MaxScale offers to the clients. In general a service consists of a set of backend database servers and a routing algorithm that determines how MaxScale decides to send statements or route connections to those backend servers. + +A service may be considered as a virtual database server that MaxScale makes available to its clients. + +Several different services may be defined using the same set of backend servers. For example a connection based routing service might be used by clients that already performed internal read/write splitting, whilst a different statement based router may be used by clients that are not written with this functionality in place. Both sets of applications could access the same data in the same databases. + +A service is identified by a service name, which is the name of the configuration file section and a type parameter of service + +`[Test Service]` + +`type=service` + +In order for MaxScale to forward any requests it must have at least one service defined within the configuration file. The definition of a service alone is not enough to allow MaxScale to forward requests however, the service is merely present to link together the other configuration elements. + +### Router + +The router parameter of a service defines the name of the router module that will be used to implement the routing algorithm between the client of MaxScale and the backend databases. Additionally routers may also be passed a comma separated list of options that are used to control the behaviour of the routing algorithm. The two parameters that control the routing choice are `router` and `router_options`. 
The router options are specific to a particular router and are used to modify the behaviour of the router. The read connection router can be passed options of master, slave or synced, an example of configuring a service to use this router and limiting the choice of servers to those in slave state would be as follows. + +`router=readconnroute` + +`router_options=slave` + +To change the router to connect on to servers in the master state as well as slave servers, the router options can be modified to include the master state. + +`router=readconnroute` + +`router_options=master,slave` + +A more complete description of router options and what is available for a given router is included with the documentation of the router itself. + +### Filters + +The `filters` option allow a set of filters to be defined for a service; requests from the client are passed through these filters before being sent to the router for dispatch to the backend server. The filters parameter takes one or more filter names, as defined within the filter definition section of the configuration file. Multiple filters are separated using the | character. + +`filters=counter | QLA` + +The requests pass through the filters from left to right in the order defined in the configuration parameter. + +### Servers + +The servers parameter in a service definition provides a comma separated list of the backend servers that comprise the service. The server names are those used in the name section of a block with a type parameter of server (see below). + +`servers=server1,server2,server3` + +### User + +The `user` parameter, along with the `passwd` parameter are used to define the credentials used to connect to the backend servers to extract the list of database users from the backend database that is used for the client authentication. 
+ +`user=maxscale` + +`passwd=Mhu87p2D` + +Authentication of incoming connections is performed by MaxScale itself rather than by the database server to which the client is connected. The client will authenticate itself with MaxScale, using the username, hostname and password information that MaxScale has extracted from the backend database servers. For a detailed discussion of how this impacts the authentication process please see the "Authentication" section below. + +The host matching criteria is restricted to IPv4, IPv6 will be added in a future release. + +Existing user configuration in the backend databases must be checked and may be updated before successful MaxScale authentication: + +In order for MaxScale to obtain all the data it must be given a username it can use to connect to the database and retrieve that data. This is the parameter that gives MaxScale the username to use for this purpose. + +The account used must be able to select from the mysql.user table, the following is an example showing how to create this user. + +`MariaDB [mysql]> create user 'maxscale'@'maxscalehost' identified by 'Mhu87p2D';` + +`Query OK, 0 rows affected (0.01 sec)` + +`MariaDB [mysql]> grant SELECT on mysql.user to 'maxscale'@'maxscalehost';` + +`Query OK, 0 rows affected (0.00 sec)` + +### Passwd + +The auth parameter provides the password information for the above user and may be either a plain text password or it may be an encrypted password. See the section on encrypting passwords for use in the MaxScale.cnf file. This user must be capable of connecting to the backend database and executing the SQL statement "SELECT user, host, password FROM mysql.user". + +**enable_root_user** + +This parameter controls the ability of the root user to connect to MaxScale and hence onwards to the backend servers via MaxScale. + +The default value is 0, disabling the ability of the root user to connect to MaxScale. 
+ +Example for enabling root user: + +`enable_root_user=1` + +Values of "on" or “true” may also be given to enable the root user and “off” or “false” may be given to disable the use of the root user. + +`enable_root_user=true` + +**version_string** + +This parameter sets a custom version string that is sent in the MySQL Handshake from MaxScale to clients. + +Example: + +`version_string=5.5.37-MariaDB-RWsplit` + +If not set, the default value is the server version of the embedded MySQL/MariaDB library. Example: `5.5.35-MariaDB` + +### weightby + +The weightby parameter is used in conjunction with server parameters in order to control the load balancing applied in the router in use by the service. This allows varying weights to be applied to each server to create a non-uniform distribution of the load amongst the servers. + +An example of this might be to define a parameter for each server that represents the amount of resource available on the server, we could call this serversize. Every server should then have a serversize parameter set for the server. + +`serversize=10` + +The service would then have the parameter weightby set. If there are 4 servers defined in the service, serverA, serverB, serverC and serverD, with the serversize set as shown in the table below, the connections would balanced using the percentages in this table. + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Serverserversize% connections
serverA1018%
serverB1527%
serverC1018%
serverD2036%
+ + +## Server + +Server sections are used to define the backend database servers that can be formed into a service. A server may be a member of one or more services within MaxScale. Servers are identified by a server name which is the section name in the configuration file. Servers have a type parameter of server, plus address port and protocol parameters. + +`[server1]` + +`type=server` + +`address=127.0.0.1` + +`port=3000` + +`protocol=MySQLBackend` + +### Address + +The IP address or hostname of the machine running the database server that is being defined. MaxScale will use this address to connect to the backend database server. + +### Port + +The port on which the database listens for incoming connections. MaxScale will use this port to connect to the database server. + +### Protocol + +The name for the protocol module to use to connect MaxScale to the database. Currently only one backend protocol is supported, the MySQLBackend module. + +### Monitoruser + +The monitor has a username and password that is used to connect to all servers for monitoring purposes, this may be overridden by supplying a monitoruser statement for each individual server + +`monitoruser=mymonitoruser` + +### MonitorPw + +The monitor has a username and password that is used to connect to all servers for monitoring purposes, this may be overridden by supplying a monpasswd statement for the individual servers + +`monitorpw=mymonitorpasswd` + +The monpasswd parameter may be either a plain text password or it may be an encrypted password. See the section on encrypting passwords for use in the MaxScale.cnf file. + +## Listener + +The listener defines a port and protocol pair that is used to listen for connections to a service. A service may have multiple listeners associated with it, either to support multiple protocols or multiple ports. 
As with other elements of the configuration the section name is the listener name and a type parameter is used to identify the section as a listener definition. + +`[Test Listener]` + +`type=listener` + +`service=Test Service` + +`protocol=MySQLClient` + +`address=localhost` + +`port=4008` + +`socket=/tmp/testlistener.sock` + +### Service + +The service to which the listener is associated. This is the name of a service that is defined elsewhere in the configuration file. + +### Protocol + +The name of the protocol module that is used for the communication between the client and MaxScale itself. + +### Address + +The address option sets the address that will be used to bind the listening socket. The address may be specified as an IP address in ‘dot notation’ or as a hostname. If the address option is not included in the listener definition the listener will bind to all network interfaces. + +### Port + +The port to use to listen for incoming connections to MaxScale from the clients. If the port is omitted from the configuration a default port for the protocol will be used. + +**Socket** + +The socket option may be included in a listener definition, this configures the listener to use Unix domain sockets to listen for incoming connections. The parameter value given is the name of the socket to use. + +If a socket option and an address option is given then the listener will listen on both the specific IP address and the Unix socket. + +## Filter + +Filters provide a means to manipulate or process requests as they pass through MaxScale between the client side protocol and the query router. A filter should be defined in a section with a type of filter. + +`[QLA]` + +`type=filter` + +`module=qlafilter` + +`options=/tmp/QueryLog` + +The section name may then be used in one or more services by using the `filters=` parameter in the service section. 
In order to use the above filter for a service called "QLA Service", an entry of the following form would exist for that service. + +`[QLA Service]` + +`type=service` + +`router=readconnroute` + +`router_options=slave` + +`servers=server1,server2,server3,server4` + +`user=massi` + +`passwd=6628C50E07CCE1F0392EDEEB9D1203F3` + +`filters=QLA` + +See the Services section for more details on how to configure the various options of a service. + +### Module + +The module parameter defines the name of the loadable module that implements the filter. + +### Options + +The options parameter is used to pass options to the filter to control the actions the filter will perform. The values that can be passed differ between filter implementation, the inclusion of an options parameter is optional. + +### Other Parameters + +Any other parameters present in the filters section will be passed to the filter to be interpreted by the filter. An example of this is the regexfilter that requires the two parameters `match` and `replace` + +`[regex]` + +`type=filter` + +`module=regexfilter` + +`match=form` + +`replace=from` + +## Monitor + +In order for the various router modules to function correctly they require information about the state of the servers that are part of the service they provide. MaxScale has the ability to internally monitor the state of the back-end database servers or that state may be feed into MaxScale from external monitoring systems. If automated monitoring and failover of services is required this is achieved by running a monitor module that is designed for the particular database architecture that is in use. + +Monitors are defined in much the same way as other elements in the configuration file, with the section name being the name of the monitor instance and the type being set to monitor. 
+ +`[MySQL Monitor]` + +`type=monitor` + +`module=mysqlmon` + +`servers=server1,server2,server3` + +`user=dbmonitoruser` + +`passwd=dbmonitorpwd` + +`monitor_interval=8000` + +### Module + +The module parameter defines the name of the loadable module that implements the monitor. This module is loaded and executed on a separate thread within MaxScale. + +### Servers + +The servers parameter is a comma separated list of server names to monitor, these are the names defined elsewhere in the configuration file. The set of servers monitored by a single monitor need not be the same as the set of servers used within any particular server, a single monitor instance may monitor servers in multiple servers. + +### User + +The user parameter defines the username that the monitor will use to connect to the monitored databases. Depending on the monitoring module used this user will require specific privileges in order to determine the state of the nodes, details of those privileges can be found in the sections on each of the monitor modules. + +Individual servers may define override values for the user and password the monitor uses by setting the monuser and monpasswd parameters in the server section. + +### Passwd + +The password parameter may be either a plain text password or it may be an encrypted password. See the section on encrypting passwords for use in the MaxScale.cnf file. + +**Monitor_interval** + +The monitor_interval parameter sets the sampling interval in milliseconds for each monitor, the default value is 10000 milliseconds. + +# Protocol Modules + +The protocols supported by MaxScale are implemented as external modules that are loaded dynamically into the MaxScale core. These modules reside in the directory $MAXSCALE_HOME/module, if the environment variable $MAXSCALE_HOME is not set it defaults to /usr/local/skysql/MaxScale. It may also be set by passing the -c option on the MaxScale command line. 
+ +## MySQLClient + +This is the implementation of the MySQL protocol that is used by clients of MaxScale to connect to MaxScale. + +## MySQLBackend + +The MySQLBackend protocol module is the implementation of the protocol that MaxScale uses to connect to the backend MySQL, MariaDB and Percona Server databases. This implementation is tailored for the MaxScale to MySQL Database traffic and is not a general purpose implementation of the MySQL protocol. + +## Telnetd + +The telnetd protocol module is used for connections to MaxScale itself for the purposes of creating interactive user sessions with the MaxScale instance itself. Currently this is used in conjunction with a special router implementation, the debugcli. + +## maxscaled + +The protocol used by the maxadmin client application in order to connect to MaxScale and access the command line interface. + +## HTTPD + +This protocol module is currently still under development, it provides a means to create HTTP connections to MaxScale for use by web browsers or RESTful API clients. + +# Router Modules + +The main task of MaxScale is to accept database connections from client applications and route the connections or the statements sent over those connections to the various services supported by MaxScale. + +There are two flavours of routing that MaxScale can perform, connection based routing and statement based routing. These each have their own characteristics and costs associated with them. + +## Connection Based Routing + +Connection based routing is a mechanism by which MaxScale will, for each incoming connection, decide on an appropriate outbound server and will forward all statements to that server without examining the internals of the statement. Once an inbound connection is associated to a particular backend database it will remain connected to that server until the connection is closed or the server fails.
+ +## Statement Based Routing + +Statement based routing is somewhat different, the routing modules examine every statement the client sends and determines, on a per statement basis, which of the set of backend servers in the service is best to execute the statement. This gives better dynamic balancing of the load within the cluster but comes at a cost. The query router must understand the statement that is being routed and will typically need to parse the statement in order to achieve this. This parsing within the router adds a significant overhead to the cost of routing and makes this type of router only really suitable for loads in which the gains outweigh this added cost. + +## Available Routing Modules + +Currently a small number of query routers are available, these are in different stages of completion and offer different facilities. + +### Readconnroute + +This is a connection based query router that was originally targeted at environments in which the clients already performed splitting of read and write queries into separate connections. + +Whenever a new connection is received the router will examine the state of all the servers that form part of the service and route the connection to the server with least connections currently that matches the filter constraints given in the router options. This results in a balancing of the active connections, however different connections may have different lifetimes and the connections may become unbalanced when later viewed. + +The readconnroute router can be configured to balance the connections from the clients across all the backend servers that are running, just those backend servers that are currently replication slaves or those that are replication masters when routing to a master slave replication environment. When a Galera cluster environment is in use the servers can be filtered to just the set that are part of the cluster and in the ‘synced’ state.
These options are configurable via the router_options that can be set within a service. The router_option strings supported are "master", “slave” and “synced”. + +#### Master/Slave Replication Setup + +To setup MaxScale to route connections evenly between all the current slave servers in a replication cluster, a service entry of the form shown below is required. + +`[Read Service]` + +`type=service` + +`router=readconnroute` + +`router_options=slave` + +`servers=server1,server2,server3,server4` + +`user=maxscale` + +`auth=thepasswd` + +With the addition of a listener for this service, which defines the port and protocol that MaxScale uses + +`[Read Listener]` + +`type=listener` + +`service=Read Service` + +`protocol=MySQLClient` + +`port=4006` + +the client can now connect to port 4006 on the host which is running MaxScale. Statements sent using this connection will then be routed to one of the slaves in the server set defined in the Read Service. Exactly which is selected will be determined by balancing the number of connections to each of those whose current state is "slave". + +Altering the router options to be `slave, master` would result in the connections being balanced between all the servers within the cluster. + +It is assumed that the client will have a separate connection to the master server, however this can be routed via MaxScale, allowing MaxScale to manage the determination of which server is master. To do this you would add a second service and listener definition for the master server. 
+ +`[Write Service]` + +`type=service` + +`router=readconnroute` + +`router_options=master` + +`servers=server1,server2,server3,server4` + +`user=maxscale` + +`auth=thepasswd` + +`[Write Listener]` + +`type=listener` + +`service=Write Service` + +`protocol=MySQLClient` + +`port=4007` + +This allows the clients to direct write requests to port 4007 and read requests to port 4006 of the MaxScale host without the clients needing to understand the configuration of the Master/Slave replication cluster. + +Connections to port 4007 would automatically be directed to the server that is the master for replication at the time connection is opened. Whilst this is a simple mapping to a single server it does give the advantage that the clients have no requirement to track which server is currently the master, devolving responsibility for managing the failover to MaxScale. + +In order for MaxScale to be able to determine the state of these servers the mysqlmon monitor module should be run against the set of servers that comprise the service. + +#### Galera Cluster Configuration + +Although not primarily designed for a multi-master replication setup, it is possible to use the readconnroute in this situation. The readconnroute connection router can be used to balance the connections across a Galera cluster. A special monitor is available that detects if nodes are joined to a Galera Cluster, with the addition of a router option to only route connections to nodes marked as synced. MaxScale can ensure that users are never connected to a node that is not a full cluster member. 
+ +`[Galera Service]` + +`type=service` + +`router=readconnroute` + +`router_options=synced` + +`servers=server1,server2,server3,server4` + +`user=maxscale` + +`auth=thepasswd` + +`[Galera Listener]` + +`type=listener` + +`service=Galera Service` + +`protocol=MySQLClient` + +`port=3336` + +`[Galera Monitor]` + +`type=monitor` + +`module=galeramon` + +`servers=server1,server2,server3,server4` + +`user=galeramon` + +`passwd=galeramon` + +The specialized Galera monitor can also select one of the node in the cluster as master, the others will be marked as slave. + +These roles are only assigned to synced nodes. + +It then possible to have services/listeners with router_options=master or slave accessing a subset of all galera nodes. + +The "synced" simply means: access all nodes. + +Examples: + +`[Galera Master Service]` + +`type=service` + +`router=readconnroute` + +`router_options=master` + +`[Galera Slave Service]` + +`type=service` + +`router=readconnroute` + +`router_options=slave` + +The Master and Slave roles are also available for the Read/Write Split router operation + +### Readwritesplit + +The readwritesplit is a statement based router that has been designed for use within Master/Slave replication environments. It examines every statement, parsing it to determine if the statement falls into one of three categories; + +* read only statement + +* possible write statement + +* session modification statement (NOTE: A session modification statement is any statement that is executed that may affect the behaviour of subsequent statements within the current connection. Examples of such statements are the USE SQL statement or a SET statement using the SESSION scope. PREPARE STMT clauses are session statements in MaxScale since they are executed in every backend server.) + +Each of these three categories has a different action associated with it. Read only statements are sent to a slave server in the replication cluster. 
Possible write statements, which may include read statements that have an undeterminable side effect, are sent to the current replication master. Statements that modify the session are sent to all the servers, with the result that is generated by the master server being returned to the user. + +Session modification statements must be replicated as they affect the future results of read and write operations, so they must be executed on all servers that could execute statements on behalf of this client. + +Currently the readwritesplit router module is under development and has the following limitations: + +* Connection failover support has not yet been implemented. Client connections will fail if the master server fails over. + +#### Master/Slave Replication Setup + +To setup the readwritesplit connection router in a master/slave failover environment is extremely simple, a service definition is required with the router defined for the service and an associated listener. + +The router_options parameter is not required but it can be used to specify how slave(s) are selected. Available option is `slave_selection_criteria` and possible value are `LEAST_BEHIND_MASTER` and `LEAST_CURRENT_OPERATIONS`. + +`max_slave_connections `is a readwritesplit-only option, which sets the upper limit for the number of slaves a router session can use. `max_slave_replication_lag` is (currently) another readwritesplit-specific option, which sets maximum allowed lag for slave in seconds. The criteria is checked when router chooses slaves and only slaves having smaller lag are eligible for selection. The lag is not checked after connection phase. 
+ +`[Split Service]` + +`type=service` + +`router=readwritesplit` + +`router_options=slave_selection_criteria=LEAST_BEHIND_MASTER` + +`max_slave_connections=50%` + +`max_slave_replication_lag=30` + +`servers=server1,server2,server3,server4` + +`user=maxscale` + +`auth=thepasswd` + +`[Split Listener]` + +`type=listener` + +`service=Split Service` + +`protocol=MySQLClient` + +`port=3336` + +The client would merely connect to port 3336 on the MaxScale host and statements would be directed to the master or slave as appropriate. Determination of the master or slave status may be done via a monitor module within MaxScale or externally. In this latter case the server flags would need to be set via the MaxScale debug interface, in future versions an API will be available for this purpose. + +Galera Cluster Configuration + +Master and Slave roles that galera monitor assign to nodes make possible the Read Write split approach to Galera Cluster as well. + +Simply configure a Split Service with galera nodes: + +`[Galera Split Service]` + +`type=service` + +`router=readwritesplit` + +`servers=galera_node1,galera_node2,galera_node3` + +### Debugcli + +The debugcli is a special case of a statement based router. Rather than direct the statements at an external data source they are handled internally. These statements are simple text commands and the results are the output of debug commands within MaxScale. The service and listener definitions for a debug cli service only differ from other services in that they require no backend server definitions. + +#### Debug CLI Configuration + +The definition of the debug cli service is illustrated below + +`[Debug Service]` + +`type=service` + +`router=debugcli` + +`[Debug Listener]` + +`type=listener` + +`service=Debug Service` + +`protocol=telnetd` + +`port=4442` + +Connections using the telnet protocol to port 4442 of the MaxScale host will result in a new debug CLI session. 
A default username and password are used for this module, new users may be created using the `add user` command. As soon as any users are explicitly created the default username will no longer continue to work. The default username is admin with a password of skysql. + +The debugcli supports two modes of operation, developer mode and user mode. The mode is set via the router_options parameter of the debugcli. The user mode is more suited to end-users and administrators, whilst the developer mode is explicitly targeted to software developers adding or maintaining the MaxScale code base. Details of the differences between the modes can be found in the debugging guide for MaxScale. The default mode for the debugcli is user mode. The following service definition would enable a developer version of the debugcli. + +`[Debug Service]` + +`type=service` + +`router=debugcli` + +`router_options=developer` + +It should be noted that both a user and a developer version of the debugcli may be defined within the same instance of MaxScale, however they must be defined as two distinct services, each with a distinct listener. + +`[Debug Service]` + +`type=service` + +`router=debugcli` + +`router_options=developer` + +`[Debug Listener]` + +`type=listener` + +`service=Debug Service` + +`protocol=telnetd` + +`port=4442` + +`[Admin Service]` + +`type=service` + +`router=debugcli` + +`[Admin Listener]` + +`type=listener` + +`service=Admin Service` + +`protocol=telnetd` + +`port=4242` + +### CLI + +The command line interface as used by maxadmin. This is a variant of the debugcli that is built slightly differently so that it may be accessed by the client application maxadmin. The CLI requires the use of the maxscaled protocol. + +#### CLI Configuration + +There are two components to the definition required in order to run the command line interface to use with MaxAdmin; a service and a listener. + +The default entries required are shown below. 
+ +`[CLI]` + +`type=service` + +`router=cli` + +`[CLI Listener]` + +`type=listener` + +`service=CLI` + +`protocol=maxscaled` + +`address=localhost` + +`port=6603` + +Note that this uses the default port of 6603 and confines the connections to localhost connections only. Remove the `address= `entry to allow connections from any machine on your network. Changing the port from 6603 will mean that you must always pass a -p option to the MaxAdmin command. + +# Monitor Modules + +Monitor modules are used by MaxScale to internally monitor the state of the backend databases in order to set the server flags for each of those servers. The router modules then use these flags to determine if the particular server is a suitable destination for routing connections for particular query classifications. The monitors are run within separate threads of MaxScale and do not affect the MaxScale performance. + +The use of monitors is optional, it is possible to run MaxScale with external monitoring, in which case arrangements must be made for an external entity to set the status of each of the servers that MaxScale can route to. + +## Mysqlmon + +The MySQLMon monitor is a simple monitor designed for use with MySQL Master/Slave replication cluster. To execute the mysqlmon monitor an entry as shown below should be added to the MaxScale configuration file. + +`[MySQL Monitor]` + +`type=monitor` + +`module=mysqlmon` + +`servers=server1,server2,server3,server4` + +This will monitor the 4 servers; server1, server2, server3 and server4. It will set the status of running or failed and master or slave for each of the servers. + +The monitor uses the username given in the monitor section or the server specific user that is given in the server section to connect to the server. This user must have sufficient permissions on the database to determine the state of replication. The roles that must be granted to this user are REPLICATION SLAVE and REPLICATION CLIENT. 
+ +To create a user that can be used to monitor the state of the cluster, the following commands could be used. + +`MariaDB [mysql]> create user 'maxscalemon'@'maxscalehost' identified by 'Ha79hjds';` + +`Query OK, 0 rows affected (0.01 sec)` + +`MariaDB [mysql]> grant REPLICATION SLAVE on *.* to 'maxscalemon'@'maxscalehost';` + +`Query OK, 0 rows affected (0.00 sec)` + +`MariaDB [mysql]> grant REPLICATION CLIENT on *.* to 'maxscalemon'@'maxscalehost';` + +`Query OK, 0 rows affected (0.00 sec)` + +`MariaDB [mysql]> ` + +Assuming that MaxScale is running on the host maxscalehost. + +## Galeramon + +The Galeramon monitor is a simple monitor designed for use with MySQL Galera cluster. To execute the galeramon monitor an entry as shown below should be added to the MaxScale configuration file. + +`[Galera Monitor]` + +`type=monitor` + +`module=galeramon` + +`servers=server1,server2,server3,server4` + +This will monitor the 4 servers; server1, server2, server3 and server4. It will set the status of running or failed and joined for those servers that reported the Galera JOINED status. + +The user that is configured for use with the Galera monitor must have sufficient privileges to select from the information_schema database and GLOBAL_STATUS table within that database. + +To create a user that can be used to monitor the state of the cluster, the following commands could be used. + +`MariaDB [mysql]> create user 'maxscalemon'@'maxscalehost' identified by 'Ha79hjds';` + +`Query OK, 0 rows affected (0.01 sec)` + +`MariaDB [mysql]> grant SELECT on INFORMATION_SCHEMA.GLOBAL_STATUS to 'maxscalemon'@'maxscalehost';` + +`Query OK, 0 rows affected (0.00 sec)` + +`MariaDB [mysql]> ` + +Assuming that MaxScale is running on the host maxscalehost. + +The Galera monitor can also assign Master and Slave roles to the configured nodes: + +among the set of synced servers, the one with the lowest value of ‘wsrep_local_index’ is selected as the current master while the others are slaves. 
+ +This way is possible to configure the node access based not only on ‘synced’ state but even on Master and Slave role enabling the use of Read Write split operation on a Galera cluster and avoiding any possible write conflict. + +Example status for a Galera server node is: + +`Server 0x261fe50 (server2)` + +` Server: 192.168.1.101` + +` Status: Master, Synced, Running` + +# Filter Modules + +Currently four example filters are included in the MaxScale distribution + + + + + + + + + + + + + + + + + + + + + + +
ModuleDescription
testfilterStatement counting Filter - a simple filter that counts the number of SQL statements executed within a session. Results may be viewed via the debug interface.
qlafilterQuery Logging Filter - a simple query logging filter that write all statements for a session into a log file for that session.
regexfilterQuery Rewrite Filter - an example of how filters can alter the query contents. This filter allows a regular expression to be defined, along with replacement text that should be substituted for every match of that regular expression.
teeA filter that duplicates SQL requests and sends the duplicates to another service within MaxScale.
+ + +These filters are merely examples of what may be achieved with the filter API and are not sophisticated or consider as suitable for production use, they merely illustrate the functionality possible. + +## Statement Counting Filter + +The statement counting filter is implemented in the module names `testfilter` and merely keeps a count of the number of SQL statements executed. The filter requires no options to be passed and takes no parameters. The statement count can be viewed via the diagnostic and debug interface of MaxScale. + +In order to add this filter to an existing service create a filter section to name the filter as follows + +`[counter]` + +`type=filter` + +`module=testfilter` + +Then add the filter to your service by including the filters= parameter in the service section. + +`filters=counter` + +## Query Log All Filter + +The QLA filter simply writes all SQL statements to a log file along with a timestamp for the statement. An example of the file produced by the QLA filter is shown below + +`00:36:04.922 5/06/2014, select @@version_comment limit 1` + +`00:36:12.663 5/06/2014, SELECT DATABASE()` + +`00:36:12.664 5/06/2014, show databases` + +`00:36:12.665 5/06/2014, show tables` + +A new file is created for each client connection, the name of the logfile can be controlled by the use of the router options. No parameters are used by the QLA filter. The filter is implemented by the loadable module `qlafilter`. + +To add the QLA filter to a service you must create a filter section to name the filter, associated the loadable module and define the filename option. + +`[QLA]` + +`type=filter` + +`module=qlafilter` + +`options=/tmp/QueryLog` + +Then add the filters= parameter into the service that you wish to log by adding this parameter to the service section + +`filters=QLA` + +A log file will be created for each client connection, the name of that log file will be /tmp/QueryLog. 
+ +## Regular Expression Filter + +The regular expression filter is a simple text based query rewriting filter. It allows a regular expression to be used to match text in a SQL query and then a string replacement to be made against that match. The filter is implemented by the `regexfilter` loadable module and is passed two parameters, a match string and a replacement string. + +To add the filter to your service you must first create a filter section to name the filter and give the match and replacement strings. Here we define a filter that will convert to MariaDB 10 command show all slaves status to the older form of show slave status for MariaDB 5.5. + +`[slavestatus]` + +`type=filter` + +`module=regexfilter` + +`match=show *all *slaves` + +`replace=show slave` + +You must then add this filter to your service by adding the filters= option + +`filters=slavestatus` + +Another example would be a filter to convert from the MySQL 5.1 `create table `syntax that used the `TYPE` keyword to the newer `ENGINE` keyword. + +`[EnginerFilter]` + +`type=filter` + +`module=regexfilter` + +`match=TYPE` + +`replace=ENGINE` + +This would then change the SQL sent by a client application written to work with MySQL 5.1 into SQL that was compliant with MySQL 5.5. The statement + +`create table supplier(id integer, name varchar(80)) type=innodb` + +would be replaced with + +`create table supplier(id integer, name varchar(80)) ENGINE=innodb` + +before being sent to the server. Note that the text in the match string is case independent. + +## Tee Filter + +The tee filter is a filter module for MaxScale is a "plumbing" fitting in the MaxScale filter toolkit. It can be used in a filter pipeline of a service to make a copy of requests from the client and dispatch a copy of the request to another service within MaxScale. 
+ +The configuration block for the TEE filter requires the minimal filter parameters in its section within the MaxScale.cnf file that defines the filter to load and the service to send the duplicates to. + +`[ArchieveFilter]` + +`type=filter` + +`module=tee` + +`service=Archieve` + +In addition parameters may be added to define patterns to match against to either include or exclude particular SQL statements to be duplicated. You may also define that the filter is only active for connections from a particular source or when a particular user is connected. + +# Encrypting Passwords + +Passwords stored in the MaxScale.cnf file may optionally be encrypted for added security. This is done by creation of an encryption key on installation of MaxScale. Encryption keys may be created manually by executing the `maxkeys` utility with the argument of the filename to store the key. + +`maxkeys $MAXSCALE_HOME/etc/.secrets` + +Changing the encryption key for MaxScale will invalidate any currently encrypted keys stored in the MaxScale.cnf file. + +## Creating Encrypted Passwords + +Encrypted passwords are created by executing the `maxpasswd` command with the password you require to encrypt as an argument. The environment variable MAXSCALE_HOME must be set, or MaxScale must be installed in the default location before `maxpasswd` can be executed. + +`maxpasswd MaxScalePw001` + +`61DD955512C39A4A8BC4BB1E5F116705` + +The output of the `maxpasswd` command is a hexadecimal string, this should be inserted into the MaxScale.cnf file in place of the ordinary, plain text, password. MaxScale will determine this as an encrypted password and automatically decrypt it before sending it to the database server. 
+ +`[Split Service]` + +`type=service` + +`router=readwritesplit` + +`servers=server1,server2,server3,server4` + +`user=maxscale` + +`password=61DD955512C39A4A8BC4BB1E5F116705` + +# Configuration Updates + +The current MaxScale configuration may be updated by editing the configuration file and then forcing MaxScale to reread the configuration file. To force MaxScale to reread the configuration file a SIGTERM signal is sent to the MaxScale process. + +Some changes in configuration can not be dynamically changed and require a complete restart of MaxScale, whilst others will take some time to be applied. + +## Limitations + +Services that are removed via the configuration update mechanism can not be physically removed from MaxScale until there are no longer any connections using the service. + +When the number of threads is decreased the threads will not actually be terminated until such time as they complete the current operation of that thread. + +Monitors can not be completely removed from the running MaxScale. + +# Authentication + +MySQL uses username, passwords and the client host in order to authenticate a user, so a typical user would be defined as user X at host Y and would be given a password to connect. MaxScale uses exactly the same rules as MySQL when users connect to the MaxScale instance, i.e. it will check the address from which the client is connecting and treat this in exactly the same way that MySQL would. MaxScale will pull the authentication data from one of the backend servers and use this to match the incoming connections, the assumption being that all the backend servers for a particular service will share the same set of user credentials. + +It is important to understand, however, that when MaxScale itself makes connections to the backend servers the backend server will see all connections as originating from the host that runs MaxScale and not the original host from which the client connected to MaxScale. 
Therefore the backend servers should be configured to allow connections from the MaxScale host for every user that can connect from any host. Since there is only a single password within the database server for a given host, this limits the configuration such that a given user name must have the same password for every host from which they can connect. + +To clarify, if a user X is defined as using password *pass1* from host a and *pass2* from host b then there must be an entry in the user table for user X form the MaxScale host, say *pass1*. + +This would result in rows in the user table as follows + + + + + + + + + + + + + + + + + + + + + + +
UsernamePasswordClient Host
Xpass1a
Xpass2b
Xpass1MaxScale
+ + +In this case the user X would be able to connect to MaxScale from host a giving the password of *pass1*. In addition MaxScale would be able to create connections for this user to the backend servers using the username X and password *pass1*, since the MaxScale host is also defined to have password *pass1*. User X would not however be able to connect from host b since they would need to provide the password *pass2* in order to connect to MaxScale, but then MaxScale would not be able to connect to the backends as it would also use the password *pass2* for these connections. + +## Wildcard Hosts + +Hostname mapping in MaxScale works in exactly the same way as for MySQL, if the wildcard is used for the host then any host other than the localhost (127.0.0.1) will match. It is important to consider that the localhost check will be performed at the MaxScale level and at the MySQL server level. + +If MaxScale and the databases are on separate hosts there are two important changes in behaviour to consider: + +1. Clients running on the same machine as the backend database now may access the database using the wildcard entry. The localhost check between the client and MaxScale will allow the use of the wildcard, since the client is not running on the MaxScale host. Also the wildcard entry can be used on the database host as MaxScale is making that connection and it is not running on the same host as the database. + +2. Clients running on the same host as MaxScale can not access the database via MaxScale using the wildcard entry since the connection to MaxScale will be from the localhost. These clients are able to access the database directly, as they will use the wildcard entry. + +If MaxScale is running on the same host as one or more of the database nodes to which it is acting as a proxy then the wildcard host entries can be used to connect to MaxScale but not to connect onwards to the database running on the same node. 
+ +In all these cases the issue may be solved by adding an explicit entry for the localhost address that has the same password as the wildcard entry. This may be done using a statement as below for each of the databases that are required: + +`MariaDB [mysql]> GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, DROP ON employee.* TO 'user1'@'localhost' IDENTIFIED BY 'xxx';` + +`Query OK, 0 rows affected (0.00 sec)` + +## Limitations + +At the time of writing the authentication mechanism within MaxScale does not support IPV6 address matching in connections rules. This is also in line with the current protocol modules that do not support IPV6. + +Partial address matching, such as 10.% is also not supported in the current version of MaxScale. + +# Error Reporting + +MaxScale is designed to be executed as a service, therefore all error reports, including configuration errors, are written to the MaxScale error log file. MaxScale will log to a set of files in the directory $MAXSCALE_HOME/log, the only exception to this is if the log directory is not writable, in which case a message is sent to the standard error descriptor. + +Troubleshooting + +MaxScale binds on TCP ports and UNIX sockets as well. + +If there is a local firewall in the server where MaxScale is installed, the IP and port must be configured in order to receive connections from outside. + +If the firewall is a network facility among all the involved servers, a configuration update is required as well. + +Example: + +`[Galera Listener]` + +`type=listener` + + `address=192.168.1.33` + +` port=4408` + +` socket=/servers/maxscale/galera.sock` + +` ` + +TCP/IP Traffic must be permitted to `192.168.1.33 port 4408` + +For Unix socket, the socket file path (example: `/servers/maxscale/galera.sock)` must be writable by the Unix user MaxScale runs as. 
+ diff --git a/Documentation/experimental/ConfigurationGuide.textile b/Documentation/experimental/ConfigurationGuide.textile new file mode 100644 index 000000000..6f38fc489 --- /dev/null +++ b/Documentation/experimental/ConfigurationGuide.textile @@ -0,0 +1,1447 @@ +MaxScale + +Configuration & Usage Scenarios + +Mark Riddoch + +Last Updated: 2nd July 2014 + +h1(#contents). Contents + +[[TOC]] + +h1(#document-history). Document History + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Date + +Change + +Who +
+21st July 2013 + +Initial version + +Mark Riddoch +
+23rd July 2013 + +Addition of default user and password for a monitor and discussion of monitor user requirements New monitor documented for Galera clusters Addition of example Galera cluster configuration + +Mark Riddoch +
+13th November 2013 + +state for Galera Monitor is "synced" + +Massimiliano Pinto +
+2nd December 2013 + +Updated the description of the command line arguments to match the code updates. Improved descriptions and general documentation. Enhanced example configurations + +Mark Riddoch +
+6th February 2014 + +Added “enable_root_user” as a service parameter + +Massimiliano Pinto +
+7th February 2014 + +Addition of bind address information Clarification of user configuration required for monitoring users and the user needed to fetch the user data + +Mark Riddoch +
+3rd March 2014 + +MySQL authentication with hostnames + +Massimiliano Pinto +
+3rd March 2014 + +Addition of section that describes authentication requirements and the rules for creating user credentials + +Mark Riddoch +
+28th March 2014 + +Unix socket support + +Massimiliano Pinto +
+8th May 2014 + +Added “version_string” parameter in service + +Massimiliano Pinto +
+29th May 2014 + +Added troubleshooting section + +Massimiliano Pinto +
+2nd June 2014 + +Correction of some typos, clarification of the meaning of session modification statements and the default user for the CLI. Addition of debugcli configuration option for developer and user modes. + +Mark Riddoch +
+4th June 2014 + +Addition of “monitor_interval” for monitors + +Massimiliano Pinto +
+6th June 2014 + +Addition of filters sections + +Mark Riddoch +
+27th June 2014 + +Addition of server weighting, the configuration for the maxadmin client + +Mark Riddoch +
+2nd July 2014 + +Addition of new readwritesplit router options with description and examples. + +Vilho Raatikka +
+ + +h1(#introduction). Introduction + +The purpose of this document is to describe how to configure MaxScale and to discuss some possible usage scenarios for MaxScale. MaxScale is designed with flexibility in mind, and consists of an event processing core with various support functions and plugin modules that tailor the behaviour of the MaxScale itself. + +h2(#terms). Terms + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Term + +Description +
+service + +A service represents a set of databases with a specific access mechanism that is offered to clients of MaxScale. The access mechanism defines the algorithm that MaxScale will use to direct particular requests to the individual databases. +
+server + +A server represents an individual database server to which a client can be connected via MaxScale. +
+router + +A router is a module within MaxScale that will route client requests to the various database servers which MaxScale provides a service interface to. +
+connection routing + +Connection routing is a method of handling requests in which MaxScale will accept connections from a client and route data on that connection to a single database using a single connection. Connection based routing will not examine individual quests on a connection and it will not move that connection once it is established. +
+statement routing + +Statement routing is a method of handling requests in which each request within a connection will be handled individually. Requests may be sent to one or more servers and connections may be dynamically added or removed from the session. +
+protocol + +A protocol is a module of software that is used to communicate with another software entity within the system. MaxScale supports the dynamic loading of protocol modules to allow for increased flexibility. +
+module + +A module is a separate code entity that may be loaded dynamically into MaxScale to increase the available functionality. Modules are implemented as run-time loadable shared objects. +
+monitor + +A monitor is a module that can be executed within MaxScale to monitor the state of a set of database. The use of an internal monitor is optional, monitoring may be performed externally to MaxScale. +
+listener + +A listener is the network endpoint that is used to listen for connections to MaxScale from the client applications. A listener is associated to a single service, however a service may have many listeners. +
+connection failover + +When a connection currently being used between MaxScale and the database server fails a replacement will be automatically created to another server by MaxScale without client intervention +
+backend database + +A term used to refer to a database that sits behind MaxScale and is accessed by applications via MaxScale. +
+filter + +A module that can be placed between the client and the MaxScale router module. All client data passes through the filter module and may be examined or modified by the filter modules. + +Filters may be chained together to form processing pipelines. +
+ + +h1(#configuration). Configuration + +The MaxScale configuration is read from a file which can be located in a number of placing, MaxScale will search for the configuration file in a number of locations. + +# If the environment variable MAXSCALE_HOME is set then MaxScale will look for a configuration file called MaxScale.cnf in the directory $MAXSCALE_HOME/etc +# If MAXSCALE_HOME is not set or the configuration file is not in the location above MaxScale will look for a file in /etc/MaxScale.cnf + +Alternatively MaxScale can be started with the -c flag and the path of the MaxScale home directory tree. + +An explicit path to a configuration file can be passed by using the -f option to MaxScale. + +The configuration file itself is based on the "ini" file format and consists of various sections that are used to build the configuration, these sections define services, servers, listeners, monitors and global settings. + +h2(#global-settings). Global Settings + +The global settings, in a section named [MaxScale], allow various parameters that affect MaxScale as a whole to be tuned. Currently the only setting that is supported is the number of threads to use to handle the network traffic. MaxScale will also accept the section name of [gateway] for global settings. This is for backward compatibility with versions prior to the naming of MaxScale. + +h3(#threads). Threads + +To control the number of threads that poll for network traffic set the parameter threads to a number. It is recommended that you start with a single thread and add more as you find the performance is not satisfactory. MaxScale is implemented to be very thread efficient, so a small number of threads is usually adequate to support reasonably heavy workloads. Adding more threads may not improve performance and can consume resources needlessly. 
+ +@# Valid options are:@ + +@# threads=@ + +@[MaxScale]@ + +@threads=1@ + +It should be noted that additional threads will be created to execute other internal services within MaxScale, this setting is merely used to configure the number of threads that will be used to manage the user connections. + +h2(#service). Service + +A service represents the database service that MaxScale offers to the clients. In general a service consists of a set of backend database servers and a routing algorithm that determines how MaxScale decides to send statements or route connections to those backend servers. + +A service may be considered as a virtual database server that MaxScale makes available to its clients. + +Several different services may be defined using the same set of backend servers. For example a connection based routing service might be used by clients that already performed internal read/write splitting, whilst a different statement based router may be used by clients that are not written with this functionality in place. Both sets of applications could access the same data in the same databases. + +A service is identified by a service name, which is the name of the configuration file section and a type parameter of service + +@[Test Service]@ + +@type=service@ + +In order for MaxScale to forward any requests it must have at least one service defined within the configuration file. The definition of a service alone is not enough to allow MaxScale to forward requests however, the service is merely present to link together the other configuration elements. + +h3(#router). Router + +The router parameter of a service defines the name of the router module that will be used to implement the routing algorithm between the client of MaxScale and the backend databases. Additionally routers may also be passed a comma separated list of options that are used to control the behaviour of the routing algorithm. 
The two parameters that control the routing choice are @router@ and @router_options@. The router options are specific to a particular router and are used to modify the behaviour of the router. The read connection router can be passed options of master, slave or synced, an example of configuring a service to use this router and limiting the choice of servers to those in slave state would be as follows. + +@router=readconnroute@ + +@router_options=slave@ + +To change the router to connect on to servers in the master state as well as slave servers, the router options can be modified to include the master state. + +@router=readconnroute@ + +@router_options=master,slave@ + +A more complete description of router options and what is available for a given router is included with the documentation of the router itself. + +h3(#filters). Filters + +The @filters@ option allows a set of filters to be defined for a service; requests from the client are passed through these filters before being sent to the router for dispatch to the backend server. The filters parameter takes one or more filter names, as defined within the filter definition section of the configuration file. Multiple filters are separated using the | character. + +@filters=counter | QLA@ + +The requests pass through the filters from left to right in the order defined in the configuration parameter. + +h3(#servers). Servers + +The servers parameter in a service definition provides a comma separated list of the backend servers that comprise the service. The server names are those used in the name section of a block with a type parameter of server (see below). + +@servers=server1,server2,server3@ + +h3(#user). User + +The @user@ parameter, along with the @passwd@ parameter are used to define the credentials used to connect to the backend servers to extract the list of database users from the backend database that is used for the client authentication. 
+ +@user=maxscale@ + +@passwd=Mhu87p2D@ + +Authentication of incoming connections is performed by MaxScale itself rather than by the database server to which the client is connected. The client will authenticate itself with MaxScale, using the username, hostname and password information that MaxScale has extracted from the backend database servers. For a detailed discussion of how this impacts the authentication process please see the "Authentication" section below. + +The host matching criteria is restricted to IPv4, IPv6 will be added in a future release. + +Existing user configuration in the backend databases must be checked and may be updated before successful MaxScale authentication: + +In order for MaxScale to obtain all the data it must be given a username it can use to connect to the database and retrieve that data. This is the parameter that gives MaxScale the username to use for this purpose. + +The account used must be able to select from the mysql.user table, the following is an example showing how to create this user. + +MariaDB [mysql]> create user 'maxscale'@'maxscalehost' identified by 'Mhu87p2D'; + +@Query OK, 0 rows affected (0.01 sec)@ + +MariaDB [mysql]> grant SELECT on mysql.user to 'maxscale'@'maxscalehost'; + +@Query OK, 0 rows affected (0.00 sec)@ + +h3(#passwd). Passwd + +The passwd parameter provides the password information for the above user and may be either a plain text password or it may be an encrypted password. See the section on encrypting passwords for use in the MaxScale.cnf file. This user must be capable of connecting to the backend database and executing the SQL statement "SELECT user, host, password FROM mysql.user". + +*enable_root_user* + +This parameter controls the ability of the root user to connect to MaxScale and hence onwards to the backend servers via MaxScale. + +The default value is 0, disabling the ability of the root user to connect to MaxScale. 
+ +Example for enabling root user: + +@enable_root_user=1@ + +Values of "on" or “true” may also be given to enable the root user and “off” or “false” may be given to disable the use of the root user. + +@enable_root_user=true@ + +*version_string* + +This parameter sets a custom version string that is sent in the MySQL Handshake from MaxScale to clients. + +Example: + +@version_string=5.5.37-MariaDB-RWsplit@ + +If not set, the default value is the server version of the embedded MySQL/MariaDB library. Example: @5.5.35-MariaDB@ + +h3(#weightby). weightby + +The weightby parameter is used in conjunction with server parameters in order to control the load balancing applied in the router in use by the service. This allows varying weights to be applied to each server to create a non-uniform distribution of the load amongst the servers. + +An example of this might be to define a parameter for each server that represents the amount of resource available on the server, we could call this serversize. Every server should then have a serversize parameter set for the server. + +@serversize=10@ + +The service would then have the parameter weightby set. If there are 4 servers defined in the service, serverA, serverB, serverC and serverD, with the serversize set as shown in the table below, the connections would balanced using the percentages in this table. + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Server + +serversize + +% connections +
+serverA + +10 + +18% +
+serverB + +15 + +27% +
+serverC + +10 + +18% +
+serverD + +20 + +36% +
+ + +h2(#server). Server + +Server sections are used to define the backend database servers that can be formed into a service. A server may be a member of one or more services within MaxScale. Servers are identified by a server name which is the section name in the configuration file. Servers have a type parameter of server, plus address port and protocol parameters. + +@[server1]@ + +@type=server@ + +@address=127.0.0.1@ + +@port=3000@ + +@protocol=MySQLBackend@ + +h3(#address). Address + +The IP address or hostname of the machine running the database server that is being defined. MaxScale will use this address to connect to the backend database server. + +h3(#port). Port + +The port on which the database listens for incoming connections. MaxScale will use this port to connect to the database server. + +h3(#protocol). Protocol + +The name for the protocol module to use to connect MaxScale to the database. Currently only one backend protocol is supported, the MySQLBackend module. + +h3(#monitoruser). Monitoruser + +The monitor has a username and password that is used to connect to all servers for monitoring purposes, this may be overridden by supplying a monitoruser statement for each individual server + +@monitoruser=mymonitoruser@ + +h3(#monitorpw). MonitorPw + +The monitor has a username and password that is used to connect to all servers for monitoring purposes, this may be overridden by supplying a monpasswd statement for the individual servers + +@monitorpw=mymonitorpasswd@ + +The monpasswd parameter may be either a plain text password or it may be an encrypted password. See the section on encrypting passwords for use in the MaxScale.cnf file. + +h2(#listener). Listener + +The listener defines a port and protocol pair that is used to listen for connections to a service. A service may have multiple listeners associated with it, either to support multiple protocols or multiple ports. 
As with other elements of the configuration the section name is the listener name and a type parameter is used to identify the section as a listener definition. + +@[Test Listener]@ + +@type=listener@ + +@service=Test Service@ + +@protocol=MySQLClient@ + +@address=localhost@ + +@port=4008@ + +@socket=/tmp/testlistener.sock@ + +h3(#service-1). Service + +The service to which the listener is associated. This is the name of a service that is defined elsewhere in the configuration file. + +h3(#protocol-1). Protocol + +The name of the protocol module that is used for the communication between the client and MaxScale itself. + +h3(#address-1). Address + +The address option sets the address that will be used to bind the listening socket. The address may be specified as an IP address in ‘dot notation' or as a hostname. If the address option is not included in the listener definition the listener will bind to all network interfaces. + +h3(#port-1). Port + +The port to use to listen for incoming connections to MaxScale from the clients. If the port is omitted from the configuration a default port for the protocol will be used. + +*Socket* + +The socket option may be included in a listener definition, this configures the listener to use Unix domain sockets to listen for incoming connections. The parameter value given is the name of the socket to use. + +If a socket option and an address option is given then the listener will listen on both the specific IP address and the Unix socket. + +h2(#filter). Filter + +Filters provide a means to manipulate or process requests as they pass through MaxScale between the client side protocol and the query router. A filter should be defined in a section with a type of filter. + +@[QLA]@ + +@type=filter@ + +@module=qlafilter@ + +@options=/tmp/QueryLog@ + +The section name may then be used in one or more services by using the @filters=@ parameter in the service section. 
In order to use the above filter for a service called "QLA Service", an entry of the following form would exist for that service. + +@[QLA Service]@ + +@type=service@ + +@router=readconnroute@ + +@router_options=slave@ + +@servers=server1,server2,server3,server4@ + +@user=massi@ + +@passwd=6628C50E07CCE1F0392EDEEB9D1203F3@ + +@filters=QLA@ + +See the Services section for more details on how to configure the various options of a service. + +h3(#module). Module + +The module parameter defines the name of the loadable module that implements the filter. + +h3(#options). Options + +The options parameter is used to pass options to the filter to control the actions the filter will perform. The values that can be passed differ between filter implementations, the inclusion of an options parameter is optional. + +h3(#other-parameters). Other Parameters + +Any other parameters present in the filters section will be passed to the filter to be interpreted by the filter. An example of this is the regexfilter that requires the two parameters @match@ and @replace@ + +@[regex]@ + +@type=filter@ + +@module=regexfilter@ + +@match=form@ + +@replace=from@ + +h2(#monitor). Monitor + +In order for the various router modules to function correctly they require information about the state of the servers that are part of the service they provide. MaxScale has the ability to internally monitor the state of the back-end database servers or that state may be fed into MaxScale from external monitoring systems. If automated monitoring and failover of services is required this is achieved by running a monitor module that is designed for the particular database architecture that is in use. + +Monitors are defined in much the same way as other elements in the configuration file, with the section name being the name of the monitor instance and the type being set to monitor. 
+ +@[MySQL Monitor]@ + +@type=monitor@ + +@module=mysqlmon@ + +@servers=server1,server2,server3@ + +@user=dbmonitoruser@ + +@passwd=dbmonitorpwd@ + +@monitor_interval=8000@ + +h3(#module-1). Module + +The module parameter defines the name of the loadable module that implements the monitor. This module is loaded and executed on a separate thread within MaxScale. + +h3(#servers-1). Servers + +The servers parameter is a comma separated list of server names to monitor, these are the names defined elsewhere in the configuration file. The set of servers monitored by a single monitor need not be the same as the set of servers used within any particular service, a single monitor instance may monitor servers in multiple services. + +h3(#user-1). User + +The user parameter defines the username that the monitor will use to connect to the monitored databases. Depending on the monitoring module used this user will require specific privileges in order to determine the state of the nodes, details of those privileges can be found in the sections on each of the monitor modules. + +Individual servers may define override values for the user and password the monitor uses by setting the monuser and monpasswd parameters in the server section. + +h3(#passwd-1). Passwd + +The password parameter may be either a plain text password or it may be an encrypted password. See the section on encrypting passwords for use in the MaxScale.cnf file. + +*Monitor_interval* + +The monitor_interval parameter sets the sampling interval in milliseconds for each monitor, the default value is 10000 milliseconds. + +h1(#protocol-modules). Protocol Modules + +The protocols supported by MaxScale are implemented as external modules that are loaded dynamically into the MaxScale core. These modules reside in the directory MAXSCALE_HOME/module, if the environment variable MAXSCALE_HOME is not set it defaults to /usr/local/skysql/MaxScale. It may also be set by passing the -c option on the MaxScale command line. 
+ +h2(#mysqlclient). MySQLClient + +This is the implementation of the MySQL protocol that is used by clients of MaxScale to connect to MaxScale. + +h2(#mysqlbackend). MySQLBackend + +The MySQLBackend protocol module is the implementation of the protocol that MaxScale uses to connect to the backend MySQL, MariaDB and Percona Server databases. This implementation is tailored for the MaxScale to MySQL Database traffic and is not a general purpose implementation of the MySQL protocol. + +h2(#telnetd). Telnetd + +The telnetd protocol module is used for connections to MaxScale itself for the purposes of creating interactive user sessions with the MaxScale instance itself. Currently this is used in conjunction with a special router implementation, the debugcli. + +h2(#maxscaled). maxscaled + +The protocol used by the maxadmin client application in order to connect to MaxScale and access the command line interface. + +h2(#httpd). HTTPD + +This protocol module is currently still under development, it provides a means to create HTTP connections to MaxScale for use by web browsers or RESTful API clients. + +h1(#router-modules). Router Modules + +The main task of MaxScale is to accept database connections from client applications and route the connections or the statements sent over those connections to the various services supported by MaxScale. + +There are two flavours of routing that MaxScale can perform, connection based routing and statement based routing. These each have their own characteristics and costs associated with them. + +h2(#connection-based-routing). Connection Based Routing + +Connection based routing is a mechanism by which MaxScale will, for each incoming connection decide on an appropriate outbound server and will forward all statements to that server without examining the internals of the statement. 
Once an inbound connection is associated to a particular backend database it will remain connected to that server until the connection is closed or the server fails. + +h2(#statement-based-routing). Statement Based Routing + +Statement based routing is somewhat different, the routing modules examine every statement the client sends and determines, on a per statement basis, which of the set of backend servers in the service is best to execute the statement. This gives better dynamic balancing of the load within the cluster but comes at a cost. The query router must understand the statement that is being routed and will typically need to parse the statement in order to achieve this. This parsing within the router adds a significant overhead to the cost of routing and makes this type of router only really suitable for loads in which the gains outweigh this added cost. + +h2(#available-routing-modules). Available Routing Modules + +Currently a small number of query routers are available, these are in different stages of completion and offer different facilities. + +h3(#readconnroute). Readconnroute + +This is a connection based query router that was originally targeted at environments in which the clients already performed splitting of read and write queries into separate connections. + +Whenever a new connection is received the router will examine the state of all the servers that form part of the service and route the connection to the server with least connections currently that matches the filter constraints given in the router options. This results in a balancing of the active connections, however different connections may have different lifetimes and the connections may become unbalanced when later viewed. 
+ +The readconnroute router can be configured to balance the connections from the clients across all the backend servers that are running, just those backend servers that are currently replication slaves or those that are replication masters when routing to a master slave replication environment. When a Galera cluster environment is in use the servers can be filtered to just the set that are part of the cluster and in the ‘synced' state. These options are configurable via the router_options that can be set within a service. The router_option strings supported are "master", “slave” and “synced”. + +h4(#masterslave-replication-setup). Master/Slave Replication Setup + +To setup MaxScale to route connections evenly between all the current slave servers in a replication cluster, a service entry of the form shown below is required. + +@[Read Service]@ + +@type=service@ + +@router=readconnroute@ + +@router_options=slave@ + +@servers=server1,server2,server3,server4@ + +@user=maxscale@ + +@auth=thepasswd@ + +With the addition of a listener for this service, which defines the port and protocol that MaxScale uses + +@[Read Listener]@ + +@type=listener@ + +@service=Read Service@ + +@protocol=MySQLClient@ + +@port=4006@ + +the client can now connect to port 4006 on the host which is running MaxScale. Statements sent using this connection will then be routed to one of the slaves in the server set defined in the Read Service. Exactly which is selected will be determined by balancing the number of connections to each of those whose current state is "slave". + +Altering the router options to be @slave, master@ would result in the connections being balanced between all the servers within the cluster. + +It is assumed that the client will have a separate connection to the master server, however this can be routed via MaxScale, allowing MaxScale to manage the determination of which server is master. 
To do this you would add a second service and listener definition for the master server. + +@[Write Service]@ + +@type=service@ + +@router=readconnroute@ + +@router_options=master@ + +@servers=server1,server2,server3,server4@ + +@user=maxscale@ + +@auth=thepasswd@ + +@[Write Listener]@ + +@type=listener@ + +@service=Write Service@ + +@protocol=MySQLClient@ + +@port=4007@ + +This allows the clients to direct write requests to port 4007 and read requests to port 4006 of the MaxScale host without the clients needing to understand the configuration of the Master/Slave replication cluster. + +Connections to port 4007 would automatically be directed to the server that is the master for replication at the time connection is opened. Whilst this is a simple mapping to a single server it does give the advantage that the clients have no requirement to track which server is currently the master, devolving responsibility for managing the failover to MaxScale. + +In order for MaxScale to be able to determine the state of these servers the mysqlmon monitor module should be run against the set of servers that comprise the service. + +h4(#galera-cluster-configuration). Galera Cluster Configuration + +Although not primarily designed for a multi-master replication setup, it is possible to use the readconnroute in this situation. The readconnroute connection router can be used to balance the connections across a Galera cluster. A special monitor is available that detects if nodes are joined to a Galera Cluster, with the addition of a router option to only route connections to nodes marked as synced. MaxScale can ensure that users are never connected to a node that is not a full cluster member. 
+ +@[Galera Service]@ + +@type=service@ + +@router=readconnroute@ + +@router_options=synced@ + +@servers=server1,server2,server3,server4@ + +@user=maxscale@ + +@auth=thepasswd@ + +@[Galera Listener]@ + +@type=listener@ + +@service=Galera Service@ + +@protocol=MySQLClient@ + +@port=3336@ + +@[Galera Monitor]@ + +@type=monitor@ + +@module=galeramon@ + +@servers=server1,server2,server3,server4@ + +@user=galeramon@ + +@passwd=galeramon@ + +The specialized Galera monitor can also select one of the node in the cluster as master, the others will be marked as slave. + +These roles are only assigned to synced nodes. + +It then possible to have services/listeners with router_options=master or slave accessing a subset of all galera nodes. + +The "synced" simply means: access all nodes. + +Examples: + +@[Galera Master Service]@ + +@type=service@ + +@router=readconnroute@ + +@router_options=master@ + +@[Galera Slave Service]@ + +@type=service@ + +@router=readconnroute@ + +@router_options=slave@ + +The Master and Slave roles are also available for the Read/Write Split router operation + +h3(#readwritesplit). Readwritesplit + +The readwritesplit is a statement based router that has been designed for use within Master/Slave replication environments. It examines every statement, parsing it to determine if the statement falls into one of three categories; + +* read only statement +* possible write statement +* session modification statement (NOTE: A session modification statement is any statement that is executed that may affect the behaviour of subsequent statements within the current connection. Examples of such statements are the USE SQL statement or a SET statement using the SESSION scope. PREPARE STMT clauses are session statements in MaxScale since they are executed in every backend server.) + +Each of these three categories has a different action associated with it. Read only statements are sent to a slave server in the replication cluster. 
Possible write statements, which may include read statements that have an undeterminable side effect, are sent to the current replication master. Statements that modify the session are sent to all the servers, with the result that is generated by the master server being returned to the user. + +Session modification statements must be replicated as they affect the future results of read and write operations, so they must be executed on all servers that could execute statements on behalf of this client. + +Currently the readwritesplit router module is under development and has the following limitations: + +* Connection failover support has not yet been implemented. Client connections will fail if the master server fails over. + +h4(#masterslave-replication-setup-1). Master/Slave Replication Setup + +To setup the readwritesplit connection router in a master/slave failover environment is extremely simple, a service definition is required with the router defined for the service and an associated listener. + +The router_options parameter is not required but it can be used to specify how slave(s) are selected. Available option is @slave_selection_criteria@ and possible values are @LEAST_BEHIND_MASTER@ and @LEAST_CURRENT_OPERATIONS@. + +@max_slave_connections@ is a readwritesplit-only option, which sets the upper limit for the number of slaves a router session can use. @max_slave_replication_lag@ is (currently) another readwritesplit-specific option, which sets maximum allowed lag for slave in seconds. The criteria is checked when router chooses slaves and only slaves having smaller lag are eligible for selection. The lag is not checked after connection phase. 
+ +@[Split Service]@ + +@type=service@ + +@router=readwritesplit@ + +@router_options=slave_selection_criteria=LEAST_BEHIND_MASTER@ + +@max_slave_connections=50%@ + +@max_slave_replication_lag=30@ + +@servers=server1,server2,server3,server4@ + +@user=maxscale@ + +@auth=thepasswd@ + +@[Split Listener]@ + +@type=listener@ + +@service=Split Service@ + +@protocol=MySQLClient@ + +@port=3336@ + +The client would merely connect to port 3336 on the MaxScale host and statements would be directed to the master or slave as appropriate. Determination of the master or slave status may be done via a monitor module within MaxScale or externally. In this latter case the server flags would need to be set via the MaxScale debug interface, in future versions an API will be available for this purpose. + +Galera Cluster Configuration + +Master and Slave roles that galera monitor assign to nodes make possible the Read Write split approach to Galera Cluster as well. + +Simply configure a Split Service with galera nodes: + +@[Galera Split Service]@ + +@type=service@ + +@router=readwritesplit@ + +@servers=galera_node1,galera_node2,galera_node3@ + +h3(#debugcli). Debugcli + +The debugcli is a special case of a statement based router. Rather than direct the statements at an external data source they are handled internally. These statements are simple text commands and the results are the output of debug commands within MaxScale. The service and listener definitions for a debug cli service only differ from other services in that they require no backend server definitions. + +h4(#debug-cli-configuration). Debug CLI Configuration + +The definition of the debug cli service is illustrated below + +@[Debug Service]@ + +@type=service@ + +@router=debugcli@ + +@[Debug Listener]@ + +@type=listener@ + +@service=Debug Service@ + +@protocol=telnetd@ + +@port=4442@ + +Connections using the telnet protocol to port 4442 of the MaxScale host will result in a new debug CLI session. 
A default username and password are used for this module, new users may be created using the @add user@ command. As soon as any users are explicitly created the default username will no longer continue to work. The default username is admin with a password of skysql. + +The debugcli supports two modes of operation, developer mode and user mode. The mode is set via the router_options parameter of the debugcli. The user mode is more suited to end-users and administrators, whilst the developer mode is explicitly targeted at software developers adding or maintaining the MaxScale code base. Details of the differences between the modes can be found in the debugging guide for MaxScale. The default mode for the debugcli is user mode. The following service definition would enable a developer version of the debugcli. + +@[Debug Service]@ + +@type=service@ + +@router=debugcli@ + +@router_options=developer@ + +It should be noted that both a user and a developer version of the debugcli may be defined within the same instance of MaxScale, however they must be defined as two distinct services, each with a distinct listener. + +@[Debug Service]@ + +@type=service@ + +@router=debugcli@ + +@router_options=developer@ + +@[Debug Listener]@ + +@type=listener@ + +@service=Debug Service@ + +@protocol=telnetd@ + +@port=4442@ + +@[Admin Service]@ + +@type=service@ + +@router=debugcli@ + +@[Admin Listener]@ + +@type=listener@ + +@service=Admin Service@ + +@protocol=telnetd@ + +@port=4242@ + +h3(#cli). CLI + +The command line interface as used by maxadmin. This is a variant of the debugcli that is built slightly differently so that it may be accessed by the client application maxadmin. The CLI requires the use of the maxscaled protocol. + +h4(#cli-configuration). CLI Configuration + +There are two components to the definition required in order to run the command line interface to use with MaxAdmin; a service and a listener. + +The default entries required are shown below. 
+ +@[CLI]@ + +@type=service@ + +@router=cli@ + +@[CLI Listener]@ + +@type=listener@ + +@service=CLI@ + +@protocol=maxscaled@ + +@address=localhost@ + +@port=6603@ + +Note that this uses the default port of 6603 and confines the connections to localhost connections only. Remove the @address=@ entry to allow connections from any machine on your network. Changing the port from 6603 will mean that you must always pass a -p option to the MaxAdmin command. + +h1(#monitor-modules). Monitor Modules + +Monitor modules are used by MaxScale to internally monitor the state of the backend databases in order to set the server flags for each of those servers. The router modules then use these flags to determine if the particular server is a suitable destination for routing connections for particular query classifications. The monitors are run within separate threads of MaxScale and do not affect the MaxScale performance. + +The use of monitors is optional, it is possible to run MaxScale with external monitoring, in which case arrangements must be made for an external entity to set the status of each of the servers that MaxScale can route to. + +h2(#mysqlmon). Mysqlmon + +The MySQLMon monitor is a simple monitor designed for use with MySQL Master/Slave replication cluster. To execute the mysqlmon monitor an entry as shown below should be added to the MaxScale configuration file. + +@[MySQL Monitor]@ + +@type=monitor@ + +@module=mysqlmon@ + +@servers=server1,server2,server3,server4@ + +This will monitor the 4 servers; server1, server2, server3 and server4. It will set the status of running or failed and master or slave for each of the servers. + +The monitor uses the username given in the monitor section or the server specific user that is given in the server section to connect to the server. This user must have sufficient permissions on the database to determine the state of replication. The roles that must be granted to this user are REPLICATION SLAVE and REPLICATION CLIENT. 
+ +To create a user that can be used to monitor the state of the cluster, the following commands could be used. + +MariaDB [mysql]> create user 'maxscalemon'@'maxscalehost' identified by 'Ha79hjds'; + +@Query OK, 0 rows affected (0.01 sec)@ + +MariaDB [mysql]> grant REPLICATION SLAVE on *.* to 'maxscalemon'@'maxscalehost'; + +@Query OK, 0 rows affected (0.00 sec)@ + +MariaDB [mysql]> grant REPLICATION CLIENT on *.* to 'maxscalemon'@'maxscalehost'; + +@Query OK, 0 rows affected (0.00 sec)@ + +@MariaDB [mysql]>@ + +Assuming that MaxScale is running on the host maxscalehost. + +h2(#galeramon). Galeramon + +The Galeramon monitor is a simple monitor designed for use with MySQL Galera cluster. To execute the galeramon monitor an entry as shown below should be added to the MaxScale configuration file. + +@[Galera Monitor]@ + +@type=monitor@ + +@module=galeramon@ + +@servers=server1,server2,server3,server4@ + +This will monitor the 4 servers; server1, server2, server3 and server4. It will set the status of running or failed and joined for those servers that reported the Galera JOINED status. + +The user that is configured for use with the Galera monitor must have sufficient privileges to select from the information_schema database and GLOBAL_STATUS table within that database. + +To create a user that can be used to monitor the state of the cluster, the following commands could be used. + +MariaDB [mysql]> create user 'maxscalemon'@'maxscalehost' identified by 'Ha79hjds'; + +@Query OK, 0 rows affected (0.01 sec)@ + +MariaDB [mysql]> grant SELECT on INFORMATION_SCHEMA.GLOBAL_STATUS to 'maxscalemon'@'maxscalehost'; + +@Query OK, 0 rows affected (0.00 sec)@ + +@MariaDB [mysql]>@ + +Assuming that MaxScale is running on the host maxscalehost. + +The Galera monitor can also assign Master and Slave roles to the configured nodes: + +among the set of synced servers, the one with the lowest value of ‘wsrep_local_index' is selected as the current master while the others are slaves. 
In this way it is possible to configure node access based not only on the ‘synced' state but also on the Master and Slave roles, enabling the use of Read Write split operation on a Galera cluster and avoiding any possible write conflict.
+Module + +Description +
+testfilter + +Statement counting Filter - a simple filter that counts the number of SQL statements executed within a session. Results may be viewed via the debug interface. +
+qlafilter + +Query Logging Filter - a simple query logging filter that write all statements for a session into a log file for that session. +
+regexfilter + +Query Rewrite Filter - an example of how filters can alter the query contents. This filter allows a regular expression to be defined, along with replacement text that should be substituted for every match of that regular expression. +
+tee + +A filter that duplicates SQL requests and sends the duplicates to another service within MaxScale. +
These filters are merely examples of what may be achieved with the filter API and are not sophisticated or considered suitable for production use; they merely illustrate the functionality possible.
+ +h2(#regular-expression-filter). Regular Expression Filter + +The regular expression filter is a simple text based query rewriting filter. It allows a regular expression to be used to match text in a SQL query and then a string replacement to be made against that match. The filter is implemented by the @regexfilter@ loadable module and is passed two parameters, a match string and a replacement string. + +To add the filter to your service you must first create a filter section to name the filter and give the match and replacement strings. Here we define a filter that will convert to MariaDB 10 command show all slaves status to the older form of show slave status for MariaDB 5.5. + +@[slavestatus]@ + +@type=filter@ + +@module=regexfilter@ + +@match=show *all *slaves@ + +@replace=show slave@ + +You must then add this filter to your service by adding the filters= option + +@filters=slavestatus@ + +Another example would be a filter to convert from the MySQL 5.1 @create table@syntax that used the @TYPE@ keyword to the newer @ENGINE@ keyword. + +@[EnginerFilter]@ + +@type=filter@ + +@module=regexfilter@ + +@match=TYPE@ + +@replace=ENGINE@ + +This would then change the SQL sent by a client application written to work with MySQL 5.1 into SQL that was compliant with MySQL 5.5. The statement + +@create table supplier(id integer, name varchar(80)) type=innodb@ + +would be replaced with + +@create table supplier(id integer, name varchar(80)) ENGINE=innodb@ + +before being sent to the server. Note that the text in the match string is case independent. + +h2(#tee-filter). Tee Filter + +The tee filter is a filter module for MaxScale is a "plumbing" fitting in the MaxScale filter toolkit. It can be used in a filter pipeline of a service to make a copy of requests from the client and dispatch a copy of the request to another service within MaxScale. 
The configuration block for the TEE filter requires the minimal filter parameters in its section within the MaxScale.cnf file that defines the filter to load and the service to send the duplicates to.
Configuration Updates + +The current MaxScale configuration may be updating by editing the configuration file and then forcing MaxScale to reread the configuration file. To force MaxScale to reread the configuration file a SIGTERM signal is sent to the MaxScale process. + +Some changes in configuration can not be dynamically changed and require a complete restart of MaxScale, whilst others will take some time to be applied. + +h2(#limitations). Limitations + +Services that are removed via the configuration update mechanism can not be physically removed from MaxScale until there are no longer any connections using the service. + +When the number of threads is decreased the threads will not actually be terminated until such time as they complete the current operation of that thread. + +Monitors can not be completely removed from the running MaxScale. + +h1(#authentication). Authentication + +MySQL uses username, passwords and the client host in order to authenticate a user, so a typical user would be defined as user X at host Y and would be given a password to connect. MaxScale uses exactly the same rules as MySQL when users connect to the MaxScale instance, i.e. it will check the address from which the client is connecting and treat this in exactly the same way that MySQL would. MaxScale will pull the authentication data from one of the backend servers and use this to match the incoming connections, the assumption being that all the backend servers for a particular service will share the same set of user credentials. + +It is important to understand, however, that when MaxScale itself makes connections to the backend servers the backend server will see all connections as originating from the host that runs MaxScale and not the original host from which the client connected to MaxScale. Therefore the backend servers should be configured to allow connections from the MaxScale host for every user that can connect from any host. 
Since there is only a single password within the database server for a given host, this limits the configuration such that a given user name must have the same password for every host from which they can connect. + +To clarify, if a user X is defined as using password _pass1_ from host a and _pass2_ from host b then there must be an entry in the user table for user X form the MaxScale host, say _pass1_. + +This would result in rows in the user table as follows + + + + + + + + + + + + + + + + + + + + + + +
+Username + +Password + +Client Host +
+X + +pass1 + +a +
+X + +pass2 + +b +
+X + +pass1 + +MaxScale +
+ + +In this case the user X would be able to connect to MaxScale from host a giving the password of _pass1_. In addition MaxScale would be able to create connections for this user to the backend servers using the username X and password _pass1_, since the MaxScale host is also defined to have password _pass1_. User X would not however be able to connect from host b since they would need to provide the password _pass2_ in order to connect to MaxScale, but then MaxScale would not be able to connect to the backends as it would also use the password _pass2_ for these connections. + +h2(#wildcard-hosts). Wildcard Hosts + +Hostname mapping in MaxScale works in exactly the same way as for MySQL, if the wildcard is used for the host then any host other than the localhost (127.0.0.1) will match. It is important to consider that the localhost check will be performed at the MaxScale level and at the MySQL server level. + +If MaxScale and the databases are on separate hosts there are two important changes in behaviour to consider: + +# Clients running on the same machine as the backend database now may access the database using the wildcard entry. The localhost check between the client and MaxScale will allow the use of the wildcard, since the client is not running on the MaxScale host. Also the wildcard entry can be used on the database host as MaxScale is making that connection and it is not running on the same host as the database. +# Clients running on the same host as MaxScale can not access the database via MaxScale using the wildcard entry since the connection to MaxScale will be from the localhost. These clients are able to access the database directly, as they will use the wildcard entry. + +If MaxScale is running on the same host as one or more of the database nodes to which it is acting as a proxy then the wildcard host entries can be used to connect to MaxScale but not to connect onwards to the database running on the same node. 
+ +In all these cases the issue may be solved by adding an explicit entry for the localhost address that has the same password as the wildcard entry. This may be done using a statement as below for each of the databases that are required: + +MariaDB [mysql]> GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, DROP ON employee.* 'user1'@'localhost' IDENTIFIED BY ‘xxx’; + +@Query OK, 0 rows affected (0.00 sec)@ + +h2(#limitations-1). Limitations + +At the time of writing the authentication mechanism within MaxScale does not support IPV6 address matching in connections rules. This is also in line with the current protocol modules that do not support IPV6. + +Partial address matching, such as 10.% is also not supported in the current version of MaxScale. + +h1(#error-reporting). Error Reporting + +MaxScale is designed to be executed as a service, therefore all error reports, including configuration errors, are written to the MaxScale error log file. MaxScale will log to a set of files in the directory $MAXSCALE_HOME/log, the only exception to this is if the log directory is not writable, in which case a message is sent to the standard error descriptor. + +Troubleshooting + +MaxScale binds on TCP ports and UNIX sockets as well. + +If there is a local firewall in the server where MaxScale is installed, the IP and port must be configured in order to receive connections from outside. + +If the firewall is a network facility among all the involved servers, a configuration update is required as well. + +Example: + +@[Galera Listener]@ + +@type=listener@ + +bc. `address=192.1681.3.33` + + +@port=4408@ + +@socket=/servers/maxscale/galera.sock@ + +` ` + +TCP/IP Traffic must be permitted to @192.1681.3.33 port 4408@ + +For Unix socket, the socket file path (example: @/servers/maxscale/galera.sock)@ must be writable by the Unix user MaxScale runs as. 
+ diff --git a/Documentation/experimental/ConfigurationGuide.xml b/Documentation/experimental/ConfigurationGuide.xml new file mode 100644 index 000000000..7445b586d --- /dev/null +++ b/Documentation/experimental/ConfigurationGuide.xml @@ -0,0 +1,1252 @@ + + +
+ + + + w: www.mariadb.come: info@mariadb.com + + + + + + + + MaxScale + + Configuration & Usage Scenarios + + + Mark Riddoch + + Last Updated: 2nd July 2014 + + + <anchor id="h.own38oq23q1h"/>Contents + + + Contents + + + Document + + History + + + Introduction + + + Terms + + + Configuration + + + Global + + Settings + + + Threads + + + Service + + + Router + + + Filters + + + Servers + + + User + + + Passwd + + + weightby + + + Server + + + Address + + + Port + + + Protocol + + + Monitoruser + + + MonitorPw + + + Listener + + + Service + + + Protocol + + + Address + + + Port + + + Filter + + + Module + + + Options + + + Other + + Parameters + + + Monitor + + + Module + + + Servers + + + User + + + Passwd + + + Protocol + + Modules + + + MySQLClient + + + MySQLBackend + + + Telnetd + + + maxscaled + + + HTTPD + + + Router + + Modules + + + Connection + + Based + + Routing + + + Statement + + Based + + Routing + + + Available + + Routing + + Modules + + + Readconnroute + + + Master + / + Slave + + Replication + + Setup + + + Galera + + Cluster + + Configuration + + + Readwritesplit + + + Master + / + Slave + + Replication + + Setup + + + Debugcli + + + Debug + + CLI + + Configuration + + + CLI + + + CLI + + Configuration + + + Monitor + + Modules + + + Mysqlmon + + + Galeramon + + + Filter + + Modules + + + Statement + + Counting + + Filter + + + Query + + Log + + All + + Filter + + + Regular + + Expression + + Filter + + + Tee + + Filter + + + Encrypting + + Passwords + + + Creating + + Encrypted + + Passwords + + + Configuration + + Updates + + + Limitations + + + Authentication + + + Wildcard + + Hosts + + + Limitations + + + Error + + Reporting + + + + + + + <anchor id="h.q8c6oow7f5cv"/> + + + + <anchor id="h.fpm61wmsats7"/><anchor id="h.88m40r5skidw"/>Document History + + + DateChangeWho21st July 2013Initial versionMark Riddoch23rd July 2013Addition of default user and password for a monitor and discussion of monitor user requirementsNew monitor documented 
for Galera clustersAddition of example Galera cluster configurationMark Riddoch13th November 2013state for Galera Monitor is “synced”Massimiliano Pinto2nd December 2013Updated the description of the command line arguments to match the code updates.Improved descriptions and general documentation.Enhanced example configurationsMark Riddoch6th February 2014Added “enable_root_user” as a service parameterMassimiliano Pinto7th February 2014Addition of bind address informationClarification of user configuration required for monitoring users and the user needed to fetch the user dataMark Riddoch3rd March 2014MySQL authentication with hostnamesMassimiliano Pinto3rd March 2014Addition of section that describes authentication requirements and the rules for creating user credentialsMark Riddoch28th March 2014Unix socket supportMassimiliano Pinto8th   May   2014Added “version_string” parameter in serviceMassimiliano Pinto29th May 2014Added troubleshooting sectionMassimiliano Pinto2nd June 2014Correction of some typos, clarification of the meaning of session modification statements and the default user for the CLI.Addition of debugcli configuration option for developer and user modes.Mark Riddoch4th June 2014Addition of “monitor_interval” for monitorsMassimiliano Pinto6th June 2014Addition of filters sectionsMark Riddoch27th June 2014Addition of server weighting, the configuration for the maxadmin clientMark Riddoch2nd July 2014Addition of new readwritesplit router options with description and examples.Vilho Raatikka + + + + + <anchor id="h.g9ymbzaalazf"/><anchor id="h.y6q5rjfrde71"/>Introduction + + The purpose of this document is to describe how to configure MaxScale and to discuss some possible usage scenarios for MaxScale. MaxScale is designed with flexibility in mind, and consists of an event processing core with various support functions and plugin modules that tailor the behaviour of the MaxScale itself. 
+ + + + <anchor id="h.k498ywyi4d7y"/>Terms + + + TermDescriptionserviceA service represents a set of databases with a specific access mechanism that is offered to clients of MaxScale. The access mechanism defines the algorithm that MaxScale will use to direct particular requests to the individual databases.serverA server represents an individual database server to which a client can be connected via MaxScale.routerA router is a module within MaxScale that will route client requests to the various database servers which MaxScale provides a service interface to.connection routingConnection routing is a method of handling requests in which MaxScale will accept connections from a client and route data on that connection to a single database using a single connection. Connection based routing will not examine individual quests on a connection and it will not move that connection once it is established.statement routingStatement routing is a method of handling requests in which each request within a connection will be handled individually. Requests may be sent to one or more servers and connections may be dynamically added or removed from the session.protocolA protocol is a module of software that is used to communicate with another software entity within the system. MaxScale supports the dynamic loading of protocol modules to allow for increased flexibility.moduleA module is a separate code entity that may be loaded dynamically into MaxScale to increase the available functionality. Modules are implemented as run-time loadable shared objects.monitorA monitor is a module that can be executed within MaxScale to monitor the state of a set of database. The use of an internal monitor is optional, monitoring may be performed externally to MaxScale.listenerA listener is the network endpoint that is used to listen for connections to MaxScale from the client applications. 
A listener is associated to a single service, however a service may have many listeners.connection failoverWhen a connection currently being used between MaxScale and the database server fails a replacement will be automatically created to another server by MaxScale without client interventionbackend databaseA term used to refer to a database that sits behind MaxScale and is accessed by applications via MaxScale.filterA module that can be placed between the client and the MaxScale router module. All client data passes through the filter module and may be examined or modified by the filter modules.Filters may be chained together to form processing pipelines. + + + + + + + <anchor id="h.vk0ub0w64yy6"/> + + + + <anchor id="h.4o6tqejp081r"/><anchor id="h.ubko889dk8db"/>Configuration + + The MaxScale configuration is read from a file which can be located in a number of placing, MaxScale will search for the configuration file in a number of locations. + + + + If the environment variable MAXSCALE_HOME is set then MaxScale will look for a configuration file called MaxScale.cnf in the directory $MAXSCALE_HOME/etc + + + If MAXSCALE_HOME is not set or the configuration file is not in the location above MaxScale will look for a file in /etc/MaxScale.cnf + + + + Alternatively MaxScale can be started with the -c flag and the path of the MaxScale home directory tree. + + An explicit path to a configuration file can be passed by using the -f option to MaxScale. + + The configuration file itself is based on the “ini” file format and consists of various sections that are used to build the configuration, these sections define services, servers, listeners, monitors and global settings. + + + + <anchor id="h.w748vjpky28w"/>Global Settings + The global settings,  in a section named [MaxScale], allow various parameters that affect MaxScale as a whole to be tuned. Currently the only setting that is supported is the number of threads to use to handle the network traffic. 
MaxScale will also accept the section name of [gateway] for global settings. This is for backward compatibility with versions prior to the naming of MaxScale. + + + <anchor id="h.cc6sgznkcb5p"/>Threads + To control the number of threads that poll for network traffic set the parameter threads to a number. It is recommended that you start with a single thread and add more as you find the performance is not satisfactory. MaxScale is implemented to be very thread efficient, so a small number of threads is usually adequate to support reasonably heavy workloads.  Adding more threads may not improve performance and can consume resources needlessly. + + # Valid options are: + #       threads=<number of epoll threads> + [MaxScale] + threads=1 + + It should be noted that additional threads will be created to execute other internal services within MaxScale, this setting is merely used to configure the number of threads that will be used to manage the user connections. + + + <anchor id="h.7t3508ifyp7e"/>Service + A service represents the database service that MaxScale offers to the clients. In general a service consists of a set of backend database servers and a routing algorithm that determines how MaxScale decides to send statements or route connections to those backend servers. + + A service may be considered as a virtual database server that MaxScale makes available to its clients. + + Several different services may be defined using the same set of backend servers. For example a connection based routing service might be used by clients that already performed internal read/write splitting, whilst a different statement based router may be used by clients that are not written with this functionality in place. Both sets of applications could access the same data in the same databases. 
+ + A service is identified by a service name, which is the name of the configuration file section and a type parameter of service + + [Test Service] + type=service + + In order for MaxScale to forward any requests it must have at least one service defined within the configuration file. The definition of a service alone is not enough to allow MaxScale to forward requests however, the service is merely present to link together the other configuration elements. + + + <anchor id="h.jmc3h9lmhkvo"/>Router + The router parameter of a service defines the name of the router module that will be used to implement the routing algorithm between the client of MaxScale and the backend databases. Additionally routers may also be passed a comma separated list of options that are used to control the behaviour of the routing algorithm. The two parameters that control the routing choice are router and router_options. The router options are specific to a particular router and are used to modify the behaviour of the router. The read connection router can be passed options of master, slave or synced, an example of configuring a service to use this router and limiting the choice of servers to those in slave state would be as follows. + + router=readconnroute + router_options=slave + + To change the router to connect on to servers in the  master state as well as slave servers, the router options can be modified to include the master state. + + router=readconnroute + router_options=master,slave + + A more complete description of router options and what is available for a given router is included with the documentation of the router itself. + + + <anchor id="h.yy2uxdpf8a2h"/>Filters + The filters option allow a set of filters to be defined for a service; requests from the client are passed through these filters before being sent to the router for dispatch to the backend server.  
The filters parameter takes one or more filter names, as defined within the filter definition section of the configuration file. Multiple filters are separated using the | character. + + filters=counter | QLA + + The requests pass through the filters from left to right in the order defined in the configuration parameter. + + + <anchor id="h.5ls02rb3i8o6"/>Servers + The servers parameter in a service definition provides a comma separated list of the backend servers that comprise the service. The server names are those used in the name section of a block with a type parameter of server (see below). + + servers=server1,server2,server3 + + + <anchor id="h.fo8xmknd8dhy"/>User + The user parameter, along with the passwd parameter are used to define the credentials used to connect to the backend servers to extract the list of database users from the backend database that is used for the client authentication. + + user=maxscale + passwd=Mhu87p2D + + Authentication of incoming connections is performed by MaxScale itself rather than by the database server to which the client is connected. The client will authenticate itself with MaxScale, using the username, hostname and password information that MaxScale has extracted from the backend database servers. For a detailed discussion of how this impacts the authentication process please see the “Authentication” section below. + + The host matching criteria is restricted to IPv4, IPv6 will be added in a future release. + + Existing user configuration in the backend databases must be checked and may be updated before successful MaxScale authentication: + + + In order for MaxScale to obtain all the data it must be given a username it can use to connect to the database and retrieve that data. This is the parameter that gives MaxScale the username to use for this purpose. + + The account used must be able to select from the mysql.user table, the following is an example showing how to create this user. 
+ + MariaDB [mysql]> create user 'maxscale'@'maxscalehost' identified by 'Mhu87p2D'; + Query OK, 0 rows affected (0.01 sec) + + MariaDB [mysql]> grant SELECT on mysql.user to 'maxscale'@'maxscalehost'; + Query OK, 0 rows affected (0.00 sec) + + + <anchor id="h.vrlqacwiw4jg"/>Passwd + The auth parameter provides the password information for the above user and may be either a plain text password or it may be an encrypted password.  See the section on encrypting passwords for use in the MaxScale.cnf file. This user must be capable of connecting to the backend database and executing the SQL statement “SELECT user, host, password FROM mysql.user”. + + enable_root_user + This parameter controls the ability of the root user to connect to MaxScale and hence onwards to the backend servers via MaxScale. + + The default value is 0, disabling the ability of the root user to connect to MaxScale. + + Example for enabling root user: + enable_root_user=1 + + Values of “on” or “true” may also be given to enable the root user and “off” or “false” may be given to disable the use of the root user. + enable_root_user=true + + version_string + This parameter sets a custom version string that is sent in the MySQL Handshake from MaxScale to clients. + + Example: + version_string=5.5.37-MariaDB-RWsplit + + If not set, the default value is the server version of the embedded MySQL/MariaDB library. Example: 5.5.35-MariaDB + + + <anchor id="h.xj2swtiaru1h"/>weightby + The weightby parameter is used in conjunction with server parameters in order to control the load balancing applied in the router in use by the service. This allows varying weights to be applied to each server to create a non-uniform distribution of the load amongst the servers. + + An example of this might be to define a parameter for each server that represents the amount of resource available on the server, we could call this serversize. Every server should then have a serversize parameter set for the server. 
+ + serversize=10 + + The service would then have the parameter weightby set. If there are 4 servers defined in the service, serverA, serverB, serverC and serverD, with the serversize set as shown in the table below, the connections would balanced using the percentages in this table. + + + Serverserversize% connectionsserverA1018%serverB1527%serverC1018%serverD2036% + + + + + <anchor id="h.kd35wrn1e1ws"/>Server + + Server sections are used to define the backend database servers that can be formed into a service. A server may be a member of one or more services within MaxScale. Servers are identified by a server name which is the section name in the configuration file. Servers have a type parameter of server, plus address port and protocol parameters. + + [server1] + type=server + address=127.0.0.1 + port=3000 + protocol=MySQLBackend + + + <anchor id="h.cxj4lm9g1sy8"/>Address + The IP address or hostname of the machine running the database server that is being defined. MaxScale will use this address to connect to the backend database server. + + + <anchor id="h.ciwlfjhbqadx"/>Port + The port on which the database listens for incoming connections. MaxScale will use this port to connect to the database server. + + + <anchor id="h.5bsc6nq0s7v"/>Protocol + The name for the protocol module to use to connect MaxScale to the database. Currently only one backend protocol is supported, the MySQLBackend module. 
+ + + <anchor id="h.3h2ruf9uyi6h"/>Monitoruser + The monitor has a username and password that is used to connect to all servers for monitoring purposes, this may be overridden by supplying a monitoruser statement for each individual server + + monitoruser=mymonitoruser + + + <anchor id="h.mea194zh1ok7"/>MonitorPw + The monitor has a username and password that is used to connect to all servers for monitoring purposes, this may be overridden by supplying a monpasswd statement for the individual servers + + monitorpw=mymonitorpasswd + + The monpasswd parameter may be either a plain text password or it may be an encrypted password.  See the section on encrypting passwords for use in the MaxScale.cnf file. + + + + <anchor id="h.klgpjo2v8n3j"/>Listener + + The listener defines a port and protocol pair that is used to listen for connections to a service. A service may have multiple listeners associated with it, either to support multiple protocols or multiple ports. As with other elements of the configuration the section name is the listener name and a type parameter is used to identify the section as a listener definition. + + [Test Listener] + type=listener + service=Test Service + protocol=MySQLClient + address=localhost + port=4008 + socket=/tmp/testlistener.sock + + + <anchor id="h.fq271s2tm13u"/>Service + The service to which the listener is associated. This is the name of a service that is defined elsewhere in the configuration file. + + + <anchor id="h.fcq6bkajtpth"/>Protocol + The name of the protocol module that is used for the communication between the client and MaxScale itself. + + + <anchor id="h.7f2ts045r69r"/>Address + The address option sets the address that will be used to bind the listening socket. The address may be specified as an IP address in ‘dot notation’ or as a hostname. If the address option is not included in the listener definition the listener will bind to all network interfaces. 
+ + + <anchor id="h.qpt43h1jgrjt"/>Port + The port to use to listen for incoming connections to MaxScale from the clients. If the port is omitted from the configuration a default port for the protocol will be used. + + Socket + The socket option may be included in a listener definition, this configures the listener to use Unix domain sockets to listen for incoming connections. The parameter value given is the name of the socket to use. + + If a socket option and an address option is given then the listener will listen on both the specific IP address and the Unix socket. + + + + <anchor id="h.d83gvvdu0a6u"/>Filter + Filters provide a means to manipulate or process requests as they pass through MaxScale between the client side protocol and the query router. A filter should be defined in a section with a type of filter. + + [QLA] + type=filter + module=qlafilter + options=/tmp/QueryLog + + The section name may then be used in one or more services by using the filters= parameter in the service section. In order to use the above filter for a service called “QLA Service”, an entry of the following form would exist for that service. + + [QLA Service] + type=service + router=readconnroute + router_options=slave + servers=server1,server2,server3,server4 + user=massi + passwd=6628C50E07CCE1F0392EDEEB9D1203F3 + filters=QLA + + See the Services section for more details on how to configure the various options of a service. + + + <anchor id="h.yzitu3tel4mc"/>Module + The module parameter defines the name of the loadable module that implements the filter. + + + <anchor id="h.z2uocwc6sxb7"/>Options + The options parameter is used to pass options to the filter to control the actions the filter will perform. The values that can be passed differ between filter implementation, the inclusion of an options parameter is optional. 
+ + + <anchor id="h.hf5x5mwdf6p9"/>Other Parameters + Any other parameters present in the filters section will be passed to the filter to be interpreted by the filter. An example of this is the regexfilter that requires the two parameters match and replace + + [regex] + type=filter + module=regexfilter + match=form + replace=from + + + + <anchor id="h.i06or51fzsfy"/>Monitor + + In order for the various router modules to function correctly they require information about the state of the servers that are part of the service they provide. MaxScale has the ability to internally monitor the state of the back-end database servers or that state may be fed into MaxScale from external monitoring systems. If automated monitoring and failover of services is required this is achieved by running a monitor module that is designed for the particular database architecture that is in use. + + Monitors are defined in much the same way as other elements in the configuration file, with the section name being the name of the monitor instance and the type being set to monitor. + + [MySQL Monitor] + type=monitor + module=mysqlmon + servers=server1,server2,server3 + user=dbmonitoruser + passwd=dbmonitorpwd + monitor_interval=8000 + + + + <anchor id="h.gp2tqaepqn7z"/>Module + The module parameter defines the name of the loadable module that implements the monitor. This module is loaded and executed on a separate thread within MaxScale. + + + <anchor id="h.ivnmlnywz8ns"/>Servers + The servers parameter is a comma separated list of server names to monitor, these are the names defined elsewhere in the configuration file. The set of servers monitored by a single monitor need not be the same as the set of servers used within any particular service, a single monitor instance may monitor servers in multiple services. + + + <anchor id="h.3et4j1l44lwu"/>User + The user parameter defines the username that the monitor will use to connect to the monitored databases. 
Depending on the monitoring module used this user will require specific privileges in order to determine the state of the nodes, details of those privileges can be found in the sections on each of the monitor modules. + + Individual servers may define override values for the user and password the monitor uses by setting the monuser and monpasswd parameters in the server section. + + + + <anchor id="h.ipwrd09imfrk"/>Passwd + The password parameter may be either a plain text password or it may be an encrypted password. See the section on encrypting passwords for use in the MaxScale.cnf file. + + Monitor_interval + The monitor_interval parameter sets the sampling interval in milliseconds for each monitor, the default value is 10000 milliseconds. + + + + <anchor id="h.69fdt08mh3yp"/> + + + + <anchor id="h.y6n2m34i2k5c"/><anchor id="h.ygst0hfs8omz"/>Protocol Modules + The protocols supported by MaxScale are implemented as external modules that are loaded dynamically into the MaxScale core. These modules reside in the directory $MAXSCALE_HOME/module, if the environment variable $MAXSCALE_HOME is not set it defaults to /usr/local/skysql/MaxScale. It may also be set by passing the -c option on the MaxScale command line. + + + + <anchor id="h.a4918nf156ek"/>MySQLClient + + This is the implementation of the MySQL protocol that is used by clients of MaxScale to connect to MaxScale. + + + <anchor id="h.rldj49cubl6i"/>MySQLBackend + + The MySQLBackend protocol module is the implementation of the protocol that MaxScale uses to connect to the backend MySQL, MariaDB and Percona Server databases. This implementation is tailored for the MaxScale to MySQL Database traffic and is not a general purpose implementation of the MySQL protocol. + + + <anchor id="h.58d5zioxsja5"/>Telnetd + + The telnetd protocol module is used for connections to MaxScale itself for the purposes of creating interactive user sessions with the MaxScale instance itself. 
Currently this is used in conjunction with a special router implementation, the debugcli. + + + <anchor id="h.jdto0yzavzc9"/>maxscaled + The protocol used by the maxadmin client application in order to connect to MaxScale and access the command line interface. + + + <anchor id="h.duyws6hxz60c"/>HTTPD + + This protocol module is currently still under development, it provides a means to create HTTP connections to MaxScale for use by web browsers or RESTful API clients. + + + + <anchor id="h.jt9twr1lq0ka"/> + + + + <anchor id="h.gf20q4cljo58"/><anchor id="h.zgwd621heux4"/>Router Modules + The main task of MaxScale is to accept database connections from client applications and route the connections or the statements sent over those connections to the various services supported by MaxScale. + + There are two flavours of routing that MaxScale can perform, connection based routing and statement based routing. These each have their own characteristics and costs associated with them. + + + <anchor id="h.o5rkohp32mbd"/>Connection Based Routing + + Connection based routing is a mechanism by which MaxScale will, for each incoming connection decide on an appropriate outbound server and will forward all statements to that server without examining the internals of the statement. Once an inbound connection is associated to a particular backend database it will remain connected to that server until the connection is closed or the server fails. + + + <anchor id="h.d8vwkqs1j4ew"/>Statement Based Routing + + Statement based routing is somewhat different, the routing modules examine every statement the client sends and determines, on a per statement basis, which of the set of backend servers in the service is best to execute the statement. This gives better dynamic balancing of the load within the cluster but comes at a cost. The query router must understand the statement that is being routed and will typically need to parse the statement in order to achieve this. 
This parsing within the router adds a significant overhead to the cost of routing and makes this type of router only really suitable for loads in which the gains outweigh this added cost. + + + <anchor id="h.x51co1biupg7"/>Available Routing Modules + + Currently a small number of query routers are available, these are in different stages of completion and offer different facilities. + + + <anchor id="h.j90bj6uoru33"/>Readconnroute + This is a connection based query router that was originally targeted at environments in which the clients already performed splitting of read and write queries into separate connections. + + Whenever a new connection is received the router will examine the state of all the servers that form part of the service and route the connection to the server with least connections currently that matches the filter constraints given in the router options. This results in a balancing of the active connections, however different connections may have different lifetimes and the connections may become unbalanced when later viewed. + + The readconnroute router can be configured to balance the connections from the clients across all the backend servers that are running, just those backend servers that are currently replication slaves or those that are replication masters when routing to a master slave replication environment. When a Galera cluster environment is in use the servers can be filtered to just the set that are part of the cluster and in the ‘synced’ state. These options are configurable via the router_options that can be set within a service. The router_option strings supported are “master”, “slave” and “synced”. + + + + <anchor id="h.wtxa4l6bwnfm"/>Master/Slave Replication Setup + + To setup MaxScale to route connections evenly between all the current slave servers in a replication cluster, a service entry of the form shown below is required. 
+ + [Read Service] + type=service + router=readconnroute + router_options=slave + servers=server1,server2,server3,server4 + user=maxscale + auth=thepasswd + + With the addition of a listener for this service, which defines the port and protocol that MaxScale uses + [Read Listener] + type=listener + service=Read Service + protocol=MySQLClient + port=4006 + + the client can now connect to port 4006 on the host which is running MaxScale. Statements sent using this connection will then be routed to one of the slaves in the server set defined in the Read Service. Exactly which is selected will be determined by balancing the number of connections to each of those whose current state is “slave”. + + Altering the router options to be slave, master would result in the connections being balanced between all the servers within the cluster. + + It is assumed that the client will have a separate connection to the master server, however this can be routed via MaxScale, allowing MaxScale to manage the determination of which server is master. To do this you would add a second service and listener definition for the master server. + + [Write Service] + type=service + router=readconnroute + router_options=master + servers=server1,server2,server3,server4 + user=maxscale + auth=thepasswd + + [Write Listener] + type=listener + service=Write Service + protocol=MySQLClient + port=4007 + + This allows the clients to direct write requests to port 4007 and read requests to port 4006 of the MaxScale host without the clients needing to understand the configuration of the Master/Slave replication cluster. + + Connections to port 4007 would automatically be directed to the server that is the master for replication at the time connection is opened. Whilst this is a simple mapping to a single server it does give the advantage that the clients have no requirement to track which server is currently the master, devolving responsibility for managing the failover to MaxScale. 
+ + In order for MaxScale to be able to determine the state of these servers the mysqlmon monitor module should be run against the set of servers that comprise the service. + + + <anchor id="h.ekf2nk2y5rdv"/>Galera Cluster Configuration + + Although not primarily designed for a multi-master replication setup, it is possible to use the readconnroute in this situation.  The readconnroute connection router can be used to balance the connections across a Galera cluster. A special monitor is available that detects if nodes are joined to a Galera Cluster, with the addition of a router option to only route connections to nodes marked as synced. MaxScale can ensure that users are never connected to a node that is not a full cluster member. + + [Galera Service] + type=service + router=readconnroute + router_options=synced + servers=server1,server2,server3,server4 + user=maxscale + auth=thepasswd + + [Galera Listener] + type=listener + service=Galera Service + protocol=MySQLClient + port=3336 + + [Galera Monitor] + type=monitor + module=galeramon + servers=server1,server2,server3,server4 + user=galeramon + passwd=galeramon + + + The specialized Galera monitor can also select one of the node in the cluster as master, the others will be marked as slave. + These roles are only assigned to synced nodes. + + It then possible to have services/listeners with router_options=master or slave accessing a subset of all galera nodes. + The “synced” simply means: access all nodes. + + Examples: + + [Galera Master Service] + type=service + router=readconnroute + router_options=master + + [Galera Slave Service] + type=service + router=readconnroute + router_options=slave + + The Master and Slave roles are also available for the Read/Write Split router operation + + + <anchor id="h.xgjo4h9nd6wh"/>Readwritesplit + + The readwritesplit is a statement based router that has been designed for use within Master/Slave replication environments. 
It examines every statement, parsing it to determine if the statement falls into one of three categories; + + + read only statement + + + possible write statement + + + session modification statement A session modification statement is any statement that is executed that may affect the behaviour of subsequent statements within the current connection. Examples of such statements are the USE SQL statement or a SET statement using the SESSION scope. PREPARE STMT clauses are session statements in MaxScale since they are executed in every backend server. + + + Each of these three categories has a different action associated with it. Read only statements are sent to a slave server in the replication cluster. Possible write statements, which may include read statements that have an undeterminable side effect, are sent to the current replication master. Statements that modify the session are sent to all the servers, with the result that is generated by the master server being returned to the user. + + Session modification statements must be replicated as they affect the future results of read and write operations, so they must be executed on all servers that could execute statements on behalf of this client. + + Currently the readwritesplit router module is under development and has the following limitations: + + + Connection failover support has not yet been implemented. Client connections will fail if the master server fails over. + + + + + <anchor id="h.bfxsnbank28n"/>Master/Slave Replication Setup + + To setup the readwritesplit connection router in a master/slave failover environment is extremely simple, a service definition is required with the router defined for the service and an associated listener. + The router_options parameter is not required but it can be used to specify how slave(s) are selected. Available option is slave_selection_criteria and possible value are LEAST_BEHIND_MASTER and LEAST_CURRENT_OPERATIONS. 
+ max_slave_connections is a readwritesplit-only option, which sets the upper limit for the number of slaves a router session can use. max_slave_replication_lag is (currently) another readwritesplit-specific option, which sets maximum allowed lag for slave in seconds. The criteria is checked when router chooses slaves and only slaves having smaller lag are eligible for selection. The lag is not checked after connection phase. + + [Split Service] + type=service + router=readwritesplit + router_options=slave_selection_criteria=LEAST_BEHIND_MASTER + max_slave_connections=50% + max_slave_replication_lag=30 + servers=server1,server2,server3,server4 + user=maxscale + auth=thepasswd + + [Split Listener] + type=listener + service=Split Service + protocol=MySQLClient + port=3336 + + The client would merely connect to port 3336 on the MaxScale host and statements would be directed to the master or slave as appropriate. Determination of the master or slave status may be done via a monitor module within MaxScale or externally. In this latter case the server flags would need to be set via the MaxScale debug interface, in future versions an API will be available for this purpose. + + Galera Cluster Configuration + Master and Slave roles that galera monitor assign to nodes make possible the Read Write split approach to Galera Cluster as well. + + Simply configure a Split Service with galera nodes: + + [Galera Split Service] + type=service + router=readwritesplit + servers=galera_node1,galera_node2,galera_node3 + + + + + <anchor id="h.ocyc3m30hnsp"/>Debugcli + + The debugcli is a special case of a statement based router. Rather than direct the statements at an external data source they are handled internally. These statements are simple text commands and the results are the output of debug commands within MaxScale. The service and listener definitions for a debug cli service only differ from other services in that they require no backend server definitions. 
+ + + <anchor id="h.hqwc7nucjb6n"/>Debug CLI Configuration + + The definition of the debug cli service is illustrated below + + [Debug Service] + type=service + router=debugcli + + [Debug Listener] + type=listener + service=Debug Service + protocol=telnetd + port=4442 + + Connections using the telnet protocol to port 4442 of the MaxScale host will result in a new debug CLI session. A default username and password are used for this module, new users may be created using the add user command. As soon as any users are explicitly created the default username will no longer continue to work. The default username is admin with a password of skysql. + + The debugcli supports two modes of operation, developer mode and user mode. The mode is set via the router_options parameter of the debugcli. The user mode is more suited to end-users and administrators, whilst the develop mode is explicitly targeted to software developing adding or maintaining the MaxScale code base. Details of the differences between the modes can be found in the debugging guide for MaxScale. The default mode for the debugcli is user mode. The following service definition would enable a developer version of the debugcli. + + [Debug Service] + type=service + router=debugcli + router_options=developer + + It should be noted that both a user and a developer version of the debugcli may be defined within the same instance of MaxScale, however they must be defined as two distinct services, each with a distinct listener. + + [Debug Service] + type=service + router=debugcli + router_options=developer + + [Debug Listener] + type=listener + service=Debug Service + protocol=telnetd + port=4442 + + [Admin Service] + type=service + router=debugcli + + [Admin Listener] + type=listener + service=Debug Service + protocol=telnetd + port=4242 + + + <anchor id="h.eqh26vvsnoyx"/>CLI + The command line interface as used by maxadmin. 
This is a variant of the debugcli that is built slightly differently so that it may be accessed by the client application maxadmin. The CLI requires the use of the maxscaled protocol. + + + <anchor id="h.ufb8c39hpesa"/>CLI Configuration + There are two components to the definition required in order to run the command line interface to use with MaxAdmin; a service and a listener. + The default entries required are shown below. + + [CLI] + type=service + router=cli + + [CLI Listener] + type=listener + service=CLI + protocol=maxscaled + address=localhost + port=6603 + + Note that this uses the default port of 6603 and confines the connections to localhost connections only. Remove the address= entry to allow connections from any machine on your network. Changing the port from 6603 will mean that you must allows pass a -p option to the MaxAdmin command. + + + + <anchor id="h.8jmljd5b73ch"/> + + + + <anchor id="h.dubkezlgyiet"/><anchor id="h.1et7gierken4"/>Monitor Modules + Monitor modules are used by MaxScale to internally monitor the state of the backend databases in order to set the server flags for each of those servers. The router modules then use these flags to determine if the particular server is a suitable destination for routing connections for particular query classifications. The monitors are run within separate threads of MaxScale and do not affect the MaxScale performance. + + The use of monitors is optional, it is possible to run MaxScale with external monitoring, in which case arrangements must be made for an external entity to set the status of each of the servers that MaxScale can route to. + + + <anchor id="h.dmyjk8rz7ujv"/>Mysqlmon + + The MySQLMon monitor is a simple monitor designed for use with MySQL Master/Slave replication cluster. To execute the mysqlmon monitor an entry as shown below should be added to the MaxScale configuration file. 
+ + [MySQL Monitor] + type=monitor + module=mysqlmon + servers=server1,server2,server3,server4 + + This will monitor the 4 servers; server1, server2, server3 and server4. It will set the status of running or failed and master or slave for each of the servers. + + The monitor uses the username given in the monitor section or the server specific user that is given in the server section to connect to the server. This user must have sufficient permissions on the database to determine the state of replication. The roles that must be granted to this user are REPLICATION SLAVE and REPLICATION CLIENT. + + To create a user that can be used to monitor the state of the cluster, the following commands could be used. + + MariaDB [mysql]> create user 'maxscalemon'@'maxscalehost' identified by 'Ha79hjds'; + Query OK, 0 rows affected (0.01 sec) + + MariaDB [mysql]> grant REPLICATION SLAVE on *.* to 'maxscalemon'@'maxscalehost'; + Query OK, 0 rows affected (0.00 sec) + + MariaDB [mysql]> grant REPLICATION CLIENT on *.* to 'maxscalemon'@'maxscalehost'; + Query OK, 0 rows affected (0.00 sec) + + MariaDB [mysql]> + + Assuming that MaxScale is running on the host maxscalehost. + + + <anchor id="h.wymeq7isxh75"/>Galeramon + + The Galeramon monitor is a simple monitor designed for use with MySQL Galera cluster. To execute the galeramon monitor an entry as shown below should be added to the MaxScale configuration file. + + [Galera Monitor] + type=monitor + module=galeramon + servers=server1,server2,server3,server4 + + This will monitor the 4 servers; server1, server2, server3 and server4. It will set the status of running or failed and joined for those servers that reported the Galera JOINED status. + + The user that is configured for use with the Galera monitor must have sufficient privileges to select from the information_schema database and GLOBAL_STATUS table within that database. 
+ + To create a user that can be used to monitor the state of the cluster, the following commands could be used. + + MariaDB [mysql]> create user 'maxscalemon'@'maxscalehost' identified by 'Ha79hjds'; + Query OK, 0 rows affected (0.01 sec) + + MariaDB [mysql]> grant SELECT on INFORMATION_SCHEMA.GLOBAL_STATUS to 'maxscalemon'@'maxscalehost'; + Query OK, 0 rows affected (0.00 sec) + + MariaDB [mysql]> + + Assuming that MaxScale is running on the host maxscalehost. + + + The Galera monitor can also assign Master and Slave roles to the configured nodes: + + among the set of synced servers, the one with the lowest value of ‘wsrep_local_index’ is selected as the current master while the others are slaves. + + This way is possible to configure the node access based not only on ‘synced’ state but even on Master and Slave role enabling the use of Read Write split operation on a Galera cluster and avoiding any possible write conflict. + + Example status for a Galera server node is: + + Server 0x261fe50 (server2) +         Server:                192.168.1.101 +         Status:         Master, Synced, Running + + + + + + <anchor id="h.j1c67xshxln"/> + + + + <anchor id="h.sge7r9zx7x4"/><anchor id="h.qhqjtykx4m1m"/>Filter Modules + Currently four example filters are included in the MaxScale distribution + + + ModuleDescriptiontestfilterStatement counting Filter - a simple filter that counts the number of SQL statements executed within a session. Results may be viewed via the debug interface.qlafilterQuery Logging Filter - a simple query logging filter that write all statements for a session into a log file for that session.regexfilterQuery Rewrite Filter - an example of how filters can alter the query contents. This filter allows a regular expression to be defined, along with replacement text that should be substituted for every match of that regular expression.teeA filter that duplicates SQL requests and sends the duplicates to another service within MaxScale. 
+ + + These filters are merely examples of what may be achieved with the filter API and are not sophisticated or considered suitable for production use, they merely illustrate the functionality possible. + + + <anchor id="h.ysp4c1tvbz2t"/>Statement Counting Filter + The statement counting filter is implemented in the module named testfilter and merely keeps a count of the number of SQL statements executed. The filter requires no options to be passed and takes no parameters. The statement count can be viewed via the diagnostic and debug interface of MaxScale. + + In order to add this filter to an existing service create a filter section to name the filter as follows + + [counter] + type=filter + module=testfilter + + Then add the filter to your service by including the filters= parameter in the service section. + + filters=counter + + + + <anchor id="h.fsgdmoofnux2"/>Query Log All Filter + The QLA filter simply writes all SQL statements to a log file along with a timestamp for the statement. An example of the file produced by the QLA filter is shown below + + 00:36:04.922 5/06/2014, select @@version_comment limit 1 + 00:36:12.663 5/06/2014, SELECT DATABASE() + 00:36:12.664 5/06/2014, show databases + 00:36:12.665 5/06/2014, show tables + + A new file is created for each client connection, the name of the logfile can be controlled by the use of the router options. No parameters are used by the QLA filter. The filter is implemented by the loadable module qlafilter. + + To add the QLA filter to a service you must create a filter section to name the filter, associate the loadable module and define the filename option. 
+ + [QLA] + type=filter + module=qlafilter + options=/tmp/QueryLog + + Then add the filters= parameter into the service that you wish to log by adding this parameter to the service section + + filters=QLA + + A log file will be created for each client connection, the name of that log file will be /tmp/QueryLog.<number> + + + + <anchor id="h.uqxr7wxzkcl7"/>Regular Expression Filter + The regular expression filter is a simple text based query rewriting filter. It allows a regular expression to be used to match text in a SQL query and then a string replacement to be made against that match. The filter is implemented by the regexfilter loadable module and is passed two parameters, a match string and a replacement string. + + To add the filter to your service you must first create a filter section to name the filter and give the match and replacement strings. Here we define a filter that will convert to MariaDB 10 command show all slaves status to the older form of show slave status for MariaDB 5.5. + + [slavestatus] + type=filter + module=regexfilter + match=show *all *slaves + replace=show slave + + You must then add this filter to your service by adding the filters= option + + filters=slavestatus + Another example would be a filter to convert from the MySQL 5.1 create table syntax that used the TYPE keyword to the newer ENGINE keyword. + + [EnginerFilter] + type=filter + module=regexfilter + match=TYPE + replace=ENGINE + + This would then change the SQL sent by a client application written to work with MySQL 5.1 into SQL that was compliant with MySQL 5.5. The statement + + create table supplier(id integer, name varchar(80)) type=innodb + + would be replaced with + + create table supplier(id integer, name varchar(80)) ENGINE=innodb + + before being sent to the server. Note that the text in the match string is case independent. 
+ + + <anchor id="h.wle3ebajc9u7"/>Tee Filter + The tee filter is a filter module for MaxScale is a “plumbing” fitting in the MaxScale filter toolkit. It can be used in a filter pipeline of a service to make a copy of requests from the client and dispatch a copy of the request to another service within MaxScale. + + The configuration block for the TEE filter requires the minimal filter parameters in it’s section within the MaxScale.cnf file that defines the filter to load and the service to send the duplicates to. + + [ArchieveFilter] + type=filter + module=tee + service=Archieve + + In addition parameters may be added to define patterns to match against to either include or exclude particular SQL statements to be duplicated. You may also define that the filter is only active for connections from a particular source or when a particular user is connected. + + + + <anchor id="h.f7y85in8tebq"/> + + + + <anchor id="h.9f32pn7wijl8"/><anchor id="h.dc263y31zsda"/>Encrypting Passwords + + Passwords stored in the MaxScale.cnf file may optionally be encrypted for added security. This is done by creation of an encryption key on installation of MaxScale. Encryption keys may be created manually by executing the maxkeys utility with the argument of the filename to store the key. + + maxkeys $MAXSCALE_HOME/etc/.secrets + + Changing the encryption key for MaxScale will invalidate any currently encrypted keys stored in the MaxScale.cnf file. + + + + <anchor id="h.l0qb1nls7ei3"/>Creating Encrypted Passwords + + Encrypted passwords are created by executing the maxpasswd command with the password you require to encrypt as an argument. The environment variable MAXSCALE_HOME must be set, or MaxScale must be installed in the default location before maxpasswd can be executed. 
+ + maxpasswd MaxScalePw001 + 61DD955512C39A4A8BC4BB1E5F116705 + + The output of the maxpasswd command is a hexadecimal string, this should be inserted into the MaxScale.cnf file in place of the ordinary, plain text, password. MaxScale will determine this as an encrypted password and automatically decrypt it before sending it to the database server. + + [Split Service] + type=service + router=readwritesplit + servers=server1,server2,server3,server4 + user=maxscale + password=61DD955512C39A4A8BC4BB1E5F116705 + + + + <anchor id="h.4musj6ccb4a8"/> + + + + <anchor id="h.wm8guj3t9im2"/><anchor id="h.wnbvl0ktgm03"/>Configuration Updates + The current MaxScale configuration may be updated by editing the configuration file and then forcing MaxScale to reread the configuration file. To force MaxScale to reread the configuration file a SIGTERM signal is sent to the MaxScale process. + + Some changes in configuration can not be dynamically changed and require a complete restart of MaxScale, whilst others will take some time to be applied. + + + + <anchor id="h.5zjikdpkopj4"/>Limitations + Services that are removed via the configuration update mechanism can not be physically removed from MaxScale until there are no longer any connections using the service. + + When the number of threads is decreased the threads will not actually be terminated until such time as they complete the current operation of that thread. + + Monitors can not be completely removed from the running MaxScale. + + + + <anchor id="h.usbajk3czrtm"/> + + + + <anchor id="h.gaix3h5c8twh"/><anchor id="h.lwzmfeuphxuk"/>Authentication + MySQL uses username, passwords and the client host in order to authenticate a user, so a typical user would be defined as user X at host Y and would be given a password to connect. MaxScale uses exactly the same rules as MySQL when users connect to the MaxScale instance, i.e. 
it will check the address from which the client is connecting and treat this in exactly the same way that MySQL would. MaxScale will pull the authentication data from one of the backend servers and use this to match the incoming connections, the assumption being that all the backend servers for a particular service will share the same set of user credentials. + + It is important to understand, however, that when MaxScale itself makes connections to the backend servers the backend server will see all connections as originating from the host that runs MaxScale and not the original host from which the client connected to MaxScale. Therefore the backend servers should be configured to allow connections from the MaxScale host for every user that can connect from any host. Since there is only a single password within the database server for a given host, this limits the configuration such that a given user name must have the same password for every host from which they can connect. + + To clarify, if a user X is defined as using password pass1 from host a and pass2 from host b then there must be an entry in the user table for user X from the MaxScale host, say pass1. + + This would result in rows in the user table as follows + + UsernamePasswordClient HostXpass1aXpass2bXpass1MaxScale + + + + In this case the user X would be able to connect to MaxScale from host a giving the password of pass1. In addition MaxScale would be able to create connections for this user to the backend servers using the username X and password pass1, since the MaxScale host is also defined to have password pass1. User X would not however be able to connect from host b since they would need to provide the password pass2 in order to connect to MaxScale, but then MaxScale would not be able to connect to the backends as it would also use the password pass2 for these connections. 
+ + + <anchor id="h.mmwkd4ufl8il"/>Wildcard Hosts + + Hostname mapping in MaxScale works in exactly the same way as for MySQL; if the wildcard is used for the host then any host other than the localhost (127.0.0.1) will match. It is important to consider that the localhost check will be performed at the MaxScale level and at the MySQL server level. + + If MaxScale and the databases are on separate hosts there are two important changes in behaviour to consider: + + + + Clients running on the same machine as the backend database now may access the database using the wildcard entry. The localhost check between the client and MaxScale will allow the use of the wildcard, since the client is not running on the MaxScale host. Also the wildcard entry can be used on the database host as MaxScale is making that connection and it is not running on the same host as the database. + + + Clients running on the same host as MaxScale can not access the database via MaxScale using the wildcard entry since the connection to MaxScale will be from the localhost. These clients are able to access the database directly, as they will use the wildcard entry. + + + + If MaxScale is running on the same host as one or more of the database nodes to which it is acting as a proxy then the wildcard host entries can be used to connect to MaxScale but not to connect onwards to the database running on the same node. + + In all these cases the issue may be solved by adding an explicit entry for the localhost address that has the same password as the wildcard entry. This may be done using a statement as below for each of the databases that are required: + + MariaDB [mysql]> GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, DROP ON employee.* TO 'user1'@'localhost' IDENTIFIED BY 'xxx'; + Query OK, 0 rows affected (0.00 sec) + + + + <anchor id="h.tkrwvuo1c0qp"/>Limitations + + At the time of writing the authentication mechanism within MaxScale does not support IPV6 address matching in connections rules. 
This is also in line with the current protocol modules that do not support IPV6. + + Partial address matching, such as 10.%, is also not supported in the current version of MaxScale. + + + <anchor id="h.qk790nez3h72"/><anchor id="h.gaqtg0neens3"/>Error Reporting + MaxScale is designed to be executed as a service, therefore all error reports, including configuration errors, are written to the MaxScale error log file. MaxScale will log to a set of files in the directory $MAXSCALE_HOME/log, the only exception to this is if the log directory is not writable, in which case a message is sent to the standard error descriptor. + + + + Troubleshooting + MaxScale binds on TCP ports and UNIX sockets as well. + + If there is a local firewall in the server where MaxScale is installed, the IP and port must be configured in order to receive connections from outside. + + If the firewall is a network facility among all the involved servers, a configuration update is required as well. + + Example: + [Galera Listener] + type=listener +         address=192.168.3.33 +         port=4408 +         socket=/servers/maxscale/galera.sock +   + TCP/IP Traffic must be permitted to 192.168.3.33 port 4408 + + For Unix socket, the socket file path (example: /servers/maxscale/galera.sock) must be writable by the Unix user MaxScale runs as. + + + + + Page: 4 +
diff --git a/Documentation/filters/MariaDB MaxScale_ Filter_ Regex Filter.pdf b/Documentation/filters/MariaDB MaxScale_ Filter_ Regex Filter.pdf new file mode 100755 index 000000000..75c38183d Binary files /dev/null and b/Documentation/filters/MariaDB MaxScale_ Filter_ Regex Filter.pdf differ diff --git a/Documentation/filters/MariaDB MaxScale_ Filter_ Tee Filter.pdf b/Documentation/filters/MariaDB MaxScale_ Filter_ Tee Filter.pdf new file mode 100755 index 000000000..fa49fcb5f Binary files /dev/null and b/Documentation/filters/MariaDB MaxScale_ Filter_ Tee Filter.pdf differ diff --git a/Documentation/filters/MariaDB MaxScale_ Filter_ TopN Filter.pdf b/Documentation/filters/MariaDB MaxScale_ Filter_ TopN Filter.pdf new file mode 100755 index 000000000..ab7190c1e Binary files /dev/null and b/Documentation/filters/MariaDB MaxScale_ Filter_ TopN Filter.pdf differ diff --git a/Documentation/filters/Regex Filter.pdf b/Documentation/filters/Regex Filter.pdf deleted file mode 100644 index f45fef043..000000000 Binary files a/Documentation/filters/Regex Filter.pdf and /dev/null differ diff --git a/Documentation/filters/Tee Filter.pdf b/Documentation/filters/Tee Filter.pdf deleted file mode 100644 index f8de502ca..000000000 Binary files a/Documentation/filters/Tee Filter.pdf and /dev/null differ diff --git a/Documentation/filters/Top Filter.pdf b/Documentation/filters/Top Filter.pdf deleted file mode 100644 index a7cb2061d..000000000 Binary files a/Documentation/filters/Top Filter.pdf and /dev/null differ diff --git a/Documentation/history/MariaDB MaxScale 1.0.3 Release Notes.pdf b/Documentation/history/MariaDB MaxScale 1.0.3 Release Notes.pdf new file mode 100644 index 000000000..eb04e9146 Binary files /dev/null and b/Documentation/history/MariaDB MaxScale 1.0.3 Release Notes.pdf differ diff --git a/Documentation/MaxScale 1.0.1beta Release Notes.pdf b/Documentation/history/MaxScale 1.0.1beta Release Notes.pdf similarity index 100% rename from Documentation/MaxScale 
1.0.1beta Release Notes.pdf rename to Documentation/history/MaxScale 1.0.1beta Release Notes.pdf diff --git a/FindMySQLClient.cmake b/FindMySQLClient.cmake new file mode 100644 index 000000000..5b4f53eff --- /dev/null +++ b/FindMySQLClient.cmake @@ -0,0 +1,33 @@ +# This CMake file tries to find the the MySQL client library +# The following variables are set: +# MYSQLCLIENT_FOUND - System has MySQL client +# MYSQLCLIENT_STATIC_FOUND - System has statically linked MySQL client +# MYSQLCLIENT_LIBRARIES - The MySQL client library +# MYSQLCLIENT_STATIC_LIBRARIES - The static MySQL client library +# MYSQLCLIENT_HEADERS - The MySQL client headers + +find_library(MYSQLCLIENT_LIBRARIES NAMES mysqlclient PATH_SUFFIXES mysql mariadb) +if(${MYSQLCLIENT_LIBRARIES} MATCHES "NOTFOUND") + set(MYSQLCLIENT_FOUND FALSE CACHE INTERNAL "") + message(STATUS "Dynamic MySQL client library not found.") + unset(MYSQLCLIENT_LIBRARIES) +else() + set(MYSQLCLIENT_FOUND TRUE CACHE INTERNAL "") + message(STATUS "Found dynamic MySQL client library: ${MYSQLCLIENT_LIBRARIES}") +endif() + +set(OLD_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES}) +set(CMAKE_FIND_LIBRARY_SUFFIXES ".a") +find_library(MYSQLCLIENT_STATIC_LIBRARIES NAMES mysqlclient PATH_SUFFIXES mysql mariadb) +set(CMAKE_FIND_LIBRARY_SUFFIXES ${OLD_SUFFIXES}) + +if(${MYSQLCLIENT_STATIC_LIBRARIES} MATCHES "NOTFOUND") + set(MYSQLCLIENT_STATIC_FOUND FALSE CACHE INTERNAL "") + message(STATUS "Static MySQL client library not found.") + unset(MYSQLCLIENT_STATIC_LIBRARIES) +else() + set(MYSQLCLIENT_STATIC_FOUND TRUE CACHE INTERNAL "") + message(STATUS "Found statc MySQL client library: ${MYSQLCLIENT_STATIC_LIBRARIES}") +endif() + +find_path(MYSQLCLIENT_HEADERS mysql.h PATH_SUFFIXES mysql mariadb) \ No newline at end of file diff --git a/FindRabbitMQ.cmake b/FindRabbitMQ.cmake new file mode 100644 index 000000000..5f984e79a --- /dev/null +++ b/FindRabbitMQ.cmake @@ -0,0 +1,23 @@ +# This CMake file tries to find the the RabbitMQ library +# The following 
variables are set: +# RABBITMQ_FOUND - System has RabbitMQ client +# RABBITMQ_LIBRARIES - The RabbitMQ client library +# RABBITMQ_HEADERS - The RabbitMQ client headers +include(CheckCSourceCompiles) +find_library(RABBITMQ_LIBRARIES NAMES rabbitmq) +find_path(RABBITMQ_HEADERS amqp.h PATH_SUFFIXES mysql mariadb) + +if(${RABBITMQ_LIBRARIES} MATCHES "NOTFOUND") + set(RABBITMQ_FOUND FALSE CACHE INTERNAL "") + message(STATUS "RabbitMQ library not found.") + unset(RABBITMQ_LIBRARIES) +else() + set(RABBITMQ_FOUND TRUE CACHE INTERNAL "") + message(STATUS "Found RabbitMQ library: ${RABBITMQ_LIBRARIES}") +endif() + +set(CMAKE_REQUIRED_INCLUDES ${RABBITMQ_HEADERS}) +check_c_source_compiles("#include \n int main(){if(AMQP_DELIVERY_PERSISTENT){return 0;}return 1;}" HAVE_RABBITMQ50) +if(NOT HAVE_RABBITMQ50) + message(FATAL_ERROR "Old version of RabbitMQ-C library found. Version 0.5 or newer is required.") +endif() \ No newline at end of file diff --git a/FindValgrind.cmake b/FindValgrind.cmake new file mode 100644 index 000000000..977d0028a --- /dev/null +++ b/FindValgrind.cmake @@ -0,0 +1,13 @@ +# This CMake file tries to find the Valgrind executable +# The following variables are set: +# VALGRIND_FOUND - System has Valgrind +# VALGRIND_EXECUTABLE - The Valgrind executable file +find_program(VALGRIND_EXECUTABLE valgrind) +if(VALGRIND_EXECUTABLE STREQUAL "VALGRIND_EXECUTABLE-NOTFOUND") + message(STATUS "Valgrind not found.") + set(VALGRIND_FOUND FALSE CACHE INTERNAL "") + unset(VALGRIND_EXECUTABLE) +else() + message(STATUS "Valgrind found: ${VALGRIND_EXECUTABLE}") + set(VALGRIND_FOUND TRUE CACHE INTERNAL "") +endif() \ No newline at end of file diff --git a/Makefile b/Makefile index fe49583b0..aee79e163 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -# This file is distributed as part of the SkySQL Gateway. It is free +# This file is distributed as part of the MariaDB Corporation MaxScale. 
It is free # software: you can redistribute it and/or modify it under the terms of the # GNU General Public License as published by the Free Software Foundation, # version 2. @@ -12,7 +12,7 @@ # this program; if not, write to the Free Software Foundation, Inc., 51 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # -# Copyright SkySQL Ab 2013 +# Copyright MariaDB Corporation Ab 2013 # # Revision History # Date Who Description diff --git a/README b/README index 1cf93f3b6..023c89ce7 100644 --- a/README +++ b/README @@ -1,16 +1,16 @@ -/** \mainpage MariaDB MaxScale +/** \mainpage MaxScale by MariaDB Corporation -MaxScale is an intelligent proxy that allows forwarding of +The MariaDB Corporation MaxScale is an intelligent proxy that allows forwarding of database statements to one or more database servers using complex rules, a semantic understanding of the database statements and the roles of the various servers within the backend cluster of databases. MaxScale is designed to provide load balancing and high availability -functionality transparantly to the applications. In addition it provides +functionality transparently to the applications. In addition it provides a highly scalable and flexibile architecture, with plugin components to support different protocols and routing decissions. -MaxScale is implemented in C and makes entensive use of the +MaxScale is implemented in C and makes extensive use of the asynchronous I/O capabilities of the Linux operating system. The epoll system is used to provide the event driven framework for the input and output via sockets. @@ -28,99 +28,11 @@ issues and communicate with the MaxScale community. 
Send email to [maxscale@googlegroups.com](mailto:maxscale@googlegroups.com) or use the [forum](http://groups.google.com/forum/#!forum/maxscale) interface -Bugs can be reported in the MariaDB bugs database - [bug.skysql.com](http://bugs.mariadb.com) - -There are two options if you would like to try MaxScale, either building -from the source code contained in GitHub or by downloading pre-built packages -from the MariaDB website. http://www.mariadb.com/downloads. - -\section Branching policy within the MaxScale repository - -The branching policy within this repository is resonably simple, the master branch will always -contain the last released MaxScale code. The develop branch is the latest development copy of -MaxScale and is aimed to be always buildable and useable. Features will be developed on seperate -branches and only merged into the develop branch when they are complete. +Bugs can be reported in the MariaDB Corporation bugs database + [bug.mariadb.com](http://bugs.mariadb.com) \section Building Building MaxScale -There are two methods that make be used to build this version of MaxScale, either -with hadncrafted makefiles or by using cmake. CMake is a newer implementation of -the MaxScale build system and is the prefered choice for creating MaxScale executables -and packages from the source code. - -\section Building Building MaxScale with CMake - -You can also build MaxScale with CMake which makes the build process a bit more simple. - -All the same dependencies are required as with the normal MaxScale build with the addition of CMake -version 2.6 for regular builds and 2.8.12 or newer if you wish to generate packages. - -CMake tries to find all the required directories and files on its own but if it can't find them or you wish to -explicitly state the locations you can pass additional options to CMake by using the -D flag. 
To confirm the variable -values, you can run CMake in interactive mode by using the -i flag or use a CMake GUI (for example, ccmake for command line). - -It is highly recommended to make a separate build directory to build into. This keeps the source and build trees clean and -makes it easy to get rid of everything you built by simply deleting the build directory. - -To build MaxScale using CMake: - - cd - - mkdir build - - cd build - - cmake .. - - make - - make install - -This generates the required makefiles in the current directory, compiles and links all the programs and installs -all the required files in their right places. - -If you have your headers and libraries in non-standard locations, you can define those locations at configuration time as such: - - cmake -D= - -By default, MaxScale installs to '/usr/local/skysql/maxscale' and places init.d scripts and ldconfig files into their folders. Change the INSTALL_DIR -variable to your desired installation directory and set INSTALL_SYSTEM_FILES=N to prevent the init.d script and ldconfig file installation. - -If you run into any trouble while configuring CMake, you can always remove the 'CMakeCache.txt' file to clear CMake's -internal cache. This resets all values to their defaults and can be used to fix a 'stuck' configuration of CMake. This -is also a good reason why you should always build into a separate directory, because you can safely wipe the build directory clean without the -danger of deleting important files when something goes wrong. - -The default values that CMake uses can be found in the 'macros.cmake' file. If you wish to change these, edit the 'macros.cmake' file -or define the variables manually at configuration time. 
- -All the variables that control the CMake build process: - -INSTALL_DIR= Installation directory -BUILD_TYPE=[None|Debug|Release] Type of the build, defaults to Release (optimized) -INSTALL_SYSTEM_FILES=[Y|N] Install startup scripts and ld configuration files -EMBEDDED_LIB= Path to the embedded library location (libmysqld.a for static and libmysqld.so for dynamic) -MYSQL_DIR= Path to MySQL headers -ERRMSG= Path to errmsg.sys file -STATIC_EMBEDDED=[Y|N] Whether to link the static or the dynamic verson of the library -GCOV=[Y|N] Generate gcov output -OLEVEL=<0-3> Level of optimization -BUILD_TESTS=[Y|N] Build tests -DEPS_OK=[Y|N] Check dependencies, use N when you want to force a recheck of values -DEBUG_OUTPUT=[Y|N] Produce debugging output when configuring CMake -RABBITMQ_LIB= Path to RabbitMQ-C libraries -RABBITMQ_HEADERS= Path to RabbitMQ-C headers -MYSQL_CLIENT_LIB= Path to MySQL client libraries -MYSQL_CLIENT_HEADERS= Path to MySQL client headers - -/section Building MaxScale without cmake - -This method of building MaxScale was the initial mechanism that was implemented -however it has now been superceeded by the use of cmake. Currently it may still -be used, however the plan is to withdraw this mechanism by the end of 2014 and -move completely to the use of cmake for building MaxScale. - Edit the file build_gateway.inc in your MaxScale directory and set the ROOT_PATH to the directory in which you have installed the MaxScale source code. 
Set the INC_PATH/MYSQL_ROOT/MYSQL_HEADERS variables @@ -135,18 +47,22 @@ MariaDB-5.5.34-centos6-x86_64-common.rpm MariaDB-5.5.34-centos6-x86_64-compat.rpm MariaDB-5.5.34-centos6-x86_64-devel.rpm -Please backup any existent my.cnf file before installing the RPMs +Please backup any existing my.cnf file before installing the RPMs Install the RPM files using: rpm -i MariaDB-5.5.34-centos6-x86_64-common.rpm MariaDB-5.5.34-centos6-x86_64-compat.rpm MariaDB-5.5.34-centos6-x86_64-devel.rpm -Note, if you wish to relocate the package to avoid an exisitng MariaDB +Note, if you wish to relocate the package to avoid an existing MariaDB or MySQL installation you will need to use the --force option in addition to the --relocate option. rpm -i --force --relocate=/usr/=$PREFIX/usr/ MariaDB-5.5.34-centos6-x86_64-common.rpm MariaDB-5.5.34-centos6-x86_64-compat.rpm MariaDB-5.5.34-centos6-x86_64-devel.rpm +You can also use the included 'unpack_rpm.sh' script to unpack the RPMs without installing them. + + ./unpack_rpm + This README assumes $PREFIX = $HOME. MaxScale may be built with the embedded MariaDB library either linked @@ -178,7 +94,7 @@ Example: Please note the errmsg.sys file is NOT included in the RPMs at the -curent time, it must be taken from an existing MariaDB setup. The +current time, it must be taken from an existing MariaDB setup. The version of the errmsg.sys file must match the version of the developer package you are using. A version mismatch will cause the library to fail to initialise. @@ -240,6 +156,74 @@ max_connections=4096 Please check errmsg.sys is found in the MaxScale install_dir DEST/MaxScale/mysql +\section Building Building MaxScale with CMake + +You can also build MaxScale with CMake which makes the build process a bit more simple. + +All the same dependencies are required as with the normal MaxScale build with the addition of CMake +version 2.6 for regular builds and 2.8.12 or newer if you wish to generate packages. 
+ +CMake tries to find all the required directories and files on its own but if it can't find them or you wish to +explicitly state the locations you can pass additional options to CMake by using the -D flag. To confirm the variable +values, you can run CMake in interactive mode by using the -i flag or use a CMake GUI (for example, ccmake for command line). + +It is highly recommended to make a separate build directory to build into. This keeps the source and build trees clean and +makes it easy to get rid of everything you built by simply deleting the build directory. + +To build MaxScale using CMake: + + cd + + mkdir build + + cd build + + cmake .. + + make + + make install + +This generates the required makefiles in the current directory, compiles and links all the programs and installs +all the required files in their right places. + +If you have your headers and libraries in non-standard locations, you can define those locations at configuration time as such: + + cmake -D= + +By default, MaxScale installs to '/usr/local/skysql/maxscale' and places init.d scripts and ldconfig files into their folders. Change the INSTALL_DIR +variable to your desired installation directory and set INSTALL_SYSTEM_FILES=N to prevent the init.d script and ldconfig file installation. + +If you run into any trouble while configuring CMake, you can always remove the 'CMakeCache.txt' file to clear CMake's +internal cache. This resets all values to their defaults and can be used to fix a 'stuck' configuration of CMake. This +is also a good reason why you should always build into a separate directory, because you can safely wipe the build directory clean without the +danger of deleting important files when something goes wrong. + +The default values that CMake uses can be found in the 'macros.cmake' file. If you wish to change these, edit the 'macros.cmake' file +or define the variables manually at configuration time. 
+ +All the variables that control the CMake build process: + +INSTALL_DIR= Installation directory + +BUILD_TYPE= Type of the build. One of None, Debug, DebugSymbols, Optimized. (default None) + DebugSymbols enables debugging symbols, Debug enables debugging symbols and code, Optimized builds an optimized version. + +INSTALL_SYSTEM_FILES=[Y|N] Install startup scripts and ld configuration files +EMBEDDED_LIB= Path to the embedded library location (libmysqld.a for static and libmysqld.so for dynamic) +MYSQL_DIR= Path to MySQL headers +ERRMSG= Path to errmsg.sys file +STATIC_EMBEDDED=[Y|N] Whether to link the static or the dynamic verson of the library +GCOV=[Y|N] Generate gcov output +OLEVEL=<0-3> Level of optimization +BUILD_TESTS=[Y|N] Build tests +DEPS_OK=[Y|N] Check dependencies, use N when you want to force a recheck of values +DEBUG_OUTPUT=[Y|N] Produce debugging output when configuring CMake +RABBITMQ_LIBRARIES= Path to RabbitMQ-C libraries +RABBITMQ_HEADERS= Path to RabbitMQ-C headers +MYSQLCLIENT_LIBRARIES= Path to MySQL client libraries +MYSQLCLIENT_HEADERS= Path to MySQL client headers + \section Running Running MaxScale MaxScale consists of a core executable and a number of modules that implement diff --git a/VERSION b/VERSION index 1f6191473..ed69ddf2c 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.0.1-beta +1.0.2-beta diff --git a/client/Makefile b/client/Makefile index 22220db2d..e97cc878b 100644 --- a/client/Makefile +++ b/client/Makefile @@ -12,7 +12,7 @@ # this program; if not, write to the Free Software Foundation, Inc., 51 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
# -# Copyright SkySQL Ab 2014 +# Copyright MariaDB Corporation Ab 2014 # # Revision History # Date Who Description diff --git a/client/maxadmin.c b/client/maxadmin.c index 97459171e..6a3255b19 100644 --- a/client/maxadmin.c +++ b/client/maxadmin.c @@ -13,7 +13,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * - * Copyright SkySQL Ab 2014 + * Copyright MariaDB Corporation Ab 2014 */ /** @@ -183,11 +183,22 @@ char c; len += strlen(argv[i]) + 1; } - cmd = malloc(len); - strcpy(cmd, argv[optind]); - for (i = optind +1; i < argc; i++) { + cmd = malloc(len + (2 * argc)); // Allow for quotes + strncpy(cmd, argv[optind],len + (2 * argc)); + for (i = optind +1; i < argc; i++) + { strcat(cmd, " "); - strcat(cmd, argv[i]); + /* Arguments after the seconf are quoted to allow for names + * that contain white space + */ + if (i - optind > 1) + { + strcat(cmd, "\""); + strcat(cmd, argv[i]); + strcat(cmd, "\""); + } + else + strcat(cmd, argv[i]); } if (access(cmd, R_OK) == 0) @@ -317,6 +328,7 @@ int keepalive = 1; { fprintf(stderr, "Unable to connect to MaxScale at %s, %s: %s\n", hostname, port, strerror(errno)); + close(so); return -1; } if (setsockopt(so, SOL_SOCKET, diff --git a/client/test/maxadmin_test.sh b/client/test/maxadmin_test.sh index b04312da4..5540fc380 100644 --- a/client/test/maxadmin_test.sh +++ b/client/test/maxadmin_test.sh @@ -19,58 +19,58 @@ else fi maxadmin --password=skysql help >& /dev/null if [ $? -eq "1" ]; then - echo "Auth test (long option): Failed" + echo "Auth test (long option): Failed" failure=`expr $failure + 1` else passed=`expr $passed + 1` - echo "Auth test (long option): Passed" + echo "Auth test (long option): Passed" fi maxadmin -pskysql enable log debug >& /dev/null if [ $? 
-eq "1" ]; then - echo "Enable debug log: Failed" + echo "Enable debug log: Failed" failure=`expr $failure + 1` else passed=`expr $passed + 1` - echo "Enable debug log: Passed" + echo "Enable debug log: Passed" fi maxadmin -pskysql enable log trace >& /dev/null if [ $? -eq "1" ]; then - echo "Enable trace log: Failed" + echo "Enable trace log: Failed" failure=`expr $failure + 1` else passed=`expr $passed + 1` - echo "Enable trace log: Passed" + echo "Enable trace log: Passed" fi maxadmin -pskysql disable log debug >& /dev/null if [ $? -eq "1" ]; then - echo "Disable debug log: Failed" + echo "Disable debug log: Failed" failure=`expr $failure + 1` else passed=`expr $passed + 1` - echo "Disable debug log: Passed" + echo "Disable debug log: Passed" fi maxadmin -pskysql disable log trace >& /dev/null if [ $? -eq "1" ]; then - echo "Disable trace log: Failed" + echo "Disable trace log: Failed" failure=`expr $failure + 1` else passed=`expr $passed + 1` - echo "Disable trace log: Passed" + echo "Disable trace log: Passed" fi for cmd in clients dcbs filters listeners modules monitors services servers sessions threads do maxadmin -pskysql list $cmd | grep -s '-' >& /dev/null if [ $? -eq "1" ]; then - echo "list command ($cmd): Failed" + echo "list command ($cmd): Failed" failure=`expr $failure + 1` else passed=`expr $passed + 1` - echo "list command ($cmd): Passed" + echo "list command ($cmd): Passed" fi done @@ -206,5 +206,110 @@ do fi done +maxadmin -pskysql list services | \ + awk -F\| '{ if (NF > 1) { sub(/ +$/, "", $1); printf("show service \"%s\"\n", $1); } }' > script1.$$ +grep -cs "show service" script1.$$ >/dev/null +if [ $? -ne "0" ]; then + echo "list services: Failed" + failure=`expr $failure + 1` +else + passed=`expr $passed + 1` + echo "list services: Passed" +fi +maxadmin -pskysql script1.$$ | grep -cs 'Service' > /dev/null +if [ $? 
-ne "0" ]; then + echo "Show Service: Failed" + failure=`expr $failure + 1` +else + passed=`expr $passed + 1` + echo "Show Service: Passed" +fi +rm -f script1.$$ + + +maxadmin -pskysql list monitors | \ + awk -F\| '{ if (NF > 1) { sub(/ +$/, "", $1); printf("show monitor \"%s\"\n", $1); } }' > script1.$$ +grep -cs "show monitor" script1.$$ >/dev/null +if [ $? -ne "0" ]; then + echo "list monitors: Failed" + failure=`expr $failure + 1` +else + passed=`expr $passed + 1` + echo "list monitors: Passed" +fi +maxadmin -pskysql script1.$$ | grep -cs 'Monitor' > /dev/null +if [ $? -ne "0" ]; then + echo "Show Monitor: Failed" + failure=`expr $failure + 1` +else + passed=`expr $passed + 1` + echo "Show Monitor: Passed" +fi +rm -f script1.$$ + + +maxadmin -pskysql list sessions | \ + awk -F\| ' /^0x/ { if (NF > 1) { sub(/ +$/, "", $1); printf("show session \"%s\"\n", $1); } }' > script1.$$ +grep -cs "show session" script1.$$ >/dev/null +if [ $? -ne "0" ]; then + echo "list sessions: Failed" + failure=`expr $failure + 1` +else + passed=`expr $passed + 1` + echo "list sessions: Passed" +fi +maxadmin -pskysql script1.$$ | grep -cs 'Session' > /dev/null +if [ $? -ne "0" ]; then + echo "Show Session: Failed" + failure=`expr $failure + 1` +else + passed=`expr $passed + 1` + echo "Show Session: Passed" +fi +rm -f script1.$$ + + +maxadmin -pskysql list dcbs | \ + awk -F\| ' /^ 0x/ { if (NF > 1) { sub(/ +$/, "", $1); sub(/ 0x/, "0x", $1); printf("show dcb \"%s\"\n", $1); } }' > script1.$$ +grep -cs "show dcb" script1.$$ >/dev/null +if [ $? -ne "0" ]; then + echo "list dcbs: Failed" + failure=`expr $failure + 1` +else + passed=`expr $passed + 1` + echo "list dcbs: Passed" +fi +maxadmin -pskysql script1.$$ | grep -cs 'DCB' > /dev/null +if [ $? 
-ne "0" ]; then + echo "Show DCB: Failed" + failure=`expr $failure + 1` +else + passed=`expr $passed + 1` + echo "Show DCB: Passed" +fi +rm -f script1.$$ + + +maxadmin -pskysql list services | \ + awk -F\| '{ if (NF > 1) { sub(/ +$/, "", $1); printf("show dbusers \"%s\"\n", $1); } }' > script1.$$ +grep -cs "show dbusers" script1.$$ >/dev/null +if [ $? -ne "0" ]; then + echo "list services: Failed" + failure=`expr $failure + 1` +else + passed=`expr $passed + 1` + echo "list services: Passed" +fi +maxadmin -pskysql script1.$$ | grep -cs 'Users table data' > /dev/null +if [ $? -ne "0" ]; then + echo "Show dbusers: Failed" + failure=`expr $failure + 1` +else + passed=`expr $passed + 1` + echo "Show dbusers: Passed" +fi +rm -f script1.$$ + + echo "Test run complete. $passed passes, $failure failures" exit $failure diff --git a/cmake_uninstall.cmake.in b/cmake_uninstall.cmake.in new file mode 100644 index 000000000..7740ff850 --- /dev/null +++ b/cmake_uninstall.cmake.in @@ -0,0 +1,24 @@ +# "make uninstall" helper +# see http://www.cmake.org/Wiki/CMake_FAQ#Can_I_do_.22make_uninstall.22_with_CMake.3F + +if(NOT EXISTS "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt") + message(FATAL_ERROR "Cannot find install manifest: @CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt") +endif(NOT EXISTS "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt") + +file(READ "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt" files) +string(REGEX REPLACE "\n" ";" files "${files}") +foreach(file ${files}) + message(STATUS "Uninstalling $ENV{DESTDIR}${file}") + if(IS_SYMLINK "$ENV{DESTDIR}${file}" OR EXISTS "$ENV{DESTDIR}${file}") + exec_program( + "@CMAKE_COMMAND@" ARGS "-E remove \"$ENV{DESTDIR}${file}\"" + OUTPUT_VARIABLE rm_out + RETURN_VALUE rm_retval + ) + if(NOT "${rm_retval}" STREQUAL 0) + message(FATAL_ERROR "Problem when removing $ENV{DESTDIR}${file}") + endif(NOT "${rm_retval}" STREQUAL 0) + else(IS_SYMLINK "$ENV{DESTDIR}${file}" OR EXISTS "$ENV{DESTDIR}${file}") + message(STATUS "File 
$ENV{DESTDIR}${file} does not exist.") + endif(IS_SYMLINK "$ENV{DESTDIR}${file}" OR EXISTS "$ENV{DESTDIR}${file}") +endforeach(file) diff --git a/debian/changelog b/debian/changelog index e416d995a..846f67a10 100644 --- a/debian/changelog +++ b/debian/changelog @@ -2,9 +2,9 @@ maxscale (1.0-beta) UNRELEASED; urgency=low * Beta release - -- Timofey Turenko Fri, 05 Jul 2014 14:00:00 +0200 + -- Timofey Turenko Fri, 05 Jul 2014 14:00:00 +0200 maxscale (0.7-1) UNRELEASED; urgency=low * Initial release. (Closes: #XXXXXX) - -- Timofey Turenko Tue, 11 Mar 2014 22:59:35 +0200 + -- Timofey Turenko Tue, 11 Mar 2014 22:59:35 +0200 diff --git a/debian/control b/debian/control index be0a062ec..cb85e28ea 100644 --- a/debian/control +++ b/debian/control @@ -9,7 +9,7 @@ Package: maxscale Architecture: any Depends: ${shlibs:Depends}, ${misc:Depends} Description: MaxScale - The SkySQL MaxScale is an intelligent proxy that allows forwarding of + The MariaDB Corporation MaxScale is an intelligent proxy that allows forwarding of database statements to one or more database servers using complex rules, a semantic understanding of the database statements and the roles of the various servers within the backend cluster of databases. diff --git a/doxygate.in b/doxygate.in new file mode 100644 index 000000000..00e20b168 --- /dev/null +++ b/doxygate.in @@ -0,0 +1,1520 @@ +# Doxyfile 1.6.1 + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project +# +# All text after a hash (#) is considered a comment and will be ignored +# The format is: +# TAG = value [value, ...] +# For lists items can also be appended using: +# TAG += value [value, ...] 
+# Values that contain spaces should be placed between quotes (" ") + +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# This tag specifies the encoding used for all characters in the config file +# that follow. The default is UTF-8 which is also the encoding used for all +# text before the first occurrence of this tag. Doxygen uses libiconv (or the +# iconv built into libc) for the transcoding. See +# http://www.gnu.org/software/libiconv for the list of possible encodings. + +DOXYFILE_ENCODING = UTF-8 + +# The PROJECT_NAME tag is a single word (or a sequence of words surrounded +# by quotes) that should identify the project. + +PROJECT_NAME = MaxScale + +# The PROJECT_NUMBER tag can be used to enter a project or revision number. +# This could be handy for archiving the generated documentation or +# if some version control system is used. + +PROJECT_NUMBER = + +# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) +# base path where the generated documentation will be put. +# If a relative path is entered, it will be relative to the location +# where doxygen was started. If left blank the current directory will be used. + +OUTPUT_DIRECTORY = @CMAKE_BINARY_DIR@/Documentation + +# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create +# 4096 sub-directories (in 2 levels) under the output directory of each output +# format and will distribute the generated files over these directories. +# Enabling this option can be useful when feeding doxygen a huge amount of +# source files, where putting all generated files in the same directory would +# otherwise cause performance problems for the file system. + +CREATE_SUBDIRS = NO + +# The OUTPUT_LANGUAGE tag is used to specify the language in which all +# documentation generated by doxygen is written. 
Doxygen will use this +# information to generate all constant output in the proper language. +# The default language is English, other supported languages are: +# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, +# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German, +# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English +# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian, +# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrilic, Slovak, +# Slovene, Spanish, Swedish, Ukrainian, and Vietnamese. + +OUTPUT_LANGUAGE = English + +# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will +# include brief member descriptions after the members that are listed in +# the file and class documentation (similar to JavaDoc). +# Set to NO to disable this. + +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend +# the brief description of a member or function before the detailed description. +# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. + +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator +# that is used to form the text in various listings. Each string +# in this list, if found as the leading text of the brief description, will be +# stripped from the text and the result after processing the whole list, is +# used as the annotated text. Otherwise, the brief description is used as-is. 
+# If left blank, the following values are used ("$name" is automatically +# replaced with the name of the entity): "The $name class" "The $name widget" +# "The $name file" "is" "provides" "specifies" "contains" +# "represents" "a" "an" "the" + +ABBREVIATE_BRIEF = + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# Doxygen will generate a detailed section even if there is only a brief +# description. + +ALWAYS_DETAILED_SEC = NO + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all +# inherited members of a class in the documentation of that class as if those +# members were ordinary class members. Constructors, destructors and assignment +# operators of the base classes will not be shown. + +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full +# path before files name in the file list and in the header files. If set +# to NO the shortest path that makes the file name unique will be used. + +FULL_PATH_NAMES = YES + +# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag +# can be used to strip a user-defined part of the path. Stripping is +# only done if one of the specified strings matches the left-hand part of +# the path. The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the +# path to strip. + +STRIP_FROM_PATH = + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of +# the path mentioned in the documentation of a class, which tells +# the reader which header file to include in order to use a class. +# If left blank only the name of the header file containing the class +# definition is used. Otherwise one should specify the include paths that +# are normally passed to the compiler using the -I flag. + +STRIP_FROM_INC_PATH = + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter +# (but less readable) file names. 
This can be useful is your file systems +# doesn't support long names like on DOS, Mac, or CD-ROM. + +SHORT_NAMES = NO + +# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen +# will interpret the first line (until the first dot) of a JavaDoc-style +# comment as the brief description. If set to NO, the JavaDoc +# comments will behave just like regular Qt-style comments +# (thus requiring an explicit @brief command for a brief description.) + +JAVADOC_AUTOBRIEF = NO + +# If the QT_AUTOBRIEF tag is set to YES then Doxygen will +# interpret the first line (until the first dot) of a Qt-style +# comment as the brief description. If set to NO, the comments +# will behave just like regular Qt-style comments (thus requiring +# an explicit \brief command for a brief description.) + +QT_AUTOBRIEF = NO + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen +# treat a multi-line C++ special comment block (i.e. a block of //! or /// +# comments) as a brief description. This used to be the default behaviour. +# The new default is to treat a multi-line C++ comment block as a detailed +# description. Set this tag to YES if you prefer the old behaviour instead. + +MULTILINE_CPP_IS_BRIEF = NO + +# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented +# member inherits the documentation from any documented member that it +# re-implements. + +INHERIT_DOCS = YES + +# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce +# a new page for each member. If set to NO, the documentation of a member will +# be part of the file/class/namespace that contains it. + +SEPARATE_MEMBER_PAGES = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. +# Doxygen uses this value to replace tabs by spaces in code fragments. + +TAB_SIZE = 8 + +# This tag can be used to specify a number of aliases that acts +# as commands in the documentation. An alias has the form "name=value". 
+# For example adding "sideeffect=\par Side Effects:\n" will allow you to +# put the command \sideeffect (or @sideeffect) in the documentation, which +# will result in a user-defined paragraph with heading "Side Effects:". +# You can put \n's in the value part of an alias to insert newlines. + +ALIASES = + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C +# sources only. Doxygen will then generate output that is more tailored for C. +# For instance, some of the names that are used will be different. The list +# of all members will be omitted, etc. + +OPTIMIZE_OUTPUT_FOR_C = YES + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java +# sources only. Doxygen will then generate output that is more tailored for +# Java. For instance, namespaces will be presented as packages, qualified +# scopes will look different, etc. + +OPTIMIZE_OUTPUT_JAVA = NO + +# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran +# sources only. Doxygen will then generate output that is more tailored for +# Fortran. + +OPTIMIZE_FOR_FORTRAN = NO + +# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL +# sources. Doxygen will then generate output that is tailored for +# VHDL. + +OPTIMIZE_OUTPUT_VHDL = NO + +# Doxygen selects the parser to use depending on the extension of the files it parses. +# With this tag you can assign which parser to use for a given extension. +# Doxygen has a built-in mapping, but you can override or extend it using this tag. +# The format is ext=language, where ext is a file extension, and language is one of +# the parsers supported by doxygen: IDL, Java, Javascript, C#, C, C++, D, PHP, +# Objective-C, Python, Fortran, VHDL, C, C++. For instance to make doxygen treat +# .inc files as Fortran files (default is PHP), and .f files as C (default is Fortran), +# use: inc=Fortran f=C. 
Note that for custom extensions you also need to set FILE_PATTERNS otherwise the files are not read by doxygen. + +EXTENSION_MAPPING = + +# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want +# to include (a tag file for) the STL sources as input, then you should +# set this tag to YES in order to let doxygen match functions declarations and +# definitions whose arguments contain STL classes (e.g. func(std::string); v.s. +# func(std::string) {}). This also make the inheritance and collaboration +# diagrams that involve STL classes more complete and accurate. + +BUILTIN_STL_SUPPORT = NO + +# If you use Microsoft's C++/CLI language, you should set this option to YES to +# enable parsing support. + +CPP_CLI_SUPPORT = NO + +# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. +# Doxygen will parse them like normal C++ but will assume all classes use public +# instead of private inheritance when no explicit protection keyword is present. + +SIP_SUPPORT = NO + +# For Microsoft's IDL there are propget and propput attributes to indicate getter +# and setter methods for a property. Setting this option to YES (the default) +# will make doxygen to replace the get and set methods by a property in the +# documentation. This will only work if the methods are indeed getting or +# setting a simple type. If this is not the case, or you want to show the +# methods anyway, you should set this option to NO. + +IDL_PROPERTY_SUPPORT = YES + +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES, then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. By default +# all members of a group must be documented explicitly. 
+ +DISTRIBUTE_GROUP_DOC = NO + +# Set the SUBGROUPING tag to YES (the default) to allow class member groups of +# the same type (for instance a group of public functions) to be put as a +# subgroup of that type (e.g. under the Public Functions section). Set it to +# NO to prevent subgrouping. Alternatively, this can be done per class using +# the \nosubgrouping command. + +SUBGROUPING = YES + +# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum +# is documented as struct, union, or enum with the name of the typedef. So +# typedef struct TypeS {} TypeT, will appear in the documentation as a struct +# with name TypeT. When disabled the typedef will appear as a member of a file, +# namespace, or class. And the struct will be named TypeS. This can typically +# be useful for C code in case the coding convention dictates that all compound +# types are typedef'ed and only the typedef is referenced, never the tag name. + +TYPEDEF_HIDES_STRUCT = NO + +# The SYMBOL_CACHE_SIZE determines the size of the internal cache use to +# determine which symbols to keep in memory and which to flush to disk. +# When the cache is full, less often used symbols will be written to disk. +# For small to medium size projects (<1000 input files) the default value is +# probably good enough. For larger projects a too small cache size can cause +# doxygen to be busy swapping symbols to and from disk most of the time +# causing a significant performance penality. +# If the system has enough physical memory increasing the cache will improve the +# performance by keeping more symbols in memory. Note that the value works on +# a logarithmic scale so increasing the size by one will rougly double the +# memory usage. The cache size is given by this formula: +# 2^(16+SYMBOL_CACHE_SIZE). 
The valid range is 0..9, the default is 0, +# corresponding to a cache size of 2^16 = 65536 symbols + +SYMBOL_CACHE_SIZE = 0 + +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- + +# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in +# documentation are documented, even if no documentation was available. +# Private class members and static file members will be hidden unless +# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES + +EXTRACT_ALL = YES + +# If the EXTRACT_PRIVATE tag is set to YES all private members of a class +# will be included in the documentation. + +EXTRACT_PRIVATE = NO + +# If the EXTRACT_STATIC tag is set to YES all static members of a file +# will be included in the documentation. + +EXTRACT_STATIC = YES + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) +# defined locally in source files will be included in the documentation. +# If set to NO only classes defined in header files are included. + +EXTRACT_LOCAL_CLASSES = YES + +# This flag is only useful for Objective-C code. When set to YES local +# methods, which are defined in the implementation section but not in +# the interface are included in the documentation. +# If set to NO (the default) only methods in the interface are included. + +EXTRACT_LOCAL_METHODS = NO + +# If this flag is set to YES, the members of anonymous namespaces will be +# extracted and appear in the documentation as a namespace called +# 'anonymous_namespace{file}', where file will be replaced with the base +# name of the file that contains the anonymous namespace. By default +# anonymous namespace are hidden. + +EXTRACT_ANON_NSPACES = NO + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all +# undocumented members of documented classes, files or namespaces. 
+# If set to NO (the default) these members will be included in the +# various overviews, but no documentation section is generated. +# This option has no effect if EXTRACT_ALL is enabled. + +HIDE_UNDOC_MEMBERS = NO + +# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. +# If set to NO (the default) these classes will be included in the various +# overviews. This option has no effect if EXTRACT_ALL is enabled. + +HIDE_UNDOC_CLASSES = NO + +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all +# friend (class|struct|union) declarations. +# If set to NO (the default) these declarations will be included in the +# documentation. + +HIDE_FRIEND_COMPOUNDS = NO + +# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any +# documentation blocks found inside the body of a function. +# If set to NO (the default) these blocks will be appended to the +# function's detailed documentation block. + +HIDE_IN_BODY_DOCS = NO + +# The INTERNAL_DOCS tag determines if documentation +# that is typed after a \internal command is included. If the tag is set +# to NO (the default) then the documentation will be excluded. +# Set it to YES to include the internal documentation. + +INTERNAL_DOCS = NO + +# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate +# file names in lower-case letters. If set to YES upper-case letters are also +# allowed. This is useful if you have classes or files whose names only differ +# in case and if your file system supports case sensitive file names. Windows +# and Mac users are advised to set this option to NO. + +CASE_SENSE_NAMES = YES + +# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen +# will show members with their full class and namespace scopes in the +# documentation. If set to YES the scope will be hidden. 
+ +HIDE_SCOPE_NAMES = NO + +# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen +# will put a list of the files that are included by a file in the documentation +# of that file. + +SHOW_INCLUDE_FILES = YES + +# If the INLINE_INFO tag is set to YES (the default) then a tag [inline] +# is inserted in the documentation for inline members. + +INLINE_INFO = YES + +# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen +# will sort the (detailed) documentation of file and class members +# alphabetically by member name. If set to NO the members will appear in +# declaration order. + +SORT_MEMBER_DOCS = YES + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the +# brief documentation of file, namespace and class members alphabetically +# by member name. If set to NO (the default) the members will appear in +# declaration order. + +SORT_BRIEF_DOCS = NO + +# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the (brief and detailed) documentation of class members so that constructors and destructors are listed first. If set to NO (the default) the constructors will appear in the respective orders defined by SORT_MEMBER_DOCS and SORT_BRIEF_DOCS. This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO. + +SORT_MEMBERS_CTORS_1ST = NO + +# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the +# hierarchy of group names into alphabetical order. If set to NO (the default) +# the group names will appear in their defined order. + +SORT_GROUP_NAMES = NO + +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be +# sorted by fully-qualified names, including namespaces. If set to +# NO (the default), the class list will be sorted only by class name, +# not including the namespace part. +# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. 
+# Note: This option applies only to the class list, not to the +# alphabetical list. + +SORT_BY_SCOPE_NAME = NO + +# The GENERATE_TODOLIST tag can be used to enable (YES) or +# disable (NO) the todo list. This list is created by putting \todo +# commands in the documentation. + +GENERATE_TODOLIST = YES + +# The GENERATE_TESTLIST tag can be used to enable (YES) or +# disable (NO) the test list. This list is created by putting \test +# commands in the documentation. + +GENERATE_TESTLIST = YES + +# The GENERATE_BUGLIST tag can be used to enable (YES) or +# disable (NO) the bug list. This list is created by putting \bug +# commands in the documentation. + +GENERATE_BUGLIST = YES + +# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or +# disable (NO) the deprecated list. This list is created by putting +# \deprecated commands in the documentation. + +GENERATE_DEPRECATEDLIST= YES + +# The ENABLED_SECTIONS tag can be used to enable conditional +# documentation sections, marked by \if sectionname ... \endif. + +ENABLED_SECTIONS = + +# The MAX_INITIALIZER_LINES tag determines the maximum number of lines +# the initial value of a variable or define consists of for it to appear in +# the documentation. If the initializer consists of more lines than specified +# here it will be hidden. Use a value of 0 to hide initializers completely. +# The appearance of the initializer of individual variables and defines in the +# documentation can be controlled using \showinitializer or \hideinitializer +# command in the documentation regardless of this setting. + +MAX_INITIALIZER_LINES = 30 + +# Set the SHOW_USED_FILES tag to NO to disable the list of files generated +# at the bottom of the documentation of classes and structs. If set to YES the +# list will mention the files that were used to generate the documentation. 
+ +SHOW_USED_FILES = YES + +# If the sources in your project are distributed over multiple directories +# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy +# in the documentation. The default is NO. + +SHOW_DIRECTORIES = YES + +# Set the SHOW_FILES tag to NO to disable the generation of the Files page. +# This will remove the Files entry from the Quick Index and from the +# Folder Tree View (if specified). The default is YES. + +SHOW_FILES = YES + +# Set the SHOW_NAMESPACES tag to NO to disable the generation of the +# Namespaces page. +# This will remove the Namespaces entry from the Quick Index +# and from the Folder Tree View (if specified). The default is YES. + +SHOW_NAMESPACES = YES + +# The FILE_VERSION_FILTER tag can be used to specify a program or script that +# doxygen should invoke to get the current version for each file (typically from +# the version control system). Doxygen will invoke the program by executing (via +# popen()) the command , where is the value of +# the FILE_VERSION_FILTER tag, and is the name of an input file +# provided by doxygen. Whatever the program writes to standard output +# is used as the file version. See the manual for examples. + +FILE_VERSION_FILTER = + +# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed by +# doxygen. The layout file controls the global structure of the generated output files +# in an output format independent way. The create the layout file that represents +# doxygen's defaults, run doxygen with the -l option. You can optionally specify a +# file name after the option, if omitted DoxygenLayout.xml will be used as the name +# of the layout file. 
+ +LAYOUT_FILE = + +#--------------------------------------------------------------------------- +# configuration options related to warning and progress messages +#--------------------------------------------------------------------------- + +# The QUIET tag can be used to turn on/off the messages that are generated +# by doxygen. Possible values are YES and NO. If left blank NO is used. + +QUIET = NO + +# The WARNINGS tag can be used to turn on/off the warning messages that are +# generated by doxygen. Possible values are YES and NO. If left blank +# NO is used. + +WARNINGS = YES + +# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings +# for undocumented members. If EXTRACT_ALL is set to YES then this flag will +# automatically be disabled. + +WARN_IF_UNDOCUMENTED = YES + +# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as not documenting some +# parameters in a documented function, or documenting parameters that +# don't exist or using markup commands wrongly. + +WARN_IF_DOC_ERROR = YES + +# This WARN_NO_PARAMDOC option can be abled to get warnings for +# functions that are documented, but have no documentation for their parameters +# or return value. If set to NO (the default) doxygen will only warn about +# wrong or incomplete parameter documentation, but not about the absence of +# documentation. + +WARN_NO_PARAMDOC = NO + +# The WARN_FORMAT tag determines the format of the warning messages that +# doxygen can produce. The string should contain the $file, $line, and $text +# tags, which will be replaced by the file and line number from which the +# warning originated and the warning text. 
Optionally the format may contain +# $version, which will be replaced by the version of the file (if it could +# be obtained via FILE_VERSION_FILTER) + +WARN_FORMAT = "$file:$line: $text" + +# The WARN_LOGFILE tag can be used to specify a file to which warning +# and error messages should be written. If left blank the output is written +# to stderr. + +WARN_LOGFILE = + +#--------------------------------------------------------------------------- +# configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag can be used to specify the files and/or directories that contain +# documented source files. You may enter file names like "myfile.cpp" or +# directories like "/usr/src/myproject". Separate the files or directories +# with spaces. + +INPUT = @CMAKE_SOURCE_DIR@/README @CMAKE_SOURCE_DIR@/server/core @CMAKE_SOURCE_DIR@/server/modules @CMAKE_SOURCE_DIR@/server/include \ + @CMAKE_SOURCE_DIR@/log_manager @CMAKE_SOURCE_DIR@/query_classifier @CMAKE_SOURCE_DIR@/utils + +# This tag can be used to specify the character encoding of the source files +# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is +# also the default input encoding. Doxygen uses libiconv (or the iconv built +# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for +# the list of possible encodings. + +INPUT_ENCODING = UTF-8 + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp +# and *.h) to filter out the source-files in the directories. 
If left +# blank the following patterns are tested: +# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx +# *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py *.f90 + +FILE_PATTERNS = *.c *.h + +# The RECURSIVE tag can be used to turn specify whether or not subdirectories +# should be searched for input files as well. Possible values are YES and NO. +# If left blank NO is used. + +RECURSIVE = YES + +# The EXCLUDE tag can be used to specify files and/or directories that should +# excluded from the INPUT source files. This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. + +EXCLUDE = + +# The EXCLUDE_SYMLINKS tag can be used select whether or not files or +# directories that are symbolic links (a Unix filesystem feature) are excluded +# from the input. + +EXCLUDE_SYMLINKS = NO + +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# certain files from those directories. Note that the wildcards are matched +# against the file with absolute path, so to exclude all test directories +# for example use the pattern */test/* + +EXCLUDE_PATTERNS = + +# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names +# (namespaces, classes, functions, etc.) that should be excluded from the +# output. The symbol name can be a fully qualified name, a word, or if the +# wildcard * is used, a substring. Examples: ANamespace, AClass, +# AClass::ANamespace, ANamespace::*Test + +EXCLUDE_SYMBOLS = + +# The EXAMPLE_PATH tag can be used to specify one or more files or +# directories that contain example code fragments that are included (see +# the \include command). 
+ +EXAMPLE_PATH = + +# If the value of the EXAMPLE_PATH tag contains directories, you can use the +# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp +# and *.h) to filter out the source-files in the directories. If left +# blank all files are included. + +EXAMPLE_PATTERNS = + +# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be +# searched for input files to be used with the \include or \dontinclude +# commands irrespective of the value of the RECURSIVE tag. +# Possible values are YES and NO. If left blank NO is used. + +EXAMPLE_RECURSIVE = NO + +# The IMAGE_PATH tag can be used to specify one or more files or +# directories that contain image that are included in the documentation (see +# the \image command). + +IMAGE_PATH = + +# The INPUT_FILTER tag can be used to specify a program that doxygen should +# invoke to filter for each input file. Doxygen will invoke the filter program +# by executing (via popen()) the command , where +# is the value of the INPUT_FILTER tag, and is the name of an +# input file. Doxygen will then use the output that the filter program writes +# to standard output. +# If FILTER_PATTERNS is specified, this tag will be +# ignored. + +INPUT_FILTER = + +# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern +# basis. +# Doxygen will compare the file name with each pattern and apply the +# filter if there is a match. +# The filters are a list of the form: +# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further +# info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER +# is applied to all files. + +FILTER_PATTERNS = + +# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using +# INPUT_FILTER) will be used to filter the input files when producing source +# files to browse (i.e. when SOURCE_BROWSER is set to YES). 
+ +FILTER_SOURCE_FILES = NO + +#--------------------------------------------------------------------------- +# configuration options related to source browsing +#--------------------------------------------------------------------------- + +# If the SOURCE_BROWSER tag is set to YES then a list of source files will +# be generated. Documented entities will be cross-referenced with these sources. +# Note: To get rid of all source code in the generated output, make sure also +# VERBATIM_HEADERS is set to NO. + +SOURCE_BROWSER = YES + +# Setting the INLINE_SOURCES tag to YES will include the body +# of functions and classes directly in the documentation. + +INLINE_SOURCES = NO + +# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct +# doxygen to hide any special comment blocks from generated source code +# fragments. Normal C and C++ comments will always remain visible. + +STRIP_CODE_COMMENTS = YES + +# If the REFERENCED_BY_RELATION tag is set to YES +# then for each documented function all documented +# functions referencing it will be listed. + +REFERENCED_BY_RELATION = NO + +# If the REFERENCES_RELATION tag is set to YES +# then for each documented function all documented entities +# called/used by that function will be listed. + +REFERENCES_RELATION = NO + +# If the REFERENCES_LINK_SOURCE tag is set to YES (the default) +# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from +# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will +# link to the source code. +# Otherwise they will link to the documentation. + +REFERENCES_LINK_SOURCE = YES + +# If the USE_HTAGS tag is set to YES then the references to source code +# will point to the HTML generated by the htags(1) tool instead of doxygen +# built-in source browser. The htags tool is part of GNU's global source +# tagging system (see http://www.gnu.org/software/global/global.html). You +# will need version 4.8.6 or higher. 
+ +USE_HTAGS = NO + +# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen +# will generate a verbatim copy of the header file for each class for +# which an include is specified. Set to NO to disable this. + +VERBATIM_HEADERS = YES + +#--------------------------------------------------------------------------- +# configuration options related to the alphabetical class index +#--------------------------------------------------------------------------- + +# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index +# of all compounds will be generated. Enable this if the project +# contains a lot of classes, structs, unions or interfaces. + +ALPHABETICAL_INDEX = NO + +# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then +# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns +# in which this list will be split (can be a number in the range [1..20]) + +COLS_IN_ALPHA_INDEX = 5 + +# In case all classes in a project start with a common prefix, all +# classes will be put under the same header in the alphabetical index. +# The IGNORE_PREFIX tag can be used to specify one or more prefixes that +# should be ignored while generating the index headers. + +IGNORE_PREFIX = + +#--------------------------------------------------------------------------- +# configuration options related to the HTML output +#--------------------------------------------------------------------------- + +# If the GENERATE_HTML tag is set to YES (the default) Doxygen will +# generate HTML output. + +GENERATE_HTML = YES + +# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `html' will be used as the default path. + +HTML_OUTPUT = html + +# The HTML_FILE_EXTENSION tag can be used to specify the file extension for +# each generated HTML page (for example: .htm,.php,.asp). 
If it is left blank +# doxygen will generate files with .html extension. + +HTML_FILE_EXTENSION = .html + +# The HTML_HEADER tag can be used to specify a personal HTML header for +# each generated HTML page. If it is left blank doxygen will generate a +# standard header. + +HTML_HEADER = + +# The HTML_FOOTER tag can be used to specify a personal HTML footer for +# each generated HTML page. If it is left blank doxygen will generate a +# standard footer. + +HTML_FOOTER = + +# If the HTML_TIMESTAMP tag is set to YES then the generated HTML +# documentation will contain the timesstamp. + +HTML_TIMESTAMP = NO + +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading +# style sheet that is used by each HTML page. It can be used to +# fine-tune the look of the HTML output. If the tag is left blank doxygen +# will generate a default style sheet. Note that doxygen will try to copy +# the style sheet file to the HTML output directory, so don't put your own +# stylesheet in the HTML output directory as well, or it will be erased! + +HTML_STYLESHEET = + +# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, +# files or namespaces will be aligned in HTML using tables. If set to +# NO a bullet list will be used. + +HTML_ALIGN_MEMBERS = YES + +# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML +# documentation will contain sections that can be hidden and shown after the +# page has loaded. For this to work a browser that supports +# JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox +# Netscape 6.0+, Internet explorer 5.0+, Konqueror, or Safari). + +HTML_DYNAMIC_SECTIONS = NO + +# If the GENERATE_DOCSET tag is set to YES, additional index files +# will be generated that can be used as input for Apple's Xcode 3 +# integrated development environment, introduced with OSX 10.5 (Leopard). +# To create a documentation set, doxygen will generate a Makefile in the +# HTML output directory. 
Running make will produce the docset in that +# directory and running "make install" will install the docset in +# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find +# it at startup. +# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html for more information. + +GENERATE_DOCSET = NO + +# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the +# feed. A documentation feed provides an umbrella under which multiple +# documentation sets from a single provider (such as a company or product suite) +# can be grouped. + +DOCSET_FEEDNAME = "Doxygen generated docs" + +# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that +# should uniquely identify the documentation set bundle. This should be a +# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen +# will append .docset to the name. + +DOCSET_BUNDLE_ID = org.doxygen.Project + +# If the GENERATE_HTMLHELP tag is set to YES, additional index files +# will be generated that can be used as input for tools like the +# Microsoft HTML help workshop to generate a compiled HTML help file (.chm) +# of the generated HTML documentation. + +GENERATE_HTMLHELP = NO + +# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can +# be used to specify the file name of the resulting .chm file. You +# can add a path in front of the file if the result should not be +# written to the html output directory. + +CHM_FILE = + +# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can +# be used to specify the location (absolute path including file name) of +# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run +# the HTML help compiler on the generated index.hhp. + +HHC_LOCATION = + +# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag +# controls if a separate .chi index file is generated (YES) or that +# it should be included in the master .chm file (NO). 
+ +GENERATE_CHI = NO + +# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING +# is used to encode HtmlHelp index (hhk), content (hhc) and project file +# content. + +CHM_INDEX_ENCODING = + +# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag +# controls whether a binary table of contents is generated (YES) or a +# normal table of contents (NO) in the .chm file. + +BINARY_TOC = NO + +# The TOC_EXPAND flag can be set to YES to add extra items for group members +# to the contents of the HTML help documentation and to the tree view. + +TOC_EXPAND = NO + +# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and QHP_VIRTUAL_FOLDER +# are set, an additional index file will be generated that can be used as input for +# Qt's qhelpgenerator to generate a Qt Compressed Help (.qch) of the generated +# HTML documentation. + +GENERATE_QHP = NO + +# If the QHG_LOCATION tag is specified, the QCH_FILE tag can +# be used to specify the file name of the resulting .qch file. +# The path specified is relative to the HTML output folder. + +QCH_FILE = + +# The QHP_NAMESPACE tag specifies the namespace to use when generating +# Qt Help Project output. For more information please see +# http://doc.trolltech.com/qthelpproject.html#namespace + +QHP_NAMESPACE = + +# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating +# Qt Help Project output. For more information please see +# http://doc.trolltech.com/qthelpproject.html#virtual-folders + +QHP_VIRTUAL_FOLDER = doc + +# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to add. +# For more information please see +# http://doc.trolltech.com/qthelpproject.html#custom-filters + +QHP_CUST_FILTER_NAME = + +# The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the custom filter to add.For more information please see +# Qt Help Project / Custom Filters. 
+ +QHP_CUST_FILTER_ATTRS = + +# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this project's +# filter section matches. +# Qt Help Project / Filter Attributes. + +QHP_SECT_FILTER_ATTRS = + +# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can +# be used to specify the location of Qt's qhelpgenerator. +# If non-empty doxygen will try to run qhelpgenerator on the generated +# .qhp file. + +QHG_LOCATION = + +# The DISABLE_INDEX tag can be used to turn on/off the condensed index at +# top of each HTML page. The value NO (the default) enables the index and +# the value YES disables it. + +DISABLE_INDEX = NO + +# This tag can be used to set the number of enum values (range [1..20]) +# that doxygen will group on one line in the generated HTML documentation. + +ENUM_VALUES_PER_LINE = 4 + +# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index +# structure should be generated to display hierarchical information. +# If the tag value is set to YES, a side panel will be generated +# containing a tree-like index structure (just like the one that +# is generated for HTML Help). For this to work a browser that supports +# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser). +# Windows users are probably better off using the HTML help feature. + +GENERATE_TREEVIEW = YES + +# By enabling USE_INLINE_TREES, doxygen will generate the Groups, Directories, +# and Class Hierarchy pages using a tree view instead of an ordered list. + +USE_INLINE_TREES = NO + +# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be +# used to set the initial width (in pixels) of the frame in which the tree +# is shown. + +TREEVIEW_WIDTH = 250 + +# Use this tag to change the font size of Latex formulas included +# as images in the HTML documentation. The default is 10. 
Note that +# when you change the font size after a successful doxygen run you need +# to manually remove any form_*.png images from the HTML output directory +# to force them to be regenerated. + +FORMULA_FONTSIZE = 10 + +# When the SEARCHENGINE tag is enable doxygen will generate a search box for the HTML output. The underlying search engine uses javascript +# and DHTML and should work on any modern browser. Note that when using HTML help (GENERATE_HTMLHELP) or Qt help (GENERATE_QHP) +# there is already a search function so this one should typically +# be disabled. + +SEARCHENGINE = YES + +#--------------------------------------------------------------------------- +# configuration options related to the LaTeX output +#--------------------------------------------------------------------------- + +# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will +# generate Latex output. + +GENERATE_LATEX = NO + +# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `latex' will be used as the default path. + +LATEX_OUTPUT = latex + +# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be +# invoked. If left blank `latex' will be used as the default command name. + +LATEX_CMD_NAME = latex + +# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to +# generate index for LaTeX. If left blank `makeindex' will be used as the +# default command name. + +MAKEINDEX_CMD_NAME = makeindex + +# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact +# LaTeX documents. This may be useful for small projects and may help to +# save some trees in general. + +COMPACT_LATEX = NO + +# The PAPER_TYPE tag can be used to set the paper type that is used +# by the printer. Possible values are: a4, a4wide, letter, legal and +# executive. If left blank a4wide will be used. 
+ +PAPER_TYPE = a4wide + +# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX +# packages that should be included in the LaTeX output. + +EXTRA_PACKAGES = + +# The LATEX_HEADER tag can be used to specify a personal LaTeX header for +# the generated latex document. The header should contain everything until +# the first chapter. If it is left blank doxygen will generate a +# standard header. Notice: only use this tag if you know what you are doing! + +LATEX_HEADER = + +# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated +# is prepared for conversion to pdf (using ps2pdf). The pdf file will +# contain links (just like the HTML output) instead of page references +# This makes the output suitable for online browsing using a pdf viewer. + +PDF_HYPERLINKS = YES + +# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of +# plain latex in the generated Makefile. Set this option to YES to get a +# higher quality PDF documentation. + +USE_PDFLATEX = YES + +# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. +# command to the generated LaTeX files. This will instruct LaTeX to keep +# running if errors occur, instead of asking the user for help. +# This option is also used when generating formulas in HTML. + +LATEX_BATCHMODE = NO + +# If LATEX_HIDE_INDICES is set to YES then doxygen will not +# include the index chapters (such as File Index, Compound Index, etc.) +# in the output. + +LATEX_HIDE_INDICES = NO + +# If LATEX_SOURCE_CODE is set to YES then doxygen will include source code with syntax highlighting in the LaTeX output. Note that which sources are shown also depends on other settings such as SOURCE_BROWSER. 
+ +LATEX_SOURCE_CODE = NO + +#--------------------------------------------------------------------------- +# configuration options related to the RTF output +#--------------------------------------------------------------------------- + +# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output +# The RTF output is optimized for Word 97 and may not look very pretty with +# other RTF readers or editors. + +GENERATE_RTF = NO + +# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `rtf' will be used as the default path. + +RTF_OUTPUT = rtf + +# If the COMPACT_RTF tag is set to YES Doxygen generates more compact +# RTF documents. This may be useful for small projects and may help to +# save some trees in general. + +COMPACT_RTF = NO + +# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated +# will contain hyperlink fields. The RTF file will +# contain links (just like the HTML output) instead of page references. +# This makes the output suitable for online browsing using WORD or other +# programs which support those fields. +# Note: wordpad (write) and others do not support links. + +RTF_HYPERLINKS = NO + +# Load stylesheet definitions from file. Syntax is similar to doxygen's +# config file, i.e. a series of assignments. You only have to provide +# replacements, missing definitions are set to their default value. + +RTF_STYLESHEET_FILE = + +# Set optional variables used in the generation of an rtf document. +# Syntax is similar to doxygen's config file. 
+ +RTF_EXTENSIONS_FILE = + +#--------------------------------------------------------------------------- +# configuration options related to the man page output +#--------------------------------------------------------------------------- + +# If the GENERATE_MAN tag is set to YES (the default) Doxygen will +# generate man pages + +GENERATE_MAN = NO + +# The MAN_OUTPUT tag is used to specify where the man pages will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `man' will be used as the default path. + +MAN_OUTPUT = man + +# The MAN_EXTENSION tag determines the extension that is added to +# the generated man pages (default is the subroutine's section .3) + +MAN_EXTENSION = .3 + +# If the MAN_LINKS tag is set to YES and Doxygen generates man output, +# then it will generate one additional man file for each entity +# documented in the real man page(s). These additional files +# only source the real man page, but without them the man command +# would be unable to find the correct page. The default is NO. + +MAN_LINKS = NO + +#--------------------------------------------------------------------------- +# configuration options related to the XML output +#--------------------------------------------------------------------------- + +# If the GENERATE_XML tag is set to YES Doxygen will +# generate an XML file that captures the structure of +# the code including all documentation. + +GENERATE_XML = NO + +# The XML_OUTPUT tag is used to specify where the XML pages will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `xml' will be used as the default path. + +XML_OUTPUT = xml + +# The XML_SCHEMA tag can be used to specify an XML schema, +# which can be used by a validating XML parser to check the +# syntax of the XML files. 
+ +XML_SCHEMA = + +# The XML_DTD tag can be used to specify an XML DTD, +# which can be used by a validating XML parser to check the +# syntax of the XML files. + +XML_DTD = + +# If the XML_PROGRAMLISTING tag is set to YES Doxygen will +# dump the program listings (including syntax highlighting +# and cross-referencing information) to the XML output. Note that +# enabling this will significantly increase the size of the XML output. + +XML_PROGRAMLISTING = YES + +#--------------------------------------------------------------------------- +# configuration options for the AutoGen Definitions output +#--------------------------------------------------------------------------- + +# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will +# generate an AutoGen Definitions (see autogen.sf.net) file +# that captures the structure of the code including all +# documentation. Note that this feature is still experimental +# and incomplete at the moment. + +GENERATE_AUTOGEN_DEF = NO + +#--------------------------------------------------------------------------- +# configuration options related to the Perl module output +#--------------------------------------------------------------------------- + +# If the GENERATE_PERLMOD tag is set to YES Doxygen will +# generate a Perl module file that captures the structure of +# the code including all documentation. Note that this +# feature is still experimental and incomplete at the +# moment. + +GENERATE_PERLMOD = NO + +# If the PERLMOD_LATEX tag is set to YES Doxygen will generate +# the necessary Makefile rules, Perl scripts and LaTeX code to be able +# to generate PDF and DVI output from the Perl module output. + +PERLMOD_LATEX = NO + +# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be +# nicely formatted so it can be parsed by a human reader. +# This is useful +# if you want to understand what is going on. 
+# On the other hand, if this +# tag is set to NO the size of the Perl module output will be much smaller +# and Perl will parse it just the same. + +PERLMOD_PRETTY = YES + +# The names of the make variables in the generated doxyrules.make file +# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. +# This is useful so different doxyrules.make files included by the same +# Makefile don't overwrite each other's variables. + +PERLMOD_MAKEVAR_PREFIX = + +#--------------------------------------------------------------------------- +# Configuration options related to the preprocessor +#--------------------------------------------------------------------------- + +# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will +# evaluate all C-preprocessor directives found in the sources and include +# files. + +ENABLE_PREPROCESSING = YES + +# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro +# names in the source code. If set to NO (the default) only conditional +# compilation will be performed. Macro expansion can be done in a controlled +# way by setting EXPAND_ONLY_PREDEF to YES. + +MACRO_EXPANSION = NO + +# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES +# then the macro expansion is limited to the macros specified with the +# PREDEFINED and EXPAND_AS_DEFINED tags. + +EXPAND_ONLY_PREDEF = NO + +# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files +# in the INCLUDE_PATH (see below) will be search if a #include is found. + +SEARCH_INCLUDES = YES + +# The INCLUDE_PATH tag can be used to specify one or more directories that +# contain include files that are not input files but should be processed by +# the preprocessor. + +INCLUDE_PATH = + +# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard +# patterns (like *.h and *.hpp) to filter out the header-files in the +# directories. If left blank, the patterns specified with FILE_PATTERNS will +# be used. 
+ +INCLUDE_FILE_PATTERNS = *.h + +# The PREDEFINED tag can be used to specify one or more macro names that +# are defined before the preprocessor is started (similar to the -D option of +# gcc). The argument of the tag is a list of macros of the form: name +# or name=definition (no spaces). If the definition and the = are +# omitted =1 is assumed. To prevent a macro definition from being +# undefined via #undef or recursively expanded use the := operator +# instead of the = operator. + +PREDEFINED = + +# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then +# this tag can be used to specify a list of macro names that should be expanded. +# The macro definition that is found in the sources will be used. +# Use the PREDEFINED tag if you want to use a different macro definition. + +EXPAND_AS_DEFINED = + +# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then +# doxygen's preprocessor will remove all function-like macros that are alone +# on a line, have an all uppercase name, and do not end with a semicolon. Such +# function macros are typically used for boiler-plate code, and will confuse +# the parser if not removed. + +SKIP_FUNCTION_MACROS = YES + +#--------------------------------------------------------------------------- +# Configuration::additions related to external references +#--------------------------------------------------------------------------- + +# The TAGFILES option can be used to specify one or more tagfiles. +# Optionally an initial location of the external documentation +# can be added for each tagfile. The format of a tag file without +# this location is as follows: +# +# TAGFILES = file1 file2 ... +# Adding location for the tag files is done as follows: +# +# TAGFILES = file1=loc1 "file2 = loc2" ... +# where "loc1" and "loc2" can be relative or absolute paths or +# URLs. If a location is present for each tag, the installdox tool +# does not have to be run to correct the links. 
+# Note that each tag file must have a unique name +# (where the name does NOT include the path) +# If a tag file is not located in the directory in which doxygen +# is run, you must also specify the path to the tagfile here. + +TAGFILES = + +# When a file name is specified after GENERATE_TAGFILE, doxygen will create +# a tag file that is based on the input files it reads. + +GENERATE_TAGFILE = + +# If the ALLEXTERNALS tag is set to YES all external classes will be listed +# in the class index. If set to NO only the inherited external classes +# will be listed. + +ALLEXTERNALS = NO + +# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed +# in the modules index. If set to NO, only the current project's groups will +# be listed. + +EXTERNAL_GROUPS = YES + +# The PERL_PATH should be the absolute path and name of the perl script +# interpreter (i.e. the result of `which perl'). + +PERL_PATH = /usr/bin/perl + +#--------------------------------------------------------------------------- +# Configuration options related to the dot tool +#--------------------------------------------------------------------------- + +# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will +# generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base +# or super classes. Setting the tag to NO turns the diagrams off. Note that +# this option is superseded by the HAVE_DOT option below. This is only a +# fallback. It is recommended to install and use dot, since it yields more +# powerful graphs. + +CLASS_DIAGRAMS = YES + +# You can define message sequence charts within doxygen comments using the \msc +# command. Doxygen will then run the mscgen tool (see +# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the +# documentation. The MSCGEN_PATH tag allows you to specify the directory where +# the mscgen tool resides. If left empty the tool is assumed to be found in the +# default search path. 
+ +MSCGEN_PATH = + +# If set to YES, the inheritance and collaboration graphs will hide +# inheritance and usage relations if the target is undocumented +# or is not a class. + +HIDE_UNDOC_RELATIONS = YES + +# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is +# available from the path. This tool is part of Graphviz, a graph visualization +# toolkit from AT&T and Lucent Bell Labs. The other options in this section +# have no effect if this option is set to NO (the default) + +HAVE_DOT = YES + +# By default doxygen will write a font called FreeSans.ttf to the output +# directory and reference it in all dot files that doxygen generates. This +# font does not include all possible unicode characters however, so when you need +# these (or just want a differently looking font) you can specify the font name +# using DOT_FONTNAME. You need need to make sure dot is able to find the font, +# which can be done by putting it in a standard location or by setting the +# DOTFONTPATH environment variable or by setting DOT_FONTPATH to the directory +# containing the font. + +DOT_FONTNAME = FreeSans + +# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. +# The default size is 10pt. + +DOT_FONTSIZE = 10 + +# By default doxygen will tell dot to use the output directory to look for the +# FreeSans.ttf font (which doxygen will put there itself). If you specify a +# different font using DOT_FONTNAME you can set the path where dot +# can find it using this tag. + +DOT_FONTPATH = + +# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for each documented class showing the direct and +# indirect inheritance relations. Setting this tag to YES will force the +# the CLASS_DIAGRAMS tag to NO. 
+ +CLASS_GRAPH = YES + +# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for each documented class showing the direct and +# indirect implementation dependencies (inheritance, containment, and +# class references variables) of the class with other documented classes. + +COLLABORATION_GRAPH = YES + +# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for groups, showing the direct groups dependencies + +GROUP_GRAPHS = YES + +# If the UML_LOOK tag is set to YES doxygen will generate inheritance and +# collaboration diagrams in a style similar to the OMG's Unified Modeling +# Language. + +UML_LOOK = NO + +# If set to YES, the inheritance and collaboration graphs will show the +# relations between templates and their instances. + +TEMPLATE_RELATIONS = NO + +# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT +# tags are set to YES then doxygen will generate a graph for each documented +# file showing the direct and indirect include dependencies of the file with +# other documented files. + +INCLUDE_GRAPH = YES + +# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and +# HAVE_DOT tags are set to YES then doxygen will generate a graph for each +# documented header file showing the documented files that directly or +# indirectly include this file. + +INCLUDED_BY_GRAPH = YES + +# If the CALL_GRAPH and HAVE_DOT options are set to YES then +# doxygen will generate a call dependency graph for every global function +# or class method. Note that enabling this option will significantly increase +# the time of a run. So in most cases it will be better to enable call graphs +# for selected functions only using the \callgraph command. + +CALL_GRAPH = NO + +# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then +# doxygen will generate a caller dependency graph for every global function +# or class method. 
Note that enabling this option will significantly increase +# the time of a run. So in most cases it will be better to enable caller +# graphs for selected functions only using the \callergraph command. + +CALLER_GRAPH = NO + +# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen +# will graphical hierarchy of all classes instead of a textual one. + +GRAPHICAL_HIERARCHY = YES + +# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES +# then doxygen will show the dependencies a directory has on other directories +# in a graphical way. The dependency relations are determined by the #include +# relations between the files in the directories. + +DIRECTORY_GRAPH = YES + +# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images +# generated by dot. Possible values are png, jpg, or gif +# If left blank png will be used. + +DOT_IMAGE_FORMAT = png + +# The tag DOT_PATH can be used to specify the path where the dot tool can be +# found. If left blank, it is assumed the dot tool can be found in the path. + +DOT_PATH = + +# The DOTFILE_DIRS tag can be used to specify one or more directories that +# contain dot files that are included in the documentation (see the +# \dotfile command). + +DOTFILE_DIRS = + +# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of +# nodes that will be shown in the graph. If the number of nodes in a graph +# becomes larger than this value, doxygen will truncate the graph, which is +# visualized by representing a node as a red box. Note that doxygen if the +# number of direct children of the root node in a graph is already larger than +# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note +# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. + +DOT_GRAPH_MAX_NODES = 50 + +# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the +# graphs generated by dot. 
A depth value of 3 means that only nodes reachable +# from the root by following a path via at most 3 edges will be shown. Nodes +# that lay further from the root node will be omitted. Note that setting this +# option to 1 or 2 may greatly reduce the computation time needed for large +# code bases. Also note that the size of a graph can be further restricted by +# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. + +MAX_DOT_GRAPH_DEPTH = 0 + +# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent +# background. This is disabled by default, because dot on Windows does not +# seem to support this out of the box. Warning: Depending on the platform used, +# enabling this option may lead to badly anti-aliased labels on the edges of +# a graph (i.e. they become hard to read). + +DOT_TRANSPARENT = NO + +# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output +# files in one run (i.e. multiple -o and -T options on the command line). This +# makes dot run faster, but since only newer versions of dot (>1.8.10) +# support this, this feature is disabled by default. + +DOT_MULTI_TARGETS = NO + +# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will +# generate a legend page explaining the meaning of the various boxes and +# arrows in the dot generated graphs. + +GENERATE_LEGEND = YES + +# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will +# remove the intermediate dot files that are used to generate +# the various graphs. 
+ +DOT_CLEANUP = YES diff --git a/etc/init.d/maxscale b/etc/init.d/maxscale index dde616fb3..40f368326 100755 --- a/etc/init.d/maxscale +++ b/etc/init.d/maxscale @@ -1,6 +1,6 @@ #!/bin/sh # -# maxscale: The SkySQL MaxScale database proxy +# maxscale: The MariaDB Corporation MaxScale database proxy # # description: MaxScale provides database specific proxy functionality # diff --git a/etc/init.d/maxscale.in b/etc/init.d/maxscale.in index bf96d6f32..00c9777aa 100755 --- a/etc/init.d/maxscale.in +++ b/etc/init.d/maxscale.in @@ -1,6 +1,6 @@ #!/bin/sh # -# maxscale: The SkySQL MaxScale database proxy +# maxscale: The MariaDB Corporation MaxScale database proxy # # description: MaxScale provides database specific proxy functionality # diff --git a/etc/ubuntu/init.d/maxscale b/etc/ubuntu/init.d/maxscale index c81ffb475..82bc05632 100755 --- a/etc/ubuntu/init.d/maxscale +++ b/etc/ubuntu/init.d/maxscale @@ -1,6 +1,6 @@ #!/bin/sh # -# maxscale: The SkySQL MaxScale database proxy +# maxscale: The MariaDB Corporation MaxScale database proxy # # description: MaxScale provides database specific proxy functionality # diff --git a/etc/ubuntu/init.d/maxscale.in b/etc/ubuntu/init.d/maxscale.in index 600e8f5f8..3610b8fd9 100644 --- a/etc/ubuntu/init.d/maxscale.in +++ b/etc/ubuntu/init.d/maxscale.in @@ -1,6 +1,6 @@ #!/bin/sh # -# maxscale: The SkySQL MaxScale database proxy +# maxscale: The MariaDB Corporation MaxScale database proxy # # description: MaxScale provides database specific proxy functionality # diff --git a/log_manager/CMakeLists.txt b/log_manager/CMakeLists.txt index 1cbe6cd87..fdef33f6c 100644 --- a/log_manager/CMakeLists.txt +++ b/log_manager/CMakeLists.txt @@ -1,3 +1,6 @@ +if(LOG_DEBUG) + add_definitions(-DSS_LOG_DEBUG) +endif() add_library(log_manager SHARED log_manager.cc) target_link_libraries(log_manager pthread aio stdc++) install(TARGETS log_manager DESTINATION lib) diff --git a/log_manager/log_manager.cc b/log_manager/log_manager.cc index 2b274a610..1d5512756 
100644 --- a/log_manager/log_manager.cc +++ b/log_manager/log_manager.cc @@ -1,5 +1,5 @@ /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -13,7 +13,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * - * Copyright SkySQL Ab 2013 + * Copyright MariaDB Corporation Ab 2013-2014 */ #include #include @@ -58,24 +58,29 @@ static simple_mutex_t msg_mutex; */ int lm_enabled_logfiles_bitmask = 0; +/** + * Thread-specific struct variable for storing current session id and currently + * enabled log files for the session. + */ +__thread log_info_t tls_log_info = {0, 0}; + +/** + * Global counter for each log file type. It indicates for how many sessions + * each log type is currently enabled. + */ +ssize_t log_ses_count[LOGFILE_LAST] = {0}; + /** * BUFSIZ comes from the system. It equals with block size or * its multiplication. */ #define MAX_LOGSTRLEN BUFSIZ -#if defined(SS_PROF) -/** - * These counters may be inaccurate but give some idea of how - * things are going. - */ - -#endif /** * Path to directory in which all files are stored to shared memory * by the OS. 
*/ -const char* shm_pathname = "/dev/shm"; +const char* shm_pathname_prefix = "/dev/shm/"; /** Logfile ids from call argument '-s' */ char* shmem_id_str = NULL; @@ -89,7 +94,9 @@ char* syslog_ident_str = NULL; */ static int lmlock; static logmanager_t* lm; - +static bool flushall_flag; +static bool flushall_started_flag; +static bool flushall_done_flag; /** Writer thread structure */ struct filewriter_st { @@ -162,7 +169,8 @@ struct logfile_st { mlist_t lf_blockbuf_list; int lf_buf_size; bool lf_flushflag; - int lf_spinlock; /**< lf_flushflag */ + bool lf_rotateflag; + int lf_spinlock; /**< lf_flushflag & lf_rotateflag */ int lf_npending_writes; #if defined(SS_DEBUG) skygw_chk_t lf_chk_tail; @@ -232,6 +240,11 @@ static bool logfile_init( static void logfile_done(logfile_t* logfile); static void logfile_free_memory(logfile_t* lf); static void logfile_flush(logfile_t* lf); +static void logfile_rotate(logfile_t* lf); +static bool logfile_create(logfile_t* lf); +static bool logfile_open_file(filewriter_t* fw, logfile_t* lf); +static char* form_full_file_name(strpart_t* parts, logfile_t* lf, int seqnoidx); + static bool filewriter_init( logmanager_t* logmanager, filewriter_t* fw, @@ -249,14 +262,16 @@ static bool logmanager_register(bool writep); static void logmanager_unregister(void); static bool logmanager_init_nomutex(int argc, char* argv[]); static void logmanager_done_nomutex(void); + static int logmanager_write_log( logfile_id_t id, - bool flush, - bool use_valist, - bool spread_down, - size_t len, - const char* str, - va_list valist); + bool flush, + bool use_valist, + bool spread_down, + bool rotate, + size_t len, + const char* str, + va_list valist); static blockbuf_t* blockbuf_init(logfile_id_t id); static void blockbuf_node_done(void* bb_data); @@ -270,11 +285,17 @@ static void blockbuf_register(blockbuf_t* bb); static void blockbuf_unregister(blockbuf_t* bb); static bool logfile_set_enabled(logfile_id_t id, bool val); static char* add_slash(char* str); 
-static bool file_exists_and_is_writable(char* filename, bool* writable); + +static bool check_file_and_path( + char* filename, + bool* writable, + bool do_log); + static bool file_is_symlink(char* filename); - - - +static int skygw_log_disable_raw(logfile_id_t id, bool emergency); /*< no locking */ +static int find_last_seqno(strpart_t* parts, int seqno, int seqnoidx); +void flushall_logfiles(bool flush); +bool thr_flushall_check(); const char* get_suffix_default(void) { @@ -336,6 +357,12 @@ static bool logmanager_init_nomutex( bool succp = false; lm = (logmanager_t *)calloc(1, sizeof(logmanager_t)); + + if (lm == NULL) + { + err = 1; + goto return_succp; + } #if defined(SS_DEBUG) lm->lm_chk_top = CHK_NUM_LOGMANAGER; lm->lm_chk_tail = CHK_NUM_LOGMANAGER; @@ -345,7 +372,15 @@ static bool logmanager_init_nomutex( simple_mutex_init(&msg_mutex, "Message mutex"); #endif lm->lm_clientmes = skygw_message_init(); - lm->lm_logmes = skygw_message_init(); + lm->lm_logmes = skygw_message_init(); + + if (lm->lm_clientmes == NULL || + lm->lm_logmes == NULL) + { + err = 1; + goto return_succp; + } + lm->lm_enabled_logfiles |= LOGFILE_ERROR; lm->lm_enabled_logfiles |= LOGFILE_MESSAGE; #if defined(SS_DEBUG) @@ -356,35 +391,50 @@ static bool logmanager_init_nomutex( fw = &lm->lm_filewriter; fn->fn_state = UNINIT; fw->fwr_state = UNINIT; - - /** - * Set global variable - */ - lm_enabled_logfiles_bitmask = lm->lm_enabled_logfiles; /** Initialize configuration including log file naming info */ - if (!fnames_conf_init(fn, argc, argv)) { - goto return_succp; + if (!fnames_conf_init(fn, argc, argv)) + { + err = 1; + goto return_succp; } /** Initialize logfiles */ - if(!logfiles_init(lm)) { - goto return_succp; + if(!logfiles_init(lm)) + { + err = 1; + goto return_succp; } - /** Initialize filewriter data and open the (first) log file(s) - * for each log file type. 
*/ - if (!filewriter_init(lm, fw, lm->lm_clientmes, lm->lm_logmes)) { - goto return_succp; + /** + * Set global variable + */ + lm_enabled_logfiles_bitmask = lm->lm_enabled_logfiles; + + /** + * Initialize filewriter data and open the log file + * for each log file type. + */ + if (!filewriter_init(lm, fw, lm->lm_clientmes, lm->lm_logmes)) + { + err = 1; + goto return_succp; } /** Initialize and start filewriter thread */ fw->fwr_thread = skygw_thread_init("filewriter thr", thr_filewriter_fun, (void *)fw); - - if ((err = skygw_thread_start(fw->fwr_thread)) != 0) { - goto return_succp; + + if (fw->fwr_thread == NULL) + { + err = 1; + goto return_succp; + } + + if ((err = skygw_thread_start(fw->fwr_thread)) != 0) + { + goto return_succp; } /** Wait message from filewriter_thr */ skygw_message_wait(fw->fwr_clientmes); @@ -393,10 +443,11 @@ static bool logmanager_init_nomutex( lm->lm_enabled = true; return_succp: - if (err != 0) { - /** This releases memory of all created objects */ - logmanager_done_nomutex(); - fprintf(stderr, "* Initializing logmanager failed.\n"); + if (err != 0) + { + /** This releases memory of all created objects */ + logmanager_done_nomutex(); + fprintf(stderr, "*\n* Error : Initializing log manager failed.\n*\n"); } return succp; } @@ -404,23 +455,14 @@ return_succp: /** - * @node Initializes log managing routines in SkySQL Gateway. + * Initializes log managing routines in MariaDB Corporation MaxScale. * * Parameters: - * @param p_ctx - in, give - * pointer to memory location where logmanager stores private write - * buffer. 
+ * @param argc number of arguments in argv array * - * @param argc - in, use - * number of arguments in argv array + * @param argv arguments array * - * @param argv - in, use - * arguments array - * - * @return - * - * - * @details (write detailed description here) + * @return true if succeed, otherwise false * */ bool skygw_logmanager_init( @@ -444,7 +486,12 @@ return_succp: return succp; } - +/** + * Release resources of log manager. + * + * Lock must have been acquired before calling + * this function. + */ static void logmanager_done_nomutex(void) { int i; @@ -489,17 +536,7 @@ static void logmanager_done_nomutex(void) /** - * @node This function is provided for atexit() system function. - * - * Parameters: - * @param void - - * - * - * @return void - * - * - * @details (write detailed description here) - * + * This function is provided for atexit() system function. */ void skygw_logmanager_exit(void) { @@ -507,20 +544,9 @@ void skygw_logmanager_exit(void) } /** - * @node End execution of log manager + * End execution of log manager * - * Parameters: - * @param p_ctx - in, take - * pointer to memory location including context pointer. Context will - * be freed in this function. - * - * @param logmanager - in, use - * pointer to logmanager. - * - * @return void - * - * - * @details Stops file writing thread, releases filewriter, and logfiles. + * Stops file writing thread, releases filewriter, and logfiles. * */ void skygw_logmanager_done(void) @@ -572,42 +598,32 @@ static logfile_t* logmanager_get_logfile( /** - * @node Finds write position from block buffer for log string and writes there. - * + * Finds write position from block buffer for log string and writes there. 
+ * * Parameters: * - * @param id - in, use - * logfile object identifier - * - * @param flush - in, use - * indicates whether log string must be written to disk immediately - * - * @param use_valist - in, use - * does write involve formatting of the string and use of valist argument - * - * @param spread_down - in, use - * if true, log string is spread to all logs having larger id. - * - * @param str_len - in, use - * length of formatted string - * - * @param str - in, use - * string to be written to log - * - * @param valist - in, use - * variable-length argument list for formatting the string - * - * @return + * @param id logfile object identifier + * @param flush indicates whether log string must be written to disk + * immediately + * @param use_valist does write involve formatting of the string and use of + * valist argument + * @param spread_down if true, log string is spread to all logs having + * larger id. + * @param rotate if set, closes currently open log file and opens a + * new one + * @param str_len length of formatted string + * @param str string to be written to log + * @param valist variable-length argument list for formatting the string * + * @return 0 if succeed, -1 otherwise * - * @details (write detailed description here) - * */ static int logmanager_write_log( logfile_id_t id, bool flush, bool use_valist, bool spread_down, + bool rotate, size_t str_len, const char* str, va_list valist) @@ -631,6 +647,7 @@ static int logmanager_write_log( true, false, false, + false, strlen(errstr)+1, errstr, valist); @@ -647,29 +664,46 @@ static int logmanager_write_log( CHK_LOGFILE(lf); /** - * When string pointer is NULL, case is skygw_log_flush and no - * writing is involved. With flush && str != NULL case is - * skygw_log_write_flush. + * When string pointer is NULL, operation is either flush or rotate. 
*/ - if (str == NULL) { - ss_dassert(flush); - logfile_flush(lf); /**< here we wake up file writer */ - } else { + if (str == NULL) + { + if (flush) + { + logfile_flush(lf); /*< wakes up file writer */ + } + else if (rotate) + { + logfile_rotate(lf); /*< wakes up file writer */ + } + } + else + { /** Length of string that will be written, limited by bufsize */ int safe_str_len; + /** Length of session id */ + int sesid_str_len; + /** 2 braces, 2 spaces and terminating char */ + if (id == LOGFILE_TRACE && tls_log_info.li_sesid != 0) + { + sesid_str_len = 2+2+get_decimal_len(tls_log_info.li_sesid)+1; + } + else + { + sesid_str_len = 0; + } timestamp_len = get_timestamp_len(); - /** Findout how much can be safely written with current block size */ - if (timestamp_len-1+str_len > lf->lf_buf_size) - { - safe_str_len = lf->lf_buf_size; - } + /** Find out how much can be safely written with current block size */ + if (timestamp_len-1+MAX(sesid_str_len-1,0)+str_len > lf->lf_buf_size) + { + safe_str_len = lf->lf_buf_size; + } else - { - safe_str_len = timestamp_len-1+str_len; - } - + { + safe_str_len = timestamp_len-1+MAX(sesid_str_len-1,0)+str_len; + } /** * Seek write position and register to block buffer. * Then print formatted string to write position. 
@@ -677,35 +711,29 @@ static int logmanager_write_log( #if defined (SS_LOG_DEBUG) { - - char *copy,*tok; - int tokval; + char *copy,*tok; + int tokval; - simple_mutex_lock(&msg_mutex,true); + simple_mutex_lock(&msg_mutex,true); + copy = strdup(str); + tok = strtok(copy,"|"); + tok = strtok(NULL,"|"); - copy = strdup(str); - - tok = strtok(copy,"|"); - - tok = strtok(NULL,"|"); - - if(strstr(str,"message|") && tok){ - - tokval = atoi(tok); - - if(prevval > 0){ - ss_dassert(tokval == (prevval + 1)); - } - - prevval = tokval; - } - - free(copy); - simple_mutex_unlock(&msg_mutex); + if(strstr(str,"message|") && tok) + { + tokval = atoi(tok); + if(prevval > 0) + { + ss_dassert(tokval == (prevval + 1)); + } + prevval = tokval; + } + free(copy); + simple_mutex_unlock(&msg_mutex); } #endif - + /** Book space for log string from buffer */ wp = blockbuf_get_writepos(&bb, id, safe_str_len, @@ -727,16 +755,31 @@ static int logmanager_write_log( */ timestamp_len = snprint_timestamp(wp, timestamp_len); - - + if (sesid_str_len != 0) + { + /** + * Write session id + */ + snprintf(wp+timestamp_len, + sesid_str_len, + "[%lu] ", + tls_log_info.li_sesid); + sesid_str_len -= 1; /*< don't calculate terminating char anymore */ + } /** * Write next string to overwrite terminating null character * of the timestamp string. 
*/ if (use_valist) { - vsnprintf(wp+timestamp_len, safe_str_len-timestamp_len, str, valist); + vsnprintf(wp+timestamp_len+sesid_str_len, + safe_str_len-timestamp_len-sesid_str_len, + str, + valist); } else { - snprintf(wp+timestamp_len, safe_str_len-timestamp_len, "%s", str); + snprintf(wp+timestamp_len+sesid_str_len, + safe_str_len-timestamp_len-sesid_str_len, + "%s", + str); } /** write to syslog */ @@ -755,7 +798,12 @@ static int logmanager_write_log( break; } } - wp[safe_str_len-1] = '\n'; + /** remove double line feed */ + if (wp[safe_str_len-2] == '\n') + { + wp[safe_str_len-2]=' '; + } + wp[safe_str_len-1] = '\n'; blockbuf_unregister(bb); /** @@ -813,12 +861,18 @@ static int logmanager_write_log( blockbuf_unregister(bb_c); } } /* if (spread_down) */ - } + } /* if (str == NULL) */ return_err: return err; } +/** + * Register writer to a block buffer. When reference counter is non-zero the + * flusher thread doesn't write the block to disk. + * + * @param bb block buffer + */ static void blockbuf_register( blockbuf_t* bb) { @@ -827,7 +881,12 @@ static void blockbuf_register( atomic_add(&bb->bb_refcount, 1); } - +/** + * Unregister writer from block buffer. If the buffer got filled up and there + * are no other registered writers anymore, notify the flusher thread. 
+ * + * @param bb block buffer + */ static void blockbuf_unregister( blockbuf_t* bb) { @@ -981,21 +1040,46 @@ static char* blockbuf_get_writepos( }else if(bb->bb_state == BB_CLEARED){ /** - *Move the full buffer to the end of the list + *Move the cleared buffer to the end of the list if it is the first one in the list */ simple_mutex_unlock(&bb->bb_mutex); simple_mutex_lock(&bb_list->mlist_mutex, true); - if(node->mlnode_next){ - bb_list->mlist_first = node->mlnode_next; - bb_list->mlist_last->mlnode_next = node; - node->mlnode_next = NULL; - bb_list->mlist_last = node; - node = bb_list->mlist_first; - } + if(node == bb_list->mlist_first) + { - bb->bb_state = BB_READY; + if(bb_list->mlist_nodecount > 1 && + node != bb_list->mlist_last){ + bb_list->mlist_last->mlnode_next = bb_list->mlist_first; + bb_list->mlist_first = bb_list->mlist_first->mlnode_next; + bb_list->mlist_last->mlnode_next->mlnode_next = NULL; + bb_list->mlist_last = bb_list->mlist_last->mlnode_next; + } + + ss_dassert(node == bb_list->mlist_last); + + simple_mutex_unlock(&bb_list->mlist_mutex); + simple_mutex_lock(&bb->bb_mutex, true); + + bb->bb_state = BB_READY; + + simple_mutex_unlock(&bb->bb_mutex); + simple_mutex_lock(&bb_list->mlist_mutex, true); + node = bb_list->mlist_first; + } + else + { + if(node->mlnode_next){ + node = node->mlnode_next; + }else{ + node = bb_list->mlist_first; + } + continue; + } + + + }else if (bb->bb_state == BB_READY){ /** @@ -1120,10 +1204,10 @@ int skygw_log_enable( { bool err = 0; - if (!logmanager_register(true)) { - //fprintf(stderr, "ERROR: Can't register to logmanager\n"); - err = -1; - goto return_err; + if (!logmanager_register(true)) + { + err = -1; + goto return_err; } CHK_LOGMANAGER(lm); @@ -1140,25 +1224,36 @@ return_err: return err; } - int skygw_log_disable( - logfile_id_t id) + logfile_id_t id) /*< no locking */ +{ + int rc; + + rc = skygw_log_disable_raw(id, false); + + return rc; +} + +static int skygw_log_disable_raw( + logfile_id_t id, + bool 
emergency) /*< no locking */ { bool err = 0; - if (!logmanager_register(true)) { - //fprintf(stderr, "ERROR: Can't register to logmanager\n"); + if (!logmanager_register(true)) + { err = -1; goto return_err; } CHK_LOGMANAGER(lm); - if (logfile_set_enabled(id, false)) { - lm->lm_enabled_logfiles &= ~id; - /** - * Set global variable - */ - lm_enabled_logfiles_bitmask = lm->lm_enabled_logfiles; + if (emergency || logfile_set_enabled(id, false)) + { + lm->lm_enabled_logfiles &= ~id; + /** + * Set global variable + */ + lm_enabled_logfiles_bitmask = lm->lm_enabled_logfiles; } logmanager_unregister(); @@ -1189,6 +1284,7 @@ static bool logfile_set_enabled( true, false, false, + false, strlen(errstr)+1, errstr, notused); @@ -1214,6 +1310,7 @@ static bool logfile_set_enabled( true, false, false, + false, strlen(logstr)+1, logstr, notused); @@ -1242,26 +1339,25 @@ int skygw_log_write_flush( va_list valist; size_t len; - if (!logmanager_register(true)) { - //fprintf(stderr, "ERROR: Can't register to logmanager\n"); - err = -1; - goto return_err; + if (!logmanager_register(true)) + { + err = -1; + goto return_err; } CHK_LOGMANAGER(lm); - /** - * If particular log is disabled only unregister and return. - */ - if (!(lm->lm_enabled_logfiles & id)) { - err = 1; - goto return_unregister; + /** + * If particular log is disabled in general and it is not enabled for + * the current session, then unregister and return. + */ + if (!LOG_IS_ENABLED(id)) + { + err = 1; + goto return_unregister; } /** * Find out the length of log string (to be formatted str). */ - - - va_start(valist, str); len = vsnprintf(NULL, 0, str, valist); va_end(valist); @@ -1273,7 +1369,7 @@ int skygw_log_write_flush( * Write log string to buffer and add to file write list. 
*/ va_start(valist, str); - err = logmanager_write_log(id, true, true, true, len, str, valist); + err = logmanager_write_log(id, true, true, true, false, len, str, valist); va_end(valist); if (err != 0) { @@ -1298,23 +1394,22 @@ int skygw_log_write( va_list valist; size_t len; - if (!logmanager_register(true)) { - //fprintf(stderr, "ERROR: Can't register to logmanager\n"); - err = -1; - goto return_err; + if (!logmanager_register(true)) + { + err = -1; + goto return_err; } CHK_LOGMANAGER(lm); /** - * If particular log is disabled only unregister and return. + * If particular log is disabled in general and it is not enabled for + * the current session, then unregister and return. */ - if (!(lm->lm_enabled_logfiles & id)) { + if (!LOG_IS_ENABLED(id)) + { err = 1; goto return_unregister; } - - - /** * Find out the length of log string (to be formatted str). */ @@ -1330,7 +1425,7 @@ int skygw_log_write( */ va_start(valist, str); - err = logmanager_write_log(id, false, true, true, len, str, valist); + err = logmanager_write_log(id, false, true, true, false, len, str, valist); va_end(valist); if (err != 0) { @@ -1357,7 +1452,7 @@ int skygw_log_flush( goto return_err; } CHK_LOGMANAGER(lm); - err = logmanager_write_log(id, true, false, false, 0, NULL, valist); + err = logmanager_write_log(id, true, false, false, false, 0, NULL, valist); if (err != 0) { fprintf(stderr, "skygw_log_flush failed.\n"); @@ -1370,6 +1465,60 @@ return_err: return err; } +/** + * Replace current logfile with new file with increased sequence number on + * its name. 
+ */ +int skygw_log_rotate( + logfile_id_t id) +{ + int err = 0; + logfile_t* lf; + va_list valist; /**< Dummy, must be present but it is not processed */ + + if (!logmanager_register(false)) + { + ss_dfprintf(stderr, + "Can't register to logmanager, rotating failed\n"); + goto return_err; + } + CHK_LOGMANAGER(lm); + lf = &lm->lm_logfile[id]; + + LOGIF(LM, (skygw_log_write( + LOGFILE_MESSAGE, + "Log rotation is called for %s.", + lf->lf_full_file_name))); + + err = logmanager_write_log(id, false, false, false, true, 0, NULL, valist); + + if (err != 0) + { + LOGIF(LE, (skygw_log_write( + LOGFILE_ERROR, + "Log file rotation failed for file %s.", + lf->lf_full_file_name))); + + fprintf(stderr, "skygw_log_rotate failed.\n"); + goto return_unregister; + } + +return_unregister: + LOGIF(LM, (skygw_log_write_flush( + LOGFILE_MESSAGE, + "File %s use for log writing..", + lf->lf_full_file_name))); + + logmanager_unregister(); + + return_err: + + return err; +} + + + + /** * @node Register as a logging client to logmanager. * @@ -1764,7 +1913,7 @@ static bool logfiles_init( write_syslog); if (!succp) { - fprintf(stderr, "Initializing logfiles failed\n"); + fprintf(stderr, "*\n* Error : Initializing log files failed.\n"); break; } lid <<= 1; @@ -1783,43 +1932,284 @@ static void logfile_flush( skygw_message_send(lf->lf_logmes); } +/** + * Set rotate flag for a log file and wake up the writer thread which then + * performs the actual rotation task. + * + * @param lf logfile pointer + */ +static void logfile_rotate( + logfile_t* lf) +{ + CHK_LOGFILE(lf); + acquire_lock(&lf->lf_spinlock); + lf->lf_rotateflag = true; + release_lock(&lf->lf_spinlock); + skygw_message_send(lf->lf_logmes); +} + +/** + * Forms complete path name for logfile and tests that the file doesn't conflict + * with any existing file and it is writable. + * + * @param lf logfile pointer + * + * @return true if succeed, false if failed + * + * @note Log file openings are not TOCTOU-safe. 
It is not likely that + * multiple copies of same files are opened in parallel but it is possible by + * using log manager in parallel with multiple processes and by configuring + * log manager to use same directories among those processes. + */ +static bool logfile_create( + logfile_t* lf) +{ + bool namecreatefail; + bool nameconflicts; + bool store_shmem; + bool writable; + bool succp; + strpart_t spart[3]; /*< string parts of which the file is composed of */ + + /** + * sparts is an array but next pointers are used to walk through + * the list of string parts. + */ + spart[0].sp_next = &spart[1]; + spart[1].sp_next = &spart[2]; + spart[2].sp_next = NULL; + + spart[1].sp_string = lf->lf_name_prefix; + spart[2].sp_string = lf->lf_name_suffix; + + store_shmem = lf->lf_store_shmem; + + do { + namecreatefail = false; + nameconflicts = false; + + spart[0].sp_string = lf->lf_filepath; + /** + * Create name for log file. Seqno is added between prefix & + * suffix (index == 2) + */ + lf->lf_full_file_name = + form_full_file_name(spart, lf, 2); + + if (store_shmem) + { + spart[0].sp_string = lf->lf_linkpath; + /** + * Create name for link file + */ + lf->lf_full_link_name = form_full_file_name(spart,lf,2); + } + /** + * At least one of the files couldn't be created. Increase + * sequence number and retry until succeeds. + */ + if (lf->lf_full_file_name == NULL || + (store_shmem && lf->lf_full_link_name == NULL)) + { + namecreatefail = true; + goto file_create_fail; + } + + /** + * If file exists but is different type, create fails and + * new, increased sequence number is added to file name. + */ + if (check_file_and_path(lf->lf_full_file_name, &writable, true)) + { + /** Found similarly named file which isn't writable */ + if (!writable || file_is_symlink(lf->lf_full_file_name)) + { + nameconflicts = true; + goto file_create_fail; + } + } + else + { + /** + * Opening the file failed for some other reason than + * existing non-writable file. Shut down. 
+ */ + if (!writable) + { + succp = false; + goto return_succp; + } + } + + if (store_shmem) + { + if (check_file_and_path(lf->lf_full_link_name, &writable, true)) + { + /** Found similarly named link which isn't writable */ + if (!writable) + { + nameconflicts = true; + } + } + else + { + /** + * Opening the file failed for some other reason than + * existing non-writable file. Shut down. + */ + if (!writable) + { + succp = false; + goto return_succp; + } + } + } +file_create_fail: + if (namecreatefail || nameconflicts) + { + lf->lf_name_seqno += 1; + + if (lf->lf_full_file_name != NULL) + { + free(lf->lf_full_file_name); + lf->lf_full_file_name = NULL; + } + if (lf->lf_full_link_name != NULL) + { + free(lf->lf_full_link_name); + lf->lf_full_link_name = NULL; + } + } + } while (namecreatefail || nameconflicts); + + succp = true; + +return_succp: + return succp; +} + +/** + * Opens a log file and writes header to the beginning of it. File name, FILE*, + * and file descriptor are stored to skygw_file_t struct which is stored in + * filewriter strcuture passed as parameter. + * + * @param fw filewriter pointer + * @param lf logfile pointer + * + * @return true if succeed; the resulting skygw_file_t is written in filewriter, + * false if failed. 
+ * + */ +static bool logfile_open_file( + filewriter_t* fw, + logfile_t* lf) +{ + bool succp; + char* start_msg_str; + int err; + + if (lf->lf_store_shmem) + { + /** Create symlink pointing to log file */ + fw->fwr_file[lf->lf_id] = skygw_file_init( + lf->lf_full_file_name, + lf->lf_full_link_name); + } + else + { + /** Create normal disk-resident log file */ + fw->fwr_file[lf->lf_id] = skygw_file_init( + lf->lf_full_file_name, + NULL); + } + + if (fw->fwr_file[lf->lf_id] == NULL) + { + fprintf(stderr, + "Error : opening logfile %s failed.\n", + lf->lf_full_file_name); + succp = false; + goto return_succp; + } + + if (lf->lf_enabled) + { + start_msg_str = strdup("---\tLogging is enabled.\n"); + } + else + { + start_msg_str = strdup("---\tLogging is disabled.\n"); + } + err = skygw_file_write(fw->fwr_file[lf->lf_id], + (void *)start_msg_str, + strlen(start_msg_str), + true); + + if (err != 0) + { + fprintf(stderr, + "Error : writing to file %s failed due to %d, %s. " + "Exiting MaxScale.\n", + lf->lf_full_file_name, + err, + strerror(err)); + succp = false; + goto return_succp; + } + free(start_msg_str); + succp = true; + +return_succp: + return succp; +} + /** * @node Combine all name parts from left to right. * * Parameters: - * @param parts - - * + * @param parts * - * @param seqno - in, use - * specifies the the sequence number which will be added as a part - * of full file name. - * seqno == -1 indicates that sequence number won't be used. + * @param seqno specifies the the sequence number which will be added as a part + * of full file name. seqno == -1 indicates that sequence number won't be used. * - * @param seqnoidx - in, use - * Specifies the seqno position in the 'array' of name parts. - * If seqno == -1 seqnoidx will be set -1 as well. + * @param seqnoidx Specifies the seqno position in the 'array' of name parts. + * If seqno == -1 seqnoidx will be set -1 as well. * * @return Pointer to filename, of NULL if failed. 
* - * - * @details (write detailed description here) - * */ static char* form_full_file_name( strpart_t* parts, - int seqno, + logfile_t* lf, int seqnoidx) { int i; + int seqno; size_t s; size_t fnlen; char* filename = NULL; char* seqnostr = NULL; strpart_t* p; - - if (seqno != -1) + + if (lf->lf_name_seqno != -1) { + int file_sn; + int link_sn = 0; + char* tmp = parts[0].sp_string; + + file_sn = find_last_seqno(parts, lf->lf_name_seqno, seqnoidx); + + if (lf->lf_linkpath != NULL) + { + tmp = parts[0].sp_string; + parts[0].sp_string = lf->lf_linkpath; + link_sn = find_last_seqno(parts, lf->lf_name_seqno, seqnoidx); + parts[0].sp_string = tmp; + } + lf->lf_name_seqno = MAX(file_sn, link_sn); + + seqno = lf->lf_name_seqno; s = UINTLEN(seqno); seqnostr = (char *)malloc((int)s+1); } @@ -1831,6 +2221,7 @@ static char* form_full_file_name( */ s = 0; seqnoidx = -1; + seqno = lf->lf_name_seqno; } if (parts == NULL || parts->sp_string == NULL) { @@ -1853,20 +2244,24 @@ static char* form_full_file_name( } p = p->sp_next; } - - if (fnlen > NAME_MAX) { + + if (fnlen > NAME_MAX) + { fprintf(stderr, "Error : Too long file name= %d.\n", (int)fnlen); goto return_filename; } - filename = (char*)calloc(1, fnlen); - snprintf(seqnostr, s+1, "%d", seqno); - + + if (seqnostr != NULL) + { + snprintf(seqnostr, s+1, "%d", seqno); + } + for (i=0, p=parts; p->sp_string != NULL; i++, p=p->sp_next) { - if (i == seqnoidx) + if (seqnostr != NULL && i == seqnoidx) { - strcat(filename, seqnostr); + strcat(filename, seqnostr); /*< add sequence number */ } strcat(filename, p->sp_string); @@ -1912,66 +2307,157 @@ static char* add_slash( return str; } -/** - * @node Check if the file exists in the local file system and if it does, - * whether it is writable. - * - * Parameters: - * @param filename - - * - * - * @param writable - - * - * - * @return - * + +/** + * @node Check if the path and file exist in the local file system and if they do, + * check if they are accessible and writable. 
* - * @details Note, that an space character is written to the end of file. + * Parameters: + * @param filename file to be checked + * + * @param writable flag indicating whether file was found writable or not + * if writable is NULL, check is skipped. + * + * @return true & writable if file exists and it is writable, + * true & not writable if file exists but it can't be written, + * false & writable if file doesn't exist but directory could be written, and + * false & not writable if directory can't be written. + * + * @details Note, that a space character is written to the end of file. * TODO: recall what was the reason for not succeeding with simply * calling access, and fstat. vraa 26.11.13 - * */ -static bool file_exists_and_is_writable( - char* filename, - bool* writable) +static bool check_file_and_path( + char* filename, + bool* writable, + bool do_log) { - int fd; - bool exists = true; - - if (filename == NULL) - { - exists = false; - } - else - { - fd = open(filename, O_CREAT|O_EXCL, S_IRWXU); - - /** file exist */ - if (fd == -1) - { - /** Open file and write a byte for test */ - fd = open(filename, O_CREAT|O_RDWR, S_IRWXU|S_IRWXG); - - if (fd != -1) - { - char c = ' '; - if (write(fd, &c, 1) == 1) - { - *writable = true; - } - close(fd); - } - } - else - { - close(fd); - unlink(filename); - exists = false; - } - } - return exists; + int fd; + bool exists; + + if (filename == NULL) + { + exists = false; + + if (writable) + { + *writable = false; + } + } + else + { + fd = open(filename, O_CREAT|O_EXCL, S_IRWXU); + + if (fd == -1) + { + /** File exists, check permission to read/write */ + if (errno == EEXIST) + { + /** Open file and write a byte for test */ + fd = open(filename, O_CREAT|O_RDWR, S_IRWXU|S_IRWXG); + + if (fd == -1) + { + if (do_log && file_is_symlink(filename)) + { + fprintf(stderr, + "*\n* Error : Can't access " + "file pointed to by %s due " + "to %s.\n", + filename, + strerror(errno)); + } + else if (do_log) + { + fprintf(stderr, + "*\n* 
Error : Can't access %s due " + "to %s.\n", + filename, + strerror(errno)); + } + if (writable) + { + *writable = false; + } + } + else + { + if (writable) + { + char c = ' '; + if (write(fd, &c, 1) == 1) + { + *writable = true; + } + else + { + if (do_log && + file_is_symlink(filename)) + { + fprintf(stderr, + "*\n* Error : Can't write to " + "file pointed to by %s due to " + "%s.\n", + filename, + strerror(errno)); + } + else if (do_log) + { + fprintf(stderr, + "*\n* Error : Can't write to " + "%s due to %s.\n", + filename, + strerror(errno)); + } + *writable = false; + } + } + close(fd); + } + exists = true; + } + else + { + if (do_log && file_is_symlink(filename)) + { + fprintf(stderr, + "*\n* Error : Can't access the file " + "pointed to by %s due to %s.\n", + filename, + strerror(errno)); + } + else if (do_log) + { + fprintf(stderr, + "*\n* Error : Can't access %s due to %s.\n", + filename, + strerror(errno)); + } + exists = false; + + if (writable) + { + *writable = false; + } + } + } + else + { + close(fd); + unlink(filename); + exists = false; + + if (writable) + { + *writable = true; + } + } + } + return exists; } + + static bool file_is_symlink( char* filename) { @@ -2011,8 +2497,6 @@ static bool file_is_symlink( * * @return true if succeed, false otherwise * - * - * @details (write detailed description here) * */ static bool logfile_init( @@ -2024,12 +2508,6 @@ static bool logfile_init( { bool succp = false; fnames_conf_t* fn = &logmanager->lm_fnames_conf; - /** string parts of which the file is composed of */ - strpart_t strparts[3]; - bool namecreatefail; - bool nameconflicts; - bool writable; - logfile->lf_state = INIT; #if defined(SS_DEBUG) logfile->lf_chk_top = CHK_NUM_LOGFILE; @@ -2043,127 +2521,53 @@ static bool logfile_init( logfile->lf_name_seqno = 1; logfile->lf_lmgr = logmanager; logfile->lf_flushflag = false; + logfile->lf_rotateflag= false; logfile->lf_spinlock = 0; logfile->lf_store_shmem = store_shmem; logfile->lf_write_syslog = 
write_syslog; logfile->lf_buf_size = MAX_LOGSTRLEN; logfile->lf_enabled = logmanager->lm_enabled_logfiles & logfile_id; - /** - * strparts is an array but next pointers are used to walk through - * the list of string parts. - */ - strparts[0].sp_next = &strparts[1]; - strparts[1].sp_next = &strparts[2]; - strparts[2].sp_next = NULL; - - strparts[1].sp_string = logfile->lf_name_prefix; - strparts[2].sp_string = logfile->lf_name_suffix; /** * If file is stored in shared memory in /dev/shm, a link * pointing to shm file is created and located to the file * directory. */ - if (store_shmem) { - logfile->lf_filepath = strdup(shm_pathname); + if (store_shmem) + { + char* c; + pid_t pid = getpid(); + int len = strlen(shm_pathname_prefix)+ + get_decimal_len((size_t)pid) + 1; + + c = (char *)calloc(len, sizeof(char)); + + if (c == NULL) + { + succp = false; + goto return_with_succp; + } + sprintf(c, "%s%d", shm_pathname_prefix, pid); + logfile->lf_filepath = c; + + if (mkdir(c, S_IRWXU | S_IRWXG) != 0 && + errno != EEXIST) + { + succp = false; + goto return_with_succp; + } logfile->lf_linkpath = strdup(fn->fn_logpath); logfile->lf_linkpath = add_slash(logfile->lf_linkpath); - } else { + } + else + { logfile->lf_filepath = strdup(fn->fn_logpath); } logfile->lf_filepath = add_slash(logfile->lf_filepath); - - do { - namecreatefail = false; - nameconflicts = false; - - strparts[0].sp_string = logfile->lf_filepath; - /** - * Create name for log file. 
Seqno is added between prefix & - * suffix (index == 2) - */ - logfile->lf_full_file_name = - form_full_file_name(strparts, logfile->lf_name_seqno, 2); - - if (store_shmem) { - strparts[0].sp_string = logfile->lf_linkpath; - /** - * Create name for link file - */ - logfile->lf_full_link_name = - form_full_file_name(strparts, - logfile->lf_name_seqno, - 2); - fprintf(stderr, "%s\t: %s->%s\n", - STRLOGNAME(logfile_id), - logfile->lf_full_link_name, - logfile->lf_full_file_name); - } - else - { - fprintf(stderr, "%s\t: %s\n", - STRLOGNAME(logfile_id), - logfile->lf_full_file_name); - } - /** - * At least one of the files couldn't be created. Increase - * sequence number and retry until succeeds. - */ - if (logfile->lf_full_file_name == NULL || - (store_shmem && logfile->lf_full_link_name == NULL)) - { - namecreatefail = true; - goto file_create_fail; - } - /** - * If file exists but is different type, create fails and - * new, increased sequence number is added to file name. - */ - if (file_exists_and_is_writable(logfile->lf_full_file_name, - &writable)) - { - if (!writable || - file_is_symlink(logfile->lf_full_file_name)) - { - nameconflicts = true; - goto file_create_fail; - } - } - - if (store_shmem) - { - writable = false; - - if (file_exists_and_is_writable( - logfile->lf_full_link_name, - &writable)) - { - if (!writable || - !file_is_symlink(logfile->lf_full_link_name)) - { - nameconflicts = true; - goto file_create_fail; - } - } - } - file_create_fail: - if (namecreatefail || nameconflicts) - { - logfile->lf_name_seqno += 1; - - if (logfile->lf_full_file_name != NULL) - { - free(logfile->lf_full_file_name); - logfile->lf_full_file_name = NULL; - } - if (logfile->lf_full_link_name != NULL) - { - free(logfile->lf_full_link_name); - logfile->lf_full_link_name = NULL; - } - - } - } while (namecreatefail || nameconflicts); + if (!(succp = logfile_create(logfile))) + { + goto return_with_succp; + } /** * Create a block buffer list for log file. 
Clients' writes go to buffers * from where separate log flusher thread writes them to disk. @@ -2175,21 +2579,37 @@ static bool logfile_init( MAXNBLOCKBUFS) == NULL) { ss_dfprintf(stderr, - "Initializing logfile blockbuf list " - "failed\n"); + "*\n* Error : Initializing buffers for log files " + "failed."); logfile_free_memory(logfile); goto return_with_succp; } + +#if defined(SS_DEBUG) + if (store_shmem) + { + fprintf(stderr, "%s\t: %s->%s\n", + STRLOGNAME(logfile_id), + logfile->lf_full_link_name, + logfile->lf_full_file_name); + } + else + { + fprintf(stderr, "%s\t: %s\n", + STRLOGNAME(logfile_id), + logfile->lf_full_file_name); + } +#endif succp = true; logfile->lf_state = RUN; CHK_LOGFILE(logfile); return_with_succp: - if (!succp) { + if (!succp) + { logfile_done(logfile); } - ss_dassert(logfile->lf_state == RUN || - logfile->lf_state == DONE); + ss_dassert(logfile->lf_state == RUN || logfile->lf_state == DONE); return succp; } @@ -2217,12 +2637,18 @@ static void logfile_done( { switch(lf->lf_state) { case RUN: - CHK_LOGFILE(lf); - ss_dassert(lf->lf_npending_writes == 0); + CHK_LOGFILE(lf); + ss_dassert(lf->lf_npending_writes == 0); + /** fallthrough */ case INIT: - mlist_done(&lf->lf_blockbuf_list); - logfile_free_memory(lf); - lf->lf_state = DONE; + /** Test if list is initialized before freeing it */ + if (lf->lf_blockbuf_list.mlist_versno != 0) + { + mlist_done(&lf->lf_blockbuf_list); + } + logfile_free_memory(lf); + lf->lf_state = DONE; + /** fallthrough */ case DONE: case UNINIT: default: @@ -2242,16 +2668,14 @@ static void logfile_free_memory( } /** - * @node Initialize filewriter struct to a given address + * @node Initialize filewriter data and open the log file for each log file type. 
* - * Parameters: - * @param fw - - * + * @param logmanager Log manager struct + * @param fw File writer struct + * @param clientmes Messaging from file writer to log manager + * @param logmes Messaging from loggers to file writer thread * - * @return - * - * - * @details (write detailed description here) + * @return true if succeed, false if failed * */ static bool filewriter_init( @@ -2264,7 +2688,6 @@ static bool filewriter_init( logfile_t* lf; logfile_id_t id; int i; - char* start_msg_str; CHK_LOGMANAGER(logmanager); @@ -2282,40 +2705,25 @@ static bool filewriter_init( if (fw->fwr_logmes == NULL || fw->fwr_clientmes == NULL) { goto return_succp; } - for (i=LOGFILE_FIRST; i<=LOGFILE_LAST; i <<= 1) { + + for (i=LOGFILE_FIRST; i<=LOGFILE_LAST; i <<= 1) + { id = (logfile_id_t)i; lf = logmanager_get_logfile(logmanager, id); - if (lf->lf_store_shmem) - { - /** Create symlink pointing to log file */ - fw->fwr_file[id] = skygw_file_init(lf->lf_full_file_name, - lf->lf_full_link_name); - } - else - { - /** Create normal disk-resident log file */ - fw->fwr_file[id] = skygw_file_init(lf->lf_full_file_name, - NULL); - } - - if (fw->fwr_file[id] == NULL) { - goto return_succp; - } - if (lf->lf_enabled) { - start_msg_str = strdup("---\tLogging is enabled.\n"); - } else { - start_msg_str = strdup("---\tLogging is disabled.\n"); - } - skygw_file_write(fw->fwr_file[id], - (void *)start_msg_str, - strlen(start_msg_str), - true); - free(start_msg_str); - } + if (!(succp = logfile_open_file(fw, lf))) + { + fprintf(stderr, + "Error : opening log file %s failed. 
Exiting " + "MaxScale\n", + lf->lf_full_file_name); + goto return_succp; + } + } /*< for */ fw->fwr_state = RUN; CHK_FILEWRITER(fw); succp = true; + return_succp: if (!succp) { filewriter_done(fw); @@ -2336,9 +2744,10 @@ static void filewriter_done( case INIT: fw->fwr_logmes = NULL; fw->fwr_clientmes = NULL; - for (i=LOGFILE_FIRST; i<=LOGFILE_LAST; i++) { + for (i=LOGFILE_FIRST; i<=LOGFILE_LAST; i++) + { id = (logfile_id_t)i; - skygw_file_done(fw->fwr_file[id]); + skygw_file_close(fw->fwr_file[id], true); } fw->fwr_state = DONE; case DONE: @@ -2386,7 +2795,8 @@ static void filewriter_done( * by file writer which traverses the list and accesses block buffers * included in list nodes. * List modifications are protected with version numbers. - * Before modification, version is increased by one to be odd. After the + * Before + modification, version is increased by one to be odd. After the * completion, it is increased again to even. List can be read only when * version is even and read is consistent only if version hasn't changed * during the read. @@ -2405,12 +2815,15 @@ static void* thr_filewriter_fun( int i; blockbuf_state_t flush_blockbuf; /**< flush single block buffer. */ bool flush_logfile; /**< flush logfile */ - bool flushall_logfiles;/**< flush all logfiles */ + bool do_flushall = false; + bool rotate_logfile; /*< close current and open new file */ size_t vn1; size_t vn2; thr = (skygw_thread_t *)data; fwr = (filewriter_t *)skygw_thread_get_data(thr); + flushall_logfiles(false); + CHK_FILEWRITER(fwr); ss_debug(skygw_thread_set_state(thr, THR_RUNNING)); @@ -2423,26 +2836,64 @@ static void* thr_filewriter_fun( * Reset message to avoid redundant calls. */ skygw_message_wait(fwr->fwr_logmes); - - flushall_logfiles = skygw_thread_must_exit(thr); + if(skygw_thread_must_exit(thr)){ + flushall_logfiles(true); + } /** Process all logfiles which have buffered writes. 
*/ - for (i=LOGFILE_FIRST; i<=LOGFILE_LAST; i <<= 1) { + for (i=LOGFILE_FIRST; i<=LOGFILE_LAST; i <<= 1) + { retry_flush_on_exit: /** * Get file pointer of current logfile. */ + + + + do_flushall = thr_flushall_check(); file = fwr->fwr_file[i]; lf = &lm->lm_logfile[(logfile_id_t)i]; /** - * read and reset logfile's flushflag + * read and reset logfile's flush- and rotateflag */ acquire_lock(&lf->lf_spinlock); - flush_logfile = lf->lf_flushflag; - lf->lf_flushflag = false; + flush_logfile = lf->lf_flushflag; + rotate_logfile = lf->lf_rotateflag; + lf->lf_flushflag = false; + lf->lf_rotateflag = false; release_lock(&lf->lf_spinlock); - + /** + * Log rotation : + * Close current, and open a new file for the log. + */ + if (rotate_logfile) + { + bool succp; + + lf->lf_name_seqno += 1; /*< new sequence number */ + + if (!(succp = logfile_create(lf))) + { + lf->lf_name_seqno -= 1; /*< restore */ + } + else if ((succp = logfile_open_file(fwr, lf))) + { + skygw_file_close(file, false); /*< close old file */ + } + + if (!succp) + { + LOGIF(LE, (skygw_log_write( + LOGFILE_ERROR, + "Error : Log rotation failed. " + "Creating replacement file %s " + "failed. 
Continuing " + "logging to existing file.", + lf->lf_full_file_name))); + } + continue; + } /** * get logfile's block buffer list */ @@ -2454,7 +2905,10 @@ static void* thr_filewriter_fun( #endif node = bb_list->mlist_first; - while (node != NULL) { + while (node != NULL) + { + int err = 0; + CHK_MLIST_NODE(node); bb = (blockbuf_t *)node->mlnode_data; CHK_BLOCKBUF(bb); @@ -2467,25 +2921,39 @@ static void* thr_filewriter_fun( if (bb->bb_buf_used != 0 && (flush_blockbuf == BB_FULL || flush_logfile || - flushall_logfiles)) + do_flushall)) { /** * buffer is at least half-full * -> write to disk */ - while(bb->bb_refcount > 0) { + while(bb->bb_refcount > 0) + { simple_mutex_unlock( &bb->bb_mutex); simple_mutex_lock( &bb->bb_mutex, true); } - - skygw_file_write(file, - (void *)bb->bb_buf, - bb->bb_buf_used, - (flush_logfile || - flushall_logfiles)); + err = skygw_file_write( + file, + (void *)bb->bb_buf, + bb->bb_buf_used, + (flush_logfile || + do_flushall)); + if (err) + { + fprintf(stderr, + "Error : Write to %s log " + ": %s failed due to %d, " + "%s. Disabling the log.", + STRLOGNAME((logfile_id_t)i), + lf->lf_full_file_name, + err, + strerror(err)); + /** Force log off */ + skygw_log_disable_raw((logfile_id_t)i, true); + } /** * Reset buffer's counters and mark * not full. @@ -2519,13 +2987,28 @@ static void* thr_filewriter_fun( * Loop is restarted to ensure that all logfiles are * flushed. 
*/ - if (!flushall_logfiles && skygw_thread_must_exit(thr)) + + if(flushall_started_flag){ + flushall_started_flag = false; + flushall_done_flag = true; + i = LOGFILE_FIRST; + goto retry_flush_on_exit; + } + + if (!thr_flushall_check() && skygw_thread_must_exit(thr)) { - flushall_logfiles = true; + flushall_logfiles(true); i = LOGFILE_FIRST; goto retry_flush_on_exit; } - } /* for */ + }/* for */ + + if(flushall_done_flag){ + flushall_done_flag = false; + flushall_logfiles(false); + skygw_message_send(fwr->fwr_clientmes); + } + } /* while (!skygw_thread_must_exit) */ ss_debug(skygw_thread_set_state(thr, THR_STOPPED)); @@ -2565,3 +3048,94 @@ static void fnames_conf_free_memory( if (fn->fn_err_suffix != NULL) free(fn->fn_err_suffix); if (fn->fn_logpath != NULL) free(fn->fn_logpath); } + +/** + * Find the file with biggest sequence number from given directory and return + * the sequence number. + * + * @param parts string parts of which the file name is composed of + * @param seqno the sequence number to start with, if seqno is -1 just return + * + * @return the biggest sequence number used + */ +static int find_last_seqno( + strpart_t* parts, + int seqno, + int seqnoidx) +{ + strpart_t* p; + char* snstr; + int snstrlen; + + if (seqno == -1) + { + return seqno; + } + snstrlen = UINTLEN(INT_MAX); + snstr = (char *)calloc(1, snstrlen); + p = parts; + + while (true) + { + int i; + char filename[NAME_MAX] = {0}; + /** Form name with next seqno */ + snprintf(snstr, snstrlen, "%d", seqno+1); + + for (i=0, p=parts; p->sp_string != NULL; i++, p=p->sp_next) + { + if (snstr != NULL && i == seqnoidx) + { + strncat(filename, snstr, NAME_MAX - 1); /*< add sequence number */ + } + strncat(filename, p->sp_string, NAME_MAX - 1); + + if (p->sp_next == NULL) + { + break; + } + } + + if (check_file_and_path(filename, NULL, false)) + { + seqno++; + } + else + { + break; + } + } + free(snstr); + + return seqno; +} + +bool thr_flushall_check() +{ + bool rval = false; + 
simple_mutex_lock(&lm->lm_mutex,true); + rval = flushall_flag; + if(rval && !flushall_started_flag && !flushall_done_flag){ + flushall_started_flag = true; + } + simple_mutex_unlock(&lm->lm_mutex); + return rval; +} + +void flushall_logfiles(bool flush) +{ + simple_mutex_lock(&lm->lm_mutex,true); + flushall_flag = flush; + simple_mutex_unlock(&lm->lm_mutex); +} + +/** + * Flush all log files synchronously + */ +void skygw_log_sync_all(void) +{ + skygw_log_write(LOGFILE_TRACE,"Starting log flushing to disk."); + flushall_logfiles(true); + skygw_message_send(lm->lm_logmes); + skygw_message_wait(lm->lm_clientmes); +} diff --git a/log_manager/log_manager.h b/log_manager/log_manager.h index 6a4c1d6cc..fa5e212f7 100644 --- a/log_manager/log_manager.h +++ b/log_manager/log_manager.h @@ -1,5 +1,5 @@ /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -13,9 +13,10 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * - * Copyright SkySQL Ab 2013 + * Copyright MariaDB Corporation Ab 2013-2014 */ - +#if !defined(LOG_MANAGER_H) +# define LOG_MANAGER_H typedef struct filewriter_st filewriter_t; typedef struct logfile_st logfile_t; @@ -41,17 +42,55 @@ typedef enum { typedef enum { FILEWRITER_INIT, FILEWRITER_RUN, FILEWRITER_DONE } filewriter_state_t; +/** +* Thread-specific logging information. +*/ +typedef struct log_info_st +{ + size_t li_sesid; + int li_enabled_logs; +} log_info_t; + #define LE LOGFILE_ERROR #define LM LOGFILE_MESSAGE #define LT LOGFILE_TRACE #define LD LOGFILE_DEBUG +/** + * Check if specified log type is enabled in general or if it is enabled + * for the current session. 
+ */ +#define LOG_IS_ENABLED(id) (((lm_enabled_logfiles_bitmask & id) || \ + (log_ses_count[id] > 0 && \ + tls_log_info.li_enabled_logs & id)) ? true : false) + + +#define LOG_MAY_BE_ENABLED(id) (((lm_enabled_logfiles_bitmask & id) || \ + log_ses_count[id] > 0) ? true : false) +/** + * Execute the given command if specified log is enabled in general or + * if there is at least one session for whom the log is enabled. + */ +#define LOGIF_MAYBE(id,cmd) if (LOG_MAY_BE_ENABLED(id)) \ + { \ + cmd; \ + } + +/** + * Execute the given command if specified log is enabled in general or + * if the log is enabled for the current session. + */ +#define LOGIF(id,cmd) if (LOG_IS_ENABLED(id)) \ + { \ + cmd; \ + } + +#if !defined(LOGIF) #define LOGIF(id,cmd) if (lm_enabled_logfiles_bitmask & id) \ - { \ - cmd; \ - } \ - -#define LOG_IS_ENABLED(id) ((lm_enabled_logfiles_bitmask & id) ? true : false) + { \ + cmd; \ + } +#endif /** * UNINIT means zeroed memory buffer allocated for the struct. @@ -74,10 +113,11 @@ void skygw_logmanager_exit(void); void skygw_log_done(void); int skygw_log_write(logfile_id_t id, const char* format, ...); int skygw_log_flush(logfile_id_t id); +int skygw_log_rotate(logfile_id_t id); int skygw_log_write_flush(logfile_id_t id, const char* format, ...); int skygw_log_enable(logfile_id_t id); int skygw_log_disable(logfile_id_t id); - +void skygw_log_sync_all(void); EXTERN_C_BLOCK_END @@ -90,3 +130,5 @@ const char* get_msg_suffix_default(void); const char* get_err_prefix_default(void); const char* get_err_suffix_default(void); const char* get_logpath_default(void); + +#endif /** LOG_MANAGER_H */ diff --git a/log_manager/test/testlog.c b/log_manager/test/testlog.c index c21d30e17..797af4d07 100644 --- a/log_manager/test/testlog.c +++ b/log_manager/test/testlog.c @@ -1,5 +1,5 @@ /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. 
It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -13,7 +13,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * - * Copyright SkySQL Ab 2013 + * Copyright MariaDB Corporation Ab 2013-2014 */ diff --git a/log_manager/test/testorder.c b/log_manager/test/testorder.c index e2bb94a7b..da87c5325 100644 --- a/log_manager/test/testorder.c +++ b/log_manager/test/testorder.c @@ -1,5 +1,5 @@ /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -13,7 +13,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
* - * Copyright SkySQL Ab 2013 + * Copyright MariaDB Corporation Ab 2013-2014 */ #include @@ -33,7 +33,8 @@ int main(int argc, char** argv) char *message; char** optstr; long msg_index = 1; - + struct timespec ts1; + ts1.tv_sec = 0; memset(cwd,0,1024); if( argc <4){ @@ -45,8 +46,9 @@ int main(int argc, char** argv) } block_size = atoi(argv[3]); - if(block_size < 1){ - fprintf(stderr,"Message size too small, must be at least 1 byte long."); + if(block_size < 1 || block_size > 1024){ + fprintf(stderr,"Message size too small or large, must be at least 1 byte long and must not exceed 1024 bytes."); + return 1; } @@ -78,7 +80,12 @@ int main(int argc, char** argv) for(i = 0;i 8192){ + fprintf(stderr,"Error: Message too long"); + break; + } + memset(message + strlen(message), ' ', msgsize); memset(message + block_size - 1,'\0',1); if(interval > 0 && i % interval == 0){ err = skygw_log_write_flush(LOGFILE_ERROR, message); @@ -89,8 +96,8 @@ int main(int argc, char** argv) fprintf(stderr,"Error: log_manager returned %d",err); break; } - usleep(100); - //printf("%s\n",message); + ts1.tv_nsec = 100*1000000; + nanosleep(&ts1, NULL); } skygw_log_flush(LOGFILE_ERROR); diff --git a/macros.cmake b/macros.cmake index 7e3e5d250..3b363e30d 100644 --- a/macros.cmake +++ b/macros.cmake @@ -1,11 +1,17 @@ +function(debugmsg MSG) + if(DEBUG_OUTPUT) + message(STATUS "DEBUG: ${MSG}") + endif() +endfunction() + macro(set_maxscale_version) #MaxScale version number set(MAXSCALE_VERSION_MAJOR "1") set(MAXSCALE_VERSION_MINOR "0") - set(MAXSCALE_VERSION_PATCH "1") + set(MAXSCALE_VERSION_PATCH "4") set(MAXSCALE_VERSION_NUMERIC "${MAXSCALE_VERSION_MAJOR}.${MAXSCALE_VERSION_MINOR}.${MAXSCALE_VERSION_PATCH}") - set(MAXSCALE_VERSION "${MAXSCALE_VERSION_MAJOR}.${MAXSCALE_VERSION_MINOR}.${MAXSCALE_VERSION_PATCH}-beta") + set(MAXSCALE_VERSION "${MAXSCALE_VERSION_MAJOR}.${MAXSCALE_VERSION_MINOR}.${MAXSCALE_VERSION_PATCH}-stable") endmacro() @@ -21,17 +27,20 @@ macro(set_variables) set(TEST_HOST "127.0.0.1" 
CACHE STRING "hostname or IP address of MaxScale's host") # port of read connection router module - set(TEST_PORT_RW "4008" CACHE STRING "port of read connection router module") + set(TEST_PORT "4008" CACHE STRING "port of read connection router module") # port of read/write split router module set(TEST_PORT_RW "4006" CACHE STRING "port of read/write split router module") # port of read/write split router module with hints - set(TEST_PORT_RW_HINT "4006" CACHE STRING "port of read/write split router module with hints") + set(TEST_PORT_RW_HINT "4009" CACHE STRING "port of read/write split router module with hints") # master test server server_id set(TEST_MASTER_ID "3000" CACHE STRING "master test server server_id") + # master test server port + set(MASTER_PORT "3000" CACHE STRING "master test server port") + # username of MaxScale user set(TEST_USER "maxuser" CACHE STRING "username of MaxScale user") @@ -51,17 +60,18 @@ macro(set_variables) set(INSTALL_SYSTEM_FILES TRUE CACHE BOOL "Install init.d scripts and ldconf configuration files") # Build tests - set(BUILD_TESTS TRUE CACHE BOOL "Build tests") + set(BUILD_TESTS FALSE CACHE BOOL "Build tests") endmacro() macro(check_deps) + # Check for libraries MaxScale depends on set(MAXSCALE_DEPS aio ssl crypt crypto z m dl rt pthread) foreach(lib ${MAXSCALE_DEPS}) find_library(lib${lib} ${lib}) - if((DEFINED lib${lib}) AND (${lib${lib}} STREQUAL "lib${lib}-NOTFOUND")) + if((DEFINED lib${lib}) AND (${lib${lib}} MATCHES "NOTFOUND")) set(DEPS_ERROR TRUE) set(FAILED_DEPS "${FAILED_DEPS} lib${lib}") elseif(DEBUG_OUTPUT) @@ -82,79 +92,105 @@ macro(check_dirs) set(DEPS_OK TRUE CACHE BOOL "If all the dependencies were found.") # Find the MySQL headers if they were not defined + if(DEFINED MYSQL_DIR) - if(DEBUG_OUTPUT) - message(STATUS "Searching for MySQL headers at: ${MYSQL_DIR}") - endif() + debugmsg("Searching for MySQL headers at: ${MYSQL_DIR}") find_path(MYSQL_DIR_LOC mysql.h PATHS ${MYSQL_DIR} PATH_SUFFIXES mysql mariadb 
NO_DEFAULT_PATH) + else() + find_path(MYSQL_DIR_LOC mysql.h PATH_SUFFIXES mysql mariadb) endif() - find_path(MYSQL_DIR_LOC mysql.h PATH_SUFFIXES mysql mariadb) - if(DEBUG_OUTPUT) - message(STATUS "Search returned: ${MYSQL_DIR_LOC}") - endif() - if(${MYSQL_DIR_LOC} STREQUAL "MYSQL_DIR_LOC-NOTFOUND") + +debugmsg("Search returned: ${MYSQL_DIR_LOC}") + + if(${MYSQL_DIR_LOC} MATCHES "NOTFOUND") set(DEPS_OK FALSE CACHE BOOL "If all the dependencies were found.") message(FATAL_ERROR "Fatal Error: MySQL headers were not found.") else() - message(STATUS "Using MySQL headers found at: ${MYSQL_DIR}") set(MYSQL_DIR ${MYSQL_DIR_LOC} CACHE PATH "Path to MySQL headers" FORCE) + message(STATUS "Using MySQL headers found at: ${MYSQL_DIR}") endif() - set(MYSQL_DIR_LOC "" INTERNAL) + + unset(MYSQL_DIR_LOC) # Find the errmsg.sys file if it was not defied if( DEFINED ERRMSG ) + debugmsg("Looking for errmsg.sys at: ${ERRMSG}") + if(NOT(IS_DIRECTORY ${ERRMSG})) + get_filename_component(ERRMSG ${ERRMSG} PATH) + debugmsg("Path to file is: ${ERRMSG}") + endif() find_file(ERRMSG_FILE errmsg.sys PATHS ${ERRMSG} NO_DEFAULT_PATH) - endif() - find_file(ERRMSG_FILE errmsg.sys PATHS /usr/share/mysql /usr/local/share/mysql PATH_SUFFIXES english) - if(${ERRMSG_FILE} MATCHES "ERRMSG_FILE-NOTFOUND") - set(DEPS_OK FALSE CACHE BOOL "If all the dependencies were found.") - message(FATAL_ERROR "Fatal Error: The errmsg.sys file was not found, please define the path to it by using -DERRMSG=") + if(${ERRMSG_FILE} MATCHES "NOTFOUND") + message(FATAL_ERROR "Fatal Error: The errmsg.sys file was not found at ${ERRMSG}") + else() + message(STATUS "Using custom errmsg.sys found at: ${ERRMSG_FILE}") + endif() else() - message(STATUS "Using errmsg.sys found at: ${ERRMSG_FILE}") + find_file(ERRMSG_FILE errmsg.sys PATHS /usr/share/mysql /usr/local/share/mysql PATH_SUFFIXES english) + if(${ERRMSG_FILE} MATCHES "NOTFOUND") + set(DEPS_OK FALSE CACHE BOOL "If all the dependencies were found.") + message(FATAL_ERROR "Fatal 
Error: The errmsg.sys file was not found, please define the path to it by using -DERRMSG=") + else() + message(STATUS "Using errmsg.sys found at: ${ERRMSG_FILE}") + endif() endif() set(ERRMSG ${ERRMSG_FILE} CACHE FILEPATH "Path to the errmsg.sys file." FORCE) - set(ERRMSG_FILE "" INTERNAL) + unset(ERRMSG_FILE) # Find the embedded mysql library - if(STATIC_EMBEDDED) + + if (DEFINED EMBEDDED_LIB) + if( NOT (IS_DIRECTORY ${EMBEDDED_LIB}) ) + debugmsg("EMBEDDED_LIB is not a directory: ${EMBEDDED_LIB}") + if(${CMAKE_VERSION} VERSION_LESS 2.12 ) + set(COMP_VAR PATH) + else() + set(COMP_VAR DIRECTORY) + endif() + get_filename_component(EMBEDDED_LIB ${EMBEDDED_LIB} ${COMP_VAR}) + debugmsg("EMBEDDED_LIB directory component: ${EMBEDDED_LIB}") + endif() + debugmsg("Searching for the embedded library at: ${EMBEDDED_LIB}") + endif() + if(STATIC_EMBEDDED) + + debugmsg("Using the static embedded library...") set(OLD_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES}) set(CMAKE_FIND_LIBRARY_SUFFIXES ".a") if (DEFINED EMBEDDED_LIB) - if(DEBUG_OUTPUT) - message(STATUS "Searching for libmysqld.a at: ${EMBEDDED_LIB}") - endif() + debugmsg("Searching for libmysqld.a at: ${EMBEDDED_LIB}") find_library(EMBEDDED_LIB_STATIC libmysqld.a PATHS ${EMBEDDED_LIB} PATH_SUFFIXES mysql mariadb NO_DEFAULT_PATH) else() find_library(EMBEDDED_LIB_STATIC libmysqld.a PATH_SUFFIXES mysql mariadb) endif() - if(DEBUG_OUTPUT) - message(STATUS "Search returned: ${EMBEDDED_LIB_STATIC}") - endif() + debugmsg("Search returned: ${EMBEDDED_LIB_STATIC}") + set(EMBEDDED_LIB ${EMBEDDED_LIB_STATIC} CACHE FILEPATH "Path to libmysqld" FORCE) set(CMAKE_FIND_LIBRARY_SUFFIXES ${OLD_SUFFIXES}) - set(OLD_SUFFIXES "" INTERNAL) - else() + else() + debugmsg("Using the dynamic embedded library...") + set(OLD_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES}) + set(CMAKE_FIND_LIBRARY_SUFFIXES ".so") if (DEFINED EMBEDDED_LIB) - if(DEBUG_OUTPUT) - message(STATUS "Searching for libmysqld.so at: ${EMBEDDED_LIB}") - endif() + debugmsg("Searching for 
libmysqld.so at: ${EMBEDDED_LIB}") find_library(EMBEDDED_LIB_DYNAMIC mysqld PATHS ${EMBEDDED_LIB} PATH_SUFFIXES mysql mariadb NO_DEFAULT_PATH) else() find_library(EMBEDDED_LIB_DYNAMIC mysqld PATH_SUFFIXES mysql mariadb) endif() - if(DEBUG_OUTPUT) - message(STATUS "Search returned: ${EMBEDDED_LIB_DYNAMIC}") - endif() + debugmsg("Search returned: ${EMBEDDED_LIB_DYNAMIC}") set(EMBEDDED_LIB ${EMBEDDED_LIB_DYNAMIC} CACHE FILEPATH "Path to libmysqld" FORCE) - + set(CMAKE_FIND_LIBRARY_SUFFIXES ${OLD_SUFFIXES}) + endif() - set(EMBEDDED_LIB_DYNAMIC "" INTERNAL) - set(EMBEDDED_LIB_STATIC "" INTERNAL) + + unset(EMBEDDED_LIB_DYNAMIC) + unset(EMBEDDED_LIB_STATIC) + unset(OLD_SUFFIXES) # Inform the user about the embedded library - if( (${EMBEDDED_LIB} STREQUAL "EMBEDDED_LIB_STATIC-NOTFOUND") OR (${EMBEDDED_LIB} STREQUAL "EMBEDDED_LIB_DYNAMIC-NOTFOUND")) + if( (${EMBEDDED_LIB} MATCHES "NOTFOUND") OR (${EMBEDDED_LIB} MATCHES "NOTFOUND")) set(DEPS_OK FALSE CACHE BOOL "If all the dependencies were found.") message(FATAL_ERROR "Library not found: libmysqld. 
If your install of MySQL is in a non-default location, please provide the location with -DEMBEDDED_LIB=") else() @@ -176,43 +212,63 @@ macro(check_dirs) else() set(DEB_BASED FALSE CACHE BOOL "If init.d script uses /lib/lsb/init-functions instead of /etc/rc.d/init.d/functions.") endif() - set(DEB_FNC "" INTERNAL) - set(RPM_FNC "" INTERNAL) + unset(DEB_FNC) + unset(RPM_FNC) + + #Find the MySQL client library +# find_library(MYSQLCLIENT_LIBRARIES NAMES mysqlclient PATH_SUFFIXES mysql mariadb) +# if(${MYSQLCLIENT_LIBRARIES} MATCHES "NOTFOUND") +# set(MYSQLCLIENT_FOUND FALSE CACHE INTERNAL "") +# message(STATUS "Cannot find MySQL client library: Login tests disabled.") +# else() +# set(MYSQLCLIENT_FOUND TRUE CACHE INTERNAL "") +# message(STATUS "Found MySQL client library: ${MYSQLCLIENT_LIBRARIES}") +# endif() #Check RabbitMQ headers and libraries if(BUILD_RABBITMQ) - - if(DEFINED RABBITMQ_LIB) - find_library(RMQ_LIB rabbitmq PATHS ${RABBITMQ_LIB} NO_DEFAULT_PATH) - endif() - find_library(RMQ_LIB rabbitmq) - if(RMQ_LIB STREQUAL "RMQ_LIB-NOTFOUND") - set(DEPS_OK FALSE CACHE BOOL "If all the dependencies were found.") - message(FATAL_ERROR "Cannot find RabbitMQ libraries, please define the path to the libraries with -DRABBITMQ_LIB=") - else() - set(RABBITMQ_LIB ${RMQ_LIB} CACHE PATH "Path to RabbitMQ libraries" FORCE) - message(STATUS "Using RabbitMQ libraries found at: ${RABBITMQ_LIB}") - endif() - - if(DEFINED RABBITMQ_HEADERS) - find_file(RMQ_HEADERS amqp.h PATHS ${RABBITMQ_HEADERS} NO_DEFAULT_PATH) - endif() - find_file(RMQ_HEADERS amqp.h) - if(RMQ_HEADERS STREQUAL "RMQ_HEADERS-NOTFOUND") - set(DEPS_OK FALSE CACHE BOOL "If all the dependencies were found.") - message(FATAL_ERROR "Cannot find RabbitMQ headers, please define the path to the headers with -DRABBITMQ_HEADERS=") - else() - set(RABBITMQ_HEADERS ${RMQ_HEADERS} CACHE PATH "Path to RabbitMQ headers" FORCE) - message(STATUS "Using RabbitMQ headers found at: ${RABBITMQ_HEADERS}") - endif() - + 
find_package(RabbitMQ) +# include(CheckCSourceCompiles) +# +# if(DEFINED RABBITMQ_LIB) +# find_library(RMQ_LIB rabbitmq PATHS ${RABBITMQ_LIB} NO_DEFAULT_PATH) +# else() +# find_library(RMQ_LIB rabbitmq) +# endif() +# if(RMQ_LIB MATCHES "NOTFOUND") +# set(DEPS_OK FALSE CACHE BOOL "If all the dependencies were found.") +# message(FATAL_ERROR "Cannot find RabbitMQ libraries, please define the path to the libraries with -DRABBITMQ_LIB=") +# else() +# set(RABBITMQ_LIB ${RMQ_LIB} CACHE PATH "Path to RabbitMQ libraries" FORCE) +# message(STATUS "Using RabbitMQ libraries found at: ${RABBITMQ_LIB}") +# endif() +# +# if(DEFINED RABBITMQ_HEADERS) +# find_file(RMQ_HEADERS amqp.h PATHS ${RABBITMQ_HEADERS} NO_DEFAULT_PATH) +# else() +# find_file(RMQ_HEADERS amqp.h) +# endif() +# if(RMQ_HEADERS MATCHES "NOTFOUND") +# set(DEPS_OK FALSE CACHE BOOL "If all the dependencies were found.") +# message(FATAL_ERROR "Cannot find RabbitMQ headers, please define the path to the headers with -DRABBITMQ_HEADERS=") +# else() +# set(RABBITMQ_HEADERS ${RMQ_HEADERS} CACHE PATH "Path to RabbitMQ headers" FORCE) +# message(STATUS "Using RabbitMQ headers found at: ${RABBITMQ_HEADERS}") +# endif() +# +# set(CMAKE_REQUIRED_INCLUDES ${RABBITMQ_HEADERS}) +# check_c_source_compiles("#include \n int main(){if(AMQP_DELIVERY_PERSISTENT){return 0;}return 1;}" HAVE_RMQ50) +# if(NOT HAVE_RMQ50) +# message(FATAL_ERROR "Old version of RabbitMQ-C library found. 
Version 0.5 or newer is required.") +# endif() +# endif() endmacro() function(subdirs VAR DIRPATH) -if(${CMAKE_VERSION} VERSION_LESS 2.12 ) +if(${CMAKE_VERSION} VERSION_LESS 2.8.12 ) set(COMP_VAR PATH) else() set(COMP_VAR DIRECTORY) @@ -224,4 +280,4 @@ endif() endforeach() list(REMOVE_DUPLICATES ALLDIRS) set(${VAR} "${ALLDIRS}" CACHE PATH " " FORCE) -endfunction() \ No newline at end of file +endfunction() diff --git a/makefile.inc b/makefile.inc index 279cea6a4..0d63b5192 100644 --- a/makefile.inc +++ b/makefile.inc @@ -25,7 +25,7 @@ endif # -O2 -g -pipe -Wformat -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -fstack-protector --param=ssp-buffer-size=4 -fPIC CFLAGS := $(CFLAGS) -Wall -LDLIBS := $(LDLIBS) -pthread +LDLIBS := $(LDLIBS) -pthread -lm LDMYSQL := -lmysqld CPP_LDLIBS := -lstdc++ diff --git a/query_classifier/CMakeLists.txt b/query_classifier/CMakeLists.txt index 8f8ab0186..42270cd2c 100644 --- a/query_classifier/CMakeLists.txt +++ b/query_classifier/CMakeLists.txt @@ -1,6 +1,5 @@ add_library(query_classifier SHARED query_classifier.cc) -target_link_libraries(query_classifier ${EMBEDDED_LIB}) install(TARGETS query_classifier DESTINATION lib) if(BUILD_TESTS) add_subdirectory(test) -endif() \ No newline at end of file +endif() diff --git a/query_classifier/query_classifier.cc b/query_classifier/query_classifier.cc index ecf305308..dfcf8d149 100644 --- a/query_classifier/query_classifier.cc +++ b/query_classifier/query_classifier.cc @@ -1,7 +1,7 @@ /** * @section LICENCE * - * This file is distributed as part of the SkySQL Gateway. It is + * This file is distributed as part of the MariaDB Corporation MaxScale. It is * free software: you can redistribute it and/or modify it under * the terms of the GNU General Public License as published by the * Free Software Foundation, version 2. @@ -16,7 +16,7 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301 USA. 
* - * Copyright SkySQL Ab + * Copyright MariaDB Corporation Ab * * @file * @@ -30,12 +30,7 @@ # undef MYSQL_CLIENT #endif -#include -#include "../utils/skygw_types.h" -#include "../utils/skygw_debug.h" -#include -#include - +#include #include #include #include @@ -55,12 +50,20 @@ #include #include +#include "../utils/skygw_types.h" +#include "../utils/skygw_debug.h" +#include +#include +#include + #include #include #include #include -extern int lm_enabled_logfiles_bitmask; +extern int lm_enabled_logfiles_bitmask; +extern size_t log_ses_count[]; +extern __thread log_info_t tls_log_info; #define QTYPE_LESS_RESTRICTIVE_THAN_WRITE(t) (t= ~((size_t)0) - 1 || (query_str = (char *)malloc(len+1)) == NULL) { /** Free parsing info data */ parsing_info_done(pi); @@ -359,50 +362,29 @@ static bool create_parse_tree( Parser_state parser_state; bool failp = FALSE; const char* virtual_db = "skygw_virtual"; -#if defined(SS_DEBUG_EXTRA) - LOGIF(LM, (skygw_log_write_flush( - LOGFILE_MESSAGE, - "[readwritesplit:create_parse_tree] 1."))); -#endif - if (parser_state.init(thd, thd->query(), thd->query_length())) { + + if (parser_state.init(thd, thd->query(), thd->query_length())) + { failp = TRUE; goto return_here; } -#if defined(SS_DEBUG_EXTRA) - LOGIF(LM, (skygw_log_write_flush( - LOGFILE_MESSAGE, - "[readwritesplit:create_parse_tree] 2."))); -#endif mysql_reset_thd_for_next_command(thd); -#if defined(SS_DEBUG_EXTRA) - LOGIF(LM, (skygw_log_write_flush( - LOGFILE_MESSAGE, - "[readwritesplit:create_parse_tree] 3."))); -#endif /** * Set some database to thd so that parsing won't fail because of * missing database. Then parse. 
*/ failp = thd->set_db(virtual_db, strlen(virtual_db)); -#if defined(SS_DEBUG_EXTRA) - LOGIF(LM, (skygw_log_write_flush( - LOGFILE_MESSAGE, - "[readwritesplit:create_parse_tree] 4."))); -#endif - if (failp) { + if (failp) + { LOGIF(LE, (skygw_log_write_flush( LOGFILE_ERROR, "Error : Failed to set database in thread context."))); } failp = parse_sql(thd, &parser_state, NULL); -#if defined(SS_DEBUG_EXTRA) - LOGIF(LM, (skygw_log_write_flush( - LOGFILE_MESSAGE, - "[readwritesplit:create_parse_tree] 5."))); -#endif - if (failp) { + if (failp) + { LOGIF(LD, (skygw_log_write( LOGFILE_DEBUG, "%lu [readwritesplit:create_parse_tree] failed to " @@ -414,16 +396,14 @@ return_here: } /** - * @node Set new query type if new is more restrictive than old. + * Set new query type if new is more restrictive than old. * * Parameters: - * @param qtype - - * + * @param qtype Existing type * - * @param new_type - - * + * @param new_type New query type * - * @return + * @return Query type as an unsigned int value which must be casted to qtype. * * * @details The implementation relies on that enumerated values correspond @@ -440,13 +420,11 @@ static u_int32_t set_query_type( } /** - * @node Detect query type, read-only, write, or session update + * Detect query type by examining parsed representation of it. * - * Parameters: - * @param thd - - * + * @param thd MariaDB thread context. * - * @return + * @return Copy of query type value. * * * @details Query type is deduced by checking for certain properties @@ -470,11 +448,12 @@ static skygw_query_type_t resolve_query_type( * When force_data_modify_op_replication is TRUE, gateway distributes * all write operations to all nodes. 
*/ - bool force_data_modify_op_replication; - +#if defined(NOT_IN_USE) + bool force_data_modify_op_replication; + force_data_modify_op_replication = FALSE; +#endif /* NOT_IN_USE */ ss_info_dassert(thd != NULL, ("thd is NULL\n")); - force_data_modify_op_replication = FALSE; lex = thd->lex; /** SELECT ..INTO variable|OUTFILE|DUMPFILE */ @@ -584,19 +563,21 @@ static skygw_query_type_t resolve_query_type( if (is_log_table_write_query(lex->sql_command) || is_update_query(lex->sql_command)) { +#if defined(NOT_IN_USE) if (thd->variables.sql_log_bin == 0 && force_data_modify_op_replication) { /** Not replicated */ type |= QUERY_TYPE_SESSION_WRITE; } - else + else +#endif /* NOT_IN_USE */ { /** Written to binlog, that is, replicated except tmp tables */ type |= QUERY_TYPE_WRITE; /*< to master */ - if (lex->create_info.options & HA_LEX_CREATE_TMP_TABLE && - lex->sql_command == SQLCOM_CREATE_TABLE) + if (lex->sql_command == SQLCOM_CREATE_TABLE && + (lex->create_info.options & HA_LEX_CREATE_TMP_TABLE)) { type |= QUERY_TYPE_CREATE_TMP_TABLE; /*< remember in router */ } @@ -613,6 +594,7 @@ static skygw_query_type_t resolve_query_type( break; case SQLCOM_SELECT: + case SQLCOM_SHOW_SLAVE_STAT: type |= QUERY_TYPE_READ; break; @@ -639,7 +621,17 @@ static skygw_query_type_t resolve_query_type( type |= QUERY_TYPE_PREPARE_NAMED_STMT; goto return_qtype; break; - + + case SQLCOM_SHOW_DATABASES: + type |= QUERY_TYPE_SHOW_DATABASES; + goto return_qtype; + break; + + case SQLCOM_SHOW_TABLES: + type |= QUERY_TYPE_SHOW_TABLES; + goto return_qtype; + break; + default: break; } @@ -823,8 +815,7 @@ static skygw_query_type_t resolve_query_type( LOGIF(LD, (skygw_log_write( LOGFILE_DEBUG, "%lu [resolve_query_type] " - "Unknown functype %d. Something " - "has gone wrong.", + "Functype %d.", pthread_self(), ftype))); break; @@ -852,6 +843,11 @@ return_qtype: * Checks if statement causes implicit COMMIT. 
* autocommit_stmt gets values 1, 0 or -1 if stmt is enable, disable or * something else than autocommit. + * + * @param lex Parse tree + * @param autocommit_stmt memory address for autocommit status + * + * @return true if statement causes implicit commit and false otherwise */ static bool skygw_stmt_causes_implicit_commit( LEX* lex, @@ -881,7 +877,7 @@ static bool skygw_stmt_causes_implicit_commit( } else { - succp =false; + succp = false; } break; default: @@ -897,7 +893,9 @@ return_succp: * Finds out if stmt is SET autocommit * and if the new value matches with the enable_cmd argument. * - * Returns 1, 0, or -1 if command was: + * @param lex parse tree + * + * @return 1, 0, or -1 if command was: * enable, disable, or not autocommit, respectively. */ static int is_autocommit_stmt( @@ -968,9 +966,11 @@ char* skygw_query_classifier_get_stmtname( } /** - *Returns the LEX struct of the parsed GWBUF - *@param The parsed GWBUF - *@return Pointer to the LEX struct or NULL if an error occurred or the query was not parsed + * Get the parse tree from parsed querybuf. 
+ * @param querybuf The parsed GWBUF + * + * @return Pointer to the LEX struct or NULL if an error occurred or the query + * was not parsed */ LEX* get_lex(GWBUF* querybuf) { @@ -1040,15 +1040,16 @@ char** skygw_get_table_names(GWBUF* querybuf,int* tblsize, bool fullnames) TABLE_LIST* tbl; int i = 0, currtblsz = 0; - char **tables, - **tmp; + char **tables = NULL, + **tmp = NULL; - if((lex = get_lex(querybuf)) == NULL) - { + if( (lex = get_lex(querybuf)) == NULL || + lex->current_select == NULL ) + { goto retblock; } - lex->current_select = lex->all_selects_list; + lex->current_select = lex->all_selects_list; while(lex->current_select){ @@ -1075,30 +1076,31 @@ char** skygw_get_table_names(GWBUF* querybuf,int* tblsize, bool fullnames) } } + if(tmp != NULL){ + char *catnm = NULL; - char *catnm = NULL; - - if(fullnames) - { - if(tbl->db && strcmp(tbl->db,"skygw_virtual") != 0) - { - catnm = (char*)calloc(strlen(tbl->db) + strlen(tbl->table_name) + 2,sizeof(char)); - strcpy(catnm,tbl->db); - strcat(catnm,"."); - strcat(catnm,tbl->table_name); - } - } + if(fullnames) + { + if(tbl->db && strcmp(tbl->db,"skygw_virtual") != 0) + { + catnm = (char*)calloc(strlen(tbl->db) + strlen(tbl->table_name) + 2,sizeof(char)); + strcpy(catnm,tbl->db); + strcat(catnm,"."); + strcat(catnm,tbl->table_name); + } + } - if(catnm) - { - tables[i++] = catnm; - } - else - { - tables[i++] = strdup(tbl->table_name); - } + if(catnm) + { + tables[i++] = catnm; + } + else + { + tables[i++] = strdup(tbl->table_name); + } - tbl=tbl->next_local; + tbl=tbl->next_local; + } } lex->current_select = lex->current_select->next_select_in_list(); } @@ -1177,7 +1179,7 @@ bool is_drop_table_query(GWBUF* querybuf) lex->sql_command == SQLCOM_DROP_TABLE; } -/* +/** * Replace user-provided literals with question marks. Return a copy of the * querystr with replacements. * @@ -1375,3 +1377,49 @@ static void parsing_info_set_plain_str( pi->pi_query_plain_str = str; } + +/** + * Generate a string of query type value. 
+ * Caller must free the memory of the resulting string. + * + * @param qtype Query type value, combination of values listed in + * query_classifier.h + * + * @return string representing the query type value + */ +char* skygw_get_qtype_str( + skygw_query_type_t qtype) +{ + int t1 = (int)qtype; + int t2 = 1; + skygw_query_type_t t = QUERY_TYPE_UNKNOWN; + char* qtype_str = NULL; + + /** + * Test values (bits) and clear matching bits from t1 one by one until + * t1 is completely cleared. + */ + while (t1 != 0) + { + if (t1&t2) + { + t = (skygw_query_type_t)t2; + + if (qtype_str == NULL) + { + qtype_str = strdup(STRQTYPE(t)); + } + else + { + size_t len = strlen(STRQTYPE(t)); + /** reallocate space for delimiter, new string and termination */ + qtype_str = (char *)realloc(qtype_str, strlen(qtype_str)+1+len+1); + snprintf(qtype_str+strlen(qtype_str), 1+len+1, "|%s", STRQTYPE(t)); + } + /** Remove found value from t1 */ + t1 &= ~t2; + } + t2 <<= 1; + } + return qtype_str; +} diff --git a/query_classifier/query_classifier.h b/query_classifier/query_classifier.h index 4ad960524..3600bce5a 100644 --- a/query_classifier/query_classifier.h +++ b/query_classifier/query_classifier.h @@ -1,5 +1,5 @@ /* -This file is distributed as part of the SkySQL Gateway. It is free +This file is distributed as part of the MariaDB Corporation MaxScale. It is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, version 2. @@ -13,11 +13,12 @@ You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
-Copyright SkySQL Ab +Copyright MariaDB Corporation Ab */ /** getpid */ +#include #include #include #include @@ -54,7 +55,9 @@ typedef enum { QUERY_TYPE_PREPARE_STMT = 0x020000, /*< Prepared stmt with id provided by server:all */ QUERY_TYPE_EXEC_STMT = 0x040000, /*< Execute prepared statement:master or any */ QUERY_TYPE_CREATE_TMP_TABLE = 0x080000, /*< Create temporary table:master (could be all) */ - QUERY_TYPE_READ_TMP_TABLE = 0x100000 /*< Read temporary table:master (could be any) */ + QUERY_TYPE_READ_TMP_TABLE = 0x100000, /*< Read temporary table:master (could be any) */ + QUERY_TYPE_SHOW_DATABASES = 0x200000, /*< Show list of databases */ + QUERY_TYPE_SHOW_TABLES = 0x400000 /*< Show list of tables */ } skygw_query_type_t; @@ -91,6 +94,7 @@ bool parse_query (GWBUF* querybuf); parsing_info_t* parsing_info_init(void (*donefun)(void *)); void parsing_info_done(void* ptr); bool query_is_parsed(GWBUF* buf); +char* skygw_get_qtype_str(skygw_query_type_t qtype); EXTERN_C_BLOCK_END diff --git a/query_classifier/test/CMakeLists.txt b/query_classifier/test/CMakeLists.txt index ceaad8110..3ed80e8a6 100644 --- a/query_classifier/test/CMakeLists.txt +++ b/query_classifier/test/CMakeLists.txt @@ -1 +1,14 @@ -add_subdirectory(canonical_tests) \ No newline at end of file +if(${ERRMSG} MATCHES "ERRMSG-NOTFOUND") + message(FATAL_ERROR "The errmsg.sys file was not found, please define the path with -DERRMSG=") +else() + if(${CMAKE_VERSION} VERSION_LESS 2.8) + execute_process(COMMAND cp ${ERRMSG} ${CMAKE_CURRENT_BINARY_DIR}) + else() + file(COPY ${ERRMSG} DESTINATION ${CMAKE_CURRENT_BINARY_DIR}) + endif() +endif() + +add_subdirectory(canonical_tests) +add_executable(classify classify.c) +target_link_libraries(classify query_classifier fullcore) +add_test(TestQueryClassifier classify ${CMAKE_CURRENT_SOURCE_DIR}/input.sql ${CMAKE_CURRENT_SOURCE_DIR}/expected.sql) \ No newline at end of file diff --git a/query_classifier/test/canonical_tests/CMakeLists.txt 
b/query_classifier/test/canonical_tests/CMakeLists.txt index 4777fad8d..26bc64602 100644 --- a/query_classifier/test/canonical_tests/CMakeLists.txt +++ b/query_classifier/test/canonical_tests/CMakeLists.txt @@ -1,6 +1,11 @@ -file(COPY ${ERRMSG} DESTINATION ${CMAKE_CURRENT_BINARY_DIR}) if(${ERRMSG} MATCHES "ERRMSG-NOTFOUND") message(FATAL_ERROR "The errmsg.sys file was not found, please define the path with -DERRMSG=") +else() + if(${CMAKE_VERSION} VERSION_LESS 2.8) + execute_process(COMMAND cp ${ERRMSG} ${CMAKE_CURRENT_BINARY_DIR}) + else() + file(COPY ${ERRMSG} DESTINATION ${CMAKE_CURRENT_BINARY_DIR}) + endif() endif() add_executable(canonizer canonizer.c) target_link_libraries(canonizer pthread query_classifier z dl ssl aio crypt crypto rt m ${EMBEDDED_LIB} fullcore stdc++) diff --git a/query_classifier/test/canonical_tests/canonizer.c b/query_classifier/test/canonical_tests/canonizer.c index 107ccb670..6df5f0049 100644 --- a/query_classifier/test/canonical_tests/canonizer.c +++ b/query_classifier/test/canonical_tests/canonizer.c @@ -1,3 +1,4 @@ +#include #include #include #include @@ -7,7 +8,7 @@ #include static char* server_options[] = { - "SkySQL Gateway", + "MariaDB Corporation MaxScale", "--datadir=./", "--language=./", "--skip-innodb", @@ -18,104 +19,61 @@ static char* server_options[] = { const int num_elements = (sizeof(server_options) / sizeof(char *)) - 1; static char* server_groups[] = { - "embedded", - "server", - "server", - NULL + "embedded", + "server", + "server", + NULL }; int main(int argc, char** argv) { - int fdin,fdout,i=0,fnamelen,fsz,lines = 0; - unsigned int psize; - GWBUF** qbuff; - char *qin, *outnm, *buffer, *tok; + unsigned int psize; + GWBUF* qbuff; + char *tok; + char readbuff[4092]; + FILE* infile; + FILE* outfile; + + if(argc != 3){ + printf("Usage: canonizer \n"); + return 1; + } - if(argc != 3){ - printf("Usage: canonizer \n"); - return 1; - } + if(mysql_library_init(num_elements, server_options, server_groups)){ + 
printf("Embedded server init failed.\n"); + return 1; + } + + infile = fopen(argv[1],"rb"); + outfile = fopen(argv[2],"wb"); + + if(infile == NULL || outfile == NULL){ + printf("Opening files failed.\n"); + return 1; + } - - - bool failed = mysql_library_init(num_elements, server_options, server_groups); - - if(failed){ - printf("Embedded server init failed.\n"); - return 1; - } - - fnamelen = strlen(argv[1]) + 16; - fdin = open(argv[1],O_RDONLY); - fsz = lseek(fdin,0,SEEK_END); - lseek(fdin,0,SEEK_SET); - - if(!(buffer = malloc(sizeof(char)*fsz + 1))){ - printf("Error: Failed to allocate memory."); - return 1; - } - - read(fdin,buffer,fsz); - buffer[fsz] = '\0'; - - - - i = 0; - int bsz = 4,z=0; - qbuff = calloc(bsz,sizeof(GWBUF*)); - tok = strtok(buffer,"\n"); - - while(tok){ - - if(i>=bsz){ - GWBUF** tmp = calloc(bsz*2,sizeof(GWBUF*)); - if(!tmp){ - printf("Error: Failed to allocate memory."); - return 1; - } - - for(z=0;z 0){ - qin = strdup(tok); - psize = strlen(qin); - qbuff[i] = gwbuf_alloc(psize + 6); - *(qbuff[i]->sbuf->data + 0) = (unsigned char)psize; - *(qbuff[i]->sbuf->data + 1) = (unsigned char)(psize>>8); - *(qbuff[i]->sbuf->data + 2) = (unsigned char)(psize>>16); - *(qbuff[i]->sbuf->data + 4) = 0x03; - memcpy(qbuff[i]->sbuf->data + 5,qin,psize); - *(qbuff[i]->sbuf->data + 5 + psize) = 0x00; - tok = strtok(NULL,"\n\0"); - free(qin); - i++; - } - } - - fdout = open(argv[2],O_TRUNC|O_CREAT|O_WRONLY,S_IRWXU|S_IXGRP|S_IXOTH); - - for(i = 0;isbuf->data + 0) = (unsigned char)psize; + *(qbuff->sbuf->data + 1) = (unsigned char)(psize>>8); + *(qbuff->sbuf->data + 2) = (unsigned char)(psize>>16); + *(qbuff->sbuf->data + 4) = 0x03; + memcpy(qbuff->start + 5,readbuff,psize + 1); + parse_query(qbuff); + tok = skygw_get_canonical(qbuff); + fprintf(outfile,"%s\n",tok); + free(tok); + gwbuf_free(qbuff); + } + } + fclose(infile); + fclose(outfile); + mysql_library_end(); + + return 0; } diff --git a/query_classifier/test/canonical_tests/expected.sql 
b/query_classifier/test/canonical_tests/expected.sql index 1126f6016..7303d585e 100755 --- a/query_classifier/test/canonical_tests/expected.sql +++ b/query_classifier/test/canonical_tests/expected.sql @@ -1,17 +1,18 @@ -select md5("?") =?, sleep(?), rand(?) -select * from my1 where md5("?") =? -select md5("?") =? -select * from my1 where md5("?") =? -select sleep(?) -select * from tst where lname='?' -select ?,?,?,?,?,? from tst -select * from tst where fname like '?' -select * from tst where lname like '?' order by fname -insert into tst values ("?","?"),("?",?),("?","?") -drop table if exists tst -create table tst(fname varchar(30), lname varchar(30)) -update tst set lname="?" where fname like '?' or lname like '?' -delete from tst where lname like '?' and fname like '?' -select ? from tst where fname='?' or lname like '?' -select ?,?,?,? from tst where name='?' or name='?' or name='?' or name='?' -select count(?),count(?),count(?),count(?),count (?),count(?) from tst +select md5("?") =?, sleep(?), rand(?); +select * from my1 where md5("?") =?; +select md5("?") =?; +select * from my1 where md5("?") =?; +select sleep(?); +select * from tst where lname='?'; +select ?,?,?,?,?,? from tst; +select * from tst where fname like '?'; +select * from tst where lname like '?' order by fname; +insert into tst values ("?","?"),("?",?),("?","?"); +drop table if exists tst; +create table tst(fname varchar(30), lname varchar(30)); +update tst set lname="?" where fname like '?' or lname like '?'; +delete from tst where lname like '?' and fname like '?'; +select ? from tst where fname='?' or lname like '?'; +select ?,?,?,? from tst where name='?' or name='?' or name='?' or name='?'; +select count(?),count(?),count(?),count(?),count (?),count(?) from tst; +select count(?),count(?),count(?),count(?),count (?),count(?) 
from tst; diff --git a/query_classifier/test/classify.c b/query_classifier/test/classify.c new file mode 100644 index 000000000..5ceadea9c --- /dev/null +++ b/query_classifier/test/classify.c @@ -0,0 +1,181 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static char* server_options[] = { + "MariaDB Corporation MaxScale", + "--no-defaults", + "--datadir=.", + "--language=.", + "--skip-innodb", + "--default-storage-engine=myisam", + NULL +}; + +const int num_elements = (sizeof(server_options) / sizeof(char *)) - 1; + +static char* server_groups[] = { + "embedded", + "server", + "server", + NULL +}; + +int main(int argc, char** argv) +{ + if(argc < 3){ + fprintf(stderr,"Usage: classify "); + return 1; + } + int rd = 0,buffsz = getpagesize(),strsz = 0,ex_val = 0; + char buffer[1024], *strbuff = (char*)calloc(buffsz,sizeof(char)); + FILE *input,*expected; + + if(mysql_library_init(num_elements, server_options, server_groups)) + { + printf("Error: Cannot initialize Embedded Library."); + return 1; + } + + input = fopen(argv[1],"rb"); + expected = fopen(argv[2],"rb"); + + while((rd = fread(buffer,sizeof(char),1023,input))){ + + /**Fill the read buffer*/ + + if(strsz + rd >= buffsz){ + + char* tmp = realloc(strbuff,(buffsz*2)*sizeof(char)); + + if(tmp == NULL){ + free(strbuff); + fclose(input); + fclose(expected); + mysql_library_end(); + fprintf(stderr,"Error: Memory allocation failed."); + return 1; + } + strbuff = tmp; + buffsz *= 2; + } + + memcpy(strbuff+strsz,buffer,rd); + strsz += rd; + *(strbuff+strsz) = '\0'; + + char *tok,*nlptr; + + /**Remove newlines*/ + while((nlptr = strpbrk(strbuff,"\n")) != NULL && (nlptr - strbuff) < strsz){ + memmove(nlptr,nlptr+1,strsz - (nlptr + 1 - strbuff)); + strsz -= 1; + } + + + /**Parse read buffer for full queries*/ + + while(strpbrk(strbuff,";") != NULL){ + tok = strpbrk(strbuff,";"); + unsigned int qlen = tok - strbuff + 1; + GWBUF* buff = gwbuf_alloc(qlen+6); + *((unsigned 
char*)(buff->start)) = qlen; + *((unsigned char*)(buff->start + 1)) = (qlen >> 8); + *((unsigned char*)(buff->start + 2)) = (qlen >> 16); + *((unsigned char*)(buff->start + 3)) = 0x00; + *((unsigned char*)(buff->start + 4)) = 0x03; + memcpy(buff->start+5, strbuff, qlen); + memmove(strbuff,tok + 1, strsz - qlen); + strsz -= qlen; + memset(strbuff + strsz,0,buffsz - strsz); + skygw_query_type_t type = query_classifier_get_type(buff); + char qtypestr[64]; + char expbuff[256]; + int expos = 0; + + while((rd = fgetc(expected)) != '\n' && !feof(expected)){ + expbuff[expos++] = rd; + } + expbuff[expos] = '\0'; + + if(type == QUERY_TYPE_UNKNOWN){ + sprintf(qtypestr,"QUERY_TYPE_UNKNOWN"); + } + if(type & QUERY_TYPE_LOCAL_READ){ + sprintf(qtypestr,"QUERY_TYPE_LOCAL_READ"); + } + if(type & QUERY_TYPE_READ){ + sprintf(qtypestr,"QUERY_TYPE_READ"); + } + if(type & QUERY_TYPE_WRITE){ + sprintf(qtypestr,"QUERY_TYPE_WRITE"); + } + if(type & QUERY_TYPE_MASTER_READ){ + sprintf(qtypestr,"QUERY_TYPE_MASTER_READ"); + } + if(type & QUERY_TYPE_SESSION_WRITE){ + sprintf(qtypestr,"QUERY_TYPE_SESSION_WRITE"); + } + if(type & QUERY_TYPE_USERVAR_READ){ + sprintf(qtypestr,"QUERY_TYPE_USERVAR_READ"); + } + if(type & QUERY_TYPE_SYSVAR_READ){ + sprintf(qtypestr,"QUERY_TYPE_SYSVAR_READ"); + } + if(type & QUERY_TYPE_GSYSVAR_READ){ + sprintf(qtypestr,"QUERY_TYPE_GSYSVAR_READ"); + } + if(type & QUERY_TYPE_GSYSVAR_WRITE){ + sprintf(qtypestr,"QUERY_TYPE_GSYSVAR_WRITE"); + } + if(type & QUERY_TYPE_BEGIN_TRX){ + sprintf(qtypestr,"QUERY_TYPE_BEGIN_TRX"); + } + if(type & QUERY_TYPE_ENABLE_AUTOCOMMIT){ + sprintf(qtypestr,"QUERY_TYPE_ENABLE_AUTOCOMMIT"); + } + if(type & QUERY_TYPE_DISABLE_AUTOCOMMIT){ + sprintf(qtypestr,"QUERY_TYPE_DISABLE_AUTOCOMMIT"); + } + if(type & QUERY_TYPE_ROLLBACK){ + sprintf(qtypestr,"QUERY_TYPE_ROLLBACK"); + } + if(type & QUERY_TYPE_COMMIT){ + sprintf(qtypestr,"QUERY_TYPE_COMMIT"); + } + if(type & QUERY_TYPE_PREPARE_NAMED_STMT){ + sprintf(qtypestr,"QUERY_TYPE_PREPARE_NAMED_STMT"); + 
} + if(type & QUERY_TYPE_PREPARE_STMT){ + sprintf(qtypestr,"QUERY_TYPE_PREPARE_STMT"); + } + if(type & QUERY_TYPE_EXEC_STMT){ + sprintf(qtypestr,"QUERY_TYPE_EXEC_STMT"); + } + if(type & QUERY_TYPE_CREATE_TMP_TABLE){ + sprintf(qtypestr,"QUERY_TYPE_CREATE_TMP_TABLE"); + } + if(type & QUERY_TYPE_READ_TMP_TABLE){ + sprintf(qtypestr,"QUERY_TYPE_READ_TMP_TABLE"); + } + + if(strcmp(qtypestr,expbuff) != 0){ + printf("Error in output: '%s' was expected but got '%s'",expbuff,qtypestr); + ex_val = 1; + } + + gwbuf_free(buff); + } + } + fclose(input); + fclose(expected); + mysql_library_end(); + free(strbuff); + return ex_val; +} diff --git a/query_classifier/test/expected.sql b/query_classifier/test/expected.sql new file mode 100644 index 000000000..23b7c9465 --- /dev/null +++ b/query_classifier/test/expected.sql @@ -0,0 +1,13 @@ +QUERY_TYPE_READ +QUERY_TYPE_READ +QUERY_TYPE_WRITE +QUERY_TYPE_WRITE +QUERY_TYPE_CREATE_TMP_TABLE +QUERY_TYPE_GSYSVAR_WRITE +QUERY_TYPE_SYSVAR_READ +QUERY_TYPE_USERVAR_READ +QUERY_TYPE_COMMIT +QUERY_TYPE_DISABLE_AUTOCOMMIT +QUERY_TYPE_BEGIN_TRX +QUERY_TYPE_ROLLBACK +QUERY_TYPE_COMMIT diff --git a/query_classifier/test/input.sql b/query_classifier/test/input.sql new file mode 100644 index 000000000..1b297b423 --- /dev/null +++ b/query_classifier/test/input.sql @@ -0,0 +1,13 @@ +select sleep(2); +select * from tst where lname like '%e%' order by fname; +insert into tst values ("Jane","Doe"),("Daisy","Duck"),("Marie","Curie"); +update tst set fname="Farmer", lname="McDonald" where lname="%Doe" and fname="John"; +create temporary table tmp as select * from t1; +/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */; +select @@server_id; +select @OLD_SQL_NOTES; +SET autocommit=1; +SET autocommit=0; +BEGIN; +ROLLBACK; +COMMIT; diff --git a/query_classifier/test/testmain.c b/query_classifier/test/testmain.c index 00000de98..08a25d4fa 100644 --- a/query_classifier/test/testmain.c +++ b/query_classifier/test/testmain.c @@ -13,7 +13,7 @@ static char 
datadir[1024] = ""; static char mysqldir[1024] = ""; static char* server_options[] = { - "SkySQL Gateway", + "MariaDB Corporation MaxScale", "--datadir=", "--default-storage-engine=myisam", NULL diff --git a/rabbitmq_consumer/CMakeLists.txt b/rabbitmq_consumer/CMakeLists.txt index aee18a6da..20ab2a46e 100644 --- a/rabbitmq_consumer/CMakeLists.txt +++ b/rabbitmq_consumer/CMakeLists.txt @@ -1,19 +1,17 @@ -if (NOT ( DEFINED MYSQL_CLIENT_LIB ) ) - find_library(MYSQL_CLIENT_LIB NAMES mysqlclient PATHS /usr/lib /usr/lib64 PATH_SUFFIXES mysql mariadb) -endif() +if(RABBITMQ_FOUND AND MYSQLCLIENT_FOUND) -if (NOT ( DEFINED MYSQL_CLIENT_HEADERS ) ) - find_path(MYSQL_CLIENT_HEADERS NAMES mysql.h PATH_SUFFIXES mysql mariadb) -endif() - -if( ( RABBITMQ_LIB AND RABBITMQ_HEADERS ) AND ( NOT ( ${MYSQL_CLIENT_LIB} STREQUAL "MYSQL_CLIENT_LIB-NOTFOUND" ) ) AND ( NOT ( ${MYSQL_CLIENT_HEADERS} STREQUAL "MYSQL_CLIENT_HEADERS-NOTFOUND" ) ) ) - include_directories(${MYSQL_CLIENT_HEADERS}) + include_directories(${MYSQLCLIENT_HEADERS}) add_executable (consumer consumer.c) - target_link_libraries(consumer ${MYSQL_CLIENT_LIB} rabbitmq inih) + + if(MYSQLCLIENT_FOUND) + target_link_libraries(consumer ${MYSQLCLIENT_LIBRARIES} rabbitmq inih) + elseif(MYSQLCLIENT_STATIC_FOUND) + target_link_libraries(consumer ${MYSQLCLIENT_STATIC_LIBRARIES} rabbitmq inih) + endif() + install(TARGETS consumer DESTINATION bin) install(FILES consumer.cnf DESTINATION etc) - else() message(FATAL_ERROR "Error: Can not find requred libraries and headers: librabbitmq libmysqlclient") diff --git a/rabbitmq_consumer/consumer.c b/rabbitmq_consumer/consumer.c index 8ccabf401..75fedc786 100644 --- a/rabbitmq_consumer/consumer.c +++ b/rabbitmq_consumer/consumer.c @@ -143,7 +143,7 @@ int connectToServer(MYSQL* server) } memset(qstr,0,bsz); - sprintf(qstr,DB_TABLE); + sprintf(qstr,"%s",DB_TABLE); if(mysql_query(server,qstr)){ fprintf(stderr,"Error: Could not send query MySQL server: %s\n",mysql_error(server)); } diff --git 
a/rabbitmq_consumer/inih/._LICENSE.txt b/rabbitmq_consumer/inih/._LICENSE.txt deleted file mode 100755 index 7fa6eb8a6..000000000 Binary files a/rabbitmq_consumer/inih/._LICENSE.txt and /dev/null differ diff --git a/rabbitmq_consumer/inih/._README.txt b/rabbitmq_consumer/inih/._README.txt deleted file mode 100755 index 7fa6eb8a6..000000000 Binary files a/rabbitmq_consumer/inih/._README.txt and /dev/null differ diff --git a/rabbitmq_consumer/inih/._cpp b/rabbitmq_consumer/inih/._cpp deleted file mode 100755 index 17b8574a4..000000000 Binary files a/rabbitmq_consumer/inih/._cpp and /dev/null differ diff --git a/rabbitmq_consumer/inih/._examples b/rabbitmq_consumer/inih/._examples deleted file mode 100755 index 17b8574a4..000000000 Binary files a/rabbitmq_consumer/inih/._examples and /dev/null differ diff --git a/rabbitmq_consumer/inih/._extra b/rabbitmq_consumer/inih/._extra deleted file mode 100755 index 17b8574a4..000000000 Binary files a/rabbitmq_consumer/inih/._extra and /dev/null differ diff --git a/rabbitmq_consumer/inih/._ini.c b/rabbitmq_consumer/inih/._ini.c deleted file mode 100755 index 17b8574a4..000000000 Binary files a/rabbitmq_consumer/inih/._ini.c and /dev/null differ diff --git a/rabbitmq_consumer/inih/._ini.h b/rabbitmq_consumer/inih/._ini.h deleted file mode 100755 index 17b8574a4..000000000 Binary files a/rabbitmq_consumer/inih/._ini.h and /dev/null differ diff --git a/rabbitmq_consumer/inih/._tests b/rabbitmq_consumer/inih/._tests deleted file mode 100755 index 7fa6eb8a6..000000000 Binary files a/rabbitmq_consumer/inih/._tests and /dev/null differ diff --git a/rabbitmq_consumer/inih/cpp/._INIReader.cpp b/rabbitmq_consumer/inih/cpp/._INIReader.cpp deleted file mode 100755 index 17b8574a4..000000000 Binary files a/rabbitmq_consumer/inih/cpp/._INIReader.cpp and /dev/null differ diff --git a/rabbitmq_consumer/inih/cpp/._INIReader.h b/rabbitmq_consumer/inih/cpp/._INIReader.h deleted file mode 100755 index 17b8574a4..000000000 Binary files 
a/rabbitmq_consumer/inih/cpp/._INIReader.h and /dev/null differ diff --git a/rabbitmq_consumer/inih/cpp/._INIReaderTest.cpp b/rabbitmq_consumer/inih/cpp/._INIReaderTest.cpp deleted file mode 100755 index 17b8574a4..000000000 Binary files a/rabbitmq_consumer/inih/cpp/._INIReaderTest.cpp and /dev/null differ diff --git a/rabbitmq_consumer/inih/examples/._config.def b/rabbitmq_consumer/inih/examples/._config.def deleted file mode 100755 index 17b8574a4..000000000 Binary files a/rabbitmq_consumer/inih/examples/._config.def and /dev/null differ diff --git a/rabbitmq_consumer/inih/examples/._ini_dump.c b/rabbitmq_consumer/inih/examples/._ini_dump.c deleted file mode 100755 index 17b8574a4..000000000 Binary files a/rabbitmq_consumer/inih/examples/._ini_dump.c and /dev/null differ diff --git a/rabbitmq_consumer/inih/examples/._ini_example.c b/rabbitmq_consumer/inih/examples/._ini_example.c deleted file mode 100755 index 17b8574a4..000000000 Binary files a/rabbitmq_consumer/inih/examples/._ini_example.c and /dev/null differ diff --git a/rabbitmq_consumer/inih/examples/._ini_xmacros.c b/rabbitmq_consumer/inih/examples/._ini_xmacros.c deleted file mode 100755 index 17b8574a4..000000000 Binary files a/rabbitmq_consumer/inih/examples/._ini_xmacros.c and /dev/null differ diff --git a/rabbitmq_consumer/inih/examples/._test.ini b/rabbitmq_consumer/inih/examples/._test.ini deleted file mode 100755 index 17b8574a4..000000000 Binary files a/rabbitmq_consumer/inih/examples/._test.ini and /dev/null differ diff --git a/rabbitmq_consumer/inih/extra/._Makefile.static b/rabbitmq_consumer/inih/extra/._Makefile.static deleted file mode 100755 index 17b8574a4..000000000 Binary files a/rabbitmq_consumer/inih/extra/._Makefile.static and /dev/null differ diff --git a/rabbitmq_consumer/inih/tests/._bad_comment.ini b/rabbitmq_consumer/inih/tests/._bad_comment.ini deleted file mode 100755 index 7fa6eb8a6..000000000 Binary files a/rabbitmq_consumer/inih/tests/._bad_comment.ini and /dev/null differ 
diff --git a/rabbitmq_consumer/inih/tests/._bad_multi.ini b/rabbitmq_consumer/inih/tests/._bad_multi.ini deleted file mode 100755 index 7fa6eb8a6..000000000 Binary files a/rabbitmq_consumer/inih/tests/._bad_multi.ini and /dev/null differ diff --git a/rabbitmq_consumer/inih/tests/._bad_section.ini b/rabbitmq_consumer/inih/tests/._bad_section.ini deleted file mode 100755 index 7fa6eb8a6..000000000 Binary files a/rabbitmq_consumer/inih/tests/._bad_section.ini and /dev/null differ diff --git a/rabbitmq_consumer/inih/tests/._baseline_multi.txt b/rabbitmq_consumer/inih/tests/._baseline_multi.txt deleted file mode 100755 index 7fa6eb8a6..000000000 Binary files a/rabbitmq_consumer/inih/tests/._baseline_multi.txt and /dev/null differ diff --git a/rabbitmq_consumer/inih/tests/._baseline_single.txt b/rabbitmq_consumer/inih/tests/._baseline_single.txt deleted file mode 100755 index 7fa6eb8a6..000000000 Binary files a/rabbitmq_consumer/inih/tests/._baseline_single.txt and /dev/null differ diff --git a/rabbitmq_consumer/inih/tests/._bom.ini b/rabbitmq_consumer/inih/tests/._bom.ini deleted file mode 100755 index 7fa6eb8a6..000000000 Binary files a/rabbitmq_consumer/inih/tests/._bom.ini and /dev/null differ diff --git a/rabbitmq_consumer/inih/tests/._multi_line.ini b/rabbitmq_consumer/inih/tests/._multi_line.ini deleted file mode 100755 index 7fa6eb8a6..000000000 Binary files a/rabbitmq_consumer/inih/tests/._multi_line.ini and /dev/null differ diff --git a/rabbitmq_consumer/inih/tests/._normal.ini b/rabbitmq_consumer/inih/tests/._normal.ini deleted file mode 100755 index 7fa6eb8a6..000000000 Binary files a/rabbitmq_consumer/inih/tests/._normal.ini and /dev/null differ diff --git a/rabbitmq_consumer/inih/tests/._unittest.bat b/rabbitmq_consumer/inih/tests/._unittest.bat deleted file mode 100755 index 7fa6eb8a6..000000000 Binary files a/rabbitmq_consumer/inih/tests/._unittest.bat and /dev/null differ diff --git a/rabbitmq_consumer/inih/tests/._unittest.c 
b/rabbitmq_consumer/inih/tests/._unittest.c deleted file mode 100755 index 7fa6eb8a6..000000000 Binary files a/rabbitmq_consumer/inih/tests/._unittest.c and /dev/null differ diff --git a/rabbitmq_consumer/inih/tests/._user_error.ini b/rabbitmq_consumer/inih/tests/._user_error.ini deleted file mode 100755 index 7fa6eb8a6..000000000 Binary files a/rabbitmq_consumer/inih/tests/._user_error.ini and /dev/null differ diff --git a/replication_listener/COPYING.SkySQL b/replication_listener/COPYING.SkySQL index 9845b0fe8..52951686d 100644 --- a/replication_listener/COPYING.SkySQL +++ b/replication_listener/COPYING.SkySQL @@ -1,7 +1,7 @@ -Portions of this software contain modifications contributed by SkySQL, Ab. +Portions of this software contain modifications contributed by MariaDB Corporation, Ab. These contributions are used with the following license: -Copyright (c) 2013, SkySQL Ab. All rights reserved. +Copyright (c) 2013, MariaDB Corporation Ab. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions @@ -12,7 +12,7 @@ are met: copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of the SkySQL Ab. nor the names of its + * Neither the name of the MariaDB Corporation Ab. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/replication_listener/access_method_factory.cpp b/replication_listener/access_method_factory.cpp index 2f6f6ca3b..b44f69692 100644 --- a/replication_listener/access_method_factory.cpp +++ b/replication_listener/access_method_factory.cpp @@ -1,10 +1,10 @@ /* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. 
-Copyright (c) 2013, SkySQL Ab +Copyright (c) 2013, MariaDB Corporation Ab Portions of this file contain modifications contributed and copyrighted by -SkySQL, Ab. Those modifications are gratefully acknowledged and are described +MariaDB Corporation, Ab. Those modifications are gratefully acknowledged and are described briefly in the source code. This program is free software; you can redistribute it and/or @@ -23,10 +23,10 @@ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ /* -SkySQL change details: +MariaDB Corporation change details: - Removed unnecessary file driver -Author: Jan Lindström (jan.lindstrom@skysql.com +Author: Jan Lindström (jan.lindstrom@mariadb.com */ #include "access_method_factory.h" diff --git a/replication_listener/access_method_factory.h b/replication_listener/access_method_factory.h index af245fa36..e402a1185 100644 --- a/replication_listener/access_method_factory.h +++ b/replication_listener/access_method_factory.h @@ -1,10 +1,10 @@ /* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. -Copyright (c) 2013, SkySQL Ab +Copyright (c) 2013-2014, MariaDB Corporation Ab Portions of this file contain modifications contributed and copyrighted by -SkySQL, Ab. Those modifications are gratefully acknowledged and are described +MariaDB Corporation, Ab. Those modifications are gratefully acknowledged and are described briefly in the source code. This program is free software; you can redistribute it and/or diff --git a/replication_listener/basic_content_handler.cpp b/replication_listener/basic_content_handler.cpp index 7d24b9c0f..ffcc35fde 100644 --- a/replication_listener/basic_content_handler.cpp +++ b/replication_listener/basic_content_handler.cpp @@ -1,10 +1,10 @@ /* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. 
-Copyright (c) 2013, SkySQL Ab +Copyright (c) 2013, MariaDB Corporation Ab Portions of this file contain modifications contributed and copyrighted by -SkySQL, Ab. Those modifications are gratefully acknowledged and are described +MariaDB Corporation, Ab. Those modifications are gratefully acknowledged and are described briefly in the source code. This program is free software; you can redistribute it and/or @@ -23,10 +23,10 @@ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ /* -SkySQL change details: +MariaDB Corporation change details: - Added GTID event handler -Author: Jan Lindström (jan.lindstrom@skysql.com +Author: Jan Lindström (jan.lindstrom@mariadb.com */ diff --git a/replication_listener/basic_content_handler.h b/replication_listener/basic_content_handler.h index 449d0f8d4..7a83838af 100644 --- a/replication_listener/basic_content_handler.h +++ b/replication_listener/basic_content_handler.h @@ -1,10 +1,10 @@ /* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. -Copyright (c) 2013, SkySQL Ab +Copyright (c) 2013-2014, MariaDB Corporation Ab Portions of this file contain modifications contributed and copyrighted by -SkySQL, Ab. Those modifications are gratefully acknowledged and are described +MariaDB Corporation, Ab. Those modifications are gratefully acknowledged and are described briefly in the source code. 
This program is free software; you can redistribute it and/or @@ -23,10 +23,10 @@ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ /* -SkySQL change details: +MariaDB Corporation change details: - Added GTID event handler -Author: Jan Lindström (jan.lindstrom@skysql.com +Author: Jan Lindström (jan.lindstrom@mariadb.com */ diff --git a/replication_listener/binary_log.cpp b/replication_listener/binary_log.cpp index 0e643269d..20c87c896 100644 --- a/replication_listener/binary_log.cpp +++ b/replication_listener/binary_log.cpp @@ -1,10 +1,10 @@ /* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. -Copyright (c) 2013, SkySQL Ab +Copyright (c) 2013, MariaDB Corporation Ab Portions of this file contain modifications contributed and copyrighted by -SkySQL, Ab. Those modifications are gratefully acknowledged and are described +MariaDB Corporation, Ab. Those modifications are gratefully acknowledged and are described briefly in the source code. This program is free software; you can redistribute it and/or @@ -23,11 +23,11 @@ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ /* -SkySQL change details: +MariaDB Corporation change details: - Added support for setting binlog position based on GTID - Added support for MySQL and MariDB server types -Author: Jan Lindström (jan.lindstrom@skysql.com +Author: Jan Lindström (jan.lindstrom@mariadb.com */ diff --git a/replication_listener/binlog_api.h b/replication_listener/binlog_api.h index 2a73423af..33d32fd1d 100644 --- a/replication_listener/binlog_api.h +++ b/replication_listener/binlog_api.h @@ -1,10 +1,10 @@ /* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. -Copyright (c) 2013, SkySQL Ab +Copyright (c) 2013-2014, MariaDB Corporation Ab Portions of this file contain modifications contributed and copyrighted by -SkySQL, Ab. Those modifications are gratefully acknowledged and are described +MariaDB Corporation, Ab. 
Those modifications are gratefully acknowledged and are described briefly in the source code. This program is free software; you can redistribute it and/or @@ -23,11 +23,11 @@ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ /* -SkySQL change details: +MariaDB Corporation change details: - Added support for setting binlog position based on GTID - Added support for MySQL and MariDB server types -Author: Jan Lindström (jan.lindstrom@skysql.com +Author: Jan Lindström (jan.lindstrom@mariadb.com */ diff --git a/replication_listener/binlog_driver.cpp b/replication_listener/binlog_driver.cpp index ef2512b5c..81c019332 100644 --- a/replication_listener/binlog_driver.cpp +++ b/replication_listener/binlog_driver.cpp @@ -1,10 +1,10 @@ /* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. - Copyright (c) 2013, SkySQL Ab + Copyright (c) 2013, MariaDB Corporation Ab Portions of this file contain modifications contributed and copyrighted by - SkySQL, Ab. Those modifications are gratefully acknowledged and are described + MariaDB Corporation, Ab. Those modifications are gratefully acknowledged and are described briefly in the source code. This program is free software; you can redistribute it and/or @@ -23,10 +23,10 @@ 02110-1301 USA */ /* -SkySQL change details: +MariaDB Corporation change details: - Added support for GTID event handling for both MySQL and MariaDB -Author: Jan Lindström (jan.lindstrom@skysql.com +Author: Jan Lindström (jan.lindstrom@mariadb.com */ diff --git a/replication_listener/binlog_driver.h b/replication_listener/binlog_driver.h index cdec99e06..44f9fb3c4 100644 --- a/replication_listener/binlog_driver.h +++ b/replication_listener/binlog_driver.h @@ -1,10 +1,10 @@ /* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. 
-Copyright (c) 2013, SkySQL Ab +Copyright (c) 2013-2014, MariaDB Corporation Ab Portions of this file contain modifications contributed and copyrighted by -SkySQL, Ab. Those modifications are gratefully acknowledged and are described +MariaDB Corporation, Ab. Those modifications are gratefully acknowledged and are described briefly in the source code. This program is free software; you can redistribute it and/or @@ -23,11 +23,11 @@ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ /* -SkySQL change details: +MariaDB Corporation change details: - Added support for GTID event handling for both MySQL and MariaDB - Added support for setting binlog position based on GTID -Author: Jan Lindström (jan.lindstrom@skysql.com +Author: Jan Lindström (jan.lindstrom@mariadb.com */ diff --git a/replication_listener/binlog_event.cpp b/replication_listener/binlog_event.cpp index 77723e310..d89142333 100644 --- a/replication_listener/binlog_event.cpp +++ b/replication_listener/binlog_event.cpp @@ -1,10 +1,10 @@ /* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. -Copyright (c) 2013, SkySQL Ab +Copyright (c) 2013, MariaDB Corporation Ab Portions of this file contain modifications contributed and copyrighted by -SkySQL, Ab. Those modifications are gratefully acknowledged and are described +MariaDB Corporation, Ab. Those modifications are gratefully acknowledged and are described briefly in the source code. 
This program is free software; you can redistribute it and/or @@ -23,10 +23,10 @@ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ /* -SkySQL change details: +MariaDB Corporation change details: - Added support for GTID event handling for both MySQL and MariaDB -Author: Jan Lindström (jan.lindstrom@skysql.com +Author: Jan Lindström (jan.lindstrom@mariadb.com */ diff --git a/replication_listener/binlog_event.h b/replication_listener/binlog_event.h index 2b0d650e2..45aa31a83 100644 --- a/replication_listener/binlog_event.h +++ b/replication_listener/binlog_event.h @@ -1,10 +1,10 @@ /* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. -Copyright (c) 2013, SkySQL Ab +Copyright (c) 2013-2014, MariaDB Corporation Ab Portions of this file contain modifications contributed and copyrighted by -SkySQL, Ab. Those modifications are gratefully acknowledged and are described +MariaDB Corporation, Ab. Those modifications are gratefully acknowledged and are described briefly in the source code. This program is free software; you can redistribute it and/or @@ -23,10 +23,10 @@ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ /* -SkySQL change details: +MariaDB Corporation change details: - Added support for GTID event handling for both MySQL and MariaDB -Author: Jan Lindström (jan.lindstrom@skysql.com +Author: Jan Lindström (jan.lindstrom@mariadb.com */ #ifndef _BINLOG_EVENT_H diff --git a/replication_listener/gtid.cpp b/replication_listener/gtid.cpp index e5e1c1516..7091dfc92 100644 --- a/replication_listener/gtid.cpp +++ b/replication_listener/gtid.cpp @@ -1,8 +1,8 @@ /* -Copyright (C) 2013, SkySQL Ab +Copyright (C) 2013, MariaDB Corporation Ab -This file is distributed as part of the SkySQL Gateway. It is free +This file is distributed as part of the MariaDB Corporation MaxScale. 
It is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, version 2. @@ -16,7 +16,7 @@ You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -Author: Jan Lindström jan.lindstrom@skysql.com +Author: Jan Lindström jan.lindstrom@mariadb.com */ diff --git a/replication_listener/gtid.h b/replication_listener/gtid.h index b0ca43548..218e2a30f 100644 --- a/replication_listener/gtid.h +++ b/replication_listener/gtid.h @@ -1,7 +1,7 @@ /* -Copyright (C) 2013, SkySQL Ab +Copyright (C) 2013-2014, MariaDB Corporation Ab -This file is distributed as part of the SkySQL Gateway. It is free +This file is distributed as part of the MariaDB Corporation MaxScale. It is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, version 2. @@ -15,7 +15,7 @@ You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -Author: Jan Lindström jan.lindstrom@skysql.com +Author: Jan Lindström jan.lindstrom@mariadb.com */ diff --git a/replication_listener/listener_exception.h b/replication_listener/listener_exception.h index b9da4f0ae..b38312b58 100644 --- a/replication_listener/listener_exception.h +++ b/replication_listener/listener_exception.h @@ -1,8 +1,8 @@ /* -Copyright (C) 2013, SkySQL Ab +Copyright (C) 2013-2014, MariaDB Corporation Ab -This file is distributed as part of the SkySQL Gateway. It is free +This file is distributed as part of the MariaDB Corporation MaxScale. 
It is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, version 2. @@ -16,7 +16,7 @@ You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -Author: Jan Lindström jan.lindstrom@skysql.com +Author: Jan Lindström jan.lindstrom@mariadb.com */ diff --git a/replication_listener/protocol.cpp b/replication_listener/protocol.cpp index 0bc58b458..317500ce8 100644 --- a/replication_listener/protocol.cpp +++ b/replication_listener/protocol.cpp @@ -1,10 +1,10 @@ /* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. -Copyright (c) 2013, SkySQL Ab +Copyright (c) 2013, MariaDB Corporation Ab Portions of this file contain modifications contributed and copyrighted by -SkySQL, Ab. Those modifications are gratefully acknowledged and are described +MariaDB Corporation, Ab. Those modifications are gratefully acknowledged and are described briefly in the source code. This program is free software; you can redistribute it and/or @@ -23,10 +23,10 @@ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ /* -SkySQL change details: +MariaDB Corporation change details: - Added support for GTID event handling for both MySQL and MariaDB -Author: Jan Lindström (jan.lindstrom@skysql.com +Author: Jan Lindström (jan.lindstrom@mariadb.com */ #include diff --git a/replication_listener/tcp_driver.cpp b/replication_listener/tcp_driver.cpp index a36b8bf68..a0aa88986 100644 --- a/replication_listener/tcp_driver.cpp +++ b/replication_listener/tcp_driver.cpp @@ -1,10 +1,10 @@ /* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. 
-Copyright (c) 2013, SkySQL Ab +Copyright (c) 2013, MariaDB Corporation Ab Portions of this file contain modifications contributed and copyrighted by -SkySQL, Ab. Those modifications are gratefully acknowledged and are described +MariaDB Corporation, Ab. Those modifications are gratefully acknowledged and are described briefly in the source code. This program is free software; you can redistribute it and/or @@ -23,12 +23,12 @@ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ /* -SkySQL change details: +MariaDB Corporation change details: - Added support for GTID event handling for both MySQL and MariaDB - Added support for starting binlog dump from GTID position - Added error handling using exceptions -Author: Jan Lindström (jan.lindstrom@skysql.com +Author: Jan Lindström (jan.lindstrom@mariadb.com */ #include "binlog_api.h" diff --git a/replication_listener/tcp_driver.h b/replication_listener/tcp_driver.h index 268f3dcee..530249276 100644 --- a/replication_listener/tcp_driver.h +++ b/replication_listener/tcp_driver.h @@ -1,10 +1,10 @@ /* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. -Copyright (c) 2013, SkySQL Ab +Copyright (c) 2013-2014, MariaDB Corporation Ab Portions of this file contain modifications contributed and copyrighted by -SkySQL, Ab. Those modifications are gratefully acknowledged and are described +MariaDB Corporation, Ab. Those modifications are gratefully acknowledged and are described briefly in the source code. 
This program is free software; you can redistribute it and/or @@ -23,12 +23,12 @@ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ /* -SkySQL change details: +MariaDB Corporation change details: - Added support for GTID event handling for both MySQL and MariaDB - Added support for starting binlog dump from GTID position - Added support for MariaDB server -Author: Jan Lindström (jan.lindstrom@skysql.com +Author: Jan Lindström (jan.lindstrom@mariadb.com */ diff --git a/replication_listener/tests/event_dump.cpp b/replication_listener/tests/event_dump.cpp index 3fd33d1a7..132ba6f8b 100644 --- a/replication_listener/tests/event_dump.cpp +++ b/replication_listener/tests/event_dump.cpp @@ -1,8 +1,8 @@ /* -Copyright (C) 2013, SkySQL Ab +Copyright (C) 2013, MariaDB Corporation Ab -This file is distributed as part of the SkySQL Gateway. It is free +This file is distributed as part of the MariaDB Corporation MaxScale. It is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, version 2. @@ -16,7 +16,7 @@ You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
-Author: Jan Lindström jan.lindstrom@skysql.com +Author: Jan Lindström jan.lindstrom@mariadb.com */ diff --git a/script/make-binary-tarball.sh b/script/make-binary-tarball.sh index 416a10675..531306ddb 100755 --- a/script/make-binary-tarball.sh +++ b/script/make-binary-tarball.sh @@ -14,7 +14,7 @@ echo "Looking for MaxScale in [${BINARY_PATH}]" if [ -s "${BINARY_PATH}/bin/maxscale" ]; then if [ -x "${BINARY_PATH}/bin/maxscale" ]; then - MAXSCALE_VERSION=`strings ${BINARY_PATH}/bin/maxscale | grep "SkySQL MaxScale" | awk '{print $3}' | head -1` + MAXSCALE_VERSION=`strings ${BINARY_PATH}/bin/maxscale | grep "MariaDB Corporation MaxScale" | awk '{print $3}' | head -1` echo "Found MaxScale, version: ${MAXSCALE_VERSION}" fi else diff --git a/server/CMakeLists.txt b/server/CMakeLists.txt index a42c625ae..6cd174190 100644 --- a/server/CMakeLists.txt +++ b/server/CMakeLists.txt @@ -1,6 +1,3 @@ add_subdirectory(core) add_subdirectory(modules) add_subdirectory(inih) -if(BUILD_TESTS) -add_subdirectory(test) -endif() \ No newline at end of file diff --git a/server/Makefile b/server/Makefile index fda9d14c1..93a181a07 100644 --- a/server/Makefile +++ b/server/Makefile @@ -1,4 +1,4 @@ -# This file is distributed as part of the SkySQL Gateway. It is free +# This file is distributed as part of the MariaDB Corporation MaxScale. It is free # software: you can redistribute it and/or modify it under the terms of the # GNU General Public License as published by the Free Software Foundation, # version 2. @@ -12,7 +12,7 @@ # this program; if not, write to the Free Software Foundation, Inc., 51 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
# -# Copyright SkySQL Ab 2013 +# Copyright MariaDB Corporation Ab 2013 # # Revision History # Date Who Description diff --git a/server/MaxScale_template.cnf b/server/MaxScale_template.cnf index c322fec8f..e6887fdd1 100644 --- a/server/MaxScale_template.cnf +++ b/server/MaxScale_template.cnf @@ -1,108 +1,255 @@ +## Example MaxScale.cnf configuration file # -# Example MaxScale.cnf configuration file +# Number of worker threads in MaxScale # -# -# -# Number of server threads -# Valid options are: # threads= - +# [maxscale] -threads=1 +threads=4 -# Define a monitor that can be used to determine the state and role of +## Define a monitor that can be used to determine the state and role of # the servers. # -# Valid options for all monitors are: +# Currently valid options for all monitors are: # -# module= -# servers=,,... -# user = -# passwd= -# monitor_interval= +# module=[mysqlmon|galeramon] +# +# List of server names which are being monitored +# +# servers=,,..., +# +# Username for monitor queries, need slave replication and slave client privileges +# Password in plain text format, and monitor's sampling interval in milliseconds. +# +# user= +# passwd= +# monitor_interval= (default 10000) +# +# Timeouts for monitor operations in backend servers - optional. +# +# backend_connect_timeout= +# backend_write_timeout= +# backend_read_timeout= +# +## MySQL monitor-specific options: +# +# Enable detection of replication slaves lag via replication_heartbeat +# table - optional. +# +# detect_replication_lag=[1|0] (default 0) +# +# Allow previous master to be available even in case of stopped or misconfigured +# replication - optional. +# +# detect_stale_master=[1|0] (default 0) +# +## Galera monitor-specific options: +# +# If disable_master_failback is not set, recovery of previously failed master +# causes mastership to be switched back to it. Enabling the option prevents it. 
+# +# disable_master_failback=[0|1] (default 0) +# +## Examples: [MySQL Monitor] type=monitor module=mysqlmon servers=server1,server2,server3 -user=maxuser -passwd=maxpwd -# -# options for mysql_monitor only -# -# detect_replication_lag= -# detect_stale_master= +user=myuser +passwd=mypwd +monitor_interval=10000 +#backend_connect_timeout= +#backend_read_timeout= +#backend_write_timeout= +#detect_replication_lag= +#detect_stale_master= -# A series of service definition +[Galera Monitor] +type=monitor +module=galeramon +servers=server1,server2,server3 +user=myuser +passwd=mypwd +monitor_interval=10000 +#disable_master_failback= + +## Filter definition # -# Valid options are: +# Type specifies the section # -# router= -# servers=,,... -# user= -# passwd= -# enable_root_user=<0 or 1, default is 0> -# version_string= -# -# use_sql_variables_in=[master|all] (default all) -# router_options=,,... -# where value=[master|slave|synced] +# type=filter +# +# Module specifies which module implements the filter function # -# Read/Write Split Router specific options are: +# module=[qlafilter|regexfilter|topfilter|teefilter] +# +# Options specify the log file for Query Log Filter +# +# options= +# +# Match and replace are used in regexfilter +# +# match=fetch +# replace=select +# +# Count and filebase are used with topfilter to specify how many top queries are +# listed and where. +# +# count= +# filebase= +# +# Match and service are used by tee filter to specify what queries should be +# duplicated and where the copy should be routed. 
+# +# match=insert.*HighScore.*values +# service=Cassandra +# +## Examples: + +[qla] +type=filter +module=qlafilter +options=/tmp/QueryLog + +[fetch] +type=filter +module=regexfilter +match=fetch +replace=select + + +## A series of service definition +# +# Name of router module, currently valid options are +# +# router=[readconnroute|readwritesplit|debugcli|CLI] +# +# List of server names for use of service - mandatory for readconnroute, +# readwritesplit, and debugcli +# +# servers=,,..., +# +# Username to fetch password information with and password in plaintext +# format - for readconnroute and readwritesplit +# +# user= +# passwd= +# +# flag for enabling the use of root user - for readconnroute and +# readwritesplite - optional. +# +# enable_root_user=[0|1] (default 0) +# +# Version string to be used in server handshake. Default value is that of +# MariaDB embedded library's - for readconnroute and readwritesplite - optional. +# +# version_string= +# +# Filters specify the filters through which the query is transferred and the +# order of their appearance on the list corresponds the order they are +# used. Values refer to names of filters configured in this file - for +# readconnroute and readwritesplit - optional. +# +# filters= +# +## Read Connection Router specific router options. +# +# router_options specify the role in which the selected server must be. +# +# router_options=[master|slave|synced] +# +## Read/Write Split Router specific options. +# +# use_sql_variables_in specifies where sql variable modifications are +# routed - optional. +# +# use_sql_variables_in=[master|all] (default all) +# +# router_options=slave_selection_criteria specifies the selection criteria for +# slaves both in new session creation and when route target is selected - optional. +# +# router_options= +# slave_selection_criteria=[LEAST_CURRENT_OPERATIONS|LEAST_BEHIND_MASTER] +# +# max_slave_connections specifies how many slaves a router session can +# connect to - optional. 
+# +# max_slave_connections= +# +# max_slave_replication_lag specifies how much a slave is allowed to be behind +# the master and still become chosen routing target - optional, requires that +# monitor has detect_replication_lag=1 . # -# max_slave_connections= # max_slave_replication_lag= -# router_options=slave_selection_criteria=[LEAST_CURRENT_OPERATIONS|LEAST_BEHIND_MASTER] -# +# # Valid router modules currently are: -# readwritesplit, readconnroute and debugcli +# readwritesplit, readconnroute, debugcli and CLI +# +## Examples: + +[Read Connection Router] +type=service +router=readconnroute +servers=server1,server2,server3 +user=myuser +passwd=mypwd +router_options=slave [RW Split Router] type=service router=readwritesplit servers=server1,server2,server3 -user=maxuser -passwd=maxpwd -use_sql_variables_in=all -max_slave_connections=50% -max_slave_replication_lag=30 -router_options=slave_selection_criteria=LEAST_BEHIND_MASTER - - -[Read Connection Router] -type=service -router=readconnroute -router_options=slave -servers=server1,server2,server3 -user=maxuser -passwd=maxpwd - -[HTTPD Router] -type=service -router=testroute -servers=server1,server2,server3 +user=myuser +passwd=mypwd +#use_sql_variables_in= +#max_slave_connections=100% +#max_slave_replication_lag=21 +#router_options=slave_selection_criteria= +#filters=fetch|qla [Debug Interface] type=service router=debugcli -# Listener definitions for the services +[CLI] +type=service +router=cli + +## Listener definitions for the services # -# Valid options are: +# Type specifies section as listener one +# +# type=listener +# +# Service links the section to one of the service names used in this configuration +# +# service= +# +# Protocol is client protocol library name. +# +# protocol=[MySQLClient|telnetd|HTTPD|maxscaled] +# +# Port and address specify which port the service listens and the address limits +# listening to a specific network interface only. Address is optional. 
# -# service= -# protocol= # port= # address=
+# +# Socket is alternative for address. The specified socket path must be writable +# by the Unix user MaxScale runs as. +# # socket= +# +## Examples: + +[Read Connection Listener] +type=listener +service=Read Connection Router +protocol=MySQLClient +address=192.168.100.102 +port=4008 +#socket=/tmp/readconn.sock [RW Split Listener] type=listener @@ -111,42 +258,58 @@ protocol=MySQLClient port=4006 #socket=/tmp/rwsplit.sock -[Read Connection Listener] -type=listener -service=Read Connection Router -protocol=MySQLClient -port=4008 -#socket=/tmp/readconn.sock - [Debug Listener] type=listener service=Debug Interface protocol=telnetd -port=4442 #address=127.0.0.1 +port=4442 -[HTTPD Listener] +[CLI Listener] type=listener -service=HTTPD Router -protocol=HTTPD -port=6444 +service=CLI +protocol=maxscaled +#address=localhost +port=6603 -# Definition of the servers +## Definition of the servers +# +# Type specifies the section as server one +# +# type=server +# +# The IP address or hostname of the machine running the database server that is +# being defined. MaxScale will use this address to connect to the backend +# database server. +# +# address= +# +# The port on which the database listens for incoming connections. MaxScale +# will use this port to connect to the database server. +# +# port= +# +# The name for the protocol module to use to connect MaxScale to the database. +# Currently the only backend protocol supported is the MySQLBackend module. 
+# +# protocol=MySQLBackend +# +## Examples: [server1] type=server -address=127.0.0.1 +address=192.168.100.101 port=3000 protocol=MySQLBackend [server2] type=server -address=127.0.0.1 -port=3001 +address=192.168.100.102 +port=3000 protocol=MySQLBackend [server3] type=server -address=127.0.0.1 -port=3002 +address=192.168.100.103 +port=3000 protocol=MySQLBackend diff --git a/server/core/CMakeLists.txt b/server/core/CMakeLists.txt index 2c7854de3..b60999901 100644 --- a/server/core/CMakeLists.txt +++ b/server/core/CMakeLists.txt @@ -1,11 +1,13 @@ -file(GLOB FULLCORE_SRC *.c) -add_library(fullcore STATIC ${FULLCORE_SRC}) -target_link_libraries(fullcore log_manager utils pthread ${EMBEDDED_LIB} ssl aio rt crypt dl crypto inih z m stdc++) - +if(BUILD_TESTS) + file(GLOB FULLCORE_SRC *.c) + add_library(fullcore STATIC ${FULLCORE_SRC}) + target_link_libraries(fullcore log_manager utils pthread ${EMBEDDED_LIB} ssl aio rt crypt dl crypto inih z m stdc++) +endif() add_executable(maxscale atomic.c buffer.c spinlock.c gateway.c gw_utils.c utils.c dcb.c load_utils.c session.c service.c server.c poll.c config.c users.c hashtable.c dbusers.c thread.c gwbitmask.c - monitor.c adminusers.c secrets.c filter.c modutil.c hint.c housekeeper.c) + monitor.c adminusers.c secrets.c filter.c modutil.c hint.c + housekeeper.c memlog.c) target_link_libraries(maxscale ${EMBEDDED_LIB} log_manager utils ssl aio pthread crypt dl crypto inih z rt m stdc++) install(TARGETS maxscale DESTINATION bin) @@ -18,5 +20,5 @@ target_link_libraries(maxpasswd log_manager utils pthread crypt crypto) install(TARGETS maxpasswd DESTINATION bin) if(BUILD_TESTS) -add_subdirectory(test) -endif() \ No newline at end of file + add_subdirectory(test) +endif() diff --git a/server/core/Makefile b/server/core/Makefile index 7ad4e1b01..4eab4162c 100644 --- a/server/core/Makefile +++ b/server/core/Makefile @@ -1,4 +1,4 @@ -# This file is distributed as part of the SkySQL Gateway. 
It is free +# This file is distributed as part of the MariaDB Corporation MaxScale. It is free # software: you can redistribute it and/or modify it under the terms of the # GNU General Public License as published by the Free Software Foundation, # version 2. @@ -12,7 +12,7 @@ # this program; if not, write to the Free Software Foundation, Inc., 51 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # -# Copyright SkySQL Ab 2013 +# Copyright MariaDB Corporation Ab 2013 # # Revision History # Date Who Description @@ -47,7 +47,7 @@ CC=cc CFLAGS=-c -I/usr/include -I../include -I../modules/include -I../inih \ $(MYSQL_HEADERS) \ -I$(LOGPATH) -I$(UTILSPATH) \ - -Wall -g + -Wall -pedantic -g LDFLAGS=-rdynamic -L$(LOGPATH) \ -Wl,-rpath,$(DEST)/lib \ @@ -65,7 +65,8 @@ include ../../makefile.inc SRCS= atomic.c buffer.c spinlock.c gateway.c \ gw_utils.c utils.c dcb.c load_utils.c session.c service.c server.c \ poll.c config.c users.c hashtable.c dbusers.c thread.c gwbitmask.c \ - monitor.c adminusers.c secrets.c filter.c modutil.c hint.c housekeeper.c + monitor.c adminusers.c secrets.c filter.c modutil.c hint.c \ + housekeeper.c memlog.c HDRS= ../include/atomic.h ../include/buffer.h ../include/dcb.h \ ../include/gw.h ../modules/include/mysql_client_server_protocol.h \ @@ -73,7 +74,8 @@ HDRS= ../include/atomic.h ../include/buffer.h ../include/dcb.h \ ../include/modules.h ../include/poll.h ../include/config.h \ ../include/users.h ../include/hashtable.h ../include/gwbitmask.h \ ../include/adminusers.h ../include/version.h ../include/maxscale.h \ - ../include/filter.h ../include/modutil.h ../hint.h ../include/housekeeper.h + ../include/filter.h ../include/modutil.h ../hint.h \ + ../include/housekeeper.h ../include/memlog.h OBJ=$(SRCS:.c=.o) diff --git a/server/core/adminusers.c b/server/core/adminusers.c index 61cd7c077..2f1bc8fe8 100644 --- a/server/core/adminusers.c +++ b/server/core/adminusers.c @@ -1,5 +1,5 @@ /* - * This file is distributed as part of the SkySQL 
Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -13,7 +13,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * - * Copyright SkySQL Ab 2013 + * Copyright MariaDB Corporation Ab 2013-2014 */ #include #include @@ -27,7 +27,10 @@ #include #include -extern int lm_enabled_logfiles_bitmask; +/** Defined in log_manager.cc */ +extern int lm_enabled_logfiles_bitmask; +extern size_t log_ses_count[]; +extern __thread log_info_t tls_log_info; /** * @file adminusers.c - Administration user account management @@ -116,10 +119,12 @@ char fname[1024], *home; char uname[80], passwd[80]; initialise(); - if ((home = getenv("MAXSCALE_HOME")) != NULL) + if ((home = getenv("MAXSCALE_HOME")) != NULL && strlen(home) < 1024){ sprintf(fname, "%s/etc/passwd", home); - else + } + else{ sprintf(fname, "/usr/local/skysql/MaxScale/etc/passwd"); + } if ((fp = fopen(fname, "r")) == NULL) return NULL; if ((rval = users_alloc()) == NULL) @@ -150,10 +155,12 @@ FILE *fp; char fname[1024], *home, *cpasswd; initialise(); - if ((home = getenv("MAXSCALE_HOME")) != NULL) + if ((home = getenv("MAXSCALE_HOME")) != NULL && strlen(home) < 1024){ sprintf(fname, "%s/etc/passwd", home); - else + } + else{ sprintf(fname, "/usr/local/skysql/MaxScale/etc/passwd"); + } if (users == NULL) { @@ -246,7 +253,7 @@ char* admin_remove_user( /** * Open passwd file and remove user from the file. */ - if ((home = getenv("MAXSCALE_HOME")) != NULL) { + if ((home = getenv("MAXSCALE_HOME")) != NULL && strlen(home) < 1024) { sprintf(fname, "%s/etc/passwd", home); sprintf(fname_tmp, "%s/etc/passwd_tmp", home); } else { @@ -310,7 +317,12 @@ char* admin_remove_user( * Unmatching lines are copied to tmp file. 
*/ if (strncmp(uname, fusr, strlen(uname)+1) != 0) { - fsetpos(fp, &rpos); /** one step back */ + if(fsetpos(fp, &rpos) != 0){ /** one step back */ + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : Unable to set stream position. "))); + + } fgets(line, LINELEN, fp); fputs(line, fp_tmp); } diff --git a/server/core/atomic.c b/server/core/atomic.c index f8dd4a07f..c1bd5b244 100644 --- a/server/core/atomic.c +++ b/server/core/atomic.c @@ -1,5 +1,5 @@ /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -13,7 +13,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * - * Copyright SkySQL Ab 2013 + * Copyright MariaDB Corporation Ab 2013-2014 */ /** @@ -29,23 +29,32 @@ */ /** - * Implementation of an atomic add operation for the X86 processor. + * Implementation of an atomic add operation for the GCC environment, or the + * X86 processor. If we are working within GNU C then we can use the GCC + * atomic add built in function, which is portable across platforms that + * implement GCC. Otherwise, this function currently supports only X86 + * architecture (without further development). + * * Adds a value to the contents of a location pointed to by the first parameter. - * The add operation is atomic and the return value is the value stored in the location - * prior to the operation. The number that is added may be signed, therefore atomic_subtract - * is merely an atomic add with a negative value. + * The add operation is atomic and the return value is the value stored in the + * location prior to the operation. 
The number that is added may be signed, + * therefore atomic_subtract is merely an atomic add with a negative value. * * @param variable Pointer the the variable to add to * @param value Value to be added - * @return Pointer to the value of variable before the add occured + * @return The value of variable before the add occurred */ int atomic_add(int *variable, int value) { +#ifdef __GNUC__ + return (int) __sync_fetch_and_add (variable, value); +#else asm volatile( "lock; xaddl %%eax, %2;" :"=a" (value) : "a" (value), "m" (*variable) : "memory" ); return value; +#endif } diff --git a/server/core/buffer.c b/server/core/buffer.c index 290da6bde..6f9a162be 100644 --- a/server/core/buffer.c +++ b/server/core/buffer.c @@ -1,5 +1,5 @@ /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -13,11 +13,11 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
* - * Copyright SkySQL Ab 2013 + * Copyright MariaDB Corporation Ab 2014 */ /** - * @file buffer.h - The Gateway buffer management functions + * @file buffer.h - The MaxScale buffer management functions * * The buffer management is based on the principle of a linked list * of variable size buffer, the intention beign to allow longer @@ -42,6 +42,15 @@ #include #include #include +#include +#include +#include +#include + +/** Defined in log_manager.cc */ +extern int lm_enabled_logfiles_bitmask; +extern size_t log_ses_count[]; +extern __thread log_info_t tls_log_info; static buffer_object_t* gwbuf_remove_buffer_object( GWBUF* buf, @@ -65,29 +74,32 @@ gwbuf_alloc(unsigned int size) GWBUF *rval; SHARED_BUF *sbuf; - // Allocate the buffer header + /* Allocate the buffer header */ if ((rval = (GWBUF *)malloc(sizeof(GWBUF))) == NULL) { - return NULL; + goto retblock;; } - // Allocate the shared data buffer + /* Allocate the shared data buffer */ if ((sbuf = (SHARED_BUF *)malloc(sizeof(SHARED_BUF))) == NULL) { free(rval); - return NULL; + rval = NULL; + goto retblock; } - // Allocate the space for the actual data + /* Allocate the space for the actual data */ if ((sbuf->data = (unsigned char *)malloc(size)) == NULL) { + ss_dassert(sbuf->data != NULL); free(rval); free(sbuf); - return NULL; + rval = NULL; + goto retblock; } spinlock_init(&rval->gwbuf_lock); rval->start = sbuf->data; - rval->end = rval->start + size; + rval->end = (void *)((char *)rval->start+size); sbuf->refcount = 1; rval->sbuf = sbuf; rval->next = NULL; @@ -98,6 +110,14 @@ SHARED_BUF *sbuf; rval->gwbuf_info = GWBUF_INFO_NONE; rval->gwbuf_bufobj = NULL; CHK_GWBUF(rval); +retblock: + if (rval == NULL) + { + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : Memory allocation failed due to %s.", + strerror(errno)))); + } return rval; } @@ -159,8 +179,13 @@ gwbuf_clone(GWBUF *buf) { GWBUF *rval; - if ((rval = (GWBUF *)malloc(sizeof(GWBUF))) == NULL) + if ((rval = (GWBUF *)calloc(1,sizeof(GWBUF))) == 
NULL) { + ss_dassert(rval != NULL); + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : Memory allocation failed due to %s.", + strerror(errno)))); return NULL; } @@ -169,16 +194,43 @@ GWBUF *rval; rval->start = buf->start; rval->end = buf->end; rval->gwbuf_type = buf->gwbuf_type; - rval->properties = NULL; - rval->hint = NULL; rval->gwbuf_info = buf->gwbuf_info; rval->gwbuf_bufobj = buf->gwbuf_bufobj; - rval->next = NULL; rval->tail = rval; CHK_GWBUF(rval); return rval; } +/** + * Clone whole GWBUF list instead of single buffer. + * + * @param buf head of the list to be cloned till the tail of it + * + * @return head of the cloned list or NULL if the list was empty. + */ +GWBUF* gwbuf_clone_all( + GWBUF* buf) +{ + GWBUF* rval; + GWBUF* clonebuf; + + if (buf == NULL) + { + return NULL; + } + /** Store the head of the list to rval. */ + clonebuf = gwbuf_clone(buf); + rval = clonebuf; + + while (buf->next) + { + buf = buf->next; + clonebuf->next = gwbuf_clone(buf); + clonebuf = clonebuf->next; + } + return rval; +} + GWBUF *gwbuf_clone_portion( GWBUF *buf, @@ -192,13 +244,18 @@ GWBUF *gwbuf_clone_portion( if ((clonebuf = (GWBUF *)malloc(sizeof(GWBUF))) == NULL) { + ss_dassert(clonebuf != NULL); + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : Memory allocation failed due to %s.", + strerror(errno)))); return NULL; } atomic_add(&buf->sbuf->refcount, 1); clonebuf->sbuf = buf->sbuf; clonebuf->gwbuf_type = buf->gwbuf_type; /*< clone info bits too */ - clonebuf->start = (void *)((char*)buf->start)+start_offset; - clonebuf->end = (void *)((char *)clonebuf->start)+length; + clonebuf->start = (void *)((char*)buf->start+start_offset); + clonebuf->end = (void *)((char *)clonebuf->start+length); clonebuf->gwbuf_type = buf->gwbuf_type; /*< clone the type for now */ clonebuf->properties = NULL; clonebuf->hint = NULL; @@ -277,8 +334,6 @@ return_clonebuf: GWBUF * gwbuf_append(GWBUF *head, GWBUF *tail) { -GWBUF *ptr = head; - if (!head) return tail; 
CHK_GWBUF(head); @@ -311,7 +366,7 @@ GWBUF *rval = head; CHK_GWBUF(head); GWBUF_CONSUME(head, length); - CHK_GWBUF(head); + CHK_GWBUF(head); if (GWBUF_EMPTY(head)) { @@ -370,11 +425,36 @@ gwbuf_trim(GWBUF *buf, unsigned int n_bytes) gwbuf_consume(buf, GWBUF_LENGTH(buf)); return NULL; } - buf->end -= n_bytes; + buf->end = (void *)((char *)buf->end - n_bytes); return buf; } +/** + * Trim bytes from the end of a GWBUF structure that may be the first + * in a list. If the buffer has n_bytes or less then it will be freed and + * the next buffer in the list will be returned, or if none, NULL. + * + * @param head The buffer to trim + * @param n_bytes The number of bytes to trim off + * @return The buffer chain or NULL if buffer chain now empty + */ +GWBUF * +gwbuf_rtrim(GWBUF *head, unsigned int n_bytes) +{ +GWBUF *rval = head; + CHK_GWBUF(head); + GWBUF_RTRIM(head, n_bytes); + CHK_GWBUF(head); + + if (GWBUF_EMPTY(head)) + { + rval = head->next; + gwbuf_free(head); + } + return rval; +} + /** * Set given type to all buffers on the list. 
* * @@ -413,6 +493,16 @@ void gwbuf_add_buffer_object( CHK_GWBUF(buf); newb = (buffer_object_t *)malloc(sizeof(buffer_object_t)); + ss_dassert(newb != NULL); + + if (newb == NULL) + { + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : Memory allocation failed due to %s.", + strerror(errno)))); + return; + } newb->bo_id = id; newb->bo_data = data; newb->bo_donefun_fp = donefun_fp; @@ -457,8 +547,10 @@ void* gwbuf_get_buffer_object_data( } /** Unlock */ spinlock_release(&buf->gwbuf_lock); - - return bo->bo_data; + if(bo){ + return bo->bo_data; + } + return NULL; } /** @@ -493,8 +585,15 @@ gwbuf_add_property(GWBUF *buf, char *name, char *value) BUF_PROPERTY *prop; if ((prop = malloc(sizeof(BUF_PROPERTY))) == NULL) + { + ss_dassert(prop != NULL); + + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : Memory allocation failed due to %s.", + strerror(errno)))); return 0; - + } prop->name = strdup(name); prop->value = strdup(value); spinlock_acquire(&buf->gwbuf_lock); @@ -544,7 +643,10 @@ int len; if ((newbuf = gwbuf_alloc(gwbuf_length(orig))) != NULL) { + newbuf->gwbuf_type = orig->gwbuf_type; + newbuf->hint = hint_dup(orig->hint); ptr = GWBUF_DATA(newbuf); + while (orig) { len = GWBUF_LENGTH(orig); diff --git a/server/core/config.c b/server/core/config.c index 995e0e0f4..a82827257 100644 --- a/server/core/config.c +++ b/server/core/config.c @@ -1,5 +1,5 @@ /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -13,7 +13,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
* - * Copyright SkySQL Ab 2013 + * Copyright MariaDB Corporation Ab 2013-2014 */ /** @@ -38,9 +38,12 @@ * 09/09/14 Massimiliano Pinto Added localhost_match_wildcard_host parameter * 12/09/14 Mark Riddoch Addition of checks on servers list and * internal router suppression of messages + * 30/10/14 Massimiliano Pinto Added disable_master_failback parameter + * 07/11/14 Massimiliano Pinto Addition of monitor timeouts for connect/read/write * * @endverbatim */ +#include #include #include #include @@ -55,8 +58,12 @@ #include #include -extern int lm_enabled_logfiles_bitmask; +/** Defined in log_manager.cc */ +extern int lm_enabled_logfiles_bitmask; +extern size_t log_ses_count[]; +extern __thread log_info_t tls_log_info; +extern int setipaddress(struct in_addr *, char *); static int process_config_context(CONFIG_CONTEXT *); static int process_config_update(CONFIG_CONTEXT *); static void free_config_context(CONFIG_CONTEXT *); @@ -131,7 +138,7 @@ CONFIG_PARAMETER *param, *p1; ptr->element = NULL; cntxt->next = ptr; } - /* Check to see if the paramter already exists for the section */ + /* Check to see if the parameter already exists for the section */ p1 = ptr->parameters; while (p1) { @@ -460,6 +467,7 @@ int error_count = 0; if (!succp) { + if(param){ LOGIF(LM, (skygw_log_write( LOGFILE_MESSAGE, "* Warning : invalid value type " @@ -469,6 +477,12 @@ int error_count = 0; ((SERVICE*)obj->element)->name, param->name, param->value))); + }else{ + LOGIF(LE, (skygw_log_write( + LOGFILE_ERROR, + "Error : parameter was NULL"))); + + } } } } /*< if (rw_split) */ @@ -576,11 +590,12 @@ int error_count = 0; } if (obj->element && options) { - char *s = strtok(options, ","); + char *lasts; + char *s = strtok_r(options, ",", &lasts); while (s) { filterAddOption(obj->element, s); - s = strtok(NULL, ","); + s = strtok_r(NULL, ",", &lasts); } } if (obj->element) @@ -626,7 +641,8 @@ int error_count = 0; router = config_get_value(obj->parameters, "router"); if (servers && obj->element) { - 
char *s = strtok(servers, ","); + char *lasts; + char *s = strtok_r(servers, ",", &lasts); while (s) { CONFIG_CONTEXT *obj1 = context; @@ -653,7 +669,7 @@ int error_count = 0; "service '%s'.", s, obj->object))); } - s = strtok(NULL, ","); + s = strtok_r(NULL, ",", &lasts); } } else if (servers == NULL && internalService(router) == 0) @@ -667,11 +683,12 @@ int error_count = 0; } if (roptions && obj->element) { - char *s = strtok(roptions, ","); + char *lasts; + char *s = strtok_r(roptions, ",", &lasts); while (s) { serviceAddRouterOption(obj->element, s); - s = strtok(NULL, ","); + s = strtok_r(NULL, ",", &lasts); } } if (filters && obj->element) @@ -764,6 +781,10 @@ int error_count = 0; unsigned long interval = 0; int replication_heartbeat = 0; int detect_stale_master = 0; + int disable_master_failback = 0; + int connect_timeout = 0; + int read_timeout = 0; + int write_timeout = 0; module = config_get_value(obj->parameters, "module"); servers = config_get_value(obj->parameters, "servers"); @@ -781,12 +802,26 @@ int error_count = 0; detect_stale_master = atoi(config_get_value(obj->parameters, "detect_stale_master")); } + if (config_get_value(obj->parameters, "disable_master_failback")) { + disable_master_failback = atoi(config_get_value(obj->parameters, "disable_master_failback")); + } + + if (config_get_value(obj->parameters, "backend_connect_timeout")) { + connect_timeout = atoi(config_get_value(obj->parameters, "backend_connect_timeout")); + } + if (config_get_value(obj->parameters, "backend_read_timeout")) { + read_timeout = atoi(config_get_value(obj->parameters, "backend_read_timeout")); + } + if (config_get_value(obj->parameters, "backend_write_timeout")) { + write_timeout = atoi(config_get_value(obj->parameters, "backend_write_timeout")); + } + if (module) { obj->element = monitor_alloc(obj->object, module); if (servers && obj->element) { - char *s; + char *s, *lasts; /* if id is not set, compute it now with pid only */ if (gateway.id == 0) { @@ -808,15 
+843,27 @@ int error_count = 0; if(detect_stale_master == 1) monitorDetectStaleMaster(obj->element, detect_stale_master); + /* disable master failback */ + if(disable_master_failback == 1) + monitorDisableMasterFailback(obj->element, disable_master_failback); + + /* set timeouts */ + if (connect_timeout > 0) + monitorSetNetworkTimeout(obj->element, MONITOR_CONNECT_TIMEOUT, connect_timeout); + if (read_timeout > 0) + monitorSetNetworkTimeout(obj->element, MONITOR_READ_TIMEOUT, read_timeout); + if (write_timeout > 0) + monitorSetNetworkTimeout(obj->element, MONITOR_WRITE_TIMEOUT, write_timeout); + /* get the servers to monitor */ - s = strtok(servers, ","); + s = strtok_r(servers, ",", &lasts); while (s) { CONFIG_CONTEXT *obj1 = context; int found = 0; while (obj1) { - if (strcmp(s, obj1->object) == 0 && + if (strcmp(trim(s), obj1->object) == 0 && obj->element && obj1->element) { found = 1; @@ -836,7 +883,7 @@ int error_count = 0; "monitor '%s'.", s, obj->object))); - s = strtok(NULL, ","); + s = strtok_r(NULL, ",", &lasts); } } if (obj->element && user && passwd) @@ -1106,6 +1153,31 @@ config_threadcount() return gateway.n_threads; } +/** + * Return the number of non-blocking polls to be done before a blocking poll + * is issued. + * + * @return The number of blocking poll calls to make before a blocking call + */ +unsigned int +config_nbpolls() +{ + return gateway.n_nbpoll; +} + +/** + * Return the configured number of milliseconds for which we wait when we do + * a blocking poll call. 
+ * + * @return The number of milliseconds to sleep in a blocking poll call + */ +unsigned int +config_pollsleep() +{ + return gateway.pollsleep; +} + + static struct { char *logname; logfile_id_t logfile; @@ -1126,9 +1198,20 @@ static int handle_global_item(const char *name, const char *value) { int i; - if (strcmp(name, "threads") == 0) { + if (strcmp(name, "threads") == 0) + { gateway.n_threads = atoi(value); - } else { + } + else if (strcmp(name, "non_blocking_polls") == 0) + { + gateway.n_nbpoll = atoi(value); + } + else if (strcmp(name, "poll_sleep") == 0) + { + gateway.pollsleep = atoi(value); + } + else + { for (i = 0; lognames[i].logname; i++) { if (strcasecmp(name, lognames[i].logname) == 0) @@ -1150,6 +1233,8 @@ static void global_defaults() { gateway.n_threads = 1; + gateway.n_nbpoll = DEFAULT_NBPOLLS; + gateway.pollsleep = DEFAULT_POLLSLEEP; if (version_string != NULL) gateway.version_string = strdup(version_string); else @@ -1260,7 +1345,7 @@ SERVER *server; (PERCENT_TYPE|COUNT_TYPE)); } - if (!succp) + if (!succp && param != NULL) { LOGIF(LM, (skygw_log_write( LOGFILE_MESSAGE, @@ -1305,6 +1390,7 @@ SERVER *server; if (!succp) { + if(param){ LOGIF(LM, (skygw_log_write( LOGFILE_MESSAGE, "* Warning : invalid value type " @@ -1314,6 +1400,11 @@ SERVER *server; ((SERVICE*)obj->element)->name, param->name, param->value))); + }else{ + LOGIF(LE, (skygw_log_write( + LOGFILE_ERROR, + "Error : parameter was NULL"))); + } } } } @@ -1346,11 +1437,11 @@ SERVER *server; user, auth); if (enable_root_user) - serviceEnableRootUser(service, atoi(enable_root_user)); + serviceEnableRootUser(obj->element, atoi(enable_root_user)); if (allow_localhost_match_wildcard_host) serviceEnableLocalhostMatchWildcardHost( - service, + obj->element, atoi(allow_localhost_match_wildcard_host)); } } @@ -1441,14 +1532,15 @@ SERVER *server; filters = config_get_value(obj->parameters, "filters"); if (servers && obj->element) { - char *s = strtok(servers, ","); + char *lasts; + char *s = 
strtok_r(servers, ",", &lasts); while (s) { CONFIG_CONTEXT *obj1 = context; int found = 0; while (obj1) { - if (strcmp(s, obj1->object) == 0 && + if (strcmp(trim(s), obj1->object) == 0 && obj->element && obj1->element) { found = 1; @@ -1471,17 +1563,18 @@ SERVER *server; "service '%s'.", s, obj->object))); } - s = strtok(NULL, ","); + s = strtok_r(NULL, ",", &lasts); } } if (roptions && obj->element) { - char *s = strtok(roptions, ","); + char *lasts; + char *s = strtok_r(roptions, ",", &lasts); serviceClearRouterOptions(obj->element); while (s) { serviceAddRouterOption(obj->element, s); - s = strtok(NULL, ","); + s = strtok_r(NULL, ",", &lasts); } } if (filters && obj->element) @@ -1579,17 +1672,6 @@ static char *service_params[] = NULL }; -static char *server_params[] = - { - "type", - "address", - "port", - "protocol", - "monitorpw", - "monitoruser", - NULL - }; - static char *listener_params[] = { "type", @@ -1611,6 +1693,10 @@ static char *monitor_params[] = "monitor_interval", "detect_replication_lag", "detect_stale_master", + "disable_master_failback", + "backend_connect_timeout", + "backend_read_timeout", + "backend_write_timeout", NULL }; /** @@ -1728,7 +1814,7 @@ config_truth_value(char *str) { return 1; } - if (strcasecmp(str, "flase") == 0 || strcasecmp(str, "off") == 0) + if (strcasecmp(str, "false") == 0 || strcasecmp(str, "off") == 0) { return 0; } diff --git a/server/core/dbusers.c b/server/core/dbusers.c index 4f8de392b..4a4174699 100644 --- a/server/core/dbusers.c +++ b/server/core/dbusers.c @@ -1,5 +1,5 @@ /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. 
@@ -13,7 +13,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * - * Copyright SkySQL Ab 2013 + * Copyright MariaDB Corporation Ab 2013-2014 */ /** @@ -28,11 +28,17 @@ * 06/02/2014 Massimiliano Pinto Mysql user root selected based on configuration flag * 26/02/2014 Massimiliano Pinto Addd: replace_mysql_users() routine may replace users' table based on a checksum * 28/02/2014 Massimiliano Pinto Added Mysql user@host authentication + * 29/09/2014 Massimiliano Pinto Added Mysql user@host authentication with wildcard in IPv4 hosts: + * x.y.z.%, x.y.%.%, x.%.%.% + * 03/10/14 Massimiliano Pinto Added netmask to user@host authentication for wildcard in IPv4 hosts + * 13/10/14 Massimiliano Pinto Added (user@host)@db authentication + * 04/12/14 Massimiliano Pinto Added support for IPv$ wildcard hosts: a.%, a.%.% and a.b.% * * @endverbatim */ #include +#include #include #include @@ -43,20 +49,71 @@ #include #include #include +#include + + +#define DEFAULT_CONNECT_TIMEOUT 3 +#define DEFAULT_READ_TIMEOUT 1 +#define DEFAULT_WRITE_TIMEOUT 2 #define USERS_QUERY_NO_ROOT " AND user NOT IN ('root')" -#define LOAD_MYSQL_USERS_QUERY "SELECT user, host, password, concat(user,host,password) AS userdata FROM mysql.user WHERE user IS NOT NULL AND user <> ''" + +#if 0 +# define LOAD_MYSQL_USERS_QUERY \ + "SELECT DISTINCT \ + user.user AS user, \ + user.host AS host, \ + user.password AS password, \ + concat(user.user,user.host,user.password, \ + IF((user.Select_priv+0)||find_in_set('Select',Coalesce(tp.Table_priv,0)),'Y','N') , \ + COALESCE( db.db,tp.db, '')) AS userdata, \ + user.Select_priv AS anydb, \ + COALESCE( db.db,tp.db, NULL) AS db \ + FROM \ + mysql.user LEFT JOIN \ + mysql.db ON user.user=db.user AND user.host=db.host LEFT JOIN \ + mysql.tables_priv tp ON user.user=tp.user AND user.host=tp.host \ + WHERE user.user IS NOT NULL AND user.user <> ''" + +#else +# define LOAD_MYSQL_USERS_QUERY 
"SELECT user, host, password, concat(user,host,password,Select_priv) AS userdata, Select_priv AS anydb FROM mysql.user WHERE user IS NOT NULL AND user <> ''" +#endif #define MYSQL_USERS_COUNT "SELECT COUNT(1) AS nusers FROM mysql.user" -extern int lm_enabled_logfiles_bitmask; +#define MYSQL_USERS_WITH_DB_ORDER " ORDER BY host DESC" +#define LOAD_MYSQL_USERS_WITH_DB_QUERY "SELECT user.user AS user,user.host AS host,user.password AS password,concat(user.user,user.host,user.password,user.Select_priv,IFNULL(db,'')) AS userdata, user.Select_priv AS anydb,db.db AS db FROM mysql.user LEFT JOIN mysql.db ON user.user=db.user AND user.host=db.host WHERE user.user IS NOT NULL AND user.user <> ''" MYSQL_USERS_WITH_DB_ORDER -static int getUsers(SERVICE *service, struct users *users); +#define MYSQL_USERS_WITH_DB_COUNT "SELECT COUNT(1) AS nusers_db FROM (" LOAD_MYSQL_USERS_WITH_DB_QUERY ") AS tbl_count" + +#define LOAD_MYSQL_USERS_WITH_DB_QUERY_NO_ROOT "SELECT * FROM (" LOAD_MYSQL_USERS_WITH_DB_QUERY ") AS t1 WHERE user NOT IN ('root')" MYSQL_USERS_WITH_DB_ORDER + +#define LOAD_MYSQL_DATABASE_NAMES "SELECT * FROM ( (SELECT COUNT(1) AS ndbs FROM INFORMATION_SCHEMA.SCHEMATA) AS tbl1, (SELECT GRANTEE,PRIVILEGE_TYPE from INFORMATION_SCHEMA.USER_PRIVILEGES WHERE privilege_type='SHOW DATABASES' AND REPLACE(GRANTEE, \"\'\",\"\")=CURRENT_USER()) AS tbl2)" + +/** Defined in log_manager.cc */ +extern int lm_enabled_logfiles_bitmask; +extern size_t log_ses_count[]; +extern __thread log_info_t tls_log_info; + +static int getUsers(SERVICE *service, USERS *users); static int uh_cmpfun( void* v1, void* v2); static void *uh_keydup(void* key); static void uh_keyfree( void* key); static int uh_hfun( void* key); char *mysql_users_fetch(USERS *users, MYSQL_USER_HOST *key); char *mysql_format_user_entry(void *data); +int add_mysql_users_with_host_ipv4(USERS *users, char *user, char *host, char *passwd, char *anydb, char *db); +static int getDatabases(SERVICE *, MYSQL *); +HASHTABLE 
*resource_alloc(); +void resource_free(HASHTABLE *resource); +void *resource_fetch(HASHTABLE *, char *); +int resource_add(HASHTABLE *, char *, char *); +int resource_hash(char *); +static int normalize_hostname(char *input_host, char *output_host); +static int gw_mysql_set_timeouts( + MYSQL* handle, + int read_timeout, + int write_timeout, + int connect_timeout); /** * Load the user/passwd form mysql.user table into the service users' hashtable @@ -82,16 +139,27 @@ int reload_mysql_users(SERVICE *service) { int i; -struct users *newusers, *oldusers; +USERS *newusers, *oldusers; +HASHTABLE *oldresources; if ((newusers = mysql_users_alloc()) == NULL) return 0; + + oldresources = service->resources; + i = getUsers(service, newusers); + spinlock_acquire(&service->spin); oldusers = service->users; + service->users = newusers; + spinlock_release(&service->spin); + + /* free the old table */ users_free(oldusers); + /* free old resources */ + resource_free(oldresources); return i; } @@ -108,15 +176,23 @@ int replace_mysql_users(SERVICE *service) { int i; -struct users *newusers, *oldusers; +USERS *newusers, *oldusers; +HASHTABLE *oldresources; if ((newusers = mysql_users_alloc()) == NULL) return -1; + oldresources = service->resources; + + /* load db users ad db grants */ i = getUsers(service, newusers); - if (i <= 0) + if (i <= 0) { + users_free(newusers); + /* restore resources */ + service->resources = oldresources; return i; + } spinlock_acquire(&service->spin); oldusers = service->users; @@ -128,6 +204,7 @@ struct users *newusers, *oldusers; LOGFILE_DEBUG, "%lu [replace_mysql_users] users' tables not switched, checksum is the same", pthread_self()))); + /* free the new table */ users_free(newusers); i = 0; @@ -140,53 +217,258 @@ struct users *newusers, *oldusers; service->users = newusers; } + /* free old resources */ + resource_free(oldresources); + spinlock_release(&service->spin); - if (i) + if (i) { + /* free the old table */ users_free(oldusers); + } return i; } 
+ +/** + * Add a new MySQL user with host, password and netmask into the service users table + * + * The netmask values are: + * 0 for any, 32 for single IPv4 + * 24 for a class C from a.b.c.%, 16 for a Class B from a.b.%.% and 8 for a Class A from a.%.%.% + * + * @param users The users table + * @param user The user name + * @param host The host to add, with possible wildcards + * @param passwd The sha1(sha1(passoword)) to add + * @return 1 on success, 0 on failure + */ + +int add_mysql_users_with_host_ipv4(USERS *users, char *user, char *host, char *passwd, char *anydb, char *db) { + struct sockaddr_in serv_addr; + MYSQL_USER_HOST key; + char ret_ip[400]=""; + int ret = 0; + + if (users == NULL || user == NULL || host == NULL) { + return ret; + } + + /* prepare the user@host data struct */ + memset(&serv_addr, 0, sizeof(serv_addr)); + memset(&key, '\0', sizeof(key)); + + /* set user */ + key.user = strdup(user); + + if(key.user == NULL) { + return ret; + } + + /* for anydb == Y key.resource is '\0' as set by memset */ + if (anydb == NULL) { + key.resource = NULL; + } else { + if (strcmp(anydb, "N") == 0) { + if (db != NULL) + key.resource = strdup(db); + else + key.resource = NULL; + } else { + key.resource = strdup(""); + } + } + + /* handle ANY, Class C,B,A */ + + /* ANY */ + if (strcmp(host, "%") == 0) { + strcpy(ret_ip, "0.0.0.0"); + key.netmask = 0; + } else { + /* hostname without % wildcards has netmask = 32 */ + key.netmask = normalize_hostname(host, ret_ip); + + if (key.netmask == -1) { + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : strdup() failed in normalize_hostname for %s@%s", + user, + host))); + } + } + + /* fill IPv4 data struct */ + if (setipaddress(&serv_addr.sin_addr, ret_ip) && strlen(ret_ip)) { + + /* copy IPv4 data into key.ipv4 */ + memcpy(&key.ipv4, &serv_addr, sizeof(serv_addr)); + + /* if netmask < 32 there are % wildcards */ + if (key.netmask < 32) { + /* let's zero the last IP byte: a.b.c.0 we may have set above to 
1*/ + key.ipv4.sin_addr.s_addr &= 0x00FFFFFF; + } + + /* add user@host as key and passwd as value in the MySQL users hash table */ + if (mysql_users_add(users, &key, passwd)) { + ret = 1; + } + } + + free(key.user); + if (key.resource) + free(key.resource); + + return ret; +} + +/** + * Load the database specific grants from mysql.db table into the service resources hashtable + * environment. + * + * @param service The current service + * @param users The users table into which to load the users + * @return -1 on any error or the number of users inserted (0 means no users at all) + */ +static int +getDatabases(SERVICE *service, MYSQL *con) +{ + MYSQL_ROW row; + MYSQL_RES *result = NULL; + char *service_user = NULL; + char *service_passwd = NULL; + int ndbs = 0; + + char *get_showdbs_priv_query = LOAD_MYSQL_DATABASE_NAMES; + + serviceGetUser(service, &service_user, &service_passwd); + + if (service_user == NULL || service_passwd == NULL) + return -1; + + if (mysql_query(con, get_showdbs_priv_query)) { + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : Loading database names for service %s encountered " + "error: %s.", + service->name, + mysql_error(con)))); + return -1; + } + + result = mysql_store_result(con); + + if (result == NULL) { + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : Loading database names for service %s encountered " + "error: %s.", + service->name, + mysql_error(con)))); + return -1; + } + + /* Result has only one row */ + row = mysql_fetch_row(result); + + if (row) { + ndbs = atoi(row[0]); + } else { + ndbs = 0; + + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "%s: Unable to load database grant information, MaxScale " + "authentication will proceed without including database " + "permissions. 
To correct this GRANT select permission " + "on msql.db to the user %s.", + service->name, service_user))); + } + + /* free resut set */ + mysql_free_result(result); + + if (!ndbs) { + /* return if no db names are available */ + return 0; + } + + if (mysql_query(con, "SHOW DATABASES")) { + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : Loading database names for service %s encountered " + "error: %s.", + service->name, + mysql_error(con)))); + + return -1; + } + + result = mysql_store_result(con); + + if (result == NULL) { + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : Loading database names for service %s encountered " + "error: %s.", + service->name, + mysql_error(con)))); + + return -1; + } + + /* Now populate service->resources hashatable with db names */ + service->resources = resource_alloc(); + + /* insert key and value "" */ + while ((row = mysql_fetch_row(result))) { + resource_add(service->resources, row[0], ""); + } + + mysql_free_result(result); + + return ndbs; +} + /** * Load the user/passwd form mysql.user table into the service users' hashtable * environment. 
* * @param service The current service * @param users The users table into which to load the users - * @return -1 on any error or the number of users inserted (0 means no users at all) + * @return -1 on any error or the number of users inserted + * (0 means no users at all) */ static int -getUsers(SERVICE *service, struct users *users) +getUsers(SERVICE *service, USERS *users) { - MYSQL *con = NULL; - MYSQL_ROW row; - MYSQL_RES *result = NULL; - int num_fields = 0; - char *service_user = NULL; - char *service_passwd = NULL; - char *dpwd; - int total_users = 0; - SERVER *server; - char *users_query; - unsigned char hash[SHA_DIGEST_LENGTH]=""; - char *users_data = NULL; - int nusers = 0; - int users_data_row_len = MYSQL_USER_MAXLEN + MYSQL_HOST_MAXLEN + MYSQL_PASSWORD_LEN; - struct sockaddr_in serv_addr; - MYSQL_USER_HOST key; - - /* enable_root for MySQL protocol module means load the root user credentials from backend databases */ - if(service->enable_root) { - users_query = LOAD_MYSQL_USERS_QUERY " ORDER BY HOST DESC"; - } else { - users_query = LOAD_MYSQL_USERS_QUERY USERS_QUERY_NO_ROOT " ORDER BY HOST DESC"; + MYSQL *con = NULL; + MYSQL_ROW row; + MYSQL_RES *result = NULL; + char *service_user = NULL; + char *service_passwd = NULL; + char *dpwd; + int total_users = 0; + SERVER_REF *server; + char *users_query; + unsigned char hash[SHA_DIGEST_LENGTH]=""; + char *users_data = NULL; + int nusers = 0; + int users_data_row_len = MYSQL_USER_MAXLEN + + MYSQL_HOST_MAXLEN + + MYSQL_PASSWORD_LEN + + sizeof(char) + + MYSQL_DATABASE_MAXLEN; + int dbnames = 0; + int db_grants = 0; + + if (serviceGetUser(service, &service_user, &service_passwd) == 0) + { + ss_dassert(service_passwd == NULL || service_user == NULL); + return -1; } - - serviceGetUser(service, &service_user, &service_passwd); - if (service_user == NULL || service_passwd == NULL) - return -1; - con = mysql_init(NULL); if (con == NULL) { @@ -196,13 +478,26 @@ getUsers(SERVICE *service, struct users *users) 
mysql_error(con)))); return -1; } + /** Set read, write and connect timeout values */ + if (gw_mysql_set_timeouts(con, + DEFAULT_READ_TIMEOUT, + DEFAULT_WRITE_TIMEOUT, + DEFAULT_CONNECT_TIMEOUT)) + { + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : failed to set timeout values for backend " + "connection."))); + mysql_close(con); + return -1; + } if (mysql_options(con, MYSQL_OPT_USE_REMOTE_CONNECTION, NULL)) { LOGIF(LE, (skygw_log_write_flush( LOGFILE_ERROR, "Error : failed to set external connection. " - "It is needed for backend server connections. " - "Exiting."))); + "It is needed for backend server connections."))); + mysql_close(con); return -1; } /** @@ -210,55 +505,129 @@ getUsers(SERVICE *service, struct users *users) * out of databases * to try */ - server = service->databases; + server = service->dbref; dpwd = decryptPassword(service_passwd); - while (server != NULL && (mysql_real_connect(con, - server->name, - service_user, - dpwd, - NULL, - server->port, - NULL, - 0) == NULL)) - { - server = server->nextdb; + + /* Select a server with Master bit, if available */ + while (server != NULL && !(server->server->status & SERVER_MASTER)) { + server = server->next; } + + if (service->svc_do_shutdown) + { + free(dpwd); + mysql_close(con); + return -1; + } + + /* Try loading data from master server */ + if (server != NULL && + (mysql_real_connect(con, + server->server->name, service_user, + dpwd, + NULL, + server->server->port, + NULL, 0) != NULL)) + { + LOGIF(LD, (skygw_log_write_flush( + LOGFILE_DEBUG, + "Dbusers : Loading data from backend database with " + "Master role [%s:%i] for service [%s]", + server->server->name, + server->server->port, + service->name))); + } else { + /* load data from other servers via loop */ + server = service->dbref; + + while (!service->svc_do_shutdown && + server != NULL && + (mysql_real_connect(con, + server->server->name, + service_user, + dpwd, + NULL, + server->server->port, + NULL, + 0) == NULL)) + { + server 
= server->next; + } + + if (service->svc_do_shutdown) + { + free(dpwd); + mysql_close(con); + return -1; + } + + if (server != NULL) { + LOGIF(LD, (skygw_log_write_flush( + LOGFILE_DEBUG, + "Dbusers : Loading data from backend database " + "[%s:%i] for service [%s]", + server->server->name, + server->server->port, + service->name))); + } + } + free(dpwd); if (server == NULL) { LOGIF(LE, (skygw_log_write_flush( - LOGFILE_ERROR, - "Error : Unable to get user data from backend database " - "for service %s. Missing server information.", - service->name))); + LOGFILE_ERROR, + "Error : Unable to get user data from backend database " + "for service [%s]. Missing server information.", + service->name))); mysql_close(con); return -1; } - if (mysql_query(con, MYSQL_USERS_COUNT)) { - LOGIF(LE, (skygw_log_write_flush( - LOGFILE_ERROR, - "Error : Loading users for service %s encountered " - "error: %s.", - service->name, - mysql_error(con)))); - mysql_close(con); - return -1; + /** Count users. Start with users and db grants for users */ + if (mysql_query(con, MYSQL_USERS_WITH_DB_COUNT)) { + if (mysql_errno(con) != ER_TABLEACCESS_DENIED_ERROR) { + /* This is an error we cannot handle, return */ + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : Loading users for service [%s] encountered " + "error: [%s].", + service->name, + mysql_error(con)))); + mysql_close(con); + return -1; + } else { + /* + * We have got ER_TABLEACCESS_DENIED_ERROR + * try counting users from mysql.user without DB names. 
+ */ + if (mysql_query(con, MYSQL_USERS_COUNT)) { + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : Loading users for service [%s] encountered " + "error: [%s].", + service->name, + mysql_error(con)))); + mysql_close(con); + return -1; + } + } } + result = mysql_store_result(con); if (result == NULL) { LOGIF(LE, (skygw_log_write_flush( LOGFILE_ERROR, - "Error : Loading users for service %s encountered " - "error: %s.", + "Error : Loading users for service [%s] encountered " + "error: [%s].", service->name, mysql_error(con)))); mysql_close(con); return -1; } - num_fields = mysql_num_fields(result); + row = mysql_fetch_row(result); nusers = atoi(row[0]); @@ -274,15 +643,85 @@ getUsers(SERVICE *service, struct users *users) return -1; } + if(service->enable_root) { + /* enable_root for MySQL protocol module means load the root user credentials from backend databases */ + users_query = LOAD_MYSQL_USERS_WITH_DB_QUERY; + } else { + users_query = LOAD_MYSQL_USERS_WITH_DB_QUERY_NO_ROOT; + } + + /* send first the query that fetches users and db grants */ if (mysql_query(con, users_query)) { - LOGIF(LE, (skygw_log_write_flush( - LOGFILE_ERROR, - "Error : Loading users for service %s encountered " - "error: %s.", - service->name, - mysql_error(con)))); - mysql_close(con); - return -1; + /* + * An error occurred executing the query + * + * Check mysql_errno() against ER_TABLEACCESS_DENIED_ERROR) + */ + + if (1142 != mysql_errno(con)) { + /* This is an error we cannot handle, return */ + + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : Loading users with dbnames for service [%s] encountered " + "error: [%s], MySQL errno %i", + service->name, + mysql_error(con), + mysql_errno(con)))); + + mysql_close(con); + + return -1; + } else { + /* + * We have got ER_TABLEACCESS_DENIED_ERROR + * try loading users from mysql.user without DB names. 
+ */ + + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "%s: Unable to load database grant information, MaxScale " + "authentication will proceed without including database " + "permissions. To correct this GRANT select permission " + "on msql.db to the user %s.", + service->name, service_user))); + + /* check for root user select */ + if(service->enable_root) { + users_query = LOAD_MYSQL_USERS_QUERY " ORDER BY HOST DESC"; + } else { + users_query = LOAD_MYSQL_USERS_QUERY USERS_QUERY_NO_ROOT " ORDER BY HOST DESC"; + } + + if (mysql_query(con, users_query)) { + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : Loading users for service [%s] encountered " + "error: [%s], code %i", + service->name, + mysql_error(con), + mysql_errno(con)))); + + mysql_close(con); + + return -1; + } + + /* users successfully loaded but without db grants */ + + LOGIF(LM, (skygw_log_write_flush( + LOGFILE_MESSAGE, + "Loading users from [mysql.user] without access to [mysql.db] for " + "service [%s]. MaxScale Authentication with DBname on connect " + "will not consider database grants.", + service->name))); + } + } else { + /* + * users successfully loaded with db grants. 
+ */ + + db_grants = 1; } result = mysql_store_result(con); @@ -294,83 +733,132 @@ getUsers(SERVICE *service, struct users *users) "error: %s.", service->name, mysql_error(con)))); + + mysql_free_result(result); mysql_close(con); + return -1; } - num_fields = mysql_num_fields(result); - + users_data = (char *)calloc(nusers, (users_data_row_len * sizeof(char)) + 1); - if(users_data == NULL) - return -1; + if (users_data == NULL) { + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : Memory allocation for user data failed due to " + "%d, %s.", + errno, + strerror(errno)))); + mysql_free_result(result); + mysql_close(con); + + return -1; + } + + if (db_grants) { + /* load all mysql database names */ + dbnames = getDatabases(service, con); + + LOGIF(LD, (skygw_log_write( + LOGFILE_DEBUG, + "Loaded %d MySQL Database Names for service [%s]", + dbnames, + service->name))); + } else { + service->resources = NULL; + } + + while ((row = mysql_fetch_row(result))) { - while ((row = mysql_fetch_row(result))) { /** - * Four fields should be returned. - * user and passwd+1 (escaping the first byte that is '*') are - * added to hashtable. + * Up to six fields could be returned. + * user,host,passwd,concat(),anydb,db + * passwd+1 (escaping the first byte that is '*') */ - char ret_ip[INET_ADDRSTRLEN + 1]=""; - const char *rc; + int rc = 0; + char *password = NULL; - /* prepare the user@host data struct */ - memset(&serv_addr, 0, sizeof(serv_addr)); - memset(&key, 0, sizeof(key)); - - /* if host == '%', 0 is passed */ - if (setipaddress(&serv_addr.sin_addr, strcmp(row[1], "%") ? row[1] : "0.0.0.0")) { - - key.user = strdup(row[0]); - - if(key.user == NULL) { + if (row[2] != NULL) { + /* detect mysql_old_password (pre 4.1 protocol) */ + if (strlen(row[2]) == 16) { LOGIF(LE, (skygw_log_write_flush( LOGFILE_ERROR, - "%lu [getUsers()] strdup() failed for user %s", - pthread_self(), - row[0]))); - + "%s: The user %s@%s has on old password in the " + "backend database. 
MaxScale does not support these " + "old passwords. This user will not be able to connect " + "via MaxScale. Update the users password to correct " + "this.", + service->name, + row[0], + row[1]))); continue; } - memcpy(&key.ipv4, &serv_addr, sizeof(serv_addr)); - - rc = inet_ntop(AF_INET, &(serv_addr).sin_addr, ret_ip, INET_ADDRSTRLEN); + if (strlen(row[2]) > 1) + password = row[2] +1; + else + password = row[2]; + } - /* add user@host as key and passwd as value in the MySQL users hash table */ - if (mysql_users_add(users, &key, strlen(row[2]) ? row[2]+1 : row[2])) { + /* + * add user@host and DB global priv and specificsa grant (if possible) + */ + + if (db_grants) { + /* we have dbgrants, store them */ + rc = add_mysql_users_with_host_ipv4(users, row[0], row[1], password, row[4], row[5]); + } else { + /* we don't have dbgrants, simply set ANY DB for the user */ + rc = add_mysql_users_with_host_ipv4(users, row[0], row[1], password, "Y", NULL); + } + + if (rc == 1) { + if (db_grants) { + char dbgrant[MYSQL_DATABASE_MAXLEN + 1]=""; + if (row[4] != NULL) { + if (strcmp(row[4], "Y")) + strcpy(dbgrant, "ANY"); + else { + if (row[5]) + strncpy(dbgrant, row[5], MYSQL_DATABASE_MAXLEN); + } + } + + if (!strlen(dbgrant)) + strcpy(dbgrant, "no db"); + + /* Log the user being added with its db grants */ + LOGIF(LD, (skygw_log_write_flush( + LOGFILE_DEBUG, + "%s: User %s@%s for database %s added to " + "service user table.", + service->name, + row[0], + row[1], + dbgrant))); + } else { + /* Log the user being added (without db grants) */ LOGIF(LD, (skygw_log_write_flush( LOGFILE_DEBUG, - "%lu [mysql_users_add()] Added user %s@%s(%s)", - pthread_self(), - row[0], - row[1], - rc == NULL ? 
"NULL" : ret_ip))); - - /* Append data in the memory area for SHA1 digest */ - strncat(users_data, row[3], users_data_row_len); - - total_users++; - } else { - LOGIF(LE, (skygw_log_write_flush( - LOGFILE_ERROR, - "%lu [mysql_users_add()] Failed adding user %s@%s(%s)", - pthread_self(), - row[0], - row[1], - rc == NULL ? "NULL" : ret_ip))); + "%s: User %s@%s added to service user table.", + service->name, + row[0], + row[1]))); } - free(key.user); + /* Append data in the memory area for SHA1 digest */ + strncat(users_data, row[3], users_data_row_len); + total_users++; } else { - /* setipaddress() failed, skip user add and log this*/ LOGIF(LE, (skygw_log_write_flush( LOGFILE_ERROR, - "%lu [getUsers()] setipaddress failed: user %s@%s not added", - pthread_self(), + "Warning: Failed to add user %s@%s for service [%s]. " + "This user will be unavailable via MaxScale.", row[0], - row[1]))); + row[1], + service->name))); } } @@ -449,7 +937,7 @@ char *mysql_users_fetch(USERS *users, MYSQL_USER_HOST *key) { if (key == NULL) return NULL; atomic_add(&users->stats.n_fetches, 1); - return hashtable_fetch(users->data, key); + return hashtable_fetch(users->data, key); } /** @@ -475,7 +963,7 @@ static int uh_hfun( void* key) { * Currently only IPv4 addresses are supported * * @param key1 The key value, i.e. username@host (IPv4) - * @param key1 The key value, i.e. username@host (IPv4) + * @param key2 The key value, i.e. 
username@host (IPv4) * @return The compare value */ @@ -483,11 +971,34 @@ static int uh_cmpfun( void* v1, void* v2) { MYSQL_USER_HOST *hu1 = (MYSQL_USER_HOST *) v1; MYSQL_USER_HOST *hu2 = (MYSQL_USER_HOST *) v2; - if (v1 == NULL || v2 == NULL || hu1 == NULL || hu2 == NULL || hu1->user == NULL || hu2->user == NULL) + if (v1 == NULL || v2 == NULL) return 0; - if (strcmp(hu1->user, hu2->user) == 0 && (hu1->ipv4.sin_addr.s_addr == hu2->ipv4.sin_addr.s_addr)) { + if (hu1->user == NULL || hu2->user == NULL) return 0; + + if (strcmp(hu1->user, hu2->user) == 0 && (hu1->ipv4.sin_addr.s_addr == hu2->ipv4.sin_addr.s_addr) && (hu1->netmask >= hu2->netmask)) { + + /* if no database name was passed, auth is ok */ + if (hu1->resource == NULL || (hu1->resource && !strlen(hu1->resource))) { + return 0; + } else { + /* (1) check for no database grants at all and deny auth */ + if (hu2->resource == NULL) { + return 1; + } + /* (2) check for ANY database grant and allow auth */ + if (!strlen(hu2->resource)) { + return 0; + } + /* (3) check for database name specific grant and allow auth */ + if (hu1->resource && hu2->resource && strcmp(hu1->resource,hu2->resource) == 0) { + return 0; + } + + /* no matches, deny auth */ + return 1; + } } else { return 1; } @@ -504,15 +1015,25 @@ static void *uh_keydup(void* key) { MYSQL_USER_HOST *current_key = (MYSQL_USER_HOST *)key; if (key == NULL || rval == NULL || current_key == NULL || current_key->user == NULL) { + if (rval) { + free(rval); + } + return NULL; } rval->user = strdup(current_key->user); - if (rval->user == NULL) + if (rval->user == NULL) { + free(rval); return NULL; + } memcpy(&rval->ipv4, ¤t_key->ipv4, sizeof(struct sockaddr_in)); + memcpy(&rval->netmask, ¤t_key->netmask, sizeof(int)); + + if (current_key->resource) + rval->resource = strdup(current_key->resource); return (void *) rval; } @@ -531,6 +1052,9 @@ static void uh_keyfree( void* key) { if (current_key && current_key->user) free(current_key->user); + if (current_key && 
current_key->resource) + free(current_key->resource); + free(key); } @@ -546,28 +1070,249 @@ char *mysql_format_user_entry(void *data) MYSQL_USER_HOST *entry; char *mysql_user; /* the returned user string is "USER" + "@" + "HOST" + '\0' */ - int mysql_user_len = MYSQL_USER_MAXLEN + 1 + INET_ADDRSTRLEN + 1; + int mysql_user_len = MYSQL_USER_MAXLEN + 1 + INET_ADDRSTRLEN + 10 + MYSQL_USER_MAXLEN + 1; if (data == NULL) return NULL; entry = (MYSQL_USER_HOST *) data; - if (entry == NULL) - return NULL; - mysql_user = (char *) calloc(mysql_user_len, sizeof(char)); if (mysql_user == NULL) return NULL; + + /* format user@host based on wildcards */ - if (entry->ipv4.sin_addr.s_addr == INADDR_ANY) { - snprintf(mysql_user, mysql_user_len, "%s@%%", entry->user); - } else { + if (entry->ipv4.sin_addr.s_addr == INADDR_ANY && entry->netmask == 0) { + snprintf(mysql_user, mysql_user_len-1, "%s@%%", entry->user); + } else if ( (entry->ipv4.sin_addr.s_addr & 0xFF000000) == 0 && entry->netmask == 24) { + snprintf(mysql_user, mysql_user_len-1, "%s@%i.%i.%i.%%", entry->user, entry->ipv4.sin_addr.s_addr & 0x000000FF, (entry->ipv4.sin_addr.s_addr & 0x0000FF00) / (256), (entry->ipv4.sin_addr.s_addr & 0x00FF0000) / (256 * 256)); + } else if ( (entry->ipv4.sin_addr.s_addr & 0xFFFF0000) == 0 && entry->netmask == 16) { + snprintf(mysql_user, mysql_user_len-1, "%s@%i.%i.%%.%%", entry->user, entry->ipv4.sin_addr.s_addr & 0x000000FF, (entry->ipv4.sin_addr.s_addr & 0x0000FF00) / (256)); + } else if ( (entry->ipv4.sin_addr.s_addr & 0xFFFFFF00) == 0 && entry->netmask == 8) { + snprintf(mysql_user, mysql_user_len-1, "%s@%i.%%.%%.%%", entry->user, entry->ipv4.sin_addr.s_addr & 0x000000FF); + } else if (entry->netmask == 32) { strncpy(mysql_user, entry->user, MYSQL_USER_MAXLEN); strcat(mysql_user, "@"); inet_ntop(AF_INET, &(entry->ipv4).sin_addr, mysql_user+strlen(mysql_user), INET_ADDRSTRLEN); + } else { + snprintf(mysql_user, MYSQL_USER_MAXLEN-5, "Err: %s", entry->user); + strcat(mysql_user, "@"); + 
inet_ntop(AF_INET, &(entry->ipv4).sin_addr, mysql_user+strlen(mysql_user), INET_ADDRSTRLEN); } return mysql_user; } + +/* + * The hash function we use for storing MySQL database names. + * + * @param key The key value + * @return The hash key + */ +int +resource_hash(char *key) +{ + return (*key + *(key + 1)); +} + +/** + * Remove the resources table + * + * @param resources The resources table to remove + */ +void +resource_free(HASHTABLE *resources) +{ + if (resources) { + hashtable_free(resources); + } +} + +/** + * Allocate a MySQL database names table + * + * @return The database names table + */ +HASHTABLE * +resource_alloc() +{ +HASHTABLE *resources; + + if ((resources = hashtable_alloc(10, resource_hash, strcmp)) == NULL) + { + return NULL; + } + + hashtable_memory_fns(resources, (HASHMEMORYFN)strdup, (HASHMEMORYFN)strdup, (HASHMEMORYFN)free, (HASHMEMORYFN)free); + + return resources; +} + +/** + * Add a new MySQL database name to the resources table. The resource name must be unique + * + * @param resources The resources table + * @param key The resource name + * @param value The value for resource (not used) + * @return The number of resources dded to the table + */ +int +resource_add(HASHTABLE *resources, char *key, char *value) +{ + return hashtable_add(resources, key, value); +} + +/** + * Fetch a particular database name from the resources table + * + * @param resources The MySQL database names table + * @param key The database name to fetch + * @return The database esists or NULL if not found + */ +void * +resource_fetch(HASHTABLE *resources, char *key) +{ + return hashtable_fetch(resources, key); +} + +/** + * Normalize hostname with % wildcards to a valid IP string. 
+ * + * Valid input values: + * a.b.c.d, a.b.c.%, a.b.%.%, a.%.%.% + * Short formats a.% and a.%.% are both converted to a.%.%.% + * Short format a.b.% is converted to a.b.%.% + * + * Last host byte is set to 1, avoiding setipadress() failure + * + * @param input_host The hostname with possible % wildcards + * @param output_host The normalized hostname (buffer must be preallocated) + * @return The calculated netmask or -1 on failure + */ +static int normalize_hostname(char *input_host, char *output_host) +{ +int netmask, bytes, bits = 0, found_wildcard = 0; +char *p, *lasts, *tmp; +int useorig = 0; + + output_host[0] = 0; + bytes = 0; + + tmp = strdup(input_host); + + if (tmp == NULL) { + return -1; + } + + p = strtok_r(tmp, ".", &lasts); + while (p != NULL) + { + + if (strcmp(p, "%")) + { + if (! isdigit(*p)) + useorig = 1; + + strcat(output_host, p); + bits += 8; + } + else if (bytes == 3) + { + found_wildcard = 1; + strcat(output_host, "1"); + } + else + { + found_wildcard = 1; + strcat(output_host, "0"); + } + bytes++; + p = strtok_r(NULL, ".", &lasts); + if (p) + strcat(output_host, "."); + } + if (found_wildcard) + { + netmask = bits; + while (bytes++ < 4) + { + if (bytes == 4) + { + strcat(output_host, ".1"); + } + else + { + strcat(output_host, ".0"); + } + } + } + else + netmask = 32; + + if (useorig == 1) + { + netmask = 32; + strcpy(output_host, input_host); + } + + free(tmp); + + return netmask; +} + +/** + * Set read, write and connect timeout values for MySQL database connection. 
+ * + * @param handle MySQL handle + * @param read_timeout Read timeout value in seconds + * @param write_timeout Write timeout value in seconds + * @param connect_timeout Connect timeout value in seconds + * + * @return 0 if succeed, 1 if failed + */ +static int gw_mysql_set_timeouts( + MYSQL* handle, + int read_timeout, + int write_timeout, + int connect_timeout) +{ + int rc; + + if ((rc = mysql_options(handle, + MYSQL_OPT_READ_TIMEOUT, + (void *)&read_timeout))) + { + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : failed to set read timeout for backend " + "connection."))); + goto retblock; + } + + if ((rc = mysql_options(handle, + MYSQL_OPT_CONNECT_TIMEOUT, + (void *)&connect_timeout))) + { + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : failed to set connect timeout for backend " + "connection."))); + goto retblock; + } + + if ((rc = mysql_options(handle, + MYSQL_OPT_WRITE_TIMEOUT, + (void *)&write_timeout))) + { + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : failed to set write timeout for backend " + "connection."))); + goto retblock; + } + + retblock: + return rc; +} \ No newline at end of file diff --git a/server/core/dcb.c b/server/core/dcb.c index 0b2b038e7..0e9c8b594 100644 --- a/server/core/dcb.c +++ b/server/core/dcb.c @@ -1,5 +1,5 @@ /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -13,7 +13,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
* - * Copyright SkySQL Ab 2013 + * Copyright MariaDB Corporation Ab 2013-2014 */ /** @@ -32,7 +32,7 @@ * 12/06/13 Mark Riddoch Initial implementation * 21/06/13 Massimiliano Pinto free_dcb is used * 25/06/13 Massimiliano Pinto Added checks to session and router_session - * 28/06/13 Mark Riddoch Changed the free mechanism ti + * 28/06/13 Mark Riddoch Changed the free mechanism to * introduce a zombie state for the * dcb * 02/07/2013 Massimiliano Pinto Addition of delayqlock, delayq and @@ -71,9 +71,12 @@ #include #include -extern int lm_enabled_logfiles_bitmask; +/** Defined in log_manager.cc */ +extern int lm_enabled_logfiles_bitmask; +extern size_t log_ses_count[]; +extern __thread log_info_t tls_log_info; -static DCB *allDCBs = NULL; /* Diagnotics need a list of DCBs */ +static DCB *allDCBs = NULL; /* Diagnostics need a list of DCBs */ static DCB *zombies = NULL; static SPINLOCK dcbspin = SPINLOCK_INIT; static SPINLOCK zombiespin = SPINLOCK_INIT; @@ -85,12 +88,65 @@ static bool dcb_set_state_nomutex( dcb_state_t* old_state); static void dcb_call_callback(DCB *dcb, DCB_REASON reason); static DCB* dcb_get_next (DCB* dcb); -static int dcb_null_write(DCB *dcb, GWBUF *buf); -static int dcb_null_close(DCB *dcb); -static int dcb_null_auth(DCB *dcb, SERVER *server, SESSION *session, GWBUF *buf); +static int dcb_null_write(DCB *dcb, GWBUF *buf); +static int dcb_null_close(DCB *dcb); +static int dcb_null_auth(DCB *dcb, SERVER *server, SESSION *session, GWBUF *buf); +static int dcb_isvalid_nolock(DCB *dcb); + +size_t dcb_get_session_id( + DCB* dcb) +{ + size_t rval; + + if (dcb != NULL && dcb->session != NULL) + { + rval = dcb->session->ses_id; + } + else + { + rval = 0; + } + return rval; +} /** - * Return the pointer to the lsit of zombie DCB's + * Read log info from session through DCB and store values to memory locations + * passed as parameters. 
+ * + * @param dcb DCB + * @param sesid location where session id is to be copied + * @param enabled_logs bit field indicating which log types are enabled for the + * session + * + *@return true if call arguments included memory addresses, false if any of the + *parameters was NULL. + */ +bool dcb_get_ses_log_info( + DCB* dcb, + size_t* sesid, + int* enabled_logs) +{ + bool succp; + + if (dcb == NULL || + dcb->session == NULL || + sesid == NULL || + enabled_logs == NULL) + { + succp = false; + } + else + { + *sesid = dcb->session->ses_id; + *enabled_logs = dcb->session->ses_enabled_logs; + succp = true; + } + + return succp; +} + +/** + * Return the pointer to the list of zombie DCB's * * @return Zombies DCB list */ @@ -120,8 +176,8 @@ DCB *rval; #if defined(SS_DEBUG) rval->dcb_chk_top = CHK_NUM_DCB; rval->dcb_chk_tail = CHK_NUM_DCB; - rval->dcb_errhandle_called = false; #endif + rval->dcb_errhandle_called = false; rval->dcb_role = role; spinlock_init(&rval->dcb_initlock); spinlock_init(&rval->writeqlock); @@ -134,7 +190,7 @@ DCB *rval; rval->readcheck = 0; rval->polloutbusy = 0; rval->writecheck = 0; - rval->fd = -1; + rval->fd = DCBFD_CLOSED; rval->evq.next = NULL; rval->evq.prev = NULL; @@ -150,6 +206,7 @@ DCB *rval; rval->low_water = 0; rval->next = NULL; rval->callbacks = NULL; + rval->data = NULL; rval->remote = NULL; rval->user = NULL; @@ -178,13 +235,15 @@ DCB *rval; void dcb_free(DCB *dcb) { - if (dcb->fd == -1) + if (dcb->fd == DCBFD_CLOSED) + { dcb_final_free(dcb); + } else { LOGIF(LE, (skygw_log_write_flush( LOGFILE_ERROR, - "Error : Attempt to free a DCB via dcb_fee " + "Error : Attempt to free a DCB via dcb_free " "that has been associated with a descriptor."))); } } @@ -251,7 +310,7 @@ DCB *clone; return NULL; } - clone->fd = -1; + clone->fd = DCBFD_CLOSED; clone->flags |= DCBF_CLONE; clone->state = orig->state; clone->data = orig->data; @@ -262,7 +321,10 @@ DCB *clone; clone->protocol = orig->protocol; clone->func.write = dcb_null_write; - 
clone->func.close = dcb_null_close; + /** + * Close triggers closing of router session as well which is needed. + */ + clone->func.close = orig->func.close; clone->func.auth = dcb_null_auth; return clone; @@ -286,6 +348,15 @@ DCB_CALLBACK *cb; dcb->state == DCB_STATE_ALLOC, "dcb not in DCB_STATE_DISCONNECTED not in DCB_STATE_ALLOC state."); + if (DCB_POLL_BUSY(dcb)) + { + /* Check if DCB has outstanding poll events */ + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "dcb_final_free: DCB %p has outstanding events", + dcb))); + } + /*< First remove this DCB from the chain */ spinlock_acquire(&dcbspin); if (allDCBs == dcb) @@ -317,22 +388,25 @@ DCB_CALLBACK *cb; */ { SESSION *local_session = dcb->session; + dcb->session = NULL; CHK_SESSION(local_session); - /*< - * Remove reference from session if dcb is client. - */ - if (local_session->client == dcb) { - local_session->client = NULL; - } - dcb->session = NULL; + /** + * Set session's client pointer NULL so that other threads + * won't try to call dcb_close for client DCB + * after this call. + */ + if (local_session->client == dcb) + { + spinlock_acquire(&local_session->ses_lock); + local_session->client = NULL; + spinlock_release(&local_session->ses_lock); + } session_free(local_session); } } - if (dcb->protocol && ((dcb->flags & DCBF_CLONE) ==0)) - free(dcb->protocol); - if (dcb->data && ((dcb->flags & DCBF_CLONE) ==0)) - free(dcb->data); + if (dcb->protocol && (!DCB_IS_CLONE(dcb))) + free(dcb->protocol); if (dcb->remote) free(dcb->remote); if (dcb->user) @@ -439,6 +513,7 @@ bool succp = false; zombies = tptr; else lptr->memdata.next = tptr; + LOGIF(LD, (skygw_log_write_flush( LOGFILE_DEBUG, "%lu [dcb_process_zombies] Remove dcb " @@ -482,42 +557,57 @@ bool succp = false; while (dcb != NULL) { DCB* dcb_next = NULL; int rc = 0; - /*< - * Close file descriptor and move to clean-up phase. 
- */ - rc = close(dcb->fd); - if (rc < 0) { - int eno = errno; - errno = 0; - LOGIF(LE, (skygw_log_write_flush( - LOGFILE_ERROR, - "Error : Failed to close " - "socket %d on dcb %p due error %d, %s.", - dcb->fd, - dcb, - eno, - strerror(eno)))); - } -#if defined(SS_DEBUG) - else { - LOGIF(LD, (skygw_log_write_flush( - LOGFILE_DEBUG, - "%lu [dcb_process_zombies] Closed socket " - "%d on dcb %p.", - pthread_self(), - dcb->fd, - dcb))); - conn_open[dcb->fd] = false; - ss_debug(dcb->fd = -1;) - } -#endif + if (dcb->fd > 0) + { + /*< + * Close file descriptor and move to clean-up phase. + */ + rc = close(dcb->fd); + + if (rc < 0) + { + int eno = errno; + errno = 0; + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : Failed to close " + "socket %d on dcb %p due error %d, %s.", + dcb->fd, + dcb, + eno, + strerror(eno)))); + } + else + { + dcb->fd = DCBFD_CLOSED; + + LOGIF(LD, (skygw_log_write_flush( + LOGFILE_DEBUG, + "%lu [dcb_process_zombies] Closed socket " + "%d on dcb %p.", + pthread_self(), + dcb->fd, + dcb))); +#if defined(FAKE_CODE) + conn_open[dcb->fd] = false; +#endif /* FAKE_CODE */ + } + } + LOGIF_MAYBE(LT, (dcb_get_ses_log_info( + dcb, + &tls_log_info.li_sesid, + &tls_log_info.li_enabled_logs))); + succp = dcb_set_state(dcb, DCB_STATE_DISCONNECTED, NULL); ss_dassert(succp); dcb_next = dcb->memdata.next; dcb_final_free(dcb); dcb = dcb_next; } + /** Reset threads session data */ + LOGIF(LT, tls_log_info.li_sesid = 0); + return zombies; } @@ -576,7 +666,7 @@ int rc; } fd = dcb->func.connect(dcb, server, session); - if (fd == -1) { + if (fd == DCBFD_CLOSED) { LOGIF(LD, (skygw_log_write( LOGFILE_DEBUG, "%lu [dcb_connect] Failed to connect to server %s:%d, " @@ -602,7 +692,7 @@ int rc; session->client, session->client->fd))); } - ss_dassert(dcb->fd == -1); /*< must be uninitialized at this point */ + ss_dassert(dcb->fd == DCBFD_CLOSED); /*< must be uninitialized at this point */ /*< * Successfully connected to backend. 
Assign file descriptor to dcb */ @@ -623,7 +713,7 @@ int rc; */ rc = poll_add_dcb(dcb); - if (rc == -1) { + if (rc == DCBFD_CLOSED) { dcb_set_state(dcb, DCB_STATE_DISCONNECTED, NULL); dcb_final_free(dcb); return NULL; @@ -655,12 +745,22 @@ int dcb_read( GWBUF *buffer = NULL; int b; int rc; - int n ; + int n; int nread = 0; - int eno = 0; CHK_DCB(dcb); - while (true) + + if (dcb->fd <= 0) + { + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : Read failed, dcb is %s.", + dcb->fd == DCBFD_CLOSED ? "closed" : "cloned, not readable"))); + n = 0; + goto return_n; + } + + while (true) { int bufsize; @@ -668,8 +768,6 @@ int dcb_read( if (rc == -1) { - eno = errno; - errno = 0; LOGIF(LE, (skygw_log_write_flush( LOGFILE_ERROR, "Error : ioctl FIONREAD for dcb %p in " @@ -677,8 +775,8 @@ int dcb_read( dcb, STRDCBSTATE(dcb->state), dcb->fd, - eno, - strerror(eno)))); + errno, + strerror(errno)))); n = -1; goto return_n; } @@ -726,22 +824,18 @@ int dcb_read( "for dcb %p fd %d, due %d, %s.", dcb, dcb->fd, - eno, - strerror(eno)))); + errno, + strerror(errno)))); n = -1; - ss_dassert(buffer != NULL); goto return_n; } GW_NOINTR_CALL(n = read(dcb->fd, GWBUF_DATA(buffer), bufsize); dcb->stats.n_reads++); if (n <= 0) - { - int eno = errno; - errno = 0; - - if (eno != 0 && eno != EAGAIN && eno != EWOULDBLOCK) + { + if (errno != 0 && errno != EAGAIN && errno != EWOULDBLOCK) { LOGIF(LE, (skygw_log_write_flush( LOGFILE_ERROR, @@ -750,10 +844,10 @@ int dcb_read( dcb, STRDCBSTATE(dcb->state), dcb->fd, - eno, - strerror(eno)))); + errno, + strerror(errno)))); } - gwbuf_free(buffer); + gwbuf_free(buffer); goto return_n; } nread += n; @@ -790,6 +884,14 @@ int below_water; below_water = (dcb->high_water && dcb->writeqlen < dcb->high_water) ? 1 : 0; ss_dassert(queue != NULL); + if (dcb->fd <= 0) + { + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : Write failed, dcb is %s.", + dcb->fd == DCBFD_CLOSED ? 
"closed" : "cloned, not writable"))); + return 0; + } /** * SESSION_STATE_STOPPING means that one of the backends is closing * the router session. Some backends may have not completed @@ -803,7 +905,8 @@ int below_water; dcb->state != DCB_STATE_POLLING && dcb->state != DCB_STATE_LISTENING && dcb->state != DCB_STATE_NOPOLLING && - dcb->session->state != SESSION_STATE_STOPPING)) + (dcb->session == NULL || + dcb->session->state != SESSION_STATE_STOPPING))) { LOGIF(LD, (skygw_log_write( LOGFILE_DEBUG, @@ -814,7 +917,7 @@ int below_water; dcb, STRDCBSTATE(dcb->state), dcb->fd))); - ss_dassert(false); + //ss_dassert(false); return 0; } @@ -861,7 +964,7 @@ int below_water; while (queue != NULL) { int qlen; -#if defined(SS_DEBUG) +#if defined(FAKE_CODE) if (dcb->dcb_role == DCB_ROLE_REQUEST_HANDLER && dcb->session != NULL) { @@ -877,14 +980,10 @@ int below_water; fail_next_backend_fd = false; } } -#endif /* SS_DEBUG */ +#endif /* FAKE_CODE */ qlen = GWBUF_LENGTH(queue); GW_NOINTR_CALL( - w = gw_write( -#if defined(SS_DEBUG) - dcb, -#endif - dcb->fd, GWBUF_DATA(queue), qlen); + w = gw_write(dcb, GWBUF_DATA(queue), qlen); dcb->stats.n_writes++; ); @@ -1036,13 +1135,7 @@ int above_water; while (dcb->writeq != NULL) { len = GWBUF_LENGTH(dcb->writeq); - GW_NOINTR_CALL(w = gw_write( -#if defined(SS_DEBUG) - dcb, -#endif - dcb->fd, - GWBUF_DATA(dcb->writeq), - len);); + GW_NOINTR_CALL(w = gw_write(dcb, GWBUF_DATA(dcb->writeq), len);); saved_errno = errno; errno = 0; @@ -1117,10 +1210,14 @@ int above_water; void dcb_close(DCB *dcb) { - int rc; + int rc = 0; CHK_DCB(dcb); + LOGIF(LD, (skygw_log_write(LOGFILE_DEBUG, + "%lu [dcb_close]", + pthread_self()))); + /*< * dcb_close may be called for freshly created dcb, in which case * it only needs to be freed. @@ -1139,41 +1236,47 @@ dcb_close(DCB *dcb) /*< * Stop dcb's listening and modify state accordingly. 
*/ - rc = poll_remove_dcb(dcb); - - ss_dassert(dcb->state == DCB_STATE_NOPOLLING || - dcb->state == DCB_STATE_ZOMBIE); - /** - * close protocol and router session - */ - if (dcb->func.close != NULL) - { - dcb->func.close(dcb); - } - dcb_call_callback(dcb, DCB_REASON_CLOSE); + if (dcb->state == DCB_STATE_POLLING) + { + rc = poll_remove_dcb(dcb); - if (rc == 0) { - LOGIF(LD, (skygw_log_write( - LOGFILE_DEBUG, - "%lu [dcb_close] Removed dcb %p in state %s from " - "poll set.", - pthread_self(), - dcb, - STRDCBSTATE(dcb->state)))); - } else { - LOGIF(LE, (skygw_log_write( - LOGFILE_ERROR, - "%lu [dcb_close] Error : Removing dcb %p in state %s from " - "poll set failed.", - pthread_self(), - dcb, - STRDCBSTATE(dcb->state)))); - } - - if (dcb->state == DCB_STATE_NOPOLLING) - { - dcb_add_to_zombieslist(dcb); - } + if (rc == 0) { + LOGIF(LD, (skygw_log_write( + LOGFILE_DEBUG, + "%lu [dcb_close] Removed dcb %p in state %s from " + "poll set.", + pthread_self(), + dcb, + STRDCBSTATE(dcb->state)))); + } else { + LOGIF(LE, (skygw_log_write( + LOGFILE_ERROR, + "Error : Removing DCB fd == %d in state %s from " + "poll set failed.", + dcb->fd, + STRDCBSTATE(dcb->state)))); + } + + if (rc == 0) + { + /** + * close protocol and router session + */ + if (dcb->func.close != NULL) + { + dcb->func.close(dcb); + } + /** Call possible callback for this DCB in case of close */ + dcb_call_callback(dcb, DCB_REASON_CLOSE); + + if (dcb->state == DCB_STATE_NOPOLLING) + { + dcb_add_to_zombieslist(dcb); + } + } + ss_dassert(dcb->state == DCB_STATE_NOPOLLING || + dcb->state == DCB_STATE_ZOMBIE); + } } /** @@ -1202,9 +1305,9 @@ printDCB(DCB *dcb) dcb->stats.n_buffered); printf("\t\tNo. of Accepts: %d\n", dcb->stats.n_accepts); - printf("\t\tNo. of High Water Events: %d\n", + printf("\t\tNo. of High Water Events: %d\n", dcb->stats.n_high_water); - printf("\t\tNo. of Low Water Events: %d\n", + printf("\t\tNo. 
of Low Water Events: %d\n", dcb->stats.n_low_water); } /** @@ -1389,6 +1492,12 @@ dprintDCB(DCB *pdcb, DCB *dcb) dcb->stats.n_high_water); dcb_printf(pdcb, "\t\tNo. of Low Water Events: %d\n", dcb->stats.n_low_water); + if (DCB_POLL_BUSY(dcb)) + { + dcb_printf(pdcb, "\t\tPending events in the queue: %x %s\n", + dcb->evq.pending_events, dcb->evq.processing ? "(processing)" : ""); + + } if (dcb->flags & DCBF_CLONE) dcb_printf(pdcb, "\t\tDCB is a clone.\n"); #if SPINLOCK_PROFILE @@ -1501,7 +1610,9 @@ void dcb_hashtable_stats( hashsize); dcb_printf(dcb, "\tNo. of entries: %d\n", total); - dcb_printf(dcb, "\tAverage chain length: %.1f\n", (float)total / hashsize); + dcb_printf(dcb, + "\tAverage chain length: %.1f\n", + (hashsize == 0 ? (float)hashsize : (float)total / hashsize)); dcb_printf(dcb, "\tLongest chain length: %d\n", longest); } @@ -1661,23 +1772,28 @@ static bool dcb_set_state_nomutex( "Old state %s > new state %s.", pthread_self(), dcb, - STRDCBSTATE(*old_state), + (old_state == NULL ? 
"NULL" : STRDCBSTATE(*old_state)), STRDCBSTATE(new_state)))); } return succp; } -int gw_write( -#if defined(SS_DEBUG) - DCB* dcb, -#endif - int fd, - const void* buf, - size_t nbytes) +/** + * Write data to a DCB + * + * @param dcb The DCB to write buffer + * @param buf Buffer to write + * @param nbytes Number of bytes to write + * @return Number of written bytes + */ +int +gw_write(DCB *dcb, const void *buf, size_t nbytes) { - int w; -#if defined(SS_DEBUG) - if (dcb_fake_write_errno[fd] != 0) { + int w = 0; + int fd = dcb->fd; +#if defined(FAKE_CODE) + if (fd > 0 && dcb_fake_write_errno[fd] != 0) + { ss_dassert(dcb_fake_write_ev[fd] != 0); w = write(fd, buf, nbytes/2); /*< leave peer to read missing bytes */ @@ -1685,12 +1801,16 @@ int gw_write( w = -1; errno = dcb_fake_write_errno[fd]; } - } else { + } else if (fd > 0) + { w = write(fd, buf, nbytes); } #else - w = write(fd, buf, nbytes); -#endif /* SS_DEBUG && SS_TEST */ + if (fd > 0) + { + w = write(fd, buf, nbytes); + } +#endif /* FAKE_CODE */ #if defined(SS_DEBUG_MYSQL) { @@ -1871,6 +1991,12 @@ DCB_CALLBACK *cb, *nextcb; { nextcb = cb->next; spinlock_release(&dcb->cb_lock); + + LOGIF(LD, (skygw_log_write(LOGFILE_DEBUG, + "%lu [dcb_call_callback] %s", + pthread_self(), + STRDCBREASON(reason)))); + cb->cb(dcb, reason, cb->userdata); spinlock_acquire(&dcb->cb_lock); cb = nextcb; @@ -1890,23 +2016,42 @@ DCB_CALLBACK *cb, *nextcb; int dcb_isvalid(DCB *dcb) { +int rval = 0; + + if (dcb) + { + spinlock_acquire(&dcbspin); + rval = dcb_isvalid_nolock(dcb); + spinlock_release(&dcbspin); + } + + return rval; +} + + +/** + * Check the passed DCB to ensure it is in the list of allDCBS. + * Requires that the DCB list is already locked before call. 
+ * + * @param dcb The DCB to check + * @return 1 if the DCB is in the list, otherwise 0 + */ +static int +dcb_isvalid_nolock(DCB *dcb) +{ DCB *ptr; int rval = 0; - spinlock_acquire(&dcbspin); + if (dcb) + { ptr = allDCBs; - while (ptr) + while (ptr && ptr != dcb) { - if (ptr == dcb) - { - rval = 1; - break; - } ptr = ptr->next; } - spinlock_release(&dcbspin); - - return rval; + rval = (ptr == dcb); + } + return rval; } @@ -1919,33 +2064,11 @@ int rval = 0; static DCB * dcb_get_next (DCB* dcb) { - DCB* p; - spinlock_acquire(&dcbspin); - - p = allDCBs; - - if (dcb == NULL || p == NULL) - { - dcb = p; - - } - else - { - while (p != NULL && dcb != p) - { - p = p->next; - } - - if (p != NULL) - { - dcb = p->next; - } - else - { - dcb = NULL; - } + if (dcb) { + dcb = dcb_isvalid_nolock(dcb) ? dcb->next : NULL; } + else dcb = allDCBs; spinlock_release(&dcbspin); return dcb; @@ -1959,6 +2082,10 @@ dcb_get_next (DCB* dcb) void dcb_call_foreach(DCB_REASON reason) { + LOGIF(LD, (skygw_log_write(LOGFILE_DEBUG, + "%lu [dcb_call_foreach]", + pthread_self()))); + switch (reason) { case DCB_REASON_CLOSE: case DCB_REASON_DRAINED: @@ -1991,7 +2118,7 @@ dcb_call_foreach(DCB_REASON reason) /** * Null protocol write routine used for cloned dcb's. It merely consumes - * buffers written on the cloned DCB. + * buffers written on the cloned DCB and sets the DCB_REPLIED flag. * * @param dcb The descriptor control block * @param buf The buffer being written @@ -2004,6 +2131,9 @@ dcb_null_write(DCB *dcb, GWBUF *buf) { buf = gwbuf_consume(buf, GWBUF_LENGTH(buf)); } + + dcb->flags |= DCBF_REPLIED; + return 1; } diff --git a/server/core/filter.c b/server/core/filter.c index b199e931f..2cc18a42c 100644 --- a/server/core/filter.c +++ b/server/core/filter.c @@ -1,5 +1,5 @@ /* - * This file is distributed as part of MaxScale from SkySQL. It is free + * This file is distributed as part of MaxScale from MariaDB Corporation. 
It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -13,7 +13,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * - * Copyright SkySQL Ab 2014 + * Copyright MariaDB Corporation Ab 2014 */ /** @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include @@ -37,7 +38,10 @@ #include #include -extern int lm_enabled_logfiles_bitmask; +/** Defined in log_manager.cc */ +extern int lm_enabled_logfiles_bitmask; +extern size_t log_ses_count[]; +extern __thread log_info_t tls_log_info; static SPINLOCK filter_spin = SPINLOCK_INIT; /**< Protects the list of all filters */ static FILTER_DEF *allFilters = NULL; /**< The list of all filters */ @@ -87,28 +91,31 @@ filter_free(FILTER_DEF *filter) { FILTER_DEF *ptr; - /* First of all remove from the linked list */ - spinlock_acquire(&filter_spin); - if (allFilters == filter) + if (filter) { - allFilters = filter->next; - } - else - { - ptr = allFilters; - while (ptr && ptr->next != filter) + /* First of all remove from the linked list */ + spinlock_acquire(&filter_spin); + if (allFilters == filter) { - ptr = ptr->next; + allFilters = filter->next; } - if (ptr) - ptr->next = filter->next; - } - spinlock_release(&filter_spin); + else + { + ptr = allFilters; + while (ptr && ptr->next != filter) + { + ptr = ptr->next; + } + if (ptr) + ptr->next = filter->next; + } + spinlock_release(&filter_spin); - /* Clean up session and free the memory */ - free(filter->name); - free(filter->module); - free(filter); + /* Clean up session and free the memory */ + free(filter->name); + free(filter->module); + free(filter); + } } /** @@ -331,6 +338,7 @@ DOWNSTREAM *me; return NULL; } } + if (filter->filter == NULL) { if ((filter->filter = (filter->obj->createInstance)(filter->options, @@ -341,14 +349,25 @@ DOWNSTREAM *me; } 
if ((me = (DOWNSTREAM *)calloc(1, sizeof(DOWNSTREAM))) == NULL) { + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : Memory allocation for filter session failed " + "due to %d,%s.", + errno, + strerror(errno)))); + return NULL; } me->instance = filter->filter; me->routeQuery = (void *)(filter->obj->routeQuery); - me->session = filter->obj->newSession(me->instance, session); - + + if ((me->session=filter->obj->newSession(me->instance, session)) == NULL) + { + free(me); + return NULL; + } filter->obj->setDownstream(me->instance, me->session, downstream); - + return me; } diff --git a/server/core/gateway.c b/server/core/gateway.c index f2c1eefb2..a49f18e86 100644 --- a/server/core/gateway.c +++ b/server/core/gateway.c @@ -1,5 +1,5 @@ /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -13,7 +13,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * - * Copyright SkySQL Ab 2013 + * Copyright MariaDB Corporation Ab 2013-2014 * */ @@ -40,10 +40,13 @@ * @endverbatim */ #define _XOPEN_SOURCE 700 +#include #include #include +#include #include #include +#include #include #include #include @@ -53,6 +56,8 @@ #include #include #include +#include +#include #include #include @@ -70,7 +75,9 @@ #include /** for procname */ -#define _GNU_SOURCE +#if !defined(_GNU_SOURCE) +# define _GNU_SOURCE +#endif extern char *program_invocation_name; extern char *program_invocation_short_name; @@ -80,7 +87,10 @@ extern char *program_invocation_short_name; * Used from log users to check enabled logs prior calling * actual library calls such as skygw_log_write. 
*/ -extern int lm_enabled_logfiles_bitmask; +/** Defined in log_manager.cc */ +extern int lm_enabled_logfiles_bitmask; +extern size_t log_ses_count[]; +extern __thread log_info_t tls_log_info; /* * Server options are passed to the mysql_server_init. Each gateway must have a unique @@ -88,7 +98,7 @@ extern int lm_enabled_logfiles_bitmask; * is not fixed here and will be updated elsewhere. */ static char* server_options[] = { - "SkySQL Gateway", + "MariaDB Corporation MaxScale", "--no-defaults", "--datadir=", "--language=", @@ -136,7 +146,7 @@ const char *progname = NULL; static struct option long_options[] = { {"homedir", required_argument, 0, 'c'}, {"config", required_argument, 0, 'f'}, - {"nodeamon", required_argument, 0, 'd'}, + {"nodaemon", no_argument, 0, 'd'}, {"log", required_argument, 0, 'l'}, {"version", no_argument, 0, 'v'}, {"help", no_argument, 0, '?'}, @@ -171,6 +181,9 @@ static bool resolve_maxscale_conf_fname( char* cnf_file_arg); static bool resolve_maxscale_homedir( char** p_home_dir); + +static char* check_dir_access(char* dirname); + /** * Handler for SIGHUP signal. Reload the configuration for the * gateway. @@ -183,6 +196,21 @@ static void sighup_handler (int i) config_reload(); } +/** + * Handler for SIGUSR1 signal. A SIGUSR1 signal will cause + * maxscale to rotate all log files. 
+ */ +static void sigusr1_handler (int i) +{ + LOGIF(LM, (skygw_log_write( + LOGFILE_MESSAGE, + "Log file flush following reception of SIGUSR1\n"))); + skygw_log_rotate(LOGFILE_ERROR); + skygw_log_rotate(LOGFILE_MESSAGE); + skygw_log_rotate(LOGFILE_TRACE); + skygw_log_rotate(LOGFILE_DEBUG); +} + static void sigterm_handler (int i) { extern void shutdown_server(); @@ -226,7 +254,6 @@ sigfatal_handler (int i) { void *addrs[128]; - char **strings= NULL; int n, count = backtrace(addrs, 128); char** symbols = backtrace_symbols( addrs, count ); @@ -375,6 +402,15 @@ static bool file_write_header( const char* header_buf3; time_t* t = NULL; struct tm* tm = NULL; +#if defined(LAPTOP_TEST) + struct timespec ts1; + ts1.tv_sec = 0; + ts1.tv_nsec = DISKWRITE_LATENCY*1000000; +#endif + +#if !defined(SS_DEBUG) + return true; +#endif if ((t = (time_t *)malloc(sizeof(time_t))) == NULL) { goto return_succp; @@ -387,7 +423,7 @@ static bool file_write_header( *t = time(NULL); *tm = *localtime(t); - header_buf1 = "\n\nSkySQL MaxScale " MAXSCALE_VERSION "\t"; + header_buf1 = "\n\nMariaDB Corporation MaxScale " MAXSCALE_VERSION "\t"; header_buf2 = strdup(asctime(tm)); if (header_buf2 == NULL) { @@ -399,7 +435,7 @@ static bool file_write_header( len2 = strlen(header_buf2); len3 = strlen(header_buf3); #if defined(LAPTOP_TEST) - usleep(DISKWRITE_LATENCY); + nanosleep(&ts1, NULL); #else wbytes1=fwrite((void*)header_buf1, len1, 1, outfile); wbytes2=fwrite((void*)header_buf2, len2, 1, outfile); @@ -472,6 +508,11 @@ static bool resolve_maxscale_conf_fname( goto return_succp; } } + else + { + /** Allocate memory for use of realpath */ + *cnf_full_path = (char *)malloc(PATH_MAX+1); + } /*< * 3. 
argument is valid relative pathname * '-f ../myconf.cnf' @@ -534,6 +575,7 @@ static bool resolve_maxscale_homedir( { bool succp = false; char* tmp; + char* tmp2; char* log_context = NULL; ss_dassert(*p_home_dir == NULL); @@ -541,6 +583,7 @@ static bool resolve_maxscale_homedir( if (*p_home_dir != NULL) { log_context = strdup("Command-line argument"); + tmp = NULL; goto check_home_dir; } /*< @@ -586,12 +629,12 @@ static bool resolve_maxscale_homedir( * isn't specified. Thus, try to access $PWD/MaxScale.cnf . */ tmp = strndup(getenv("PWD"), PATH_MAX); - get_expanded_pathname(p_home_dir, tmp, default_cnf_fname); - + tmp2 = get_expanded_pathname(p_home_dir, tmp, default_cnf_fname); + free(tmp2); /*< full path isn't needed so simply free it */ + if (*p_home_dir != NULL) { log_context = strdup("Current working directory"); - goto check_home_dir; } check_home_dir: @@ -616,7 +659,8 @@ check_home_dir: free(logstr); goto return_succp; } - + +#if WRITABLE_HOME if (!file_is_writable(*p_home_dir)) { char* tailstr = "MaxScale doesn't have write permission " @@ -636,7 +680,7 @@ check_home_dir: free(logstr); goto return_succp; } - +#endif if (!daemon_mode) { fprintf(stderr, @@ -651,6 +695,7 @@ check_home_dir: return_succp: free (tmp); + if (log_context != NULL) { free(log_context); @@ -666,6 +711,42 @@ return_succp: return succp; } +/** + * Check read and write accessibility to a directory. + * @param dirname directory to be checked + * + * @return NULL if directory can be read and written, an error message if either + * read or write is not permitted. 
+ */ +static char* check_dir_access( + char* dirname) +{ + char* errstr = NULL; + + if (dirname == NULL) + { + errstr = strdup("Directory argument is NULL"); + goto retblock; + } + + if (!file_is_readable(dirname)) + { + errstr = strdup("MaxScale doesn't have read permission " + "to MAXSCALE_HOME."); + goto retblock; + } + + if (!file_is_writable(dirname)) + { + errstr = strdup("MaxScale doesn't have write permission " + "to MAXSCALE_HOME. Exiting."); + goto retblock; + } + +retblock: + return errstr; +} + /** * @node Provides error printing for non-formatted error strings. @@ -847,6 +928,13 @@ static char* get_expanded_pathname( if (cnf_file_buf == NULL) { + ss_dassert(cnf_file_buf != NULL); + + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : Memory allocation failed due to %s.", + strerror(errno)))); + free(expanded_path); expanded_path = NULL; goto return_cnf_file_buf; @@ -951,7 +1039,7 @@ int main(int argc, char **argv) int n_services; int eno = 0; /*< local variable for errno */ int opt; - void** threads; /*< thread list */ + void** threads = NULL; /*< thread list */ char mysql_home[PATH_MAX+1]; char datadir_arg[10+PATH_MAX+1]; /*< '--datadir=' + PATH_MAX */ char language_arg[11+PATH_MAX+1]; /*< '--language=' + PATH_MAX */ @@ -974,7 +1062,7 @@ int main(int argc, char **argv) progname = *argv; -#if defined(SS_DEBUG) +#if defined(FAKE_CODE) memset(conn_open, 0, sizeof(bool)*10240); memset(dcb_fake_write_errno, 0, sizeof(unsigned char)*10240); memset(dcb_fake_write_ev, 0, sizeof(__int32_t)*10240); @@ -982,7 +1070,7 @@ int main(int argc, char **argv) fail_next_client_fd = false; fail_next_accept = 0; fail_accept_errno = 0; -#endif +#endif /* FAKE_CODE */ file_write_header(stderr); /*< * Register functions which are called at exit except libmysqld-related, @@ -1088,9 +1176,9 @@ int main(int argc, char **argv) goto return_main; case 'l': - if (strncasecmp(optarg, "file") == 0) + if (strncasecmp(optarg, "file", PATH_MAX) == 0) logtofile = 1; - else if 
(strncasecmp(optarg, "shm") == 0) + else if (strncasecmp(optarg, "shm", PATH_MAX) == 0) logtofile = 0; else { @@ -1126,8 +1214,12 @@ int main(int argc, char **argv) if (!daemon_mode) { fprintf(stderr, - "Info : MaxScale will be run in the terminal process.\n See " + "Info : MaxScale will be run in the terminal process.\n"); +#if defined(SS_DEBUG) + fprintf(stderr, + "\tSee " "the log from the following log files : \n\n"); +#endif } else { @@ -1139,11 +1231,11 @@ int main(int argc, char **argv) int eno = 0; char* fprerr = "Failed to initialize set the signal " "set for MaxScale. Exiting."; - +#if defined(SS_DEBUG) fprintf(stderr, "Info : MaxScale will be run in a daemon process.\n\tSee " "the log from the following log files : \n\n"); - +#endif r = sigfillset(&sigset); if (r != 0) @@ -1166,6 +1258,18 @@ int main(int argc, char **argv) rc = MAXSCALE_INTERNALERROR; goto return_main; } + r = sigdelset(&sigset, SIGUSR1); + + if (r != 0) + { + char* logerr = "Failed to delete signal SIGUSR1 from the " + "signal set of MaxScale. Exiting."; + eno = errno; + errno = 0; + print_log_n_stderr(true, true, fprerr, logerr, eno); + rc = MAXSCALE_INTERNALERROR; + goto return_main; + } r = sigdelset(&sigset, SIGTERM); if (r != 0) @@ -1267,6 +1371,14 @@ int main(int argc, char **argv) "SIGHUP. Exiting."); goto sigset_err; } + l = signal_set(SIGUSR1, sigusr1_handler); + + if (l != 0) + { + logerr = strdup("Failed to set signal handler for " + "SIGUSR1. 
Exiting."); + goto sigset_err; + } l = signal_set(SIGTERM, sigterm_handler); if (l != 0) @@ -1369,13 +1481,51 @@ int main(int argc, char **argv) { if (!resolve_maxscale_homedir(&home_dir)) { - ss_dassert(home_dir == NULL); + ss_dassert(home_dir != NULL); rc = MAXSCALE_HOMELESS; goto return_main; } sprintf(mysql_home, "%s/mysql", home_dir); setenv("MYSQL_HOME", mysql_home, 1); } + else + { + char* log_context = strdup("Home directory command-line argument"); + char* errstr; + + errstr = check_dir_access(home_dir); + + if (errstr != NULL) + { + char* logstr = (char*)malloc(strlen(log_context)+ + 1+ + strlen(errstr)+ + 1); + + snprintf(logstr, + strlen(log_context)+ + 1+ + strlen(errstr)+1, + "%s: %s", + log_context, + errstr); + + print_log_n_stderr(true, true, logstr, logstr, 0); + + free(errstr); + free(logstr); + rc = MAXSCALE_HOMELESS; + goto return_main; + } + else if (!daemon_mode) + { + fprintf(stderr, + "Using %s as MAXSCALE_HOME = %s\n", + log_context, + home_dir); + } + free(log_context); + } /*< * Init Log Manager for MaxScale. 
@@ -1385,11 +1535,19 @@ int main(int argc, char **argv) * argv[0] */ { - char buf[1024]; - char *argv[8]; - + char buf[1024]; + char *argv[8]; + bool succp; + sprintf(buf, "%s/log", home_dir); - mkdir(buf, 0777); + if(mkdir(buf, 0777) != 0){ + + if(errno != EEXIST){ + fprintf(stderr, + "Error: Cannot create log directory: %s\n",buf); + goto return_main; + } + } argv[0] = "MaxScale"; argv[1] = "-j"; argv[2] = buf; @@ -1399,7 +1557,7 @@ int main(int argc, char **argv) argv[4] = "LOGFILE_MESSAGE,LOGFILE_ERROR" "LOGFILE_DEBUG,LOGFILE_TRACE"; argv[5] = NULL; - skygw_logmanager_init(5, argv); + succp = skygw_logmanager_init(5, argv); } else { @@ -1408,7 +1566,13 @@ int main(int argc, char **argv) argv[5] = "-l"; /*< write to syslog */ argv[6] = "LOGFILE_MESSAGE,LOGFILE_ERROR"; /*< ..these logs to syslog */ argv[7] = NULL; - skygw_logmanager_init(7, argv); + succp = skygw_logmanager_init(7, argv); + } + + if (!succp) + { + rc = MAXSCALE_BADCONFIG; + goto return_main; } } @@ -1429,17 +1593,38 @@ int main(int argc, char **argv) * instances of the gateway are beign run on the same * machine. 
*/ - sprintf(datadir, "%s/data%d", home_dir, getpid()); - mkdir(datadir, 0777); + sprintf(datadir, "%s/data", home_dir); + + if(mkdir(datadir, 0777) != 0){ + + if(errno != EEXIST){ + fprintf(stderr, + "Error: Cannot create data directory: %s\n",datadir); + goto return_main; + } + } + + sprintf(datadir, "%s/data/data%d", home_dir, getpid()); + + if(mkdir(datadir, 0777) != 0){ + + if(errno != EEXIST){ + fprintf(stderr, + "Error: Cannot create data directory: %s\n",datadir); + goto return_main; + } + } if (!daemon_mode) { fprintf(stderr, "Home directory : %s" "\nConfiguration file : %s" + "\nLog directory : %s/log" "\nData directory : %s\n\n", home_dir, cnf_file_path, + home_dir, datadir); } LOGIF(LM, (skygw_log_write_flush( @@ -1450,6 +1635,10 @@ int main(int argc, char **argv) LOGFILE_MESSAGE, "Data directory : %s", datadir))); + LOGIF(LM, (skygw_log_write_flush( + LOGFILE_MESSAGE, + "Log directory : %s/log", + home_dir))); LOGIF(LM, (skygw_log_write_flush( LOGFILE_MESSAGE, "Configuration file : %s", @@ -1539,7 +1728,7 @@ int main(int argc, char **argv) } LOGIF(LM, (skygw_log_write( LOGFILE_MESSAGE, - "SkySQL MaxScale %s (C) SkySQL Ab 2013,2014", + "MariaDB Corporation MaxScale %s (C) MariaDB Corporation Ab 2013-2014", MAXSCALE_VERSION))); LOGIF(LM, (skygw_log_write( LOGFILE_MESSAGE, @@ -1608,11 +1797,7 @@ int main(int argc, char **argv) for (n = 0; n < n_threads - 1; n++) { thread_wait(threads[n]); - } - free(threads); - free(home_dir); - free(cnf_file_path); - + } /*< * Wait the flush thread. 
*/ @@ -1632,10 +1817,18 @@ int main(int argc, char **argv) LOGFILE_MESSAGE, "MaxScale shutdown completed."))); + unload_all_modules(); /* Remove Pidfile */ unlink_pidfile(); return_main: + if (threads) + free(threads); + if (home_dir) + free(home_dir); + if (cnf_file_path) + free(cnf_file_path); + return rc; } /*< End of main */ @@ -1643,10 +1836,12 @@ return_main: * Shutdown MaxScale server */ void - shutdown_server() +shutdown_server() { - poll_shutdown(); + service_shutdown(); + poll_shutdown(); hkshutdown(); + memlog_flush_all(); log_flush_shutdown(); } @@ -1669,7 +1864,11 @@ static void log_flush_cb( void* arg) { ssize_t timeout_ms = *(ssize_t *)arg; + struct timespec ts1; + ts1.tv_sec = timeout_ms/1000; + ts1.tv_nsec = (timeout_ms%1000)*1000000; + LOGIF(LM, (skygw_log_write(LOGFILE_MESSAGE, "Started MaxScale log flusher."))); while (!do_exit) { @@ -1677,7 +1876,7 @@ static void log_flush_cb( skygw_log_flush(LOGFILE_MESSAGE); skygw_log_flush(LOGFILE_TRACE); skygw_log_flush(LOGFILE_DEBUG); - usleep(timeout_ms*1000); + nanosleep(&ts1, NULL); } LOGIF(LM, (skygw_log_write(LOGFILE_MESSAGE, "Finished MaxScale log flusher."))); @@ -1736,8 +1935,6 @@ static int write_pid_file(char *home_dir) { /* close file */ close(fd); - - fprintf(stderr, "MaxScale PID %s in pidfile %s\n", pidstr, pidfile); } /* success */ diff --git a/server/core/gw_utils.c b/server/core/gw_utils.c index 2662ab41d..5d4fb5ed2 100644 --- a/server/core/gw_utils.c +++ b/server/core/gw_utils.c @@ -1,5 +1,5 @@ /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -13,7 +13,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
* - * Copyright SkySQL Ab 2013 + * Copyright MariaDB Corporation Ab 2013-2014 * */ @@ -34,6 +34,7 @@ * 25-09-2013 Massimiliano Pinto setipaddress uses getaddrinfo * 06-02-2014 Mark Riddoch Added parse_bindconfig * 10-02-2014 Massimiliano Pinto Added return code to setipaddress + * 02-09-2014 Martin Brampton Replace C++ comment with C comment * *@endverbatim */ @@ -47,7 +48,10 @@ SPINLOCK tmplock = SPINLOCK_INIT; -extern int lm_enabled_logfiles_bitmask; +/** Defined in log_manager.cc */ +extern int lm_enabled_logfiles_bitmask; +extern size_t log_ses_count[]; +extern __thread log_info_t tls_log_info; /* * Set IP address in socket structure in_addr @@ -77,7 +81,7 @@ setipaddress(struct in_addr *a, char *p) { if ((rc = getaddrinfo(p, NULL, &hint, &ai)) != 0) { LOGIF(LE, (skygw_log_write_flush( LOGFILE_ERROR, - "Error : getaddrinfo failed for [%s] due [%s]", + "Error: Failed to obtain address for host %s, %s", p, gai_strerror(rc)))); @@ -90,7 +94,7 @@ setipaddress(struct in_addr *a, char *p) { if ((rc = getaddrinfo(p, NULL, &hint, &ai)) != 0) { LOGIF(LE, (skygw_log_write_flush( LOGFILE_ERROR, - "Error : getaddrinfo failed for [%s] due [%s]", + "Error: Failed to obtain address for host %s, %s", p, gai_strerror(rc)))); @@ -148,7 +152,7 @@ void gw_daemonize(void) { } if (pid != 0) { - // exit from main + /* exit from main */ exit(0); } @@ -174,7 +178,7 @@ void gw_daemonize(void) { int parse_bindconfig(char *config, unsigned short def_port, struct sockaddr_in *addr) { -char *port, buf[1024]; +char *port, buf[1024 + 1]; short pnum; struct hostent *hp; diff --git a/server/core/gwbitmask.c b/server/core/gwbitmask.c index d3f031080..9fb529629 100644 --- a/server/core/gwbitmask.c +++ b/server/core/gwbitmask.c @@ -1,5 +1,5 @@ /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. 
It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -13,7 +13,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * - * Copyright SkySQL Ab 2013 + * Copyright MariaDB Corporation Ab 2013-2014 */ #include #include @@ -93,7 +93,7 @@ unsigned char mask; { bitmask->bits = realloc(bitmask->bits, (bitmask->length + BIT_LENGTH_INC) / 8); - memset(bitmask + (bitmask->length / 8), 0, + memset(bitmask->bits + (bitmask->length / 8), 0, BIT_LENGTH_INC / 8); bitmask->length += (BIT_LENGTH_INC / 8); } @@ -121,7 +121,7 @@ unsigned char mask; { bitmask->bits = realloc(bitmask->bits, (bitmask->length + BIT_LENGTH_INC) / 8); - memset(bitmask + (bitmask->length / 8), 0, + memset(bitmask->bits + (bitmask->length / 8), 0, BIT_LENGTH_INC / 8); bitmask->length += (BIT_LENGTH_INC / 8); } @@ -150,7 +150,7 @@ unsigned char mask; { bitmask->bits = realloc(bitmask->bits, (bitmask->length + BIT_LENGTH_INC) / 8); - memset(bitmask + (bitmask->length / 8), 0, + memset(bitmask->bits + (bitmask->length / 8), 0, BIT_LENGTH_INC / 8); bitmask->length += (BIT_LENGTH_INC / 8); } diff --git a/server/core/hashtable.c b/server/core/hashtable.c index 0f129e825..ab979e472 100644 --- a/server/core/hashtable.c +++ b/server/core/hashtable.c @@ -1,5 +1,5 @@ /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -13,7 +13,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
* - * Copyright SkySQL Ab 2013 + * Copyright MariaDB Corporation Ab 2013-2014 */ #include #include @@ -198,7 +198,12 @@ HASHENTRIES *entry, *ptr; * @param vfreefn The free function for the value */ void -hashtable_memory_fns(HASHTABLE *table, HASHMEMORYFN kcopyfn, HASHMEMORYFN vcopyfn, HASHMEMORYFN kfreefn, HASHMEMORYFN vfreefn) +hashtable_memory_fns( + HASHTABLE *table, + HASHMEMORYFN kcopyfn, + HASHMEMORYFN vcopyfn, + HASHMEMORYFN kfreefn, + HASHMEMORYFN vfreefn) { if (kcopyfn != NULL) table->kcopyfn = kcopyfn; @@ -258,7 +263,9 @@ hashtable_add(HASHTABLE *table, void *key, void *value) /* check succesfull key copy */ if ( ptr->key == NULL) { + free(ptr); hashtable_write_unlock(table); + return 0; } @@ -269,9 +276,11 @@ hashtable_add(HASHTABLE *table, void *key, void *value) if ( ptr->value == NULL) { /* remove the key ! */ table->kfreefn(ptr->key); + free(ptr); /* value not copied, return */ hashtable_write_unlock(table); + return 0; } @@ -279,6 +288,7 @@ hashtable_add(HASHTABLE *table, void *key, void *value) table->entries[hashkey % table->hashsize] = ptr; } hashtable_write_unlock(table); + return 1; } @@ -439,28 +449,33 @@ void hashtable_get_stats( int i; int j; - ht = (HASHTABLE *)table; - CHK_HASHTABLE(ht); - *nelems = 0; - *longest = 0; - hashtable_read_lock(ht); - - for (i = 0; i < ht->hashsize; i++) + *nelems = 0; + *longest = 0; + *hashsize = 0; + + if (table != NULL) { - j = 0; - entries = ht->entries[i]; - while (entries) + ht = (HASHTABLE *)table; + CHK_HASHTABLE(ht); + hashtable_read_lock(ht); + + for (i = 0; i < ht->hashsize; i++) { - j++; - entries = entries->next; + j = 0; + entries = ht->entries[i]; + while (entries) + { + j++; + entries = entries->next; + } + *nelems += j; + if (j > *longest) { + *longest = j; + } } - *nelems += j; - if (j > *longest) { - *longest = j; - } + *hashsize = ht->hashsize; + hashtable_read_unlock(ht); } - *hashsize = ht->hashsize; - hashtable_read_unlock(ht); } @@ -493,7 +508,7 @@ hashtable_read_lock(HASHTABLE *table) 
; spinlock_acquire(&table->spin); } - table->n_readers++; + atomic_add(&table->n_readers, 1); spinlock_release(&table->spin); } diff --git a/server/core/hint.c b/server/core/hint.c index 2d716771a..65a3816c9 100644 --- a/server/core/hint.c +++ b/server/core/hint.c @@ -13,7 +13,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * - * Copyright SkySQL Ab 2014 + * Copyright MariaDB Corporation Ab 2014 */ #include #include @@ -38,6 +38,8 @@ * * @param hint The hint list to duplicate * @return A duplicate of the list + * + * Note : Optimize this to use version numbering instead of copying memory */ HINT * hint_dup(HINT *hint) @@ -116,7 +118,7 @@ HINT *hint; return head; hint->next = head; hint->type = HINT_PARAMETER; - hint->data = pname; + hint->data = strdup(pname); hint->value = strdup(value); return hint; } @@ -151,4 +153,4 @@ bool hint_exists( p_hint = &(*p_hint)->next; } return succp; -} \ No newline at end of file +} diff --git a/server/core/housekeeper.c b/server/core/housekeeper.c index fe6cb9047..2225e628f 100644 --- a/server/core/housekeeper.c +++ b/server/core/housekeeper.c @@ -1,5 +1,5 @@ /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -13,7 +13,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * - * Copyright SkySQL Ab 2014 + * Copyright MariaDB Corporation Ab 2014 */ #include #include @@ -24,11 +24,21 @@ /** * @file housekeeper.c Provide a mechanism to run periodic tasks * + * The housekeeper provides a mechanism to allow for tasks, function + * calls basically, to be run on a tiem basis. 
A task may be run + * repeatedly, with a given frequency (in seconds), or may be a one + * shot task that will only be run once after a specified number of + * seconds. + * + * The housekeeper also maintains a global variable, hkheartbeat, that + * is incremented every 100ms. + * * @verbatim * Revision History * * Date Who Description * 29/08/14 Mark Riddoch Initial implementation + * 22/10/14 Mark Riddoch Addition of one-shot tasks * * @endverbatim */ @@ -43,6 +53,7 @@ static HKTASK *tasks = NULL; static SPINLOCK tasklock = SPINLOCK_INIT; static int do_shutdown = 0; +unsigned long hkheartbeat = 0; static void hkthread(void *); @@ -69,7 +80,7 @@ hkinit() * @param taskfn The function to call for the task * @param data Data to pass to the task function * @param frequency How often to run the task, expressed in seconds - * @return Return the tiem in seconds when the task will be first run if the task was added, otherwise 0 + * @return Return the time in seconds when the task will be first run if the task was added, otherwise 0 */ int hktask_add(char *name, void (*taskfn)(void *), void *data, int frequency) @@ -88,6 +99,7 @@ HKTASK *task, *ptr; task->task = taskfn; task->data = data; task->frequency = frequency; + task->type = HK_REPEATED; task->nextdue = time(0) + frequency; task->next = NULL; spinlock_acquire(&tasklock); @@ -112,6 +124,61 @@ HKTASK *task, *ptr; return task->nextdue; } +/** + * Add a one-shot task to the housekeeper task list + * + * Task names must be unique. 
+ * + * @param name The unique name for this housekeeper task + * @param taskfn The function to call for the task + * @param data Data to pass to the task function + * @param when How many second until the task is executed + * @return Return the time in seconds when the task will be first run if the task was added, otherwise 0 + * + */ +int +hktask_oneshot(char *name, void (*taskfn)(void *), void *data, int when) +{ +HKTASK *task, *ptr; + + if ((task = (HKTASK *)malloc(sizeof(HKTASK))) == NULL) + { + return 0; + } + if ((task->name = strdup(name)) == NULL) + { + free(task); + return 0; + } + task->task = taskfn; + task->data = data; + task->frequency = 0; + task->type = HK_ONESHOT; + task->nextdue = time(0) + when; + task->next = NULL; + spinlock_acquire(&tasklock); + ptr = tasks; + while (ptr && ptr->next) + { + if (strcmp(ptr->name, name) == 0) + { + spinlock_release(&tasklock); + free(task->name); + free(task); + return 0; + } + ptr = ptr->next; + } + if (ptr) + ptr->next = task; + else + tasks = task; + spinlock_release(&tasklock); + + return task->nextdue; +} + + /** * Remove a named task from the housekeepers task list * @@ -171,12 +238,17 @@ HKTASK *ptr; time_t now; void (*taskfn)(void *); void *taskdata; +int i; for (;;) { - if (do_shutdown) - return; - thread_millisleep(1000); + for (i = 0; i < 10; i++) + { + if (do_shutdown) + return; + thread_millisleep(100); + hkheartbeat++; + } now = time(0); spinlock_acquire(&tasklock); ptr = tasks; @@ -189,6 +261,8 @@ void *taskdata; taskdata = ptr->data; spinlock_release(&tasklock); (*taskfn)(taskdata); + if (ptr->type == HK_ONESHOT) + hktask_remove(ptr->name); spinlock_acquire(&tasklock); ptr = tasks; } @@ -208,3 +282,33 @@ hkshutdown() { do_shutdown = 1; } + +/** + * Show the tasks that are scheduled for the house keeper + * + * @param pdcb The DCB to send to output + */ +void +hkshow_tasks(DCB *pdcb) +{ +HKTASK *ptr; +struct tm tm; +char buf[40]; + + dcb_printf(pdcb, "%-25s | Type | Frequency | Next Due\n", 
"Name"); + dcb_printf(pdcb, "--------------------------+----------+-----------+-------------------------\n"); + spinlock_acquire(&tasklock); + ptr = tasks; + while (ptr) + { + localtime_r(&ptr->nextdue, &tm); + asctime_r(&tm, buf); + dcb_printf(pdcb, "%-25s | %-8s | %-9d | %s", + ptr->name, + ptr->type == HK_REPEATED ? "Repeated" : "One-Shot", + ptr->frequency, + buf); + ptr = ptr->next; + } + spinlock_release(&tasklock); +} diff --git a/server/core/load_utils.c b/server/core/load_utils.c index cba0c6533..3fe975f5c 100644 --- a/server/core/load_utils.c +++ b/server/core/load_utils.c @@ -1,5 +1,5 @@ /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -13,7 +13,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * - * Copyright SkySQL Ab 2013 + * Copyright MariaDB Corporation Ab 2013-2014 */ /** @@ -43,7 +43,10 @@ #include #include -extern int lm_enabled_logfiles_bitmask; +/** Defined in log_manager.cc */ +extern int lm_enabled_logfiles_bitmask; +extern size_t log_ses_count[]; +extern __thread log_info_t tls_log_info; static MODULES *registered = NULL; @@ -80,7 +83,7 @@ void * load_module(const char *module, const char *type) { char *home, *version; -char fname[MAXPATHLEN]; +char fname[MAXPATHLEN+1]; void *dlhandle, *sym; char *(*ver)(); void *(*ep)(), *modobj; @@ -94,11 +97,12 @@ MODULE_INFO *mod_info = NULL; * * Search of the shared object. 
*/ - sprintf(fname, "./lib%s.so", module); + snprintf(fname,MAXPATHLEN+1, "./lib%s.so", module); + if (access(fname, F_OK) == -1) { home = get_maxscale_home (); - sprintf(fname, "%s/modules/lib%s.so", home, module); + snprintf(fname, MAXPATHLEN+1,"%s/modules/lib%s.so", home, module); if (access(fname, F_OK) == -1) { @@ -326,12 +330,28 @@ MODULES *ptr; * The module is now not in the linked list and all * memory related to it can be freed */ + dlclose(mod->handle); free(mod->module); free(mod->type); free(mod->version); free(mod); } +/** + * Unload all modules + * + * Remove all the modules from the system, called during shutdown + * to allow termination hooks to be called. + */ +void +unload_all_modules() +{ + while (registered) + { + unregister_module(registered->module); + } +} + /** * Print Modules * diff --git a/server/core/maxkeys.c b/server/core/maxkeys.c index 9ae59f8c7..ad1e28f86 100644 --- a/server/core/maxkeys.c +++ b/server/core/maxkeys.c @@ -1,5 +1,5 @@ /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -13,7 +13,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * - * Copyright SkySQL Ab 2013 + * Copyright MariaDB Corporation Ab 2013-2014 */ /** diff --git a/server/core/maxpasswd.c b/server/core/maxpasswd.c index d11980ba3..0c728d869 100644 --- a/server/core/maxpasswd.c +++ b/server/core/maxpasswd.c @@ -1,5 +1,5 @@ /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. 
It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -13,7 +13,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * - * Copyright SkySQL Ab 2013 + * Copyright MariaDB Corporation Ab 2013-2014 */ /** @@ -39,7 +39,7 @@ int main(int argc, char **argv) { -char *enc; + char *enc, *pw; if (argc != 2) { @@ -47,9 +47,21 @@ char *enc; exit(1); } - if ((enc = encryptPassword(argv[1])) != NULL) + pw = calloc(81,sizeof(char)); + + if(pw == NULL){ + fprintf(stderr, "Error: cannot allocate enough memory."); + exit(1); + } + + strncpy(pw,argv[1],80); + + if ((enc = encryptPassword(pw)) != NULL){ printf("%s\n", enc); - else + }else{ fprintf(stderr, "Failed to encode the password\n"); + } + + free(pw); return 0; } diff --git a/server/core/memlog.c b/server/core/memlog.c new file mode 100644 index 000000000..43df8c5da --- /dev/null +++ b/server/core/memlog.c @@ -0,0 +1,254 @@ +/* + * This file is distributed as part of the MariaDB MaxScale. It is free + * software: you can redistribute it and/or modify it under the terms of the + * GNU General Public License as published by the Free Software Foundation, + * version 2. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 51 + * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Copyright MariaDB Ab 2014 + */ + +/** + * @file memlog.c - Implementation of memory logging mechanism for debug purposes + * + * @verbatim + * Revision History + * + * Date Who Description + * 26/09/14 Mark Riddoch Initial implementation + * + * @endverbatim + */ +#include +#include + +static MEMLOG *memlogs = NULL; +static SPINLOCK *memlock = SPINLOCK_INIT; + +/** + * Create a new instance of a memory logger. + * + * @param name The name of the memory log + * @param type The type of item being logged + * @param size The number of items to store in memory before flushign to disk + * + * @return MEMLOG* A memory log handle + */ +MEMLOG * +memlog_create(char *name, MEMLOGTYPE type, int size) +{ +MEMLOG *log; + + if ((log = (MEMLOG *)malloc(sizeof(MEMLOG))) == NULL) + { + return NULL; + } + + log->name = strdup(name); + spinlock_init(&log->lock); + log->type = type; + log->offset = 0; + log->size = size; + log->flags = 0; + switch (type) + { + case ML_INT: + log->values = malloc(sizeof(int) * size); + break; + case ML_LONG: + log->values = malloc(sizeof(long) * size); + break; + case ML_LONGLONG: + log->values = malloc(sizeof(long long) * size); + break; + case ML_STRING: + log->values = malloc(sizeof(char *) * size); + break; + } + if (log->values == NULL) + { + free(log); + return NULL; + } + spinlock_acquire(&memlock); + log->next = memlogs; + memlogs = log; + spinlock_release(&memlock); + + return log; +} + +/** + * Destroy a memory logger any unwritten data will be flushed to disk + * + * @param log The memory log to destroy + */ +void +memlog_destroy(MEMLOG *log) +{ +MEMLOG *ptr; + + if ((log->flags & MLNOAUTOFLUSH) == 0) + memlog_flush(log); + free(log->values); + + spinlock_acquire(&memlock); + if (memlogs == log) + memlogs = log->next; + else + { + ptr = memlogs; + while (ptr && ptr->next != log) + ptr = ptr->next; + if (ptr) + ptr->next = log->next; + } + spinlock_release(&memlock); + free(log->name); + free(log); +} + +/** + * Log a data item to the 
memory logger + * + * @param log The memory logger + * @param value The value to log + */ +void +memlog_log(MEMLOG *log, void *value) +{ + if (!log) + return; + spinlock_acquire(&log->lock); + switch (log->type) + { + case ML_INT: + ((int *)(log->values))[log->offset] = (int)value; + break; + case ML_LONG: + ((long *)(log->values))[log->offset] = (long)value; + break; + case ML_LONGLONG: + ((long long *)(log->values))[log->offset] = (long long)value; + break; + case ML_STRING: + ((char **)(log->values))[log->offset] = (char *)value; + break; + } + log->offset++; + if (log->offset == log->size) + { + if ((log->flags & MLNOAUTOFLUSH) == 0) + memlog_flush(log); + log->offset = 0; + log->iflags = MLWRAPPED; + } + spinlock_release(&log->lock); +} + +/** + * Flush all memlogs to disk, called during shutdown + * + */ +void +memlog_flush_all() +{ +MEMLOG *log; + + spinlock_acquire(&memlock); + log = memlogs; + while (log) + { + spinlock_acquire(&log->lock); + memlog_flush(log); + spinlock_release(&log->lock); + log = log->next; + } + spinlock_release(&memlock); +} + +/** + * Set the flags for a memlog + * + * @param log The memlog to set the flags for + * @param flags The new flags values + */ +void +memlog_set(MEMLOG *log, unsigned int flags) +{ + log->flags = flags; +} + +/** + * Flush a memory log to disk + * + * Assumes the the log->lock has been acquired by the caller + * + * @param log The memory log to flush + */ +void +memlog_flush(MEMLOG *log) +{ +FILE *fp; +int i; + + if ((fp = fopen(log->name, "a")) == NULL) + return; + if ((log->flags & MLNOAUTOFLUSH) && (log->iflags & MLWRAPPED)) + { + for (i = 0; i < log->size; i++) + { + int ind = (i + log->offset) % log->size; + switch (log->type) + { + case ML_INT: + fprintf(fp, "%d\n", + ((int *)(log->values))[ind]); + break; + case ML_LONG: + fprintf(fp, "%ld\n", + ((long *)(log->values))[ind]); + break; + case ML_LONGLONG: + fprintf(fp, "%lld\n", + ((long long *)(log->values))[ind]); + break; + case ML_STRING: + 
fprintf(fp, "%s\n", + ((char **)(log->values))[ind]); + break; + } + } + } + else + { + for (i = 0; i < log->offset; i++) + { + switch (log->type) + { + case ML_INT: + fprintf(fp, "%d\n", ((int *)(log->values))[i]); + break; + case ML_LONG: + fprintf(fp, "%ld\n", ((long *)(log->values))[i]); + break; + case ML_LONGLONG: + fprintf(fp, "%lld\n", ((long long *)(log->values))[i]); + break; + case ML_STRING: + fprintf(fp, "%s\n", ((char **)(log->values))[i]); + break; + } + } + } + log->offset = 0; + fclose(fp); +} diff --git a/server/core/modutil.c b/server/core/modutil.c index ec7f8530b..d6dbfa7b1 100644 --- a/server/core/modutil.c +++ b/server/core/modutil.c @@ -1,5 +1,5 @@ /* - * This file is distributed as part of MaxScale from SkySQL. It is free + * This file is distributed as part of MaxScale from MariaDB Corporation. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -13,7 +13,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * - * Copyright SkySQL Ab 2014 + * Copyright MariaDB Corporation Ab 2014 */ /** @@ -22,8 +22,9 @@ * @verbatim * Revision History * - * Date Who Description - * 04/06/14 Mark Riddoch Initial implementation + * Date Who Description + * 04/06/14 Mark Riddoch Initial implementation + * 24/10/14 Massimiliano Pinto Added modutil_send_mysql_err_packet, modutil_create_mysql_err_msg * * @endverbatim */ @@ -120,6 +121,40 @@ unsigned char *ptr; return 1; } +/** + * Calculate the length of MySQL packet and how much is missing from the GWBUF + * passed as parameter. + * + * This routine assumes that there is only one MySQL packet in the buffer. 
+ * + * @param buf buffer list including the query, may consist of + * multiple buffers + * @param nbytes_missing pointer to missing bytecount + * + * @return the length of MySQL packet and writes missing bytecount to + * nbytes_missing. + */ +int modutil_MySQL_query_len( + GWBUF* buf, + int* nbytes_missing) +{ + int len; + int buflen; + + if (!modutil_is_SQL(buf)) + { + len = 0; + goto retblock; + } + len = MYSQL_GET_PACKET_LEN((uint8_t *)GWBUF_DATA(buf)); + *nbytes_missing = len-1; + buflen = gwbuf_length(buf); + + *nbytes_missing -= buflen-5; + +retblock: + return len; +} /** @@ -167,12 +202,58 @@ GWBUF *addition; *ptr++ = (newlength + 1) & 0xff; *ptr++ = ((newlength + 1) >> 8) & 0xff; *ptr++ = ((newlength + 1) >> 16) & 0xff; + addition->gwbuf_type = orig->gwbuf_type; orig->next = addition; } return orig; } + +/** + * Extract the SQL from a COM_QUERY packet and return in a NULL terminated buffer. + * The buffer should be freed by the caller when it is no longer required. + * + * If the packet is not a COM_QUERY packet then the function will return NULL + * + * @param buf The buffer chain + * @return Null terminated string containing query text or NULL on error + */ +char * +modutil_get_SQL(GWBUF *buf) +{ +unsigned int len, length; +unsigned char *ptr, *dptr, *rval = NULL; + + if (!modutil_is_SQL(buf)) + return rval; + ptr = GWBUF_DATA(buf); + length = *ptr++; + length += (*ptr++ << 8); + length += (*ptr++ << 16); + + if ((rval = (char *)malloc(length + 1)) == NULL) + return NULL; + dptr = rval; + ptr += 2; // Skip sequence id and COM_QUERY byte + len = GWBUF_LENGTH(buf) - 5; + while (buf && length > 0) + { + int clen = length > len ? len : length; + memcpy(dptr, ptr, clen); + dptr += clen; + length -= clen; + buf = buf->next; + if (buf) + { + ptr = GWBUF_DATA(buf); + len = GWBUF_LENGTH(buf); + } + } + *dptr = 0; + return rval; +} + /** * Copy query string from GWBUF buffer to separate memory area. 
* @@ -187,7 +268,7 @@ modutil_get_query(GWBUF *buf) uint8_t* packet; mysql_server_cmd_t packet_type; size_t len; - char* query_str; + char* query_str = NULL; packet = GWBUF_DATA(buf); packet_type = packet[4]; @@ -205,7 +286,7 @@ modutil_get_query(GWBUF *buf) case MYSQL_COM_QUERY: len = MYSQL_GET_PACKET_LEN(packet)-1; /*< distract 1 for packet type byte */ - if ((query_str = (char *)malloc(len+1)) == NULL) + if (len < 1 || len > ~(size_t)0 - 1 || (query_str = (char *)malloc(len+1)) == NULL) { goto retblock; } @@ -215,7 +296,7 @@ modutil_get_query(GWBUF *buf) default: len = strlen(STRPACKETTYPE(packet_type))+1; - if ((query_str = (char *)malloc(len+1)) == NULL) + if (len < 1 || len > ~(size_t)0 - 1 || (query_str = (char *)malloc(len+1)) == NULL) { goto retblock; } @@ -226,3 +307,189 @@ modutil_get_query(GWBUF *buf) retblock: return query_str; } + + +/** + * create a GWBUFF with a MySQL ERR packet + * + * @param packet_number MySQL protocol sequence number in the packet + * @param in_affected_rows MySQL affected rows + * @param mysql_errno The MySQL errno + * @param sqlstate_msg The MySQL State Message + * @param mysql_message The Error Message + * @return The allocated GWBUF or NULL on failure +*/ +GWBUF *modutil_create_mysql_err_msg( + int packet_number, + int affected_rows, + int merrno, + const char *statemsg, + const char *msg) +{ + uint8_t *outbuf = NULL; + uint32_t mysql_payload_size = 0; + uint8_t mysql_packet_header[4]; + uint8_t *mysql_payload = NULL; + uint8_t field_count = 0; + uint8_t mysql_err[2]; + uint8_t mysql_statemsg[6]; + unsigned int mysql_errno = 0; + const char *mysql_error_msg = NULL; + const char *mysql_state = NULL; + GWBUF *errbuf = NULL; + + if (statemsg == NULL || msg == NULL) + { + return NULL; + } + mysql_errno = (unsigned int)merrno; + mysql_error_msg = msg; + mysql_state = statemsg; + + field_count = 0xff; + + gw_mysql_set_byte2(mysql_err, mysql_errno); + + mysql_statemsg[0]='#'; + memcpy(mysql_statemsg+1, mysql_state, 5); + + 
mysql_payload_size = sizeof(field_count) + + sizeof(mysql_err) + + sizeof(mysql_statemsg) + + strlen(mysql_error_msg); + + /* allocate memory for packet header + payload */ + errbuf = gwbuf_alloc(sizeof(mysql_packet_header) + mysql_payload_size); + ss_dassert(errbuf != NULL); + + if (errbuf == NULL) + { + return NULL; + } + outbuf = GWBUF_DATA(errbuf); + + /** write packet header and packet number */ + gw_mysql_set_byte3(mysql_packet_header, mysql_payload_size); + mysql_packet_header[3] = packet_number; + + /** write header */ + memcpy(outbuf, mysql_packet_header, sizeof(mysql_packet_header)); + + mysql_payload = outbuf + sizeof(mysql_packet_header); + + /** write field */ + memcpy(mysql_payload, &field_count, sizeof(field_count)); + mysql_payload = mysql_payload + sizeof(field_count); + + /** write errno */ + memcpy(mysql_payload, mysql_err, sizeof(mysql_err)); + mysql_payload = mysql_payload + sizeof(mysql_err); + + /** write sqlstate */ + memcpy(mysql_payload, mysql_statemsg, sizeof(mysql_statemsg)); + mysql_payload = mysql_payload + sizeof(mysql_statemsg); + + /** write error message */ + memcpy(mysql_payload, mysql_error_msg, strlen(mysql_error_msg)); + + return errbuf; +} + +/** + * modutil_send_mysql_err_packet + * + * Send a MySQL protocol Generic ERR message, to the dcb + * + * @param dcb The DCB to send the packet + * @param packet_number MySQL protocol sequence number in the packet + * @param in_affected_rows MySQL affected rows + * @param mysql_errno The MySQL errno + * @param sqlstate_msg The MySQL State Message + * @param mysql_message The Error Message + * @return 0 for successful dcb write or 1 on failure + * + */ +int modutil_send_mysql_err_packet ( + DCB *dcb, + int packet_number, + int in_affected_rows, + int mysql_errno, + const char *sqlstate_msg, + const char *mysql_message) +{ + GWBUF* buf; + + buf = modutil_create_mysql_err_msg(packet_number, in_affected_rows, mysql_errno, sqlstate_msg, mysql_message); + + return dcb->func.write(dcb, buf); 
+} + +/** + * Buffer contains at least one of the following: + * complete [complete] [partial] mysql packet + * + * return pointer to gwbuf containing a complete packet or + * NULL if no complete packet was found. + */ +GWBUF* modutil_get_next_MySQL_packet( + GWBUF** p_readbuf) +{ + GWBUF* packetbuf; + GWBUF* readbuf; + size_t buflen; + size_t packetlen; + size_t totalbuflen; + uint8_t* data; + size_t nbytes_copied = 0; + uint8_t* target; + + readbuf = *p_readbuf; + + if (readbuf == NULL) + { + packetbuf = NULL; + goto return_packetbuf; + } + CHK_GWBUF(readbuf); + + if (GWBUF_EMPTY(readbuf)) + { + packetbuf = NULL; + goto return_packetbuf; + } + totalbuflen = gwbuf_length(readbuf); + data = (uint8_t *)GWBUF_DATA((readbuf)); + packetlen = MYSQL_GET_PACKET_LEN(data)+4; + + /** packet is incomplete */ + if (packetlen > totalbuflen) + { + packetbuf = NULL; + goto return_packetbuf; + } + + packetbuf = gwbuf_alloc(packetlen); + target = GWBUF_DATA(packetbuf); + packetbuf->gwbuf_type = readbuf->gwbuf_type; /*< Copy the type too */ + /** + * Copy first MySQL packet to packetbuf and leave posible other + * packets to read buffer. + */ + while (nbytes_copied < packetlen && totalbuflen > 0) + { + uint8_t* src = GWBUF_DATA((*p_readbuf)); + size_t bytestocopy; + + buflen = GWBUF_LENGTH((*p_readbuf)); + bytestocopy = MIN(buflen,packetlen-nbytes_copied); + + memcpy(target+nbytes_copied, src, bytestocopy); + *p_readbuf = gwbuf_consume((*p_readbuf), bytestocopy); + totalbuflen = gwbuf_length((*p_readbuf)); + nbytes_copied += bytestocopy; + } + ss_dassert(buflen == 0 || nbytes_copied == packetlen); + +return_packetbuf: + return packetbuf; +} diff --git a/server/core/monitor.c b/server/core/monitor.c index 101ce2e10..23b8ee4ef 100644 --- a/server/core/monitor.c +++ b/server/core/monitor.c @@ -1,5 +1,5 @@ /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. 
It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -13,7 +13,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * - * Copyright SkySQL Ab 2013 + * Copyright MariaDB Corporation Ab 2013-2014 */ /** @@ -26,6 +26,8 @@ * 08/07/13 Mark Riddoch Initial implementation * 23/05/14 Massimiliano Pinto Addition of monitor_interval parameter * and monitor id + * 30/10/14 Massimiliano Pinto Addition of disable_master_failback parameter + * 07/11/14 Massimiliano Pinto Addition of monitor network timeouts * * @endverbatim */ @@ -38,7 +40,10 @@ #include #include -extern int lm_enabled_logfiles_bitmask; +/** Defined in log_manager.cc */ +extern int lm_enabled_logfiles_bitmask; +extern size_t log_ses_count[]; +extern __thread log_info_t tls_log_info; static MONITOR *allMonitors = NULL; static SPINLOCK monLock = SPINLOCK_INIT; @@ -329,3 +334,31 @@ monitorDetectStaleMaster(MONITOR *mon, int enable) mon->module->detectStaleMaster(mon->handle, enable); } } + +/** + * Disable Master Failback + * + * @param mon The monitor instance + * @param disable The value 1 disable the failback, 0 keeps it + */ +void +monitorDisableMasterFailback(MONITOR *mon, int disable) +{ + if (mon->module->disableMasterFailback != NULL) { + mon->module->disableMasterFailback(mon->handle, disable); + } +} + +/** + * Set Monitor timeouts for connect/read/write + * + * @param mon The monitor instance + * @param type The timeout handling type + * @param value The timeout to set + */ +void +monitorSetNetworkTimeout(MONITOR *mon, int type, int value) { + if (mon->module->setNetworkTimeout != NULL) { + mon->module->setNetworkTimeout(mon->handle, type, value); + } +} diff --git a/server/core/poll.c b/server/core/poll.c index 12cb1d69d..012591711 100644 --- a/server/core/poll.c +++ b/server/core/poll.c @@ -1,5 
+1,5 @@ /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -13,8 +13,9 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * - * Copyright SkySQL Ab 2013 + * Copyright MariaDB Corporation Ab 2013-2014 */ +#include #include #include #include @@ -32,13 +33,23 @@ #include #include -#define PROFILE_POLL 1 +#define PROFILE_POLL 0 #if PROFILE_POLL #include +#include + +extern unsigned long hkheartbeat; +MEMLOG *plog; #endif -extern int lm_enabled_logfiles_bitmask; +/** Defined in log_manager.cc */ +extern int lm_enabled_logfiles_bitmask; +extern size_t log_ses_count[]; +extern __thread log_info_t tls_log_info; + +int number_poll_spins; +int max_poll_sleep; /** * @file poll.c - Abstraction of the epoll functionality @@ -66,7 +77,7 @@ extern int lm_enabled_logfiles_bitmask; /** * Control the use of mutexes for the epoll_wait call. Setting to 1 will * cause the epoll_wait calls to be moved under a mutex. This may be useful - * for debuggign purposes but should be avoided in general use. + * for debugging purposes but should be avoided in general use. */ #define MUTEX_EPOLL 0 @@ -78,6 +89,7 @@ static simple_mutex_t epoll_wait_mutex; /*< serializes calls to epoll_wait */ #endif static int n_waiting = 0; /*< No. 
of threads in epoll_wait */ static int process_pollq(int thread_id); +static void poll_add_event_to_dcb(DCB* dcb, GWBUF* buf, __uint32_t ev); DCB *eventq = NULL; @@ -92,6 +104,7 @@ static int load_samples = 0; static int load_nfds = 0; static double current_avg = 0.0; static double *avg_samples = NULL; +static int *evqp_samples = NULL; static int next_sample = 0; static int n_avg_samples; @@ -139,13 +152,29 @@ static struct { int n_hup; /*< Number of hangup events */ int n_accept; /*< Number of accept events */ int n_polls; /*< Number of poll cycles */ + int n_pollev; /*< Number of polls returnign events */ + int n_nbpollev; /*< Number of polls returnign events */ int n_nothreads; /*< Number of times no threads are polling */ int n_fds[MAXNFDS]; /*< Number of wakeups with particular n_fds value */ int evq_length; /*< Event queue length */ + int evq_pending; /*< Number of pending descriptors in event queue */ int evq_max; /*< Maximum event queue length */ + int wake_evqpending;/*< Woken from epoll_wait with pending events in queue */ + int blockingpolls; /*< Number of epoll_waits with a timeout specified */ } pollStats; +#define N_QUEUE_TIMES 30 +/** + * The event queue statistics + */ +static struct { + unsigned int qtimes[N_QUEUE_TIMES+1]; + unsigned int exectimes[N_QUEUE_TIMES+1]; + unsigned long maxqtime; + unsigned long maxexectime; +} queueStats; + /** * How frequently to call the poll_loadav function used to monitor the load * average of the poll subsystem. 
@@ -174,6 +203,7 @@ int i; exit(-1); } memset(&pollStats, 0, sizeof(pollStats)); + memset(&queueStats, 0, sizeof(queueStats)); bitmask_init(&poll_mask); n_threads = config_threadcount(); if ((thread_data = @@ -190,10 +220,19 @@ int i; hktask_add("Load Average", poll_loadav, NULL, POLL_LOAD_FREQ); n_avg_samples = 15 * 60 / POLL_LOAD_FREQ; - avg_samples = (double *)malloc(sizeof(double *) * n_avg_samples); + avg_samples = (double *)malloc(sizeof(double) * n_avg_samples); for (i = 0; i < n_avg_samples; i++) avg_samples[i] = 0.0; + evqp_samples = (int *)malloc(sizeof(int) * n_avg_samples); + for (i = 0; i < n_avg_samples; i++) + evqp_samples[i] = 0.0; + number_poll_spins = config_nbpolls(); + max_poll_sleep = config_pollsleep(); + +#if PROFILE_POLL + plog = memlog_create("EventQueueWaitTime", ML_LONG, 10000); +#endif } /** @@ -257,7 +296,7 @@ poll_add_dcb(DCB *dcb) dcb, STRDCBSTATE(dcb->state)))); } - ss_dassert(rc == 0); /*< trap in debug */ + ss_info_dassert(rc == 0, "Unable to add poll"); /*< trap in debug */ } else { LOGIF(LE, (skygw_log_write_flush( LOGFILE_ERROR, @@ -290,7 +329,8 @@ poll_remove_dcb(DCB *dcb) CHK_DCB(dcb); /*< It is possible that dcb has already been removed from the set */ - if (dcb->state != DCB_STATE_POLLING) { + if (dcb->state != DCB_STATE_POLLING) + { if (dcb->state == DCB_STATE_NOPOLLING || dcb->state == DCB_STATE_ZOMBIE) { @@ -298,23 +338,29 @@ poll_remove_dcb(DCB *dcb) } goto return_rc; } - /*< * Set state to NOPOLLING and remove dcb from poll set. */ - if (dcb_set_state(dcb, new_state, &old_state)) { - rc = epoll_ctl(epoll_fd, EPOLL_CTL_DEL, dcb->fd, &ev); + if (dcb_set_state(dcb, new_state, &old_state)) + { + /** + * Only positive fds can be removed from epoll set. 
+ */ + if (dcb->fd > 0) + { + rc = epoll_ctl(epoll_fd, EPOLL_CTL_DEL, dcb->fd, &ev); - if (rc != 0) { - int eno = errno; - errno = 0; - LOGIF(LE, (skygw_log_write_flush( - LOGFILE_ERROR, - "Error : epoll_ctl failed due %d, %s.", - eno, - strerror(eno)))); - } - ss_dassert(rc == 0); /*< trap in debug */ + if (rc != 0) { + int eno = errno; + errno = 0; + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : epoll_ctl failed due %d, %s.", + eno, + strerror(eno)))); + } + ss_dassert(rc == 0); /*< trap in debug */ + } } /*< * This call was redundant, but the end result is correct. @@ -357,7 +403,7 @@ return_rc: * deschedule a process if a timeout is included, but will not do this if a 0 timeout * value is given. this improves performance when the gateway is under heavy load. * - * In order to provide a fairer means of sharign the threads between the different + * In order to provide a fairer means of sharing the threads between the different * DCB's the poll mechanism has been decoupled from the processing of the events. * The events are now recieved via the epoll_wait call, a queue of DCB's that have * events pending is maintained and as new events arrive the DCB is added to the end @@ -368,15 +414,33 @@ return_rc: * events at a high rate will not block the execution of events for other DCB's and * should result in a fairer polling strategy. * + * The introduction of the ability to inject "fake" write events into the event queue meant + * that there was a possibility to "starve" new events sicne the polling loop would + * consume the event queue before looking for new events. If the DCB that inject + * the fake event then injected another fake event as a result of the first it meant + * that new events did not get added to the queue. The strategy has been updated to + * not consume the entire event queue, but process one event before doing a non-blocking + * call to add any new events before processing any more events. 
A blocking call to + * collect events is only made if there are no pending events to be processed on the + * event queue. + * + * Also introduced a "timeout bias" mechanism. This mechansim control the length of + * of timeout passed to epoll_wait in blocking calls based on previous behaviour. + * The initial call will block for 10% of the define timeout peroid, this will be + * increased in increments of 10% until the full timeout value is used. If at any + * point there is an event to be processed then the value will be reduced to 10% again + * for the next blocking call. + * * @param arg The thread ID passed as a void * to satisfy the threading package */ void poll_waitevents(void *arg) { struct epoll_event events[MAX_EVENTS]; -int i, nfds; +int i, nfds, timeout_bias = 1; int thread_id = (int)arg; DCB *zombies = NULL; +int poll_spins = 0; /** Add this thread to the bitmask of running polling threads */ bitmask_set(&poll_mask, thread_id); @@ -390,12 +454,9 @@ DCB *zombies = NULL; while (1) { - /* Process of the queue of waiting requests */ - while (do_shutdown == 0 && process_pollq(thread_id)) + if (pollStats.evq_pending == 0 && timeout_bias < 10) { - if (thread_data) - thread_data[thread_id].state = THREAD_ZPROCESSING; - zombies = dcb_process_zombies(thread_id); + timeout_bias++; } atomic_add(&n_waiting, 1); @@ -411,6 +472,7 @@ DCB *zombies = NULL; thread_data[thread_id].state = THREAD_POLLING; } + atomic_add(&pollStats.n_polls, 1); if ((nfds = epoll_wait(epoll_fd, events, MAX_EVENTS, 0)) == -1) { atomic_add(&n_waiting, -1); @@ -423,19 +485,28 @@ DCB *zombies = NULL; pthread_self(), nfds, eno))); + atomic_add(&n_waiting, -1); } /* * If there are no new descriptors from the non-blocking call - * and nothing to proces on the event queue then for do a + * and nothing to process on the event queue then for do a * blocking call to epoll_wait. 
+ * + * We calculate a timeout bias to alter the length of the blocking + * call based on the time since we last received an event to process */ - else if (nfds == 0 && process_pollq(thread_id) == 0) + else if (nfds == 0 && pollStats.evq_pending == 0 && poll_spins++ > number_poll_spins) { - atomic_add(&n_waiting, 1); + atomic_add(&pollStats.blockingpolls, 1); nfds = epoll_wait(epoll_fd, events, MAX_EVENTS, - EPOLL_TIMEOUT); + (max_poll_sleep * timeout_bias) / 10); + if (nfds == 0 && pollStats.evq_pending) + { + atomic_add(&pollStats.wake_evqpending, 1); + poll_spins = 0; + } } else { @@ -450,12 +521,16 @@ DCB *zombies = NULL; #endif /* BLOCKINGPOLL */ if (nfds > 0) { + timeout_bias = 1; + if (poll_spins <= number_poll_spins + 1) + atomic_add(&pollStats.n_nbpollev, 1); + poll_spins = 0; LOGIF(LD, (skygw_log_write( LOGFILE_DEBUG, "%lu [poll_waitevents] epoll_wait found %d fds", pthread_self(), nfds))); - atomic_add(&pollStats.n_polls, 1); + atomic_add(&pollStats.n_pollev, 1); if (thread_data) { thread_data[thread_id].n_fds = nfds; @@ -474,7 +549,7 @@ DCB *zombies = NULL; /* * Process every DCB that has a new event and add * it to the poll queue. - * If the DCB is currently beign processed then we + * If the DCB is currently being processed then we * or in the new eent bits to the pending event bits * and leave it in the queue. 
* If the DCB was not already in the queue then it was @@ -489,6 +564,11 @@ DCB *zombies = NULL; spinlock_acquire(&pollqlock); if (DCB_POLL_BUSY(dcb)) { + if (dcb->evq.pending_events == 0) + { + pollStats.evq_pending++; + dcb->evq.inserted = hkheartbeat; + } dcb->evq.pending_events |= ev; } else @@ -508,6 +588,8 @@ DCB *zombies = NULL; dcb->evq.next = dcb; } pollStats.evq_length++; + pollStats.evq_pending++; + dcb->evq.inserted = hkheartbeat; if (pollStats.evq_length > pollStats.evq_max) { pollStats.evq_max = pollStats.evq_length; @@ -518,17 +600,20 @@ DCB *zombies = NULL; } /* - * If there was nothing to process then process the zombie queue + * Process of the queue of waiting requests + * This is done without checking the evq_pending count as a + * precautionary measure to avoid issues if the house keeping + * of the count goes wrong. */ - if (process_pollq(thread_id) == 0) - { - if (thread_data) - { - thread_data[thread_id].state = THREAD_ZPROCESSING; - } - zombies = dcb_process_zombies(thread_id); - } - + if (process_pollq(thread_id)) + timeout_bias = 1; + + if (thread_data) + thread_data[thread_id].state = THREAD_ZPROCESSING; + zombies = dcb_process_zombies(thread_id); + if (thread_data) + thread_data[thread_id].state = THREAD_IDLE; + if (do_shutdown) { /*< @@ -551,6 +636,34 @@ DCB *zombies = NULL; } /*< while(1) */ } +/** + * Set the number of non-blocking poll cycles that will be done before + * a blocking poll will take place. Whenever an event arrives on a thread + * or the thread sees a pending event to execute it will reset it's + * poll_spin coutn to zero and will then poll with a 0 timeout until the + * poll_spin value is greater than the value set here. 
+ * + * @param nbpolls Number of non-block polls to perform before blocking + */ +void +poll_set_nonblocking_polls(unsigned int nbpolls) +{ + number_poll_spins = nbpolls; +} + +/** + * Set the maximum amount of time, in milliseconds, the polling thread + * will block before it will wake and check the event queue for work + * that may have been added by another thread. + * + * @param maxwait Maximum wait time in milliseconds + */ +void +poll_set_maxwait(unsigned int maxwait) +{ + max_poll_sleep = maxwait; +} + /** * Process of the queue of DCB's that have outstanding events * @@ -571,6 +684,7 @@ process_pollq(int thread_id) DCB *dcb; int found = 0; uint32_t ev; +unsigned long qtime; spinlock_acquire(&pollqlock); if (eventq == NULL) @@ -607,13 +721,28 @@ uint32_t ev; if (found) { ev = dcb->evq.pending_events; + dcb->evq.processing_events = ev; dcb->evq.pending_events = 0; + pollStats.evq_pending--; } spinlock_release(&pollqlock); if (found == 0) return 0; +#if PROFILE_POLL + memlog_log(plog, hkheartbeat - dcb->evq.inserted); +#endif + qtime = hkheartbeat - dcb->evq.inserted; + dcb->evq.started = hkheartbeat; + + if (qtime > N_QUEUE_TIMES) + queueStats.qtimes[N_QUEUE_TIMES]++; + else + queueStats.qtimes[qtime]++; + if (qtime > queueStats.maxqtime) + queueStats.maxqtime = qtime; + CHK_DCB(dcb); if (thread_data) @@ -623,7 +752,7 @@ uint32_t ev; thread_data[thread_id].event = ev; } -#if defined(SS_DEBUG) +#if defined(FAKE_CODE) if (dcb_fake_write_ev[dcb->fd] != 0) { LOGIF(LD, (skygw_log_write( LOGFILE_DEBUG, @@ -635,7 +764,7 @@ uint32_t ev; ev |= dcb_fake_write_ev[dcb->fd]; dcb_fake_write_ev[dcb->fd] = 0; } -#endif +#endif /* FAKE_CODE */ ss_debug(spinlock_acquire(&dcb->dcb_initlock);) ss_dassert(dcb->state != DCB_STATE_ALLOC); ss_dassert(dcb->state != DCB_STATE_DISCONNECTED); @@ -658,23 +787,21 @@ uint32_t ev; if (eno == 0) { #if MUTEX_BLOCK - simple_mutex_lock( - &dcb->dcb_write_lock, - true); - ss_info_dassert( - !dcb->dcb_write_active, - "Write already active"); + 
simple_mutex_lock(&dcb->dcb_write_lock, true); + ss_info_dassert(!dcb->dcb_write_active, + "Write already active"); dcb->dcb_write_active = TRUE; - atomic_add( - &pollStats.n_write, - 1); + atomic_add(&pollStats.n_write, 1); dcb->func.write_ready(dcb); dcb->dcb_write_active = FALSE; - simple_mutex_unlock( - &dcb->dcb_write_lock); + simple_mutex_unlock(&dcb->dcb_write_lock); #else - atomic_add(&pollStats.n_write, - 1); + atomic_add(&pollStats.n_write, 1); + + LOGIF_MAYBE(LT, (dcb_get_ses_log_info( + dcb, + &tls_log_info.li_sesid, + &tls_log_info.li_enabled_logs))); dcb->func.write_ready(dcb); #endif } else { @@ -693,10 +820,8 @@ uint32_t ev; if (ev & EPOLLIN) { #if MUTEX_BLOCK - simple_mutex_lock(&dcb->dcb_read_lock, - true); - ss_info_dassert(!dcb->dcb_read_active, - "Read already active"); + simple_mutex_lock(&dcb->dcb_read_lock, true); + ss_info_dassert(!dcb->dcb_read_active, "Read already active"); dcb->dcb_read_active = TRUE; #endif @@ -710,6 +835,10 @@ uint32_t ev; dcb->fd))); atomic_add( &pollStats.n_accept, 1); + LOGIF_MAYBE(LT, (dcb_get_ses_log_info( + dcb, + &tls_log_info.li_sesid, + &tls_log_info.li_enabled_logs))); dcb->func.accept(dcb); } else @@ -722,6 +851,10 @@ uint32_t ev; dcb, dcb->fd))); atomic_add(&pollStats.n_read, 1); + LOGIF_MAYBE(LT, (dcb_get_ses_log_info( + dcb, + &tls_log_info.li_sesid, + &tls_log_info.li_enabled_logs))); dcb->func.read(dcb); } #if MUTEX_BLOCK @@ -733,7 +866,7 @@ uint32_t ev; if (ev & EPOLLERR) { int eno = gw_getsockerrno(dcb->fd); -#if defined(SS_DEBUG) +#if defined(FAKE_CODE) if (eno == 0) { eno = dcb_fake_write_errno[dcb->fd]; LOGIF(LD, (skygw_log_write( @@ -746,7 +879,7 @@ uint32_t ev; strerror(eno)))); } dcb_fake_write_errno[dcb->fd] = 0; -#endif +#endif /* FAKE_CODE */ if (eno != 0) { LOGIF(LD, (skygw_log_write( LOGFILE_DEBUG, @@ -757,6 +890,10 @@ uint32_t ev; strerror(eno)))); } atomic_add(&pollStats.n_error, 1); + LOGIF_MAYBE(LT, (dcb_get_ses_log_info( + dcb, + &tls_log_info.li_sesid, + 
&tls_log_info.li_enabled_logs))); dcb->func.error(dcb); } @@ -781,6 +918,10 @@ uint32_t ev; { dcb->flags |= DCBF_HUNG; spinlock_release(&dcb->dcb_initlock); + LOGIF_MAYBE(LT, (dcb_get_ses_log_info( + dcb, + &tls_log_info.li_sesid, + &tls_log_info.li_enabled_logs))); dcb->func.hangup(dcb); } else @@ -809,14 +950,28 @@ uint32_t ev; { dcb->flags |= DCBF_HUNG; spinlock_release(&dcb->dcb_initlock); + LOGIF_MAYBE(LT, (dcb_get_ses_log_info( + dcb, + &tls_log_info.li_sesid, + &tls_log_info.li_enabled_logs))); dcb->func.hangup(dcb); } else spinlock_release(&dcb->dcb_initlock); } #endif + qtime = hkheartbeat - dcb->evq.started; + + if (qtime > N_QUEUE_TIMES) + queueStats.exectimes[N_QUEUE_TIMES]++; + else + queueStats.exectimes[qtime % N_QUEUE_TIMES]++; + if (qtime > queueStats.maxexectime) + queueStats.maxexectime = qtime; spinlock_acquire(&pollqlock); + dcb->evq.processing_events = 0; + if (dcb->evq.pending_events == 0) { /* No pending events so remove from the queue */ @@ -860,6 +1015,7 @@ uint32_t ev; } } dcb->evq.processing = 0; + LOGIF(LT, tls_log_info.li_sesid = 0); spinlock_release(&pollqlock); return 1; @@ -909,24 +1065,35 @@ dprintPollStats(DCB *dcb) { int i; - dcb_printf(dcb, "Number of epoll cycles: %d\n", + dcb_printf(dcb, "\nPoll Statistics.\n\n"); + dcb_printf(dcb, "No. of epoll cycles: %d\n", pollStats.n_polls); - dcb_printf(dcb, "Number of read events: %d\n", + dcb_printf(dcb, "No. of epoll cycles with wait: %d\n", + pollStats.blockingpolls); + dcb_printf(dcb, "No. of epoll calls returning events: %d\n", + pollStats.n_pollev); + dcb_printf(dcb, "No. of non-blocking calls returning events: %d\n", + pollStats.n_nbpollev); + dcb_printf(dcb, "No. of read events: %d\n", pollStats.n_read); - dcb_printf(dcb, "Number of write events: %d\n", + dcb_printf(dcb, "No. of write events: %d\n", pollStats.n_write); - dcb_printf(dcb, "Number of error events: %d\n", + dcb_printf(dcb, "No. 
of error events: %d\n", pollStats.n_error); - dcb_printf(dcb, "Number of hangup events: %d\n", + dcb_printf(dcb, "No. of hangup events: %d\n", pollStats.n_hup); - dcb_printf(dcb, "Number of accept events: %d\n", + dcb_printf(dcb, "No. of accept events: %d\n", pollStats.n_accept); - dcb_printf(dcb, "Number of times no threads polling: %d\n", + dcb_printf(dcb, "No. of times no threads polling: %d\n", pollStats.n_nothreads); - dcb_printf(dcb, "Current event queue length: %d\n", + dcb_printf(dcb, "Current event queue length: %d\n", pollStats.evq_length); - dcb_printf(dcb, "Maximum event queue length: %d\n", + dcb_printf(dcb, "Maximum event queue length: %d\n", pollStats.evq_max); + dcb_printf(dcb, "No. of DCBs with pending events: %d\n", + pollStats.evq_pending); + dcb_printf(dcb, "No. of wakeups with pending queue: %d\n", + pollStats.wake_evqpending); dcb_printf(dcb, "No of poll completions with descriptors\n"); dcb_printf(dcb, "\tNo. of descriptors\tNo. of poll completions.\n"); @@ -1003,6 +1170,7 @@ dShowThreads(DCB *dcb) int i, j, n; char *state; double avg1 = 0.0, avg5 = 0.0, avg15 = 0.0; +double qavg1 = 0.0, qavg5 = 0.0, qavg15 = 0.0; dcb_printf(dcb, "Polling Threads.\n\n"); @@ -1011,8 +1179,12 @@ double avg1 = 0.0, avg5 = 0.0, avg15 = 0.0; /* Average all the samples to get the 15 minute average */ for (i = 0; i < n_avg_samples; i++) + { avg15 += avg_samples[i]; + qavg15 += evqp_samples[i]; + } avg15 = avg15 / n_avg_samples; + qavg15 = qavg15 / n_avg_samples; /* Average the last third of the samples to get the 5 minute average */ n = 5 * 60 / POLL_LOAD_FREQ; @@ -1020,8 +1192,12 @@ double avg1 = 0.0, avg5 = 0.0, avg15 = 0.0; if (i < 0) i += n_avg_samples; for (j = i; j < i + n; j++) + { avg5 += avg_samples[j % n_avg_samples]; + qavg5 += evqp_samples[j % n_avg_samples]; + } avg5 = (3 * avg5) / (n_avg_samples); + qavg5 = (3 * qavg5) / (n_avg_samples); /* Average the last 15th of the samples to get the 1 minute average */ n = 60 / POLL_LOAD_FREQ; @@ -1029,16 +1205,23 
@@ double avg1 = 0.0, avg5 = 0.0, avg15 = 0.0; if (i < 0) i += n_avg_samples; for (j = i; j < i + n; j++) + { avg1 += avg_samples[j % n_avg_samples]; + qavg1 += evqp_samples[j % n_avg_samples]; + } avg1 = (15 * avg1) / (n_avg_samples); + qavg1 = (15 * qavg1) / (n_avg_samples); dcb_printf(dcb, "15 Minute Average: %.2f, 5 Minute Average: %.2f, " "1 Minute Average: %.2f\n\n", avg15, avg5, avg1); + dcb_printf(dcb, "Pending event queue length averages:\n"); + dcb_printf(dcb, "15 Minute Average: %.2f, 5 Minute Average: %.2f, " + "1 Minute Average: %.2f\n\n", qavg15, qavg5, qavg1); if (thread_data == NULL) return; - dcb_printf(dcb, " ID | State | # fds | Descriptor | Event\n"); - dcb_printf(dcb, "----+------------+--------+------------------+---------------\n"); + dcb_printf(dcb, " ID | State | # fds | Descriptor | Running | Event\n"); + dcb_printf(dcb, "----+------------+--------+------------------+----------+---------------\n"); for (i = 0; i < n_threads; i++) { switch (thread_data[i].state) @@ -1061,23 +1244,37 @@ double avg1 = 0.0, avg5 = 0.0, avg15 = 0.0; } if (thread_data[i].state != THREAD_PROCESSING) dcb_printf(dcb, - " %2d | %-10s | | |\n", + " %2d | %-10s | | | |\n", i, state); else if (thread_data[i].cur_dcb == NULL) dcb_printf(dcb, - " %2d | %-10s | %6d | |\n", + " %2d | %-10s | %6d | | |\n", i, state, thread_data[i].n_fds); else { char *event_string = event_to_string(thread_data[i].event); + bool from_heap; + if (event_string == NULL) + { + from_heap = false; event_string = "??"; + } + else + { + from_heap = true; + } dcb_printf(dcb, - " %2d | %-10s | %6d | %-16p | %s\n", + " %2d | %-10s | %6d | %-16p | <%3d00ms | %s\n", i, state, thread_data[i].n_fds, - thread_data[i].cur_dcb, event_string); - free(event_string); + thread_data[i].cur_dcb, 1 + hkheartbeat - dcb->evq.started, + event_string); + + if (from_heap) + { + free(event_string); + } } } } @@ -1105,7 +1302,213 @@ int new_samples, new_nfds; else current_avg = 0.0; avg_samples[next_sample] = current_avg; 
+ evqp_samples[next_sample] = pollStats.evq_pending; next_sample++; if (next_sample >= n_avg_samples) next_sample = 0; } + +/** + * Add given GWBUF to DCB's readqueue and add a pending EPOLLIN event for DCB. + * The event pretends that there is something to read for the DCB. Actually + * the incoming data is stored in the DCB's readqueue where it is read. + * + * @param dcb DCB where the event and data are added + * @param buf GWBUF including the data + * + */ +void poll_add_epollin_event_to_dcb( + DCB* dcb, + GWBUF* buf) +{ + __uint32_t ev; + + ev = EPOLLIN; + + poll_add_event_to_dcb(dcb, buf, ev); +} + + +static void poll_add_event_to_dcb( + DCB* dcb, + GWBUF* buf, + __uint32_t ev) +{ + /** Add buf to readqueue */ + spinlock_acquire(&dcb->authlock); + dcb->dcb_readqueue = gwbuf_append(dcb->dcb_readqueue, buf); + spinlock_release(&dcb->authlock); + + spinlock_acquire(&pollqlock); + + /** Set event to DCB */ + if (DCB_POLL_BUSY(dcb)) + { + if (dcb->evq.pending_events == 0) + { + pollStats.evq_pending++; + } + dcb->evq.pending_events |= ev; + } + else + { + dcb->evq.pending_events = ev; + /** Add DCB to eventqueue if it isn't already there */ + if (eventq) + { + dcb->evq.prev = eventq->evq.prev; + eventq->evq.prev->evq.next = dcb; + eventq->evq.prev = dcb; + dcb->evq.next = eventq; + } + else + { + eventq = dcb; + dcb->evq.prev = dcb; + dcb->evq.next = dcb; + } + pollStats.evq_length++; + pollStats.evq_pending++; + + if (pollStats.evq_length > pollStats.evq_max) + { + pollStats.evq_max = pollStats.evq_length; + } + } + spinlock_release(&pollqlock); +} + +/* + * Insert a fake write completion event for a DCB into the polling + * queue. + * + * This is used to trigger transmission activity on another DCB from + * within the event processing routine of a DCB. or to allow a DCB + * to defer some further output processing, to allow for other DCBs + * to receive a slice of the processing time. 
Fake events are added + * to the tail of the event queue, in the same way that real events + * are, so maintain the "fairness" of processing. + * + * @param dcb DCB to emulate an EPOLLOUT event for + */ +void +poll_fake_write_event(DCB *dcb) +{ +uint32_t ev = EPOLLOUT; + + spinlock_acquire(&pollqlock); + /* + * If the DCB is already on the queue, there are no pending events and + * there are other events on the queue, then + * take it off the queue. This stops the DCB hogging the threads. + */ + if (DCB_POLL_BUSY(dcb) && dcb->evq.pending_events == 0 && dcb->evq.prev != dcb) + { + dcb->evq.prev->evq.next = dcb->evq.next; + dcb->evq.next->evq.prev = dcb->evq.prev; + if (eventq == dcb) + eventq = dcb->evq.next; + dcb->evq.next = NULL; + dcb->evq.prev = NULL; + pollStats.evq_length--; + } + + if (DCB_POLL_BUSY(dcb)) + { + if (dcb->evq.pending_events == 0) + pollStats.evq_pending++; + dcb->evq.pending_events |= ev; + } + else + { + dcb->evq.pending_events = ev; + dcb->evq.inserted = hkheartbeat; + if (eventq) + { + dcb->evq.prev = eventq->evq.prev; + eventq->evq.prev->evq.next = dcb; + eventq->evq.prev = dcb; + dcb->evq.next = eventq; + } + else + { + eventq = dcb; + dcb->evq.prev = dcb; + dcb->evq.next = dcb; + } + pollStats.evq_length++; + pollStats.evq_pending++; + dcb->evq.inserted = hkheartbeat; + if (pollStats.evq_length > pollStats.evq_max) + { + pollStats.evq_max = pollStats.evq_length; + } + } + spinlock_release(&pollqlock); +} + +/** + * Print the event queue contents + * + * @param pdcb The DCB to print the event queue to + */ +void +dShowEventQ(DCB *pdcb) +{ +DCB *dcb; +char *tmp1, *tmp2; + + spinlock_acquire(&pollqlock); + if (eventq == NULL) + { + /* Nothing to process */ + spinlock_release(&pollqlock); + return; + } + dcb = eventq; + dcb_printf(pdcb, "\nEvent Queue.\n"); + dcb_printf(pdcb, "%-16s | %-10s | %-18s | %s\n", "DCB", "Status", "Processing Events", + "Pending Events"); + dcb_printf(pdcb, 
"-----------------+------------+--------------------+-------------------\n"); + do { + dcb_printf(pdcb, "%-16p | %-10s | %-18s | %-18s\n", dcb, + dcb->evq.processing ? "Processing" : "Pending", + (tmp1 = event_to_string(dcb->evq.processing_events)), + (tmp2 = event_to_string(dcb->evq.pending_events))); + free(tmp1); + free(tmp2); + dcb = dcb->evq.next; + } while (dcb != eventq); + spinlock_release(&pollqlock); +} + + +/** + * Print the event queue statistics + * + * @param pdcb The DCB to print the event queue to + */ +void +dShowEventStats(DCB *pdcb) +{ +int i; + + dcb_printf(pdcb, "\nEvent statistics.\n"); + dcb_printf(pdcb, "Maximum queue time: %3d00ms\n", queueStats.maxqtime); + dcb_printf(pdcb, "Maximum execution time: %3d00ms\n", queueStats.maxexectime); + dcb_printf(pdcb, "Maximum event queue length: %3d\n", pollStats.evq_max); + dcb_printf(pdcb, "Current event queue length: %3d\n", pollStats.evq_length); + dcb_printf(pdcb, "\n"); + dcb_printf(pdcb, " | Number of events\n"); + dcb_printf(pdcb, "Duration | Queued | Executed\n"); + dcb_printf(pdcb, "---------------+------------+-----------\n"); + dcb_printf(pdcb, " < 100ms | %-10d | %-10d\n", + queueStats.qtimes[0], queueStats.exectimes[0]); + for (i = 1; i < N_QUEUE_TIMES; i++) + { + dcb_printf(pdcb, " %2d00 - %2d00ms | %-10d | %-10d\n", i, i + 1, + queueStats.qtimes[i], queueStats.exectimes[i]); + } + dcb_printf(pdcb, " > %2d00ms | %-10d | %-10d\n", N_QUEUE_TIMES, + queueStats.qtimes[N_QUEUE_TIMES], queueStats.exectimes[N_QUEUE_TIMES]); +} diff --git a/server/core/secrets.c b/server/core/secrets.c index f9ac62aef..32fe59467 100644 --- a/server/core/secrets.c +++ b/server/core/secrets.c @@ -1,5 +1,5 @@ /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. 
It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -13,7 +13,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * - * Copyright SkySQL Ab 2013 + * Copyright MariaDB Corporation Ab 2013-2014 */ #include @@ -23,7 +23,10 @@ #include #include -extern int lm_enabled_logfiles_bitmask; +/** Defined in log_manager.cc */ +extern int lm_enabled_logfiles_bitmask; +extern size_t log_ses_count[]; +extern __thread log_info_t tls_log_info; /** * Generate a random printable character * @@ -65,6 +68,7 @@ MAXKEYS *keys; struct stat secret_stats; int fd; int len; +static int reported = 0; home = getenv("MAXSCALE_HOME"); @@ -74,16 +78,33 @@ int len; snprintf(secret_file, 255, "%s/etc/.secrets", home); /* Try to access secrets file */ - if (access(secret_file, R_OK) == -1) { + if (access(secret_file, R_OK) == -1) + { int eno = errno; errno = 0; - LOGIF(LE, (skygw_log_write_flush( - LOGFILE_ERROR, - "Error : access for secrets file " - "[%s] failed. Error %d, %s.", - secret_file, - eno, - strerror(eno)))); + if (eno == ENOENT) + { + if (!reported) + { + LOGIF(LM, (skygw_log_write( + LOGFILE_MESSAGE, + "Encrypted password file %s can't be accessed " + "(%s). Password encryption is not used.", + secret_file, + strerror(eno)))); + reported = 1; + } + } + else + { + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : access for secrets file " + "[%s] failed. 
Error %d, %s.", + secret_file, + eno, + strerror(eno)))); + } return NULL; } @@ -206,8 +227,9 @@ int len; */ int secrets_writeKeys(char *secret_file) { -int fd; -MAXKEYS key; +int fd,randfd; +unsigned int randval; +MAXKEYS key; /* Open for writing | Create | Truncate the file for writing */ if ((fd = open(secret_file, O_CREAT | O_WRONLY | O_TRUNC, S_IRUSR)) < 0) @@ -222,7 +244,30 @@ MAXKEYS key; return 1; } - srand(time(NULL)); + /* Open for writing | Create | Truncate the file for writing */ + if ((randfd = open("/dev/random", O_RDONLY)) < 0) + { + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : failed opening /dev/random. Error %d, %s.", + errno, + strerror(errno)))); + close(fd); + return 1; + } + + if(read(randfd,(void*)&randval,sizeof(unsigned int)) < 1) + { + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : failed to read /dev/random."))); + close(fd); + close(randfd); + return 1; + } + + close(randfd); + srand(randval); secrets_random_str(key.enckey, MAXSCALE_KEYLEN); secrets_random_str(key.initvector, MAXSCALE_IV_LEN); @@ -236,6 +281,7 @@ MAXKEYS key; secret_file, errno, strerror(errno)))); + close(fd); return 1; } @@ -250,7 +296,17 @@ MAXKEYS key; errno, strerror(errno)))); } - chmod(secret_file, S_IRUSR); + + if( chmod(secret_file, S_IRUSR) < 0) + { + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : failed to change the permissions of the" + "secret file [%s]. 
Error %d, %s.", + secret_file, + errno, + strerror(errno)))); + } return 0; } @@ -331,7 +387,7 @@ unsigned char encrypted[80]; return NULL; memset(padded_passwd, 0, 80); - strcpy((char *)padded_passwd, password); + strncpy((char *)padded_passwd, password, 79); padded_len = ((strlen(password) / AES_BLOCK_SIZE) + 1) * AES_BLOCK_SIZE; AES_set_encrypt_key(keys->enckey, 8 * MAXSCALE_KEYLEN, &aeskey); diff --git a/server/core/server.c b/server/core/server.c index 43c535d0e..9841b96ff 100644 --- a/server/core/server.c +++ b/server/core/server.c @@ -1,5 +1,5 @@ /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -13,7 +13,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
* - * Copyright SkySQL Ab 2013 + * Copyright MariaDB Corporation Ab 2013-2014 */ /** @@ -30,7 +30,8 @@ * 28/05/14 Massimiliano Pinto Addition of rlagd and node_ts fields * 20/06/14 Massimiliano Pinto Addition of master_id, depth, slaves fields * 26/06/14 Mark Riddoch Addition of server parameters - * 30/08/14 Massimiliano Pinto Addition of new service status description + * 30/08/14 Massimiliano Pinto Addition of new service status description + * 30/10/14 Massimiliano Pinto Addition of SERVER_MASTER_STICKINESS description * * @endverbatim */ @@ -44,7 +45,10 @@ #include #include -extern int lm_enabled_logfiles_bitmask; +/** Defined in log_manager.cc */ +extern int lm_enabled_logfiles_bitmask; +extern size_t log_ses_count[]; +extern __thread log_info_t tls_log_info; static SPINLOCK server_spin = SPINLOCK_INIT; static SERVER *allServers = NULL; @@ -64,25 +68,16 @@ server_alloc(char *servname, char *protocol, unsigned short port) { SERVER *server; - if ((server = (SERVER *)malloc(sizeof(SERVER))) == NULL) + if ((server = (SERVER *)calloc(1, sizeof(SERVER))) == NULL) return NULL; server->name = strdup(servname); server->protocol = strdup(protocol); server->port = port; - memset(&server->stats, 0, sizeof(SERVER_STATS)); server->status = SERVER_RUNNING; - server->nextdb = NULL; - server->monuser = NULL; - server->monpw = NULL; - server->unique_name = NULL; - server->server_string = NULL; server->node_id = -1; server->rlag = -2; - server->node_ts = 0; - server->parameters = NULL; server->master_id = -1; server->depth = -1; - server->slaves = NULL; spinlock_acquire(&server_spin); server->next = allServers; @@ -136,7 +131,7 @@ SERVER *ptr; /** * Set a unique name for the server * - * @param server The server to ste the name on + * @param server The server to set the name on * @param name The unique name for the server */ void @@ -162,7 +157,7 @@ SERVER *server; server = allServers; while (server) { - if (strcmp(server->unique_name, name) == 0) + if (server->unique_name && 
strcmp(server->unique_name, name) == 0) break; server = server->next; } @@ -340,8 +335,10 @@ SERVER_PARAM *param; } } if (server->node_ts > 0) { + struct tm result; + char buf[40]; dcb_printf(dcb, "\tLast Repl Heartbeat:\t%s", - asctime(localtime(&server->node_ts))); + asctime_r(localtime_r((time_t *)(&server->node_ts), &result), buf)); } if ((param = server->parameters) != NULL) { @@ -424,6 +421,8 @@ char *status = NULL; strcat(status, "Slave of External Server, "); if (server->status & SERVER_STALE_STATUS) strcat(status, "Stale Status, "); + if (server->status & SERVER_MASTER_STICKINESS) + strcat(status, "Master Stickiness, "); if (server->status & SERVER_AUTH_ERROR) strcat(status, "Auth Error, "); if (server->status & SERVER_RUNNING) @@ -443,6 +442,12 @@ void server_set_status(SERVER *server, int bit) { server->status |= bit; + + /** clear error logged flag before the next failure */ + if (SERVER_IS_MASTER(server)) + { + server->master_err_is_logged = false; + } } /** diff --git a/server/core/service.c b/server/core/service.c index a5e08f937..e0a378647 100644 --- a/server/core/service.c +++ b/server/core/service.c @@ -1,5 +1,5 @@ /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -13,7 +13,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
* - * Copyright SkySQL Ab 2013 + * Copyright MariaDB Corporation Ab 2013-2014 */ /** @@ -32,6 +32,7 @@ * 23/05/14 Mark Riddoch Addition of service validation call * 29/05/14 Mark Riddoch Filter API implementation * 09/09/14 Massimiliano Pinto Added service option for localhost authentication + * 13/10/14 Massimiliano Pinto Added hashtable for resources (i.e database names for MySQL services) * * @endverbatim */ @@ -54,7 +55,10 @@ #include #include -extern int lm_enabled_logfiles_bitmask; +/** Defined in log_manager.cc */ +extern int lm_enabled_logfiles_bitmask; +extern size_t log_ses_count[]; +extern __thread log_info_t tls_log_info; /** To be used with configuration type checks */ typedef struct typelib_st { @@ -83,7 +87,6 @@ static void service_add_qualified_param( SERVICE* svc, CONFIG_PARAMETER* param); - /** * Allocate a new service for the gateway to support * @@ -91,14 +94,14 @@ static void service_add_qualified_param( * @param servname The service name * @param router Name of the router module this service uses * - * @return The newly created service or NULL if an error occured + * @return The newly created service or NULL if an error occurred */ SERVICE * -service_alloc(char *servname, char *router) +service_alloc(const char *servname, const char *router) { SERVICE *service; - if ((service = (SERVICE *)malloc(sizeof(SERVICE))) == NULL) + if ((service = (SERVICE *)calloc(1, sizeof(SERVICE))) == NULL) return NULL; if ((service->router = load_module(router, MODULE_ROUTER)) == NULL) { @@ -121,25 +124,17 @@ SERVICE *service; } service->name = strdup(servname); service->routerModule = strdup(router); - service->version_string = NULL; - memset(&service->stats, 0, sizeof(SERVICE_STATS)); - service->ports = NULL; + if (service->name == NULL || service->routerModule == NULL) + { + if (service->name) + free(service->name); + free(service); + return NULL; + } service->stats.started = time(0); service->state = SERVICE_STATE_ALLOC; - service->credentials.name = NULL; - 
service->credentials.authdata = NULL; - service->enable_root = 0; - service->localhost_match_wildcard_host = 0; - service->routerOptions = NULL; - service->databases = NULL; - service->svc_config_param = NULL; - service->svc_config_version = 0; - service->filters = NULL; - service->n_filters = 0; - service->weightby = 0; spinlock_init(&service->spin); spinlock_init(&service->users_table_spin); - memset(&service->rate_limit, 0, sizeof(SERVICE_REFRESH_RATE)); spinlock_acquire(&service_spin); service->next = allServices; @@ -152,7 +147,7 @@ SERVICE *service; /** * Check to see if a service pointer is valid * - * @param service The poitner to check + * @param service The pointer to check * @return 1 if the service is in the list of all services */ int @@ -194,32 +189,61 @@ GWPROTOCOL *funcs; if (port->listener == NULL) { - return 0; + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : Failed to create listener for service %s.", + service->name))); + goto retblock; } + if (strcmp(port->protocol, "MySQLClient") == 0) { int loaded; - /* Allocate specific data for MySQL users */ - service->users = mysql_users_alloc(); - loaded = load_mysql_users(service); - /* At service start last update is set to USERS_REFRESH_TIME seconds earlier. - * This way MaxScale could try reloading users' just after startup - */ - service->rate_limit.last=time(NULL) - USERS_REFRESH_TIME; - service->rate_limit.nloads=1; + if (service->users == NULL) { + /* + * Allocate specific data for MySQL users + * including hosts and db names + */ + service->users = mysql_users_alloc(); + + if ((loaded = load_mysql_users(service)) < 0) + { + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : Unable to load users from %s:%d for " + "service %s.", + (port->address == NULL ? "0.0.0.0" : port->address), + port->port, + service->name))); + } + /* At service start last update is set to USERS_REFRESH_TIME seconds earlier. 
+ * This way MaxScale could try reloading users' just after startup + */ + service->rate_limit.last=time(NULL) - USERS_REFRESH_TIME; + service->rate_limit.nloads=1; - LOGIF(LM, (skygw_log_write( - LOGFILE_MESSAGE, - "Loaded %d MySQL Users.", - loaded))); - } else { - /* Generic users table */ - service->users = users_alloc(); + LOGIF(LM, (skygw_log_write( + LOGFILE_MESSAGE, + "Loaded %d MySQL Users for service [%s].", + loaded, service->name))); + } + } + else + { + if (service->users == NULL) { + /* Generic users table */ + service->users = users_alloc(); + } } - if ((funcs = - (GWPROTOCOL *)load_module(port->protocol, MODULE_PROTOCOL)) == NULL) + if ((funcs=(GWPROTOCOL *)load_module(port->protocol, MODULE_PROTOCOL)) + == NULL) { + if (service->users->data) + { + hashtable_free(service->users->data); + } + free(service->users); dcb_free(port->listener); port->listener = NULL; LOGIF(LE, (skygw_log_write_flush( @@ -228,34 +252,60 @@ GWPROTOCOL *funcs; "for service %s not started.", port->protocol, service->name))); - return 0; + goto retblock; } memcpy(&(port->listener->func), funcs, sizeof(GWPROTOCOL)); port->listener->session = NULL; + if (port->address) sprintf(config_bind, "%s:%d", port->address, port->port); else sprintf(config_bind, "0.0.0.0:%d", port->port); - if (port->listener->func.listen(port->listener, config_bind)) { + if (port->listener->func.listen(port->listener, config_bind)) + { port->listener->session = session_alloc(service, port->listener); - if (port->listener->session != NULL) { + if (port->listener->session != NULL) + { port->listener->session->state = SESSION_STATE_LISTENER; listeners += 1; - } else { + } + else + { + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : Failed to create session to service %s.", + service->name))); + + if (service->users->data) + { + hashtable_free(service->users->data); + } + free(service->users); dcb_close(port->listener); + port->listener = NULL; + goto retblock; } - } else { - 
dcb_close(port->listener); - + } + else + { LOGIF(LE, (skygw_log_write_flush( LOGFILE_ERROR, "Error : Unable to start to listen port %d for %s %s.", port->port, port->protocol, service->name))); + if (service->users->data) + { + hashtable_free(service->users->data); + } + free(service->users); + dcb_close(port->listener); + port->listener = NULL; } + +retblock: return listeners; } @@ -276,17 +326,27 @@ serviceStart(SERVICE *service) SERV_PROTOCOL *port; int listeners = 0; - service->router_instance = service->router->createInstance(service, - service->routerOptions); + if ((service->router_instance = service->router->createInstance(service, + service->routerOptions)) == NULL) + { + LOGIF(LE, (skygw_log_write_flush(LOGFILE_ERROR, + "%s: Failed to create router instance for service. Service not started.", + service->name))); + service->state = SERVICE_STATE_FAILED; + return 0; + } port = service->ports; - while (port) + while (!service->svc_do_shutdown && port) { listeners += serviceStartPort(service, port); port = port->next; } if (listeners) + { + service->state = SERVICE_STATE_STARTED; service->stats.started = time(0); + } return listeners; } @@ -322,12 +382,21 @@ int serviceStartAll() { SERVICE *ptr; -int n = 0; +int n = 0,i; ptr = allServices; - while (ptr) + while (ptr && !ptr->svc_do_shutdown) { - n += serviceStart(ptr); + n += (i = serviceStart(ptr)); + + if(i == 0) + { + LOGIF(LE, (skygw_log_write( + LOGFILE_ERROR, + "Error : Failed to start service '%s'.", + ptr->name))); + } + ptr = ptr->next; } return n; @@ -356,6 +425,7 @@ int listeners = 0; port = port->next; } + service->state = SERVICE_STATE_STOPPED; return listeners; } @@ -398,7 +468,7 @@ int service_free(SERVICE *service) { SERVICE *ptr; - +SERVER_REF *srv; if (service->stats.n_current) return 0; /* First of all remove from the linked list */ @@ -420,6 +490,13 @@ SERVICE *ptr; spinlock_release(&service_spin); /* Clean up session and free the memory */ + + while(service->dbref){ + srv = 
service->dbref; + service->dbref = service->dbref->next; + free(srv); + } + free(service->name); free(service->routerModule); if (service->credentials.name) @@ -498,8 +575,13 @@ void serviceAddBackend(SERVICE *service, SERVER *server) { spinlock_acquire(&service->spin); - server->nextdb = service->databases; - service->databases = server; + SERVER_REF *sref; + if((sref = calloc(1,sizeof(SERVER_REF))) != NULL) + { + sref->next = service->dbref; + sref->server = server; + service->dbref = sref; + } spinlock_release(&service->spin); } @@ -513,12 +595,12 @@ serviceAddBackend(SERVICE *service, SERVER *server) int serviceHasBackend(SERVICE *service, SERVER *server) { -SERVER *ptr; +SERVER_REF *ptr; spinlock_acquire(&service->spin); - ptr = service->databases; - while (ptr && ptr != server) - ptr = ptr->nextdb; + ptr = service->dbref; + while (ptr && ptr->server != server) + ptr = ptr->next; spinlock_release(&service->spin); return ptr != NULL; @@ -676,7 +758,7 @@ int n = 0; if ((flist = (FILTER_DEF **)malloc(sizeof(FILTER_DEF *))) == NULL) { LOGIF(LE, (skygw_log_write_flush(LOGFILE_ERROR, - "Out of memory adding filters to service.\n"))); + "Error : Out of memory adding filters to service.\n"))); return; } ptr = strtok_r(filters, "|", &brkt); @@ -687,14 +769,14 @@ int n = 0; (n + 1) * sizeof(FILTER_DEF *))) == NULL) { LOGIF(LE, (skygw_log_write_flush(LOGFILE_ERROR, - "Out of memory adding filters to service.\n"))); + "Error : Out of memory adding filters to service.\n"))); return; } if ((flist[n-1] = filter_find(trim(ptr))) == NULL) { LOGIF(LE, (skygw_log_write_flush( LOGFILE_ERROR, - "Unable to find filter '%s' for service '%s'\n", + "Warning : Unable to find filter '%s' for service '%s'\n", trim(ptr), service->name ))); n--; @@ -736,18 +818,21 @@ SERVICE *service; void printService(SERVICE *service) { -SERVER *ptr = service->databases; -int i; +SERVER_REF *ptr = service->dbref; +struct tm result; +char time_buf[30]; +int i; printf("Service %p\n", service); 
printf("\tService: %s\n", service->name); printf("\tRouter: %s (%p)\n", service->routerModule, service->router); - printf("\tStarted: %s", asctime(localtime(&service->stats.started))); + printf("\tStarted: %s", + asctime_r(localtime_r(&service->stats.started, &result), time_buf)); printf("\tBackend databases\n"); while (ptr) { - printf("\t\t%s:%d Protocol: %s\n", ptr->name, ptr->port, ptr->protocol); - ptr = ptr->nextdb; + printf("\t\t%s:%d Protocol: %s\n", ptr->server->name, ptr->server->port, ptr->server->protocol); + ptr = ptr->next; } if (service->n_filters) { @@ -814,18 +899,35 @@ SERVICE *ptr; */ void dprintService(DCB *dcb, SERVICE *service) { -SERVER *server = service->databases; -int i; +SERVER_REF *server = service->dbref; +struct tm result; +char timebuf[30]; +int i; dcb_printf(dcb, "Service %p\n", service); dcb_printf(dcb, "\tService: %s\n", service->name); dcb_printf(dcb, "\tRouter: %s (%p)\n", service->routerModule, service->router); - if (service->router) + switch (service->state) + { + case SERVICE_STATE_STARTED: + dcb_printf(dcb, "\tState: Started\n"); + break; + case SERVICE_STATE_STOPPED: + dcb_printf(dcb, "\tState: Stopped\n"); + break; + case SERVICE_STATE_FAILED: + dcb_printf(dcb, "\tState: Failed\n"); + break; + case SERVICE_STATE_ALLOC: + dcb_printf(dcb, "\tState: Allocated\n"); + break; + } + if (service->router && service->router_instance) service->router->diagnostics(service->router_instance, dcb); dcb_printf(dcb, "\tStarted: %s", - asctime(localtime(&service->stats.started))); + asctime_r(localtime_r(&service->stats.started, &result), timebuf)); dcb_printf(dcb, "\tRoot user access: %s\n", service->enable_root ? 
"Enabled" : "Disabled"); if (service->n_filters) @@ -841,9 +943,9 @@ int i; dcb_printf(dcb, "\tBackend databases\n"); while (server) { - dcb_printf(dcb, "\t\t%s:%d Protocol: %s\n", server->name, server->port, - server->protocol); - server = server->nextdb; + dcb_printf(dcb, "\t\t%s:%d Protocol: %s\n", server->server->name, server->server->port, + server->server->protocol); + server = server->next; } if (service->weightby) dcb_printf(dcb, "\tRouting weight parameter: %s\n", @@ -996,8 +1098,8 @@ int service_refresh_users(SERVICE *service) { if ( (time(NULL) < (service->rate_limit.last + USERS_REFRESH_TIME)) || (service->rate_limit.nloads > USERS_REFRESH_MAX_PER_TIME)) { LOGIF(LE, (skygw_log_write_flush( LOGFILE_ERROR, - "%lu [service_refresh_users] refresh rate limit exceeded loading new users' table", - pthread_self()))); + "Refresh rate limit exceeded for load of users' table for service '%s'.", + service->name))); spinlock_release(&service->users_table_spin); return 1; @@ -1159,19 +1261,18 @@ bool service_set_param_value ( /* * Function to find a string in typelib_t * (similar to find_type() of mysys/typelib.c) - * - * SYNOPSIS - * find_type() - * lib typelib_t - * find String to find - * length Length of string to find - * part_match Allow part matching of value - * - * RETURN - * 0 error - * > 0 position in TYPELIB->type_names +1 + * + * SYNOPSIS + * find_type() + * lib typelib_t + * find String to find + * length Length of string to find + * part_match Allow part matching of value + * + * RETURN + * 0 error + * > 0 position in TYPELIB->type_names +1 */ - static int find_type( typelib_t* tl, const char* needle, @@ -1310,3 +1411,16 @@ serviceEnableLocalhostMatchWildcardHost(SERVICE *service, int action) return 1; } + +void service_shutdown() +{ + SERVICE* svc; + spinlock_acquire(&service_spin); + svc = allServices; + while (svc != NULL) + { + svc->svc_do_shutdown = true; + svc = svc->next; + } + spinlock_release(&service_spin); +} \ No newline at end of file diff 
--git a/server/core/session.c b/server/core/session.c index eb9d42a2b..fb1dda79f 100644 --- a/server/core/session.c +++ b/server/core/session.c @@ -1,5 +1,5 @@ /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -13,7 +13,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * - * Copyright SkySQL Ab 2013 + * Copyright MariaDB Corporation Ab 2013-2014 */ /** @@ -43,7 +43,13 @@ #include #include -extern int lm_enabled_logfiles_bitmask; +/** Defined in log_manager.cc */ +extern int lm_enabled_logfiles_bitmask; +extern size_t log_ses_count[]; +extern __thread log_info_t tls_log_info; + +/** Global session id; updated safely by holding session_spin */ +static size_t session_id; static SPINLOCK session_spin = SPINLOCK_INIT; static SESSION *allSessions = NULL; @@ -71,21 +77,29 @@ session_alloc(SERVICE *service, DCB *client_dcb) ss_info_dassert(session != NULL, "Allocating memory for session failed."); - if (session == NULL) { - int eno = errno; - errno = 0; + if (session == NULL) + { LOGIF(LE, (skygw_log_write_flush( LOGFILE_ERROR, "Error : Failed to allocate memory for " "session object due error %d, %s.", - eno, - strerror(eno)))); + errno, + strerror(errno)))); + if (client_dcb->data && !DCB_IS_CLONE(client_dcb)) + { + free(client_dcb->data); + client_dcb->data = NULL; + } goto return_session; } #if defined(SS_DEBUG) session->ses_chk_top = CHK_NUM_SESSION; session->ses_chk_tail = CHK_NUM_SESSION; #endif + if (DCB_IS_CLONE(client_dcb)) + { + session->ses_is_child = true; + } spinlock_init(&session->ses_lock); /*< * Prevent backend threads from accessing before session is completely @@ -133,8 +147,9 @@ 
session_alloc(SERVICE *service, DCB *client_dcb) session->router_session = service->router->newSession(service->router_instance, session); - - if (session->router_session == NULL) { + + if (session->router_session == NULL) + { /** * Inform other threads that session is closing. */ @@ -143,6 +158,7 @@ session_alloc(SERVICE *service, DCB *client_dcb) * Decrease refcount, set dcb's session pointer NULL * and set session pointer to NULL. */ + session->client = NULL; session_free(session); client_dcb->session = NULL; session = NULL; @@ -153,7 +169,6 @@ session_alloc(SERVICE *service, DCB *client_dcb) goto return_session; } - /* * Pending filter chain being setup set the head of the chain to * be the router. As filters are inserted the current head will @@ -184,23 +199,27 @@ session_alloc(SERVICE *service, DCB *client_dcb) * Decrease refcount, set dcb's session pointer NULL * and set session pointer to NULL. */ + session->client = NULL; session_free(session); client_dcb->session = NULL; session = NULL; - LOGIF(LE, (skygw_log_write_flush( + LOGIF(LE, (skygw_log_write( LOGFILE_ERROR, - "Error : Failed to create %s session.", + "Error : Setting up filters failed. 
" + "Terminating session %s.", service->name))); goto return_session; } } } - spinlock_acquire(&session_spin); - + spinlock_acquire(&session->ses_lock); + if (session->state != SESSION_STATE_READY) { - session_free(session); + spinlock_release(&session->ses_lock); + session->client = NULL; + session_free(session); client_dcb->session = NULL; session = NULL; LOGIF(LE, (skygw_log_write_flush( @@ -212,10 +231,33 @@ session_alloc(SERVICE *service, DCB *client_dcb) else { session->state = SESSION_STATE_ROUTER_READY; - session->next = allSessions; + spinlock_release(&session->ses_lock); + spinlock_acquire(&session_spin); + /** Assign a session id and increase */ + session->ses_id = ++session_id; + session->next = allSessions; allSessions = session; spinlock_release(&session_spin); - atomic_add(&service->stats.n_sessions, 1); + + if (session->client->user == NULL) + { + LOGIF(LT, (skygw_log_write( + LOGFILE_TRACE, + "Started session [%lu] for %s service ", + session->ses_id, + service->name))); + } + else + { + LOGIF(LT, (skygw_log_write( + LOGFILE_TRACE, + "Started %s client session [%lu] for '%s' from %s", + service->name, + session->ses_id, + session->client->user, + session->client->remote))); + } + atomic_add(&service->stats.n_sessions, 1); atomic_add(&service->stats.n_current, 1); CHK_SESSION(session); } @@ -223,6 +265,41 @@ return_session: return session; } +/** + * Enable specified logging for the current session and increase logger + * counter. + * Generic logging setting has precedence over session-specific setting. + * + * @param ses session + * @param id logfile identifier + */ +void session_enable_log( + SESSION* ses, + logfile_id_t id) +{ + ses->ses_enabled_logs |= id; + atomic_add((int *)&log_ses_count[id], 1); +} + +/** + * Disable specified logging for the current session and decrease logger + * counter. + * Generic logging setting has precedence over session-specific setting. 
+ * + * @param ses session + * @param id logfile identifier + */ +void session_disable_log( + SESSION* ses, + logfile_id_t id) +{ + if (ses->ses_enabled_logs & id) + { + ses->ses_enabled_logs &= ~id; + atomic_add((int *)&log_ses_count[id], -1); + } +} + /** * Link a session to a DCB. * @@ -266,12 +343,16 @@ int session_unlink_dcb( if (nlink == 0) { - session->state = SESSION_STATE_FREE; + session->state = SESSION_STATE_TO_BE_FREED; } if (dcb != NULL) { - dcb->session = NULL; + if (session->client == dcb) + { + session->client = NULL; + } + dcb->session = NULL; } spinlock_release(&session->ses_lock); @@ -292,7 +373,6 @@ bool session_free( int i; CHK_SESSION(session); - /*< * Remove one reference. If there are no references left, * free session. @@ -323,8 +403,12 @@ bool session_free( spinlock_release(&session_spin); atomic_add(&session->service->stats.n_current, -1); - /* Free router_session and session */ - if (session->router_session) { + /** + * If session is not child of some other session, free router_session. + * Otherwise let the parent free it. 
+ */ + if (!session->ses_is_child && session->router_session) + { session->service->router->freeSession( session->service->router_instance, session->router_session); @@ -347,7 +431,27 @@ bool session_free( } free(session->filters); } - free(session); + + LOGIF(LT, (skygw_log_write( + LOGFILE_TRACE, + "Stopped %s client session [%lu]", + session->service->name, + session->ses_id))); + + /** Disable trace and decrease trace logger counter */ + session_disable_log(session, LT); + + /** If session doesn't have parent referencing to it, it can be freed */ + if (!session->ses_is_child) + { + session->state = SESSION_STATE_FREE; + + if (session->data) + { + free(session->data); + } + free(session); + } succp = true; return_succp : @@ -390,11 +494,15 @@ int rval = 0; void printSession(SESSION *session) { +struct tm result; +char timebuf[40]; + printf("Session %p\n", session); printf("\tState: %s\n", session_state(session->state)); printf("\tService: %s (%p)\n", session->service->name, session->service); printf("\tClient DCB: %p\n", session->client); - printf("\tConnected: %s", asctime(localtime(&session->stats.connect))); + printf("\tConnected: %s", + asctime_r(localtime_r(&session->stats.connect, &result), timebuf)); } /** @@ -491,19 +599,22 @@ int norouter = 0; void dprintAllSessions(DCB *dcb) { -SESSION *ptr; +struct tm result; +char timebuf[40]; +SESSION *ptr; spinlock_acquire(&session_spin); ptr = allSessions; while (ptr) { - dcb_printf(dcb, "Session %p\n", ptr); + dcb_printf(dcb, "Session %d (%p)\n",ptr->ses_id, ptr); dcb_printf(dcb, "\tState: %s\n", session_state(ptr->state)); dcb_printf(dcb, "\tService: %s (%p)\n", ptr->service->name, ptr->service); dcb_printf(dcb, "\tClient DCB: %p\n", ptr->client); if (ptr->client && ptr->client->remote) dcb_printf(dcb, "\tClient Address: %s\n", ptr->client->remote); - dcb_printf(dcb, "\tConnected: %s", asctime(localtime(&ptr->stats.connect))); + dcb_printf(dcb, "\tConnected: %s", + asctime_r(localtime_r(&ptr->stats.connect, 
&result), timebuf)); ptr = ptr->next; } spinlock_release(&session_spin); @@ -521,15 +632,18 @@ SESSION *ptr; void dprintSession(DCB *dcb, SESSION *ptr) { -int i; +struct tm result; +char buf[30]; +int i; - dcb_printf(dcb, "Session %p\n", ptr); + dcb_printf(dcb, "Session %d (%p)\n",ptr->ses_id, ptr); dcb_printf(dcb, "\tState: %s\n", session_state(ptr->state)); dcb_printf(dcb, "\tService: %s (%p)\n", ptr->service->name, ptr->service); dcb_printf(dcb, "\tClient DCB: %p\n", ptr->client); if (ptr->client && ptr->client->remote) dcb_printf(dcb, "\tClient Address: %s\n", ptr->client->remote); - dcb_printf(dcb, "\tConnected: %s", asctime(localtime(&ptr->stats.connect))); + dcb_printf(dcb, "\tConnected: %s", + asctime_r(localtime_r(&ptr->stats.connect, &result), buf)); if (ptr->n_filters) { for (i = 0; i < ptr->n_filters; i++) @@ -602,6 +716,15 @@ session_state(int state) return "Listener Session"; case SESSION_STATE_LISTENER_STOPPED: return "Stopped Listener Session"; +#ifdef SS_DEBUG + case SESSION_STATE_STOPPING: + return "Stopping session"; + case SESSION_STATE_TO_BE_FREED: + return "Session to be freed"; + case SESSION_STATE_FREE: + return "Freed session"; + +#endif default: return "Invalid State"; } @@ -668,9 +791,10 @@ int i; { LOGIF(LE, (skygw_log_write_flush( LOGFILE_ERROR, - "Failed to create filter '%s' for service '%s'.\n", - service->filters[i]->name, - service->name))); + "Error : Failed to create filter '%s' for " + "service '%s'.\n", + service->filters[i]->name, + service->name))); return 0; } session->filters[i].filter = service->filters[i]; @@ -777,3 +901,11 @@ session_getUser(SESSION *session) { return (session && session->client) ? session->client->user : NULL; } +/** + * Return the pointer to the list of all sessions. + * @return Pointer to the list of all sessions. 
+ */ +SESSION *get_all_sessions() +{ + return allSessions; +} diff --git a/server/core/spinlock.c b/server/core/spinlock.c index ce64042e3..871ec42bb 100644 --- a/server/core/spinlock.c +++ b/server/core/spinlock.c @@ -1,5 +1,5 @@ /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -13,11 +13,11 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * - * Copyright SkySQL Ab 2013 + * Copyright MariaDB Corporation Ab 2013-2014 */ /** - * @file spinlock.c - Spinlock operations for the SkySQL Gateway + * @file spinlock.c - Spinlock operations for the MariaDB Corporation MaxScale * * @verbatim * Revision History @@ -30,6 +30,7 @@ #include #include +#include /** * Initialise a spinlock. 
@@ -39,13 +40,13 @@ void spinlock_init(SPINLOCK *lock) { - lock->lock = 0; + lock->lock = 0; #if SPINLOCK_PROFILE - lock->spins = 0; - lock->acquired = 0; - lock->waiting = 0; - lock->max_waiting = 0; - lock->contended = 0; + lock->spins = 0; + lock->acquired = 0; + lock->waiting = 0; + lock->max_waiting = 0; + lock->contended = 0; #endif } @@ -62,24 +63,30 @@ int spins = 0; atomic_add(&(lock->waiting), 1); #endif - while (atomic_add(&(lock->lock), 1) != 0) - { - atomic_add(&(lock->lock), -1); + +#ifdef __GNUC__ + while (__sync_lock_test_and_set(&(lock->lock), 1)) + while (lock->lock) { +#else + while (atomic_add(&(lock->lock), 1) != 0) + { + atomic_add(&(lock->lock), -1); +#endif #if SPINLOCK_PROFILE atomic_add(&(lock->spins), 1); spins++; #endif } #if SPINLOCK_PROFILE - if (spins) - { - lock->contended++; - if (lock->maxspins < spins) - lock->maxspins = spins; - } - lock->acquired++; - lock->owner = THREAD_SHELF(); - atomic_add(&(lock->waiting), -1); + if (spins) + { + lock->contended++; + if (lock->maxspins < spins) + lock->maxspins = spins; + } + lock->acquired++; + lock->owner = THREAD_SHELF(); + atomic_add(&(lock->waiting), -1); #endif } @@ -92,16 +99,20 @@ int spins = 0; int spinlock_acquire_nowait(SPINLOCK *lock) { - if (atomic_add(&(lock->lock), 1) != 0) - { - atomic_add(&(lock->lock), -1); - return FALSE; - } -#if SPINLOCK_PROFILE - lock->acquired++; - lock->owner = THREAD_SHELF(); +#ifdef __GNUC__ + if (__sync_lock_test_and_set(&(lock->lock), 1)) return FALSE; +#else + if (atomic_add(&(lock->lock), 1) != 0) + { + atomic_add(&(lock->lock), -1); + return FALSE; + } #endif - return TRUE; +#if SPINLOCK_PROFILE + lock->acquired++; + lock->owner = THREAD_SHELF(); +#endif + return TRUE; } /* @@ -112,11 +123,16 @@ spinlock_acquire_nowait(SPINLOCK *lock) void spinlock_release(SPINLOCK *lock) { -#if SPINLOCK_PROFILE - if (lock->waiting > lock->max_waiting) - lock->max_waiting = lock->waiting; + #if SPINLOCK_PROFILE + if (lock->waiting > lock->max_waiting) + 
lock->max_waiting = lock->waiting; +#endif +#ifdef __GNUC__ + __sync_synchronize(); /* Memory barrier. */ + lock->lock = 0; +#else + atomic_add(&(lock->lock), -1); #endif - atomic_add(&(lock->lock), -1); } /** diff --git a/server/core/test/CMakeLists.txt b/server/core/test/CMakeLists.txt index 914fe277e..2a0977088 100644 --- a/server/core/test/CMakeLists.txt +++ b/server/core/test/CMakeLists.txt @@ -1,13 +1,56 @@ +add_executable(test_mysql_users test_mysql_users.c) add_executable(test_hash testhash.c) +add_executable(test_hint testhint.c) add_executable(test_spinlock testspinlock.c) add_executable(test_filter testfilter.c) +add_executable(test_buffer testbuffer.c) +add_executable(test_dcb testdcb.c) +add_executable(test_modutil testmodutil.c) +add_executable(test_poll testpoll.c) +add_executable(test_service testservice.c) +add_executable(test_server testserver.c) +add_executable(test_users testusers.c) add_executable(test_adminusers testadminusers.c) +add_executable(testmemlog testmemlog.c) +target_link_libraries(test_mysql_users MySQLClient fullcore) target_link_libraries(test_hash fullcore) +target_link_libraries(test_hint fullcore) target_link_libraries(test_spinlock fullcore) target_link_libraries(test_filter fullcore) +target_link_libraries(test_buffer fullcore) +target_link_libraries(test_dcb fullcore) +target_link_libraries(test_modutil fullcore) +target_link_libraries(test_poll fullcore) +target_link_libraries(test_service fullcore) +target_link_libraries(test_server fullcore) +target_link_libraries(test_users fullcore) target_link_libraries(test_adminusers fullcore) +target_link_libraries(testmemlog fullcore) +add_test(testMySQLUsers test_mysql_users) add_test(TestHash test_hash) +add_test(TestHint test_hint) add_test(TestSpinlock test_spinlock) add_test(TestFilter test_filter) +add_test(TestBuffer test_buffer) +add_test(TestDCB test_dcb) +add_test(TestModutil test_modutil) +add_test(TestPoll test_poll) +add_test(TestService test_service) 
+add_test(TestServer test_server) +add_test(TestUsers test_users) add_test(TestAdminUsers test_adminusers) - +add_test(TestMemlog testmemlog) +set_tests_properties(testMySQLUsers + TestHash + TestHint + TestSpinlock + TestFilter + TestBuffer + TestDCB + TestModutil + TestPoll + TestService + TestServer + TestUsers + TestAdminUsers + TestMemlog PROPERTIES ENVIRONMENT MAXSCALE_HOME=${CMAKE_BINARY_DIR}/) diff --git a/server/core/test/makefile b/server/core/test/makefile index 14f2828f2..e3d7a4ce5 100644 --- a/server/core/test/makefile +++ b/server/core/test/makefile @@ -18,10 +18,11 @@ LDFLAGS=-rdynamic -L$(LOGPATH) -L$(EMBEDDED_LIB) \ -Wl,-rpath,$(LOGPATH) -Wl,-rpath,$(UTILSPATH) \ -Wl,-rpath,$(EMBEDDED_LIB) -LIBS= -lz -lm -lcrypt -lcrypto -ldl -laio -lrt -pthread -llog_manager \ - -L../../inih/extra -linih -lssl -lstdc++ -lmysqld +LIBS= -L$(EMBEDDED_LIB) -lmysqld \ + -lz -lm -lcrypt -lcrypto -ldl -laio -lrt -pthread -llog_manager \ + -L../../inih/extra -linih -lssl -lstdc++ -TESTS=testhash testspinlock testfilter testadminusers +TESTS=testhash testspinlock testbuffer testmodutil testpoll testservice testdcb testfilter testadminusers testmemlog cleantests: - $(DEL) *.o @@ -41,12 +42,52 @@ testhash: testhash.c -I$(ROOT_PATH)/utils \ testhash.c ../hashtable.o ../atomic.o ../spinlock.o -o testhash +testmysqlusers: test_mysql_users.c + $(CC) $(CFLAGS) \ + -I$(ROOT_PATH)/server/include \ + -I$(ROOT_PATH)/utils \ + test_mysql_users.c ../hashtable.o ../atomic.o ../modutil.o ../spinlock.o -o testmysqlusers + + testspinlock: testspinlock.c $(CC) $(CFLAGS) \ -I$(ROOT_PATH)/server/include \ -I$(ROOT_PATH)/utils \ testspinlock.c ../spinlock.o ../atomic.o ../thread.o -o testspinlock +testmodutil: testmodutil.c + $(CC) $(CFLAGS) \ + -I$(ROOT_PATH)/server/include \ + -I$(ROOT_PATH)/utils \ + testmodutil.c ../modutil.o ../buffer.o ../atomic.o -o testmodutil + +testbuffer: testbuffer.c + $(CC) $(CFLAGS) \ + -I$(ROOT_PATH)/server/include \ + -I$(ROOT_PATH)/utils \ + testbuffer.c 
../buffer.o ../atomic.o -o testbuffer + +testpoll: testpoll.c + $(CC) $(CFLAGS) $(LDFLAGS) \ + -I$(ROOT_PATH)/server/include \ + -I$(ROOT_PATH)/utils \ + -I$(ROOT_PATH)/log_manager \ + testpoll.c libcore.a $(UTILSPATH)/skygw_utils.o $(LIBS) -o testpoll + +testservice: testservice.c + $(CC) $(CFLAGS) $(LDFLAGS) \ + -I$(ROOT_PATH)/server/include \ + -I$(ROOT_PATH)/utils \ + -I$(ROOT_PATH)/log_manager \ + testservice.c libcore.a $(UTILSPATH)/skygw_utils.o $(LIBS) -o testservice + +testdcb: testdcb.c + $(CC) $(CFLAGS) $(LDFLAGS) \ + -I$(ROOT_PATH)/server/include \ + -I$(ROOT_PATH)/utils \ + -I$(ROOT_PATH)/log_manager \ + testdcb.c libcore.a $(UTILSPATH)/skygw_utils.o $(LIBS) -o testdcb + testfilter: testfilter.c libcore.a $(CC) $(CFLAGS) $(LDFLAGS) \ -I$(ROOT_PATH)/server/include \ @@ -59,6 +100,13 @@ testadminusers: testadminusers.c libcore.a -I$(ROOT_PATH)/utils \ testadminusers.c libcore.a $(UTILSPATH)/skygw_utils.o $(LIBS) -o testadminusers +testmemlog: testmemlog.c libcore.a + $(CC) $(CFLAGS) $(LDFLAGS) \ + -I$(ROOT_PATH)/server/include \ + -I$(ROOT_PATH)/utils \ + testmemlog.c libcore.a $(UTILSPATH)/skygw_utils.o $(LIBS) -o testmemlog + + libcore.a: ../*.o ar rv libcore.a ../*.o diff --git a/server/core/test/makefile.mysql_users b/server/core/test/makefile.mysql_users deleted file mode 100644 index 074188555..000000000 --- a/server/core/test/makefile.mysql_users +++ /dev/null @@ -1,32 +0,0 @@ -# cleantests - clean local and subdirectories' tests -# buildtests - build all local and subdirectories' tests -# runtests - run all local tests -# testall - clean, build and run local and subdirectories' tests - -include ../../../build_gateway.inc -include ../../../makefile.inc - -CC=cc -DEBUG=Y -cleantests: - - $(DEL) *.o - - $(DEL) test_mysql_users - - $(DEL) *~ - -testall: cleantests buildtests runtests - -buildtests : - $(CC) $(CFLAGS) \ - -I$(ROOT_PATH)/server/include \ - -I$(ROOT_PATH)/utils \ - -I$(ROOT_PATH)/log_manager \ - test_mysql_users.c ../secrets.o 
../service.o ../gwbitmask.o ../load_utils.o ../session.o ../poll.o ../dcb.o ../utils.o ../buffer.o ../gw_utils.o ../hashtable.o ../atomic.o ../spinlock.o ../users.o ../dbusers.o ../../../utils/skygw_utils.o ../../../log_manager/log_manager.o -o test_mysql_users -L$(EMBEDDED_LIB) -lmysqlclient -lpthread -lssl -lz -lm -lcrypt -lcrypto -ldl -laio -lrt -lstdc++ -runtests: - @echo "" - @echo "-------------------------------" - @echo $(shell date) - @echo "Test MaxScale core" - @echo "-------------------------------" - @echo "" - @echo "MaxSclale Load MySQL users" - @./test_mysql_users diff --git a/server/core/test/test_mysql_users.c b/server/core/test/test_mysql_users.c index 97476b3ae..ccf52dbf3 100644 --- a/server/core/test/test_mysql_users.c +++ b/server/core/test/test_mysql_users.c @@ -1,5 +1,5 @@ /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -13,7 +13,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
* - * Copyright SkySQL Ab 2013 + * Copyright MariaDB Corporation Ab 2013-2014 */ /** @@ -24,6 +24,7 @@ * Date Who Description * 14/02/2014 Massimiliano Pinto Initial implementation * 17/02/2014 Massimiliano Pinto Added check ipv4 + * 03/10/2014 Massimiliano Pinto Added check for wildcard hosts * * @endverbatim */ @@ -39,6 +40,7 @@ #include #include #include +#include #include @@ -51,9 +53,24 @@ int set_and_get_single_mysql_users_ipv4(char *username, unsigned long ipv4, char USERS *mysql_users; char ret_ip[200]=""; char *fetch_data; + char *db=""; + DCB *dcb; + SERVICE *service; unsigned long fix_ipv4; + dcb = dcb_alloc(DCB_ROLE_INTERNAL); + + if (dcb == NULL) { + fprintf(stderr, "dcb_alloc() failed\n"); + return 1; + } + if ((service = (SERVICE *)calloc(1, sizeof(SERVICE))) == NULL) { + fprintf(stderr, "service_alloc() failed\n"); + dcb_free(dcb); + return 1; + } + if (ipv4 > UINT_MAX) { fix_ipv4 = UINT_MAX; } else { @@ -69,6 +86,7 @@ int set_and_get_single_mysql_users_ipv4(char *username, unsigned long ipv4, char key.user = username; memcpy(&key.ipv4, &serv_addr, sizeof(serv_addr)); + key.resource = db; inet_ntop(AF_INET, &(serv_addr).sin_addr, ret_ip, INET_ADDRSTRLEN); @@ -77,6 +95,9 @@ int set_and_get_single_mysql_users_ipv4(char *username, unsigned long ipv4, char /* add user@host as key and passwd as value in the MySQL users hash table */ if (!mysql_users_add(mysql_users, &key, password)) { fprintf(stderr, "Failed adding %s@%s(%lu)\n", username, ret_ip, fix_ipv4); + users_free(mysql_users); + free(service); + dcb_free(dcb); return 1; } @@ -85,12 +106,15 @@ int set_and_get_single_mysql_users_ipv4(char *username, unsigned long ipv4, char find_key.user = username; memcpy(&(serv_addr).sin_addr.s_addr, &ipv4, sizeof(ipv4)); + find_key.resource = db; memcpy(&find_key.ipv4, &serv_addr, sizeof(serv_addr)); fetch_data = mysql_users_fetch(mysql_users, &find_key); users_free(mysql_users); + free(service); + dcb_free(dcb); if (!fetch_data) return 1; @@ -101,12 +125,13 @@ 
int set_and_get_single_mysql_users_ipv4(char *username, unsigned long ipv4, char int set_and_get_single_mysql_users(char *username, char *hostname, char *password) { struct sockaddr_in serv_addr; MYSQL_USER_HOST key; - MYSQL_USER_HOST find_key; USERS *mysql_users; char ret_ip[200]=""; char *fetch_data; + char *db=""; mysql_users = mysql_users_alloc(); + /* prepare the user@host data struct */ memset(&serv_addr, 0, sizeof(serv_addr)); memset(&key, 0, sizeof(key)); @@ -115,12 +140,14 @@ int set_and_get_single_mysql_users(char *username, char *hostname, char *passwor if (hostname) if(!setipaddress(&serv_addr.sin_addr, hostname)) { fprintf(stderr, "setipaddress failed for host [%s]\n", hostname); + users_free(mysql_users); return 1; } if (username) key.user = username; memcpy(&key.ipv4, &serv_addr, sizeof(serv_addr)); + key.resource = db; inet_ntop(AF_INET, &(serv_addr).sin_addr, ret_ip, INET_ADDRSTRLEN); @@ -129,19 +156,21 @@ int set_and_get_single_mysql_users(char *username, char *hostname, char *passwor /* add user@host as key and passwd as value in the MySQL users hash table */ if (!mysql_users_add(mysql_users, &key, password)) { fprintf(stderr, "mysql_users_add() failed for %s@%s\n", username, hostname); + users_free(mysql_users); return 1; } memset(&serv_addr, 0, sizeof(serv_addr)); - memset(&find_key, 0, sizeof(key)); if (hostname) if(!setipaddress(&serv_addr.sin_addr, hostname)) { fprintf(stderr, "setipaddress failed for host [%s]\n", hostname); + users_free(mysql_users); return 1; } key.user = username; memcpy(&key.ipv4, &serv_addr, sizeof(serv_addr)); + key.resource = db; fetch_data = mysql_users_fetch(mysql_users, &key); @@ -153,6 +182,92 @@ int set_and_get_single_mysql_users(char *username, char *hostname, char *passwor return 0; } +int set_and_get_mysql_users_wildcards(char *username, char *hostname, char *password, char *from, char *anydb, char *db, char *db_from) { + USERS *mysql_users; + int ret = -1; + struct sockaddr_in client_addr; + DCB *dcb; + 
SERVICE *service; + MYSQL_session *data; + + dcb = dcb_alloc(DCB_ROLE_INTERNAL); + + if (dcb == NULL) { + fprintf(stderr, "dcb_alloc() failed\n"); + return ret; + } + if ((service = (SERVICE *)calloc(1, sizeof(SERVICE))) == NULL) { + fprintf(stderr, "service_alloc() failed\n"); + dcb_free(dcb); + return ret; + } + + memset(&client_addr, 0, sizeof(client_addr)); + + if (hostname) { + if(!setipaddress(&client_addr.sin_addr, from)) { + fprintf(stderr, "setipaddress failed for host [%s]\n", from); + free(service); + dcb_free(dcb); + return ret; + } + } + + if ((data = (MYSQL_session *) calloc(1, sizeof(MYSQL_session))) == NULL) { + fprintf(stderr, "MYSQL_session alloc failed\n"); + free(service); + dcb_free(dcb); + return ret; + } + + + /* client IPv4 in raw data*/ + memcpy(&dcb->ipv4, (struct sockaddr_in *)&client_addr, sizeof(struct sockaddr_in)); + + dcb->service = service; + + mysql_users = mysql_users_alloc(); + + service->users = mysql_users; + + if (db_from != NULL) + strncpy(data->db, db_from,MYSQL_DATABASE_MAXLEN); + else + strncpy(data->db, "",MYSQL_DATABASE_MAXLEN); + + /* freed by dcb_free(dcb) */ + dcb->data = data; + + // the routine returns 1 on success + if (anydb != NULL) { + if (strcmp(anydb, "N") == 0) { + ret = add_mysql_users_with_host_ipv4(mysql_users, username, hostname, password, anydb, db); + } else if (strcmp(anydb, "Y") == 0) { + ret = add_mysql_users_with_host_ipv4(mysql_users, username, hostname, password, "Y", ""); + } else { + ret = add_mysql_users_with_host_ipv4(mysql_users, username, hostname, password, "N", NULL); + } + } else { + ret = add_mysql_users_with_host_ipv4(mysql_users, username, hostname, password, "N", NULL); + } + + if (ret == 0) { + fprintf(stderr, "add_mysql_users_with_host_ipv4 (%s@%s, %s) FAILED\n", username, hostname, password); + } else { + unsigned char db_passwd[100]=""; + + dcb->remote=strdup(from); + + // returns 0 on success + ret = gw_find_mysql_user_password_sha1(username, db_passwd, dcb); + } + + 
users_free(mysql_users); + free(service); + dcb_free(dcb); + + return ret; +} int main() { int ret; @@ -166,6 +281,7 @@ int main() { fprintf(stderr, "%s\n", asctime(localtime(&t))); fprintf(stderr, ">>> Started MySQL load, set & get users@host\n"); + ret = set_and_get_single_mysql_users("pippo", "localhost", "xyz"); assert(ret == 0); ret = set_and_get_single_mysql_users("pippo", "127.0.0.2", "xyz"); @@ -180,6 +296,7 @@ int main() { assert(ret == 1); ret = set_and_get_single_mysql_users(NULL, NULL, NULL); assert(ret == 1); + ret = set_and_get_single_mysql_users_ipv4("negative", -467295, "_ncd"); assert(ret == 1); ret = set_and_get_single_mysql_users_ipv4("extra", 0xFFFFFFFFFUL * 100, "JJcd"); @@ -189,19 +306,122 @@ int main() { ret = set_and_get_single_mysql_users_ipv4(NULL, '\0', "JJcd"); assert(ret == 1); + for (i = 256*256*256; i <= 256*256*256 + 5; i++) { char user[129] = ""; snprintf(user, 128, "user_%i", k); ret = set_and_get_single_mysql_users_ipv4(user, i, "JJcd"); + assert(ret == 0); k++; } + ret = set_and_get_mysql_users_wildcards("pippo", "%", "one", "127.0.0.1", NULL, NULL, NULL); + if (ret) fprintf(stderr, "\t-- Expecting no match\n"); + assert(ret == 1); + + ret = set_and_get_mysql_users_wildcards("pippo", "%", "", "127.0.0.1", NULL, NULL, NULL); + if (ret) fprintf(stderr, "\t-- Expecting no match\n"); + assert(ret == 1); + + ret = set_and_get_mysql_users_wildcards("pippo", "%", "two", "192.168.2.2", NULL, NULL, NULL); + if (!ret) fprintf(stderr, "\t-- Expecting ok\n"); + assert(ret == 0); + + ret = set_and_get_mysql_users_wildcards("pippo", "192.168.4.%", "ffoo", "192.168.2.2", NULL, NULL, NULL); + if (ret) fprintf(stderr, "\t-- Expecting no match\n"); + assert(ret == 1); + + ret = set_and_get_mysql_users_wildcards("pippo", "192.168.%.%", "foo", "192.168.2.2", NULL, NULL, NULL); + if (!ret) fprintf(stderr, "\t-- Expecting ok\n"); + assert(ret == 0); + + ret = set_and_get_mysql_users_wildcards("pippo", "192.%.%.%", "foo", "192.68.0.2", NULL, NULL, 
NULL); + if (!ret) fprintf(stderr, "\t-- Expecting ok\n"); + assert(ret == 0); + + ret = set_and_get_mysql_users_wildcards("pippo", "192.%.%.%", "foo", "192.0.0.2", "Y", NULL, "cossa"); + if (!ret) fprintf(stderr, "\t-- Expecting ok\n"); + assert(ret == 0); + + fprintf(stderr, "Adding pippo, 192.%%.%%.%%, foo, 192.0.0.2, N, NULL, ragione\n"); + ret = set_and_get_mysql_users_wildcards("pippo", "192.%.%.%", "foo", "192.0.0.2", "N", NULL, "ragione"); + if (!ret) fprintf(stderr, "\t-- Expecting no match\n"); + assert(ret == 1); + + ret = set_and_get_mysql_users_wildcards("pippo", "192.0.%.%", "foo", "192.2.0.2", NULL, NULL, NULL); + if (ret) fprintf(stderr, "\t-- Expecting no match\n"); + assert(ret == 1); + + ret = set_and_get_mysql_users_wildcards("pippo", "192.0.0.1", "foo", "192.0.0.2", NULL, NULL, NULL); + if (ret) fprintf(stderr, "\t-- Expecting no match\n"); + assert(ret == 1); + + ret = set_and_get_mysql_users_wildcards("pippo", "192.0.%.%", "foo", "192.1.0.2", NULL, NULL, NULL); + if (ret) fprintf(stderr, "\t-- Expecting no match\n"); + assert(ret == 1); + + ret = set_and_get_mysql_users_wildcards("pippo", "192.0.0.%", "foo", "192.3.2.1", NULL, NULL, NULL); + if (ret) fprintf(stderr, "\t-- Expecting no match\n"); + assert(ret == 1); + + ret = set_and_get_mysql_users_wildcards("pippo", "192.0.%.%", "foo", "192.3.2.1", "Y", NULL, NULL); + if (ret) fprintf(stderr, "\t-- Expecting no match\n"); + assert(ret == 1); + + ret = set_and_get_mysql_users_wildcards("pippo", "192.%.%.%", "foo", "192.254.254.245", "N", "matto", "matto"); + if (!ret) fprintf(stderr, "\t-- Expecting ok\n"); + assert(ret == 0); + + ret = set_and_get_mysql_users_wildcards("pippo", "192.%.%.%", "foo", "192.254.254.245", "N", "matto", "fatto"); + if (!ret) fprintf(stderr, "\t-- Expecting no match\n"); + assert(ret == 1); + + ret = set_and_get_mysql_users_wildcards("pippo", "192.%.%.%", "foo", "192.254.254.245", "Y", "matto", "fatto"); + if (!ret) fprintf(stderr, "\t-- Expecting ok\n"); + 
assert(ret == 0); + + ret = set_and_get_mysql_users_wildcards("pippo", "192.%.%.%", "foo", "192.254.254.245", "Y", "", "fto"); + if (!ret) fprintf(stderr, "\t-- Expecting ok\n"); + assert(ret == 0); + + ret = set_and_get_mysql_users_wildcards("pippo", "192.%.%.%", "foo", "192.254.254.245", "Y", NULL, "grewao"); + if (!ret) fprintf(stderr, "\t-- Expecting ok\n"); + assert(ret == 0); + + ret = set_and_get_mysql_users_wildcards("pippo", "192.%.%.%", "foo", "192.254.254.242", NULL, NULL, NULL); + if (!ret) fprintf(stderr, "\t-- Expecting ok\n"); + assert(ret == 0); + + ret = set_and_get_mysql_users_wildcards("pippo", "192.%", "foo", "192.254.254.242", NULL, NULL, NULL); + if (!ret) fprintf(stderr, "\t-- Expecting ok\n"); + assert(ret == 0); + + ret = set_and_get_mysql_users_wildcards("pippo", "192.%.%", "foo", "192.254.254.242", NULL, NULL, NULL); + if (!ret) fprintf(stderr, "\t-- Expecting ok\n"); + assert(ret == 0); + + ret = set_and_get_mysql_users_wildcards("pippo", "192.254.%", "foo", "192.254.254.242", NULL, NULL, NULL); + if (!ret) fprintf(stderr, "\t-- Expecting ok\n"); + assert(ret == 0); + + ret = set_and_get_mysql_users_wildcards("pippo", "192.254.%", "foo", "192.254.0.242", NULL, NULL, NULL); + if (!ret) fprintf(stderr, "\t-- Expecting ok\n"); + assert(ret == 0); + + ret = set_and_get_mysql_users_wildcards("riccio", "192.0.0.%", "foo", "192.134.0.2", NULL, NULL, NULL); + if (ret) fprintf(stderr, "\t-- Expecting no match\n"); + assert(ret == 1); + + ret = set_and_get_mysql_users_wildcards("pippo", "192.%.%.%", "12345678901234567890123456789012345678901234", "192.254.254.245", "Y", NULL, NULL); + if (!ret) fprintf(stderr, "\t-- Expecting ok\n"); + assert(ret == 0); + fprintf(stderr, "----------------\n"); fprintf(stderr, "<<< Test completed\n"); time(&t); fprintf(stderr, "%s\n", asctime(localtime(&t))); - return ret; + return 0; } diff --git a/server/core/test/testadminusers.c b/server/core/test/testadminusers.c index 00ec3a452..7dfd9ef9c 100644 --- 
a/server/core/test/testadminusers.c +++ b/server/core/test/testadminusers.c @@ -13,7 +13,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * - * Copyright SkySQL Ab 2014 + * Copyright MariaDB Corporation Ab 2014 */ /** @@ -266,6 +266,16 @@ int main(int argc, char **argv) { int result = 0; +char *home, buf[1024]; + + /* Unlink any existing password file before running this test */ + if ((home = getenv("MAXSCALE_HOME")) == NULL || strlen(home) >= 1024) + home = "/usr/local/skysql"; + sprintf(buf, "%s/etc/passwd", home); + if(!is_valid_posix_path(buf)) + exit(1); + if (strcmp(buf, "/etc/passwd") != 0) + unlink(buf); result += test1(); result += test2(); @@ -273,6 +283,9 @@ int result = 0; result += test4(); result += test5(); + /* Add the default user back so other tests can use it */ + admin_add_user("admin", "skysql"); + exit(result); } diff --git a/server/core/test/testbuffer.c b/server/core/test/testbuffer.c new file mode 100644 index 000000000..73d71dc27 --- /dev/null +++ b/server/core/test/testbuffer.c @@ -0,0 +1,166 @@ +/* + * This file is distributed as part of MaxScale. It is free + * software: you can redistribute it and/or modify it under the terms of the + * GNU General Public License as published by the Free Software Foundation, + * version 2. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 51 + * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Copyright MariaDB Corporation Ab 2014 + */ + +/** + * + * @verbatim + * Revision History + * + * Date Who Description + * 29-08-2014 Martin Brampton Initial implementation + * + * @endverbatim + */ + +#include +#include +#include + +#include +#include + +/** + * test1 Allocate a buffer and do lots of things + * + */ +static int +test1() +{ +GWBUF *buffer, *extra, *clone, *partclone, *transform; +HINT *hint; +int size = 100; +int bite1 = 35; +int bite2 = 60; +int bite3 = 10; +int buflen; + + /* Single buffer tests */ + ss_dfprintf(stderr, + "testbuffer : creating buffer with data size %d bytes", + size); + buffer = gwbuf_alloc(size); + ss_dfprintf(stderr, "\t..done\nAllocated buffer of size %d.", size); + buflen = GWBUF_LENGTH(buffer); + ss_dfprintf(stderr, "\nBuffer length is now %d", buflen); + ss_info_dassert(size == buflen, "Incorrect buffer size"); + ss_info_dassert(0 == GWBUF_EMPTY(buffer), "Buffer should not be empty"); + ss_info_dassert(GWBUF_IS_TYPE_UNDEFINED(buffer), "Buffer type should be undefined"); + ss_dfprintf(stderr, "\t..done\nSet a hint for the buffer"); + char* name = strdup("name"); + hint = hint_create_parameter(NULL, name, "value"); + free(name); + gwbuf_add_hint(buffer, hint); + ss_info_dassert(hint == buffer->hint, "Buffer should point to first and only hint"); + ss_dfprintf(stderr, "\t..done\nSet a property for the buffer"); + gwbuf_add_property(buffer, "name", "value"); + ss_info_dassert(0 == strcmp("value", gwbuf_get_property(buffer, "name")), "Should now have correct property"); + strcpy(GWBUF_DATA(buffer), "The quick brown fox jumps over the lazy dog"); + ss_dfprintf(stderr, "\t..done\nLoad some data into the buffer"); + ss_info_dassert('q' == GWBUF_DATA_CHAR(buffer, 4), "Fourth character of buffer must be 'q'"); + ss_info_dassert(-1 == GWBUF_DATA_CHAR(buffer, 105), "Hundred and fifth character of buffer must return -1"); + ss_info_dassert(0 == GWBUF_IS_SQL(buffer), "Must say buffer is not SQL, as it does not have marker"); + 
strcpy(GWBUF_DATA(buffer), "1234\x03SELECT * FROM sometable"); + ss_dfprintf(stderr, "\t..done\nLoad SQL data into the buffer"); + ss_info_dassert(1 == GWBUF_IS_SQL(buffer), "Must say buffer is SQL, as it does have marker"); + transform = gwbuf_clone_transform(buffer, GWBUF_TYPE_PLAINSQL); + ss_dfprintf(stderr, "\t..done\nAttempt to transform buffer to plain SQL - should fail"); + ss_info_dassert(NULL == transform, "Buffer cannot be transformed to plain SQL"); + gwbuf_set_type(buffer, GWBUF_TYPE_MYSQL); + ss_dfprintf(stderr, "\t..done\nChanged buffer type to MySQL"); + ss_info_dassert(GWBUF_IS_TYPE_MYSQL(buffer), "Buffer type changed to MySQL"); + transform = gwbuf_clone_transform(buffer, GWBUF_TYPE_PLAINSQL); + ss_dfprintf(stderr, "\t..done\nAttempt to transform buffer to plain SQL - should succeed"); + ss_info_dassert((NULL != transform) && (GWBUF_IS_TYPE_PLAINSQL(transform)), "Transformed buffer is plain SQL"); + clone = gwbuf_clone(buffer); + ss_dfprintf(stderr, "\t..done\nCloned buffer"); + buflen = GWBUF_LENGTH(clone); + ss_dfprintf(stderr, "\nCloned buffer length is now %d", buflen); + ss_info_dassert(size == buflen, "Incorrect buffer size"); + ss_info_dassert(0 == GWBUF_EMPTY(clone), "Cloned buffer should not be empty"); + ss_dfprintf(stderr, "\t..done\n"); + gwbuf_free(clone); + ss_dfprintf(stderr, "Freed cloned buffer"); + ss_dfprintf(stderr, "\t..done\n"); + partclone = gwbuf_clone_portion(buffer, 25, 50); + buflen = GWBUF_LENGTH(partclone); + ss_dfprintf(stderr, "Part cloned buffer length is now %d", buflen); + ss_info_dassert(50 == buflen, "Incorrect buffer size"); + ss_info_dassert(0 == GWBUF_EMPTY(partclone), "Part cloned buffer should not be empty"); + ss_dfprintf(stderr, "\t..done\n"); + gwbuf_free(partclone); + ss_dfprintf(stderr, "Freed part cloned buffer"); + ss_dfprintf(stderr, "\t..done\n"); + buffer = gwbuf_consume(buffer, bite1); + ss_info_dassert(NULL != buffer, "Buffer should not be null"); + buflen = GWBUF_LENGTH(buffer); + 
ss_dfprintf(stderr, "Consumed %d bytes, now have %d, should have %d", bite1, buflen, size-bite1); + ss_info_dassert((size - bite1) == buflen, "Incorrect buffer size"); + ss_info_dassert(0 == GWBUF_EMPTY(buffer), "Buffer should not be empty"); + ss_dfprintf(stderr, "\t..done\n"); + buffer = gwbuf_consume(buffer, bite2); + ss_info_dassert(NULL != buffer, "Buffer should not be null"); + buflen = GWBUF_LENGTH(buffer); + ss_dfprintf(stderr, "Consumed %d bytes, now have %d, should have %d", bite2, buflen, size-bite1-bite2); + ss_info_dassert((size-bite1-bite2) == buflen, "Incorrect buffer size"); + ss_info_dassert(0 == GWBUF_EMPTY(buffer), "Buffer should not be empty"); + ss_dfprintf(stderr, "\t..done\n"); + buffer = gwbuf_consume(buffer, bite3); + ss_dfprintf(stderr, "Consumed %d bytes, should have null buffer", bite3); + ss_info_dassert(NULL == buffer, "Buffer should be null"); + + /* Buffer list tests */ + size = 100000; + buffer = gwbuf_alloc(size); + ss_dfprintf(stderr, "\t..done\nAllocated buffer of size %d.", size); + buflen = GWBUF_LENGTH(buffer); + ss_dfprintf(stderr, "\nBuffer length is now %d", buflen); + ss_info_dassert(size == buflen, "Incorrect buffer size"); + ss_info_dassert(0 == GWBUF_EMPTY(buffer), "Buffer should not be empty"); + ss_info_dassert(GWBUF_IS_TYPE_UNDEFINED(buffer), "Buffer type should be undefined"); + extra = gwbuf_alloc(size); + buflen = GWBUF_LENGTH(buffer); + ss_dfprintf(stderr, "\t..done\nAllocated extra buffer of size %d.", size); + ss_info_dassert(size == buflen, "Incorrect buffer size"); + buffer = gwbuf_append(buffer, extra); + buflen = gwbuf_length(buffer); + ss_dfprintf(stderr, "\t..done\nAppended extra buffer to original buffer to create list of size %d", buflen); + ss_info_dassert((size*2) == gwbuf_length(buffer), "Incorrect size for set of buffers"); + buffer = gwbuf_rtrim(buffer, 60000); + buflen = GWBUF_LENGTH(buffer); + ss_dfprintf(stderr, "\t..done\nTrimmed 60 bytes from buffer, now size is %d.", buflen); + 
ss_info_dassert((size-60000) == buflen, "Incorrect buffer size"); + buffer = gwbuf_rtrim(buffer, 60000); + buflen = GWBUF_LENGTH(buffer); + ss_dfprintf(stderr, "\t..done\nTrimmed another 60 bytes from buffer, now size is %d.", buflen); + ss_info_dassert(100000 == buflen, "Incorrect buffer size"); + ss_info_dassert(buffer == extra, "The buffer pointer should now point to the extra buffer"); + ss_dfprintf(stderr, "\t..done\n"); + + return 0; +} + +int main(int argc, char **argv) +{ +int result = 0; + + result += test1(); + + exit(result); +} + + diff --git a/server/core/test/testdcb.c b/server/core/test/testdcb.c new file mode 100644 index 000000000..2703763f0 --- /dev/null +++ b/server/core/test/testdcb.c @@ -0,0 +1,88 @@ +/* + * This file is distributed as part of MaxScale. It is free + * software: you can redistribute it and/or modify it under the terms of the + * GNU General Public License as published by the Free Software Foundation, + * version 2. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 51 + * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Copyright MariaDB Corporation Ab 2014 + */ + +/** + * + * @verbatim + * Revision History + * + * Date Who Description + * 05-09-2014 Martin Brampton Initial implementation + * + * @endverbatim + */ + +#include +#include +#include + +#include + +/** + * test1 Allocate a dcb and do lots of other things + * + */ +static int +test1() +{ +DCB *dcb, *extra, *clone; +int size = 100; +int bite1 = 35; +int bite2 = 60; +int bite3 = 10; +int buflen; + + /* Single buffer tests */ + ss_dfprintf(stderr, + "testdcb : creating buffer with type DCB_ROLE_SERVICE_LISTENER"); + dcb = dcb_alloc(DCB_ROLE_SERVICE_LISTENER); + printDCB(dcb); + ss_info_dassert(dcb_isvalid(dcb), "New DCB must be valid"); + ss_dfprintf(stderr, "\t..done\nAllocated dcb."); + clone = dcb_clone(dcb); + ss_dfprintf(stderr, "\t..done\nCloned dcb"); + printAllDCBs(); + ss_info_dassert(true, "Something is true"); + ss_dfprintf(stderr, "\t..done\n"); + dcb_free(dcb); + ss_dfprintf(stderr, "Freed original dcb"); + ss_info_dassert(!dcb_isvalid(dcb), "Freed DCB must not be valid"); + ss_dfprintf(stderr, "\t..done\nMake clone DCB a zombie"); + clone->state = DCB_STATE_NOPOLLING; + dcb_add_to_zombieslist(clone); + ss_info_dassert(dcb_get_zombies() == clone, "Clone DCB must be start of zombie list now"); + ss_dfprintf(stderr, "\t..done\nProcess the zombies list"); + dcb_process_zombies(0); + ss_dfprintf(stderr, "\t..done\nCheck clone no longer valid"); + ss_info_dassert(!dcb_isvalid(clone), "After zombie processing, clone DCB must not be valid"); + ss_dfprintf(stderr, "\t..done\n"); + + return 0; +} + +int main(int argc, char **argv) +{ +int result = 0; + + result += test1(); + + exit(result); +} + + + diff --git a/server/core/test/testfilter.c b/server/core/test/testfilter.c index 55f7fadf3..bf97d7897 100644 --- a/server/core/test/testfilter.c +++ b/server/core/test/testfilter.c @@ -13,7 +13,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 
02110-1301 USA. * - * Copyright SkySQL Ab 2014 + * Copyright MariaDB Corporation Ab 2014 */ /** diff --git a/server/core/test/testgwbitmask.c b/server/core/test/testgwbitmask.c new file mode 100644 index 000000000..86bad1245 --- /dev/null +++ b/server/core/test/testgwbitmask.c @@ -0,0 +1,83 @@ +/* + * This file is distributed as part of MaxScale. It is free + * software: you can redistribute it and/or modify it under the terms of the + * GNU General Public License as published by the Free Software Foundation, + * version 2. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 51 + * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Copyright MariaDB Corporation Ab 2014 + */ + +/** + * + * @verbatim + * Revision History + * + * Date Who Description + * 13-10-2014 Martin Brampton Initial implementation + * + * @endverbatim + */ + +#include +#include +#include + +#include + +#include + +/** + * test1 Allocate table of users and mess around with it + * + */ + +static int +test1() +{ +static GWBITMASK bitmask, another; +int i; + + /* Hint tests */ + ss_dfprintf(stderr, + "testgwbitmask : Initialise a bitmask"); + bitmask_init(&bitmask); + ss_info_dassert(BIT_LENGTH_INITIAL == bitmask.length, "Length should be initial length."); + for (i = 0; i < BIT_LENGTH_INITIAL; i++) { + ss_info_dassert(0 == bitmask_isset(&bitmask, i), "All bits should initially be zero"); + } + ss_info_dassert(0 != bitmask_isallclear(&bitmask), "Should be all clear"); + ss_dfprintf(stderr, "\t..done\nSet an arbitrary bit."); + bitmask_set(&bitmask, 17); + bitmask_copy(&another, &bitmask); + ss_info_dassert(0 != bitmask_isset(&another, 17), "Test bit should be set"); + ss_dfprintf(stderr, "\t..done\nClear the arbitrary bit."); + bitmask_clear(&bitmask, 17); + ss_info_dassert(0 == bitmask_isset(&bitmask, 17), "Test bit should be clear"); + ss_info_dassert(0 != bitmask_isallclear(&bitmask), "Should be all clear"); + ss_dfprintf(stderr, "\t..done\nFree the bitmask."); + bitmask_free(&bitmask); + ss_info_dassert(0 == bitmask.length, "Length should be zero after bit mask freed."); + ss_dfprintf(stderr, "\t..done\n"); + + return 0; + +} + +int main(int argc, char **argv) +{ +int result = 0; + + result += test1(); + + exit(result); +} + diff --git a/server/core/test/testhash.c b/server/core/test/testhash.c index 3fa9d7f0d..89720da80 100644 --- a/server/core/test/testhash.c +++ b/server/core/test/testhash.c @@ -13,7 +13,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
* - * Copyright SkySQL Ab 2014 + * Copyright MariaDB Corporation Ab 2014 */ /** @@ -158,6 +158,7 @@ static bool do_hashtest( hashtable_free(h); return_succp: + free(val_arr); return succp; } diff --git a/server/core/test/testhint.c b/server/core/test/testhint.c new file mode 100644 index 000000000..e95d2bdd5 --- /dev/null +++ b/server/core/test/testhint.c @@ -0,0 +1,73 @@ +/* + * This file is distributed as part of MaxScale. It is free + * software: you can redistribute it and/or modify it under the terms of the + * GNU General Public License as published by the Free Software Foundation, + * version 2. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 51 + * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Copyright MariaDB Corporation Ab 2014 + */ + +/** + * + * @verbatim + * Revision History + * + * Date Who Description + * 08-10-2014 Martin Brampton Initial implementation + * + * @endverbatim + */ + +#include +#include +#include + +#include + +/** + * test1 Allocate table of users and mess around with it + * + */ + +static int +test1() +{ +HINT *hint; + + /* Hint tests */ + ss_dfprintf(stderr, + "testhint : Add a parameter hint to a null list"); + char* name = strdup("name"); + hint = hint_create_parameter(NULL, name, "value"); + free(name); + skygw_log_sync_all(); + ss_info_dassert(NULL != hint, "New hint list should not be null"); + ss_info_dassert(0 == strcmp("value", hint->value), "Hint value should be correct"); + ss_info_dassert(0 != hint_exists(&hint, HINT_PARAMETER), "Hint of parameter type should exist"); + ss_dfprintf(stderr, "\t..done\nFree hints."); + if (NULL != hint) hint_free(hint); + skygw_log_sync_all(); + ss_dfprintf(stderr, "\t..done\n"); + + return 0; + +} + +int main(int argc, char **argv) +{ +int result = 0; + + result += test1(); + + exit(result); +} + diff --git a/server/core/test/testmemlog.c b/server/core/test/testmemlog.c new file mode 100644 index 000000000..1523ec8ec --- /dev/null +++ b/server/core/test/testmemlog.c @@ -0,0 +1,404 @@ +/* + * This file is distributed as part of MaxScale from MariaDB. It is free + * software: you can redistribute it and/or modify it under the terms of the + * GNU General Public License as published by the Free Software Foundation, + * version 2. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 51 + * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Copyright MariaDB Corporation 2014 + */ + +/** + * + * @verbatim + * Revision History + * + * Date Who Description + * 30/09/2014 Mark Riddoch Initial implementation + * + * @endverbatim + */ + +#include +#include +#include +#include +#include + +/** + * Count the number of lines in a file + * + * @param file The name of the file + * @return -1 if the file could not be opened or the numebr of lines + */ +int +linecount(char *file) +{ +FILE *fp; +int i = 0; +char buffer[180]; + + if ((fp = fopen(file, "r")) == NULL) + return -1; + while (fgets(buffer, 180, fp) != NULL) + i++; + fclose(fp); + return i; +} + +/* Some strings to log */ +char *strings[] = { + "First log entry", + "Second entry", + "Third", + "The fourth thing to log", + "Add a final 5th item" +}; + +int +main() +{ +MEMLOG *log, *log2; +int i; +long j; +long long k; +int failures = 0; + + unlink("memlog1"); + if ((log = memlog_create("memlog1", ML_INT, 100)) == NULL) + { + printf("Memlog Creation: Failed\n"); + failures++; + } + else + { + printf("Memlog Creation: Passed\n"); + if (access("memlog1",R_OK) == 0) + { + printf("File existance 1: Failed\n"); + failures++; + } + else + printf("File existance 1: Passed\n"); + for (i = 0; i < 50; i++) + memlog_log(log, (void *)i); + if (access("memlog1",R_OK) == 0) + { + printf("File existance 2: Failed\n"); + failures++; + } + else + printf("File existance 2: Passed\n"); + for (i = 0; i < 50; i++) + memlog_log(log, (void *)i); + if (access("memlog1",R_OK) != 0) + { + printf("File existance 3: Failed\n"); + failures++; + } + else + printf("File existance 3: Passed\n"); + if (linecount("memlog1") != 100) + { + printf("Incorrect entry count: Failed\n"); + failures++; + } + else + printf("Incorrect entry count: Passed\n"); + for (i = 0; i < 
50; i++) + memlog_log(log, (void *)i); + if (linecount("memlog1") != 100) + { + printf("Premature Flushing: Failed\n"); + failures++; + } + else + printf("Premature Flushing: Passed\n"); + memlog_destroy(log); + if (linecount("memlog1") != 150) + { + printf("Flush on destroy: Failed\n"); + failures++; + } + else + printf("Flush on destroy: Passed\n"); + } + + unlink("memlog2"); + if ((log = memlog_create("memlog2", ML_LONG, 100)) == NULL) + { + printf("Memlog Creation: Failed\n"); + failures++; + } + else + { + printf("Memlog Creation: Passed\n"); + if (access("memlog2",R_OK) == 0) + { + printf("File existance 1: Failed\n"); + failures++; + } + else + printf("File existance 1: Passed\n"); + for (j = 0; j < 50; j++) + memlog_log(log, (void *)j); + if (access("memlog2",R_OK) == 0) + { + printf("File existance 2: Failed\n"); + failures++; + } + else + printf("File existance 2: Passed\n"); + for (j = 0; j < 50; j++) + memlog_log(log, (void *)j); + if (access("memlog2",R_OK) != 0) + { + printf("File existance 3: Failed\n"); + failures++; + } + else + printf("File existance 3: Passed\n"); + if (linecount("memlog2") != 100) + { + printf("Incorrect entry count: Failed\n"); + failures++; + } + else + printf("Incorrect entry count: Passed\n"); + for (j = 0; j < 50; j++) + memlog_log(log, (void *)j); + if (linecount("memlog2") != 100) + { + printf("Premature Flushing: Failed\n"); + failures++; + } + else + printf("Premature Flushing: Passed\n"); + memlog_destroy(log); + if (linecount("memlog2") != 150) + { + printf("Flush on destroy: Failed\n"); + failures++; + } + else + printf("Flush on destroy: Passed\n"); + } + + unlink("memlog3"); + if ((log = memlog_create("memlog3", ML_LONGLONG, 100)) == NULL) + { + printf("Memlog Creation: Failed\n"); + failures++; + } + else + { + printf("Memlog Creation: Passed\n"); + if (access("memlog3",R_OK) == 0) + { + printf("File existance 1: Failed\n"); + failures++; + } + else + printf("File existance 1: Passed\n"); + for (k = 0; k < 50; 
k++) + memlog_log(log, (void *)k); + if (access("memlog3",R_OK) == 0) + { + printf("File existance 2: Failed\n"); + failures++; + } + else + printf("File existance 2: Passed\n"); + for (k = 0; k < 50; k++) + memlog_log(log, (void *)k); + if (access("memlog3",R_OK) != 0) + { + printf("File existance 3: Failed\n"); + failures++; + } + else + printf("File existance 3: Passed\n"); + if (linecount("memlog3") != 100) + { + printf("Incorrect entry count: Failed\n"); + failures++; + } + else + printf("Incorrect entry count: Passed\n"); + for (k = 0; k < 50; k++) + memlog_log(log, (void *)k); + if (linecount("memlog3") != 100) + { + printf("Premature Flushing: Failed\n"); + failures++; + } + else + printf("Premature Flushing: Passed\n"); + memlog_destroy(log); + if (linecount("memlog3") != 150) + { + printf("Flush on destroy: Failed\n"); + failures++; + } + else + printf("Flush on destroy: Passed\n"); + } + + unlink("memlog4"); + if ((log = memlog_create("memlog4", ML_STRING, 100)) == NULL) + { + printf("Memlog Creation: Failed\n"); + failures++; + } + else + { + printf("Memlog Creation: Passed\n"); + if (access("memlog4",R_OK) == 0) + { + printf("File existance 1: Failed\n"); + failures++; + } + else + printf("File existance 1: Passed\n"); + for (i = 0; i < 50; i++) + memlog_log(log, strings[i%5]); + if (access("memlog4",R_OK) == 0) + { + printf("File existance 2: Failed\n"); + failures++; + } + else + printf("File existance 2: Passed\n"); + for (i = 0; i < 50; i++) + memlog_log(log, strings[i%5]); + if (access("memlog4",R_OK) != 0) + { + printf("File existance 3: Failed\n"); + failures++; + } + else + printf("File existance 3: Passed\n"); + if (linecount("memlog4") != 100) + { + printf("Incorrect entry count: Failed\n"); + failures++; + } + else + printf("Incorrect entry count: Passed\n"); + for (i = 0; i < 50; i++) + memlog_log(log, strings[i%5]); + if (linecount("memlog4") != 100) + { + printf("Premature Flushing: Failed\n"); + failures++; + } + else + printf("Premature 
Flushing: Passed\n"); + memlog_destroy(log); + if (linecount("memlog4") != 150) + { + printf("Flush on destroy: Failed\n"); + failures++; + } + else + printf("Flush on destroy: Passed\n"); + } + + unlink("memlog5"); + unlink("memlog6"); + if ((log = memlog_create("memlog5", ML_INT, 100)) == NULL) + { + printf("Memlog Creation: Failed\n"); + failures++; + } + else + { + printf("Memlog Creation: Passed\n"); + if ((log2 = memlog_create("memlog6", ML_INT, 100)) == NULL) + { + printf("Memlog Creation: Failed\n"); + failures++; + } + else + { + printf("Memlog Creation: Passed\n"); + for (i = 0; i < 40; i++) + memlog_log(log, (void *)i); + for (i = 0; i < 30; i++) + memlog_log(log2, (void *)i); + memlog_flush_all(); + if (linecount("memlog5") != 40 || + linecount("memlog6") != 30) + { + printf( + "Memlog flush all: Failed\n"); + failures++; + } + else + printf( + "Memlog flush all: Passed\n"); + } + } + + unlink("memlog7"); + if ((log = memlog_create("memlog7", ML_INT, 100)) == NULL) + { + printf("Memlog Creation: Failed\n"); + failures++; + } + else + { + printf("Memlog Creation: Passed\n"); + if (access("memlog7",R_OK) == 0) + { + printf("File existance 1: Failed\n"); + failures++; + } + else + printf("File existance 1: Passed\n"); + for (i = 0; i < 5050; i++) + memlog_log(log, (void *)i); + if (access("memlog7",R_OK) != 0) + { + printf("File existance 3: Failed\n"); + failures++; + } + else + printf("File existance 3: Passed\n"); + if (linecount("memlog7") != 5000) + { + printf("Incorrect entry count: Failed\n"); + failures++; + } + else + printf("Incorrect entry count: Passed\n"); + for (i = 0; i < 50; i++) + memlog_log(log, (void *)i); + if (linecount("memlog7") != 5100) + { + printf("Residual flushing: Failed\n"); + failures++; + } + else + printf("Premature Flushing: Passed\n"); + for (i = 0; i < 10120; i++) + memlog_log(log, (void *)i); + memlog_destroy(log); + if (linecount("memlog7") != 15220) + { + printf("Flush on destroy: Failed\n"); + failures++; + } + else 
+ printf("Flush on destroy: Passed\n"); + } + exit(failures); +} diff --git a/server/core/test/testmodutil.c b/server/core/test/testmodutil.c new file mode 100644 index 000000000..b37203ff4 --- /dev/null +++ b/server/core/test/testmodutil.c @@ -0,0 +1,78 @@ +/* + * This file is distributed as part of MaxScale. It is free + * software: you can redistribute it and/or modify it under the terms of the + * GNU General Public License as published by the Free Software Foundation, + * version 2. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 51 + * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Copyright MariaDB Corporation Ab 2014 + */ + +/** + * + * @verbatim + * Revision History + * + * Date Who Description + * 17-09-2014 Martin Brampton Initial implementation + * + * @endverbatim + */ + +#include +#include +#include + +#include +#include + +/** + * test1 Allocate a service and do lots of other things + * + */ + +static int +test1() +{ +GWBUF *buffer; +char *(sql[100]); +int result, length, residual; + + /* Poll tests */ + ss_dfprintf(stderr, + "testmodutil : Rudimentary tests."); + buffer = gwbuf_alloc(100); + ss_info_dassert(0 == modutil_is_SQL(buffer), "Default buffer should be diagnosed as not SQL"); + /* There would ideally be some straightforward way to create a SQL buffer? 
*/ + ss_dfprintf(stderr, "\t..done\nExtract SQL from buffer"); + ss_info_dassert(0 == modutil_extract_SQL(buffer, sql, &length), "Default buffer should fail"); + ss_dfprintf(stderr, "\t..done\nExtract SQL from buffer different way?"); + ss_info_dassert(0 == modutil_MySQL_Query(buffer, sql, &length, &residual), "Default buffer should fail"); + ss_dfprintf(stderr, "\t..done\nReplace SQL in buffer"); + ss_info_dassert(0 == modutil_replace_SQL(buffer, "select * from some_table;"), "Default buffer should fail"); + ss_dfprintf(stderr, "\t..done\nTidy up."); + gwbuf_free(buffer); + ss_dfprintf(stderr, "\t..done\n"); + + return 0; + +} + +int main(int argc, char **argv) +{ +int result = 0; + + result += test1(); + + exit(result); +} + + diff --git a/server/core/test/testpoll.c b/server/core/test/testpoll.c new file mode 100644 index 000000000..7b9175b2c --- /dev/null +++ b/server/core/test/testpoll.c @@ -0,0 +1,103 @@ +/* + * This file is distributed as part of MaxScale. It is free + * software: you can redistribute it and/or modify it under the terms of the + * GNU General Public License as published by the Free Software Foundation, + * version 2. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 51 + * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Copyright MariaDB Corporation Ab 2014 + */ + +/** + * + * @verbatim + * Revision History + * + * Date Who Description + * 11-09-2014 Martin Brampton Initial implementation + * + * @endverbatim + */ + +#include +#include +#include +#include +#include +#include + +/** + * test1 Allocate a service and do lots of other things + * + */ + +static int +test1() +{ +DCB *dcb; +int result; + int eno = 0; + + /* Poll tests */ + ss_dfprintf(stderr, + "testpoll : Initialise the polling system."); + poll_init(); + ss_dfprintf(stderr, "\t..done\nAdd a DCB"); + dcb = dcb_alloc(DCB_ROLE_REQUEST_HANDLER); + + if(dcb == NULL){ + ss_dfprintf(stderr, "\nError on function call: dcb_alloc() returned NULL.\n"); + return 1; + } + + dcb->fd = socket(AF_UNIX, SOCK_STREAM, 0); + + if(dcb->fd < 0){ + ss_dfprintf(stderr, "\nError on function call: socket() returned %d: %s\n",errno,strerror(errno)); + return 1; + } + + + if((eno = poll_add_dcb(dcb)) != 0){ + ss_dfprintf(stderr, "\nError on function call: poll_add_dcb() returned %d.\n",eno); + return 1; + } + + if((eno = poll_remove_dcb(dcb)) != 0){ + ss_dfprintf(stderr, "\nError on function call: poll_remove_dcb() returned %d.\n",eno); + return 1; + } + + if((eno = poll_add_dcb(dcb)) != 0){ + ss_dfprintf(stderr, "\nError on function call: poll_add_dcb() returned %d.\n",eno); + return 1; + } + + ss_dfprintf(stderr, "\t..done\nStart wait for events."); + sleep(10); + poll_shutdown(); + ss_dfprintf(stderr, "\t..done\nTidy up."); + dcb_free(dcb); + ss_dfprintf(stderr, "\t..done\n"); + + return 0; + +} + +int main(int argc, char **argv) +{ +int result = 0; + + result += test1(); + + exit(result); +} + diff --git a/server/core/test/testserver.c b/server/core/test/testserver.c new file mode 100644 index 000000000..de40847d6 --- /dev/null +++ b/server/core/test/testserver.c @@ -0,0 +1,99 @@ +/* + * This file is distributed as part of MaxScale. 
It is free + * software: you can redistribute it and/or modify it under the terms of the + * GNU General Public License as published by the Free Software Foundation, + * version 2. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 51 + * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Copyright MariaDB Corporation Ab 2014 + */ + +/** + * + * @verbatim + * Revision History + * + * Date Who Description + * 08-10-2014 Martin Brampton Initial implementation + * + * @endverbatim + */ + +#include +#include +#include + +#include + +/** + * test1 Allocate a server and do lots of other things + * + */ +static int +test1() +{ +SERVER *server; +int result; +char *status; + + /* Server tests */ + ss_dfprintf(stderr, + "testserver : creating server called MyServer"); + server = server_alloc("MyServer", "HTTPD", 9876); + skygw_log_sync_all(); + + //ss_info_dassert(NULL != service, "New server with valid protocol and port must not be null"); + //ss_info_dassert(0 != service_isvalid(service), "Service must be valid after creation"); + + ss_dfprintf(stderr, "\t..done\nTest Parameter for Server."); + ss_info_dassert(NULL == serverGetParameter(server, "name"), "Parameter should be null when not set"); + serverAddParameter(server, "name", "value"); + skygw_log_sync_all(); + ss_info_dassert(0 == strcmp("value", serverGetParameter(server, "name")), "Parameter should be returned correctly"); + ss_dfprintf(stderr, "\t..done\nTesting Unique Name for Server."); + ss_info_dassert(NULL == server_find_by_unique_name("uniquename"), "Should not find non-existent unique name."); + server_set_unique_name(server, "uniquename"); + 
skygw_log_sync_all(); + ss_info_dassert(server == server_find_by_unique_name("uniquename"), "Should find by unique name."); + ss_dfprintf(stderr, "\t..done\nTesting Status Setting for Server."); + status = server_status(server); + skygw_log_sync_all(); + ss_info_dassert(0 == strcmp("Running", status), "Status of Server should be Running by default."); + if (NULL != status) free(status); + server_set_status(server, SERVER_MASTER); + status = server_status(server); + skygw_log_sync_all(); + ss_info_dassert(0 == strcmp("Master, Running", status), "Should find correct status."); + server_clear_status(server, SERVER_MASTER); + free(status); + status = server_status(server); + skygw_log_sync_all(); + ss_info_dassert(0 == strcmp("Running", status), "Status of Server should be Running after master status cleared."); + if (NULL != status) free(status); + ss_dfprintf(stderr, "\t..done\nRun Prints for Server and all Servers."); + printServer(server); + printAllServers(); + skygw_log_sync_all(); + ss_dfprintf(stderr, "\t..done\nFreeing Server."); + ss_info_dassert(0 != server_free(server), "Free should succeed"); + ss_dfprintf(stderr, "\t..done\n"); + return 0; + +} + +int main(int argc, char **argv) +{ +int result = 0; + + result += test1(); + + exit(result); +} diff --git a/server/core/test/testservice.c b/server/core/test/testservice.c new file mode 100644 index 000000000..085a411ca --- /dev/null +++ b/server/core/test/testservice.c @@ -0,0 +1,102 @@ +/* + * This file is distributed as part of MaxScale. It is free + * software: you can redistribute it and/or modify it under the terms of the + * GNU General Public License as published by the Free Software Foundation, + * version 2. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 51 + * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Copyright MariaDB Corporation Ab 2014 + */ + +/** + * + * @verbatim + * Revision History + * + * Date Who Description + * 08-09-2014 Martin Brampton Initial implementation + * + * @endverbatim + */ + +#include +#include +#include +#include +#include +#include +/** + * test1 Allocate a service and do lots of other things + * + */ +static int +test1() +{ +SERVICE *service; +int result; +int argc = 3; +char buffer[1024]; +sprintf(buffer,"%s",TEST_LOG_DIR); +char* argv[] = { + "log_manager", + "-j", + buffer, + NULL +}; +skygw_logmanager_init(argc,argv); +poll_init(); + /* Service tests */ + ss_dfprintf(stderr, + "testservice : creating service called MyService with router nonexistent"); + service = service_alloc("MyService", "non-existent"); + skygw_log_sync_all(); + ss_info_dassert(NULL == service, "New service with invalid router should be null"); + ss_info_dassert(0 == service_isvalid(service), "Service must not be valid after incorrect creation"); + ss_dfprintf(stderr, "\t..done\nValid service creation, router testroute."); + service = service_alloc("MyService", "testroute"); + skygw_log_sync_all(); + ss_info_dassert(NULL != service, "New service with valid router must not be null"); + ss_info_dassert(0 != service_isvalid(service), "Service must be valid after creation"); + ss_info_dassert(0 == strcmp("MyService", service_get_name(service)), "Service must have given name"); + ss_dfprintf(stderr, "\t..done\nAdding protocol HTTPD."); + ss_info_dassert(0 != serviceAddProtocol(service, "HTTPD", "localhost", 9876), "Add Protocol should succeed"); + ss_info_dassert(0 != serviceHasProtocol(service, "HTTPD", 9876), "Service should have new protocol as requested"); + serviceStartProtocol(service, "HTTPD", 9876); + skygw_log_sync_all(); + 
ss_dfprintf(stderr, "\t..done\nStarting Service."); + result = serviceStart(service); + skygw_log_sync_all(); + ss_info_dassert(0 != result, "Start should succeed"); + result = serviceStop(service); + skygw_log_sync_all(); + ss_info_dassert(0 != result, "Stop should succeed"); + result = serviceStartAll(); + skygw_log_sync_all(); + ss_info_dassert(0 != result, "Start all should succeed"); + + ss_dfprintf(stderr, "\t..done\nStopping Service."); + ss_info_dassert(0 != serviceStop(service), "Stop should succeed"); + ss_dfprintf(stderr, "\t..done\nFreeing Service."); + ss_info_dassert(0 != service_free(service), "Free should succeed"); + ss_dfprintf(stderr, "\t..done\n"); + + return 0; + +} + +int main(int argc, char **argv) +{ +int result = 0; + + result += test1(); + + exit(result); +} diff --git a/server/core/test/testsession.c b/server/core/test/testsession.c new file mode 100644 index 000000000..4d8d4cc04 --- /dev/null +++ b/server/core/test/testsession.c @@ -0,0 +1,77 @@ +/* + * This file is distributed as part of MaxScale. It is free + * software: you can redistribute it and/or modify it under the terms of the + * GNU General Public License as published by the Free Software Foundation, + * version 2. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 51 + * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Copyright MariaDB Corporation Ab 2014 + */ + +/** + * + * @verbatim + * Revision History + * + * Date Who Description + * 11-09-2014 Martin Brampton Initial implementation + * + * @endverbatim + */ + +#include +#include +#include + +#include +#include + +/** + * test1 Allocate a service and do lots of other things + * + */ + +static int +test1() +{ +DCB *dcb; +int result; + + /* Poll tests */ + ss_dfprintf(stderr, + "testpoll : Initialise the polling system."); + poll_init(); + ss_dfprintf(stderr, "\t..done\nAdd a DCB"); + dcb = dcb_alloc(DCB_ROLE_SERVICE_LISTENER); + dcb->fd = socket(AF_UNIX, SOCK_STREAM, 0); + poll_add_dcb(dcb); + poll_remove_dcb(dcb); + poll_add_dcb(dcb); + ss_dfprintf(stderr, "\t..done\nStart wait for events."); + sleep(10); + poll_shutdown(); + ss_dfprintf(stderr, "\t..done\nTidy up."); + dcb_free(dcb); + ss_dfprintf(stderr, "\t..done\n"); + + return 0; + +} + +int main(int argc, char **argv) +{ +int result = 0; + + result += test1(); + + exit(result); +} + diff --git a/server/core/test/testspinlock.c b/server/core/test/testspinlock.c index ecdbdf108..14501cea5 100644 --- a/server/core/test/testspinlock.c +++ b/server/core/test/testspinlock.c @@ -13,7 +13,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * - * Copyright SkySQL Ab 2014 + * Copyright MariaDB Corporation Ab 2014 */ /** @@ -105,12 +105,16 @@ test2() { SPINLOCK lck; void *handle; +struct timespec sleeptime; + + sleeptime.tv_sec = 10; + sleeptime.tv_nsec = 0; acquire_time = 0; spinlock_init(&lck); spinlock_acquire(&lck); handle = thread_start(test2_helper, (void *)&lck); - sleep(10); + nanosleep(&sleeptime, NULL); spinlock_release(&lck); thread_wait(handle); @@ -122,12 +126,118 @@ void *handle; return 0; } -main(int argc, char **argv) +/** + * test3 spinlock_acquire tests process bound threads + * + * Check that spinlock correctly blocks all other threads whilst the spinlock + * is held. 
+ * + * Start multiple threads that obtain spinlock and run process bound + */ +#define THREADS 5 +#define ITERATIONS 50000 +#define PROCESS_LOOP 10000 +#define SECONDS 15 +#define NANOTIME 100000 + +static int times_run, failures; +static volatile int active; +static int threadrun[THREADS]; +static int nowait[THREADS]; +static SPINLOCK lck; +static void +test3_helper(void *data) +{ +// SPINLOCK *lck = (SPINLOCK *)data; +int i; +int n = *(int *)data; +struct timespec sleeptime; +time_t rawtime; + + sleeptime.tv_sec = 0; + sleeptime.tv_nsec = 1; + + while (1) { + if (spinlock_acquire_nowait(&lck)) { + nowait[n]++; + } + else { + spinlock_acquire(&lck); + } + if (times_run++ > ITERATIONS) { + break; + } + threadrun[n]++; + /* + if (99 == (times_run % 100)) { + time ( &rawtime ); + fprintf(stderr, "%s Done %d iterations of test, in thread %d.\n", asctime (localtime ( &rawtime )), times_run, n); + } + */ + if (0 != active) { + fprintf(stderr, "spinlock: test 3 failed with active non-zero after lock obtained.\n"); + failures++; + } + else { + active = 1; + for (i=0; i +#include +#include + +#include + +#include "log_manager.h" + +/** + * test1 Allocate table of users and mess around with it + * + */ + +static int +test1() +{ +USERS *users; +char *authdata; +int result, count; + + /* Poll tests */ + ss_dfprintf(stderr, + "testusers : Initialise the user table."); + users = users_alloc(); + skygw_log_sync_all(); + ss_info_dassert(NULL != users, "Allocating user table should not return NULL.") + ss_dfprintf(stderr, "\t..done\nAdd a user"); + count = users_add(users, "username", "authorisation"); + skygw_log_sync_all(); + ss_info_dassert(1 == count, "Should add one user"); + authdata = users_fetch(users, "username"); + skygw_log_sync_all(); + ss_info_dassert(NULL != authdata, "Fetch valid user must not return NULL"); + ss_info_dassert(0 == strcmp("authorisation", authdata), "User authorisation should be correct"); + ss_dfprintf(stderr, "\t..done\nPrint users"); + 
usersPrint(users); + skygw_log_sync_all(); + ss_dfprintf(stderr, "\t..done\nUpdate a user"); + count = users_update(users, "username", "newauth"); + skygw_log_sync_all(); + ss_info_dassert(1 == count, "Should update just one user"); + authdata = users_fetch(users, "username"); + skygw_log_sync_all(); + ss_info_dassert(NULL != authdata, "Fetch valid user must not return NULL"); + ss_info_dassert(0 == strcmp("newauth", authdata), "User authorisation should be correctly updated"); + + ss_dfprintf(stderr, "\t..done\nAdd another user"); + count = users_add(users, "username2", "authorisation2"); + skygw_log_sync_all(); + ss_info_dassert(1 == count, "Should add one user"); + ss_dfprintf(stderr, "\t..done\nDelete a user."); + count = users_delete(users, "username"); + skygw_log_sync_all(); + ss_info_dassert(1 == count, "Should delete just one user"); + ss_dfprintf(stderr, "\t..done\nFree user table."); + users_free(users); + skygw_log_sync_all(); + ss_dfprintf(stderr, "\t..done\n"); + + return 0; + +} + +int main(int argc, char **argv) +{ +int result = 0; + + result += test1(); + + exit(result); +} + diff --git a/server/core/thread.c b/server/core/thread.c index dded79507..ac8095b38 100644 --- a/server/core/thread.c +++ b/server/core/thread.c @@ -1,5 +1,5 @@ /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -13,7 +13,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
* - * Copyright SkySQL Ab 2013 + * Copyright MariaDB Corporation Ab 2013-2014 */ #include #include diff --git a/server/core/users.c b/server/core/users.c index 3b4dadf15..086a6b81c 100644 --- a/server/core/users.c +++ b/server/core/users.c @@ -1,5 +1,5 @@ /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -13,7 +13,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * - * Copyright SkySQL Ab 2013 + * Copyright MariaDB Corporation Ab 2013-2014 */ #include #include @@ -106,6 +106,8 @@ int add; /** * Delete a user from the user table. * + * The last user in the table can not be deleted + * * @param users The users table * @param user The user name * @return The number of users deleted from the table @@ -115,12 +117,12 @@ users_delete(USERS *users, char *user) { int del; - atomic_add(&users->stats.n_deletes, 1); if (users->stats.n_entries == 1) { return 0; } + atomic_add(&users->stats.n_deletes, 1); del = hashtable_delete(users->data, user); - atomic_add(&users->stats.n_entries, del * -1); + atomic_add(&users->stats.n_entries, -del); return del; } @@ -181,32 +183,41 @@ char *sep; void *user; dcb_printf(dcb, "Users table data\n"); - dcb_hashtable_stats(dcb, users->data); - if ((iter = hashtable_iterator(users->data)) != NULL) + + if (users == NULL || users->data == NULL) { - dcb_printf(dcb, "User names: "); - sep = ""; + dcb_printf(dcb, "Users table is empty\n"); + } + else + { + dcb_hashtable_stats(dcb, users->data); + + if ((iter = hashtable_iterator(users->data)) != NULL) + { + dcb_printf(dcb, "User names: "); + sep = ""; - if (users->usersCustomUserFormat != NULL) { - while ((user = hashtable_next(iter)) != NULL) 
- { - char *custom_user; - custom_user = users->usersCustomUserFormat(user); - if (custom_user) { - dcb_printf(dcb, "%s%s", sep, custom_user); - free(custom_user); + if (users->usersCustomUserFormat != NULL) { + while ((user = hashtable_next(iter)) != NULL) + { + char *custom_user; + custom_user = users->usersCustomUserFormat(user); + if (custom_user) { + dcb_printf(dcb, "%s%s", sep, custom_user); + free(custom_user); + sep = ", "; + } + } + } else { + while ((user = hashtable_next(iter)) != NULL) + { + dcb_printf(dcb, "%s%s", sep, (char *)user); sep = ", "; } } - } else { - while ((user = hashtable_next(iter)) != NULL) - { - dcb_printf(dcb, "%s%s", sep, (char *)user); - sep = ", "; - } - } - dcb_printf(dcb, "\n"); - hashtable_iterator_free(iter); + hashtable_iterator_free(iter); + } } + dcb_printf(dcb, "\n"); } diff --git a/server/core/utils.c b/server/core/utils.c index 1b8c6b5fe..da9378867 100644 --- a/server/core/utils.c +++ b/server/core/utils.c @@ -1,5 +1,5 @@ /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -13,7 +13,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
* - * Copyright SkySQL Ab 2013 + * Copyright MariaDB Corporation Ab 2013-2014 * */ @@ -27,8 +27,9 @@ * 10-06-2013 Massimiliano Pinto Initial implementation * 12-06-2013 Massimiliano Pinto Read function trought * the gwbuff strategy - * 13-06-2013 Massimiliano Pinto Gateway local authentication + * 13-06-2013 Massimiliano Pinto MaxScale local authentication * basics + * 02-09-2014 Martin Brampton Replaced C++ comments by C comments * * @endverbatim */ @@ -42,21 +43,24 @@ #include #include -extern int lm_enabled_logfiles_bitmask; +/** Defined in log_manager.cc */ +extern int lm_enabled_logfiles_bitmask; +extern size_t log_ses_count[]; +extern __thread log_info_t tls_log_info; -// used in the hex2bin function +/* used in the hex2bin function */ #define char_val(X) (X >= '0' && X <= '9' ? X-'0' :\ X >= 'A' && X <= 'Z' ? X-'A'+10 :\ X >= 'a' && X <= 'z' ? X-'a'+10 :\ '\177') -// used in the bin2hex function +/* used in the bin2hex function */ char hex_upper[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; char hex_lower[] = "0123456789abcdefghijklmnopqrstuvwxyz"; -////////////////////////////////////////// -//backend read event triggered by EPOLLIN -////////////////////////////////////////// +/***************************************** + * backend read event triggered by EPOLLIN +*****************************************/ int setnonblocking(int fd) { @@ -91,17 +95,17 @@ char *gw_strend(register const char *s) { return (char*) (s-1); } -/////////////////////////////// -// generate a random char -////////////////////////////// +/***************************************** +* generate a random char +*****************************************/ static char gw_randomchar() { return (char)((rand() % 78) + 30); } -///////////////////////////////// -// generate a random string -// output must be pre allocated -///////////////////////////////// +/***************************************** + * generate a random string + * output must be pre allocated 
+*****************************************/ int gw_generate_random_str(char *output, int len) { int i; @@ -116,10 +120,10 @@ int gw_generate_random_str(char *output, int len) { return 0; } -///////////////////////////////// -// hex string to binary data -// output must be pre allocated -///////////////////////////////// +/***************************************** + * hex string to binary data + * output must be pre allocated +*****************************************/ int gw_hex2bin(uint8_t *out, const char *in, unsigned int len) { const char *in_end= in + len; @@ -140,10 +144,10 @@ int gw_hex2bin(uint8_t *out, const char *in, unsigned int len) { return 0; } -///////////////////////////////// -// binary data to hex string -// output must be pre allocated -///////////////////////////////// +/***************************************** + * binary data to hex string + * output must be pre allocated +*****************************************/ char *gw_bin2hex(char *out, const uint8_t *in, unsigned int len) { const uint8_t *in_end= in + len; if (len == 0 || in == NULL) { @@ -159,12 +163,12 @@ char *gw_bin2hex(char *out, const uint8_t *in, unsigned int len) { return out; } -/////////////////////////////////////////////////////// -// fill a preallocated buffer with XOR(str1, str2) -// XOR between 2 equal len strings -// note that XOR(str1, XOR(str1 CONCAT str2)) == str2 -// and that XOR(str1, str2) == XOR(str2, str1) -/////////////////////////////////////////////////////// +/**************************************************** + * fill a preallocated buffer with XOR(str1, str2) + * XOR between 2 equal len strings + * note that XOR(str1, XOR(str1 CONCAT str2)) == str2 + * and that XOR(str1, str2) == XOR(str2, str1) +*****************************************************/ void gw_str_xor(uint8_t *output, const uint8_t *input1, const uint8_t *input2, unsigned int len) { const uint8_t *input1_end = NULL; input1_end = input1 + len; @@ -175,10 +179,10 @@ void gw_str_xor(uint8_t 
*output, const uint8_t *input1, const uint8_t *input2, u *output = '\0'; } -///////////////////////////////////////////////////////////// -// fill a 20 bytes preallocated with SHA1 digest (160 bits) -// for one input on in_len bytes -///////////////////////////////////////////////////////////// +/********************************************************** + * fill a 20 bytes preallocated with SHA1 digest (160 bits) + * for one input on in_len bytes +**********************************************************/ void gw_sha1_str(const uint8_t *in, int in_len, uint8_t *out) { unsigned char hash[SHA_DIGEST_LENGTH]; @@ -186,10 +190,10 @@ void gw_sha1_str(const uint8_t *in, int in_len, uint8_t *out) { memcpy(out, hash, SHA_DIGEST_LENGTH); } -///////////////////////////////////////////////////////////// -// fill 20 bytes preallocated with SHA1 digest (160 bits) -// for two inputs, in_len and in2_len bytes -///////////////////////////////////////////////////////////// +/******************************************************** + * fill 20 bytes preallocated with SHA1 digest (160 bits) + * for two inputs, in_len and in2_len bytes +********************************************************/ void gw_sha1_2_str(const uint8_t *in, int in_len, const uint8_t *in2, int in2_len, uint8_t *out) { SHA_CTX context; unsigned char hash[SHA_DIGEST_LENGTH]; @@ -224,7 +228,9 @@ int gw_getsockerrno( goto return_eno; } - getsockopt(fd, SOL_SOCKET, SO_ERROR, (void *)&eno, &elen); + if(getsockopt(fd, SOL_SOCKET, SO_ERROR, (void *)&eno, &elen) != 0){ + eno = 0; + } return_eno: return eno; diff --git a/server/include/adminusers.h b/server/include/adminusers.h index 07afa5390..4cc560e41 100644 --- a/server/include/adminusers.h +++ b/server/include/adminusers.h @@ -1,7 +1,7 @@ #ifndef _ADMINUSERS_H #define _ADMINUSERS_H /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. 
It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -15,7 +15,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * - * Copyright SkySQL Ab 2013 + * Copyright MariaDB Corporation Ab 2013-2014 */ /** diff --git a/server/include/atomic.h b/server/include/atomic.h index 7fea0926e..f139d15d5 100644 --- a/server/include/atomic.h +++ b/server/include/atomic.h @@ -1,7 +1,7 @@ #ifndef _ATOMIC_H #define _ATOMIC_H /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -15,7 +15,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * - * Copyright SkySQL Ab 2013 + * Copyright MariaDB Corporation Ab 2013-2014 */ /** diff --git a/server/include/buffer.h b/server/include/buffer.h index eefdd431d..df426baca 100644 --- a/server/include/buffer.h +++ b/server/include/buffer.h @@ -1,7 +1,7 @@ #ifndef _BUFFER_H #define _BUFFER_H /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -15,7 +15,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
* - * Copyright SkySQL Ab 2013 + * Copyright MariaDB Corporation Ab 2013-2014 */ /** @@ -40,6 +40,9 @@ * 16/07/2013 Massimiliano Pinto Added command type for the queue * 10/07/2014 Mark Riddoch Addition of hints * 15/07/2014 Mark Riddoch Added buffer properties + * 03/10/2014 Martin Brampton Pointer arithmetic standard conformity + * Add more buffer handling macros + * Add gwbuf_rtrim (handle chains) * * @endverbatim */ @@ -47,6 +50,7 @@ #include #include #include +#include EXTERN_C_BLOCK_BEGIN @@ -146,19 +150,25 @@ typedef struct gwbuf { /*< * Macros to access the data in the buffers */ -/*< First valid, uncomsumed byte in the buffer */ +/*< First valid, unconsumed byte in the buffer */ #define GWBUF_DATA(b) ((b)->start) /*< Number of bytes in the individual buffer */ -#define GWBUF_LENGTH(b) ((b)->end - (b)->start) +#define GWBUF_LENGTH(b) ((char *)(b)->end - (char *)(b)->start) + +/*< Return the byte at offset byte from the start of the unconsumed portion of the buffer */ +#define GWBUF_DATA_CHAR(b, byte) (GWBUF_LENGTH(b) < ((byte)+1) ? -1 : *(((char *)(b)->start)+4)) + +/*< Check that the data in a buffer has the SQL marker*/ +#define GWBUF_IS_SQL(b) (0x03 == GWBUF_DATA_CHAR(b,4)) /*< True if all bytes in the buffer have been consumed */ -#define GWBUF_EMPTY(b) ((b)->start == (b)->end) +#define GWBUF_EMPTY(b) ((char *)(b)->start >= (char *)(b)->end) /*< Consume a number of bytes in the buffer */ -#define GWBUF_CONSUME(b, bytes) (b)->start += (bytes) +#define GWBUF_CONSUME(b, bytes) ((b)->start = bytes > ((char *)(b)->end - (char *)(b)->start) ? (b)->end : (void *)((char *)(b)->start + (bytes))); -#define GWBUF_RTRIM(b, bytes) (b)->end -= (bytes) +#define GWBUF_RTRIM(b, bytes) ((b)->end = bytes > ((char *)(b)->end - (char *)(b)->start) ? 
(b)->start : (void *)((char *)(b)->end - (bytes))); #define GWBUF_TYPE(b) (b)->gwbuf_type /*< @@ -170,9 +180,11 @@ extern GWBUF *gwbuf_clone(GWBUF *buf); extern GWBUF *gwbuf_append(GWBUF *head, GWBUF *tail); extern GWBUF *gwbuf_consume(GWBUF *head, unsigned int length); extern GWBUF *gwbuf_trim(GWBUF *head, unsigned int length); +extern GWBUF *gwbuf_rtrim(GWBUF *head, unsigned int length); extern unsigned int gwbuf_length(GWBUF *head); extern GWBUF *gwbuf_clone_portion(GWBUF *head, size_t offset, size_t len); extern GWBUF *gwbuf_clone_transform(GWBUF *head, gwbuf_type_t type); +extern GWBUF *gwbuf_clone_all(GWBUF* head); extern void gwbuf_set_type(GWBUF *head, gwbuf_type_t type); extern int gwbuf_add_property(GWBUF *buf, char *name, char *value); extern char *gwbuf_get_property(GWBUF *buf, char *name); @@ -184,7 +196,6 @@ void gwbuf_add_buffer_object(GWBUF* buf, void* data, void (*donefun_fp)(void *)); void* gwbuf_get_buffer_object_data(GWBUF* buf, bufobj_id_t id); - EXTERN_C_BLOCK_END diff --git a/server/include/config.h b/server/include/config.h index 659fb6378..ca3092576 100644 --- a/server/include/config.h +++ b/server/include/config.h @@ -1,7 +1,7 @@ #ifndef _CONFIG_H #define _CONFIG_H /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -15,7 +15,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
* - * Copyright SkySQL Ab 2013 + * Copyright MariaDB Corporation Ab 2013-2014 */ #include @@ -29,10 +29,13 @@ * 21/06/13 Mark Riddoch Initial implementation * 07/05/14 Massimiliano Pinto Added version_string to global configuration * 23/05/14 Massimiliano Pinto Added id to global configuration + * 17/10/14 Mark Riddoch Added poll tuning configuration parameters * * @endverbatim */ +#define DEFAULT_NBPOLLS 3 /**< Default number of non block polls before we block */ +#define DEFAULT_POLLSLEEP 1000 /**< Default poll wait time (milliseconds) */ /** * Maximum length for configuration parameter value. */ @@ -92,11 +95,15 @@ typedef struct { int n_threads; /**< Number of polling threads */ char *version_string; /**< The version string of embedded database library */ unsigned long id; /**< MaxScale ID */ + unsigned int n_nbpoll; /**< Tune number of non-blocking polls */ + unsigned int pollsleep; /**< Wait time in blocking polls */ } GATEWAY_CONF; extern int config_load(char *); extern int config_reload(); extern int config_threadcount(); +extern unsigned int config_nbpolls(); +extern unsigned int config_pollsleep(); CONFIG_PARAMETER* config_get_param(CONFIG_PARAMETER* params, const char* name); config_param_type_t config_get_paramtype(CONFIG_PARAMETER* param); CONFIG_PARAMETER* config_clone_param(CONFIG_PARAMETER* param); diff --git a/server/include/dbusers.h b/server/include/dbusers.h index 3b0afe50b..7b7fb03dd 100644 --- a/server/include/dbusers.h +++ b/server/include/dbusers.h @@ -1,7 +1,7 @@ #ifndef _DBUSERS_H #define _DBUSERS_H /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. 
@@ -15,7 +15,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * - * Copyright SkySQL Ab 2013 + * Copyright MariaDB Corporation Ab 2013-2014 */ #include @@ -32,6 +32,8 @@ * 25/06/13 Mark Riddoch Initial implementation * 25/02/13 Massimiliano Pinto Added users table refresh rate default values * 28/02/14 Massimiliano Pinto Added MySQL user and host data structure + * 03/10/14 Massimiliano Pinto Added netmask to MySQL user and host data structure + * 13/10/14 Massimiliano Pinto Added resource to MySQL user and host data structure * * @endverbatim */ @@ -52,11 +54,14 @@ typedef struct mysql_user_host_key { char *user; struct sockaddr_in ipv4; + int netmask; + char *resource; } MYSQL_USER_HOST; extern int load_mysql_users(SERVICE *service); extern int reload_mysql_users(SERVICE *service); extern int mysql_users_add(USERS *users, MYSQL_USER_HOST *key, char *auth); +extern int add_mysql_users_with_host_ipv4(USERS *users, char *user, char *host, char *passwd, char *anydb, char *db); extern USERS *mysql_users_alloc(); extern char *mysql_users_fetch(USERS *users, MYSQL_USER_HOST *key); extern int replace_mysql_users(SERVICE *service); diff --git a/server/include/dcb.h b/server/include/dcb.h index 72686b7b7..abd2cbcb6 100644 --- a/server/include/dcb.h +++ b/server/include/dcb.h @@ -1,7 +1,7 @@ #ifndef _DCB_H #define _DCB_H /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -15,7 +15,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
* - * Copyright SkySQL Ab 2013 + * Copyright MariaDB Corporation Ab 2013-2014 */ #include #include @@ -66,7 +66,7 @@ struct dcb; * The operations that can be performed on the descriptor * * read EPOLLIN handler for the socket - * write Gateway data write entry point + * write MaxScale data write entry point * write_ready EPOLLOUT handler for the socket, indicates * that the socket is ready to send more data * error EPOLLERR handler for the socket @@ -74,7 +74,7 @@ struct dcb; * accept Accept handler for listener socket only * connect Create a connection to the specified server * for the session pased in - * close Gateway close entry point for the socket + * close MaxScale close entry point for the socket * listen Create a listener for the protocol * auth Authentication entry point * session Session handling entry point @@ -98,12 +98,28 @@ typedef struct gw_protocol { int (*session)(struct dcb *, void *); } GWPROTOCOL; +/** + * The event queue structure used in the polling loop to maintain a queue + * of events that need to be processed for the DCB. 
+ * + * next The next DCB in the event queue + * prev The previous DCB in the event queue + * pending_events The events that are pending processing + * processing_events The events currently being processed + * processing Flag to indicate the processing status of the DCB + * eventqlock Spinlock to protect this structure + * inserted Insertion time for logging purposes + * started Time that the processing started + */ typedef struct { struct dcb *next; struct dcb *prev; uint32_t pending_events; + uint32_t processing_events; int processing; SPINLOCK eventqlock; + unsigned long inserted; + unsigned long started; } DCBEVENTQ; /** @@ -113,6 +129,8 @@ typedef struct { */ #define GWPROTOCOL_VERSION {1, 0, 0} +#define DCBFD_CLOSED -1 + /** * The statitics gathered on a descriptor control block */ @@ -205,9 +223,9 @@ typedef struct dcb { #if defined(SS_DEBUG) skygw_chk_t dcb_chk_top; - bool dcb_errhandle_called; #endif - dcb_role_t dcb_role; + bool dcb_errhandle_called; /*< this can be called only once */ + dcb_role_t dcb_role; SPINLOCK dcb_initlock; DCBEVENTQ evq; /**< The event queue for this DCB */ int fd; /**< The descriptor */ @@ -252,14 +270,14 @@ typedef struct dcb { #endif } DCB; -#if defined(SS_DEBUG) +#if defined(FAKE_CODE) unsigned char dcb_fake_write_errno[10240]; __int32_t dcb_fake_write_ev[10240]; bool fail_next_backend_fd; bool fail_next_client_fd; int fail_next_accept; int fail_accept_errno; -#endif +#endif /* FAKE_CODE */ /* A few useful macros */ #define DCB_SESSION(x) (x)->session @@ -274,13 +292,7 @@ int fail_accept_errno; #define DCB_POLL_BUSY(x) ((x)->evq.next != NULL) DCB *dcb_get_zombies(void); -int gw_write( -#if defined(SS_DEBUG) - DCB* dcb, -#endif - int fd, - const void* buf, - size_t nbytes); +int gw_write(DCB *, const void *, size_t); int dcb_write(DCB *, GWBUF *); DCB *dcb_alloc(dcb_role_t); void dcb_free(DCB *); @@ -307,19 +319,20 @@ int dcb_remove_callback(DCB *, DCB_REASON, int (*)(struct dcb *, DCB_REASON, vo
void *); int dcb_isvalid(DCB *); /* Check the DCB is in the linked list */ -bool dcb_set_state( - DCB* dcb, - dcb_state_t new_state, - dcb_state_t* old_state); -void dcb_call_foreach (DCB_REASON reason); +bool dcb_set_state(DCB* dcb, dcb_state_t new_state, dcb_state_t* old_state); +void dcb_call_foreach (DCB_REASON reason); +size_t dcb_get_session_id(DCB* dcb); +bool dcb_get_ses_log_info(DCB* dcb, size_t* sesid, int* enabled_logs); -void dcb_call_foreach ( - DCB_REASON reason); /** * DCB flags values */ #define DCBF_CLONE 0x0001 /*< DCB is a clone */ #define DCBF_HUNG 0x0002 /*< Hangup has been dispatched */ +#define DCBF_REPLIED 0x0004 /*< DCB was written to */ + +#define DCB_IS_CLONE(d) ((d)->flags & DCBF_CLONE) +#define DCB_REPLIED(d) ((d)->flags & DCBF_REPLIED) #endif /* _DCB_H */ diff --git a/server/include/filter.h b/server/include/filter.h index 3076587e6..ad0b9a21a 100644 --- a/server/include/filter.h +++ b/server/include/filter.h @@ -1,7 +1,7 @@ #ifndef _FILTER_H #define _FILTER_H /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -15,7 +15,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
* - * Copyright SkySQL Ab 2014 + * Copyright MariaDB Corporation Ab 2014 */ /** diff --git a/server/include/gw.h b/server/include/gw.h index 14adfb1a0..9c8507210 100644 --- a/server/include/gw.h +++ b/server/include/gw.h @@ -61,12 +61,7 @@ int do_read_dcb(DCB *dcb); int do_read_10(DCB *dcb, uint8_t *buffer); int MySQLWrite(DCB *dcb, GWBUF *queue); int setnonblocking(int fd); -int gw_write( -#if defined(SS_DEBUG) - DCB* dcb, -#endif - int fd, - const void* buf, - size_t nbytes); +int gw_write(DCB *dcb, const void *buf, size_t nbytes); int gw_getsockerrno(int fd); int parse_bindconfig(char *, unsigned short, struct sockaddr_in *); +int setipaddress(struct in_addr *, char *); diff --git a/server/include/gwbitmask.h b/server/include/gwbitmask.h index 6b3b6b622..87f1a8b98 100644 --- a/server/include/gwbitmask.h +++ b/server/include/gwbitmask.h @@ -1,7 +1,7 @@ #ifndef _GWBITMASK_H #define _GWBITMASK_H /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -15,7 +15,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * - * Copyright SkySQL Ab 2013 + * Copyright MariaDB Corporation Ab 2013-2014 */ #include diff --git a/server/include/hashtable.h b/server/include/hashtable.h index 175bd5d32..07c7d0950 100644 --- a/server/include/hashtable.h +++ b/server/include/hashtable.h @@ -1,7 +1,7 @@ #ifndef _HASTABLE_H #define _HASTABLE_H /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. 
It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -15,7 +15,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * - * Copyright SkySQL Ab 2013 + * Copyright MariaDB Corporation Ab 2013-2014 */ /** diff --git a/server/include/hint.h b/server/include/hint.h index 03c319142..4cc687731 100644 --- a/server/include/hint.h +++ b/server/include/hint.h @@ -15,7 +15,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * - * Copyright SkySQL Ab 2014 + * Copyright MariaDB Corporation Ab 2014 */ /** diff --git a/server/include/housekeeper.h b/server/include/housekeeper.h index 35e76e80d..0379ff23a 100644 --- a/server/include/housekeeper.h +++ b/server/include/housekeeper.h @@ -1,7 +1,7 @@ #ifndef _HOUSEKEEPER_H #define _HOUSEKEEPER_H /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -15,9 +15,10 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
* - * Copyright SkySQL Ab 2014 + * Copyright MariaDB Corporation Ab 2014 */ #include +#include /** * @file housekeeper.h A mechanism to have task run periodically @@ -31,6 +32,11 @@ * @endverbatim */ +typedef enum { + HK_REPEATED = 1, + HK_ONESHOT +} HKTASK_TYPE; + /** * The housekeeper task list */ @@ -40,12 +46,22 @@ typedef struct hktask { void *data; /*< Data to pass the task */ int frequency; /*< How often to call the tasks (seconds) */ time_t nextdue; /*< When the task should be next run */ + HKTASK_TYPE + type; /*< The task type */ struct hktask *next; /*< Next task in the list */ } HKTASK; +/** + * The global housekeeper heartbeat value. This value is incremented + * every 100ms and may be used for crude timing etc. + */ +extern unsigned long hkheartbeat; + extern void hkinit(); extern int hktask_add(char *name, void (*task)(void *), void *data, int frequency); +extern int hktask_oneshot(char *name, void (*task)(void *), void *data, int when); extern int hktask_remove(char *name); extern void hkshutdown(); +extern void hkshow_tasks(DCB *pdcb); #endif diff --git a/server/include/maxscale.h b/server/include/maxscale.h index 4fb5dd75f..521c42dd9 100644 --- a/server/include/maxscale.h +++ b/server/include/maxscale.h @@ -1,7 +1,7 @@ #ifndef _MAXSCALE_H #define _MAXSCALE_H /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -15,7 +15,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* - * Copyright SkySQL Ab 2014 + * Copyright MariaDB Corporation Ab 2014 */ /** diff --git a/server/include/memlog.h b/server/include/memlog.h new file mode 100644 index 000000000..de183a48d --- /dev/null +++ b/server/include/memlog.h @@ -0,0 +1,65 @@ +#ifndef _MEMLOG_H +#define _MEMLOG_H +/* + * This file is distributed as part of MariaDB MaxScale. It is free + * software: you can redistribute it and/or modify it under the terms of the + * GNU General Public License as published by the Free Software Foundation, + * version 2. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 51 + * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Copyright MariaDB Ab 2014 + */ + +/** + * @file memlog.h The memory logging mechanism + * + * @verbatim + * Revision History + * + * Date Who Description + * 26/09/14 Mark Riddoch Initial implementation + * + * @endverbatim + */ +#include + +typedef enum { ML_INT, ML_LONG, ML_LONGLONG, ML_STRING } MEMLOGTYPE; + +typedef struct memlog { + char *name; + SPINLOCK lock; + void *values; + int offset; + int size; + MEMLOGTYPE type; + unsigned int flags; + unsigned int iflags; + struct memlog *next; +} MEMLOG; + +/* + * MEMLOG flag bits + */ +#define MLNOAUTOFLUSH 0x0001 + +/* + * MEMLOG internal flags + */ +#define MLWRAPPED 0x0001 + + +extern MEMLOG *memlog_create(char *, MEMLOGTYPE, int); +extern void memlog_destroy(MEMLOG *); +extern void memlog_set(MEMLOG *, unsigned int); +extern void memlog_log(MEMLOG *, void *); +extern void memlog_flush_all(); +extern void memlog_flush(MEMLOG *); +#endif diff --git a/server/include/modinfo.h b/server/include/modinfo.h index bc4107b39..759cea0fb 100644 --- a/server/include/modinfo.h +++ b/server/include/modinfo.h @@ -15,7 +15,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * - * Copyright SkySQL Ab 2014 + * Copyright MariaDB Corporation Ab 2014 */ /** diff --git a/server/include/modules.h b/server/include/modules.h index 199e3a24b..adda2b255 100644 --- a/server/include/modules.h +++ b/server/include/modules.h @@ -1,7 +1,7 @@ #ifndef _MODULES_H #define _MODULES_H /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -15,7 +15,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
* - * Copyright SkySQL Ab 2013 + * Copyright MariaDB Corporation Ab 2013-2014 */ #include #include @@ -32,6 +32,8 @@ * 13/06/13 Mark Riddoch Initial implementation * 08/07/13 Mark Riddoch Addition of monitor modules * 29/05/14 Mark Riddoch Addition of filter modules + * 01/10/14 Mark Riddoch Addition of call to unload all modules on + * shutdown * @endverbatim */ @@ -58,6 +60,7 @@ typedef struct modules { extern void *load_module(const char *module, const char *type); extern void unload_module(const char *module); +extern void unload_all_modules(); extern void printModules(); extern void dprintAllModules(DCB *); char* get_maxscale_home(void); diff --git a/server/include/modutil.h b/server/include/modutil.h index a0624752a..fac39cbcc 100644 --- a/server/include/modutil.h +++ b/server/include/modutil.h @@ -1,7 +1,7 @@ #ifndef _MODUTIL_H #define _MODUTIL_H /* - * This file is distributed as part of MaxScale from SkySQL. It is free + * This file is distributed as part of MaxScale from MariaDB Corporation. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -15,7 +15,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
* - * Copyright SkySQL Ab 2014 + * Copyright MariaDB Corporation Ab 2014 */ /** @@ -24,18 +24,32 @@ * @verbatim * Revision History * - * Date Who Description - * 04/06/14 Mark Riddoch Initial implementation - * 24/06/14 Mark Riddoch Add modutil_MySQL_Query to enable multipacket queries + * Date Who Description + * 04/06/14 Mark Riddoch Initial implementation + * 24/06/14 Mark Riddoch Add modutil_MySQL_Query to enable multipacket queries + * 24/10/14 Massimiliano Pinto Add modutil_send_mysql_err_packet to send a mysql ERR_Packet * * @endverbatim */ #include +#include extern int modutil_is_SQL(GWBUF *); extern int modutil_extract_SQL(GWBUF *, char **, int *); extern int modutil_MySQL_Query(GWBUF *, char **, int *, int *); +extern char *modutil_get_SQL(GWBUF *); extern GWBUF *modutil_replace_SQL(GWBUF *, char *); -char* modutil_get_query(GWBUF* buf); +extern char *modutil_get_query(GWBUF* buf); +extern int modutil_send_mysql_err_packet(DCB *, int, int, int, const char *, const char *); +GWBUF* modutil_get_next_MySQL_packet(GWBUF** p_readbuf); +int modutil_MySQL_query_len(GWBUF* buf, int* nbytes_missing); + + +GWBUF *modutil_create_mysql_err_msg( + int packet_number, + int affected_rows, + int merrno, + const char *statemsg, + const char *msg); #endif diff --git a/server/include/monitor.h b/server/include/monitor.h index 04337d761..c08e8153e 100644 --- a/server/include/monitor.h +++ b/server/include/monitor.h @@ -1,7 +1,7 @@ #ifndef _MONITOR_H #define _MONITOR_H /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -15,7 +15,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
* - * Copyright SkySQL Ab 2013 + * Copyright MariaDB Corporation Ab 2013-2014 */ #include #include @@ -33,6 +33,8 @@ * 23/05/14 Massimiliano Pinto Addition of defaultId and setInterval * 23/06/14 Massimiliano Pinto Addition of replicationHeartbeat * 28/08/14 Massimiliano Pinto Addition of detectStaleMaster + * 30/10/14 Massimiliano Pinto Addition of disableMasterFailback + * 07/11/14 Massimiliano Pinto Addition of setNetworkTimeout * * @endverbatim */ @@ -70,9 +72,11 @@ typedef struct { void (*defaultUser)(void *, char *, char *); void (*diagnostics)(DCB *, void *); void (*setInterval)(void *, size_t); + void (*setNetworkTimeout)(void *, int, int); void (*defaultId)(void *, unsigned long); void (*replicationHeartbeat)(void *, int); void (*detectStaleMaster)(void *, int); + void (*disableMasterFailback)(void *, int); } MONITOR_OBJECT; /** @@ -96,6 +100,20 @@ typedef enum MONITOR_STATE_FREED = 0x08 } monitor_state_t; +/** + * Monitor network timeout types + */ +typedef enum +{ + MONITOR_CONNECT_TIMEOUT = 0, + MONITOR_READ_TIMEOUT = 1, + MONITOR_WRITE_TIMEOUT = 2 +} monitor_timeouts_t; + +#define DEFAULT_CONNECT_TIMEOUT 3 +#define DEFAULT_READ_TIMEOUT 1 +#define DEFAULT_WRITE_TIMEOUT 2 + /** * Representation of the running monitor. */ @@ -123,4 +141,6 @@ extern void monitorSetId(MONITOR *, unsigned long); extern void monitorSetInterval (MONITOR *, unsigned long); extern void monitorSetReplicationHeartbeat(MONITOR *, int); extern void monitorDetectStaleMaster(MONITOR *, int); +extern void monitorDisableMasterFailback(MONITOR *, int); +extern void monitorSetNetworkTimeout(MONITOR *, int, int); #endif diff --git a/server/include/poll.h b/server/include/poll.h index 6524f1bbb..24bf0645d 100644 --- a/server/include/poll.h +++ b/server/include/poll.h @@ -1,7 +1,7 @@ #ifndef _POLL_H #define _POLL_H /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. 
It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -15,7 +15,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * - * Copyright SkySQL Ab 2013 + * Copyright MariaDB Corporation Ab 2013-2014 */ #include #include @@ -32,7 +32,6 @@ * @endverbatim */ #define MAX_EVENTS 1000 -#define EPOLL_TIMEOUT 1000 /**< The epoll timeout in milliseconds */ extern void poll_init(); extern int poll_add_dcb(DCB *); @@ -40,6 +39,11 @@ extern int poll_remove_dcb(DCB *); extern void poll_waitevents(void *); extern void poll_shutdown(); extern GWBITMASK *poll_bitmask(); +extern void poll_set_maxwait(unsigned int); +extern void poll_set_nonblocking_polls(unsigned int); extern void dprintPollStats(DCB *); extern void dShowThreads(DCB *dcb); +void poll_add_epollin_event_to_dcb(DCB* dcb, GWBUF* buf); +extern void dShowEventQ(DCB *dcb); +extern void dShowEventStats(DCB *dcb); #endif diff --git a/server/include/rdtsc.h b/server/include/rdtsc.h index 50a752f31..b221417b7 100644 --- a/server/include/rdtsc.h +++ b/server/include/rdtsc.h @@ -1,7 +1,7 @@ #ifndef _RDTSC_H #define _RDTSC_H /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -15,7 +15,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
* - * Copyright SkySQL Ab 2014 + * Copyright MariaDB Corporation Ab 2014 */ /** diff --git a/server/include/router.h b/server/include/router.h index 6c29fe1bf..c3401c43a 100644 --- a/server/include/router.h +++ b/server/include/router.h @@ -1,7 +1,7 @@ #ifndef _ROUTER_H #define _ROUTER_H /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -15,7 +15,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * - * Copyright SkySQL Ab 2013 + * Copyright MariaDB Corporation Ab 2013-2014 */ /** diff --git a/server/include/secrets.h b/server/include/secrets.h index 0325d75ae..505e79154 100644 --- a/server/include/secrets.h +++ b/server/include/secrets.h @@ -1,7 +1,7 @@ #ifndef _SECRETS_H #define _SECRETS_H /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -15,7 +15,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * - * Copyright SkySQL Ab 2013 + * Copyright MariaDB Corporation Ab 2013-2014 */ /** diff --git a/server/include/server.h b/server/include/server.h index f6d146ccd..4bb514d65 100644 --- a/server/include/server.h +++ b/server/include/server.h @@ -1,7 +1,7 @@ #ifndef _SERVER_H #define _SERVER_H /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. 
It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -15,7 +15,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * - * Copyright SkySQL Ab 2013 + * Copyright MariaDB Corporation Ab 2013-2014 */ #include @@ -40,6 +40,7 @@ * 26/06/14 Mark Riddoch Adidtion of server parameters * 30/07/14 Massimiliano Pinto Addition of NDB status for MySQL Cluster * 30/08/14 Massimiliano Pinto Addition of SERVER_STALE_STATUS + * 27/10/14 Massimiliano Pinto Addition of SERVER_MASTER_STICKINESS * * @endverbatim */ @@ -90,6 +91,7 @@ typedef struct server { long master_id; /**< Master server id of this node */ int depth; /**< Replication level in the tree */ long *slaves; /**< Slaves of this node */ + bool master_err_is_logged; /*< If node failed, this indicates whether it is logged */ } SERVER; /** @@ -105,6 +107,7 @@ typedef struct server { #define SERVER_MAINT 0x0020 /**<< Server is in maintenance mode */ #define SERVER_SLAVE_OF_EXTERNAL_MASTER 0x0040 /**<< Server is slave of a Master outside the provided replication topology */ #define SERVER_STALE_STATUS 0x0080 /**<< Server stale status, monitor didn't update it */ +#define SERVER_MASTER_STICKINESS 0x0100 /**<< Server Master stickiness */ #define SERVER_AUTH_ERROR 0x1000 /**<< Authentication erorr from monitor */ /** @@ -121,8 +124,11 @@ typedef struct server { * Is the server a master? 
The server must be both running and marked as master * in order for the macro to return true */ -#define SERVER_IS_MASTER(server) \ - (((server)->status & (SERVER_RUNNING|SERVER_MASTER|SERVER_SLAVE|SERVER_MAINT)) == (SERVER_RUNNING|SERVER_MASTER)) +#define SERVER_IS_MASTER(server) SRV_MASTER_STATUS((server)->status) + +#define SRV_MASTER_STATUS(status) ((status & \ + (SERVER_RUNNING|SERVER_MASTER|SERVER_SLAVE|SERVER_MAINT)) == \ + (SERVER_RUNNING|SERVER_MASTER)) /** * Is the server valid candidate for root master. The server must be running, diff --git a/server/include/service.h b/server/include/service.h index 139a08056..ab18e5d29 100644 --- a/server/include/service.h +++ b/server/include/service.h @@ -1,7 +1,7 @@ #ifndef _SERVICE_H #define _SERVICE_H /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -15,7 +15,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * - * Copyright SkySQL Ab 2013 + * Copyright MariaDB Corporation Ab 2013-2014 */ #include @@ -23,6 +23,7 @@ #include #include #include +#include #include "config.h" /** @@ -45,6 +46,7 @@ * 29/05/14 Mark Riddoch Filter API mechanism * 26/06/14 Mark Riddoch Added WeightBy support * 09/09/14 Massimiliano Pinto Added service option for localhost authentication + * 09/10/14 Massimiliano Pinto Added service resources via hashtable * * @endverbatim */ @@ -97,6 +99,11 @@ typedef struct { time_t last; } SERVICE_REFRESH_RATE; +typedef struct server_ref_t{ + struct server_ref_t *next; + SERVER* server; +}SERVER_REF; + /** * Defines a service within the gateway. 
* @@ -117,16 +124,18 @@ typedef struct service { void *router_instance; /**< The router instance for this service */ char *version_string;/** version string for this service listeners */ - struct server *databases; /**< The set of servers in the backend */ + SERVER_REF *dbref; /** server references */ SERVICE_USER credentials; /**< The cedentials of the service user */ SPINLOCK spin; /**< The service spinlock */ SERVICE_STATS stats; /**< The service statistics */ struct users *users; /**< The user data for this service */ int enable_root; /**< Allow root user access */ int localhost_match_wildcard_host; /**< Match localhost against wildcard */ + HASHTABLE *resources; /**< hastable for service resources, i.e. database names */ CONFIG_PARAMETER* svc_config_param; /*< list of config params and values */ int svc_config_version; /*< Version number of configuration */ + bool svc_do_shutdown; /*< tells the service to exit loops etc. */ SPINLOCK users_table_spin; /**< The spinlock for users data refresh */ SERVICE_REFRESH_RATE @@ -141,8 +150,10 @@ typedef enum count_spec_t {COUNT_NONE=0, COUNT_ATLEAST, COUNT_EXACT, COUNT_ATMOS #define SERVICE_STATE_ALLOC 1 /**< The service has been allocated */ #define SERVICE_STATE_STARTED 2 /**< The service has been started */ +#define SERVICE_STATE_FAILED 3 /**< The service failed to start */ +#define SERVICE_STATE_STOPPED 4 /**< The service has been stopped */ -extern SERVICE *service_alloc(char *, char *); +extern SERVICE *service_alloc(const char *, const char *); extern int service_free(SERVICE *); extern SERVICE *service_find(char *); extern int service_isvalid(SERVICE *); @@ -181,4 +192,5 @@ extern void dprintService(DCB *, SERVICE *); extern void dListServices(DCB *); extern void dListListeners(DCB *); char* service_get_name(SERVICE* svc); +void service_shutdown(); #endif diff --git a/server/include/session.h b/server/include/session.h index cbd43fe40..e008cc4ff 100644 --- a/server/include/session.h +++ b/server/include/session.h 
@@ -1,7 +1,7 @@ #ifndef _SESSION_H #define _SESSION_H /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -15,7 +15,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * - * Copyright SkySQL Ab 2013 + * Copyright MariaDB Corporation Ab 2013-2014 */ /** @@ -41,6 +41,7 @@ #include #include #include +#include struct dcb; struct service; @@ -60,6 +61,7 @@ typedef enum { SESSION_STATE_STOPPING, /*< session and router are being closed */ SESSION_STATE_LISTENER, /*< for listener session */ SESSION_STATE_LISTENER_STOPPED, /*< for listener session */ + SESSION_STATE_TO_BE_FREED, /*< ready to be freed as soon as there are no references */ SESSION_STATE_FREE /*< for all sessions */ } session_state_t; @@ -109,18 +111,21 @@ typedef struct session { skygw_chk_t ses_chk_top; #endif SPINLOCK ses_lock; - session_state_t state; /**< Current descriptor state */ - struct dcb *client; /**< The client connection */ - void *data; /**< The session data */ - void *router_session;/**< The router instance data */ - SESSION_STATS stats; /**< Session statistics */ - struct service *service; /**< The service this session is using */ - int n_filters; /**< Number of filter sessions */ - SESSION_FILTER *filters; /**< The filters in use within this session */ - DOWNSTREAM head; /**< Head of the filter chain */ - UPSTREAM tail; /**< The tail of the filter chain */ - struct session *next; /**< Linked list of all sessions */ - int refcount; /**< Reference count on the session */ + session_state_t state; /*< Current descriptor state */ + size_t ses_id; /*< Unique session identifier */ + int ses_enabled_logs; /*< Bitfield of enabled logs */ + struct 
dcb *client; /*< The client connection */ + void *data; /*< The session data */ + void *router_session; /*< The router instance data */ + SESSION_STATS stats; /*< Session statistics */ + struct service *service; /*< The service this session is using */ + int n_filters; /*< Number of filter sessions */ + SESSION_FILTER *filters; /*< The filters in use within this session */ + DOWNSTREAM head; /*< Head of the filter chain */ + UPSTREAM tail; /*< The tail of the filter chain */ + struct session *next; /*< Linked list of all sessions */ + int refcount; /*< Reference count on the session */ + bool ses_is_child; /*< this is a child session */ #if defined(SS_DEBUG) skygw_chk_t ses_chk_tail; #endif @@ -145,6 +150,7 @@ typedef struct session { ((sess)->tail.clientReply)((sess)->tail.instance, \ (sess)->tail.session, (buf)) +SESSION *get_all_sessions(); SESSION *session_alloc(struct service *, struct dcb *); bool session_free(SESSION *); int session_isvalid(SESSION *); @@ -159,4 +165,7 @@ void dListSessions(struct dcb *); char *session_state(int); bool session_link_dcb(SESSION *, struct dcb *); SESSION* get_session_by_router_ses(void* rses); +void session_enable_log(SESSION* ses, logfile_id_t id); +void session_disable_log(SESSION* ses, logfile_id_t id); + #endif diff --git a/server/include/spinlock.h b/server/include/spinlock.h index e5f938815..47677119e 100644 --- a/server/include/spinlock.h +++ b/server/include/spinlock.h @@ -1,7 +1,7 @@ #ifndef _SPINLOCK_H #define _SPINLOCK_H /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -15,7 +15,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
* - * Copyright SkySQL Ab 2013 + * Copyright MariaDB Corporation Ab 2013-2014 */ /** diff --git a/server/include/thread.h b/server/include/thread.h index e3ea6f0dc..9d391b9d7 100644 --- a/server/include/thread.h +++ b/server/include/thread.h @@ -1,7 +1,7 @@ #ifndef _THREAD_H #define _THREAD_H /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -15,7 +15,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * - * Copyright SkySQL Ab 2013 + * Copyright MariaDB Corporation Ab 2013-2014 */ #include diff --git a/server/include/users.h b/server/include/users.h index ffa091e3b..cf0bca17f 100644 --- a/server/include/users.h +++ b/server/include/users.h @@ -1,7 +1,7 @@ #ifndef _USERS_H #define _USERS_H /* - * This file is distributed as part of the SkySQL Gateway. It is free + * This file is distributed as part of the MariaDB Corporation MaxScale. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -15,7 +15,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
* - * Copyright SkySQL Ab 2013 + * Copyright MariaDB Corporation Ab 2013-2014 */ #include #include diff --git a/server/inih/._LICENSE.txt b/server/inih/._LICENSE.txt deleted file mode 100755 index 7fa6eb8a6..000000000 Binary files a/server/inih/._LICENSE.txt and /dev/null differ diff --git a/server/inih/._README.txt b/server/inih/._README.txt deleted file mode 100755 index 7fa6eb8a6..000000000 Binary files a/server/inih/._README.txt and /dev/null differ diff --git a/server/inih/._cpp b/server/inih/._cpp deleted file mode 100755 index 17b8574a4..000000000 Binary files a/server/inih/._cpp and /dev/null differ diff --git a/server/inih/._examples b/server/inih/._examples deleted file mode 100755 index 17b8574a4..000000000 Binary files a/server/inih/._examples and /dev/null differ diff --git a/server/inih/._extra b/server/inih/._extra deleted file mode 100755 index 17b8574a4..000000000 Binary files a/server/inih/._extra and /dev/null differ diff --git a/server/inih/._ini.c b/server/inih/._ini.c deleted file mode 100755 index 17b8574a4..000000000 Binary files a/server/inih/._ini.c and /dev/null differ diff --git a/server/inih/._ini.h b/server/inih/._ini.h deleted file mode 100755 index 17b8574a4..000000000 Binary files a/server/inih/._ini.h and /dev/null differ diff --git a/server/inih/._tests b/server/inih/._tests deleted file mode 100755 index 7fa6eb8a6..000000000 Binary files a/server/inih/._tests and /dev/null differ diff --git a/server/inih/cpp/._INIReader.cpp b/server/inih/cpp/._INIReader.cpp deleted file mode 100755 index 17b8574a4..000000000 Binary files a/server/inih/cpp/._INIReader.cpp and /dev/null differ diff --git a/server/inih/cpp/._INIReader.h b/server/inih/cpp/._INIReader.h deleted file mode 100755 index 17b8574a4..000000000 Binary files a/server/inih/cpp/._INIReader.h and /dev/null differ diff --git a/server/inih/cpp/._INIReaderTest.cpp b/server/inih/cpp/._INIReaderTest.cpp deleted file mode 100755 index 17b8574a4..000000000 Binary files 
a/server/inih/cpp/._INIReaderTest.cpp and /dev/null differ diff --git a/server/inih/examples/._config.def b/server/inih/examples/._config.def deleted file mode 100755 index 17b8574a4..000000000 Binary files a/server/inih/examples/._config.def and /dev/null differ diff --git a/server/inih/examples/._ini_dump.c b/server/inih/examples/._ini_dump.c deleted file mode 100755 index 17b8574a4..000000000 Binary files a/server/inih/examples/._ini_dump.c and /dev/null differ diff --git a/server/inih/examples/._ini_example.c b/server/inih/examples/._ini_example.c deleted file mode 100755 index 17b8574a4..000000000 Binary files a/server/inih/examples/._ini_example.c and /dev/null differ diff --git a/server/inih/examples/._ini_xmacros.c b/server/inih/examples/._ini_xmacros.c deleted file mode 100755 index 17b8574a4..000000000 Binary files a/server/inih/examples/._ini_xmacros.c and /dev/null differ diff --git a/server/inih/examples/._test.ini b/server/inih/examples/._test.ini deleted file mode 100755 index 17b8574a4..000000000 Binary files a/server/inih/examples/._test.ini and /dev/null differ diff --git a/server/inih/extra/._Makefile.static b/server/inih/extra/._Makefile.static deleted file mode 100755 index 17b8574a4..000000000 Binary files a/server/inih/extra/._Makefile.static and /dev/null differ diff --git a/server/inih/tests/._bad_comment.ini b/server/inih/tests/._bad_comment.ini deleted file mode 100755 index 7fa6eb8a6..000000000 Binary files a/server/inih/tests/._bad_comment.ini and /dev/null differ diff --git a/server/inih/tests/._bad_multi.ini b/server/inih/tests/._bad_multi.ini deleted file mode 100755 index 7fa6eb8a6..000000000 Binary files a/server/inih/tests/._bad_multi.ini and /dev/null differ diff --git a/server/inih/tests/._bad_section.ini b/server/inih/tests/._bad_section.ini deleted file mode 100755 index 7fa6eb8a6..000000000 Binary files a/server/inih/tests/._bad_section.ini and /dev/null differ diff --git a/server/inih/tests/._baseline_multi.txt 
b/server/inih/tests/._baseline_multi.txt deleted file mode 100755 index 7fa6eb8a6..000000000 Binary files a/server/inih/tests/._baseline_multi.txt and /dev/null differ diff --git a/server/inih/tests/._baseline_single.txt b/server/inih/tests/._baseline_single.txt deleted file mode 100755 index 7fa6eb8a6..000000000 Binary files a/server/inih/tests/._baseline_single.txt and /dev/null differ diff --git a/server/inih/tests/._bom.ini b/server/inih/tests/._bom.ini deleted file mode 100755 index 7fa6eb8a6..000000000 Binary files a/server/inih/tests/._bom.ini and /dev/null differ diff --git a/server/inih/tests/._multi_line.ini b/server/inih/tests/._multi_line.ini deleted file mode 100755 index 7fa6eb8a6..000000000 Binary files a/server/inih/tests/._multi_line.ini and /dev/null differ diff --git a/server/inih/tests/._normal.ini b/server/inih/tests/._normal.ini deleted file mode 100755 index 7fa6eb8a6..000000000 Binary files a/server/inih/tests/._normal.ini and /dev/null differ diff --git a/server/inih/tests/._unittest.bat b/server/inih/tests/._unittest.bat deleted file mode 100755 index 7fa6eb8a6..000000000 Binary files a/server/inih/tests/._unittest.bat and /dev/null differ diff --git a/server/inih/tests/._unittest.c b/server/inih/tests/._unittest.c deleted file mode 100755 index 7fa6eb8a6..000000000 Binary files a/server/inih/tests/._unittest.c and /dev/null differ diff --git a/server/inih/tests/._user_error.ini b/server/inih/tests/._user_error.ini deleted file mode 100755 index 7fa6eb8a6..000000000 Binary files a/server/inih/tests/._user_error.ini and /dev/null differ diff --git a/server/modules/filter/CMakeLists.txt b/server/modules/filter/CMakeLists.txt index f9dea236d..6e70adaf3 100644 --- a/server/modules/filter/CMakeLists.txt +++ b/server/modules/filter/CMakeLists.txt @@ -24,4 +24,8 @@ add_library(topfilter SHARED topfilter.c) target_link_libraries(topfilter log_manager utils) install(TARGETS topfilter DESTINATION modules) -add_subdirectory(hint) \ No newline at end 
of file +add_subdirectory(hint) + +if(BUILD_TESTS) + add_subdirectory(test) +endif() \ No newline at end of file diff --git a/server/modules/filter/Makefile b/server/modules/filter/Makefile index c52c8a8fa..284c7b2dc 100644 --- a/server/modules/filter/Makefile +++ b/server/modules/filter/Makefile @@ -1,4 +1,4 @@ -# This file is distributed as part of MaxScale form SkySQL. It is free +# This file is distributed as part of MaxScale form MariaDB Corporation. It is free # software: you can redistribute it and/or modify it under the terms of the # GNU General Public License as published by the Free Software Foundation, # version 2. @@ -12,7 +12,7 @@ # this program; if not, write to the Free Software Foundation, Inc., 51 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # -# Copyright SkySQL Ab 2014 +# Copyright MariaDB Corporation Ab 2014 # # Revision History # Date Who Description diff --git a/server/modules/filter/hint/Makefile b/server/modules/filter/hint/Makefile index 4f2194739..202034b9c 100644 --- a/server/modules/filter/hint/Makefile +++ b/server/modules/filter/hint/Makefile @@ -1,4 +1,4 @@ -# This file is distributed as part of MaxScale form SkySQL. It is free +# This file is distributed as part of MaxScale form MariaDB Corporation. It is free # software: you can redistribute it and/or modify it under the terms of the # GNU General Public License as published by the Free Software Foundation, # version 2. @@ -12,7 +12,7 @@ # this program; if not, write to the Free Software Foundation, Inc., 51 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
# -# Copyright SkySQL Ab 2014 +# Copyright MariaDB Corporation Ab 2014 # # Revision History # Date Who Description diff --git a/server/modules/filter/hint/hintfilter.c b/server/modules/filter/hint/hintfilter.c index e54319d5d..67c9f7f28 100644 --- a/server/modules/filter/hint/hintfilter.c +++ b/server/modules/filter/hint/hintfilter.c @@ -1,5 +1,5 @@ /* - * This file is distributed as part of MaxScale by SkySQL. It is free + * This file is distributed as part of MaxScale by MariaDB Corporation. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -13,7 +13,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * - * Copyright SkySQL Ab 2014 + * Copyright MariaDB Corporation Ab 2014 */ #include #include diff --git a/server/modules/filter/hint/hintparser.c b/server/modules/filter/hint/hintparser.c index 77d9c98ec..abf2fb141 100644 --- a/server/modules/filter/hint/hintparser.c +++ b/server/modules/filter/hint/hintparser.c @@ -1,5 +1,5 @@ /* - * This file is distributed as part of MaxScale by SkySQL. It is free + * This file is distributed as part of MaxScale by MariaDB Corporation. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -13,7 +13,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
* - * Copyright SkySQL Ab 2014 + * Copyright MariaDB Corporation Ab 2014 */ #include #include @@ -25,7 +25,10 @@ #include #include -extern int lm_enabled_logfiles_bitmask; +/** Defined in log_manager.cc */ +extern int lm_enabled_logfiles_bitmask; +extern size_t log_ses_count[]; +extern __thread log_info_t tls_log_info; /** * hintparser.c - Find any comment in the SQL packet and look for MAXSCALE @@ -210,7 +213,7 @@ HINT_MODE mode = HM_EXECUTE; /* * If we have got here then we have a comment, ptr point to * the comment character if it is a '#' comment or the second - * character of the comment if it is a -- or /* comment + * character of the comment if it is a -- or \/\* comment * * Move to the next character in the SQL. */ @@ -435,7 +438,7 @@ HINT_MODE mode = HM_EXECUTE; token_free(tok); } /*< while */ - if (tok->token == TOK_EOL) + if ( tok && tok->token == TOK_EOL) { token_free(tok); } @@ -550,7 +553,7 @@ HINT_TOKEN *tok; else if (!inword && inquote == '\0' && **ptr == '=') { *dest = **ptr; - *dest++; + dest++; (*ptr)++; break; } diff --git a/server/modules/filter/mqfilter.c b/server/modules/filter/mqfilter.c index 9761fa777..f88297e8f 100644 --- a/server/modules/filter/mqfilter.c +++ b/server/modules/filter/mqfilter.c @@ -1,5 +1,5 @@ /* - * This file is distributed as part of MaxScale by SkySQL. It is free + * This file is distributed as part of MaxScale by MariaDB Corporation. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -13,7 +13,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
* - * Copyright SkySQL Ab 2014 + * Copyright MariaDB Corporation Ab 2014 */ /** @@ -58,6 +58,7 @@ *@endverbatim * See the individual struct documentations for logging trigger parameters */ +#include #include #include #include @@ -422,8 +423,8 @@ init_conn(MQ_INSTANCE *my_instance) */ char** parse_optstr(char* str, char* tok, int* szstore) { - char* tk = str; - char** arr; + char *lasts, *tk = str; + char **arr; int i = 0, size = 1; while((tk = strpbrk(tk + 1,tok))){ size++; @@ -439,10 +440,10 @@ char** parse_optstr(char* str, char* tok, int* szstore) } *szstore = size; - tk = strtok(str,tok); + tk = strtok_r(str,tok, &lasts); while(tk && i < size){ arr[i++] = strdup(tk); - tk = strtok(NULL,tok); + tk = strtok_r(NULL,tok,&lasts); } return arr; } @@ -1051,7 +1052,8 @@ routeQuery(FILTER *instance, void *session, GWBUF *queue) for(z = 0;zshm_trg->size; i++){ if(strcmp(tmp,my_instance->shm_trg->objects[i]) == 0){ @@ -1102,8 +1104,9 @@ routeQuery(FILTER *instance, void *session, GWBUF *queue) char* tbnm = NULL; if((strchr(sesstbls[j],'.')) != NULL){ - tbnm = strtok(sesstbls[j],"."); - tbnm = strtok(NULL,"."); + char *lasts; + tbnm = strtok_r(sesstbls[j],".",&lasts); + tbnm = strtok_r(NULL,".",&lasts); }else{ tbnm = sesstbls[j]; } @@ -1167,7 +1170,11 @@ routeQuery(FILTER *instance, void *session, GWBUF *queue) } - + if (queue->next != NULL) + { + queue = gwbuf_make_contiguous(queue); + } + if(modutil_extract_SQL(queue, &ptr, &length)){ my_session->was_query = true; diff --git a/server/modules/filter/qlafilter.c b/server/modules/filter/qlafilter.c index b3accfbf8..74c176e02 100644 --- a/server/modules/filter/qlafilter.c +++ b/server/modules/filter/qlafilter.c @@ -1,5 +1,5 @@ /* - * This file is distributed as part of MaxScale by SkySQL. It is free + * This file is distributed as part of MaxScale by MariaDB Corporation. 
It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -13,7 +13,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * - * Copyright SkySQL Ab 2014 + * Copyright MariaDB Corporation Ab 2014 */ /** @@ -40,6 +40,7 @@ */ #include #include +#include #include #include #include @@ -50,11 +51,14 @@ #include #include -extern int lm_enabled_logfiles_bitmask; +/** Defined in log_manager.cc */ +extern int lm_enabled_logfiles_bitmask; +extern size_t log_ses_count[]; +extern __thread log_info_t tls_log_info; MODULE_INFO info = { MODULE_API_FILTER, - MODULE_BETA_RELEASE, + MODULE_GA, FILTER_VERSION, "A simple query logging filter" }; @@ -170,10 +174,11 @@ int i; if ((my_instance = calloc(1, sizeof(QLA_INSTANCE))) != NULL) { - if (options) + if (options){ my_instance->filebase = strdup(options[0]); - else + }else{ my_instance->filebase = strdup("qla"); + } my_instance->source = NULL; my_instance->userName = NULL; my_instance->match = NULL; @@ -196,9 +201,11 @@ int i; my_instance->userName = strdup(params[i]->value); else if (!strcmp(params[i]->name, "filebase")) { - if (my_instance->filebase) + if (my_instance->filebase){ free(my_instance->filebase); - my_instance->source = strdup(params[i]->value); + my_instance->filebase = NULL; + } + my_instance->filebase = strdup(params[i]->value); } else if (!filter_standard_parameter(params[i]->name)) { @@ -219,7 +226,9 @@ int i; my_instance->match))); free(my_instance->match); free(my_instance->source); - free(my_instance->filebase); + if(my_instance->filebase){ + free(my_instance->filebase); + } free(my_instance); return NULL; } @@ -235,7 +244,9 @@ int i; regfree(&my_instance->re); free(my_instance->match); free(my_instance->source); - free(my_instance->filebase); + if(my_instance->filebase){ + free(my_instance->filebase); + } 
free(my_instance); return NULL; } @@ -265,10 +276,17 @@ char *remote, *userName; (char *)malloc(strlen(my_instance->filebase) + 20)) == NULL) { + LOGIF(LE, (skygw_log_write( + LOGFILE_ERROR, + "Error : Memory allocation for qla filter " + "file name failed due to %d, %s.", + errno, + strerror(errno)))); free(my_session); return NULL; } my_session->active = 1; + if (my_instance->source && (remote = session_get_remote(session)) != NULL) { @@ -276,16 +294,45 @@ char *remote, *userName; my_session->active = 0; } userName = session_getUser(session); - if (my_instance->userName && userName && strcmp(userName, - my_instance->userName)) + + if (my_instance->userName && + userName && + strcmp(userName,my_instance->userName)) + { my_session->active = 0; - sprintf(my_session->filename, "%s.%d", my_instance->filebase, - my_instance->sessions); + } + sprintf(my_session->filename, "%s.%d", + my_instance->filebase, + my_instance->sessions); my_instance->sessions++; + if (my_session->active) + { my_session->fp = fopen(my_session->filename, "w"); + + if (my_session->fp == NULL) + { + LOGIF(LE, (skygw_log_write( + LOGFILE_ERROR, + "Error : Opening output file for qla " + "fileter failed due to %d, %s", + errno, + strerror(errno)))); + free(my_session->filename); + free(my_session); + my_session = NULL; + } + } + } + else + { + LOGIF(LE, (skygw_log_write( + LOGFILE_ERROR, + "Error : Memory allocation for qla filter failed due to " + "%d, %s.", + errno, + strerror(errno)))); } - return my_session; } @@ -354,28 +401,35 @@ routeQuery(FILTER *instance, void *session, GWBUF *queue) QLA_INSTANCE *my_instance = (QLA_INSTANCE *)instance; QLA_SESSION *my_session = (QLA_SESSION *)session; char *ptr; -int length; +int length = 0; struct tm t; struct timeval tv; - if (my_session->active && modutil_extract_SQL(queue, &ptr, &length)) + if (my_session->active) { - if ((my_instance->match == NULL || - regexec(&my_instance->re, ptr, 0, NULL, 0) == 0) && - (my_instance->nomatch == NULL || - 
regexec(&my_instance->nore,ptr,0,NULL, 0) != 0)) + if (queue->next != NULL) { - gettimeofday(&tv, NULL); - localtime_r(&tv.tv_sec, &t); - fprintf(my_session->fp, - "%02d:%02d:%02d.%-3d %d/%02d/%d, ", - t.tm_hour, t.tm_min, t.tm_sec, (int)(tv.tv_usec / 1000), - t.tm_mday, t.tm_mon + 1, 1900 + t.tm_year); - fwrite(ptr, sizeof(char), length, my_session->fp); - fwrite("\n", sizeof(char), 1, my_session->fp); + queue = gwbuf_make_contiguous(queue); + } + if ((ptr = modutil_get_SQL(queue)) != NULL) + { + if ((my_instance->match == NULL || + regexec(&my_instance->re, ptr, 0, NULL, 0) == 0) && + (my_instance->nomatch == NULL || + regexec(&my_instance->nore,ptr,0,NULL, 0) != 0)) + { + gettimeofday(&tv, NULL); + localtime_r(&tv.tv_sec, &t); + fprintf(my_session->fp, + "%02d:%02d:%02d.%-3d %d/%02d/%d, ", + t.tm_hour, t.tm_min, t.tm_sec, (int)(tv.tv_usec / 1000), + t.tm_mday, t.tm_mon + 1, 1900 + t.tm_year); + fwrite(ptr, sizeof(char), length, my_session->fp); + fwrite("\n", sizeof(char), 1, my_session->fp); + } + free(ptr); } } - /* Pass the query downstream */ return my_session->down.routeQuery(my_session->down.instance, my_session->down.session, queue); diff --git a/server/modules/filter/regexfilter.c b/server/modules/filter/regexfilter.c index 79cf70c09..b2a8784e4 100644 --- a/server/modules/filter/regexfilter.c +++ b/server/modules/filter/regexfilter.c @@ -1,5 +1,5 @@ /* - * This file is distributed as part of MaxScale by SkySQL. It is free + * This file is distributed as part of MaxScale by MariaDB Corporation. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -13,7 +13,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
* - * Copyright SkySQL Ab 2014 + * Copyright MariaDB Corporation Ab 2014 */ #include #include @@ -24,7 +24,10 @@ #include #include -extern int lm_enabled_logfiles_bitmask; +/** Defined in log_manager.cc */ +extern int lm_enabled_logfiles_bitmask; +extern size_t log_ses_count[]; +extern __thread log_info_t tls_log_info; /** * @file regexfilter.c - a very simple regular expression rewrite filter. @@ -45,7 +48,7 @@ extern int lm_enabled_logfiles_bitmask; MODULE_INFO info = { MODULE_API_FILTER, - MODULE_BETA_RELEASE, + MODULE_GA, FILTER_VERSION, "A query rewrite filter that uses regular expressions to rewite queries" }; @@ -60,7 +63,7 @@ static void setDownstream(FILTER *instance, void *fsession, DOWNSTREAM *downstre static int routeQuery(FILTER *instance, void *fsession, GWBUF *queue); static void diagnostic(FILTER *instance, void *fsession, DCB *dcb); -static char *regex_replace(char *sql, int length, regex_t *re, char *replace); +static char *regex_replace(char *sql, regex_t *re, char *replace); static FILTER_OBJECT MyObject = { createInstance, @@ -192,6 +195,7 @@ int i, cflags = REG_ICASE; if (my_instance->match == NULL || my_instance->replace == NULL) { + free(my_instance); return NULL; } @@ -301,21 +305,28 @@ routeQuery(FILTER *instance, void *session, GWBUF *queue) REGEX_INSTANCE *my_instance = (REGEX_INSTANCE *)instance; REGEX_SESSION *my_session = (REGEX_SESSION *)session; char *sql, *newsql; -int length; if (modutil_is_SQL(queue)) { - modutil_extract_SQL(queue, &sql, &length); - newsql = regex_replace(sql, length, &my_instance->re, - my_instance->replace); - if (newsql) + if (queue->next != NULL) { - queue = modutil_replace_SQL(queue, newsql); - free(newsql); - my_session->replacements++; + queue = gwbuf_make_contiguous(queue); + } + if ((sql = modutil_get_SQL(queue)) != NULL) + { + newsql = regex_replace(sql, &my_instance->re, + my_instance->replace); + if (newsql) + { + queue = modutil_replace_SQL(queue, newsql); + queue = gwbuf_make_contiguous(queue); + 
free(newsql); + my_session->replacements++; + } + else + my_session->no_change++; + free(sql); } - else - my_session->no_change++; } return my_session->down.routeQuery(my_session->down.instance, @@ -362,25 +373,24 @@ REGEX_SESSION *my_session = (REGEX_SESSION *)fsession; * Perform a regular expression match and subsititution on the SQL * * @param sql The original SQL text - * @param length The length of the SQL text * @param re The compiled regular expression * @param replace The replacement text * @return The replaced text or NULL if no replacement was done. */ static char * -regex_replace(char *sql, int length, regex_t *re, char *replace) +regex_replace(char *sql, regex_t *re, char *replace) { char *orig, *result, *ptr; int i, res_size, res_length, rep_length; -int last_match; +int last_match, length; regmatch_t match[10]; - orig = strndup(sql, length); - if (regexec(re, orig, 10, match, 0)) + if (regexec(re, sql, 10, match, 0)) { - free(orig); return NULL; } + length = strlen(sql); + res_size = 2 * length; result = (char *)malloc(res_size); res_length = 0; diff --git a/server/modules/filter/tee.c b/server/modules/filter/tee.c index 2d164ad42..c00e7c992 100644 --- a/server/modules/filter/tee.c +++ b/server/modules/filter/tee.c @@ -1,5 +1,5 @@ /* - * This file is distributed as part of MaxScale by SkySQL. It is free + * This file is distributed as part of MaxScale by MariaDB Corporation. It is free * software: you can redistribute it and/or modify it under the terms of the * GNU General Public License as published by the Free Software Foundation, * version 2. @@ -13,7 +13,7 @@ * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
* - * Copyright SkySQL Ab 2014 + * Copyright MariaDB Corporation Ab 2014 */ /** @@ -41,6 +41,7 @@ * Date Who Description * 20/06/2014 Mark Riddoch Initial implementation * 24/06/2014 Mark Riddoch Addition of support for multi-packet queries + * 12/12/2014 Mark Riddoch Add support for otehr packet types * * @endverbatim */ @@ -57,12 +58,42 @@ #include #include #include +#include +#include -extern int lm_enabled_logfiles_bitmask; +#define MYSQL_COM_QUIT 0x01 +#define MYSQL_COM_INITDB 0x02 +#define MYSQL_COM_FIELD_LIST 0x04 +#define MYSQL_COM_CHANGE_USER 0x11 +#define MYSQL_COM_STMT_PREPARE 0x16 +#define MYSQL_COM_STMT_EXECUTE 0x17 +#define MYSQL_COM_STMT_SEND_LONG_DATA 0x18 +#define MYSQL_COM_STMT_CLOSE 0x19 +#define MYSQL_COM_STMT_RESET 0x1a + +#define REPLY_TIMEOUT_SECOND 5 +#define REPLY_TIMEOUT_MILLISECOND 1 + +static unsigned char required_packets[] = { + MYSQL_COM_QUIT, + MYSQL_COM_INITDB, + MYSQL_COM_FIELD_LIST, + MYSQL_COM_CHANGE_USER, + MYSQL_COM_STMT_PREPARE, + MYSQL_COM_STMT_EXECUTE, + MYSQL_COM_STMT_SEND_LONG_DATA, + MYSQL_COM_STMT_CLOSE, + MYSQL_COM_STMT_RESET, + 0 }; + +/** Defined in log_manager.cc */ +extern int lm_enabled_logfiles_bitmask; +extern size_t log_ses_count[]; +extern __thread log_info_t tls_log_info; MODULE_INFO info = { MODULE_API_FILTER, - MODULE_BETA_RELEASE, + MODULE_GA, FILTER_VERSION, "A tee piece in the filter plumbing" }; @@ -77,19 +108,20 @@ static void *newSession(FILTER *instance, SESSION *session); static void closeSession(FILTER *instance, void *session); static void freeSession(FILTER *instance, void *session); static void setDownstream(FILTER *instance, void *fsession, DOWNSTREAM *downstream); +static void setUpstream(FILTER *instance, void *fsession, UPSTREAM *upstream); static int routeQuery(FILTER *instance, void *fsession, GWBUF *queue); +static int clientReply(FILTER *instance, void *fsession, GWBUF *queue); static void diagnostic(FILTER *instance, void *fsession, DCB *dcb); - static FILTER_OBJECT MyObject = { 
createInstance, newSession, closeSession, freeSession, setDownstream, - NULL, // No Upstream requirement + setUpstream, routeQuery, - NULL, // No client reply + clientReply, diagnostic, }; @@ -117,14 +149,59 @@ typedef struct { */ typedef struct { DOWNSTREAM down; /* The downstream filter */ + UPSTREAM up; /* The upstream filter */ + + FILTER_DEF* dummy_filterdef; int active; /* filter is active? */ + int waiting; /* if the client is waiting for a reply */ + int replies; /* Number of queries received */ + int min_replies; /* Minimum number of replies to receive + * before forwarding the packet to the client*/ DCB *branch_dcb; /* Client DCB for "branch" service */ SESSION *branch_session;/* The branch service session */ int n_duped; /* Number of duplicated queries */ int n_rejected; /* Number of rejected queries */ int residual; /* Any outstanding SQL text */ + GWBUF* tee_replybuf; /* Buffer for reply */ + SPINLOCK tee_lock; } TEE_SESSION; +typedef struct orphan_session_tt +{ + SESSION* session; + struct orphan_session_tt* next; +}orphan_session_t; + +static orphan_session_t* allOrphans = NULL; + +static SPINLOCK orphanLock; +static int packet_is_required(GWBUF *queue); +static int detect_loops(TEE_INSTANCE *instance, HASHTABLE* ht, SERVICE* session); + +static int hkfn( + void* key) +{ + if(key == NULL){ + return 0; + } + unsigned int hash = 0,c = 0; + char* ptr = (char*)key; + while((c = *ptr++)){ + hash = c + (hash << 6) + (hash << 16) - hash; + } + return *(int *)key; +} + +static int hcfn( + void* v1, + void* v2) +{ + char* i1 = (char*) v1; + char* i2 = (char*) v2; + + return strcmp(i1,i2); +} + /** * Implementation of the mandatory version entry point * @@ -143,6 +220,7 @@ version() void ModuleInit() { + spinlock_init(&orphanLock); } /** @@ -229,7 +307,8 @@ int i; free(my_instance->source); free(my_instance); return NULL; - } + } + if (my_instance->match && regcomp(&my_instance->re, my_instance->match, REG_ICASE)) { @@ -277,27 +356,148 @@ TEE_INSTANCE 
*my_instance = (TEE_INSTANCE *)instance; TEE_SESSION *my_session; char *remote, *userName; + if (strcmp(my_instance->service->name, session->service->name) == 0) + { + LOGIF(LE, (skygw_log_write_flush(LOGFILE_ERROR, + "Error : %s: Recursive use of tee filter in service.", + session->service->name))); + my_session = NULL; + goto retblock; + } + + HASHTABLE* ht = hashtable_alloc(100,hkfn,hcfn); + bool is_loop = detect_loops(my_instance,ht,session->service); + hashtable_free(ht); + + if(is_loop) + { + LOGIF(LE, (skygw_log_write_flush(LOGFILE_ERROR, + "Error : %s: Recursive use of tee filter in service.", + session->service->name))); + my_session = NULL; + goto retblock; + } + if ((my_session = calloc(1, sizeof(TEE_SESSION))) != NULL) { my_session->active = 1; my_session->residual = 0; - if (my_instance->source - && (remote = session_get_remote(session)) != NULL) + spinlock_init(&my_session->tee_lock); + if (my_instance->source && + (remote = session_get_remote(session)) != NULL) { if (strcmp(remote, my_instance->source)) + { my_session->active = 0; + + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Warning : Tee filter is not active."))); + } } userName = session_getUser(session); - if (my_instance->userName && userName && strcmp(userName, - my_instance->userName)) + + if (my_instance->userName && + userName && + strcmp(userName, my_instance->userName)) + { my_session->active = 0; + + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Warning : Tee filter is not active."))); + } + if (my_session->active) { - my_session->branch_dcb = dcb_clone(session->client); - my_session->branch_session = session_alloc(my_instance->service, my_session->branch_dcb); + DCB* dcb; + SESSION* ses; + FILTER_DEF* dummy; + UPSTREAM* dummy_upstream; + + if ((dcb = dcb_clone(session->client)) == NULL) + { + freeSession(instance, (void *)my_session); + my_session = NULL; + + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : Creating client DCB for Tee " + "filter failed. 
Terminating session."))); + + goto retblock; + } + + if((dummy = filter_alloc("tee_dummy","tee_dummy")) == NULL) + { + dcb_close(dcb); + freeSession(instance, (void *)my_session); + my_session = NULL; + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : tee: Allocating memory for " + "dummy filter definition failed." + " Terminating session."))); + + goto retblock; + } + + + + if ((ses = session_alloc(my_instance->service, dcb)) == NULL) + { + dcb_close(dcb); + freeSession(instance, (void *)my_session); + my_session = NULL; + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : Creating client session for Tee " + "filter failed. Terminating session."))); + + goto retblock; + } + + ss_dassert(ses->ses_is_child); + + dummy->obj = GetModuleObject(); + dummy->filter = NULL; + + + if((dummy_upstream = filterUpstream( + dummy, my_session, &ses->tail)) == NULL) + { + spinlock_acquire(&ses->ses_lock); + ses->state = SESSION_STATE_STOPPING; + spinlock_release(&ses->ses_lock); + + ses->service->router->closeSession( + ses->service->router_instance, + ses->router_session); + + ses->client = NULL; + dcb->session = NULL; + session_free(ses); + dcb_close(dcb); + freeSession(instance, (void *) my_session); + my_session = NULL; + LOGIF(LE, (skygw_log_write_flush( + LOGFILE_ERROR, + "Error : tee: Allocating memory for" + "dummy upstream failed." 
+ " Terminating session."))); + + goto retblock; + } + + ses->tail = *dummy_upstream; + my_session->min_replies = 2; + my_session->branch_session = ses; + my_session->branch_dcb = dcb; + my_session->dummy_filterdef = dummy; + free(dummy_upstream); } } - +retblock: return my_session; } @@ -322,17 +522,27 @@ SESSION *bsession; { if ((bsession = my_session->branch_session) != NULL) { + CHK_SESSION(bsession); + spinlock_acquire(&bsession->ses_lock); + + if (bsession->state != SESSION_STATE_STOPPING) + { + bsession->state = SESSION_STATE_STOPPING; + } router = bsession->service->router; router_instance = bsession->service->router_instance; rsession = bsession->router_session; + spinlock_release(&bsession->ses_lock); + /** Close router session and all its connections */ router->closeSession(router_instance, rsession); } - dcb_free(my_session->branch_dcb); /* No need to free the session, this is done as * a side effect of closing the client DCB of the * session. */ + + my_session->active = 0; } } @@ -346,11 +556,129 @@ static void freeSession(FILTER *instance, void *session) { TEE_SESSION *my_session = (TEE_SESSION *)session; +SESSION* ses = my_session->branch_session; + if (ses != NULL) + { + if (ses->state == SESSION_STATE_ROUTER_READY) + { + session_free(ses); + } + + if (ses->state == SESSION_STATE_TO_BE_FREED) + { + /** Free branch router session */ + ses->service->router->freeSession( + ses->service->router_instance, + ses->router_session); + /** Free memory of branch client session */ + ses->state = SESSION_STATE_FREE; + free(ses); + /** This indicates that branch session is not available anymore */ + my_session->branch_session = NULL; + } + else if(ses->state == SESSION_STATE_STOPPING) + { + orphan_session_t* orphan; + if((orphan = malloc(sizeof(orphan_session_t))) == NULL) + { + skygw_log_write(LOGFILE_ERROR,"Error : Failed to " + "allocate memory for orphan session struct, " + "child session might leak memory."); + }else{ + orphan->session = ses; + 
spinlock_acquire(&orphanLock); + orphan->next = allOrphans; + allOrphans = orphan; + spinlock_release(&orphanLock); + } + if(ses->refcount == 0) + { + ss_dassert(ses->refcount == 0 && ses->client == NULL); + ses->state = SESSION_STATE_TO_BE_FREED; + } + } + } + if (my_session->dummy_filterdef) + { + filter_free(my_session->dummy_filterdef); + } free(session); + + spinlock_acquire(&orphanLock); + orphan_session_t *ptr = allOrphans, *finished = NULL,*tmp = NULL; +#ifdef SS_DEBUG + int o_stopping = 0, o_ready = 0,o_freed = 0; +#endif + while(ptr) + { + if(ptr->session->state == SESSION_STATE_TO_BE_FREED) + { + if(ptr == allOrphans) + { + tmp = ptr; + allOrphans = ptr->next; + } + else + { + tmp = allOrphans; + while(tmp && tmp->next != ptr) + tmp = tmp->next; + if(tmp) + { + tmp->next = ptr->next; + tmp = ptr; + } + } + } +#ifdef SS_DEBUG + else if(ptr->session->state == SESSION_STATE_STOPPING) + { + o_stopping++; + } + else if(ptr->session->state == SESSION_STATE_ROUTER_READY) + { + o_ready++; + } +#endif + ptr = ptr->next; + if(tmp) + { + tmp->next = finished; + finished = tmp; + tmp = NULL; + } + } + + spinlock_release(&orphanLock); + +#ifdef SS_DEBUG + if(o_stopping + o_ready > 0) + skygw_log_write(LOGFILE_DEBUG,"tee.c: %d orphans in " + "SESSION_STATE_STOPPING, %d orphans in " + "SESSION_STATE_ROUTER_READY. ",o_stopping,o_ready); +#endif + + while(finished) + { +#ifdef SS_DEBUG + skygw_log_write(LOGFILE_DEBUG,"tee.c: %d orphans freed.",++o_freed); +#endif + tmp = finished; + finished = finished->next; + + tmp->session->service->router->freeSession( + tmp->session->service->router_instance, + tmp->session->router_session); + + tmp->session->state = SESSION_STATE_FREE; + free(tmp->session); + free(tmp); + } + + return; } - /** * Set the downstream filter or router to which queries will be * passed from this filter. 
@@ -362,9 +690,23 @@ TEE_SESSION *my_session = (TEE_SESSION *)session; static void setDownstream(FILTER *instance, void *session, DOWNSTREAM *downstream) { -TEE_SESSION *my_session = (TEE_SESSION *)session; + TEE_SESSION *my_session = (TEE_SESSION *) session; + my_session->down = *downstream; +} - my_session->down = *downstream; +/** + * Set the downstream filter or router to which queries will be + * passed from this filter. + * + * @param instance The filter instance data + * @param session The filter session + * @param downstream The downstream filter or router. + */ +static void +setUpstream(FILTER *instance, void *session, UPSTREAM *upstream) +{ + TEE_SESSION *my_session = (TEE_SESSION *) session; + my_session->up = *upstream; } /** @@ -391,46 +733,136 @@ routeQuery(FILTER *instance, void *session, GWBUF *queue) TEE_INSTANCE *my_instance = (TEE_INSTANCE *)instance; TEE_SESSION *my_session = (TEE_SESSION *)session; char *ptr; -int length, rval, residual; +int length, rval, residual = 0; GWBUF *clone = NULL; - if (my_session->residual) + if (my_session->branch_session && + my_session->branch_session->state == SESSION_STATE_ROUTER_READY) { - clone = gwbuf_clone(queue); - if (my_session->residual < GWBUF_LENGTH(clone)) - GWBUF_RTRIM(clone, GWBUF_LENGTH(clone) - residual); - my_session->residual -= GWBUF_LENGTH(clone); - if (my_session->residual < 0) - my_session->residual = 0; - } - else if (my_session->active && - modutil_MySQL_Query(queue, &ptr, &length, &residual)) - { - if ((my_instance->match == NULL || - regexec(&my_instance->re, ptr, 0, NULL, 0) == 0) && - (my_instance->nomatch == NULL || - regexec(&my_instance->nore,ptr,0,NULL, 0) != 0)) + if (my_session->residual) { - clone = gwbuf_clone(queue); - my_session->residual = residual; + clone = gwbuf_clone_all(queue); + + if (my_session->residual < GWBUF_LENGTH(clone)) + { + GWBUF_RTRIM(clone, GWBUF_LENGTH(clone) - residual); + } + my_session->residual -= GWBUF_LENGTH(clone); + + if (my_session->residual < 0) 
+ { + my_session->residual = 0; + } + } + else if (my_session->active && (ptr = modutil_get_SQL(queue)) != NULL) + { + if ((my_instance->match == NULL || + regexec(&my_instance->re, ptr, 0, NULL, 0) == 0) && + (my_instance->nomatch == NULL || + regexec(&my_instance->nore,ptr,0,NULL, 0) != 0)) + { + char *dummy; + + length = modutil_MySQL_query_len(queue, &residual); + clone = gwbuf_clone_all(queue); + my_session->residual = residual; + } + free(ptr); + } + else if (packet_is_required(queue)) + { + clone = gwbuf_clone_all(queue); } } - /* Pass the query downstream */ + + my_session->replies = 0; rval = my_session->down.routeQuery(my_session->down.instance, - my_session->down.session, queue); + my_session->down.session, + queue); if (clone) { my_session->n_duped++; - SESSION_ROUTE_QUERY(my_session->branch_session, clone); + + if (my_session->branch_session->state == SESSION_STATE_ROUTER_READY) + { + SESSION_ROUTE_QUERY(my_session->branch_session, clone); + } + else + { + /** Close tee session */ + my_session->active = 0; + LOGIF(LT, (skygw_log_write( + LOGFILE_TRACE, + "Closed tee filter session."))); + gwbuf_free(clone); + } } else { + if (my_session->active) + { + LOGIF(LT, (skygw_log_write( + LOGFILE_TRACE, + "Closed tee filter session."))); + my_session->active = 0; + } my_session->n_rejected++; } return rval; } + +/** + * The clientReply entry point. This is passed the response buffer + * to which the filter should be applied. Once processed the + * query is passed to the upstream component + * (filter or router) in the filter chain. 
+ * + * @param instance The filter instance data + * @param session The filter session + * @param reply The response data + */ +static int +clientReply (FILTER* instance, void *session, GWBUF *reply) +{ + int rc; + TEE_SESSION *my_session = (TEE_SESSION *) session; + + spinlock_acquire(&my_session->tee_lock); + + ss_dassert(my_session->active); + my_session->replies++; + + if (my_session->tee_replybuf == NULL && + instance != NULL) + { + my_session->tee_replybuf = reply; + } + else + { + gwbuf_free(reply); + } + + if((my_session->branch_session == NULL || + my_session->replies >= my_session->min_replies) && + my_session->tee_replybuf != NULL) + { + rc = my_session->up.clientReply ( + my_session->up.instance, + my_session->up.session, + my_session->tee_replybuf); + my_session->replies = 0; + my_session->tee_replybuf = NULL; + } + else + { + rc = 1; + } + + spinlock_release(&my_session->tee_lock); + return rc; +} /** * Diagnostics routine * @@ -470,3 +902,74 @@ TEE_SESSION *my_session = (TEE_SESSION *)fsession; my_session->n_rejected); } } + +/** + * Determine if the packet is a command that must be sent to the branch + * to maintain the session consistancy. These are COM_INIT_DB, + * COM_CHANGE_USER and COM_QUIT packets. + * + * @param queue The buffer to check + * @return non-zero if the packet should be sent to the branch + */ +static int +packet_is_required(GWBUF *queue) +{ +uint8_t *ptr; +int i; + + ptr = GWBUF_DATA(queue); + if (GWBUF_LENGTH(queue) > 4) + for (i = 0; required_packets[i]; i++) + if (ptr[4] == required_packets[i]) + return 1; + return 0; +} + +/** + * Detects possible loops in the query cloning chain. 
+ */ +int detect_loops(TEE_INSTANCE *instance,HASHTABLE* ht, SERVICE* service) +{ + SERVICE* svc = service; + int i; + + if(ht == NULL) + { + return -1; + } + + if(hashtable_add(ht,(void*)service->name,(void*)true) == 0) + { + return true; + } + + for(i = 0;in_filters;i++) + { + if(strcmp(svc->filters[i]->module,"tee") == 0) + { + /* + * Found a Tee filter, recurse down its path + * if the service name isn't already in the hashtable. + */ + + TEE_INSTANCE* ninst = (TEE_INSTANCE*)svc->filters[i]->filter; + if(ninst == NULL) + { + /** + * This tee instance hasn't been initialized yet and full + * resolution of recursion cannot be done now. + */ + continue; + } + SERVICE* tgt = ninst->service; + + if(detect_loops((TEE_INSTANCE*)svc->filters[i]->filter,ht,tgt)) + { + return true; + } + + } + } + + return false; +} diff --git a/server/modules/filter/test/CMakeLists.txt b/server/modules/filter/test/CMakeLists.txt new file mode 100644 index 000000000..d703bc7be --- /dev/null +++ b/server/modules/filter/test/CMakeLists.txt @@ -0,0 +1,24 @@ +aux_source_directory(${CMAKE_SOURCE_DIR}/server/core CORE_ALL) +foreach(VAR ${CORE_ALL}) + if(NOT( (${VAR} MATCHES "max[a-z_]*.c") OR (${VAR} MATCHES "gateway.c"))) + list(APPEND CORE ${VAR}) + endif() +endforeach() + +include_directories(${CMAKE_CURRENT_SOURCE_DIR}) +add_executable(harness_ui harness_ui.c harness_common.c) +add_executable(harness harness_util.c harness_common.c ${CORE}) +target_link_libraries(harness_ui fullcore log_manager utils) +target_link_libraries(harness fullcore) +execute_process(COMMAND ${CMAKE_COMMAND} -E copy ${ERRMSG} ${CMAKE_CURRENT_BINARY_DIR}) +add_test(TestHintfilter /bin/sh -c "MAXSCALE_HOME=\"${CMAKE_BINARY_DIR}\" ${CMAKE_CURRENT_BINARY_DIR}/harness -i ${CMAKE_CURRENT_SOURCE_DIR}/hint_testing.input -o ${CMAKE_CURRENT_BINARY_DIR}/hint_testing.output -c ${CMAKE_CURRENT_SOURCE_DIR}/hint_testing.cnf -t 1 -s 1 -e ${CMAKE_CURRENT_SOURCE_DIR}/hint_testing.expected") + +add_test(TestRegexfilter /bin/sh -c 
"MAXSCALE_HOME=\"${CMAKE_BINARY_DIR}\" ${CMAKE_CURRENT_BINARY_DIR}/harness -i ${CMAKE_CURRENT_SOURCE_DIR}/regextest.input -o ${CMAKE_CURRENT_BINARY_DIR}/regextest.output -c ${CMAKE_CURRENT_SOURCE_DIR}/regextest.cnf -t 1 -s 1 -e ${CMAKE_CURRENT_SOURCE_DIR}/regextest.expected") + +add_test(TestTeeRecursion ${CMAKE_CURRENT_SOURCE_DIR}/tee_recursion.sh + ${CMAKE_BINARY_DIR} + ${CMAKE_SOURCE_DIR} + ${TEST_USER} + ${TEST_PASSWORD} + ${TEST_HOST} + ${TEST_PORT}) diff --git a/server/modules/filter/test/Makefile b/server/modules/filter/test/Makefile new file mode 100644 index 000000000..12546291d --- /dev/null +++ b/server/modules/filter/test/Makefile @@ -0,0 +1,80 @@ +# This file is distributed as part of MaxScale form MariaDB Corporation. It is free +# software: you can redistribute it and/or modify it under the terms of the +# GNU General Public License as published by the Free Software Foundation, +# version 2. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 51 +# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+# +# Copyright MariaDB Corporation Ab 2014 + +include ../../../../build_gateway.inc + +LOGPATH := $(ROOT_PATH)/log_manager +UTILSPATH := $(ROOT_PATH)/utils +QCLASSPATH := $(ROOT_PATH)/query_classifier +COREPATH := $(ROOT_PATH)/server/core + +CC=cc +CFLAGS=-c -fPIC -I/usr/include -I../../include -I../../../include \ + -I$(LOGPATH) -I$(UTILSPATH) -I./ -I$(ROOT_PATH)/server/inih -I$(QCLASSPATH) \ + $(MYSQL_HEADERS) -Wall -g + +include ../../../../makefile.inc + +LDFLAGS=-rdynamic -L$(LOGPATH) -L$(UTILSPATH) -L$(EMBEDDED_LIB) \ + -Wl,-rpath,$(DEST)/lib \ + -Wl,-rpath,$(LOGPATH) -Wl,-rpath,$(UTILSPATH) \ + -Wl,-rpath,$(EMBEDDED_LIB) + +SRCS=harness_util.c harness_common.c +OBJ=$(SRCS:.c=.o) +COREOBJ=$(COREPATH)/load_utils.o $(COREPATH)/dcb.o $(COREPATH)/utils.o \ + $(COREPATH)/gw_utils.o $(COREPATH)/buffer.o $(COREPATH)/poll.o \ + $(COREPATH)/spinlock.o $(COREPATH)/gwbitmask.o $(COREPATH)/session.o \ + $(COREPATH)/atomic.o $(COREPATH)/hashtable.o $(COREPATH)/filter.o $(COREPATH)/modutil.o $(ROOT_PATH)/server/inih/ini.o \ + $(COREPATH)/hint.o $(COREPATH)/config.o $(COREPATH)/service.o $(COREPATH)/server.o $(COREPATH)/monitor.o $(COREPATH)/housekeeper.o $(COREPATH)/adminusers.o $(COREPATH)/dbusers.o $(COREPATH)/thread.o $(COREPATH)/users.o $(COREPATH)/secrets.o +LIBS= $(UTILSPATH)/skygw_utils.o -lssl -pthread -llog_manager -lmysqld -ldl -lcrypto -lcrypt -lm +MODULES := $(wildcard ../*.so) + +all: build + +build:$(OBJ) + $(CC) $(OBJ) $(COREOBJ) $(LDFLAGS) $(LIBS) -o harness + $(MAKE) -C ../ + cp ../*.so ./ + +%.o: %.c + $(CC) $(CFLAGS) $< -o $@ + +clean: + rm -f *.o + rm -f *.so + rm -f harness +cleantests:clean + rm *.output +buildtests:build + +testall: + $(MAKE) cleantests + $(MAKE) buildtests + $(MAKE) runtests + +runtests: + @echo "" + @echo "-------------------------------" + @echo "$(shell date)" + @echo "Test Filter harness" + @echo "-------------------------------" + @echo "Testing hints... 
" + @./hint_tests.sh + @echo "" + +documentation: + doxygen doxygen.conf diff --git a/server/modules/filter/test/README b/server/modules/filter/test/README new file mode 100644 index 000000000..ee8f81edd --- /dev/null +++ b/server/modules/filter/test/README @@ -0,0 +1,20 @@ +Filter Test Harness + +For a more detailed description of the filter harness, either generate the documentation or read the harness.h file. + +Running the program without arguments enters the interactive mode. Type 'help' for a list of all commands. + +The default values for threads and sessions are stored in the 'harness.cnf' file + +Mandatory parameters for the command line mode are -c and -i. + +Parameters for the command line: + + -h Display this information + -c Path to the MaxScale configuration file to parse for filters + -i Name of the input file for buffers + -o Name of the output file for results + -q Suppress printing to stdout + -t Number of threads + -s Number of sessions + -d Routing delay diff --git a/server/modules/filter/test/doxygen.conf b/server/modules/filter/test/doxygen.conf new file mode 100644 index 000000000..a3c6eae77 --- /dev/null +++ b/server/modules/filter/test/doxygen.conf @@ -0,0 +1,2303 @@ +# Doxyfile 1.8.6 + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project. +# +# All text after a double hash (##) is considered a comment and is placed in +# front of the TAG it is preceding. +# +# All text after a single hash (#) is considered a comment and will be ignored. +# The format is: +# TAG = value [value, ...] +# For lists, items can also be appended using: +# TAG += value [value, ...] +# Values that contain spaces should be placed between quotes (\" \"). 
+ +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# This tag specifies the encoding used for all characters in the config file +# that follow. The default is UTF-8 which is also the encoding used for all text +# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv +# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv +# for the list of possible encodings. +# The default value is: UTF-8. + +DOXYFILE_ENCODING = UTF-8 + +# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by +# double-quotes, unless you are using Doxywizard) that should identify the +# project for which the documentation is generated. This name is used in the +# title of most generated pages and in a few other places. +# The default value is: My Project. + +PROJECT_NAME = "Filter Harness" + +# The PROJECT_NUMBER tag can be used to enter a project or revision number. This +# could be handy for archiving the generated documentation or if some version +# control system is used. + +PROJECT_NUMBER = 1.1 + +# Using the PROJECT_BRIEF tag one can provide an optional one line description +# for a project that appears at the top of each page and should give viewer a +# quick idea about the purpose of the project. Keep the description short. + +PROJECT_BRIEF = "Test harness for independent testing of filters" + +# With the PROJECT_LOGO tag one can specify an logo or icon that is included in +# the documentation. The maximum height of the logo should not exceed 55 pixels +# and the maximum width should not exceed 200 pixels. Doxygen will copy the logo +# to the output directory. + +PROJECT_LOGO = + +# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path +# into which the generated documentation will be written. 
If a relative path is +# entered, it will be relative to the location where doxygen was started. If +# left blank the current directory will be used. + +OUTPUT_DIRECTORY = doc/ + +# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 4096 sub- +# directories (in 2 levels) under the output directory of each output format and +# will distribute the generated files over these directories. Enabling this +# option can be useful when feeding doxygen a huge amount of source files, where +# putting all generated files in the same directory would otherwise causes +# performance problems for the file system. +# The default value is: NO. + +CREATE_SUBDIRS = NO + +# The OUTPUT_LANGUAGE tag is used to specify the language in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all constant output in the proper language. +# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese, +# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States), +# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian, +# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages), +# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian, +# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian, +# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish, +# Ukrainian and Vietnamese. +# The default value is: English. + +OUTPUT_LANGUAGE = English + +# If the BRIEF_MEMBER_DESC tag is set to YES doxygen will include brief member +# descriptions after the members that are listed in the file and class +# documentation (similar to Javadoc). Set to NO to disable this. +# The default value is: YES. 
+ +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES doxygen will prepend the brief +# description of a member or function before the detailed description +# +# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. +# The default value is: YES. + +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator that is +# used to form the text in various listings. Each string in this list, if found +# as the leading text of the brief description, will be stripped from the text +# and the result, after processing the whole list, is used as the annotated +# text. Otherwise, the brief description is used as-is. If left blank, the +# following values are used ($name is automatically replaced with the name of +# the entity):The $name class, The $name widget, The $name file, is, provides, +# specifies, contains, represents, a, an and the. + +ABBREVIATE_BRIEF = + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# doxygen will generate a detailed section even if there is only a brief +# description. +# The default value is: NO. + +ALWAYS_DETAILED_SEC = NO + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all +# inherited members of a class in the documentation of that class as if those +# members were ordinary class members. Constructors, destructors and assignment +# operators of the base classes will not be shown. +# The default value is: NO. + +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES doxygen will prepend the full path +# before files name in the file list and in the header files. If set to NO the +# shortest path that makes the file name unique will be used +# The default value is: YES. + +FULL_PATH_NAMES = YES + +# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. 
+# Stripping is only done if one of the specified strings matches the left-hand +# part of the path. The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the path to +# strip. +# +# Note that you can specify absolute paths here, but also relative paths, which +# will be relative from the directory where doxygen is started. +# This tag requires that the tag FULL_PATH_NAMES is set to YES. + +STRIP_FROM_PATH = + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the +# path mentioned in the documentation of a class, which tells the reader which +# header file to include in order to use a class. If left blank only the name of +# the header file containing the class definition is used. Otherwise one should +# specify the list of include paths that are normally passed to the compiler +# using the -I flag. + +STRIP_FROM_INC_PATH = + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but +# less readable) file names. This can be useful is your file systems doesn't +# support long names like on DOS, Mac, or CD-ROM. +# The default value is: NO. + +SHORT_NAMES = NO + +# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the +# first line (until the first dot) of a Javadoc-style comment as the brief +# description. If set to NO, the Javadoc-style will behave just like regular Qt- +# style comments (thus requiring an explicit @brief command for a brief +# description.) +# The default value is: NO. + +JAVADOC_AUTOBRIEF = NO + +# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first +# line (until the first dot) of a Qt-style comment as the brief description. If +# set to NO, the Qt-style will behave just like regular Qt-style comments (thus +# requiring an explicit \brief command for a brief description.) +# The default value is: NO. 
+ +QT_AUTOBRIEF = NO + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a +# multi-line C++ special comment block (i.e. a block of //! or /// comments) as +# a brief description. This used to be the default behavior. The new default is +# to treat a multi-line C++ comment block as a detailed description. Set this +# tag to YES if you prefer the old behavior instead. +# +# Note that setting this tag to YES also means that rational rose comments are +# not recognized any more. +# The default value is: NO. + +MULTILINE_CPP_IS_BRIEF = NO + +# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the +# documentation from any documented member that it re-implements. +# The default value is: YES. + +INHERIT_DOCS = YES + +# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce a +# new page for each member. If set to NO, the documentation of a member will be +# part of the file/class/namespace that contains it. +# The default value is: NO. + +SEPARATE_MEMBER_PAGES = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen +# uses this value to replace tabs by spaces in code fragments. +# Minimum value: 1, maximum value: 16, default value: 4. + +TAB_SIZE = 4 + +# This tag can be used to specify a number of aliases that act as commands in +# the documentation. An alias has the form: +# name=value +# For example adding +# "sideeffect=@par Side Effects:\n" +# will allow you to put the command \sideeffect (or @sideeffect) in the +# documentation, which will result in a user-defined paragraph with heading +# "Side Effects:". You can put \n's in the value part of an alias to insert +# newlines. + +ALIASES = + +# This tag can be used to specify a number of word-keyword mappings (TCL only). +# A mapping has the form "name=value". For example adding "class=itcl::class" +# will allow you to use the command class in the itcl::class meaning. 
+ +TCL_SUBST = + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources +# only. Doxygen will then generate output that is more tailored for C. For +# instance, some of the names that are used will be different. The list of all +# members will be omitted, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_FOR_C = YES + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or +# Python sources only. Doxygen will then generate output that is more tailored +# for that language. For instance, namespaces will be presented as packages, +# qualified scopes will look different, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_JAVA = NO + +# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran +# sources. Doxygen will then generate output that is tailored for Fortran. +# The default value is: NO. + +OPTIMIZE_FOR_FORTRAN = NO + +# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL +# sources. Doxygen will then generate output that is tailored for VHDL. +# The default value is: NO. + +OPTIMIZE_OUTPUT_VHDL = NO + +# Doxygen selects the parser to use depending on the extension of the files it +# parses. With this tag you can assign which parser to use for a given +# extension. Doxygen has a built-in mapping, but you can override or extend it +# using this tag. The format is ext=language, where ext is a file extension, and +# language is one of the parsers supported by doxygen: IDL, Java, Javascript, +# C#, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL. For instance to make +# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C +# (default is Fortran), use: inc=Fortran f=C. +# +# Note For files without extension you can use no_extension as a placeholder. +# +# Note that for custom extensions you also need to set FILE_PATTERNS otherwise +# the files are not read by doxygen. 
+ +EXTENSION_MAPPING = + +# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments +# according to the Markdown format, which allows for more readable +# documentation. See http://daringfireball.net/projects/markdown/ for details. +# The output of markdown processing is further processed by doxygen, so you can +# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in +# case of backward compatibilities issues. +# The default value is: YES. + +MARKDOWN_SUPPORT = YES + +# When enabled doxygen tries to link words that correspond to documented +# classes, or namespaces to their corresponding documentation. Such a link can +# be prevented in individual cases by by putting a % sign in front of the word +# or globally by setting AUTOLINK_SUPPORT to NO. +# The default value is: YES. + +AUTOLINK_SUPPORT = YES + +# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want +# to include (a tag file for) the STL sources as input, then you should set this +# tag to YES in order to let doxygen match functions declarations and +# definitions whose arguments contain STL classes (e.g. func(std::string); +# versus func(std::string) {}). This also make the inheritance and collaboration +# diagrams that involve STL classes more complete and accurate. +# The default value is: NO. + +BUILTIN_STL_SUPPORT = NO + +# If you use Microsoft's C++/CLI language, you should set this option to YES to +# enable parsing support. +# The default value is: NO. + +CPP_CLI_SUPPORT = NO + +# Set the SIP_SUPPORT tag to YES if your project consists of sip (see: +# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen +# will parse them like normal C++ but will assume all classes use public instead +# of private inheritance when no explicit protection keyword is present. +# The default value is: NO. 
+ +SIP_SUPPORT = NO + +# For Microsoft's IDL there are propget and propput attributes to indicate +# getter and setter methods for a property. Setting this option to YES will make +# doxygen to replace the get and set methods by a property in the documentation. +# This will only work if the methods are indeed getting or setting a simple +# type. If this is not the case, or you want to show the methods anyway, you +# should set this option to NO. +# The default value is: YES. + +IDL_PROPERTY_SUPPORT = YES + +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES, then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. By default +# all members of a group must be documented explicitly. +# The default value is: NO. + +DISTRIBUTE_GROUP_DOC = NO + +# Set the SUBGROUPING tag to YES to allow class member groups of the same type +# (for instance a group of public functions) to be put as a subgroup of that +# type (e.g. under the Public Functions section). Set it to NO to prevent +# subgrouping. Alternatively, this can be done per class using the +# \nosubgrouping command. +# The default value is: YES. + +SUBGROUPING = YES + +# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions +# are shown inside the group in which they are included (e.g. using \ingroup) +# instead of on a separate page (for HTML and Man pages) or section (for LaTeX +# and RTF). +# +# Note that this feature does not work in combination with +# SEPARATE_MEMBER_PAGES. +# The default value is: NO. + +INLINE_GROUPED_CLASSES = NO + +# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions +# with only public data fields or simple typedef fields will be shown inline in +# the documentation of the scope in which they are defined (i.e. file, +# namespace, or group documentation), provided this scope is documented. 
If set +# to NO, structs, classes, and unions are shown on a separate page (for HTML and +# Man pages) or section (for LaTeX and RTF). +# The default value is: NO. + +INLINE_SIMPLE_STRUCTS = NO + +# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or +# enum is documented as struct, union, or enum with the name of the typedef. So +# typedef struct TypeS {} TypeT, will appear in the documentation as a struct +# with name TypeT. When disabled the typedef will appear as a member of a file, +# namespace, or class. And the struct will be named TypeS. This can typically be +# useful for C code in case the coding convention dictates that all compound +# types are typedef'ed and only the typedef is referenced, never the tag name. +# The default value is: NO. + +TYPEDEF_HIDES_STRUCT = NO + +# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This +# cache is used to resolve symbols given their name and scope. Since this can be +# an expensive process and often the same symbol appears multiple times in the +# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small +# doxygen will become slower. If the cache is too large, memory is wasted. The +# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range +# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 +# symbols. At the end of a run doxygen will report the cache usage and suggest +# the optimal cache size from a speed point of view. +# Minimum value: 0, maximum value: 9, default value: 0. + +LOOKUP_CACHE_SIZE = 0 + +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- + +# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in +# documentation are documented, even if no documentation was available. 
Private +# class members and static file members will be hidden unless the +# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. +# Note: This will also disable the warnings about undocumented members that are +# normally produced when WARNINGS is set to YES. +# The default value is: NO. + +EXTRACT_ALL = YES + +# If the EXTRACT_PRIVATE tag is set to YES all private members of a class will +# be included in the documentation. +# The default value is: NO. + +EXTRACT_PRIVATE = NO + +# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal +# scope will be included in the documentation. +# The default value is: NO. + +EXTRACT_PACKAGE = NO + +# If the EXTRACT_STATIC tag is set to YES all static members of a file will be +# included in the documentation. +# The default value is: NO. + +EXTRACT_STATIC = NO + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) defined +# locally in source files will be included in the documentation. If set to NO +# only classes defined in header files are included. Does not have any effect +# for Java sources. +# The default value is: YES. + +EXTRACT_LOCAL_CLASSES = YES + +# This flag is only useful for Objective-C code. When set to YES local methods, +# which are defined in the implementation section but not in the interface are +# included in the documentation. If set to NO only methods in the interface are +# included. +# The default value is: NO. + +EXTRACT_LOCAL_METHODS = NO + +# If this flag is set to YES, the members of anonymous namespaces will be +# extracted and appear in the documentation as a namespace called +# 'anonymous_namespace{file}', where file will be replaced with the base name of +# the file that contains the anonymous namespace. By default anonymous namespace +# are hidden. +# The default value is: NO. + +EXTRACT_ANON_NSPACES = NO + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all +# undocumented members inside documented classes or files. 
If set to NO these +# members will be included in the various overviews, but no documentation +# section is generated. This option has no effect if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_MEMBERS = NO + +# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. If set +# to NO these classes will be included in the various overviews. This option has +# no effect if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_CLASSES = NO + +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend +# (class|struct|union) declarations. If set to NO these declarations will be +# included in the documentation. +# The default value is: NO. + +HIDE_FRIEND_COMPOUNDS = NO + +# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any +# documentation blocks found inside the body of a function. If set to NO these +# blocks will be appended to the function's detailed documentation block. +# The default value is: NO. + +HIDE_IN_BODY_DOCS = NO + +# The INTERNAL_DOCS tag determines if documentation that is typed after a +# \internal command is included. If the tag is set to NO then the documentation +# will be excluded. Set it to YES to include the internal documentation. +# The default value is: NO. + +INTERNAL_DOCS = NO + +# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file +# names in lower-case letters. If set to YES upper-case letters are also +# allowed. This is useful if you have classes or files whose names only differ +# in case and if your file system supports case sensitive file names. Windows +# and Mac users are advised to set this option to NO. +# The default value is: system dependent. + +CASE_SENSE_NAMES = YES + +# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with +# their full class and namespace scopes in the documentation. 
If set to YES the +# scope will be hidden. +# The default value is: NO. + +HIDE_SCOPE_NAMES = NO + +# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of +# the files that are included by a file in the documentation of that file. +# The default value is: YES. + +SHOW_INCLUDE_FILES = YES + +# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each +# grouped member an include statement to the documentation, telling the reader +# which file to include in order to use the member. +# The default value is: NO. + +SHOW_GROUPED_MEMB_INC = NO + +# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include +# files with double quotes in the documentation rather than with sharp brackets. +# The default value is: NO. + +FORCE_LOCAL_INCLUDES = NO + +# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the +# documentation for inline members. +# The default value is: YES. + +INLINE_INFO = YES + +# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the +# (detailed) documentation of file and class members alphabetically by member +# name. If set to NO the members will appear in declaration order. +# The default value is: YES. + +SORT_MEMBER_DOCS = YES + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief +# descriptions of file, namespace and class members alphabetically by member +# name. If set to NO the members will appear in declaration order. Note that +# this will also influence the order of the classes in the class list. +# The default value is: NO. + +SORT_BRIEF_DOCS = NO + +# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the +# (brief and detailed) documentation of class members so that constructors and +# destructors are listed first. If set to NO the constructors will appear in the +# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. 
+# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief +# member documentation. +# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting +# detailed member documentation. +# The default value is: NO. + +SORT_MEMBERS_CTORS_1ST = NO + +# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy +# of group names into alphabetical order. If set to NO the group names will +# appear in their defined order. +# The default value is: NO. + +SORT_GROUP_NAMES = NO + +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by +# fully-qualified names, including namespaces. If set to NO, the class list will +# be sorted only by class name, not including the namespace part. +# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. +# Note: This option applies only to the class list, not to the alphabetical +# list. +# The default value is: NO. + +SORT_BY_SCOPE_NAME = NO + +# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper +# type resolution of all parameters of a function it will reject a match between +# the prototype and the implementation of a member function even if there is +# only one candidate or it is obvious which candidate to choose by doing a +# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still +# accept a match between prototype and implementation in such cases. +# The default value is: NO. + +STRICT_PROTO_MATCHING = NO + +# The GENERATE_TODOLIST tag can be used to enable ( YES) or disable ( NO) the +# todo list. This list is created by putting \todo commands in the +# documentation. +# The default value is: YES. + +GENERATE_TODOLIST = YES + +# The GENERATE_TESTLIST tag can be used to enable ( YES) or disable ( NO) the +# test list. This list is created by putting \test commands in the +# documentation. +# The default value is: YES. 
+ +GENERATE_TESTLIST = YES + +# The GENERATE_BUGLIST tag can be used to enable ( YES) or disable ( NO) the bug +# list. This list is created by putting \bug commands in the documentation. +# The default value is: YES. + +GENERATE_BUGLIST = YES + +# The GENERATE_DEPRECATEDLIST tag can be used to enable ( YES) or disable ( NO) +# the deprecated list. This list is created by putting \deprecated commands in +# the documentation. +# The default value is: YES. + +GENERATE_DEPRECATEDLIST= YES + +# The ENABLED_SECTIONS tag can be used to enable conditional documentation +# sections, marked by \if ... \endif and \cond +# ... \endcond blocks. + +ENABLED_SECTIONS = + +# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the +# initial value of a variable or macro / define can have for it to appear in the +# documentation. If the initializer consists of more lines than specified here +# it will be hidden. Use a value of 0 to hide initializers completely. The +# appearance of the value of individual variables and macros / defines can be +# controlled using \showinitializer or \hideinitializer command in the +# documentation regardless of this setting. +# Minimum value: 0, maximum value: 10000, default value: 30. + +MAX_INITIALIZER_LINES = 30 + +# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at +# the bottom of the documentation of classes and structs. If set to YES the list +# will mention the files that were used to generate the documentation. +# The default value is: YES. + +SHOW_USED_FILES = YES + +# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This +# will remove the Files entry from the Quick Index and from the Folder Tree View +# (if specified). +# The default value is: YES. + +SHOW_FILES = YES + +# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces +# page. This will remove the Namespaces entry from the Quick Index and from the +# Folder Tree View (if specified). 
+# The default value is: YES. + +SHOW_NAMESPACES = YES + +# The FILE_VERSION_FILTER tag can be used to specify a program or script that +# doxygen should invoke to get the current version for each file (typically from +# the version control system). Doxygen will invoke the program by executing (via +# popen()) the command command input-file, where command is the value of the +# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided +# by doxygen. Whatever the program writes to standard output is used as the file +# version. For an example see the documentation. + +FILE_VERSION_FILTER = + +# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed +# by doxygen. The layout file controls the global structure of the generated +# output files in an output format independent way. To create the layout file +# that represents doxygen's defaults, run doxygen with the -l option. You can +# optionally specify a file name after the option, if omitted DoxygenLayout.xml +# will be used as the name of the layout file. +# +# Note that if you run doxygen from a directory containing a file called +# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE +# tag is left empty. + +LAYOUT_FILE = + +# The CITE_BIB_FILES tag can be used to specify one or more bib files containing +# the reference definitions. This must be a list of .bib files. The .bib +# extension is automatically appended if omitted. This requires the bibtex tool +# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info. +# For LaTeX the style of the bibliography can be controlled using +# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the +# search path. Do not use file names with spaces, bibtex cannot handle them. See +# also \cite for info how to create references. 
+ +CITE_BIB_FILES = + +#--------------------------------------------------------------------------- +# Configuration options related to warning and progress messages +#--------------------------------------------------------------------------- + +# The QUIET tag can be used to turn on/off the messages that are generated to +# standard output by doxygen. If QUIET is set to YES this implies that the +# messages are off. +# The default value is: NO. + +QUIET = NO + +# The WARNINGS tag can be used to turn on/off the warning messages that are +# generated to standard error ( stderr) by doxygen. If WARNINGS is set to YES +# this implies that the warnings are on. +# +# Tip: Turn warnings on while writing the documentation. +# The default value is: YES. + +WARNINGS = YES + +# If the WARN_IF_UNDOCUMENTED tag is set to YES, then doxygen will generate +# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag +# will automatically be disabled. +# The default value is: YES. + +WARN_IF_UNDOCUMENTED = YES + +# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as not documenting some parameters +# in a documented function, or documenting parameters that don't exist or using +# markup commands wrongly. +# The default value is: YES. + +WARN_IF_DOC_ERROR = YES + +# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that +# are documented, but have no documentation for their parameters or return +# value. If set to NO doxygen will only warn about wrong or incomplete parameter +# documentation, but not about the absence of documentation. +# The default value is: NO. + +WARN_NO_PARAMDOC = NO + +# The WARN_FORMAT tag determines the format of the warning messages that doxygen +# can produce. The string should contain the $file, $line, and $text tags, which +# will be replaced by the file and line number from which the warning originated +# and the warning text. 
Optionally the format may contain $version, which will +# be replaced by the version of the file (if it could be obtained via +# FILE_VERSION_FILTER) +# The default value is: $file:$line: $text. + +WARN_FORMAT = "$file:$line: $text" + +# The WARN_LOGFILE tag can be used to specify a file to which warning and error +# messages should be written. If left blank the output is written to standard +# error (stderr). + +WARN_LOGFILE = + +#--------------------------------------------------------------------------- +# Configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag is used to specify the files and/or directories that contain +# documented source files. You may enter file names like myfile.cpp or +# directories like /usr/src/myproject. Separate the files or directories with +# spaces. +# Note: If this tag is empty the current directory is searched. + +INPUT = + +# This tag can be used to specify the character encoding of the source files +# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses +# libiconv (or the iconv built into libc) for the transcoding. See the libiconv +# documentation (see: http://www.gnu.org/software/libiconv) for the list of +# possible encodings. +# The default value is: UTF-8. + +INPUT_ENCODING = UTF-8 + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and +# *.h) to filter out the source-files in the directories. If left blank the +# following patterns are tested:*.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii, +# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp, +# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown, +# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf, +# *.qsf, *.as and *.js. 
+ +FILE_PATTERNS = + +# The RECURSIVE tag can be used to specify whether or not subdirectories should +# be searched for input files as well. +# The default value is: NO. + +RECURSIVE = NO + +# The EXCLUDE tag can be used to specify files and/or directories that should be +# excluded from the INPUT source files. This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. +# +# Note that relative paths are relative to the directory from which doxygen is +# run. + +EXCLUDE = + +# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or +# directories that are symbolic links (a Unix file system feature) are excluded +# from the input. +# The default value is: NO. + +EXCLUDE_SYMLINKS = NO + +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# certain files from those directories. +# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories for example use the pattern */test/* + +EXCLUDE_PATTERNS = + +# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names +# (namespaces, classes, functions, etc.) that should be excluded from the +# output. The symbol name can be a fully qualified name, a word, or if the +# wildcard * is used, a substring. Examples: ANamespace, AClass, +# AClass::ANamespace, ANamespace::*Test +# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories use the pattern */test/* + +EXCLUDE_SYMBOLS = + +# The EXAMPLE_PATH tag can be used to specify one or more files or directories +# that contain example code fragments that are included (see the \include +# command). 
+ +EXAMPLE_PATH = + +# If the value of the EXAMPLE_PATH tag contains directories, you can use the +# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and +# *.h) to filter out the source-files in the directories. If left blank all +# files are included. + +EXAMPLE_PATTERNS = + +# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be +# searched for input files to be used with the \include or \dontinclude commands +# irrespective of the value of the RECURSIVE tag. +# The default value is: NO. + +EXAMPLE_RECURSIVE = NO + +# The IMAGE_PATH tag can be used to specify one or more files or directories +# that contain images that are to be included in the documentation (see the +# \image command). + +IMAGE_PATH = + +# The INPUT_FILTER tag can be used to specify a program that doxygen should +# invoke to filter for each input file. Doxygen will invoke the filter program +# by executing (via popen()) the command: +# +# +# +# where is the value of the INPUT_FILTER tag, and is the +# name of an input file. Doxygen will then use the output that the filter +# program writes to standard output. If FILTER_PATTERNS is specified, this tag +# will be ignored. +# +# Note that the filter must not add or remove lines; it is applied before the +# code is scanned, but not when the output code is generated. If lines are added +# or removed, the anchors will not be placed correctly. + +INPUT_FILTER = + +# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern +# basis. Doxygen will compare the file name with each pattern and apply the +# filter if there is a match. The filters are a list of the form: pattern=filter +# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how +# filters are used. If the FILTER_PATTERNS tag is empty or if none of the +# patterns match the file name, INPUT_FILTER is applied. 
+ +FILTER_PATTERNS = + +# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using +# INPUT_FILTER ) will also be used to filter the input files that are used for +# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES). +# The default value is: NO. + +FILTER_SOURCE_FILES = NO + +# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file +# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and +# it is also possible to disable source filtering for a specific pattern using +# *.ext= (so without naming a filter). +# This tag requires that the tag FILTER_SOURCE_FILES is set to YES. + +FILTER_SOURCE_PATTERNS = + +# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that +# is part of the input, its contents will be placed on the main page +# (index.html). This can be useful if you have a project on for instance GitHub +# and want to reuse the introduction page also for the doxygen output. + +USE_MDFILE_AS_MAINPAGE = + +#--------------------------------------------------------------------------- +# Configuration options related to source browsing +#--------------------------------------------------------------------------- + +# If the SOURCE_BROWSER tag is set to YES then a list of source files will be +# generated. Documented entities will be cross-referenced with these sources. +# +# Note: To get rid of all source code in the generated output, make sure that +# also VERBATIM_HEADERS is set to NO. +# The default value is: NO. + +SOURCE_BROWSER = NO + +# Setting the INLINE_SOURCES tag to YES will include the body of functions, +# classes and enums directly into the documentation. +# The default value is: NO. + +INLINE_SOURCES = NO + +# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any +# special comment blocks from generated source code fragments. Normal C, C++ and +# Fortran comments will always remain visible. 
+# The default value is: YES. + +STRIP_CODE_COMMENTS = YES + +# If the REFERENCED_BY_RELATION tag is set to YES then for each documented +# function all documented functions referencing it will be listed. +# The default value is: NO. + +REFERENCED_BY_RELATION = NO + +# If the REFERENCES_RELATION tag is set to YES then for each documented function +# all documented entities called/used by that function will be listed. +# The default value is: NO. + +REFERENCES_RELATION = NO + +# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set +# to YES, then the hyperlinks from functions in REFERENCES_RELATION and +# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will +# link to the documentation. +# The default value is: YES. + +REFERENCES_LINK_SOURCE = YES + +# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the +# source code will show a tooltip with additional information such as prototype, +# brief description and links to the definition and documentation. Since this +# will make the HTML file larger and loading of large files a bit slower, you +# can opt to disable this feature. +# The default value is: YES. +# This tag requires that the tag SOURCE_BROWSER is set to YES. + +SOURCE_TOOLTIPS = YES + +# If the USE_HTAGS tag is set to YES then the references to source code will +# point to the HTML generated by the htags(1) tool instead of doxygen built-in +# source browser. The htags tool is part of GNU's global source tagging system +# (see http://www.gnu.org/software/global/global.html). You will need version +# 4.8.6 or higher. +# +# To use it do the following: +# - Install the latest version of global +# - Enable SOURCE_BROWSER and USE_HTAGS in the config file +# - Make sure the INPUT points to the root of the source tree +# - Run doxygen as normal +# +# Doxygen will invoke htags (and that will in turn invoke gtags), so these +# tools must be available from the command line (i.e. 
in the search path). +# +# The result: instead of the source browser generated by doxygen, the links to +# source code will now point to the output of htags. +# The default value is: NO. +# This tag requires that the tag SOURCE_BROWSER is set to YES. + +USE_HTAGS = NO + +# If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a +# verbatim copy of the header file for each class for which an include is +# specified. Set to NO to disable this. +# See also: Section \class. +# The default value is: YES. + +VERBATIM_HEADERS = YES + +#--------------------------------------------------------------------------- +# Configuration options related to the alphabetical class index +#--------------------------------------------------------------------------- + +# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all +# compounds will be generated. Enable this if the project contains a lot of +# classes, structs, unions or interfaces. +# The default value is: YES. + +ALPHABETICAL_INDEX = YES + +# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in +# which the alphabetical index list will be split. +# Minimum value: 1, maximum value: 20, default value: 5. +# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. + +COLS_IN_ALPHA_INDEX = 5 + +# In case all classes in a project start with a common prefix, all classes will +# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag +# can be used to specify a prefix (or a list of prefixes) that should be ignored +# while generating the index headers. +# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. 
+ +IGNORE_PREFIX = + +#--------------------------------------------------------------------------- +# Configuration options related to the HTML output +#--------------------------------------------------------------------------- + +# If the GENERATE_HTML tag is set to YES doxygen will generate HTML output +# The default value is: YES. + +GENERATE_HTML = YES + +# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a +# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of +# it. +# The default directory is: html. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_OUTPUT = html + +# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each +# generated HTML page (for example: .htm, .php, .asp). +# The default value is: .html. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FILE_EXTENSION = .html + +# The HTML_HEADER tag can be used to specify a user-defined HTML header file for +# each generated HTML page. If the tag is left blank doxygen will generate a +# standard header. +# +# To get valid HTML the header file that includes any scripts and style sheets +# that doxygen needs, which is dependent on the configuration options used (e.g. +# the setting GENERATE_TREEVIEW). It is highly recommended to start with a +# default header using +# doxygen -w html new_header.html new_footer.html new_stylesheet.css +# YourConfigFile +# and then modify the file new_header.html. See also section "Doxygen usage" +# for information on how to generate the default header that doxygen normally +# uses. +# Note: The header is subject to change so you typically have to regenerate the +# default header when upgrading to a newer version of doxygen. For a description +# of the possible markers and block names see the documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. 
+ +HTML_HEADER = + +# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each +# generated HTML page. If the tag is left blank doxygen will generate a standard +# footer. See HTML_HEADER for more information on how to generate a default +# footer and what special commands can be used inside the footer. See also +# section "Doxygen usage" for information on how to generate the default footer +# that doxygen normally uses. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FOOTER = + +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style +# sheet that is used by each HTML page. It can be used to fine-tune the look of +# the HTML output. If left blank doxygen will generate a default style sheet. +# See also section "Doxygen usage" for information on how to generate the style +# sheet that doxygen normally uses. +# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as +# it is more robust and this tag (HTML_STYLESHEET) will in the future become +# obsolete. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_STYLESHEET = + +# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional user- +# defined cascading style sheet that is included after the standard style sheets +# created by doxygen. Using this option one can overrule certain style aspects. +# This is preferred over using HTML_STYLESHEET since it does not replace the +# standard style sheet and is therefore more robust against future updates. +# Doxygen will copy the style sheet file to the output directory. For an example +# see the documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_EXTRA_STYLESHEET = + +# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or +# other source files which should be copied to the HTML output directory. Note +# that these files will be copied to the base HTML output directory. 
Use the +# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these +# files. In the HTML_STYLESHEET file, use the file name only. Also note that the +# files will be copied as-is; there are no commands or markers available. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_EXTRA_FILES = + +# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen +# will adjust the colors in the stylesheet and background images according to +# this color. Hue is specified as an angle on a colorwheel, see +# http://en.wikipedia.org/wiki/Hue for more information. For instance the value +# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 +# purple, and 360 is red again. +# Minimum value: 0, maximum value: 359, default value: 220. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_HUE = 220 + +# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors +# in the HTML output. For a value of 0 the output will use grayscales only. A +# value of 255 will produce the most vivid colors. +# Minimum value: 0, maximum value: 255, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_SAT = 100 + +# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the +# luminance component of the colors in the HTML output. Values below 100 +# gradually make the output lighter, whereas values above 100 make the output +# darker. The value divided by 100 is the actual gamma applied, so 80 represents +# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not +# change the gamma. +# Minimum value: 40, maximum value: 240, default value: 80. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_GAMMA = 80 + +# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML +# page will contain the date and time when the page was generated. 
Setting this +# to NO can help when comparing the output of multiple runs. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_TIMESTAMP = NO + +# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML +# documentation will contain sections that can be hidden and shown after the +# page has loaded. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_DYNAMIC_SECTIONS = NO + +# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries +# shown in the various tree structured indices initially; the user can expand +# and collapse entries dynamically later on. Doxygen will expand the tree to +# such a level that at most the specified number of entries are visible (unless +# a fully collapsed tree already exceeds this amount). So setting the number of +# entries 1 will produce a full collapsed tree by default. 0 is a special value +# representing an infinite number of entries and will result in a full expanded +# tree by default. +# Minimum value: 0, maximum value: 9999, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_INDEX_NUM_ENTRIES = 100 + +# If the GENERATE_DOCSET tag is set to YES, additional index files will be +# generated that can be used as input for Apple's Xcode 3 integrated development +# environment (see: http://developer.apple.com/tools/xcode/), introduced with +# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a +# Makefile in the HTML output directory. Running make will produce the docset in +# that directory and running make install will install the docset in +# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at +# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html +# for more information. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. 
+ +GENERATE_DOCSET = NO + +# This tag determines the name of the docset feed. A documentation feed provides +# an umbrella under which multiple documentation sets from a single provider +# (such as a company or product suite) can be grouped. +# The default value is: Doxygen generated docs. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_FEEDNAME = "Doxygen generated docs" + +# This tag specifies a string that should uniquely identify the documentation +# set bundle. This should be a reverse domain-name style string, e.g. +# com.mycompany.MyDocSet. Doxygen will append .docset to the name. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_BUNDLE_ID = org.doxygen.Project + +# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify +# the documentation publisher. This should be a reverse domain-name style +# string, e.g. com.mycompany.MyDocSet.documentation. +# The default value is: org.doxygen.Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_ID = org.doxygen.Publisher + +# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. +# The default value is: Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_NAME = Publisher + +# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three +# additional HTML index files: index.hhp, index.hhc, and index.hhk. The +# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop +# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on +# Windows. +# +# The HTML Help Workshop contains a compiler that can convert all HTML output +# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML +# files are now used as the Windows 98 help format, and will replace the old +# Windows help format (.hlp) on all Windows platforms in the future. 
Compressed +# HTML files also contain an index, a table of contents, and you can search for +# words in the documentation. The HTML workshop also contains a viewer for +# compressed HTML files. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_HTMLHELP = NO + +# The CHM_FILE tag can be used to specify the file name of the resulting .chm +# file. You can add a path in front of the file if the result should not be +# written to the html output directory. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +CHM_FILE = + +# The HHC_LOCATION tag can be used to specify the location (absolute path +# including file name) of the HTML help compiler ( hhc.exe). If non-empty +# doxygen will try to run the HTML help compiler on the generated index.hhp. +# The file has to be specified with full path. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +HHC_LOCATION = + +# The GENERATE_CHI flag controls if a separate .chi index file is generated ( +# YES) or that it should be included in the master .chm file ( NO). +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +GENERATE_CHI = NO + +# The CHM_INDEX_ENCODING is used to encode HtmlHelp index ( hhk), content ( hhc) +# and project file content. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +CHM_INDEX_ENCODING = + +# The BINARY_TOC flag controls whether a binary table of contents is generated ( +# YES) or a normal table of contents ( NO) in the .chm file. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +BINARY_TOC = NO + +# The TOC_EXPAND flag can be set to YES to add extra items for group members to +# the table of contents of the HTML help documentation and to the tree view. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. 
+ +TOC_EXPAND = NO + +# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and +# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that +# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help +# (.qch) of the generated HTML documentation. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_QHP = NO + +# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify +# the file name of the resulting .qch file. The path specified is relative to +# the HTML output folder. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QCH_FILE = + +# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help +# Project output. For more information please see Qt Help Project / Namespace +# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace). +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_NAMESPACE = org.doxygen.Project + +# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt +# Help Project output. For more information please see Qt Help Project / Virtual +# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual- +# folders). +# The default value is: doc. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_VIRTUAL_FOLDER = doc + +# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom +# filter to add. For more information please see Qt Help Project / Custom +# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- +# filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_NAME = + +# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the +# custom filter to add. 
For more information please see Qt Help Project / Custom +# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- +# filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_ATTRS = + +# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this +# project's filter section matches. Qt Help Project / Filter Attributes (see: +# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_SECT_FILTER_ATTRS = + +# The QHG_LOCATION tag can be used to specify the location of Qt's +# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the +# generated .qhp file. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHG_LOCATION = + +# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be +# generated, together with the HTML files, they form an Eclipse help plugin. To +# install this plugin and make it available under the help contents menu in +# Eclipse, the contents of the directory containing the HTML and XML files needs +# to be copied into the plugins directory of eclipse. The name of the directory +# within the plugins directory should be the same as the ECLIPSE_DOC_ID value. +# After copying Eclipse needs to be restarted before the help appears. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_ECLIPSEHELP = NO + +# A unique identifier for the Eclipse help plugin. When installing the plugin +# the directory name containing the HTML and XML files should also have this +# name. Each documentation set should have its own identifier. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. 
+ +ECLIPSE_DOC_ID = org.doxygen.Project + +# If you want full control over the layout of the generated HTML pages it might +# be necessary to disable the index and replace it with your own. The +# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top +# of each HTML page. A value of NO enables the index and the value YES disables +# it. Since the tabs in the index contain the same information as the navigation +# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +DISABLE_INDEX = NO + +# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index +# structure should be generated to display hierarchical information. If the tag +# value is set to YES, a side panel will be generated containing a tree-like +# index structure (just like the one that is generated for HTML Help). For this +# to work a browser that supports JavaScript, DHTML, CSS and frames is required +# (i.e. any modern browser). Windows users are probably better off using the +# HTML help feature. Via custom stylesheets (see HTML_EXTRA_STYLESHEET) one can +# further fine-tune the look of the index. As an example, the default style +# sheet generated by doxygen has an example that shows how to put an image at +# the root of the tree instead of the PROJECT_NAME. Since the tree basically has +# the same information as the tab index, you could consider setting +# DISABLE_INDEX to YES when enabling this option. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_TREEVIEW = NO + +# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that +# doxygen will group on one line in the generated HTML documentation. +# +# Note that a value of 0 will completely suppress the enum values from appearing +# in the overview section. +# Minimum value: 0, maximum value: 20, default value: 4. 
+# This tag requires that the tag GENERATE_HTML is set to YES. + +ENUM_VALUES_PER_LINE = 4 + +# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used +# to set the initial width (in pixels) of the frame in which the tree is shown. +# Minimum value: 0, maximum value: 1500, default value: 250. +# This tag requires that the tag GENERATE_HTML is set to YES. + +TREEVIEW_WIDTH = 250 + +# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open links to +# external symbols imported via tag files in a separate window. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +EXT_LINKS_IN_WINDOW = NO + +# Use this tag to change the font size of LaTeX formulas included as images in +# the HTML documentation. When you change the font size after a successful +# doxygen run you need to manually remove any form_*.png images from the HTML +# output directory to force them to be regenerated. +# Minimum value: 8, maximum value: 50, default value: 10. +# This tag requires that the tag GENERATE_HTML is set to YES. + +FORMULA_FONTSIZE = 10 + +# Use the FORMULA_TRANSPARENT tag to determine whether or not the images +# generated for formulas are transparent PNGs. Transparent PNGs are not +# supported properly for IE 6.0, but are supported on all modern browsers. +# +# Note that when changing this option you need to delete any form_*.png files in +# the HTML output directory before the changes have effect. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. + +FORMULA_TRANSPARENT = YES + +# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see +# http://www.mathjax.org) which uses client side Javascript for the rendering +# instead of using prerendered bitmaps. Use this if you do not have LaTeX +# installed or if you want the formulas to look prettier in the HTML output.
When +# enabled you may also need to install MathJax separately and configure the path +# to it using the MATHJAX_RELPATH option. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +USE_MATHJAX = NO + +# When MathJax is enabled you can set the default output format to be used for +# the MathJax output. See the MathJax site (see: +# http://docs.mathjax.org/en/latest/output.html) for more details. +# Possible values are: HTML-CSS (which is slower, but has the best +# compatibility), NativeMML (i.e. MathML) and SVG. +# The default value is: HTML-CSS. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_FORMAT = HTML-CSS + +# When MathJax is enabled you need to specify the location relative to the HTML +# output directory using the MATHJAX_RELPATH option. The destination directory +# should contain the MathJax.js script. For instance, if the mathjax directory +# is located at the same level as the HTML output directory, then +# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax +# Content Delivery Network so you can quickly see the result without installing +# MathJax. However, it is strongly recommended to install a local copy of +# MathJax from http://www.mathjax.org before deployment. +# The default value is: http://cdn.mathjax.org/mathjax/latest. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest + +# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax +# extension names that should be enabled during MathJax rendering. For example +# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_EXTENSIONS = + +# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces +# of code that will be used on startup of the MathJax code. 
See the MathJax site +# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an +# example see the documentation. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_CODEFILE = + +# When the SEARCHENGINE tag is enabled doxygen will generate a search box for +# the HTML output. The underlying search engine uses javascript and DHTML and +# should work on any modern browser. Note that when using HTML help +# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET) +# there is already a search function so this one should typically be disabled. +# For large projects the javascript based search engine can be slow, then +# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to +# search using the keyboard; to jump to the search box use + S +# (what the is depends on the OS and browser, but it is typically +# , /