diff --git a/Documentation/About/About-MaxScale.md b/Documentation/About/About-MaxScale.md new file mode 100644 index 000000000..496ca8d5b --- /dev/null +++ b/Documentation/About/About-MaxScale.md @@ -0,0 +1,23 @@ +# About MaxScale +The MariaDB Corporation **MaxScale** is an intelligent proxy that allows forwarding of database statements to one or more database servers using complex rules, which can be based on a semantic understanding of the database statements and the roles of the various servers within the backend cluster of databases. + +MaxScale is designed to provide load balancing and high availability functionality transparently to the applications. In addition it provides a highly scalable and flexible architecture, with plugin components to support different protocols and routing decisions. + +MaxScale is implemented in C so as to operate speedily. It also makes extensive use of the asynchronous I/O capabilities of the Linux operating system. The epoll system is used to provide the event driven framework for the input and output via sockets. Similar features in Windows® could be used in future development of MaxScale. + +Many of the services provided by MaxScale are implemented as external shared object modules which can be loaded at runtime. These modules support a fixed interface, communicating the entry points via a structure consisting of a set of function pointers. This structure is called the "module object". Additional modules can be created to work with MaxScale. + +One group of modules provides support for protocols, both for clients that communicate with MaxScale and for backend servers. The code that routes the queries to the backend servers is also loaded as external shared objects and they are referred to as routing modules. Another group of modules works on data as it passes through MaxScale, and they are known as filters. 
+ +A Google Group exists for MaxScale that can be used to discuss ideas, issues and communicate with the MaxScale community: +Send email to [maxscale@googlegroups.com](mailto:maxscale@googlegroups.com) + or use the [forum](http://groups.google.com/forum/#!forum/maxscale) interface + +Bugs can be reported in the MariaDB Corporation bugs database + [bug.mariadb.com](http://bugs.mariadb.com) + +## Installing MaxScale +Information about installing MaxScale, either from a repository or by building from source code, is included in the guide [Getting Started with MaxScale](/Documentation/Getting-Started/Getting-Started-With-MaxScale.md). + +The same guide also provides basic information on running MaxScale. More detailed information about configuring MaxScale is given in the [Configuration Guide](/Documentation/Getting-Started/Configuration-Guide.md). + diff --git a/Documentation/Documentation-Contents.md b/Documentation/Documentation-Contents.md index b0d33ec5e..f77c775ca 100644 --- a/Documentation/Documentation-Contents.md +++ b/Documentation/Documentation-Contents.md @@ -4,6 +4,7 @@ ## About MaxScale + - [About MaxScale](About/About-MaxScale.md) - [Release Notes 1.0.4](About/MaxScale-1.0.4-Release-Notes.md) - [Limitations](About/Limitations.md) - [COPYRIGHT](About/COPYRIGHT.md) @@ -13,6 +14,7 @@ ## Getting Started - [Getting Started with MaxScale](Getting-Started/Getting-Started-With-MaxScale.md) + - [Building MaxScale from Source Code](Getting-Started/Building-MaxScale-from-Source-Code.md) - [Configuration Guide](Getting-Started/Configuration-Guide.md) ## Reference diff --git a/Documentation/Getting-Started/Building-MaxScale-from-Source-Code.md b/Documentation/Getting-Started/Building-MaxScale-from-Source-Code.md new file mode 100644 index 000000000..66b178fa9 --- /dev/null +++ b/Documentation/Getting-Started/Building-MaxScale-from-Source-Code.md @@ -0,0 +1,190 @@ +# Building MaxScale from Source Code + +You will need a number of tools and libraries in order to 
achieve this. + +* cmake version 2.8.12 or later + +* gcc recommended version 4.4.7 or later + +* libaio + +* MariaDB Develop libraries version 5.5.38 or later + +* libedit 2.11 or later (used by the MaxAdmin tool) + +The full list of dependencies for the most common distros is provided in the next section. + +### All RHEL, CentOS and Fedora versions: + gcc gcc-c++ ncurses-devel bison glibc-devel cmake libgcc perl make libtool + openssl-devel libaio libaio-devel librabbitmq-devel +In addition, if you wish to build an RPM package include: + + rpm-build + +#### RHEL 6, 7, CentOS 6, 7, Fedora: + libedit-devel + +#### RHEL 7, CentOS 7: + mariadb-devel mariadb-embedded-devel + +#### RHEL 5, 7, CentOS 5, 6, Fedora 19, 20 + MariaDB-devel MariaDB-server + +#### Fedora 19, 20 + systemtap-sdt-devel + +### All Ubuntu and Debian versions: + build-essential libssl-dev libaio-dev ncurses-dev bison + cmake perl libtool librabbitmq-dev +If you want to build a DEB package, you will also need: + + dpkg-dev + +#### Ubuntu 14.04 or later, Debian 8 (Jessie) or later + libmariadbclient-dev libmariadbd-dev + +#### Earlier versions of Ubuntu or Debian + +For these, you will need to obtain the MariaDB embedded library. It has to be manually extracted from the tarball. But first ascertain what version of glibc is installed. Run the command: + + dpkg -l | grep libc6 +which will show the version number. If the version is less than 2.14 you should obtain the library from: +[https://downloads.mariadb.org/interstitial/mariadb-5.5.41/bintar-linux-x86_64/mariadb-5.5.41-linux-x86_64.tar.gz](https://downloads.mariadb.org/interstitial/mariadb-5.5.41/bintar-linux-x86_64/mariadb-5.5.41-linux-x86_64.tar.gz). 
+ +Otherwise, from: +[https://downloads.mariadb.org/interstitial/mariadb-5.5.41/bintar-linux-glibc_214-x86_64/mariadb-5.5.41-linux-glibc_214-x86_64.tar.gz](https://downloads.mariadb.org/interstitial/mariadb-5.5.41/bintar-linux-glibc_214-x86_64/mariadb-5.5.41-linux-glibc_214-x86_64.tar.gz) + +The suggested location for extracting the tarball is /usr so the operation can be done by the following commands: + + cd /usr + tar -xzvf /path/to/mariadb.library.tar.gz +where /path/to/mariadb.library.tar.gz is replaced by the actual path and name of the downloaded tarball. + +### OpenSUSE + +(mariadb-devel package, build fails????) + +The packages required are: + + gcc gcc-c++ ncurses-devel bison glibc-devel cmake libgcc_s1 perl + make libtool libopenssl-devel libaio libaio-devel + libedit-devel librabbitmq-devel + MariaDB-devel MariaDB-client MariaDB-server +(if zypper asks which MariaDB client should be installed 'MariaDB-client' or 'mariadb-client' + please select 'MariaDB-client') + + +## Obtaining the MaxScale Source Code +Now clone the GitHub project to your machine either via the web interface, your favorite graphical interface or the git command line + + $ git clone https://github.com/mariadb-corporation/MaxScale + Cloning into 'MaxScale'... + remote: Counting objects: 16228, done. + ... + +Change directory to the MaxScale directory, create a build directory and change directory to that build directory + + $ cd MaxScale + $ mkdir build + $ cd build + +The next step is to run the cmake command to build the Makefile you need to compile MaxScale. There are a number of options you may give to configure cmake and point it to the various packages it requires. In this example we will assume the MariaDB developer packages have been installed as described above and set all the options required to locate these, along with options to build the unit tests and configure the installation target directory. 
+ +If you run into any trouble while configuring CMake, you can always remove the +'CMakeCache.txt' file to clear CMake's internal cache. This resets all values to their +defaults and can be used to fix a 'stuck' configuration of CMake. This is also a good +reason why you should always build into a separate directory, because you can safely +wipe the build directory clean without the danger of deleting important files when +something goes wrong. Building 'out-of-source' also allows you to have multiple +configurations of MaxScale at the same time. + +The default values that CMake uses can be found in the 'macros.cmake' file. +If you wish to change these, edit the 'macros.cmake' file or define the +variables manually at configuration time. + +To display all CMake variables with their descriptions: + + cmake -LH + +When you are ready to run cmake: + + $ cmake -DMYSQL\_DIR=/usr/mariadb-5.5.41-linux-x86_64/include/mysql \ + -DEMBEDDED\_LIB=/usr/mariadb-5.5.41-linux-x86\_64/lib/libmysqld.a \ + -DMYSQLCLIENT\_LIBRARIES=/usr/mariadb-5.5.41-linux-x86_64/lib/libmysqlclient.so \ + -DERRMSG=/usr/mariadb-5.5.41-linux-x86\_64/share/english/errmsg.sys \ + -DINSTALL\_DIR=/home/maxscale/MaxScale -DBUILD_TESTS=Y \ + -DINSTALL\_SYSTEM\_FILES=N \ + -DBUILD_BINLOG=Y -DBUILD_RABBITMQ=N .. +
+    -- CMake version: 2.8.12.2
+    -- The C compiler identification is GNU 4.4.7
+    -- The CXX compiler identification is GNU 4.4.7
+    -- Check for working C compiler: /usr/bin/cc
+    -- Check for working C compiler: /usr/bin/cc -- works
+    -- Detecting C compiler ABI info
+    -- Detecting C compiler ABI info - done
+    -- Check for working CXX compiler: /usr/bin/c++
+    -- Check for working CXX compiler: /usr/bin/c++ -- works
+    -- Detecting CXX compiler ABI info
+    -- Detecting CXX compiler ABI info - done
+    -- Library was found at: /lib64/libaio.so
+    -- Library was found at: /usr/lib64/libssl.so
+    -- Library was found at: /usr/lib64/libcrypt.so
+    -- Library was found at: /usr/lib64/libcrypto.so
+    -- Library was found at: /usr/lib64/libz.so
+    -- Library was found at: /usr/lib64/libm.so
+    -- Library was found at: /usr/lib64/libdl.so
+    -- Library was found at: /usr/lib64/librt.so
+    -- Library was found at: /usr/lib64/libpthread.so
+    -- Using errmsg.sys found at: /home/maxscale/usr/share/mysql/english/errmsg.sys
+    -- Using embedded library: /home/mpinto/usr/lib64/libmysqld.a
+    -- Valgrind found: /usr/bin/valgrind
+    -- Found dynamic MySQL client library: /home/maxscale/usr/lib64/libmysqlclient.so
+    -- Found static MySQL client library: /usr/lib/libmysqlclient.a
+    -- C Compiler supports: -Werror=format-security
+    -- Linking against: /home/mpinto/usr/lib64/libmysqlclient.so
+    -- Installing MaxScale to: /usr/local/maxscale/
+    -- Generating RPM packages
+    -- Found Doxygen: /usr/bin/doxygen (found version "1.6.1") 
+    -- Configuring done
+    -- Generating done
+    -- Build files have been written to: /home/maxscale/develop/build
+
+Once the cmake command is complete simply run make to build the MaxScale binaries.
+
+    $ make
+  
+
+    **Scanning dependencies of target utils**
+    [  1%] Building CXX object utils/CMakeFiles/utils.dir/skygw_utils.cc.o
+    **Linking CXX static library libutils.a**
+    [  1%] Built target utils
+    **Scanning dependencies of target log_manager**
+    [  2%] Building CXX object log_manager/CMakeFiles/log_manager.dir/log_manager.cc.o
+    ...
+
+ +After the completion of the make process the installation can be achieved by running the make install target. + + $ make install + ... + +This will result in an installation being created which is identical to that which would be achieved by installing the binary package. The only difference is that init.d scripts aren't installed and the RabbitMQ components are not built. + +By default, MaxScale installs to '/usr/local/skysql/maxscale' and places init.d scripts and ldconfig files into their folders. Change the CMAKE_INSTALL_PREFIX variable to your desired installation directory and set INSTALL_SYSTEM_FILES=N to prevent the init.d script and ldconfig file installation. + +Other useful targets for Make are `documentation`, which generates the Doxygen documentation, and `uninstall` which uninstalls MaxScale binaries after an install. + +## Running the MaxScale testsuite + +To run "make testall" you need to have four mysqld servers running on localhost. It assumes a master-slave replication setup with one master and three slaves. + +The ports to which these servers are listening and the credentials to use for testing + can be specified in the 'macros.cmake' file. + +On the master full privileges on the databases "test" and "FOO" are needed, on the slaves SELECT permissions on test.* should be sufficient. + +When you run the 'make testall' target after configuring the build with CMake a local version of MaxScale is installed into the build folder. After this a MaxScale instance is started and the test set is executed. + +After testing has finished you can find a full testlog generated by CTest in Testing/Temporary/ directory and MaxScale's log files in the log/ directory of the build root. 
+ diff --git a/Documentation/Getting-Started/Getting-Started-With-MaxScale.md b/Documentation/Getting-Started/Getting-Started-With-MaxScale.md index 87c9996e0..48d722ff2 100644 --- a/Documentation/Getting-Started/Getting-Started-With-MaxScale.md +++ b/Documentation/Getting-Started/Getting-Started-With-MaxScale.md @@ -30,98 +30,7 @@ Upon successful completion of the installation process you have a version of Max ## Building MaxScale From Source Code -Alternatively you may download the MaxScale source and build your own binaries. You will need a number of tools and libraries in order to achieve this. - -* cmake version 2.8.12 or later - -* gcc recommended version 4.4.7 or later - -* libaio - -* MariaDB Develop libraries version 5.5.38 or later - -* libedit 2.11 or later (used by MaxAdmin tool) - -First clone the GitHub project to your machine either via the web interface, your favorite graphical interface or the git command line - - $ git clone https://github.com/mariadb-corporation/MaxScale - Cloning into 'MaxScale'... - remote: Counting objects: 16228, done. - ... - -Change directory to the MaxScale directory, create a build directory and change directory to that build directory - - $ cd MaxScale - - $ mkdir build - - $ cd build - - The next step is to run the cmake command to build the Makefile you need to compile Maxscale. There are a number of options you may give to configure cmake and point it to the various packages it requires. These are documented in the MaxScale README file, in this example we will assume the MariaDB developer packages have been installed in a non-standard location and set all the options required to locate these, along with options to build the unit tests and configure the installation target directory. 
- - $ cmake -DMYSQL\_DIR=/usr/mariadb-5.5.41-linux-x86_64/include/mysql \ - -DEMBEDDED\_LIB=/usr/mariadb-5.5.41-linux-x86\_64/lib/libmysqld.a \ - -DMYSQLCLIENT\_LIBRARIES=/usr/mariadb-5.5.41-linux-x86_64/lib/libmysqlclient.so \ - -DERRMSG=/usr/mariadb-5.5.41-linux-x86\_64/share/english/errmsg.sys \ - -DINSTALL\_DIR=/home/maxscale/MaxScale -DBUILD_TESTS=Y \ - -DINSTALL\_SYSTEM\_FILES=N \ - -DBUILD_BINLOG=Y ../ - - -- CMake version: 2.8.12.2 - -- The C compiler identification is GNU 4.4.7 - -- The CXX compiler identification is GNU 4.4.7 - -- Check for working C compiler: /usr/bin/cc - -- Check for working C compiler: /usr/bin/cc -- works - -- Detecting C compiler ABI info - -- Detecting C compiler ABI info - done - -- Check for working CXX compiler: /usr/bin/c++ - -- Check for working CXX compiler: /usr/bin/c++ -- works - -- Detecting CXX compiler ABI info - -- Detecting CXX compiler ABI info - done - -- Library was found at: /lib64/libaio.so - -- Library was found at: /usr/lib64/libssl.so - -- Library was found at: /usr/lib64/libcrypt.so - -- Library was found at: /usr/lib64/libcrypto.so - -- Library was found at: /usr/lib64/libz.so - -- Library was found at: /usr/lib64/libm.so - -- Library was found at: /usr/lib64/libdl.so - -- Library was found at: /usr/lib64/librt.so - -- Library was found at: /usr/lib64/libpthread.so - -- Using errmsg.sys found at: /home/maxscale/usr/share/mysql/english/errmsg.sys - -- Using embedded library: /home/mpinto/usr/lib64/libmysqld.a - -- Valgrind found: /usr/bin/valgrind - -- Found dynamic MySQL client library: /home/maxscale/usr/lib64/libmysqlclient.so - -- Found static MySQL client library: /usr/lib/libmysqlclient.a - -- C Compiler supports: -Werror=format-security - -- Linking against: /home/mpinto/usr/lib64/libmysqlclient.so - -- Installing MaxScale to: /usr/local/maxscale/ - -- Generating RPM packages - -- Found Doxygen: /usr/bin/doxygen (found version "1.6.1") - -- Configuring done - -- Generating done - -- Build files have 
been written to: /home/maxscale/develop/build - - -bash-4.1$ make depend - - -bash-4.1$ make - -Once the cmake command is complete simply run make to build the MaxScale binaries. - - $ make - **Scanning dependencies of target utils** - [ 1%] Building CXX object utils/CMakeFiles/utils.dir/skygw_utils.cc.o - **Linking CXX static library libutils.a** - [ 1%] Built target utils - **Scanning dependencies of target log_manager** - [ 2%] Building CXX object log_manager/CMakeFiles/log_manager.dir/log_manager.cc.o - ... - -After the completion of the make process the installation can be achieved by running the make install target. - - $ make install - ... - -This will result in an installation being created which is identical to that which would be achieved by installing the binary package. +Alternatively you may download the MaxScale source and build your own binaries. To do this, refer to the separate document [Building MaxScale from Source Code](/Documentation/Getting-Started/Building-MaxScale-from-Source-Code.md) ## Configuring MaxScale @@ -147,8 +56,21 @@ It is also possible to use the Read/Write Splitter with Galera. Although it is n As well as the four major configuration choices outlined above there are also other configurations sub-options that may be mixed with those to provide a variety of different configuration and functionality. The MaxScale filter concept allows the basic configurations to be built upon in a large variety of ways. A separate filter tutorial is available that discusses the concept and gives some examples of ways to use filters. +## Running MaxScale + +MaxScale consists of a core executable and a number of modules that implement +the different protocols and routing algorithms. These modules are built as +shared objects that are loaded on demand. In order for MaxScale to find these +modules it will search using a predescribed search path. The rules are: + + 1. Look in the current directory for the module + 2. Look in $MAXSCALE_HOME/modules + 3. 
Look in /usr/local/skysql/maxscale/modules + +Configuration is read by default from the file \$MAXSCALE_HOME/etc/MaxScale.cnf, /etc/MaxScale.cnf. An example file is included in the installation and can be found in the etc/ folder within the MaxScale installation. The default value of MAXSCALE_HOME can be overridden by using the -c flag on the command line. This should be immediately followed by the path to the MaxScale home directory. The -f flag can be used on the command line to set the name and the location of the configuration file. Without path expression the file is read from \$MAXSCALE_HOME/etc directory. + + ## Administration Of MaxScale There are various administration tasks that may be done with MaxScale, a client command, maxadmin, is available that will interact with a running MaxScale and allow the status of MaxScale to be monitored and give some control of the MaxScale functionality. There is a separate reference guide for the maxadmin utility and also a short administration tutorial that covers the common administration tasks that need to be done with MaxScale. - \ No newline at end of file diff --git a/Documentation/filters/Firewall-Filter.md b/Documentation/filters/Firewall-Filter.md index 1955f699b..9d99b359a 100644 --- a/Documentation/filters/Firewall-Filter.md +++ b/Documentation/filters/Firewall-Filter.md @@ -19,7 +19,7 @@ The firewall filter does not support any filter options. ### Filter Parameters -The firewall filter has one mandatory parameter that defines the location of the rule file. This is the 'rules' parameter and it expects an absolute path to the rule file. +The firewall filter has one mandatory parameter that defines the location of the rule file. This is the `rules` parameter and it expects an absolute path to the rule file. ## Rule syntax @@ -31,7 +31,7 @@ The rules are defined by using the following syntax. 
Rules always define a blocking action so the basic mode for the firewall filter is to allow all queries that do not match a given set of rules. Rules are identified by their name and have a mandatory part and optional parts. -The first step of defining a rule is to start with the keyword 'rule' which identifies this line of text as a rule. The second token is identified as the name of the rule. After that the mandatory token 'deny' is required to mark the start of the actual rule definition. +The first step of defining a rule is to start with the keyword `rule` which identifies this line of text as a rule. The second token is identified as the name of the rule. After that the mandatory token `deny` is required to mark the start of the actual rule definition. ### Mandatory rule parameters @@ -43,7 +43,7 @@ This rule blocks all queries that use the wildcard character *. #### Columns -This rule expects a list of values after the 'columns' keyword. These values are interpreted as column names and if a query targets any of these, it is blocked. +This rule expects a list of values after the `columns` keyword. These values are interpreted as column names and if a query targets any of these, it is blocked. #### Regex @@ -63,7 +63,7 @@ Each mandatory rule accepts one or more optional parameters. These are to be def #### At_times -This rule expects a list of time ranges that define the times when the rule in question is active. The time formats are expected to be ISO-8601 compliant and to be separated by a single dash (the - character). For example defining the active period of a rule to be 17:00 to 19:00 you would add 'at times 17:00:00-19:00:00' to the end of the rule. +This rule expects a list of time ranges that define the times when the rule in question is active. The time formats are expected to be ISO-8601 compliant and to be separated by a single dash (the - character). 
For example defining the active period of a rule to be 17:00 to 19:00 you would add `at times 17:00:00-19:00:00` to the end of the rule. #### On_queries @@ -73,9 +73,11 @@ This limits the rule to be active only on certain types of queries. To apply the defined rules to users use the following syntax. -`users NAME ... match [any|all] rules RULE ...` +`users NAME ... match [any|all|strict_all] rules RULE ...` -The first keyword is users which identifies this line as a user definition line. After this a list of user names and network addresses in the format 'user@0.0.0.0' is expected. The first part is the user name and the second part is the network address. You can use the '%' character as the wildcard to enable user name matching from any address or network matching for all users. After the list of users and networks the keyword match is expected. After this either the keyword 'any' or 'all' is expected. This defined how the rules are matched. If 'any' is used when the first rule is matched the query is considered blocked and the rest of the rules are skipped. If instead the 'all' keyword is used all rules must match for the query to be blocked. +The first keyword is users which identifies this line as a user definition line. After this a list of user names and network addresses in the format `user@0.0.0.0` is expected. The first part is the user name and the second part is the network address. You can use the `%` character as the wildcard to enable user name matching from any address or network matching for all users. After the list of users and networks the keyword match is expected. + +After this either the keyword `any`, `all` or `strict_all` is expected. This defines how the rules are matched. If `any` is used when the first rule is matched the query is considered blocked and the rest of the rules are skipped. If instead the `all` keyword is used all rules must match for the query to be blocked. 
The `strict_all` is the same as `all` but it checks the rules from left to right in the order they were listed. If one of these does not match, the rest of the rules are not checked. This could be useful in situations where you would for example combine `limit_queries` and `regex` rules. By using `strict_all` you can have the `regex` rule first and the `limit_queries` rule second. This way the rule only matches if the `regex` rule matches enough times for the `limit_queries` rule to match. After the matching part comes the rules keyword after which a list of rule names is expected. This allows reusing of the rules and enables varying levels of query restriction. @@ -105,4 +107,4 @@ This can be achieved by creating two rules. One that blocks the usage of the wil We want to prevent accidental deletes into the managers table where the where clause is missing. This poses a problem, we don't want to require all the delete queries to have a where clause. We only want to prevent the data in the managers table from being deleted without a where clause. -To achieve this, we need two rules. The first rule can be seen on line 5 in the example rule file. This defines that all delete operations must have a where clause. This rule alone does us no good so we need a second one. 
The second rule is defined on line 6 and it blocks all queries that match the provided regular expression. When we combine these two rules we get the result we want. You can see the application of these rules on line 9 of the example rule file. The usage of the `all` and `strict_all` matching mode requires that all the rules must match for the query to be blocked. This in effect combines the two rules into a more complex rule. diff --git a/README b/README index 583f9d26a..d105075ae 100644 --- a/README +++ b/README @@ -31,190 +31,20 @@ issues and communicate with the MaxScale community. Bugs can be reported in the MariaDB Corporation bugs database [bug.mariadb.com](http://bugs.mariadb.com) -\section Dependency List +\section Documentation -Before building MaxScale from source, make sure you have installed all the dependencies for your system. -To install MariaDB packages configure MariaDB repositories for your system: - see instruction [here](https://downloads.mariadb.org/mariadb/repositories/) -The full list of dependencies for the most common distros: +For information about installing and using MaxScale, please refer to the +documentation. It is in Markdown format. -All RHEL, CentOS and Fedora versions: +You can point your browser to the MaxScale project at GitHub. Look +inside the "Documentation" directory, where you will find a file named +Documentation-Contents.md. Click on that, and GitHub will show the +documentation in its intended display format. The contents page lists +the available documents and has links to them. 
- gcc gcc-c++ ncurses-devel bison glibc-devel cmake libgcc perl make libtool - openssl-devel libaio libaio-devel librabbitmq-devel - -RHEL 6, 7, CentOS 6, 7, Fedora: - - libedit-devel - -RHEL 7, CentOS 7: - - mariadb-devel mariadb-embedded-devel - -RHEL 5, 7, CentOS 5, 6, Fedora 19, 20 - - MariaDB-devel MariaDB-server - -Fedora 19, 20 - - systemtap-sdt-devel - -to build RPM package: - - rpm-build - -Ubuntu 14.04, Debian 'jessie' - - cmake - gcc g++ ncurses-dev bison build-essential libssl-dev libaio-dev - perl make libtool librabbitmq-dev libmariadbclient-dev - libmariadbd-dev mariadb-server - -Other Ubuntu and Debian - - MariaDB embedded library have to be manually extracted from tarball: - https://downloads.mariadb.org/interstitial/mariadb-5.5.41/bintar-linux-glibc_214-x86_64/mariadb-5.5.41-linux-glibc_214-x86_64.tar.gz - for old systems with glibc < 2.14: - https://downloads.mariadb.org/interstitial/mariadb-5.5.41/bintar-linux-x86_64/mariadb-5.5.41-linux-x86_64.tar.gz - -To build DEB package: - dpkg-dev - -OpenSUSE (mariadb-devel package, build fails): - gcc gcc-c++ ncurses-devel bison glibc-devel cmake libgcc_s1 perl - make libtool libopenssl-devel libaio libaio-devel - libedit-devel librabbitmq-devel - MariaDB-devel MariaDB-client MariaDB-server (if zypper ask which MariaDB client should be installed 'MariaDB-client' or 'mariadb-client' - please select 'MariaDB-client') - - -If you do not wish to install the MariaDB packages you can use the bundled RPM unpacking script: - - ./unpack_rpm.sh - -This looks for MariaDB RPMs and unpacks them into the destination directory. This location -can then be passed to CMake to specify the location of the headers, libraries and other required files. - -\section Building Building MaxScale - -Once you have installed all of MaxScale's dependencies you are ready to build MaxScale using CMake. 
- -CMake tries to find all the required directories and files on its own but if it can't find them or you wish to -explicitly state the locations you can pass additional options to CMake by using the -D flag. To confirm the variable -values, you can run CMake in interactive mode by using the -i flag or use a CMake GUI (for example, ccmake for command line). - -It is highly recommended to make a separate build directory to build into. -This keeps the source and build trees clean and makes it easy to get rid -of everything you built by simply deleting the build directory. - -To build MaxScale using CMake: - - cd - - mkdir build - - cd build - - cmake .. - - make - - make install - -This generates the required makefiles in the current directory, compiles -and links all the programs and installs -all the required files in their right places. - -If you have your headers and libraries in non-standard locations, you can -define those locations at configuration time as such: - - cmake -D= - - -This libmysqld.a comes from the RPM or it is copied from an existing -MariaDB setup. The file embedded_priv.h is not available in the RPM -packages, please get it from an existing MariaDB setup and copy it -to one of the path in MYSQL_HEADERS - -The ERRMSG variable points to the errmsg.sys file that is required -by the embedded library. If you unpacked the RPMs using the script -you need to provide the location of the errmsg.sys file when you -are configuring the build system. - -Example: - - cmake -DERRMSG=/home/user/share/english/errmsg.sys .. - - -Please note the errmsg.sys file is NOT included in the RPMs at the -current time, it must be taken from an existing MariaDB setup. The -version of the errmsg.sys file must match the version of the developer -package you are using. A version mismatch will cause the library to fail -to initialise. - -By default, MaxScale installs to '/usr/local/skysql/maxscale' and places init.d scripts -and ldconfig files into their folders. 
Change the CMAKE_INSTALL_PREFIX variable to your desired -installation directory and set INSTALL_SYSTEM_FILES=N to prevent the init.d script and -ldconfig file installation. - -If you run into any trouble while configuring CMake, you can always remove the -'CMakeCache.txt' file to clear CMake's internal cache. This resets all values to their -defaults and can be used to fix a 'stuck' configuration of CMake. This is also a good -reason why you should always build into a separate directory, because you can safely -wipe the build directory clean without the danger of deleting important files when -something goes wrong. Building 'out-of-source' also allows you to have multiple -configurations of MaxScale at the same time. - -The default values that CMake uses can be found in the 'macros.cmake' file. -If you wish to change these, edit the 'macros.cmake' file or define the -variables manually at configuration time. - -To display all CMake variables with their descriptions: - - cmake -LH - -\section Running Running MaxScale - -MaxScale consists of a core executable and a number of modules that implement -the different protocols and routing algorithms. These modules are built as -shared objects that are loaded on demand. In order for MaxScale to find these -modules it will search using a predescribed search path. The rules are: - -1. Look in the current directory for the module - -2. Look in $MAXSCALE_HOME/modules - -3. Look in /usr/local/skysql/maxscale/modules - -Configuration is read by default from the file -$MAXSCALE_HOME/etc/MaxScale.cnf, /etc/MaxScale.cnf, an example file -is included in in the installation and can be found in the etc/ folder. -The default value of MAXSCALE_HOME can be overriden by using the -c flag -on the command line. This should be immediately followed by the path to -the MaxScale home directory. - -The -f flag can be used to set the name and the location of the configuration -file. 
Without path expression the file is read from $MAXSCALE_HOME/etc directory. - -\section Testing Running MaxScale testsuite - -To run "make testall" you need to have four mysqld servers running -on localhost. It assumes a master-slave replication setup with one slave and -three slaves. - -The ports to which these servers are listening and the credentials to use for testing - can be specified in the 'macros.cmake' file. - -On the master full privileges on the databases "test" and "FOO" -are needed, on the saves SELECT permissions on test.* should -be sufficient. - -When you run the 'make testall' target after configuring the build with CMake -a local version of MaxScale is installed into the build folder. After this a MaxScale -instance is started and the test set is executed. - -After testing has finished you can find a full testlog -generated by CTest in Testing/Temporary/ directory and MaxScale's -log files in the log/ directory of the build root. +If you do not want to rely on the internet, then clone the project +from GitHub - either the entire project or just the Documentation folder. +Then point your browser to the Documentation-Contents.md file in your +local file system and proceed as above. 
*/ diff --git a/log_manager/log_manager.cc b/log_manager/log_manager.cc index 1171414df..11d1dbc23 100644 --- a/log_manager/log_manager.cc +++ b/log_manager/log_manager.cc @@ -683,7 +683,7 @@ static int logmanager_write_log( size_t safe_str_len; /** Length of session id */ size_t sesid_str_len; - + size_t cmplen = 0; /** * 2 braces, 2 spaces and terminating char * If session id is stored to tls_log_info structure, allocate @@ -691,7 +691,7 @@ static int logmanager_write_log( */ if (id == LOGFILE_TRACE && tls_log_info.li_sesid != 0) { - sesid_str_len = 2+2+get_decimal_len(tls_log_info.li_sesid)+1; + sesid_str_len = 5*sizeof(char)+get_decimal_len(tls_log_info.li_sesid); } else { @@ -699,14 +699,16 @@ static int logmanager_write_log( } timestamp_len = get_timestamp_len(); + cmplen = sesid_str_len > 0 ? sesid_str_len - sizeof(char) : 0; + /** Find out how much can be safely written with current block size */ - if (timestamp_len-1+MAX(sesid_str_len-1,0)+str_len > lf->lf_buf_size) + if (timestamp_len-sizeof(char)+cmplen+str_len > lf->lf_buf_size) { safe_str_len = lf->lf_buf_size; } else { - safe_str_len = timestamp_len-1+MAX(sesid_str_len-1,0)+str_len; + safe_str_len = timestamp_len-sizeof(char)+cmplen+str_len; } /** * Seek write position and register to block buffer. 
diff --git a/macros.cmake b/macros.cmake index 77c565135..9676cd5f8 100644 --- a/macros.cmake +++ b/macros.cmake @@ -52,8 +52,11 @@ macro(set_variables) set(STATIC_EMBEDDED TRUE CACHE BOOL "Use static version of libmysqld") # Build RabbitMQ components - set(BUILD_RABBITMQ FALSE CACHE BOOL "Build RabbitMQ components") + set(BUILD_RABBITMQ TRUE CACHE BOOL "Build RabbitMQ components") + # Build the binlog router + set(BUILD_BINLOG TRUE CACHE BOOL "Build binlog router") + # Use gcov build flags set(GCOV FALSE CACHE BOOL "Use gcov build flags") diff --git a/server/core/config.c b/server/core/config.c index b8be5e21a..d3c7e8a0c 100644 --- a/server/core/config.c +++ b/server/core/config.c @@ -71,7 +71,6 @@ static char *config_get_value(CONFIG_PARAMETER *, const char *); static int handle_global_item(const char *, const char *); static void global_defaults(); static void check_config_objects(CONFIG_CONTEXT *context); -static int config_truth_value(char *str); static int internalService(char *router); static char *config_file = NULL; @@ -1902,14 +1901,14 @@ bool config_set_qualified_param( * @param str String to convert to a boolean * @return Truth value */ -static int +int config_truth_value(char *str) { - if (strcasecmp(str, "true") == 0 || strcasecmp(str, "on") == 0) + if (strcasecmp(str, "true") == 0 || strcasecmp(str, "on") == 0 || strcasecmp(str, "yes") == 0) { return 1; } - if (strcasecmp(str, "false") == 0 || strcasecmp(str, "off") == 0) + if (strcasecmp(str, "false") == 0 || strcasecmp(str, "off") == 0 || strcasecmp(str, "no") == 0) { return 0; } diff --git a/server/core/dcb.c b/server/core/dcb.c index fbe7e8e98..3d619ff93 100644 --- a/server/core/dcb.c +++ b/server/core/dcb.c @@ -2081,12 +2081,12 @@ dcb_get_next (DCB* dcb) } /** - * Call all the callbacks on all DCB's that match the reason given + * Call all the callbacks on all DCB's that match the server and the reason given * * @param reason The DCB_REASON that triggers the callback */ void 
-dcb_call_foreach(DCB_REASON reason) +dcb_call_foreach(struct server* server, DCB_REASON reason) { LOGIF(LD, (skygw_log_write(LOGFILE_DEBUG, "%lu [dcb_call_foreach]", @@ -2106,7 +2106,8 @@ dcb_call_foreach(DCB_REASON reason) while (dcb != NULL) { - if (dcb->state == DCB_STATE_POLLING) + if (dcb->state == DCB_STATE_POLLING && dcb->server && + strcmp(dcb->server->unique_name,server->unique_name) == 0) { dcb_call_callback(dcb, DCB_REASON_NOT_RESPONDING); } diff --git a/server/core/monitor.c b/server/core/monitor.c index 23b8ee4ef..3584978e9 100644 --- a/server/core/monitor.c +++ b/server/core/monitor.c @@ -139,9 +139,12 @@ monitorStart(MONITOR *monitor) void monitorStop(MONITOR *monitor) { + if(monitor->state != MONITOR_STATE_STOPPED) + { monitor->state = MONITOR_STATE_STOPPING; monitor->module->stopMonitor(monitor->handle); monitor->state = MONITOR_STATE_STOPPED; + } } /** diff --git a/server/core/server.c b/server/core/server.c index 9841b96ff..0f39c0edc 100644 --- a/server/core/server.c +++ b/server/core/server.c @@ -293,6 +293,90 @@ char *stat; spinlock_release(&server_spin); } +/** + * Print all servers in Json format to a DCB + * + * Designed to be called within a debugger session in order + * to display all active servers within the gateway + */ +void +dprintAllServersJson(DCB *dcb) +{ +SERVER *ptr; +char *stat; +int len = 0; +int el = 1; + + spinlock_acquire(&server_spin); + ptr = allServers; + while (ptr) + { + ptr = ptr->next; + len++; + } + ptr = allServers; + dcb_printf(dcb, "[\n"); + while (ptr) + { + dcb_printf(dcb, " {\n \"server\": \"%s\",\n", + ptr->name); + stat = server_status(ptr); + dcb_printf(dcb, " \"status\": \"%s\",\n", + stat); + free(stat); + dcb_printf(dcb, " \"protocol\": \"%s\",\n", + ptr->protocol); + dcb_printf(dcb, " \"port\": \"%d\",\n", + ptr->port); + if (ptr->server_string) + dcb_printf(dcb, " \"version\": \"%s\",\n", + ptr->server_string); + dcb_printf(dcb, " \"nodeId\": \"%d\",\n", + ptr->node_id); + dcb_printf(dcb, " 
\"masterId\": \"%d\",\n", + ptr->master_id); + if (ptr->slaves) { + int i; + dcb_printf(dcb, " \"slaveIds\": [ "); + for (i = 0; ptr->slaves[i]; i++) + { + if (i == 0) + dcb_printf(dcb, "%li", ptr->slaves[i]); + else + dcb_printf(dcb, ", %li ", ptr->slaves[i]); + } + dcb_printf(dcb, "],\n"); + } + dcb_printf(dcb, " \"replDepth\": \"%d\",\n", + ptr->depth); + if (SERVER_IS_SLAVE(ptr) || SERVER_IS_RELAY_SERVER(ptr)) { + if (ptr->rlag >= 0) { + dcb_printf(dcb, " \"slaveDelay\": \"%d\",\n", ptr->rlag); + } + } + if (ptr->node_ts > 0) { + dcb_printf(dcb, " \"lastReplHeartbeat\": \"%lu\",\n", ptr->node_ts); + } + dcb_printf(dcb, " \"totalConnections\": \"%d\",\n", + ptr->stats.n_connections); + dcb_printf(dcb, " \"currentConnections\": \"%d\",\n", + ptr->stats.n_current); + dcb_printf(dcb, " \"currentOps\": \"%d\"\n", + ptr->stats.n_current_ops); + if (el < len) { + dcb_printf(dcb, " },\n"); + } + else { + dcb_printf(dcb, " }\n"); + } + ptr = ptr->next; + el++; + } + dcb_printf(dcb, "]\n"); + spinlock_release(&server_spin); +} + + /** * Print server details to a DCB * diff --git a/server/core/test/testhash.c b/server/core/test/testhash.c index 89720da80..776c8d93a 100644 --- a/server/core/test/testhash.c +++ b/server/core/test/testhash.c @@ -157,7 +157,7 @@ static bool do_hashtest( CHK_HASHTABLE(h); hashtable_free(h); -return_succp: + free(val_arr); return succp; } diff --git a/server/core/test/testserver.c b/server/core/test/testserver.c index de40847d6..7e00b4f0b 100644 --- a/server/core/test/testserver.c +++ b/server/core/test/testserver.c @@ -32,7 +32,7 @@ #include #include - +#include /** * test1 Allocate a server and do lots of other things * diff --git a/server/include/config.h b/server/include/config.h index ca3092576..93bd095c1 100644 --- a/server/include/config.h +++ b/server/include/config.h @@ -107,7 +107,7 @@ extern unsigned int config_pollsleep(); CONFIG_PARAMETER* config_get_param(CONFIG_PARAMETER* params, const char* name); config_param_type_t 
config_get_paramtype(CONFIG_PARAMETER* param); CONFIG_PARAMETER* config_clone_param(CONFIG_PARAMETER* param); - +int config_truth_value(char *str); bool config_set_qualified_param( CONFIG_PARAMETER* param, void* val, diff --git a/server/include/dcb.h b/server/include/dcb.h index e82b55e33..e360a8fbb 100644 --- a/server/include/dcb.h +++ b/server/include/dcb.h @@ -321,7 +321,7 @@ int dcb_remove_callback(DCB *, DCB_REASON, int (*)(struct dcb *, DCB_REASON, vo int dcb_isvalid(DCB *); /* Check the DCB is in the linked list */ bool dcb_set_state(DCB* dcb, dcb_state_t new_state, dcb_state_t* old_state); -void dcb_call_foreach (DCB_REASON reason); +void dcb_call_foreach (struct server* server, DCB_REASON reason); size_t dcb_get_session_id(DCB* dcb); bool dcb_get_ses_log_info(DCB* dcb, size_t* sesid, int* enabled_logs); diff --git a/server/include/server.h b/server/include/server.h index 4bb514d65..1eb2d3eeb 100644 --- a/server/include/server.h +++ b/server/include/server.h @@ -176,6 +176,7 @@ extern SERVER *server_find(char *, unsigned short); extern void printServer(SERVER *); extern void printAllServers(); extern void dprintAllServers(DCB *); +extern void dprintAllServersJson(DCB *); extern void dprintServer(DCB *, SERVER *); extern void dListServers(DCB *); extern char *server_status(SERVER *); diff --git a/server/modules/filter/fwfilter.c b/server/modules/filter/fwfilter.c index 7766521cf..8fbfc9435 100644 --- a/server/modules/filter/fwfilter.c +++ b/server/modules/filter/fwfilter.c @@ -58,7 +58,7 @@ * combinations of username and network, either the value any or all, * depending on how you want to match the rules, and one or more rule names. *@code{.unparsed} - * users NAME ... match [any|all] rules RULE ... + * users NAME ... match [any|all|strict_all] rules RULE ... 
*@endcode */ #include @@ -166,6 +166,7 @@ typedef struct queryspeed_t{ int count; /*< Number of queries done */ int limit; /*< Maximum number of queries */ long id; /*< Unique id of the rule */ + bool active; /*< If the rule has been triggered */ struct queryspeed_t* next; /*< Next node in the list */ }QUERYSPEED; @@ -200,6 +201,9 @@ typedef struct user_t{ QUERYSPEED* qs_limit;/*< The query speed structure unique to this user */ RULELIST* rules_or;/*< If any of these rules match the action is triggered */ RULELIST* rules_and;/*< All of these rules must match for the action to trigger */ + RULELIST* rules_strict_and; /*< rules that skip the rest of the rules if one of them + * fails. This is only for rules paired with 'match strict_all'. */ + }USER; /** @@ -625,6 +629,7 @@ void add_users(char* rule, FW_INSTANCE* instance) instance->userstrings = link; } + /** * Parses the list of rule strings for users and links them against the listed rules. * Only adds those rules that are found. If the rule isn't found a message is written to the error log. 
@@ -641,7 +646,7 @@ void link_rules(char* rule, FW_INSTANCE* instance) char *tok, *ruleptr, *userptr, *modeptr; char *saveptr = NULL; RULELIST* rulelist = NULL; - + bool strict = false; userptr = strstr(rule,"users "); modeptr = strstr(rule," match "); ruleptr = strstr(rule," rules "); @@ -662,6 +667,9 @@ void link_rules(char* rule, FW_INSTANCE* instance) match_any = true; }else if(strcmp(tok,"all") == 0){ match_any = false; + }else if(strcmp(tok,"strict_all") == 0){ + match_any = false; + strict = true; }else{ skygw_log_write(LOGFILE_ERROR, "fwfilter: Rule syntax incorrect, 'match' was not followed by 'any' or 'all': %s",rule); return; @@ -730,10 +738,15 @@ void link_rules(char* rule, FW_INSTANCE* instance) if(match_any){ tail->next = user->rules_or; user->rules_or = tl; - }else{ - tail->next = user->rules_and; - user->rules_and = tl; + }else if(strict){ + tail->next = user->rules_and; + user->rules_strict_and = tl; } + else + { + tail->next = user->rules_and; + user->rules_and = tl; + } hashtable_add(instance->htable, (void *)userptr, @@ -1295,11 +1308,6 @@ bool rule_matches(FW_INSTANCE* my_instance, FW_SESSION* my_session, GWBUF *queue time_t time_now; struct tm* tm_now; - if(my_session->errmsg){ - free(my_session->errmsg); - my_session->errmsg = NULL; - } - time(&time_now); tm_now = localtime(&time_now); @@ -1439,43 +1447,56 @@ bool rule_matches(FW_INSTANCE* my_instance, FW_SESSION* my_session, GWBUF *queue queryspeed->next = user->qs_limit; user->qs_limit = queryspeed; } - - if(queryspeed->count > queryspeed->limit) - { - queryspeed->triggered = time_now; - queryspeed->count = 0; - matches = true; - - - skygw_log_write(LOGFILE_TRACE, - "fwfilter: rule '%s': query limit triggered (%d queries in %f seconds), denying queries from user for %f seconds.", - rulelist->rule->name, - queryspeed->limit, - queryspeed->period, - queryspeed->cooldown); - double blocked_for = queryspeed->cooldown - difftime(time_now,queryspeed->triggered); - sprintf(emsg,"Queries denied for 
%f seconds",blocked_for); - msg = strdup(emsg); - } - else if(difftime(time_now,queryspeed->triggered) < queryspeed->cooldown) - { - - double blocked_for = queryspeed->cooldown - difftime(time_now,queryspeed->triggered); - - sprintf(emsg,"Queries denied for %f seconds",blocked_for); - skygw_log_write(LOGFILE_TRACE, "fwfilter: rule '%s': user denied for %f seconds",rulelist->rule->name,blocked_for); - msg = strdup(emsg); - - matches = true; - } - else if(difftime(time_now,queryspeed->first_query) < queryspeed->period) - { - queryspeed->count++; - } - else - { - queryspeed->first_query = time_now; - } + + if(queryspeed->active) + { + if(difftime(time_now,queryspeed->triggered) < queryspeed->cooldown) + { + + double blocked_for = queryspeed->cooldown - difftime(time_now,queryspeed->triggered); + + sprintf(emsg,"Queries denied for %f seconds",blocked_for); + skygw_log_write(LOGFILE_TRACE, "fwfilter: rule '%s': user denied for %f seconds",rulelist->rule->name,blocked_for); + msg = strdup(emsg); + + matches = true; + } + else + { + queryspeed->active = false; + queryspeed->count = 0; + + } + } + else + { + if(queryspeed->count >= queryspeed->limit) + { + queryspeed->triggered = time_now; + matches = true; + queryspeed->active = true; + + skygw_log_write(LOGFILE_TRACE, + "fwfilter: rule '%s': query limit triggered (%d queries in %f seconds), denying queries from user for %f seconds.", + rulelist->rule->name, + queryspeed->limit, + queryspeed->period, + queryspeed->cooldown); + double blocked_for = queryspeed->cooldown - difftime(time_now,queryspeed->triggered); + sprintf(emsg,"Queries denied for %f seconds",blocked_for); + msg = strdup(emsg); + } + else if(queryspeed->count > 0 && + difftime(time_now,queryspeed->first_query) <= queryspeed->period) + { + queryspeed->count++; + } + else + { + queryspeed->first_query = time_now; + queryspeed->count = 1; + } + } break; @@ -1499,7 +1520,11 @@ bool rule_matches(FW_INSTANCE* my_instance, FW_SESSION* my_session, GWBUF *queue 
queryresolved: if(msg){ - my_session->errmsg = msg; + if(my_session->errmsg){ + free(my_session->errmsg); + } + + my_session->errmsg = msg; } if(matches){ @@ -1536,7 +1561,10 @@ bool check_match_any(FW_INSTANCE* my_instance, FW_SESSION* my_session, GWBUF *qu memset(fullquery + qlen,0,1); } - rulelist = user->rules_or; + if((rulelist = user->rules_or) == NULL) + { + goto retblock; + } while(rulelist){ @@ -1544,9 +1572,10 @@ bool check_match_any(FW_INSTANCE* my_instance, FW_SESSION* my_session, GWBUF *qu rulelist = rulelist->next; continue; } - if((rval = rule_matches(my_instance,my_session,queue,user,rulelist,fullquery))){ - goto retblock; + if((rval = rule_matches(my_instance,my_session,queue,user,rulelist,fullquery))){ + goto retblock; } + rulelist = rulelist->next; } @@ -1565,9 +1594,9 @@ bool check_match_any(FW_INSTANCE* my_instance, FW_SESSION* my_session, GWBUF *qu * @param user The user whose rulelist is checked * @return True if the query matches all of the rules otherwise false */ -bool check_match_all(FW_INSTANCE* my_instance, FW_SESSION* my_session, GWBUF *queue, USER* user) +bool check_match_all(FW_INSTANCE* my_instance, FW_SESSION* my_session, GWBUF *queue, USER* user,bool strict_all) { - bool is_sql, rval = 0; + bool is_sql, rval = true; int qlen; char *fullquery = NULL,*ptr; @@ -1585,23 +1614,38 @@ bool check_match_all(FW_INSTANCE* my_instance, FW_SESSION* my_session, GWBUF *qu } - - rulelist = user->rules_or; - + + if(strict_all) + { + rulelist = user->rules_strict_and; + } + else + { + rulelist = user->rules_and; + } + + if(rulelist == NULL) + { + rval = false; + goto retblock; + } + while(rulelist){ if(!rule_is_active(rulelist->rule)){ rulelist = rulelist->next; continue; } + if(!rule_matches(my_instance,my_session,queue,user,rulelist,fullquery)){ rval = false; - goto retblock; + if(strict_all) + break; } rulelist = rulelist->next; } - + retblock: free(fullquery); @@ -1664,7 +1708,12 @@ routeQuery(FILTER *instance, void *session, GWBUF *queue) goto 
queryresolved; } - if(check_match_all(my_instance,my_session,queue,user)){ + if(check_match_all(my_instance,my_session,queue,user,false)){ + accept = false; + goto queryresolved; + } + + if(check_match_all(my_instance,my_session,queue,user,true)){ accept = false; goto queryresolved; } diff --git a/server/modules/filter/mqfilter.c b/server/modules/filter/mqfilter.c index 86059c7bc..7d78f20ea 100644 --- a/server/modules/filter/mqfilter.c +++ b/server/modules/filter/mqfilter.c @@ -78,6 +78,8 @@ #include #include #include +#include + MODULE_INFO info = { MODULE_API_FILTER, MODULE_ALPHA_RELEASE, @@ -87,7 +89,7 @@ MODULE_INFO info = { static char *version_str = "V1.0.2"; static int uid_gen; - +static int hktask_id = 0; /* * The filter entry points */ @@ -179,6 +181,16 @@ typedef struct object_trigger_t{ int size; }OBJ_TRIG; +/** + * Statistics for the mqfilter. + */ +typedef struct mqstats_t{ + int n_msg; /*< Total number of messages */ + int n_sent; /*< Number of sent messages */ + int n_queued; /*< Number of unsent messages */ + +}MQSTATS; + /** * A instance structure, containing the hostname, login credentials, @@ -190,7 +202,6 @@ typedef struct object_trigger_t{ * routing key named 'key'. Type of the exchange is 'direct' by default and all queries are logged. 
* */ - typedef struct { int port; char *hostname; @@ -213,12 +224,14 @@ typedef struct { int conn_stat; /**state of the connection to the server*/ int rconn_intv; /**delay for reconnects, in seconds*/ time_t last_rconn; /**last reconnect attempt*/ - SPINLOCK* rconn_lock; + SPINLOCK rconn_lock; + SPINLOCK msg_lock; mqmessage* messages; enum log_trigger_t trgtype; SRC_TRIG* src_trg; SHM_TRIG* shm_trg; OBJ_TRIG* obj_trg; + MQSTATS stats; } MQ_INSTANCE; /** @@ -239,6 +252,9 @@ typedef struct { bool was_query; /**True if the previous routeQuery call had valid content*/ } MQ_SESSION; +void sendMessage(void* data); + + /** * Implementation of the mandatory version entry point * @@ -426,6 +442,8 @@ char** parse_optstr(char* str, char* tok, int* szstore) char *lasts, *tk = str; char **arr; int i = 0, size = 1; + + while((tk = strpbrk(tk + 1,tok))){ size++; } @@ -463,11 +481,12 @@ createInstance(char **options, FILTER_PARAMETER **params) int paramcount = 0, parammax = 64, i = 0, x = 0, arrsize = 0; FILTER_PARAMETER** paramlist; char** arr; + char taskname[512]; - if ((my_instance = calloc(1, sizeof(MQ_INSTANCE)))&& - (my_instance->rconn_lock = malloc(sizeof(SPINLOCK)))) + if ((my_instance = calloc(1, sizeof(MQ_INSTANCE)))) { - spinlock_init(my_instance->rconn_lock); + spinlock_init(&my_instance->rconn_lock); + spinlock_init(&my_instance->msg_lock); uid_gen = 0; paramlist = malloc(sizeof(FILTER_PARAMETER*)*64); @@ -510,6 +529,7 @@ createInstance(char **options, FILTER_PARAMETER **params) }else if(!strcmp(params[i]->name,"ssl_client_key")){ my_instance->ssl_client_key = strdup(params[i]->value); + }else if(!strcmp(params[i]->name,"ssl_CA_cert")){ my_instance->ssl_CA_cert = strdup(params[i]->value); @@ -617,11 +637,11 @@ createInstance(char **options, FILTER_PARAMETER **params) } }else if(!strcmp(paramlist[i]->name,"logging_log_all")){ - if(!strcmp(paramlist[i]->value,"true")){ + if(config_truth_value(paramlist[i]->value)){ my_instance->log_all = true; } }else 
if(!strcmp(paramlist[i]->name,"logging_strict")){ - if(strcmp(paramlist[i]->value,"false") == 0){ + if(!config_truth_value(paramlist[i]->value)){ my_instance->strict_logging = false; } } @@ -669,7 +689,8 @@ createInstance(char **options, FILTER_PARAMETER **params) /**Connect to the server*/ init_conn(my_instance); - + snprintf(taskname,511,"mqtask%d",atomic_add(&hktask_id,1)); + hktask_add(taskname,sendMessage,(void*)my_instance,5); } return (FILTER *)my_instance; @@ -689,7 +710,7 @@ int declareQueue(MQ_INSTANCE *my_instance, MQ_SESSION* my_session, char* qname) int success = 1; amqp_rpc_reply_t reply; - spinlock_acquire(my_instance->rconn_lock); + spinlock_acquire(&my_instance->rconn_lock); amqp_queue_declare(my_instance->conn,my_instance->channel, amqp_cstring_bytes(qname), @@ -716,20 +737,23 @@ int declareQueue(MQ_INSTANCE *my_instance, MQ_SESSION* my_session, char* qname) "Error : Failed to bind queue to exchange."); } - spinlock_release(my_instance->rconn_lock); + spinlock_release(&my_instance->rconn_lock); return success; } /** * Broadcasts a message on the message stack to the RabbitMQ server - * and frees the allocated memory if successful. - * @return AMQP_STATUS_OK if the broadcasting was successful + * and frees the allocated memory if successful. This function is only called by + * the housekeeper thread. 
+ * @param data MQfilter instance */ -int sendMessage(MQ_INSTANCE *instance) +void sendMessage(void* data) { - int err_code; + MQ_INSTANCE *instance = (MQ_INSTANCE*)data; mqmessage *tmp; - + int err_num; + + spinlock_acquire(&instance->rconn_lock); if(instance->conn_stat != AMQP_STATUS_OK){ if(difftime(time(NULL),instance->last_rconn) > instance->rconn_intv){ @@ -746,29 +770,70 @@ int sendMessage(MQ_INSTANCE *instance) "Error : Failed to reconnect to the MQRabbit server "); } } + err_num = instance->conn_stat; } - - if(instance->messages){ - instance->conn_stat = amqp_basic_publish(instance->conn,instance->channel, + spinlock_release(&instance->rconn_lock); + + if(err_num != AMQP_STATUS_OK) + { + /** No connection to the broker */ + return; + } + + spinlock_acquire(&instance->msg_lock); + tmp = instance->messages; + + if(tmp == NULL) + { + spinlock_release(&instance->msg_lock); + return; + } + + instance->messages = instance->messages->next; + spinlock_release(&instance->msg_lock); + + while(tmp){ + + err_num = amqp_basic_publish(instance->conn,instance->channel, amqp_cstring_bytes(instance->exchange), amqp_cstring_bytes(instance->key), - 0,0,instance->messages->prop,amqp_cstring_bytes(instance->messages->msg)); + 0,0,tmp->prop,amqp_cstring_bytes(tmp->msg)); - - /**Message was sent successfully*/ - if(instance->conn_stat == AMQP_STATUS_OK){ - tmp = instance->messages; - instance->messages = instance->messages->next; + spinlock_acquire(&instance->rconn_lock); + instance->conn_stat = err_num; + spinlock_release(&instance->rconn_lock); + + if(err_num == AMQP_STATUS_OK){ + /**Message was sent successfully*/ free(tmp->prop); free(tmp->msg); free(tmp); + + atomic_add(&instance->stats.n_sent,1); + atomic_add(&instance->stats.n_queued,-1); + spinlock_acquire(&instance->msg_lock); + tmp = instance->messages; + + if(tmp == NULL) + { + spinlock_release(&instance->msg_lock); + return; + } + + instance->messages = instance->messages->next; + 
spinlock_release(&instance->msg_lock); + } + else + { + spinlock_acquire(&instance->msg_lock); + tmp->next = instance->messages; + instance->messages = tmp; + spinlock_release(&instance->msg_lock); + return; } } - err_code = instance->conn_stat; - - return err_code; } @@ -780,34 +845,30 @@ int sendMessage(MQ_INSTANCE *instance) */ void pushMessage(MQ_INSTANCE *instance, amqp_basic_properties_t* prop, char* msg) { - spinlock_acquire(instance->rconn_lock); - mqmessage* newmsg = malloc(sizeof(mqmessage)); + mqmessage* newmsg = calloc(1,sizeof(mqmessage)); if(newmsg){ + newmsg->msg = msg; newmsg->prop = prop; - newmsg->next = NULL; - - if(instance->messages){ - newmsg->next = instance->messages; - } - - instance->messages = newmsg; }else{ skygw_log_write(LOGFILE_ERROR, "Error : Cannot allocate enough memory."); free(prop); free(msg); + return; } - while(instance->messages){ - if(sendMessage(instance) != AMQP_STATUS_OK){ - break; - } - } + spinlock_acquire(&instance->msg_lock); + + newmsg->next = instance->messages; + instance->messages = newmsg; - spinlock_release(instance->rconn_lock); + spinlock_release(&instance->msg_lock); + + atomic_add(&instance->stats.n_msg,1); + atomic_add(&instance->stats.n_queued,1); } @@ -1458,12 +1519,18 @@ diagnostic(FILTER *instance, void *fsession, DCB *dcb) if (my_instance) { - dcb_printf(dcb, "\t\tConnecting to %s:%d as %s/%s.\nVhost: %s\tExchange: %s\tKey: %s\tQueue: %s\n", + dcb_printf(dcb, "Connecting to %s:%d as '%s'.\nVhost: %s\tExchange: %s\nKey: %s\tQueue: %s\n\n", my_instance->hostname,my_instance->port, - my_instance->username,my_instance->password, + my_instance->username, my_instance->vhost, my_instance->exchange, my_instance->key, my_instance->queue ); + dcb_printf(dcb, "%-16s%-16s%-16s\n", + "Messages","Queued","Sent"); + dcb_printf(dcb, "%-16d%-16d%-16d\n", + my_instance->stats.n_msg, + my_instance->stats.n_queued, + my_instance->stats.n_sent); } } diff --git a/server/modules/filter/test/harness_ui.c 
b/server/modules/filter/test/harness_ui.c index 8854b5c49..fa8d14e3d 100644 --- a/server/modules/filter/test/harness_ui.c +++ b/server/modules/filter/test/harness_ui.c @@ -187,7 +187,7 @@ int main(int argc, char** argv){ } instance.thrpool = t_thr_pool; - int thr_num = 1; + intptr_t thr_num = 1; for(i = 0;iserver)) || !(SERVER_IS_IN_CLUSTER(ptr->server))) { - dcb_call_foreach(DCB_REASON_NOT_RESPONDING); + dcb_call_foreach(ptr->server,DCB_REASON_NOT_RESPONDING); } } diff --git a/server/modules/routing/binlog/blr.c b/server/modules/routing/binlog/blr.c index 4b13744ef..9cb2fd106 100644 --- a/server/modules/routing/binlog/blr.c +++ b/server/modules/routing/binlog/blr.c @@ -31,8 +31,10 @@ * @verbatim * Revision History * - * Date Who Description + * Date Who Description * 02/04/2014 Mark Riddoch Initial implementation + * 17/02/2015 Massimiliano Pinto Addition of slave port and username in diagnostics + * 18/02/2015 Massimiliano Pinto Addition of dcb_close in closeSession * * @endverbatim */ @@ -625,6 +627,14 @@ ROUTER_SLAVE *slave = (ROUTER_SLAVE *)router_session; /* Unlock */ rses_end_locked_router_action(slave); + + /** + * Close the slave server connection + */ + if (slave->dcb != NULL) { + CHK_DCB(slave->dcb); + dcb_close(slave->dcb); + } } } @@ -848,8 +858,11 @@ struct tm tm; if (session->uuid) dcb_printf(dcb, "\t\tSlave UUID: %s\n", session->uuid); dcb_printf(dcb, - "\t\tSlave: %s\n", - session->dcb->remote); + "\t\tSlave_host_port: %s:%d\n", + session->dcb->remote, ntohs((session->dcb->ipv4).sin_port)); + dcb_printf(dcb, + "\t\tUsername: %s\n", + session->dcb->user); dcb_printf(dcb, "\t\tSlave DCB: %p\n", session->dcb); @@ -911,18 +924,21 @@ struct tm tm; dcb_printf(dcb, "\t\tSeconds behind master %u\n", router_inst->lastEventTimestamp - session->lastEventTimestamp); } - if ((session->cstate & CS_UPTODATE) == 0) + if (session->state == 0) { - dcb_printf(dcb, "\t\tSlave is in catchup mode. 
%s%s\n", + dcb_printf(dcb, "\t\tSlave_mode: connected\n"); + } + else if ((session->cstate & CS_UPTODATE) == 0) + { + dcb_printf(dcb, "\t\tSlave_mode: catchup. %s%s\n", ((session->cstate & CS_EXPECTCB) == 0 ? "" : "Waiting for DCB queue to drain."), ((session->cstate & CS_BUSY) == 0 ? "" : " Busy in slave catchup.")); - } else { - dcb_printf(dcb, "\t\tSlave is in normal mode.\n"); + dcb_printf(dcb, "\t\tSlave_mode: follow\n"); if (session->binlog_pos != router_inst->binlog_position) { dcb_printf(dcb, "\t\tSlave reports up to date however " diff --git a/server/modules/routing/binlog/blr_master.c b/server/modules/routing/binlog/blr_master.c index cb0da38e3..7bd194ac6 100644 --- a/server/modules/routing/binlog/blr_master.c +++ b/server/modules/routing/binlog/blr_master.c @@ -387,14 +387,19 @@ char query[128]; break; case BLRM_SERVERID: { - char *val = blr_extract_column(buf, 1); + char *val = blr_extract_column(buf, 2); // Response to fetch of master's server-id if (router->saved_master.server_id) GWBUF_CONSUME_ALL(router->saved_master.server_id); router->saved_master.server_id = buf; blr_cache_response(router, "serverid", buf); - // TODO: Extract the value of server-id and place in router->master_id + + // set router->masterid from master server-id if it's not set by the config option + if (router->masterid == 0) { + router->masterid = atoi(val); + } + { char str[80]; sprintf(str, "SET @master_heartbeat_period = %lu000000000", router->heartbeat); diff --git a/server/modules/routing/binlog/blr_slave.c b/server/modules/routing/binlog/blr_slave.c index 803be8770..4aafd41fd 100644 --- a/server/modules/routing/binlog/blr_slave.c +++ b/server/modules/routing/binlog/blr_slave.c @@ -31,8 +31,9 @@ * @verbatim * Revision History * - * Date Who Description + * Date Who Description * 14/04/2014 Mark Riddoch Initial implementation + * 18/02/2015 Massimiliano Pinto Addition of DISCONNECT ALL and DISCONNECT SERVER server_id * * @endverbatim */ @@ -76,6 +77,9 @@ static int 
blr_slave_send_slave_hosts(ROUTER_INSTANCE *router, ROUTER_SLAVE *sla static int blr_slave_send_fieldcount(ROUTER_INSTANCE *router, ROUTER_SLAVE *slave, int count); static int blr_slave_send_columndef(ROUTER_INSTANCE *router, ROUTER_SLAVE *slave, char *name, int type, int len, uint8_t seqno); static int blr_slave_send_eof(ROUTER_INSTANCE *router, ROUTER_SLAVE *slave, int seqno); +static int blr_slave_send_disconnected_server(ROUTER_INSTANCE *router, ROUTER_SLAVE *slave, int server_id, int found); +static int blr_slave_disconnect_all(ROUTER_INSTANCE *router, ROUTER_SLAVE *slave); +static int blr_slave_disconnect_server(ROUTER_INSTANCE *router, ROUTER_SLAVE *slave, int server_id); extern int lm_enabled_logfiles_bitmask; extern size_t log_ses_count[]; @@ -395,6 +399,35 @@ int query_len; } } } + else if (strcasecmp(query_text, "DISCONNECT") == 0) + { + if ((word = strtok_r(NULL, sep, &brkb)) == NULL) + { + LOGIF(LE, (skygw_log_write(LOGFILE_ERROR, "%s: Incomplete DISCONNECT command.", + router->service->name))); + + } + else if (strcasecmp(word, "ALL") == 0) + { + free(query_text); + spinlock_release(&router->lock); + + return blr_slave_disconnect_all(router, slave); + } + else if (strcasecmp(word, "SERVER") == 0) + { + if ((word = strtok_r(NULL, sep, &brkb)) == NULL) + { + LOGIF(LE, (skygw_log_write(LOGFILE_ERROR, + "%s: Expected DISCONNECT SERVER $server_id", + router->service->name))); + } else { + free(query_text); + return blr_slave_disconnect_server(router, slave, atoi(word)); + } + } + } + free(query_text); query_text = strndup(qtext, query_len); @@ -1837,3 +1870,190 @@ uint8_t *ptr; encode_value(ptr, 2, 16); // Autocommit enabled return slave->dcb->func.write(slave->dcb, pkt); } + +/** + * Send the reply only to the SQL command "DISCONNECT SERVER $server_id' + * + * @param router The binlog router instance + * @param slave The slave server to which we are sending the response + * @return Non-zero if data was sent + */ +static int 
+blr_slave_send_disconnected_server(ROUTER_INSTANCE *router, ROUTER_SLAVE *slave, int server_id, int found) +{ +GWBUF *pkt; +char state[40]; +char serverid[40]; +uint8_t *ptr; +int len, id_len, seqno = 2; + + blr_slave_send_fieldcount(router, slave, 2); + blr_slave_send_columndef(router, slave, "server_id", 0x03, 40, seqno++); + blr_slave_send_columndef(router, slave, "state", 0xf, 40, seqno++); + blr_slave_send_eof(router, slave, seqno++); + + sprintf(serverid, "%d", server_id); + id_len = strlen(serverid); + if (found) + strcpy(state, "disconnected"); + else + strcpy(state, "not found"); + + len = 5 + id_len + strlen(state) + 1; + if ((pkt = gwbuf_alloc(len)) == NULL) + return 0; + ptr = GWBUF_DATA(pkt); + encode_value(ptr, id_len + 2 + strlen(state), 24); // Add length of data packet + ptr += 3; + *ptr++ = seqno++; // Sequence number in response + + *ptr++ = id_len; // Length of result string + strncpy((char *)ptr, serverid, id_len); // Result string + ptr += id_len; + + *ptr++ = strlen(state); // Length of result string + strncpy((char *)ptr, state, strlen(state)); // Result string + ptr += strlen(state); + + slave->dcb->func.write(slave->dcb, pkt); + + return blr_slave_send_eof(router, slave, seqno++); +} + + +/** + * Send the response to the SQL command "DISCONNECT SERVER $server_id' + * and close the connection to that server + * + * @param router The binlog router instance + * @param slave The slave server to which we are sending the response + * @param server_id The slave server_id to disconnect + * @return Non-zero if data was sent to the client + */ +static int +blr_slave_disconnect_server(ROUTER_INSTANCE *router, ROUTER_SLAVE *slave, int server_id) +{ + ROUTER_OBJECT *router_obj= router->service->router; + ROUTER_SLAVE *sptr; + int n; + int server_found = 0; + + spinlock_acquire(&router->lock); + + sptr = router->slaves; + /* look for server_id among all registered slaves */ + while (sptr) + { + /* don't examine slaves with state = 0 */ + if 
(sptr->state != 0 && sptr->serverid == server_id) + { + /* server_id found */ + server_found = 1; + + LOGIF(LT, (skygw_log_write(LOGFILE_TRACE, "DISCONNECT SERVER: closing [%s], server id [%d]", + sptr->dcb->remote, server_id))); + + /* send server_id with disconnect state to client */ + n = blr_slave_send_disconnected_server(router, slave, server_id, 1); + + /* force session close for matched slave */ + router_obj->closeSession(router->service->router_instance, sptr); + + break; + } else { + sptr = sptr->next; + } + } + + spinlock_release(&router->lock); + + /** server id was not found + * send server_id with not found state to the client + */ + if (!server_found) + { + n = blr_slave_send_disconnected_server(router, slave, server_id, 0); + } + + return n; +} + +/** + * Send the response to the SQL command "DISCONNECT ALL' + * and close the connection to all slave servers + * + * @param router The binlog router instance + * @param slave The slave server to which we are sending the response + * @return Non-zero if data was sent to the client + */ +static int +blr_slave_disconnect_all(ROUTER_INSTANCE *router, ROUTER_SLAVE *slave) +{ + ROUTER_OBJECT *router_obj= router->service->router; + ROUTER_SLAVE *sptr; + char server_id[40]; + char state[40]; + uint8_t *ptr; + int len, seqno; + GWBUF *pkt; + int n = 0; + + /* preparing output result */ + blr_slave_send_fieldcount(router, slave, 2); + blr_slave_send_columndef(router, slave, "server_id", 0x03, 40, 2); + blr_slave_send_columndef(router, slave, "state", 0xf, 40, 3); + blr_slave_send_eof(router, slave, 4); + seqno = 5; + + spinlock_acquire(&router->lock); + sptr = router->slaves; + + while (sptr) + { + /* skip servers with state = 0 */ + if (sptr->state != 0) + { + LOGIF(LT, (skygw_log_write(LOGFILE_TRACE, "DISCONNECT ALL: closing [%s], server_id [%d]", + sptr->dcb->remote, sptr->serverid))); + + sprintf(server_id, "%d", sptr->serverid); + sprintf(state, "disconnected"); + + len = 5 + strlen(server_id) + strlen(state) 
+ 1; + if ((pkt = gwbuf_alloc(len)) == NULL) { + LOGIF(LE, (skygw_log_write(LOGFILE_ERROR, "Error: gwbuf memory allocation in " + "DISCONNECT ALL for [%s], server_id [%d]", + sptr->dcb->remote, sptr->serverid))); + + spinlock_release(&router->lock); + + return 0; + } + + ptr = GWBUF_DATA(pkt); + encode_value(ptr, len - 4, 24); // Add length of data packet + + ptr += 3; + *ptr++ = seqno++; // Sequence number in response + *ptr++ = strlen(server_id); // Length of result string + strncpy((char *)ptr, server_id, strlen(server_id)); // Result string + ptr += strlen(server_id); + *ptr++ = strlen(state); // Length of result string + strncpy((char *)ptr, state, strlen(state)); // Result string + ptr += strlen(state); + + n = slave->dcb->func.write(slave->dcb, pkt); + + /* force session close*/ + router_obj->closeSession(router->service->router_instance, sptr); + + } + sptr = sptr->next; + } + + spinlock_release(&router->lock); + + blr_slave_send_eof(router, slave, seqno); + + return n; +} diff --git a/server/modules/routing/debugcmd.c b/server/modules/routing/debugcmd.c index cf2232816..821e38f23 100644 --- a/server/modules/routing/debugcmd.c +++ b/server/modules/routing/debugcmd.c @@ -157,6 +157,10 @@ struct subcommand showoptions[] = { "Show all configured servers", "Show all configured servers", {0, 0, 0} }, + { "serversjson", 0, dprintAllServersJson, + "Show all configured servers in JSON format", + "Show all configured servers in JSON format", + {0, 0, 0} }, { "services", 0, dprintAllServices, "Show all configured services in MaxScale", "Show all configured services in MaxScale", diff --git a/server/modules/routing/readwritesplit/readwritesplit.c b/server/modules/routing/readwritesplit/readwritesplit.c index 80c391fad..552ecc4fc 100644 --- a/server/modules/routing/readwritesplit/readwritesplit.c +++ b/server/modules/routing/readwritesplit/readwritesplit.c @@ -1373,8 +1373,19 @@ static route_target_t get_route_target ( * backends but since this is SELECT that is not 
possible: * 1. response set is not handled correctly in clientReply and * 2. multiple results can degrade performance. + * + * Prepared statements are an exception to this since they do not + * actually do anything but only prepare the statement to be used. + * They can be safely routed to all backends since the execution + * is done later. + * + * With prepared statement caching the task of routing + * the execution of the prepared statements to the right server would be + * an easy one. Currently this is not supported. */ - if (QUERY_IS_TYPE(qtype, QUERY_TYPE_READ)) + if (QUERY_IS_TYPE(qtype, QUERY_TYPE_READ) && + !( QUERY_IS_TYPE(qtype, QUERY_TYPE_PREPARE_STMT) || + QUERY_IS_TYPE(qtype, QUERY_TYPE_PREPARE_NAMED_STMT))) { LOGIF(LE, (skygw_log_write_flush( LOGFILE_ERROR, diff --git a/utils/skygw_utils.cc b/utils/skygw_utils.cc index 272a54f9a..00b63b368 100644 --- a/utils/skygw_utils.cc +++ b/utils/skygw_utils.cc @@ -31,7 +31,7 @@ const char* timestamp_formatstr = "%04d-%02d-%02d %02d:%02d:%02d "; /** One for terminating '\0' */ -const int timestamp_len = 4+1 +2+1 +2+1 +2+1 +2+1 +2+3 +1; +const size_t timestamp_len = (4+1 +2+1 +2+1 +2+1 +2+1 +2+3 +1) * sizeof(char); /** Single-linked list for storing test cases */ @@ -662,7 +662,7 @@ bool mlist_cursor_move_to_first( /** End of mlist */ -int get_timestamp_len(void) +size_t get_timestamp_len(void) { return timestamp_len; } @@ -682,13 +682,13 @@ int get_timestamp_len(void) * @details (write detailed description here) * */ -int snprint_timestamp( +size_t snprint_timestamp( char* p_ts, - int tslen) + size_t tslen) { time_t t; struct tm tm; - int rval; + size_t rval; if (p_ts == NULL) { rval = 0; @@ -708,7 +708,7 @@ int snprint_timestamp( tm.tm_min, tm.tm_sec); - rval = strlen(p_ts); + rval = strlen(p_ts)*sizeof(char); retblock: return rval; } diff --git a/utils/skygw_utils.h b/utils/skygw_utils.h index eb1fb0371..cd3793048 100644 --- a/utils/skygw_utils.h +++ b/utils/skygw_utils.h @@ -124,8 +124,8 @@ int 
skygw_thread_start(skygw_thread_t* thr); skygw_thr_state_t skygw_thread_get_state(skygw_thread_t* thr); pthread_t skygw_thread_gettid(skygw_thread_t* thr); -int get_timestamp_len(void); -int snprint_timestamp(char* p_ts, int tslen); +size_t get_timestamp_len(void); +size_t snprint_timestamp(char* p_ts, size_t tslen); EXTERN_C_BLOCK_BEGIN