Merge branch 'develop' into MAX-324
@ -34,12 +34,13 @@ set(CMAKE_C_FLAGS "-Wall -fPIC")
|
||||
set(CMAKE_CXX_FLAGS "-Wall -fPIC")
|
||||
set(DEBUG_FLAGS "-ggdb -pthread -pipe -Wformat -fstack-protector --param=ssp-buffer-size=4")
|
||||
|
||||
if((CMAKE_C_COMPILER_ID STREQUAL "GNU") AND (NOT (CMAKE_C_COMPILER_VERSION VERSION_LESS 4.2)))
|
||||
message(STATUS "C Compiler supports: -Werror=format-security")
|
||||
set(DEBUG_FLAGS "${DEBUG_FLAGS} -Werror=format-security")
|
||||
if(CMAKE_VERSION VERSION_GREATER 2.6)
|
||||
if((CMAKE_C_COMPILER_ID STREQUAL "GNU") AND (NOT (CMAKE_C_COMPILER_VERSION VERSION_LESS 4.2)))
|
||||
message(STATUS "C Compiler supports: -Werror=format-security")
|
||||
set(DEBUG_FLAGS "${DEBUG_FLAGS} -Werror=format-security")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
|
||||
if(BUILD_TYPE STREQUAL Debug)
|
||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${DEBUG_FLAGS} -DSS_DEBUG")
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${DEBUG_FLAGS} -DSS_DEBUG")
|
||||
|
19
Documentation/About/COPYRIGHT.md
Normal file
@ -0,0 +1,19 @@
|
||||
This source code is distributed as part of MariaDB Corporation MaxScale. It is free
|
||||
software: you can redistribute it and/or modify it under the terms of the
|
||||
GNU General Public License as published by the Free Software Foundation,
|
||||
version 2.
|
||||
|
||||
This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
|
||||
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
|
||||
details.
|
||||
|
||||
You should have received a copy of the GNU General Public License along with
|
||||
this program; if not, write to the Free Software Foundation, Inc.,
|
||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
Copyright MariaDB Corporation Ab 2013
|
||||
Tekniikantie 12
|
||||
02150 Espoo
|
||||
Finland
|
||||
|
88
Documentation/About/LICENSE.md
Normal file
@ -0,0 +1,88 @@
|
||||
|
||||
|
||||
GNU GENERAL PUBLIC LICENSE
|
||||
|
||||
Version 2, June 1991
|
||||
|
||||
Copyright (C) 1989, 1991 Free Software Foundation, Inc.
|
||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
|
||||
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
Preamble
|
||||
|
||||
The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Lesser General Public License instead.) You can apply it to your programs, too.
|
||||
|
||||
When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things.
|
||||
|
||||
To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it.
|
||||
|
||||
For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights.
|
||||
|
||||
We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software.
|
||||
|
||||
Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations.
|
||||
|
||||
Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all.
|
||||
|
||||
The precise terms and conditions for copying, distribution and modification follow.
|
||||
|
||||
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
|
||||
|
||||
0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you".
|
||||
|
||||
Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does.
|
||||
|
||||
1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program.
|
||||
|
||||
You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee.
|
||||
|
||||
2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions:
|
||||
|
||||
a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change.
|
||||
b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License.
|
||||
c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.)
|
||||
These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it.
|
||||
|
||||
Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program.
|
||||
|
||||
In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License.
|
||||
|
||||
3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following:
|
||||
|
||||
a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or,
|
||||
b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or,
|
||||
c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.)
|
||||
The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable.
|
||||
|
||||
If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code.
|
||||
|
||||
4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance.
|
||||
|
||||
5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it.
|
||||
|
||||
6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License.
|
||||
|
||||
7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program.
|
||||
|
||||
If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances.
|
||||
|
||||
It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice.
|
||||
|
||||
This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License.
|
||||
|
||||
8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License.
|
||||
|
||||
9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns.
|
||||
|
||||
Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation.
|
||||
|
||||
10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally.
|
||||
|
||||
NO WARRANTY
|
||||
|
||||
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
||||
|
||||
12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
104
Documentation/About/Limitations.md
Normal file
@ -0,0 +1,104 @@
|
||||
# Limitations and Known Issues within MaxScale
|
||||
|
||||
The purpose of this documentation is to provide a central location that will document known issues and limitations within the MaxScale product and the plugins that form part of that product. Since limitations may relate to specific plugins or to MaxScale as a whole, this document is divided into a number of sections, the purpose of which is to isolate the limitations to the components which exhibit them.
|
||||
|
||||
## Limitations in the MaxScale core
|
||||
|
||||
This section describes the limitations that are common to all configuration of plugins with MaxScale.
|
||||
|
||||
## Limitations with MySQL Protocol support
|
||||
|
||||
* Compression
|
||||
|
||||
* SSL
|
||||
|
||||
Neither capability is included in the MySQL server handshake
|
||||
|
||||
* LOAD DATA LOCAL INFILE currently not supported
|
||||
|
||||
## Limitations with MySQL Master/Slave Replication monitoring
|
||||
|
||||
## Limitations with Galera Cluster Monitoring
|
||||
|
||||
Master selection is based only on MIN(wsrep_local_index), no other server parameter.
|
||||
|
||||
## Limitations in the connection router
|
||||
|
||||
If Master changes (ie. new Master promotion) during current connection the router cannot check the change
|
||||
|
||||
## Limitations in the Read/Write Splitter
|
||||
|
||||
### Scale-out limitations
|
||||
|
||||
In a master-slave replication cluster, read-only queries are also routed to the master in the following situations:
|
||||
|
||||
* if they are executed inside an open transaction
|
||||
|
||||
* in case of prepared statement execution
|
||||
|
||||
* statement includes a stored procedure, or an UDF call
|
||||
|
||||
### Limitations in client session handling
|
||||
|
||||
Some of the queries that the client sends are routed to all backends instead of being sent to just one of the servers. These queries include "USE <db name>" and "SET autocommit=0" among many others. Read/Write Splitter sends a copy of these queries to each backend server and forwards the first reply it receives to the client. Below is a list of MySQL commands which we call session commands:
|
||||
|
||||
COM_INIT_DB (USE <db name> creates this)
|
||||
|
||||
COM_CHANGE_USER
|
||||
|
||||
COM_STMT_CLOSE
|
||||
|
||||
COM_STMT_SEND_LONG_DATA
|
||||
|
||||
COM_STMT_RESET
|
||||
|
||||
COM_STMT_PREPARE
|
||||
|
||||
Also these are session commands:
|
||||
|
||||
COM_QUIT (no response)
|
||||
|
||||
COM_REFRESH
|
||||
|
||||
COM_DEBUG
|
||||
|
||||
COM_PING
|
||||
|
||||
In addition there are query types which belong to the same group:
|
||||
|
||||
SQLCOM_CHANGE_DB
|
||||
|
||||
SQLCOM_DEALLOCATE_PREPARE
|
||||
|
||||
SQLCOM_PREPARE
|
||||
|
||||
SQLCOM_SET_OPTION
|
||||
|
||||
SELECT ..INTO variable|OUTFILE|DUMPFILE
|
||||
|
||||
Then there are queries which modify session characteristics, listed as derived, internal RWSplit types:
|
||||
|
||||
QUERY_TYPE_ENABLE_AUTOCOMMIT
|
||||
|
||||
QUERY_TYPE_DISABLE_AUTOCOMMIT
|
||||
|
||||
There is a possibility for misbehavior; if "USE mytable" was executed in one of the slaves and it failed, it may be due to replication lag rather than the fact that it did not exist. Thus the same command may end up with different results among the backend servers. This disparity is missed.
|
||||
|
||||
The above-mentioned behavior can be partially controlled with the RWSplit configuration parameter called
|
||||
|
||||
use_sql_variables_in=[master|all] (master)
|
||||
|
||||
Server-side session variables are called as SQL variables. If "master" or no value is set, SQL variables are read and written in master only. Autocommit values and prepared statements are routed to all nodes always.
|
||||
|
||||
NOTE: If variable is written as a part of write query, it is treated like write query and not routed to all servers. For example, INSERT INTO test.t1 VALUES (@myvar:= 7) .
|
||||
|
||||
Examples:
|
||||
|
||||
If new database "db" was created and client executes “USE db” and it is routed to slave before the CREATE DATABASE clause is replicated to all slaves there is a risk of executing query in wrong database. Similarly, if any response that RWSplit sends back to the client differ from that of the master, there is a risk for misbehavior.
|
||||
|
||||
Most imaginable reasons are related to replication lag but it could be possible that a slave fails to execute something because of some non-fatal, temporary failure while execution of same command succeeds in other backends.
|
||||
|
||||
## Authentication Related Limitations
|
||||
|
||||
MySQL old passwords are not supported
|
||||
|
140
Documentation/About/MaxScale-1.0.4-Release-Notes.md
Normal file
@ -0,0 +1,140 @@
|
||||
# MaxScale Release Notes
|
||||
|
||||
1.0.4 GA
|
||||
|
||||
This document details the changes in version 1.0.4 since the release of the 1.0.2 Release Candidate of the MaxScale product.
|
||||
|
||||
## New Features
|
||||
|
||||
No new features have been introduced since the released candidate was released.
|
||||
|
||||
## Bug Fixes
|
||||
|
||||
A number of bug fixes have been applied between the 1.0.2 Release Candidate and this GA release. The table below lists the bugs that have been resolved. The details for each of these may be found in bugs.mariadb.com.
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<td>ID</td>
|
||||
<td>Summary</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>644</td>
|
||||
<td>Buffers that were cloned using the gwbuf_clone routine failed to initialise the buffer lock structure correctly.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>643</td>
|
||||
<td>Recursive filter definitions in the configuration file could cause MaxScale to loop</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>665</td>
|
||||
<td>An access to memory that had already been freed could be made within the MaxScale core</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>664</td>
|
||||
<td>MySQL Authentication code could access memory that had already been freed.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>673</td>
|
||||
<td>MaxScale could crash if it had an empty user table and the MaxAdmin show dbusers command was run</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>670</td>
|
||||
<td>The tee filter could lose statements on the branch service if the branch service was significantly slower at executing statements compared with the main service.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>653</td>
|
||||
<td>Memory corruption could occur with extremely long hostnames in the mysql.user table.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>657</td>
|
||||
<td>If the branch service of a tee filter shutdown unexpectedly then MaxScale could fail</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>654</td>
|
||||
<td>Missing quotes in MaxAdmin show dbusers command could cause MaxAdmin to crash</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>677</td>
|
||||
<td>A race condition existed in the tee filter client reply handling</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>658</td>
|
||||
<td>The readconnroute router did not correctly close sessions when a backend database failed</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>662</td>
|
||||
<td>MaxScale startup hangs if no backend servers respond</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>676</td>
|
||||
<td>MaxScale writes a log entry, "Write to backend failed. Session closed." when changing default database via readwritesplit with max_slave_connections != 100%</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>650</td>
|
||||
<td>Tee filter does not correctly detect missing branch service</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>645</td>
|
||||
<td>Tee filter can hang MaxScale if the read/write splitter is used</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>678</td>
|
||||
<td>Tee filter does not always send full query to branch service</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>679</td>
|
||||
<td>A shared pointer in the service was leading to misleading service states</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>680</td>
|
||||
<td>The Read/Write Splitter can not load users if there are no databases available at startup</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>681</td>
|
||||
<td>The Read/Write Splitter could crash if the value of max_slave_connections was set to a low percentage and only a small number of backend servers are available</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
|
||||
## Known Issues
|
||||
|
||||
There are a number of bugs and known limitations within this version of MaxScale; the most serious of these are listed below.
|
||||
|
||||
* The SQL construct "LOAD DATA LOCAL INFILE" is not fully supported.
|
||||
|
||||
* The Read/Write Splitter is a little too strict when it receives errors from slave servers during execution of session commands. This can result in sessions being terminated in situations in which MaxScale could recover without terminating the sessions.
|
||||
|
||||
* MaxScale can not manage authentication that uses wildcard matching in hostnames in the mysql.user table of the backend database. The only wildcards that can be used are in IP address entries.
|
||||
|
||||
* When users have different passwords based on the host from which they connect MaxScale is unable to determine which password it should use to connect to the backend database. This results in failed connections and unusable usernames in MaxScale.
|
||||
|
||||
# Packaging
|
||||
|
||||
Both RPM and Debian packages are available for MaxScale. In addition to the tar based releases previously distributed, we now provide packages for:
|
||||
|
||||
* CentOS/RedHat 5
|
||||
|
||||
* CentOS/RedHat 6
|
||||
|
||||
* CentOS/RedHat 7
|
||||
|
||||
* Debian 6
|
||||
|
||||
* Debian 7
|
||||
|
||||
* Ubuntu 12.04 LTS
|
||||
|
||||
* Ubuntu 13.10
|
||||
|
||||
* Ubuntu 14.04 LTS
|
||||
|
||||
* Fedora 19
|
||||
|
||||
* Fedora 20
|
||||
|
||||
* OpenSuSE 13
|
||||
|
||||
# MaxScale Home Default Value
|
||||
|
||||
The installation assumes that the default value for the environment variable MAXSCALE_HOME is set to /usr/local/skysql/maxscale. This is hard coded in the service startup file that is placed in /etc/init.d/maxscale by the installation process.
|
||||
|
37
Documentation/About/SETUP.md
Normal file
@ -0,0 +1,37 @@
|
||||
Installation and startup
|
||||
|
||||
Untar the binary distribution in the desired location,
|
||||
e.g. /usr/local/skysql
|
||||
|
||||
Alternatively build from the source code using the instructions
|
||||
in the README file and execute make install.
|
||||
|
||||
Simply set the environment variable MAXSCALE_HOME to point to the
|
||||
MaxScale directory, found inside the path into which the files have been copied,
|
||||
e.g. MAXSCALE_HOME=/usr/local/skysql/maxscale/MaxScale
|
||||
|
||||
You may also optionally need to set LD_LIBRARY_PATH to include the 'lib' folder,
|
||||
found inside the path into which the files have been copied,
|
||||
e.g. LD_LIBRARY_PATH=/usr/local/skysql/maxscale/lib
|
||||
|
||||
Because we need the libmysqld library for parsing we must create a
|
||||
valid my.cnf file to enable the library to be used. Copy the my.cnf
|
||||
to $MAXSCALE_HOME/mysql/my.cnf.
|
||||
|
||||
To start MaxScale execute the command 'maxscale' from the bin folder,
|
||||
e.g. /usr/local/skysql/maxscale/bin/maxscale
|
||||
|
||||
Configuration
|
||||
|
||||
You need to edit the file MaxScale.cnf in $MAXSCALE_HOME/etc, you should
|
||||
define the set of server definitions you require, with the addresses
|
||||
and ports of those servers. Also define the listening ports for your
|
||||
various services.
|
||||
|
||||
In order to view the internal activity of the gateway you can telnet to
|
||||
the port defined for the telnet listener. Initially you may login with
|
||||
the user name of "admin" and the password "skysql". Once connected type
|
||||
help for an overview of the commands and help <command> for the more
|
||||
detailed help on commands. Use the add user command to add a new user,
|
||||
this will also remove the admin/skysql user.
|
||||
|
6158
Documentation/Design-Documents/assets/css/bootstrap.css
vendored
Normal file
1579
Documentation/Design-Documents/assets/css/icons.css
Normal file
269
Documentation/Design-Documents/assets/css/icons.less
Normal file
@ -0,0 +1,269 @@
|
||||
/*
|
||||
* Copyright (c) 2013-2014 Minkyu Lee. All rights reserved.
|
||||
*
|
||||
* NOTICE: All information contained herein is, and remains the
|
||||
* property of Minkyu Lee. The intellectual and technical concepts
|
||||
* contained herein are proprietary to Minkyu Lee and may be covered
|
||||
* by Republic of Korea and Foreign Patents, patents in process,
|
||||
* and are protected by trade secret or copyright law.
|
||||
* Dissemination of this information or reproduction of this material
|
||||
* is strictly forbidden unless prior written permission is obtained
|
||||
* from Minkyu Lee (niklaus.lee@gmail.com).
|
||||
*
|
||||
*/
|
||||
|
||||
@ui-icon-url: url('../icons/icons-light.png');
|
||||
@icon-width: 16px;
|
||||
@icon-height: 16px;
|
||||
|
||||
._icon-base (@xpos, @ypos, @offset: 0px) {
|
||||
background-image: @ui-icon-url !important;
|
||||
background-repeat: no-repeat;
|
||||
background-position: (@icon-width * @xpos * -1 + @offset) (@icon-height * @ypos * -1 + @offset);
|
||||
width: @icon-width;
|
||||
height: @icon-height;
|
||||
background-clip: content-box;
|
||||
}
|
||||
|
||||
// UML Icons
|
||||
|
||||
// Annotations & Etc.
|
||||
._icon-Project { ._icon-base(5, 0); }
|
||||
._icon-Tag { ._icon-base(13, 13); }
|
||||
._icon-Lock { ._icon-base(0, 13); }
|
||||
._icon-Rectangle { ._icon-base(6, 12); }
|
||||
._icon-RoundedRectangle { ._icon-base(7, 12); }
|
||||
._icon-Ellipse { ._icon-base(4, 11); }
|
||||
._icon-UMLNote { ._icon-base(2, 0); }
|
||||
._icon-UMLNoteLink { ._icon-base(3, 0); }
|
||||
._icon-UMLText { ._icon-base(4, 0); }
|
||||
._icon-UMLConstraint { ._icon-base(11, 3); }
|
||||
// Diagrams
|
||||
._icon-UMLClassDiagram { ._icon-base(7, 6); }
|
||||
._icon-UMLObjectDiagram { ._icon-base(10, 6); }
|
||||
._icon-UMLPackageDiagram { ._icon-base(12, 6); }
|
||||
._icon-UMLUseCaseDiagram { ._icon-base(8, 6); }
|
||||
._icon-UMLSequenceDiagram { ._icon-base(9, 6); }
|
||||
._icon-UMLCommunicationDiagram { ._icon-base(11, 6); }
|
||||
._icon-UMLStatechartDiagram { ._icon-base(13, 6); }
|
||||
._icon-UMLActivityDiagram { ._icon-base(0, 7); }
|
||||
._icon-UMLComponentDiagram { ._icon-base(1, 7); }
|
||||
._icon-UMLDeploymentDiagram { ._icon-base(2, 7); }
|
||||
._icon-UMLCompositeStructureDiagram { ._icon-base(11, 12); }
|
||||
._icon-UMLProfileDiagram { ._icon-base(0, 10); }
|
||||
// Backbone
|
||||
._icon-UMLAttribute { ._icon-base(4, 3); }
|
||||
._icon-UMLOperation { ._icon-base(8, 3); }
|
||||
._icon-UMLParameter { ._icon-base(12, 3); }
|
||||
._icon-UMLTemplateParameter { ._icon-base(13, 3); }
|
||||
._icon-UMLFrame { ._icon-base(13, 11); }
|
||||
// Packages
|
||||
._icon-UMLPackage { ._icon-base(13, 0); }
|
||||
._icon-UMLModel { ._icon-base(7, 0); }
|
||||
._icon-UMLSubsystem { ._icon-base(10, 0); }
|
||||
._icon-UMLProfile { ._icon-base(10, 0); }
|
||||
// Classes
|
||||
._icon-UMLClass { ._icon-base(2, 1); }
|
||||
._icon-UMLInterface { ._icon-base(3, 1); }
|
||||
._icon-UMLSignal { ._icon-base(5, 1); }
|
||||
._icon-UMLDataType { ._icon-base(5, 3); }
|
||||
._icon-UMLPrimitiveType { ._icon-base(6, 3); }
|
||||
._icon-UMLEnumerationLiteral { ._icon-base(0, 4); }
|
||||
._icon-UMLEnumeration { ._icon-base(4, 1); }
|
||||
._icon-UMLStereotype { ._icon-base(2, 1); }
|
||||
._icon-UMLDependency { ._icon-base(4, 5); }
|
||||
._icon-UMLGeneralization { ._icon-base(7, 5); }
|
||||
._icon-UMLInterfaceRealization { ._icon-base(6, 6); }
|
||||
._icon-UMLComponentRealization { ._icon-base(6, 6); }
|
||||
._icon-UMLAssociationEnd { ._icon-base(10, 5); }
|
||||
._icon-UMLAssociation { ._icon-base(5, 5); }
|
||||
._icon-UMLDirectedAssociation { ._icon-base(0, 11); }
|
||||
._icon-UMLAggregation { ._icon-base(1, 11); }
|
||||
._icon-UMLComposition { ._icon-base(2, 11); }
|
||||
._icon-UMLAssociationClassLink { ._icon-base(6, 5); }
|
||||
._icon-UMLContainment { ._icon-base(1, 13); }
|
||||
// Instances
|
||||
._icon-UMLSlot { ._icon-base(10, 13); }
|
||||
._icon-UMLObject { ._icon-base(3, 5); }
|
||||
._icon-UMLArtifactInstance { ._icon-base(2, 13); }
|
||||
._icon-UMLComponentInstance { ._icon-base(8, 1); }
|
||||
._icon-UMLNodeInstance { ._icon-base(10, 1); }
|
||||
._icon-UMLLinkEnd { ._icon-base(10, 5); }
|
||||
._icon-UMLLink { ._icon-base(8, 5); }
|
||||
._icon-UMLDirectedLink { ._icon-base(11, 5); }
|
||||
// Composite Structures
|
||||
._icon-UMLPort { ._icon-base(5, 12); }
|
||||
._icon-UMLPart { ._icon-base(4, 12); }
|
||||
._icon-UMLConnectorEnd { ._icon-base(10, 5); }
|
||||
._icon-UMLConnector { ._icon-base(10, 11); }
|
||||
._icon-UMLSelfConnector { ._icon-base(11, 11); }
|
||||
._icon-UMLCollaboration { ._icon-base(2, 2); }
|
||||
._icon-UMLCollaborationUse { ._icon-base(3, 2); }
|
||||
// Components
|
||||
._icon-UMLArtifact { ._icon-base(8, 12); }
|
||||
._icon-UMLComponent { ._icon-base(7, 1); }
|
||||
// Deployments
|
||||
._icon-UMLNode { ._icon-base(9, 1); }
|
||||
._icon-UMLDeployment { ._icon-base(4, 5); } // Temporally ref to Dependency Icon
|
||||
._icon-UMLCommunicationPath { ._icon-base(5, 5); }
|
||||
// Use Cases
|
||||
._icon-UMLExtensionPoint { ._icon-base(9, 12); }
|
||||
._icon-UMLUseCase { ._icon-base(11, 1); }
|
||||
._icon-UMLActor { ._icon-base(12, 1); }
|
||||
._icon-UMLInclude { ._icon-base(4, 6); }
|
||||
._icon-UMLExtend { ._icon-base(5, 6); }
|
||||
._icon-UMLUseCaseSubject { ._icon-base(8, 11); }
|
||||
// State Machines
|
||||
._icon-UMLStateMachine { ._icon-base(0, 2); }
|
||||
._icon-UMLRegion { ._icon-base(0, 1); }
|
||||
._icon-UMLInitialState { ._icon-base(9, 2); }
|
||||
._icon-UMLJunction { ._icon-base(11, 2); }
|
||||
._icon-UMLChoice { ._icon-base(8, 2); }
|
||||
._icon-UMLShallowHistory { ._icon-base(13, 2); }
|
||||
._icon-UMLDeepHistory { ._icon-base(0, 3); }
|
||||
._icon-UMLJoin { ._icon-base(10, 2); }
|
||||
._icon-UMLFork { ._icon-base(11, 0); }
|
||||
._icon-UMLEntryPoint { ._icon-base(12, 2); }
|
||||
._icon-UMLExitPoint { ._icon-base(12, 11); }
|
||||
._icon-UMLTerminate { ._icon-base(12, 0); }
|
||||
._icon-UMLState { ._icon-base(1, 2); }
|
||||
._icon-UMLOrthogonalState { ._icon-base(1, 1); }
|
||||
._icon-UMLSubmachineState { ._icon-base(3, 3); }
|
||||
._icon-UMLFinalState { ._icon-base(1, 3); }
|
||||
._icon-UMLTransition { ._icon-base(1, 4); }
|
||||
._icon-UMLSelfTransition { ._icon-base(2, 4); }
|
||||
._icon-UMLEffect { ._icon-base(8, 4); }
|
||||
._icon-UMLEntryActivity { ._icon-base(9, 4); }
|
||||
._icon-UMLDoActivity { ._icon-base(10, 4); }
|
||||
._icon-UMLExitActivity { ._icon-base(11, 4); }
|
||||
._icon-UMLEvent { ._icon-base(12, 4); }
|
||||
._icon-UMLOpaqueBehavior { ._icon-base(6, 1); }
|
||||
._icon-UMLConnectionPointReference { ._icon-base(11, 13); }
|
||||
// Activity Graphs
|
||||
._icon-UMLActivity { ._icon-base(13, 1); }
|
||||
._icon-UMLInputPin { ._icon-base(7, 2); }
|
||||
._icon-UMLOutputPin { ._icon-base(7, 3); }
|
||||
._icon-UMLAction { ._icon-base(6, 2); }
|
||||
._icon-UMLObjectNode { ._icon-base(3, 12); }
|
||||
._icon-UMLInitialNode { ._icon-base(9, 2); }
|
||||
._icon-UMLActivityFinalNode { ._icon-base(1, 3); }
|
||||
._icon-UMLFlowFinalNode { ._icon-base(12, 11); }
|
||||
._icon-UMLForkNode { ._icon-base(11, 0); }
|
||||
._icon-UMLJoinNode { ._icon-base(10, 2); }
|
||||
._icon-UMLMergeNode { ._icon-base(8, 0); }
|
||||
._icon-UMLDecisionNode { ._icon-base(9, 0); }
|
||||
._icon-UMLControlFlow { ._icon-base(1, 4); }
|
||||
._icon-UMLObjectFlow { ._icon-base(6, 0); }
|
||||
._icon-UMLSwimlaneVert { ._icon-base(2, 3); }
|
||||
._icon-UMLSwimlaneHorz { ._icon-base(7, 11); }
|
||||
._icon-UMLSendSignal { ._icon-base(6, 11); }
|
||||
._icon-UMLAcceptSignal { ._icon-base(5, 11); }
|
||||
// Interactions
|
||||
._icon-UMLLifeline { ._icon-base(2, 5); }
|
||||
._icon-UMLInteraction { ._icon-base(4, 2); }
|
||||
._icon-UMLStateInvariant { ._icon-base(8, 13); }
|
||||
._icon-UMLContinuation { ._icon-base(9, 13); }
|
||||
._icon-UMLInteractionOperand { ._icon-base(1, 12); }
|
||||
._icon-UMLCombinedFragment { ._icon-base(9, 11); }
|
||||
._icon-UMLInteractionUse { ._icon-base(5, 13); }
|
||||
._icon-UMLEndpoint { ._icon-base(6, 13); }
|
||||
._icon-UMLGate { ._icon-base(7, 13); }
|
||||
._icon-UMLSelfLink { ._icon-base(9, 5); }
|
||||
._icon-UMLMessage { ._icon-base(1, 6); }
|
||||
._icon-UMLSelfMessage { ._icon-base(0, 6); }
|
||||
._icon-UMLLostMessage { ._icon-base(3, 13); }
|
||||
._icon-UMLFoundMessage { ._icon-base(4, 13); }
|
||||
._icon-UMLForwardMessage { ._icon-base(12, 5); }
|
||||
._icon-UMLReverseMessage { ._icon-base(13, 5); }
|
||||
// Profiles
|
||||
// NOTE(review): `._icon-UMLProfile` is also declared in the "Packages"
// section above with `._icon-base(10, 0)`; this later rule wins in the
// cascade, so cell (12, 13) is what actually renders — confirm intended.
._icon-UMLProfile { ._icon-base(12, 13); }
|
||||
._icon-UMLMetaClass { ._icon-base(12, 9); }
|
||||
._icon-UMLImage { ._icon-base(0, 12); }
|
||||
// NOTE(review): `._icon-UMLStereotype` is also declared in the "Classes"
// section above with `._icon-base(2, 1)`; this later rule wins in the
// cascade, so cell (13, 9) is what actually renders — confirm intended.
._icon-UMLStereotype { ._icon-base(13, 9); }
|
||||
._icon-UMLExtension { ._icon-base(11, 9); }
|
||||
// Robustness
|
||||
._icon-UMLBoundary { ._icon-base(0, 9); }
|
||||
._icon-UMLEntity { ._icon-base(1, 9); }
|
||||
._icon-UMLControl { ._icon-base(2, 9); }
|
||||
|
||||
// ColorPicker Icons
|
||||
.tool-icon-font-color { ._icon-base(0, 8); }
|
||||
.tool-icon-line-color { ._icon-base(1, 8); }
|
||||
.tool-icon-fill-color { ._icon-base(2, 8); }
|
||||
// .tool-icon-font-color { ._icon-base(0, 8, 3px); }
|
||||
// .tool-icon-line-color { ._icon-base(1, 8, 3px); }
|
||||
// .tool-icon-fill-color { ._icon-base(2, 8, 3px); }
|
||||
|
||||
|
||||
// Toolbar Icons
|
||||
.tool-icon-select { ._icon-base(0, 0); }
|
||||
.tool-icon-zoom-in { ._icon-base(7, 9); }
|
||||
.tool-icon-zoom-out { ._icon-base(8, 9); }
|
||||
.tool-icon-stereotype-display { ._icon-base(13, 10); }
|
||||
.tool-icon-line-style { ._icon-base(3, 8); }
|
||||
.tool-icon-auto-resize { ._icon-base(12, 10); }
|
||||
.tool-icon-show-namespace { ._icon-base(7, 8); }
|
||||
.tool-icon-show-properties { ._icon-base(3, 11); }
|
||||
.tool-icon-suppress-attributes { ._icon-base(4, 8); }
|
||||
.tool-icon-suppress-operations { ._icon-base(5, 8); }
|
||||
.tool-icon-suppress-literals { ._icon-base(6, 8); }
|
||||
.tool-icon-show-operation-signature { ._icon-base(8, 8); }
|
||||
.tool-icon-show-compartment-visibility { ._icon-base(9, 8); }
|
||||
.tool-icon-show-compartment-stereotype { ._icon-base(10, 8); }
|
||||
.tool-icon-undo { ._icon-base(11, 7); }
|
||||
.tool-icon-redo { ._icon-base(12, 7); }
|
||||
.tool-icon-copy { ._icon-base(8, 7); }
|
||||
.tool-icon-cut { ._icon-base(7, 7); }
|
||||
.tool-icon-paste { ._icon-base(9, 7); }
|
||||
.tool-icon-delete { ._icon-base(10, 7); }
|
||||
|
||||
.tool-icon-bringtofront { ._icon-base(11, 8); }
|
||||
.tool-icon-sendtoback { ._icon-base(12, 8); }
|
||||
|
||||
.tool-icon-moveup { ._icon-base(1, 10); }
|
||||
.tool-icon-movedown { ._icon-base(2, 10); }
|
||||
.tool-icon-add { ._icon-base(3, 10); }
|
||||
// NOTE(review): `.tool-icon-delete` is also declared in the toolbar section
// above with `._icon-base(10, 7)`; this later rule silently overrides it,
// so both toolbar and list delete buttons get cell (4, 10) — confirm intended.
.tool-icon-delete { ._icon-base(4, 10); }
|
||||
.tool-icon-edit { ._icon-base(5, 10); }
|
||||
|
||||
// Quick Dialog Icons
|
||||
.quick-edit-icon-public { ._icon-base(8, 10); }
|
||||
.quick-edit-icon-protected { ._icon-base(9, 10); }
|
||||
.quick-edit-icon-private { ._icon-base(10, 10); }
|
||||
.quick-edit-icon-package { ._icon-base(11, 10); }
|
||||
|
||||
.quick-edit-icon-attribute { ._icon-base(4, 3); }
|
||||
.quick-edit-icon-operation { ._icon-base(8, 3); }
|
||||
.quick-edit-icon-literal { ._icon-base(0, 4); }
|
||||
|
||||
.quick-edit-icon-moveup { ._icon-base(1, 10); }
|
||||
.quick-edit-icon-movedown { ._icon-base(2, 10); }
|
||||
.quick-edit-icon-add { ._icon-base(3, 10); }
|
||||
.quick-edit-icon-delete { ._icon-base(4, 10); }
|
||||
|
||||
.quick-edit-icon-associate { ._icon-base(5, 5); }
|
||||
.quick-edit-icon-shared { ._icon-base(1, 11); }
|
||||
.quick-edit-icon-composite { ._icon-base(2, 11); }
|
||||
|
||||
.quick-edit-icon-navigable { ._icon-base(9, 12); }
|
||||
.quick-edit-icon-notnavigable { ._icon-base(10, 12); }
|
||||
|
||||
.quick-edit-icon-select-type { ._icon-base(2, 6); }
|
||||
.quick-edit-icon-create-type { ._icon-base(3, 6); }
|
||||
|
||||
.quick-edit-icon-select-operation { ._icon-base(9, 3); }
|
||||
.quick-edit-icon-create-operation { ._icon-base(10, 3); }
|
||||
|
||||
.quick-edit-icon-select-signal { ._icon-base(0, 5); }
|
||||
.quick-edit-icon-create-signal { ._icon-base(1, 5); }
|
||||
|
||||
|
||||
// Others
|
||||
|
||||
.validation-error {
|
||||
background: url(icons/warning.svg) no-repeat;
|
||||
}
|
||||
|
||||
.validation-ok {
|
||||
background: url(icons/okay.svg) no-repeat;
|
||||
}
|
36
Documentation/Design-Documents/assets/css/jquery.bonsai.css
Normal file
@ -0,0 +1,36 @@
|
||||
.bonsai,
|
||||
.bonsai li {
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
list-style: none;
|
||||
overflow: hidden;
|
||||
}
|
||||
|
||||
.bonsai li {
|
||||
position: relative;
|
||||
padding-left: 1.3em; /* padding for the thumb */
|
||||
}
|
||||
|
||||
li .thumb {
|
||||
margin: -1px 0 0 -1em; /* negative margin into the padding of the li */
|
||||
position: absolute;
|
||||
cursor: pointer;
|
||||
}
|
||||
|
||||
li.has-children > .thumb:after {
|
||||
content: '▸';
|
||||
}
|
||||
|
||||
li.has-children.expanded > .thumb:after {
|
||||
content: '▾';
|
||||
}
|
||||
|
||||
li.collapsed > ol.bonsai {
|
||||
height: 0;
|
||||
overflow: hidden;
|
||||
}
|
||||
|
||||
.bonsai .all,
|
||||
.bonsai .none {
|
||||
cursor: pointer;
|
||||
}
|
812
Documentation/Design-Documents/assets/css/main.css
Normal file
@ -0,0 +1,812 @@
|
||||
/* bootstrap.css override
|
||||
---------------------------------------------------------*/
|
||||
|
||||
/* Base page typography and background (bootstrap override). */
body {
    margin: 0;
    /* fix: was `!imporant` (misspelled), which made the whole declaration
       invalid and silently dropped — the intended font stack never applied */
    font-family: source-sans-pro, Helvetica, Arial, sans-serif !important;
    font-size: 14px;
    font-weight: 400;
    color: #555;
    background-color: #F8F8F8;
    -webkit-font-smoothing: antialiased;
}
|
||||
|
||||
.module, .left-section {
|
||||
overflow-y: auto;
|
||||
height: calc(100vh - 95px);
|
||||
}
|
||||
|
||||
a {
|
||||
color: #137cd4;
|
||||
}
|
||||
|
||||
a:focus {
|
||||
outline: none;
|
||||
-webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 0 2px #6fb5f1;
|
||||
-moz-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 0 2px #6fb5f1;
|
||||
box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 0 2px #6fb5f1;
|
||||
}
|
||||
|
||||
p {
|
||||
margin-bottom: 20px;
|
||||
}
|
||||
|
||||
code,
|
||||
pre {
|
||||
padding: 20px;
|
||||
font-family: source-code-pro, Monaco, Menlo, Consolas, "Courier New", monospace;
|
||||
font-size: 12px;
|
||||
color: #454545;
|
||||
-webkit-border-radius: 3px;
|
||||
-moz-border-radius: 3px;
|
||||
border-radius: 3px;
|
||||
}
|
||||
|
||||
p code,
|
||||
p pre,
|
||||
li code,
|
||||
li pre {
|
||||
border-radius: 2px;
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
h1,
|
||||
h2,
|
||||
h3,
|
||||
h4,
|
||||
h5,
|
||||
h6 {
|
||||
margin: 10px 0 0;
|
||||
font-weight: 300;
|
||||
line-height: 20px;
|
||||
color: #000;
|
||||
text-rendering: optimizelegibility;
|
||||
}
|
||||
|
||||
h1 small,
|
||||
h2 small,
|
||||
h3 small,
|
||||
h4 small,
|
||||
h5 small,
|
||||
h6 small {
|
||||
color: #888;
|
||||
}
|
||||
|
||||
h1,
|
||||
h2,
|
||||
h3 {
|
||||
line-height: 1.3em;
|
||||
}
|
||||
|
||||
h1 {
|
||||
font-size: 40px;
|
||||
font-weight: 400;
|
||||
margin-top: 30px;
|
||||
}
|
||||
|
||||
h2 {
|
||||
font-size: 30px;
|
||||
}
|
||||
|
||||
h3 {
|
||||
font-size: 27px;
|
||||
}
|
||||
|
||||
h4 {
|
||||
font-size: 17.5px;
|
||||
}
|
||||
|
||||
h5 {
|
||||
font-size: 14px;
|
||||
}
|
||||
|
||||
h6 {
|
||||
font-size: 11.9px;
|
||||
}
|
||||
|
||||
h1 small {
|
||||
font-size: 24.5px;
|
||||
}
|
||||
|
||||
dl {
|
||||
margin-bottom: 0;
|
||||
}
|
||||
|
||||
dt,
|
||||
dd {
|
||||
line-height: 20px;
|
||||
}
|
||||
|
||||
dt {
|
||||
color: #000;
|
||||
font-weight: 400;
|
||||
margin-bottom: 5px;
|
||||
-webkit-font-smoothing: subpixel-antialiased; /* this makes it slightly bolder */
|
||||
}
|
||||
|
||||
dd {
|
||||
display: inline-block;
|
||||
margin-bottom: 20px;
|
||||
}
|
||||
|
||||
input,
|
||||
button,
|
||||
select,
|
||||
textarea {
|
||||
font-family: source-sans-pro, Helvetica, Arial, sans-serif;
|
||||
}
|
||||
|
||||
table p {
|
||||
margin-bottom: 0px;
|
||||
}
|
||||
|
||||
.btn {
|
||||
padding: 4px 12px;
|
||||
margin-bottom: 0;
|
||||
*margin-left: .3em;
|
||||
font-size: 14px;
|
||||
line-height: 20px;
|
||||
color: #454545;
|
||||
text-align: center;
|
||||
text-shadow: none;
|
||||
background-color: #e5e9e9;
|
||||
*background-color: #e5e9e9;
|
||||
background-image: none;
|
||||
|
||||
border: 1px solid #cdcdcd;
|
||||
-webkit-border-radius: 3px;
|
||||
-moz-border-radius: 3px;
|
||||
border-radius: 3px;
|
||||
|
||||
-webkit-box-shadow: none;
|
||||
-moz-box-shadow: none;
|
||||
box-shadow: none;
|
||||
}
|
||||
|
||||
.btn:hover,
|
||||
.btn:focus,
|
||||
.btn:active,
|
||||
.btn.active,
|
||||
.btn.disabled,
|
||||
.btn[disabled] {
|
||||
color: #454545;
|
||||
background-color: #e5e9e9;
|
||||
*background-color: #e5e9e9;
|
||||
}
|
||||
|
||||
.btn:active,
|
||||
.btn.active {
|
||||
background-color: #d3d7d7;
|
||||
}
|
||||
|
||||
.btn:first-child {
|
||||
*margin-left: 0;
|
||||
}
|
||||
|
||||
.btn:hover,
|
||||
.btn:focus {
|
||||
color: #454545;
|
||||
text-decoration: none;
|
||||
background-position: 0 -15px;
|
||||
-webkit-transition: background-position 0.1s linear;
|
||||
-moz-transition: background-position 0.1s linear;
|
||||
-o-transition: background-position 0.1s linear;
|
||||
transition: background-position 0.1s linear;
|
||||
}
|
||||
|
||||
.btn:focus {
|
||||
border: 1px solid #2893ef;
|
||||
outline: 0;
|
||||
outline: thin dotted \9;
|
||||
/* IE6-9 */
|
||||
|
||||
-webkit-box-shadow: 0 0 0 1px #94ceff;
|
||||
-moz-box-shadow: 0 0 0 1px #94ceff;
|
||||
box-shadow: 0 0 0 1px #94ceff;
|
||||
}
|
||||
|
||||
.btn.active,
|
||||
.btn:active {
|
||||
background-image: none;
|
||||
outline: 0;
|
||||
-webkit-box-shadow: inset 0 1px 0 rgba(0, 0, 0, 0.15);
|
||||
-moz-box-shadow: inset 0 1px 0 rgba(0, 0, 0, 0.15);
|
||||
box-shadow: inset 0 1px 0 rgba(0, 0, 0, 0.15);
|
||||
}
|
||||
|
||||
.btn.disabled,
|
||||
.btn[disabled] {
|
||||
cursor: default;
|
||||
background-image: none;
|
||||
opacity: 0.65;
|
||||
filter: alpha(opacity=65);
|
||||
-webkit-box-shadow: none;
|
||||
-moz-box-shadow: none;
|
||||
box-shadow: none;
|
||||
}
|
||||
|
||||
.btn-large {
|
||||
padding: 11px 19px;
|
||||
font-size: 17.5px;
|
||||
-webkit-border-radius: 6px;
|
||||
-moz-border-radius: 6px;
|
||||
border-radius: 6px;
|
||||
}
|
||||
|
||||
.btn-large [class^="icon-"],
|
||||
.btn-large [class*=" icon-"] {
|
||||
margin-top: 4px;
|
||||
}
|
||||
|
||||
.btn-small {
|
||||
padding: 2px 10px;
|
||||
font-size: 11.9px;
|
||||
-webkit-border-radius: 3px;
|
||||
-moz-border-radius: 3px;
|
||||
border-radius: 3px;
|
||||
}
|
||||
|
||||
.btn-small [class^="icon-"],
|
||||
.btn-small [class*=" icon-"] {
|
||||
margin-top: 0;
|
||||
}
|
||||
|
||||
.btn-mini [class^="icon-"],
|
||||
.btn-mini [class*=" icon-"] {
|
||||
margin-top: -1px;
|
||||
}
|
||||
|
||||
.btn-mini {
|
||||
padding: 0 6px;
|
||||
font-size: 10.5px;
|
||||
-webkit-border-radius: 3px;
|
||||
-moz-border-radius: 3px;
|
||||
border-radius: 3px;
|
||||
}
|
||||
|
||||
.btn-block {
|
||||
display: block;
|
||||
width: 100%;
|
||||
padding-right: 0;
|
||||
padding-left: 0;
|
||||
-webkit-box-sizing: border-box;
|
||||
-moz-box-sizing: border-box;
|
||||
box-sizing: border-box;
|
||||
}
|
||||
|
||||
.btn-block + .btn-block {
|
||||
margin-top: 5px;
|
||||
}
|
||||
|
||||
input[type="submit"].btn-block,
|
||||
input[type="reset"].btn-block,
|
||||
input[type="button"].btn-block {
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
.btn-primary.active,
|
||||
.btn-warning.active,
|
||||
.btn-danger.active,
|
||||
.btn-success.active,
|
||||
.btn-info.active,
|
||||
.btn-inverse.active {
|
||||
color: rgba(255, 255, 255, 0.75);
|
||||
}
|
||||
|
||||
.btn-primary {
|
||||
color: #ffffff;
|
||||
text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25);
|
||||
background-color: #006dcc;
|
||||
*background-color: #0044cc;
|
||||
background-image: -moz-linear-gradient(top, #0088cc, #0044cc);
|
||||
background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#0088cc), to(#0044cc));
|
||||
background-image: -webkit-linear-gradient(top, #0088cc, #0044cc);
|
||||
background-image: -o-linear-gradient(top, #0088cc, #0044cc);
|
||||
background-image: linear-gradient(to bottom, #0088cc, #0044cc);
|
||||
background-repeat: repeat-x;
|
||||
border-color: #0044cc #0044cc #002a80;
|
||||
border-color: rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.25);
|
||||
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff0088cc', endColorstr='#ff0044cc', GradientType=0);
|
||||
filter: progid:DXImageTransform.Microsoft.gradient(enabled=false);
|
||||
}
|
||||
|
||||
.btn-primary:hover,
|
||||
.btn-primary:focus,
|
||||
.btn-primary:active,
|
||||
.btn-primary.active,
|
||||
.btn-primary.disabled,
|
||||
.btn-primary[disabled] {
|
||||
color: #ffffff;
|
||||
background-color: #0044cc;
|
||||
*background-color: #003bb3;
|
||||
}
|
||||
|
||||
.btn-primary:active,
|
||||
.btn-primary.active {
|
||||
background-color: #003399 \9;
|
||||
}
|
||||
|
||||
|
||||
select,
|
||||
textarea,
|
||||
input[type="text"],
|
||||
input[type="password"],
|
||||
input[type="datetime"],
|
||||
input[type="datetime-local"],
|
||||
input[type="date"],
|
||||
input[type="month"],
|
||||
input[type="time"],
|
||||
input[type="week"],
|
||||
input[type="number"],
|
||||
input[type="email"],
|
||||
input[type="url"],
|
||||
input[type="search"],
|
||||
input[type="tel"],
|
||||
input[type="color"],
|
||||
.uneditable-input {
|
||||
color: #454545;
|
||||
-webkit-border-radius: 3px;
|
||||
-moz-border-radius: 3px;
|
||||
border-radius: 3px;
|
||||
}
|
||||
|
||||
textarea:focus,
|
||||
input[type="text"]:focus,
|
||||
input[type="password"]:focus,
|
||||
input[type="datetime"]:focus,
|
||||
input[type="datetime-local"]:focus,
|
||||
input[type="date"]:focus,
|
||||
input[type="month"]:focus,
|
||||
input[type="time"]:focus,
|
||||
input[type="week"]:focus,
|
||||
input[type="number"]:focus,
|
||||
input[type="email"]:focus,
|
||||
input[type="url"]:focus,
|
||||
input[type="search"]:focus,
|
||||
input[type="tel"]:focus,
|
||||
input[type="color"]:focus,
|
||||
.uneditable-input:focus {
|
||||
border-color: #2893ef;
|
||||
outline: 0;
|
||||
outline: thin dotted \9;
|
||||
/* IE6-9 */
|
||||
|
||||
-webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 0 1px #94ceff;
|
||||
-moz-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 0 1px #94ceff;
|
||||
box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 0 1px #94ceff;
|
||||
}
|
||||
|
||||
.nav > li > a:hover,
|
||||
.nav > li > a:focus {
|
||||
background-color: #e0f0fa;
|
||||
border-radius: 3px;
|
||||
color: #137cd4;
|
||||
text-decoration: none;
|
||||
}
|
||||
|
||||
.navbar .nav > li > a {
|
||||
padding: 10px 15px 10px;
|
||||
color: #000;
|
||||
font-weight: 300;
|
||||
text-shadow: none;
|
||||
}
|
||||
|
||||
.nav-list {
|
||||
margin-left: 15px;
|
||||
}
|
||||
|
||||
.package-list.nav-list {
|
||||
margin-left: 0;
|
||||
}
|
||||
|
||||
.nav-list > li > a,
|
||||
.nav-list .nav-header {
|
||||
text-shadow: none;
|
||||
}
|
||||
|
||||
.nav-header a,
|
||||
.nav-header a:hover {
|
||||
color: #000 !important;
|
||||
}
|
||||
|
||||
.navbar .brand {
|
||||
font-weight: 500;
|
||||
color: #000;
|
||||
text-shadow: none;
|
||||
}
|
||||
|
||||
.navbar-inner {
|
||||
min-height: 40px;
|
||||
border: none;
|
||||
-webkit-border-radius: 0;
|
||||
-moz-border-radius: 0;
|
||||
border-radius: 0;
|
||||
}
|
||||
|
||||
.navbar-inverse .navbar-inner {
|
||||
background-image: none;
|
||||
background: #fff;
|
||||
border-bottom: 1px solid rgba(0, 0, 0, 0.12);
|
||||
|
||||
-webkit-box-shadow: 0 1px 6px rgba(0, 0, 0, 0.06);
|
||||
-moz-box-shadow: 0 1px 6px rgba(0, 0, 0, 0.06);
|
||||
box-shadow: 0 1px 6px rgba(0, 0, 0, 0.06);
|
||||
}
|
||||
|
||||
.navbar-inverse .brand,
|
||||
.navbar-inverse .nav > li > a {
|
||||
color: #000;
|
||||
text-shadow: none;
|
||||
}
|
||||
|
||||
.navbar-inverse .brand:hover,
|
||||
.navbar-inverse .nav > li > a:hover,
|
||||
.navbar-inverse .brand:focus,
|
||||
.navbar-inverse .nav > li > a:focus {
|
||||
color: #000;
|
||||
}
|
||||
|
||||
.navbar-inverse .brand {
|
||||
color: #000;
|
||||
margin-left: -10px;
|
||||
}
|
||||
|
||||
.navbar-inverse .navbar-text {
|
||||
color: #454545;
|
||||
}
|
||||
|
||||
.navbar-inverse .nav > li > a:focus,
|
||||
.navbar-inverse .nav > li > a:hover {
|
||||
color: #000;
|
||||
background-color: transparent;
|
||||
}
|
||||
|
||||
.navbar-inverse .nav .active > a,
|
||||
.navbar-inverse .nav .active > a:hover,
|
||||
.navbar-inverse .nav .active > a:focus {
|
||||
color: #000;
|
||||
background-color: transparent;
|
||||
}
|
||||
|
||||
.navbar-inverse .navbar-link {
|
||||
color: #000;
|
||||
}
|
||||
|
||||
.navbar-inverse .navbar-link:hover,
|
||||
.navbar-inverse .navbar-link:focus {
|
||||
color: #000;
|
||||
}
|
||||
|
||||
.nav-header {
|
||||
padding: 3px 15px;
|
||||
font-size: 11px;
|
||||
font-weight: 400;
|
||||
line-height: 20px;
|
||||
color: #999999;
|
||||
text-shadow: 0 1px 0 rgba(255, 255, 255, 0.5);
|
||||
text-transform: none;
|
||||
letter-spacing: 1px;
|
||||
}
|
||||
|
||||
.page-header {
|
||||
padding-bottom: 0;
|
||||
margin: 10px 0 40px;
|
||||
border-bottom: 1px solid #d7d7d7;
|
||||
}
|
||||
|
||||
|
||||
.page-header h1 {
|
||||
background: #F8F8F8;
|
||||
display: inline-block;
|
||||
position: relative;
|
||||
bottom: -19px;
|
||||
}
|
||||
|
||||
.alert {
|
||||
padding: 4px 7px;
|
||||
}
|
||||
|
||||
.alert-info {
|
||||
color: #000;
|
||||
background-color: #e0f0fa;
|
||||
border-color: #d9eaf4;
|
||||
border-radius: 3px;
|
||||
font-size: 12px;
|
||||
text-shadow: none;
|
||||
}
|
||||
|
||||
.radio input[type="radio"],
|
||||
.checkbox input[type="checkbox"] {
|
||||
float: left;
|
||||
margin-left: -15px;
|
||||
}
|
||||
|
||||
/* Shared chip styling for labels and badges (bootstrap override). */
.label,
/* fix: was the bare element selector `badge`, which matches no HTML element;
   the sibling rules below (.badge-info) show class selectors are intended */
.badge {
    padding: 4px 7px;
    font-weight: 400;
    color: #ffffff;
    text-shadow: none;
}
|
||||
|
||||
.label-non-nullable,
|
||||
.label-nullable,
|
||||
.label-optional,
|
||||
.label-info,
|
||||
.badge-info {
|
||||
background-color: #eee;
|
||||
color: #222;
|
||||
text-shadow: none;
|
||||
}
|
||||
|
||||
.well {
|
||||
padding: 19px 19px 0;
|
||||
}
|
||||
|
||||
.table {
|
||||
background-color: #fff;
|
||||
}
|
||||
|
||||
/* non-bootstrap css
|
||||
---------------------------------------------------------*/
|
||||
|
||||
[class^="icon-"]{
|
||||
background: none;
|
||||
}
|
||||
body{
|
||||
padding-left: 1.5em;
|
||||
padding-right: 1.5em;
|
||||
}
|
||||
|
||||
.number-of-modules {
|
||||
font-size: 14px;
|
||||
font-weight: 400;
|
||||
line-height: 1.5em;
|
||||
margin: 10px 0 0 15px;
|
||||
}
|
||||
|
||||
#other-module{
|
||||
display: none;
|
||||
overflow: scroll;
|
||||
}
|
||||
#toggle-other-modules i{
|
||||
font-size: 28px;
|
||||
}
|
||||
.nav-header{
|
||||
}
|
||||
|
||||
#description {
|
||||
font-size: 14px;
|
||||
line-height: 22px;
|
||||
}
|
||||
section > h2,
|
||||
section > h3{
|
||||
font-size: 30px;
|
||||
line-height: 30px;
|
||||
margin-bottom: 10px;
|
||||
margin-top: 25px;
|
||||
text-indent: 2px;
|
||||
}
|
||||
.properties > h3 {
|
||||
font-size: 20px;
|
||||
line-height: 20px;
|
||||
margin-bottom: 15px;
|
||||
margin-top: 30px;
|
||||
text-indent: 2px;
|
||||
}
|
||||
.methods > h3 {
|
||||
font-size: 20px;
|
||||
line-height: 20px;
|
||||
margin-bottom: 15px;
|
||||
margin-top: 30px;
|
||||
text-indent: 2px;
|
||||
}
|
||||
h3 .checkbox{
|
||||
display: inline-block;
|
||||
font-weight: 300;
|
||||
margin-left: 10px;
|
||||
vertical-align: middle;
|
||||
width: auto;
|
||||
}
|
||||
.element-list ul{
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
}
|
||||
.element-list ul li {
|
||||
display: inline-block;
|
||||
padding: 3px 8px;
|
||||
margin-bottom: 10px;
|
||||
margin-right: 5px;
|
||||
font-size: 14px;
|
||||
line-height: 20px;
|
||||
color: #454545;
|
||||
text-align: center;
|
||||
background-color: #e0f0fa;
|
||||
*background-color: #e0f0fa;
|
||||
border: 1px solid #d9eaf4;
|
||||
background-image: none;
|
||||
|
||||
-webkit-border-radius: 3px;
|
||||
-moz-border-radius: 3px;
|
||||
border-radius: 3px;
|
||||
}
|
||||
.element-list ul li a {
|
||||
padding-top:0;
|
||||
padding-bottom:0;
|
||||
}
|
||||
.element-list ul li a:hover {
|
||||
background: transparent;
|
||||
}
|
||||
.member{
|
||||
background: #fff;
|
||||
color: #454545;
|
||||
margin-bottom: 20px;
|
||||
overflow: hidden; /* clearfix */
|
||||
padding: 20px 17px;
|
||||
border-radius: 4px;
|
||||
border: 1px solid #dedede;
|
||||
border-top: 1px solid #eee;
|
||||
}
|
||||
/*.member:last-of-type{*/
|
||||
/*margin-bottom: 0;*/
|
||||
/*}*/
|
||||
.member h4{
|
||||
border-bottom: 1px solid #e7e7e7;
|
||||
font-weight: 400;
|
||||
padding-bottom: 10px;
|
||||
margin-top: -10px;
|
||||
margin-bottom: 10px;
|
||||
}
|
||||
/* Source-code pane inside a member card; starts hidden and is toggled
   by the "show code" control.
   fix: the selector `.member .code` was listed twice in the group —
   deduplicated with no change in matching. */
.member .code {
    background: #f9f9f9;
    border: 1px solid #eee;
    border-top: 1px solid #e7e7e7;
    display: none;
    margin-top: 0;
    margin-bottom: 0;
}
|
||||
|
||||
.member .example {
|
||||
display: block;
|
||||
margin-bottom: 15px;
|
||||
position: relative;
|
||||
}
|
||||
|
||||
.member .example:before {
|
||||
color: #888;
|
||||
content: 'Example';
|
||||
font-style: italic;
|
||||
position: absolute;
|
||||
right: 10px;
|
||||
top: 10px;
|
||||
}
|
||||
|
||||
.member.private{
|
||||
display: none;
|
||||
background: #fff;
|
||||
}
|
||||
.show-private .member.private{
|
||||
display: block;
|
||||
}
|
||||
.member .scope{
|
||||
color: #888;
|
||||
font-style: italic;
|
||||
padding-bottom: 10px;
|
||||
margin-top: -10px;
|
||||
}
|
||||
|
||||
.member .anchor {
|
||||
color: inherit;
|
||||
visibility: hidden
|
||||
}
|
||||
|
||||
.member .anchor:hover {
|
||||
text-decoration: none;
|
||||
}
|
||||
|
||||
.member .anchor:focus {
|
||||
box-shadow: none;
|
||||
}
|
||||
|
||||
.member .anchor .icon-link {
|
||||
line-height: 24px;
|
||||
}
|
||||
|
||||
.member:hover .anchor {
|
||||
visibility: visible;
|
||||
}
|
||||
|
||||
.deprecated {
|
||||
background: #EBEBEB;
|
||||
background-image: repeating-linear-gradient(135deg, transparent, transparent 35px, rgba(255,255,255,.5) 35px, rgba(255,255,255,.5) 70px);
|
||||
}
|
||||
|
||||
.deprecated .label-deprecated {
|
||||
margin-right: 10px;
|
||||
}
|
||||
|
||||
.deprecated .scope {
|
||||
text-decoration: line-through;
|
||||
}
|
||||
|
||||
.show-code {
|
||||
float: right;
|
||||
}
|
||||
|
||||
/* Typeahead styles (Bootstrap conflicts) */
|
||||
|
||||
.twitter-typeahead .tt-query,
|
||||
.twitter-typeahead .tt-hint {
|
||||
margin-bottom: 0;
|
||||
}
|
||||
|
||||
.tt-dropdown-menu {
|
||||
min-width: 160px;
|
||||
margin-top: 0;
|
||||
padding: 5px 0;
|
||||
background-color: #fff;
|
||||
border: 1px solid #d7d7d7;
|
||||
*border-right-width: 2px;
|
||||
*border-bottom-width: 2px;
|
||||
-webkit-border-radius: 4px;
|
||||
-moz-border-radius: 4px;
|
||||
border-radius: 4px;
|
||||
-webkit-box-shadow: 0 1px 6px rgba(0, 0, 0, 0.12);
|
||||
-moz-box-shadow: 0 1px 6px rgba(0, 0, 0, 0.12);
|
||||
box-shadow: 0 1px 6px rgba(0, 0, 0, 0.12);
|
||||
-webkit-background-clip: padding-box;
|
||||
-moz-background-clip: padding;
|
||||
background-clip: padding-box;
|
||||
}
|
||||
|
||||
.tt-suggestion {
|
||||
display: block;
|
||||
font-family: source-sans-pro, Helvetica, Arial, sans-serif;
|
||||
font-size: 14px;
|
||||
padding: 3px 10px;
|
||||
}
|
||||
|
||||
.tt-suggestion.tt-is-under-cursor {
|
||||
color: #000;
|
||||
background-color: #e0f0fa;
|
||||
background-image: none;
|
||||
}
|
||||
|
||||
.tt-suggestion.tt-is-under-cursor a {
|
||||
color: #fff;
|
||||
}
|
||||
|
||||
.tt-suggestion p {
|
||||
margin: 0;
|
||||
}
|
||||
|
||||
.navbar-fixed-top .container {
|
||||
margin: 5px;
|
||||
width: auto;
|
||||
}
|
||||
|
||||
span.twitter-typeahead {
|
||||
float: right;
|
||||
margin: 5px;
|
||||
}
|
||||
|
||||
input.typeahead, input.tt-hint {
|
||||
margin: 0;
|
||||
}
|
||||
|
||||
input.tt-hint {
|
||||
color: #999;
|
||||
}
|
||||
|
||||
dl .label {
|
||||
margin-bottom: 7px;
|
||||
}
|
||||
|
||||
/* --------------- Appended ---------------- */
|
||||
|
||||
.node-icon {
|
||||
width: 16px;
|
||||
height: 16px;
|
||||
display: inline-block;
|
||||
}
|
97
Documentation/Design-Documents/assets/css/variables.less
Normal file
@ -0,0 +1,97 @@
|
||||
/*
|
||||
* Copyright (c) 2013-2014 Minkyu Lee. All rights reserved.
|
||||
*
|
||||
* NOTICE: All information contained herein is, and remains the
|
||||
* property of Minkyu Lee. The intellectual and technical concepts
|
||||
* contained herein are proprietary to Minkyu Lee and may be covered
|
||||
* by Republic of Korea and Foreign Patents, patents in process,
|
||||
* and are protected by trade secret or copyright law.
|
||||
* Dissemination of this information or reproduction of this material
|
||||
* is strictly forbidden unless prior written permission is obtained
|
||||
* from Minkyu Lee (niklaus.lee@gmail.com).
|
||||
*
|
||||
*/
|
||||
|
||||
// ----- Light Theme ----------------------------------------------------------
|
||||
|
||||
/*
|
||||
// Icons
|
||||
@ui-image-folder: "Default";
|
||||
@ui-icon-url: url('icons/icons-light.png');
|
||||
|
||||
// Widget
|
||||
@ui-widget-color: #e4e4e4;
|
||||
@ui-widget-light-color: lighten(@ui-widget-color, 15%);
|
||||
@ui-widget-dark-color: darken(@ui-widget-color, 15%);
|
||||
@ui-widget-border-color: darken(@ui-widget-color, 20%);
|
||||
@ui-widget-text-color: #454545;
|
||||
@ui-widget-text-weight: 400;
|
||||
|
||||
// Widget Header
|
||||
@ui-widget-header-color: darken(@ui-widget-color, 12%);
|
||||
@ui-widget-header-light-color: lighten(@ui-widget-header-color, 10%);
|
||||
@ui-widget-header-dark-color: darken(@ui-widget-header-color, 10%);
|
||||
@ui-widget-header-border-color: @ui-widget-border-color;
|
||||
@ui-widget-header-text-color: @ui-widget-text-color;
|
||||
|
||||
// Widget Content
|
||||
@ui-widget-content-color: lighten(@ui-widget-color, 5%);
|
||||
@ui-widget-content-light-color: lighten(@ui-widget-content-color, 2%);
|
||||
@ui-widget-content-dark-color: darken(@ui-widget-content-color, 2%);
|
||||
@ui-widget-content-border-color: darken(@ui-widget-content-color, 5%);
|
||||
@ui-widget-content-hover-color: lighten(@ui-widget-content-color, 5%);
|
||||
|
||||
// Widget Active
|
||||
@ui-widget-active-color: #0380e8;
|
||||
@ui-widget-active-light-color: lighten(@ui-widget-active-color, 10%);
|
||||
@ui-widget-active-dark-color: darken(@ui-widget-active-color, 10%);
|
||||
@ui-widget-active-border-color: darken(@ui-widget-active-color, 20%);
|
||||
|
||||
// Context Menu
|
||||
@ui-context-menu-background-color: #ffffff;
|
||||
@ui-context-menu-text-color: @ui-widget-text-color;
|
||||
@ui-context-menu-border-color: #ddd;
|
||||
@ui-context-menu-hover-color: #d6f6ff;
|
||||
*/
|
||||
|
||||
|
||||
// ----- Dark Theme ----------------------------------------------------------
|
||||
|
||||
// Icons
|
||||
@ui-image-folder: "Black";
|
||||
@ui-icon-url: url('assets/icons/icons-light.png');
|
||||
|
||||
// Widget
|
||||
@ui-widget-color: #3b3f41;
|
||||
@ui-widget-light-color: lighten(@ui-widget-color, 3%);
|
||||
@ui-widget-dark-color: darken(@ui-widget-color, 3%);
|
||||
@ui-widget-border-color: darken(@ui-widget-color, 5%);
|
||||
@ui-widget-text-color: #c7c7c7;
|
||||
@ui-widget-text-weight: 400;
|
||||
|
||||
// Widget Header
|
||||
@ui-widget-header-color: darken(@ui-widget-color, 4%);
|
||||
@ui-widget-header-light-color: lighten(@ui-widget-header-color, 2%);
|
||||
@ui-widget-header-dark-color: darken(@ui-widget-header-color, 2%);
|
||||
@ui-widget-header-border-color: @ui-widget-border-color;
|
||||
@ui-widget-header-text-color: @ui-widget-text-color;
|
||||
|
||||
// Widget Content
|
||||
@ui-widget-content-color: lighten(@ui-widget-color, 2%);
|
||||
@ui-widget-content-light-color: lighten(@ui-widget-content-color, 2%);
|
||||
@ui-widget-content-dark-color: darken(@ui-widget-content-color, 2%);
|
||||
@ui-widget-content-border-color: darken(@ui-widget-content-color, 3%);
|
||||
@ui-widget-content-hover-color: lighten(@ui-widget-content-color, 3%);
|
||||
|
||||
// Widget Active
|
||||
@ui-widget-active-color: #0079de;
|
||||
@ui-widget-active-light-color: lighten(@ui-widget-active-color, 3%);
|
||||
@ui-widget-active-dark-color: darken(@ui-widget-active-color, 3%);
|
||||
@ui-widget-active-border-color: darken(@ui-widget-active-color, 5%);
|
||||
|
||||
// Context Menu
|
||||
@ui-context-menu-background-color: @ui-widget-color;
|
||||
@ui-context-menu-text-color: @ui-widget-text-color;
|
||||
@ui-context-menu-border-color: @ui-widget-border-color;
|
||||
@ui-context-menu-hover-color: lighten(@ui-context-menu-background-color, 5%);
|
||||
|
BIN
Documentation/Design-Documents/assets/icons/icons-light.png
Normal file
After Width: | Height: | Size: 12 KiB |
BIN
Documentation/Design-Documents/assets/icons/icons-light_orig.png
Normal file
After Width: | Height: | Size: 12 KiB |
After Width: | Height: | Size: 8.6 KiB |
After Width: | Height: | Size: 12 KiB |
2276
Documentation/Design-Documents/assets/js/bootstrap.js
vendored
Normal file
4
Documentation/Design-Documents/assets/js/jquery-2.1.0.min.js
vendored
Normal file
247
Documentation/Design-Documents/assets/js/jquery.bonsai.js
Normal file
@ -0,0 +1,247 @@
|
||||
(function($){
|
||||
$.fn.bonsai = function(options) {
|
||||
var args = arguments;
|
||||
return this.each(function() {
|
||||
var bonsai = $(this).data('bonsai');
|
||||
if (!bonsai) {
|
||||
bonsai = new Bonsai(this, options);
|
||||
$(this).data('bonsai', bonsai);
|
||||
}
|
||||
if (typeof options == 'string') {
|
||||
var method = options;
|
||||
bonsai[method].apply(bonsai, [].slice.call(args, 1));
|
||||
}
|
||||
});
|
||||
};
|
||||
$.bonsai = {};
|
||||
$.bonsai.defaults = {
|
||||
expandAll: false, // boolean expands all items
|
||||
expand: null, // function to expand an item
|
||||
collapse: null, // function to collapse an item
|
||||
checkboxes: false, // requires jquery.qubit
|
||||
// createCheckboxes: creates checkboxes for each list item.
|
||||
//
|
||||
// The name and value for the checkboxes can be declared in the
|
||||
// markup using `data-name` and `data-value`.
|
||||
//
|
||||
// The name is inherited from parent items if not specified.
|
||||
//
|
||||
// Checked state can be indicated using `data-checked`.
|
||||
createCheckboxes: false,
|
||||
// handleDuplicateCheckboxes: adds onChange bindings to update
|
||||
// any other checkboxes that have the same value.
|
||||
handleDuplicateCheckboxes: false,
|
||||
selectAllExclude: null
|
||||
};
|
||||
var Bonsai = function(el, options) {
|
||||
var self = this;
|
||||
options = options || {};
|
||||
this.options = $.extend({}, $.bonsai.defaults, options);
|
||||
this.el = $(el).addClass('bonsai').data('bonsai', this);
|
||||
this.update();
|
||||
if (this.isRootNode()) {
|
||||
if (this.options.handleDuplicateCheckboxes) this.handleDuplicates();
|
||||
if (this.options.checkboxes) this.el.qubit(this.options);
|
||||
if (this.options.addExpandAll) this.addExpandAllLink();
|
||||
if (this.options.addSelectAll) this.addSelectAllLink();
|
||||
this.el.on('click', '.thumb', function(ev) {
|
||||
self.toggle($(ev.currentTarget).closest('li'));
|
||||
});
|
||||
}
|
||||
if (this.options.expandAll) this.expandAll();
|
||||
};
|
||||
Bonsai.prototype = {
|
||||
isRootNode: function() {
|
||||
return this.options.scope == this.el;
|
||||
},
|
||||
toggle: function(listItem) {
|
||||
if (!$(listItem).hasClass('expanded')) {
|
||||
this.expand(listItem);
|
||||
}
|
||||
else {
|
||||
this.collapse(listItem);
|
||||
}
|
||||
},
|
||||
expand: function(listItem) {
|
||||
this.setExpanded(listItem, true);
|
||||
},
|
||||
collapse: function(listItem) {
|
||||
this.setExpanded(listItem, false);
|
||||
},
|
||||
setExpanded: function(listItem, expanded) {
|
||||
listItem = $(listItem);
|
||||
if (listItem.length > 1) {
|
||||
var self = this;
|
||||
listItem.each(function() {
|
||||
self.setExpanded(this, expanded);
|
||||
});
|
||||
return;
|
||||
}
|
||||
if (expanded) {
|
||||
if (!listItem.data('subList')) return;
|
||||
listItem = $(listItem).addClass('expanded')
|
||||
.removeClass('collapsed');
|
||||
$(listItem.data('subList')).css('height', 'auto');
|
||||
}
|
||||
else {
|
||||
listItem = $(listItem).addClass('collapsed')
|
||||
.removeClass('expanded');
|
||||
$(listItem.data('subList')).height(0);
|
||||
}
|
||||
},
|
||||
expandAll: function() {
|
||||
this.expand(this.el.find('li'));
|
||||
},
|
||||
collapseAll: function() {
|
||||
this.collapse(this.el.find('li'));
|
||||
},
|
||||
update: function() {
|
||||
var self = this;
|
||||
// store the scope in the options for child nodes
|
||||
if (!this.options.scope) {
|
||||
this.options.scope = this.el;
|
||||
}
|
||||
// look for a nested list (if any)
|
||||
this.el.children().each(function() {
|
||||
var item = $(this);
|
||||
if (self.options.createCheckboxes) self.insertCheckbox(item);
|
||||
// insert a thumb if it doesn't already exist
|
||||
if (item.children().filter('.thumb').length == 0) {
|
||||
var thumb = $('<div class="thumb"></div>');
|
||||
item.prepend(thumb);
|
||||
}
|
||||
var subLists = item.children().filter('ol, ul');
|
||||
item.toggleClass('has-children', subLists.find('li').length > 0);
|
||||
// if there is a child list
|
||||
subLists.each(function() {
|
||||
// that's not empty
|
||||
if ($('li', this).length == 0) {
|
||||
return;
|
||||
}
|
||||
// then this el has children
|
||||
item.data('subList', this);
|
||||
// collapse the nested list
|
||||
if (item.hasClass('expanded')) {
|
||||
self.expand(item);
|
||||
}
|
||||
else {
|
||||
self.collapse(item);
|
||||
}
|
||||
// handle any deeper nested lists
|
||||
var exists = !!$(this).data('bonsai');
|
||||
$(this).bonsai(exists ? 'update' : self.options);
|
||||
});
|
||||
});
|
||||
this.expand = this.options.expand || this.expand;
|
||||
this.collapse = this.options.collapse || this.collapse;
|
||||
},
|
||||
insertCheckbox: function(listItem) {
|
||||
if (listItem.find('> input[type=checkbox]').length) return;
|
||||
var id = this.generateId(listItem),
|
||||
checkbox = $('<input type="checkbox" name="'
|
||||
+ this.getCheckboxName(listItem) + '" id="' + id + '" /> '
|
||||
),
|
||||
children = listItem.children(),
|
||||
// get the first text node for the label
|
||||
text = listItem.contents().filter(function() {
|
||||
return this.nodeType == 3;
|
||||
}).first();
|
||||
checkbox.val(listItem.data('value'));
|
||||
checkbox.prop('checked', listItem.data('checked'))
|
||||
children.remove();
|
||||
listItem.append(checkbox)
|
||||
.append(
|
||||
$('<label for="' + id + '">').append(text ? text : children.first())
|
||||
)
|
||||
.append(text ? children : children.slice(1));
|
||||
},
|
||||
handleDuplicates: function() {
|
||||
var self = this;
|
||||
self.el.on('change', 'input[type=checkbox]', function(ev) {
|
||||
var checkbox = $(ev.target);
|
||||
if (!checkbox.val()) return;
|
||||
// select all duplicate checkboxes that need to be updated
|
||||
var selector = 'input[type=checkbox]'
|
||||
+ '[value="' + checkbox.val() + '"]'
|
||||
+ '[name="' + checkbox.attr('name') + '"]'
|
||||
+ (checkbox.prop('checked') ? ':not(:checked)' : ':checked');
|
||||
self.el.find(selector).prop({
|
||||
checked: checkbox.prop('checked'),
|
||||
indeterminate: checkbox.prop('indeterminate')
|
||||
}).trigger('change');
|
||||
});
|
||||
},
|
||||
idPrefix: 'checkbox-',
|
||||
generateId: function(listItem) {
|
||||
do {
|
||||
var id = this.idPrefix + Bonsai.uniqueId++;
|
||||
}
|
||||
while($('#' + id).length > 0);
|
||||
return id;
|
||||
},
|
||||
getCheckboxName: function(listItem) {
|
||||
return listItem.data('name')
|
||||
|| listItem.parents().filter('[data-name]').data('name');
|
||||
},
|
||||
addExpandAllLink: function() {
|
||||
var self = this;
|
||||
$('<div class="expand-all">')
|
||||
.append($('<a class="all">Expand all</a>')
|
||||
.on('click', function() {
|
||||
self.expandAll();
|
||||
})
|
||||
)
|
||||
.append('<i class="separator"></i>')
|
||||
.append($('<a class="none">Collapse all</a>')
|
||||
.on('click', function() {
|
||||
self.collapseAll();
|
||||
})
|
||||
)
|
||||
.insertBefore(this.el);
|
||||
},
|
||||
addSelectAllLink: function() {
|
||||
var scope = this.options.scope,
|
||||
self = this;
|
||||
function getCheckboxes() {
|
||||
// return all checkboxes that are not in hidden list items
|
||||
return scope.find('li')
|
||||
.filter(self.options.selectAllExclude || function() {
|
||||
return $(this).css('display') != 'none';
|
||||
})
|
||||
.find('> input[type=checkbox]');
|
||||
}
|
||||
$('<div class="check-all">')
|
||||
.append($('<a class="all">Select all</a>')
|
||||
.css('cursor', 'pointer')
|
||||
.on('click', function() {
|
||||
getCheckboxes().prop({
|
||||
checked: true,
|
||||
indeterminate: false
|
||||
});
|
||||
})
|
||||
)
|
||||
.append('<i class="separator"></i>')
|
||||
.append($('<a class="none">Select none</a>')
|
||||
.css('cursor', 'pointer')
|
||||
.on('click', function() {
|
||||
getCheckboxes().prop({
|
||||
checked: false,
|
||||
indeterminate: false
|
||||
});
|
||||
})
|
||||
)
|
||||
.insertAfter(this.el);
|
||||
},
|
||||
setCheckedValues: function(values) {
|
||||
var all = this.options.scope.find('input[type=checkbox]');
|
||||
$.each(values, function(key, value) {
|
||||
all.filter('[value="' + value + '"]')
|
||||
.prop('checked', true)
|
||||
.trigger('change');
|
||||
});
|
||||
}
|
||||
};
|
||||
$.extend(Bonsai, {
|
||||
uniqueId: 0
|
||||
});
|
||||
}(jQuery));
|
16
Documentation/Design-Documents/assets/js/less-1.7.0.min.js
vendored
Normal file
0
Documentation/Design-Documents/assets/js/main.js
Normal file
55
Documentation/Documentation-Contents.md
Normal file
@ -0,0 +1,55 @@
|
||||
# Contents
|
||||
|
||||
## About MaxScale
|
||||
|
||||
- [Release Notes 1.0.4](About/MaxScale-1.0.4-Release-Notes.md)
|
||||
- [Limitations](About/Limitations.md)
|
||||
- [COPYRIGHT](About/COPYRIGHT.md)
|
||||
- [LICENSE](About/LICENSE.md)
|
||||
- [SETUP](About/SETUP.md)
|
||||
|
||||
## Getting Started
|
||||
|
||||
- [Getting Started with MaxScale](Getting-Started/Getting-Started-With-MaxScale.md)
|
||||
- [Configuration Guide](Getting-Started/Configuration-Guide.md)
|
||||
|
||||
## Reference
|
||||
|
||||
- [MaxAdmin](Reference/MaxAdmin.md)
|
||||
- [MaxScale HA with Corosync-Pacemaker](Reference/MaxScale-HA-with-Corosync-Pacemaker.md)
|
||||
- [How Errors are Handled in MaxScale](Reference/How-errors-are-handled-in-MaxScale.md)
|
||||
- [Debug and Diagnostic Support](Reference/Debug-And-Diagnostic-Support.md)
|
||||
|
||||
## Tutorials
|
||||
|
||||
- [Administration Tutorial](Tutorials/Administration-Tutorial.md)
|
||||
- [Filter Tutorial](Tutorials/Filter-Tutorial.md)
|
||||
- [Galera Cluster Connection Routing Tutorial](Tutorials/Galera-Cluster-Connection-Routing-Tutorial.md)
|
||||
- [Galera Cluster Read-Write Splitting Tutorial](Tutorials/Galera-Cluster-Read-Write-Splitting-Tutorial.md)
|
||||
- [MySQL Replication Connection Routing Tutorial](Tutorials/MySQL-Replication-Connection-Routing-Tutorial.md)
|
||||
- [MySQL Replication Read-Write Splitting Tutorial](Tutorials/MySQL-Replication-Read-Write-Splitting-Tutorial.md)
|
||||
- [MySQL Cluster Setup](Tutorials/MySQL-Cluster-Setup.md)
|
||||
- [Replication Proxy with the Binlog Router Tutorial](Tutorials/Replication-Proxy-Binlog-Router-Tutorial.md)
|
||||
- [RabbitMQ Setup and MaxScale Integration Tutorial](Tutorials/RabbitMQ-Setup-And-MaxScale-Integration.md)
|
||||
|
||||
## Filters
|
||||
|
||||
- [Query Log All](filters/Query-Log-All-Filter.md)
|
||||
- [Regex Filter](filters/Regex-Filter.md)
|
||||
- [Tee Filter](filters/Tee-Filter.md)
|
||||
- [Top N Filter](filters/Top-N-Filter.md)
|
||||
- [Firewall Filter](filters/Firewall-Filter.md)
|
||||
|
||||
## Design Documents
|
||||
|
||||
- [Session Commands design (in development)](http://mariadb-corporation.github.io/MaxScale/Design-Documents/)
|
||||
- [DCB States (to be replaced in StarUML)](Design-Documents/DCB-States.pdf)
|
||||
|
||||
## Earlier Release Notes
|
||||
|
||||
- [MaxScale 0.5 Release Notes](Release-Notes/MaxScale-0.5-Release-Notes.md)
|
||||
- [MaxScale 0.6 Release Notes](Release-Notes/MaxScale-0.6-Release-Notes.md)
|
||||
- [MaxScale 0.7 Release Notes](Release-Notes/MaxScale-0.7-Release-Notes.md)
|
||||
- [MaxScale 1.0 Release Notes](Release-Notes/MaxScale-1.0-Release-Notes.md)
|
||||
- [MaxScale 1.0.1 Release Notes](Release-Notes/MaxScale-1.0.1-Release-Notes.md)
|
||||
- [MaxScale 1.0.3 Release Notes](Release-Notes/MaxScale-1.0.3-Release-Notes.md)
|
200
Documentation/Getting-Started/Getting-Started-With-MaxScale.md
Normal file
@ -0,0 +1,200 @@
|
||||
# Getting Started With MariaDB MaxScale
|
||||
|
||||
## First Steps With MaxScale
|
||||
|
||||
In this introduction to MaxScale the aim is to take the reader from the point of installation to making the decision as to which of the various setups that are possible with MaxScale should be the initial configuration to use. One of the problems that new users to MaxScale suffer is deciding exactly what they should consider as a base configuration to start exploring what MaxScale is capable of. MaxScale is highly configurable, with new plugins expanding the capabilities of MaxScale, whilst this makes it a very adaptable tool it does lead to an initial hurdle in configuring MaxScale.
|
||||
|
||||
## Installation
|
||||
|
||||
The simplest way to install MaxScale is to use one of the binary packages that are available for download from the MariaDB website.
|
||||
|
||||
* Simply go to [www.mariadb.com](http://www.mariadb.com)
|
||||
|
||||
* Select the Downloads option from the Resources menu
|
||||
|
||||
* Find and click on the button "Download MariaDB MaxScale Binaries"
|
||||
|
||||
* Find the section on that page entitled MariaDB MaxScale
|
||||
|
||||
* Select your operating system from the drop down box
|
||||
|
||||

|
||||
|
||||
* Instructions that are specific for your operating system will then appear
|
||||
|
||||

|
||||
|
||||
* Follow these instructions to install MaxScale on your machine
|
||||
|
||||
Upon successful completion of the installation process you have a version of MaxScale that is missing only a configuration file before it can be started.
|
||||
|
||||
## Building MaxScale From Source Code
|
||||
|
||||
Alternatively you may download the MaxScale source and build your own binaries. You will need a number of tools and libraries in order to achieve this.
|
||||
|
||||
* cmake version 2.8.12 or later
|
||||
|
||||
* gcc recommended version 4.4.7 or later
|
||||
|
||||
* libaio
|
||||
|
||||
* MariaDB Develop libraries version 5.5.38 or later
|
||||
|
||||
* libedit 2.11 or later (used by MaxAdmin tool)
|
||||
|
||||
First clone the GitHub project to your machine either via the web interface, your favorite graphical interface or the git command line
|
||||
|
||||
$ git clone https://github.com/mariadb-corporation/MaxScale
|
||||
|
||||
Cloning into 'MaxScale'...
|
||||
|
||||
remote: Counting objects: 16228, done.
|
||||
|
||||
...
|
||||
|
||||
Change directory to the MaxScale directory, create a build directory and change directory to that build directory
|
||||
|
||||
$ cd MaxScale
|
||||
|
||||
$ mkdir build
|
||||
|
||||
$ cd build
|
||||
|
||||
The next step is to run the cmake command to build the Makefile you need to compile Maxscale. There are a number of options you may give to configure cmake and point it to the various packages it requires. These are documented in the MaxScale README file, in this example we will assume the MariaDB developer packages have been installed in a non-standard location and set all the options required to locate these, along with options to build the unit tests and configure the installation target directory.
|
||||
|
||||
$ cmake -DMYSQL_DIR=~/usr/include/mysql \
|
||||
|
||||
-DEMBEDDED_LIB=~/usr/lib64/libmysqld.a \
|
||||
|
||||
-DMYSQLCLIENT_LIBRARIES=~/usr/lib64/libmysqlclient.so \
|
||||
|
||||
-DERRMSG=~/usr/share/mysql/english/errmsg.sys \
|
||||
|
||||
-DINSTALL_DIR=/usr/local/maxscale -DBUILD_TESTS=Y \
|
||||
|
||||
-DINSTALL_SYSTEM_FILES=N ../MaxScale
|
||||
|
||||
-- CMake version: 2.8.12.2
|
||||
|
||||
-- The C compiler identification is GNU 4.4.7
|
||||
|
||||
-- The CXX compiler identification is GNU 4.4.7
|
||||
|
||||
-- Check for working C compiler: /usr/bin/cc
|
||||
|
||||
-- Check for working C compiler: /usr/bin/cc -- works
|
||||
|
||||
-- Detecting C compiler ABI info
|
||||
|
||||
-- Detecting C compiler ABI info - done
|
||||
|
||||
-- Check for working CXX compiler: /usr/bin/c++
|
||||
|
||||
-- Check for working CXX compiler: /usr/bin/c++ -- works
|
||||
|
||||
-- Detecting CXX compiler ABI info
|
||||
|
||||
-- Detecting CXX compiler ABI info - done
|
||||
|
||||
-- Library was found at: /lib64/libaio.so
|
||||
|
||||
-- Library was found at: /usr/lib64/libssl.so
|
||||
|
||||
-- Library was found at: /usr/lib64/libcrypt.so
|
||||
|
||||
-- Library was found at: /usr/lib64/libcrypto.so
|
||||
|
||||
-- Library was found at: /usr/lib64/libz.so
|
||||
|
||||
-- Library was found at: /usr/lib64/libm.so
|
||||
|
||||
-- Library was found at: /usr/lib64/libdl.so
|
||||
|
||||
-- Library was found at: /usr/lib64/librt.so
|
||||
|
||||
-- Library was found at: /usr/lib64/libpthread.so
|
||||
|
||||
-- Using errmsg.sys found at: /home/maxscale/usr/share/mysql/english/errmsg.sys
|
||||
|
||||
-- Using embedded library: /home/mpinto/usr/lib64/libmysqld.a
|
||||
|
||||
-- Valgrind found: /usr/bin/valgrind
|
||||
|
||||
-- Found dynamic MySQL client library: /home/maxscale/usr/lib64/libmysqlclient.so
|
||||
|
||||
-- Found static MySQL client library: /usr/lib/libmysqlclient.a
|
||||
|
||||
-- C Compiler supports: -Werror=format-security
|
||||
|
||||
-- Linking against: /home/mpinto/usr/lib64/libmysqlclient.so
|
||||
|
||||
-- Installing MaxScale to: /usr/local/maxscale/
|
||||
|
||||
-- Generating RPM packages
|
||||
|
||||
-- Found Doxygen: /usr/bin/doxygen (found version "1.6.1")
|
||||
|
||||
-- Configuring done
|
||||
|
||||
-- Generating done
|
||||
|
||||
-- Build files have been written to: /home/maxscale/develop/build
|
||||
|
||||
-bash-4.1$ make depend
|
||||
|
||||
-bash-4.1$ make
|
||||
|
||||
Once the cmake command is complete simply run make to build the MaxScale binaries.
|
||||
|
||||
$ make
|
||||
|
||||
**Scanning dependencies of target utils**
|
||||
|
||||
[ 1%] Building CXX object utils/CMakeFiles/utils.dir/skygw_utils.cc.o
|
||||
|
||||
**Linking CXX static library libutils.a**
|
||||
|
||||
[ 1%] Built target utils
|
||||
|
||||
**Scanning dependencies of target log_manager**
|
||||
|
||||
[ 2%] Building CXX object log_manager/CMakeFiles/log_manager.dir/log_manager.cc.o
|
||||
|
||||
...
|
||||
|
||||
After the completion of the make process the installation can be achieved by running the make install target.
|
||||
|
||||
$ make install
|
||||
|
||||
...
|
||||
|
||||
This will result in an installation being created which is identical to that which would be achieved by installing the binary package.
|
||||
|
||||
## Configuring MaxScale
|
||||
|
||||
The first step in configuring your MaxScale is to determine what it is you want to achieve with your MaxScale and what environment it will run in. The later is probably the easiest starting point for choosing which configuration route you wish to take. There are two distinct database environments which the first GA release of MaxScale supports; MySQL Master/Slave Replication clusters and Galera Cluster.
|
||||
|
||||
### Master/Slave Replication Clusters
|
||||
|
||||
There are two major configuration options available to use MaxScale with a MySQL Replication cluster; connection routing with separate read and write connections, or read/write splitting with a single connection. A separate tutorial is available for each of these configurations that describes how to build the configuration file for MaxScale that will work with your environment.
|
||||
|
||||
Using a MySQL Master/Slave Replication cluster will provide one node server within the cluster that is the master server and the remainder of the servers will be slaves. The slaves are read replicas of the master. In a replication cluster like this all write operations must be performed on the master. This can provide not just added security of your data, but also read scalability. In an application environment with a substantial proportions of read operations, directing those read operations to the slave servers can increase the total load which the system can handle by offloading the master server from the burden of these read operations.
|
||||
|
||||
Making the choice between these two setups is relatively simple, if you have an application that understands that there are some database servers that it can only read from and one it must send all of the writes to, then the connection routing option can be used. Applications that are not written to separate read and write statements must use a service within MaxScale that will split the incoming stream of SQL statements into operations that can be executed on the master and those that can be set to the slave. These applications should use the statement based routing provided by the Read/Write Splitter router.
|
||||
|
||||
### Galera Cluster
|
||||
|
||||
A Galera Cluster provides a true multi-master cluster option for MariaDB and MySQL database environments. In such a setup any node that is part of the cluster can be used to both execute read and write operations. MaxScale again offers two different configurations that can be used with Galera; a connection balancing configuration or a statement splitting mechanism that can be used to isolate write operations to a single node within the cluster. Again there is a tutorial guide available for both of these major configurations.
|
||||
|
||||
The connection based load balancing configuration is used in an environment in which you have a cluster that you want to be available to an application without the application needing to be aware of the cluster configuration or state of the database nodes. MaxScale will monitor the nodes within the database cluster and will route connections from the application to database nodes that are active members of the cluster. MaxScale will also keep track of the number of connections to each database node keep equal numbers of connections to each node, at the time the connection is established.
|
||||
|
||||
It is also possible to use the Read/Write Splitter with Galera. Although it is not necessary to segregate the write operations to a single node, there are advantages in doing this if you have an application where the write load is not too great to be handled by a single node in the cluster. Galera Cluster uses an optimistic locking strategy that will allow transactions to progress independently on each node within the cluster. It is only when the transaction commits that the transaction is checked for conflicts with other transactions that are committing on the other nodes. At this stage the commit can fail with a deadlock detection error. This can be inconvenient for applications and, some older applications, that are not aware that the transaction can fail at this stage may not check for this failure. Using the Read/Write Splitter will allow this to be avoided since it will isolate the write to one node and no deadlock detection will occur. MaxScale provides a monitoring module that will maintain pseudo states of master and slave for the Galera cluster that allows for this type of configuration.
|
||||
|
||||
### Other MaxScale Configuration
|
||||
|
||||
As well as the four major configuration choices outlined above there are also other configurations sub-options that may be mixed with those to provide a variety of different configuration and functionality. The MaxScale filter concept allows the basic configurations to be built upon in a large variety of ways. A separate filter tutorial is available that discusses the concept and gives some examples of ways to use filters.
|
||||
|
||||
## Administration Of MaxScale
|
||||
|
||||
There are various administration tasks that may be done with MaxScale, a client command, maxadmin, is available that will interact with a running MaxScale and allow the status of MaxScale to be monitored and give some control of the MaxScale functionality. There is a separate reference guide for the maxadmin utility and also a short administration tutorial that covers the common administration tasks that need to be done with MaxScale.
|
||||
|
BIN
Documentation/Getting-Started/images/image_0.png
Normal file
After Width: | Height: | Size: 86 KiB |
BIN
Documentation/Getting-Started/images/image_1.png
Normal file
After Width: | Height: | Size: 83 KiB |
BIN
Documentation/Getting-Started/images/image_10.png
Normal file
After Width: | Height: | Size: 13 KiB |
BIN
Documentation/Getting-Started/images/image_11.png
Normal file
After Width: | Height: | Size: 23 KiB |
1947
Documentation/Reference/Debug-And-Diagnostic-Support.md
Normal file
29
Documentation/Reference/Hint-Syntax.md
Normal file
@ -0,0 +1,29 @@
|
||||
Hint Syntax
|
||||
Use either ’-- ’ (notice the whitespace) or ’#’after the semicolon or ’/* .. */’ before
|
||||
the semicolon.
|
||||
The MySQL manual doesn’t specify if comment blocks, i.e. ’/* .. */’, should contain a w
|
||||
hitespace character before or after the tags.
|
||||
All hints must start with the ’maxscale tag’:
|
||||
-- maxscale <hint>
|
||||
The hints right now have two types, ones that route to a server and others that contain
|
||||
name-value pairs.
|
||||
Routing queries to a server:
|
||||
-- maxscale route to [master | slave | server <server name>]
|
||||
The name of the server is the same as in MaxScale.cnf
|
||||
Creating a name-value pair:
|
||||
-- maxscale <param>=<value>
|
||||
Currently the only accepted parameter is
|
||||
’max_slave_replication_lag’
|
||||
Hints can be either single-use hints, which makes them affect only one query, or named
|
||||
hints, which can be pushed on and off a stack of active hints.
|
||||
Defining named hints:
|
||||
-- maxscale <hint name> prepare <hint content>
|
||||
Pushing a hint onto the stack:
|
||||
-- maxscale <hint name> begin
|
||||
Popping the topmost hint off the stack:
|
||||
-- maxscale end
|
||||
You can define and activate a hint in a single command using the following:
|
||||
-- maxscale <hint name> begin <hint content>
|
||||
You can also push anonymous hints onto the stack which are only used as long as they ar
|
||||
e on the stack:
|
||||
-- maxscale begin <hint content>
|
@ -0,0 +1,56 @@
|
||||
# How errors are handled in MaxScale
|
||||
|
||||
This document describes how errors are handled in MaxScale, its protocol modules and routers.
|
||||
|
||||
Assume a client, maxscale, and master/slave replication cluster.
|
||||
|
||||
An "error" can be due to failed authentication, routing error (unsupported query type etc.), or backend failure.
|
||||
|
||||
## Authentication error
|
||||
|
||||
Authentication is relatively complex phase in the beginning of session creation. Roughly speaking, client protocol has loaded user information from backend so that it can authenticate client without consulting backend. When client sends authentication data to MaxScale data is compared against backend’s user data in the client protocol module. If authentication fails client protocol module refreshes backend data just in case it had became obsolete after last refresh. If authentication still fails after refresh, authentication error occurs.
|
||||
|
||||
Close sequence starts from mysql_client.c:gw_read_client_event where
|
||||
|
||||
1. session state is set to SESSION_STATE_STOPPING
|
||||
|
||||
2. dcb_close is called for client DCB
|
||||
|
||||
1. client DCB is removed from epoll set and state is set to DCB_STATE_NOPOLLING
|
||||
|
||||
2. client protocol’s close is called (gw_client_close)
|
||||
|
||||
* protocol struct is done’d
|
||||
|
||||
* router’s closeSession is called (includes calling dcb_close for backends)
|
||||
|
||||
3. dcb_call_callback is called for client DCB with DCB_REASON_CLOSE
|
||||
|
||||
4. client DCB is set to zombies list
|
||||
|
||||
Each call for dcb_close in closeSession repeat steps 2a-d.
|
||||
|
||||
## Routing errors
|
||||
|
||||
### Invalid capabilities returned by router
|
||||
|
||||
When client protocol module receives query from client the protocol state is (typically) MYSQL_IDLE. The protocol state is checked in mysql_client.c:gw_read_client_event. First place where a hard error may occur is when router capabilities are read. If router response is invalid (other than RCAP_TYPE_PACKET_INPUT and RCAP_TYPE_STMT_INPUT). In case of invalid return value from the router, error is logged, followed by session closing.
|
||||
|
||||
### Backend failure
|
||||
|
||||
When mysql_client.c:gw_read_client_event calls either route_by_statement or directly SESSION_ROUTE_QUERY script, which calls the routeQuery function of the head session’s router. routeQuery returns 1 if succeed, or 0 in case of error. Success here means that query was routed and reply will be sent to the client while error means that routing failed because of backend (server/servers/service) failure or because of side effect of backend failure.
|
||||
|
||||
In case of backend failure, error is replied to client and handleError is called to resolve backend problem. handleError is called with action ERRACT_NEW_CONNECTION which tells to error handler that it should try to find a replacement for failed backend. Handler will return true if there are enough backend servers for session’s needs. If handler returns false it means that session can’t continue processing further queries and will be closed. Client will be sent an error message and dcb_close is called for client DCB.
|
||||
|
||||
Close sequence is similar to that described above from phase #2 onward.
|
||||
|
||||
Reasons for "backend failure" in rwsplit:
|
||||
|
||||
* router has rses_closed == true because other thread has detected failure and started to close session
|
||||
|
||||
* master has disappeared; demoted to slave, for example
|
||||
|
||||
### Router error
|
||||
|
||||
In cases where SESSION_ROUTE_QUERY has returned successfully (=1) query may not be successfully processed in backend or even sent to it. It is posible that router fails in routing the particular query but there is no such error which would prevent session from continuing. In this case router handles error silently by creating and adding MySQL error to first available backend’s (incoming) eventqueue where it is found and sent to client (clientReply).
|
||||
|
1355
Documentation/Reference/MaxAdmin.md
Normal file
658
Documentation/Reference/MaxScale-HA-with-Corosync-Pacemaker.md
Normal file
@ -0,0 +1,658 @@
|
||||
How to make MaxScale High Available
|
||||
|
||||
Corosync/Pacemaker setup
|
||||
|
||||
& MaxScale init script
|
||||
|
||||
Massimiliano Pinto
|
||||
|
||||
Last Updated: 4th August 2014
|
||||
|
||||
# Contents
|
||||
|
||||
[Contents](#heading=h.myvf4p2ngdc5)
|
||||
|
||||
[Overview](#heading=h.92d1rpk8nyx4)
|
||||
|
||||
[Clustering Software installation](#heading=h.c1l0xy6aynl7)
|
||||
|
||||
[MaxScale init script](#heading=h.cfb6xvv8fu1n)
|
||||
|
||||
[Configure MaxScale for HA](#heading=h.qk4cgmtiugm0)
|
||||
|
||||
[Use case: failed resource is restarted](#heading=h.3fszf28iz3m5)
|
||||
|
||||
[Use case: failed resource migration on a node is started in another one](#heading=h.erqw535ttk7l)
|
||||
|
||||
[Add a Virtual IP (VIP) to the cluster](#heading=h.vzslsgvxjyug)
|
||||
|
||||
# Overview
|
||||
|
||||
The document shows an example of a Pacemaker / Corosync setup with MaxScale based on Linux Centos 6.5, using three virtual servers and unicast heartbeat mode with the following minimum requirements:
|
||||
|
||||
- MaxScale process is started/stopped and monitored via /etc/init.d/maxscale script that is LSB compatible in order to be managed by Pacemaker resource manager
|
||||
|
||||
- A Virtual IP is set providing the access to the MaxScale process that could be set to one of the cluster nodes
|
||||
|
||||
- Pacemaker/Corosync and crmsh command line tool basic knowledge
|
||||
|
||||
Please note the solution is a quick setup example that may not be suited for all production environments.
|
||||
|
||||
# Clustering Software installation
|
||||
|
||||
On each node in the cluster do the following steps:
|
||||
|
||||
(1) Add clustering repos to yum
|
||||
|
||||
# vi /etc/yum.repos.d/ha-clustering.repo
|
||||
|
||||
Add the following to the file
|
||||
|
||||
[haclustering]
|
||||
|
||||
name=HA Clustering
|
||||
|
||||
baseurl=http://download.opensuse.org/repositories/network:/ha-clustering:/Stable/CentOS_CentOS-6/
|
||||
|
||||
enabled=1
|
||||
|
||||
gpgcheck=0
|
||||
|
||||
(2) Install the software
|
||||
|
||||
# yum install pacemaker corosync crmsh
|
||||
|
||||
Package versions used
|
||||
|
||||
Package **pacemaker**-1.1.10-14.el6_5.3.x86_64
|
||||
|
||||
Package **corosync**-1.4.5-2.4.x86_64
|
||||
|
||||
Package **crmsh**-2.0+git46-1.1.x86_64
|
||||
|
||||
(3) Assign hostname on each node
|
||||
|
||||
|
||||
|
||||
In this example the three names used for the nodes are:
|
||||
|
||||
**node1, node2, node3**
|
||||
|
||||
# hostname **node1**
|
||||
|
||||
...
|
||||
|
||||
# hostname nodeN
|
||||
|
||||
(4) For each node add server names in /etc/hosts
|
||||
|
||||
[root@node3 ~]# vi /etc/hosts
|
||||
|
||||
10.74.14.39 node1
|
||||
|
||||
10.228.103.72 node2
|
||||
|
||||
10.35.15.26 node3 current-node
|
||||
|
||||
[root@node1 ~]# vi /etc/hosts
|
||||
|
||||
10.74.14.39 node1 current-node
|
||||
|
||||
10.228.103.72 node2
|
||||
|
||||
10.35.15.26 node3
|
||||
|
||||
...
|
||||
|
||||
**Please note**: add **current-node** as an alias for the current node in each of the /etc/hosts files.
|
||||
|
||||
(5) Prepare authkey for optional cryptographic use
|
||||
|
||||
On one of the nodes, say node2 run the corosync-keygen utility and follow
|
||||
|
||||
[root@node2 ~]# corosync-keygen
|
||||
|
||||
Corosync Cluster Engine Authentication key generator.
Gathering 1024 bits for key from /dev/random.
Press keys on your keyboard to generate entropy.
|
||||
|
||||
After completion the key will be found in /etc/corosync/authkey.
|
||||
|
||||
(6) Prepare the corosync configuration file
|
||||
|
||||
Using node2 as an example:
|
||||
|
||||
[root@node2 ~]# vi /etc/corosync/corosync.conf
|
||||
|
||||
Add the following to the file:
|
||||
|
||||
# Please read the corosync.conf.5 manual page
|
||||
|
||||
compatibility: whitetank
|
||||
|
||||
totem {
|
||||
|
||||
version: 2
|
||||
|
||||
secauth: off
|
||||
|
||||
interface {
|
||||
|
||||
member {
|
||||
|
||||
memberaddr: node1
|
||||
|
||||
}
|
||||
|
||||
member {
|
||||
|
||||
memberaddr: node2
|
||||
|
||||
}
|
||||
|
||||
member {
|
||||
|
||||
memberaddr: node3
|
||||
|
||||
}
|
||||
|
||||
ringnumber: 0
|
||||
|
||||
bindnetaddr: current-node
|
||||
|
||||
mcastport: 5405
|
||||
|
||||
ttl: 1
|
||||
|
||||
}
|
||||
|
||||
transport: udpu
|
||||
|
||||
}
|
||||
|
||||
logging {
|
||||
|
||||
fileline: off
|
||||
|
||||
to_logfile: yes
|
||||
|
||||
to_syslog: yes
|
||||
|
||||
logfile: /var/log/cluster/corosync.log
|
||||
|
||||
debug: off
|
||||
|
||||
timestamp: on
|
||||
|
||||
logger_subsys {
|
||||
|
||||
subsys: AMF
|
||||
|
||||
debug: off
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
# this will start Pacemaker processes
|
||||
|
||||
service {
|
||||
|
||||
ver: 0
|
||||
|
||||
name: pacemaker
|
||||
|
||||
}
|
||||
|
||||
**Please note** in this example:
|
||||
|
||||
- unicast UDP is used
|
||||
|
||||
- bindnetaddr for corosync process is current-node, that has the right value on each node due to the alias added in /etc/hosts above
|
||||
|
||||
- Pacemaker processes are started by the corosync daemon, so there is no need to launch it via /etc/init.d/pacemaker start
|
||||
|
||||
(7) copy configuration files and auth key on each of the other nodes
|
||||
|
||||
[root@node2 ~]# scp /etc/corosync/* root@node1:/etc/corosync/
|
||||
|
||||
[root@node2 ~]# scp /etc/corosync/* root@nodeN:/etc/corosync/
|
||||
|
||||
...
|
||||
|
||||
(8) Corosync needs port 5405 to be opened:
|
||||
|
||||
- configure any firewall or iptables accordingly
|
||||
|
||||
For a quick start just disable iptables on each nodes:
|
||||
|
||||
[root@node2 ~]# service iptables stop
|
||||
|
||||
…
|
||||
|
||||
[root@nodeN ~]# service iptables stop
|
||||
|
||||
(9) Start Corosync on each node:
|
||||
|
||||
[root@node2 ~] #/etc/init.d/corosync start
|
||||
|
||||
…
|
||||
|
||||
[root@nodeN ~] #/etc/init.d/corosync start
|
||||
|
||||
and check the corosync daemon is successfully bound to port 5405:
|
||||
|
||||
[root@node2 ~] #netstat -na | grep 5405
|
||||
|
||||
udp 0 0 10.228.103.72:5405 0.0.0.0:*
|
||||
|
||||
Check if other nodes are reachable with nc utility and option UDP (-u):
|
||||
|
||||
[root@node2 ~] #echo "check ..." | nc -u node1 5405
|
||||
|
||||
[root@node2 ~] #echo "check ..." | nc -u node3 5405
|
||||
|
||||
...
|
||||
|
||||
[root@node1 ~] #echo "check ..." | nc -u node2 5405
|
||||
|
||||
[root@node1 ~] #echo "check ..." | nc -u node3 5405
|
||||
|
||||
…
|
||||
|
||||
If the following message is displayed
|
||||
|
||||
**nc: Write error: Connection refused**
|
||||
|
||||
There is an issue with communication between the nodes, this is most likely to be an issue with the firewall configuration on your nodes. Check and resolve issues with your firewall configuration.
|
||||
|
||||
(10) Check the cluster status, from any node
|
||||
|
||||
[root@node3 ~]# crm status
|
||||
|
||||
After a while this will be the output:
|
||||
|
||||
[root@node3 ~]# crm status
|
||||
|
||||
Last updated: Mon Jun 30 12:47:53 2014
|
||||
|
||||
Last change: Mon Jun 30 12:47:39 2014 via crmd on node2
|
||||
|
||||
Stack: classic openais (with plugin)
|
||||
|
||||
Current DC: node2 - partition with quorum
|
||||
|
||||
Version: 1.1.10-14.el6_5.3-368c726
|
||||
|
||||
3 Nodes configured, 3 expected votes
|
||||
|
||||
0 Resources configured
|
||||
|
||||
Online: [ node1 node2 node3 ]
|
||||
|
||||
For the basic setup disable the following properties:
|
||||
|
||||
- stonith
|
||||
|
||||
- quorum policy
|
||||
|
||||
[root@node3 ~]# crm configure property 'stonith-enabled'='false'
|
||||
|
||||
[root@node3 ~]# crm configure property 'no-quorum-policy'='ignore'
|
||||
|
||||
For more information see:
|
||||
|
||||
[http://www.clusterlabs.org/doc/crm_fencing.html](http://www.clusterlabs.org/doc/crm_fencing.html)
|
||||
|
||||
[http://clusterlabs.org/doc/](http://clusterlabs.org/doc/)
|
||||
|
||||
The configuration is automatically updated on every node:
|
||||
|
||||
Check it from another node, say node1
|
||||
|
||||
[root@node1 ~]# crm configure show
|
||||
|
||||
node node1
|
||||
|
||||
node node2
|
||||
|
||||
node node3
|
||||
|
||||
property cib-bootstrap-options: \
|
||||
|
||||
dc-version=1.1.10-14.el6_5.3-368c726 \
|
||||
|
||||
cluster-infrastructure="classic openais (with plugin)" \
|
||||
|
||||
expected-quorum-votes=3 \
|
||||
|
||||
stonith-enabled=false \
|
||||
|
||||
no-quorum-policy=ignore \
|
||||
|
||||
placement-strategy=balanced \
|
||||
|
||||
default-resource-stickiness=infinity
|
||||
|
||||
The Corosync / Pacemaker cluster is ready to be configured to manage resources.
|
||||
|
||||
# MaxScale init script /etc/init.d/maxscale
|
||||
|
||||
The MaxScale /etc/init.d/maxscale script allows you to start/stop/restart and monitor the MaxScale process running in the system.
|
||||
|
||||
Edit it and modify the **MAXSCALE_BASEDIR** to match the installation directory you choose when you installed MaxScale.
|
||||
|
||||
**Note**:
|
||||
|
||||
It could be necessary to modify other variables, such as
|
||||
|
||||
MAXSCALE_BIN, MAXSCALE_HOME, MAXSCALE_PIDFILE and LD_LIBRARY_PATH for a non standard setup.
|
||||
|
||||
[root@node1 ~]# /etc/init.d/maxscale
|
||||
|
||||
Usage: /etc/init.d/maxscale {start|stop|status|restart|condrestart|reload}
|
||||
|
||||
- Start
|
||||
|
||||
[root@node1 ~]# /etc/init.d/maxscale start
|
||||
|
||||
Starting MaxScale: maxscale (pid 25892) is running... [ OK ]
|
||||
|
||||
- Start again
|
||||
|
||||
[root@node1 ~]# /etc/init.d/maxscale start
|
||||
|
||||
Starting MaxScale: found maxscale (pid 25892) is running.[ OK ]
|
||||
|
||||
- Stop
|
||||
|
||||
[root@node1 ~]# /etc/init.d/maxscale stop
|
||||
|
||||
Stopping MaxScale: [ OK ]
|
||||
|
||||
- Stop again
|
||||
|
||||
[root@node1 ~]# /etc/init.d/maxscale stop
|
||||
|
||||
Stopping MaxScale: [FAILED]
|
||||
|
||||
- Status (MaxScale not running)
|
||||
|
||||
[root@node1 ~]# /etc/init.d/maxscale status
|
||||
|
||||
MaxScale is stopped [FAILED]
|
||||
|
||||
The script exit code for "status" is 3
|
||||
|
||||
- Status (MaxScale is running)
|
||||
|
||||
[root@node1 ~]# /etc/init.d/maxscale status
|
||||
|
||||
Checking MaxScale status: MaxScale (pid 25953) is running.[ OK ]
|
||||
|
||||
The script exit code for "status" is 0
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
Note: the MaxScale script is LSB compatible and returns the proper exit code for each action:
|
||||
|
||||
For more information:
|
||||
|
||||
[http://www.linux-ha.org/wiki/LSB_Resource_Agents](http://www.linux-ha.org/wiki/LSB_Resource_Agents)
|
||||
|
||||
After checking that MaxScale is correctly managed by the /etc/init.d/maxscale script it is possible to configure MaxScale HA via Pacemaker.
|
||||
|
||||
# Configure MaxScale for HA with Pacemaker
|
||||
|
||||
[root@node2 ~]# crm configure primitive MaxScale lsb:maxscale \
|
||||
|
||||
op monitor interval="10s" timeout="15s" \
|
||||
|
||||
op start interval="0" timeout="15s" \
|
||||
|
||||
op stop interval="0" timeout="30s"
|
||||
|
||||
MaxScale resource will be started:
|
||||
|
||||
[root@node2 ~]# crm status
|
||||
|
||||
Last updated: Mon Jun 30 13:15:34 2014
|
||||
|
||||
Last change: Mon Jun 30 13:15:28 2014 via cibadmin on node2
|
||||
|
||||
Stack: classic openais (with plugin)
|
||||
|
||||
Current DC: node2 - partition with quorum
|
||||
|
||||
Version: 1.1.10-14.el6_5.3-368c726
|
||||
|
||||
3 Nodes configured, 3 expected votes
|
||||
|
||||
1 Resources configured
|
||||
|
||||
Online: [ node1 node2 node3 ]
|
||||
|
||||
MaxScale (lsb:maxscale): Started node1
|
||||
|
||||
Basic use cases:
|
||||
|
||||
# 1. Resource restarted after a failure:
|
||||
|
||||
MaxScale Pid is, $MAXSCALE_PIDFILE=$MAXSCALE_HOME/log/maxscale.pid
|
||||
|
||||
In the example is 26114, kill the process immediately:
|
||||
|
||||
[root@node2 ~]# kill -9 26114
|
||||
|
||||
[root@node2 ~]# crm status
|
||||
|
||||
Last updated: Mon Jun 30 13:16:11 2014
|
||||
|
||||
Last change: Mon Jun 30 13:15:28 2014 via cibadmin on node2
|
||||
|
||||
Stack: classic openais (with plugin)
|
||||
|
||||
Current DC: node2 - partition with quorum
|
||||
|
||||
Version: 1.1.10-14.el6_5.3-368c726
|
||||
|
||||
3 Nodes configured, 3 expected votes
|
||||
|
||||
1 Resources configured
|
||||
|
||||
Online: [ node1 node2 node3 ]
|
||||
|
||||
Failed actions:
|
||||
|
||||
MaxScale_monitor_15000 on node1 'not running' (7): call=19, status=complete, last-rc-change='Mon Jun 30 13:16:14 2014', queued=0ms, exec=0ms
|
||||
|
||||
**Note** the **MaxScale_monitor** failed action
|
||||
|
||||
After a few seconds it will be started again:
|
||||
|
||||
[root@node2 ~]# crm status
|
||||
|
||||
Last updated: Mon Jun 30 13:21:12 2014
|
||||
|
||||
Last change: Mon Jun 30 13:15:28 2014 via cibadmin on node1
|
||||
|
||||
Stack: classic openais (with plugin)
|
||||
|
||||
Current DC: node2 - partition with quorum
|
||||
|
||||
Version: 1.1.10-14.el6_5.3-368c726
|
||||
|
||||
3 Nodes configured, 3 expected votes
|
||||
|
||||
1 Resources configured
|
||||
|
||||
Online: [ node1 node2 node3 ]
|
||||
|
||||
MaxScale (lsb:maxscale): Started node1
|
||||
|
||||
# 2. The resource cannot be migrated to node1 for a failure:
|
||||
|
||||
First, migrate the resource to another node, say node3
|
||||
|
||||
[root@node1 ~]# crm resource migrate MaxScale node3
|
||||
|
||||
...
|
||||
|
||||
Online: [ node1 node2 node3 ]
|
||||
|
||||
Failed actions:
|
||||
|
||||
MaxScale_start_0 on node1 'not running' (7): call=76, status=complete, last-rc-change='Mon Jun 30 13:31:17 2014', queued=2015ms, exec=0ms
|
||||
|
||||
Note the **MaxScale_start** failed action on node1, and after a few seconds
|
||||
|
||||
[root@node3 ~]# crm status
|
||||
|
||||
Last updated: Mon Jun 30 13:35:00 2014
|
||||
|
||||
Last change: Mon Jun 30 13:31:13 2014 via crm_resource on node3
|
||||
|
||||
Stack: classic openais (with plugin)
|
||||
|
||||
Current DC: node2 - partition with quorum
|
||||
|
||||
Version: 1.1.10-14.el6_5.3-368c726
|
||||
|
||||
3 Nodes configured, 3 expected votes
|
||||
|
||||
1 Resources configured
|
||||
|
||||
Online: [ node1 node2 node3 ]
|
||||
|
||||
MaxScale (lsb:maxscale): Started node2
|
||||
|
||||
Failed actions:
|
||||
|
||||
MaxScale_start_0 on node1 'not running' (7): call=76, status=complete, last-rc-change='Mon Jun 30 13:31:17 2014', queued=2015ms, exec=0ms
|
||||
|
||||
MaxScale has been successfully started on a new node: node2.
|
||||
|
||||
**Note**: Failed actions remain in the output of crm status.
|
||||
|
||||
With "crm resource cleanup MaxScale" it is possible to clean up the messages:
|
||||
|
||||
[root@node1 ~]# crm resource cleanup MaxScale
|
||||
|
||||
Cleaning up MaxScale on node1
|
||||
|
||||
Cleaning up MaxScale on node2
|
||||
|
||||
Cleaning up MaxScale on node3
|
||||
|
||||
The cleaned status is visible from other nodes as well:
|
||||
|
||||
[root@node2 ~]# crm status
|
||||
|
||||
Last updated: Mon Jun 30 13:38:18 2014
|
||||
|
||||
Last change: Mon Jun 30 13:38:17 2014 via crmd on node3
|
||||
|
||||
Stack: classic openais (with plugin)
|
||||
|
||||
Current DC: node2 - partition with quorum
|
||||
|
||||
Version: 1.1.10-14.el6_5.3-368c726
|
||||
|
||||
3 Nodes configured, 3 expected votes
|
||||
|
||||
1 Resources configured
|
||||
|
||||
Online: [ node1 node2 node3 ]
|
||||
|
||||
MaxScale (lsb:maxscale): Started node2
|
||||
|
||||
# Add a Virtual IP (VIP) to the cluster
|
||||
|
||||
It’s possible to add a virtual IP to the cluster:
|
||||
|
||||
The MaxScale process will only be contacted via this IP, which may move across nodes together with the maxscale process.
|
||||
|
||||
Setup is very easy:
|
||||
|
||||
assuming an additional IP address is available and can be added to one of the nodes, this is the new configuration to add:
|
||||
|
||||
[root@node2 ~]# crm configure primitive maxscale_vip ocf:heartbeat:IPaddr2 params ip=192.168.122.125 op monitor interval=10s
|
||||
|
||||
|
||||
|
||||
MaxScale process and the VIP must be run in the same node, so it’s mandatory to add to the configuration the group ‘maxscale_service’.
|
||||
|
||||
[root@node2 ~]# crm configure group maxscale_service maxscale_vip MaxScale
|
||||
|
||||
The final configuration is, from another node:
|
||||
|
||||
[root@node3 ~]# crm configure show
|
||||
|
||||
node node1
|
||||
|
||||
node node2
|
||||
|
||||
node node3
|
||||
|
||||
primitive MaxScale lsb:maxscale \
|
||||
|
||||
op monitor interval=15s timeout=10s \
|
||||
|
||||
op start interval=0 timeout=15s \
|
||||
|
||||
op stop interval=0 timeout=30s
|
||||
|
||||
primitive maxscale_vip IPaddr2 \
|
||||
|
||||
params ip=192.168.122.125 \
|
||||
|
||||
op monitor interval=10s
|
||||
|
||||
group maxscale_service maxscale_vip MaxScale \
|
||||
|
||||
meta target-role=Started
|
||||
|
||||
property cib-bootstrap-options: \
|
||||
|
||||
dc-version=1.1.10-14.el6_5.3-368c726 \
|
||||
|
||||
cluster-infrastructure="classic openais (with plugin)" \
|
||||
|
||||
expected-quorum-votes=3 \
|
||||
|
||||
stonith-enabled=false \
|
||||
|
||||
no-quorum-policy=ignore \
|
||||
|
||||
placement-strategy=balanced \
|
||||
|
||||
last-lrm-refresh=1404125486
|
||||
|
||||
Check the resource status:
|
||||
|
||||
[root@node1 ~]# crm status
|
||||
|
||||
Last updated: Mon Jun 30 13:51:29 2014
|
||||
|
||||
Last change: Mon Jun 30 13:51:27 2014 via crmd on node1
|
||||
|
||||
Stack: classic openais (with plugin)
|
||||
|
||||
Current DC: node2 - partition with quorum
|
||||
|
||||
Version: 1.1.10-14.el6_5.3-368c726
|
||||
|
||||
3 Nodes configured, 3 expected votes
|
||||
|
||||
2 Resources configured
|
||||
|
||||
Online: [ node1 node2 node3 ]
|
||||
|
||||
Resource Group: maxscale_service
|
||||
|
||||
maxscale_vip (ocf::heartbeat:IPaddr2): Started node2
|
||||
|
||||
MaxScale (lsb:maxscale): Started node2
|
||||
|
||||
With both resources on node2, now MaxScale service will be reachable via the configured VIP address 192.168.122.125
|
||||
|
BIN
Documentation/Reference/images/image_0.png
Normal file
After Width: | Height: | Size: 38 KiB |
336
Documentation/Release-Notes/MaxScale-0.5-Release-Notes.md
Normal file
@ -0,0 +1,336 @@
|
||||
MaxScale Release Notes
|
||||
|
||||
0.5 Alpha
|
||||
|
||||
This document details the changes in version 0.5 since the release of the 0.4 alpha of the MaxScale product.
|
||||
|
||||
# New Features
|
||||
|
||||
## Read/Write Splitter Routing Module
|
||||
|
||||
In previous versions the read/write splitter routing module has had a number of limitations on it use, in the alpha release the router now removes the most important restrictions.
|
||||
|
||||
### Session Commands
|
||||
|
||||
Session commands are those statements that make some change to the user’s login session that may cause different effects from subsequent statements executed. Since the read/write splitter executes statements on either a master server or a slave server, depending upon the statement to execute, it is important that these session modifications are executed on all connections to both slave and master servers. This is resolved in release 0.5 such that session modification commands are executed on all active connections and a single return is forward back to the client that made the request.
|
||||
|
||||
### Transaction Support
|
||||
|
||||
Transaction support has been added into this version of the read/write splitter, there is one known outstanding limitation. If autocommit is enabled inside an active transaction it is not considered as commit in read/write splitter. Once a transaction has started all statements are routed to a master until the transaction is committed or rolled back.
|
||||
|
||||
## Authentication
|
||||
|
||||
A number of issues and shortcomings in the authentication performed by MaxScale have been resolved by this release.
|
||||
|
||||
### Host Considered in Authentication
|
||||
|
||||
Previously MaxScale did not follow the same rules as MySQL when authenticating a login request, it would always use the wildcard password entries and would not check the incoming host was allowed to connect. MaxScale now checks the incoming IP address for a connection request and verifies this against the authentication data loaded from the backend servers. The same rules are applied when choosing the password entry to authenticate with. Note however that authentication from MaxScale to the backend database will fail if the MaxScale host is not allowed to login using the matching password for the user.
|
||||
|
||||
### Stale Authentication Data
|
||||
|
||||
In previous releases of MaxScale the authentication data would be read at startup time only and would not be refreshed. Therefore if a user was added or modified in the backend server this will not be picked up by MaxScale and that user would be unable to connect via MaxScale. MaxScale now reloads user authentication data when a failure occurs and will refresh its internal tables if the data has changed in the backend. Please note that this reload process is rate limited to prevent incorrect logins to MaxScale being used for a denial of service attack on the backend servers.
|
||||
|
||||
### Enable Use Of "root" User
|
||||
|
||||
Previously MaxScale would prevent the use of the root user to login to the backend servers via MaxScale. This may be enabled on a per service basis by adding an "enable_root_user" options in the service entry to enable it in the MaxScale configuration file. This allows the use of root to be controlled on a per service basis.
|
||||
|
||||
## Network Support
|
||||
|
||||
### Unix Domain Sockets
|
||||
|
||||
MaxScale now supports Unix domain sockets for connecting to a local MaxScale server. The use of a Unix domain socket is controlled by adding a "socket" entry in the listener configuration entry for a service.
|
||||
|
||||
### Network Interface Binding
|
||||
|
||||
MaxScale has added the ability to bind a listener for a service to a network address via an "address" entry in the configuration file.
|
||||
|
||||
# Server Version
|
||||
|
||||
The server version reported when connected to a database via MaxScale has now been altered. This now shows the MaxScale name and version together with the backend server name. An example of this can be seen below for the 0.5 release.
|
||||
|
||||
-bash-4.1$ mysql -h 127.0.0.1 -P 4006 -uxxxx -pxxxx
Welcome to the MariaDB monitor. Commands end with ; or \g.
Your MySQL connection id is 22320
Server version: MaxScale 0.5.0 MariaDB Server
Copyright (c) 2000, 2012, Oracle, Monty Program Ab and others.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
MySQL [(none)]> \s
--------------
mysql Ver 15.1 Distrib 5.5.28a-MariaDB, for Linux (i686) using readline 5.1
...
Server: MySQL
Server version: MaxScale 0.5.0 MariaDB Server
...
--------------
MySQL [(none)]>
|
||||
|
||||
# Bug Fixes
|
||||
|
||||
A number of bug fixes have been applied between the 0.4 alpha and this alpha release. The table below lists the bugs that have been resolved. The details for each of these may be found in bugs.skysql.com.
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<td>ID</td>
|
||||
<td>Summary</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>141</td>
|
||||
<td>No "delete user" command in debugcli</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>175</td>
|
||||
<td>Buffer leak in dcb_read from Coverity run</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>178</td>
|
||||
<td>Uninitialised variables from Coverity run</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>179</td>
|
||||
<td>open with O_CREAT in second argument needs 3 arguments</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>363</td>
|
||||
<td>simple_mutex "name" memory handling ...</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>126</td>
|
||||
<td>"reload config" in debug interface causes maxscale server to segfault</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>149</td>
|
||||
<td>It is possible to delete all maxscale users</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>218</td>
|
||||
<td>there is no way to understand what is going on if MAXSCALE_HOME is incorrect</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>137</td>
|
||||
<td>"show users" and "reload users" refer to very different things in debugcli</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>154</td>
|
||||
<td>readwritesplit does not use router_options</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>160</td>
|
||||
<td>telnetd leaks memory</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>169</td>
|
||||
<td>Galera monitor is actually never compiled ....</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>172</td>
|
||||
<td>Several compile errors in galera_mon.c</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>174</td>
|
||||
<td>Resource leak in server.c</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>176</td>
|
||||
<td>Resource leak in gw_utils.c</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>362</td>
|
||||
<td>possible datadir_cleanup() problems ...</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>124</td>
|
||||
<td>readconnroute does not validate router_options</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>153</td>
|
||||
<td>MaxScale fails when max connections are exceeded</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>133</td>
|
||||
<td>MaxScale leaves lots of "data<pid>" directories sitting around $MAXSCALE_HOME</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>166</td>
|
||||
<td>readwritesplit causes MaxScale segfault when starting up</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>207</td>
|
||||
<td>Quitting telnet session causes maxscale to fail</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>161</td>
|
||||
<td>Memory leak in load_mysql_users.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>177</td>
|
||||
<td>Resource leak in secrets.c</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>182</td>
|
||||
<td>On Startup logfiles are empty</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>135</td>
|
||||
<td>MaxScale unsafely handles empty passwords in getUsers</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>145</td>
|
||||
<td>.secret file for encrypted passwords cyclicly searched</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>171</td>
|
||||
<td>ifndef logic in build_gateway.inc doesn't work, MARIADB_SRC_PATH from env not picked up</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>173</td>
|
||||
<td>Resource leak in adminusers.c found by Coverity</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>376</td>
|
||||
<td>Confusing Server Version</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>370</td>
|
||||
<td>maxscale binary returns zero exit status on failures</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>150</td>
|
||||
<td>telnetd listener should bind to 127.0.0.1 by default</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>152</td>
|
||||
<td>listener configuration should support bind address</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>373</td>
|
||||
<td>Documentation: it's not clear what privileges the maxscale user needs</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>128</td>
|
||||
<td>Maxscale prints debug information to terminal session when run in background</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>129</td>
|
||||
<td>MaxScale refuses to connect to server and reports nonsense error as a result</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>147</td>
|
||||
<td>Maxscale's hashtable fails to handle deletion of entries.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>148</td>
|
||||
<td>users data structure's stats have incorrect values.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>384</td>
|
||||
<td>MaxScale crashes if backend authentication fails</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>210</td>
|
||||
<td>Bad timing in freeing readconnrouter's dcbs cause maxscale crash</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>403</td>
|
||||
<td>gwbuf_free doesn't protect freeing shared buffer</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>371</td>
|
||||
<td>If router module load fails, MaxScale goes to inifinite loop</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>385</td>
|
||||
<td>MaxScale (DEBUG-version) dasserts if backend dcb is closed in the middle of client dcb performing close_dcb</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>386</td>
|
||||
<td>Starting MaxScale with -c pointing at existing file causes erroneous behavior</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>209</td>
|
||||
<td>Error in backend hangs client connection</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>194</td>
|
||||
<td>maxscale crashes at start if module load fails</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>369</td>
|
||||
<td>typo in "QUERY_TYPE_UNKNWON"</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>163</td>
|
||||
<td>MaxScale crashes with multiple threads</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>162</td>
|
||||
<td>threads parameter in configuration file is not effective</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>400</td>
|
||||
<td>hastable_get_stats returns value of uninitialized value in 'nelems'</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>212</td>
|
||||
<td>Failing write causes maxscale to fail</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>222</td>
|
||||
<td>Double freeing mutex corrupts log</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>208</td>
|
||||
<td>current_connection_count is decreased multiple times per session, thus breaking load balancing logic</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>378</td>
|
||||
<td>Misspelling maxscale section name in config file crashes maxscale</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>399</td>
|
||||
<td>Every row in log starts with 0x0A00</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>205</td>
|
||||
<td>MaxScale crashes due SEGFAULT because return value of dcb_read is not checked</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>220</td>
|
||||
<td>Maxscale crash if socket listening fails in startup</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>372</td>
|
||||
<td>Log manager hangs MaxScale if log string (mostly query length) exceeds block size</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>397</td>
|
||||
<td>Free of uninitialised pointer if MAXSCALE_HOME is not set</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>402</td>
|
||||
<td>gw_decode_mysql_server_handshake asserts with mysql 5.1 backend</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>345</td>
|
||||
<td>MaxScale don't find backend servers if they are started after MaxScale</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>406</td>
|
||||
<td>Memory leak in dcb_alloc()</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>360</td>
|
||||
<td>MaxScale passwd option</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>151</td>
|
||||
<td>Get parse_sql failed on array INSERT</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>216</td>
|
||||
<td>Backend error handling doesn't update server's connection counter</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>127</td>
|
||||
<td>MaxScale should handle out-of-date backend auth data more gracefully</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>146</td>
|
||||
<td>"show dbusers" argument not documented</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>125</td>
|
||||
<td>readconnroute causes maxscale server crash if no slaves are available</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>375</td>
|
||||
<td>Tarball contains UID and maxscale base dir</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
|
31
Documentation/Release-Notes/MaxScale-0.6-Release-Notes.md
Normal file
@ -0,0 +1,31 @@
|
||||
MaxScale Release Notes
|
||||
|
||||
0.6 Alpha
|
||||
|
||||
This document details the changes in version 0.6 since the release of the 0.5 alpha of the MaxScale product. The 0.6 version is merely a set of bug fixes based on the previous 0.5 version.
|
||||
|
||||
# Bug Fixes
|
||||
|
||||
A number of bug fixes have been applied between the 0.5 alpha and this alpha release. The table below lists the bugs that have been resolved. The details for each of these may be found in bugs.skysql.com.
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<td>ID</td>
|
||||
<td>Summary</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>423</td>
|
||||
<td>The new "version_string" parameter has been added to service section.
|
||||
This allows a specific version string to be set for each service, this version string is used in the MySQL handshake from MaxScale to clients and is reported as the server version to clients.
|
||||
|
||||
The version_string is optional, the default value will be taken from the embedded MariaDB library which supplies the parser to MaxScale.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>418</td>
|
||||
<td>Statements are not routed to master if a transaction is started implicitly by setting autocommit=0. In such cases statements were previously routed as if they were not part of a transaction.
|
||||
|
||||
This fix changes the behavior so that if autocommit is disabled, all statements are routed to the master and, in the case of session variable updates, to both master and slave.</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
|
158
Documentation/Release-Notes/MaxScale-0.7-Release-Notes.md
Normal file
@ -0,0 +1,158 @@
|
||||
MaxScale Release Notes
|
||||
|
||||
0.7 Alpha
|
||||
|
||||
This document details the changes in version 0.7 since the release of the 0.6 alpha of the MaxScale product.
|
||||
|
||||
# New Features
|
||||
|
||||
## Galera Support
|
||||
|
||||
Enhanced support for Galera cluster to allow Galera to be used as a Highly Available Cluster with no write contention between the nodes.
|
||||
|
||||
MaxScale will control access to a Galera Cluster such that one node is designated as the master node to which all write operations will be sent. Read operations will be sent to any of the remaining nodes that are part of the cluster. Should the currently elected master node fail MaxScale will automatically promote one of the remaining nodes to become the new master node.
|
||||
|
||||
## Multiple Slave Connections
|
||||
|
||||
The Read/Write Split query router has been enhanced to allow multiple slaves connections to be created. The number of slave connections is configurable via a parameter in the MaxScale configuration file.
|
||||
|
||||
Adding multiple connections allows for better load balancing between the slaves and is a pre-requisite for providing improved fault tolerance within the Read/Write Splitter. The selection of which slave to use for a particular read operation can be controlled via options in the router configuration.
|
||||
|
||||
## Debug Interface Enhancements
|
||||
|
||||
A number of new list commands have been added to the debug interface to allow more concise tabular output of certain object types within the interface.
|
||||
|
||||
**MaxScale>** help list
|
||||
|
||||
Available options to the list command:
|
||||
|
||||
filters List all the filters defined within MaxScale
|
||||
|
||||
listeners List all the listeners defined within MaxScale
|
||||
|
||||
modules Show all currently loaded modules
|
||||
|
||||
services List all the services defined within MaxScale
|
||||
|
||||
servers List all the servers defined within MaxScale
|
||||
|
||||
sessions List all the active sessions within MaxScale
|
||||
|
||||
**MaxScale>**
|
||||
|
||||
Those objects that are defined in the configuration file can now be referenced by the names used in the configuration file rather than by using memory addresses. This means that services, servers, monitors and filters can all now be referenced using meaningful names provided by the user. Internal objects such as DCB’s and sessions, which are not named in the configuration file still require the use of memory addresses.
|
||||
|
||||
Two modes of operation of the interface are now available, user mode and developer mode. The user mode restricts access to the features that allow arbitrary structures to be examined and checks all memory addresses for validity before allowing access.
|
||||
|
||||
## Maintenance Mode for Servers
|
||||
|
||||
MaxScale now provides a maintenance mode for servers, this mode allows servers to be set such that no new connections will be opened to that server. Also, servers in maintenance mode are not monitored by MaxScale. This allows an administrator to set a server into maintenance mode when it is required to be taken out of use. The connections will then diminish over time and since no new connections are created, the administrator can remove the node from use to perform some maintenance activities.
|
||||
|
||||
Nodes are placed into maintenance mode via the debug interface using the set server command.
|
||||
|
||||
**MaxScale>** set server datanode3 maintenance
|
||||
|
||||
Nodes are taken out of maintenance using the clear server command.
|
||||
|
||||
**MaxScale>** clear server datanode3 maintenance
|
||||
|
||||
## Configurable Monitoring Interval
|
||||
|
||||
All monitor plugins now provide a configuration parameter that can be set to control how frequently the MaxScale monitoring is performed.
|
||||
|
||||
## Replication Lag Heartbeat Monitor
|
||||
|
||||
The mysqlmon monitor module now implements a replication heartbeat protocol that is used to determine the lag between updates to the master and those updates being applied to the slave. This information is then made available to routing modules and may be used to determine if a particular slave node may be used or which slave node is most up to date.
|
||||
|
||||
## Filters API
|
||||
|
||||
The first phase of the filter API is available as part of this release. This provides filtering for the statements from the client application to the router. Filtering for the returned results has not yet been implemented and will be available in a future version.
|
||||
|
||||
Three example filters are including in the release
|
||||
|
||||
1. Statement counting Filter - a simple filter that counts the number of SQL statements executed within a session. Results may be viewed via the debug interface.
|
||||
|
||||
2. Query Logging Filter - a simple query logging filter that write all statements for a session into a log file for that session.
|
||||
|
||||
3. Query Rewrite Filter - an example of how filters can alter the query contents. This filter allows a regular expression to be defined, along with replacement text that should be substituted for every match of that regular expression.
|
||||
|
||||
## MariaDB 10 Replication Support
|
||||
|
||||
The mysqlmon monitor module has been updated to support the new syntax for show all slaves status in MariaDB in order to correctly determine the master and slave state of each server being monitored. Determination of MariaDB 10 is automatically performed by the monitor and no configuration is required.
|
||||
|
||||
## API Versioning
|
||||
|
||||
The module interface has been enhanced to allow the API version in use to be reported, along with the status of the module and a short description of the module. The status allows for differentiation of the release status of a plugin to be identified independently of the core of MaxScale. plugins may be designated as "in development", “alpha”, “beta” or “GA”.
|
||||
|
||||
**MaxScale>** list modules
|
||||
|
||||
Module Name | Module Type | Version | API | Status
|
||||
|
||||
----------------------------------------------------------------
|
||||
|
||||
regexfilter | Filter | V1.0.0 | 1.0.0 | Alpha
|
||||
|
||||
MySQLBackend | Protocol | V2.0.0 | 1.0.0 | Alpha
|
||||
|
||||
telnetd | Protocol | V1.0.1 | 1.0.0 | Alpha
|
||||
|
||||
MySQLClient | Protocol | V1.0.0 | 1.0.0 | Alpha
|
||||
|
||||
mysqlmon | Monitor | V1.2.0 | 1.0.0 | Alpha
|
||||
|
||||
readwritesplit | Router | V1.0.2 | 1.0.0 | Alpha
|
||||
|
||||
readconnroute | Router | V1.0.2 | 1.0.0 | Alpha
|
||||
|
||||
debugcli | Router | V1.1.1 | 1.0.0 | Alpha
|
||||
|
||||
**MaxScale>**
|
||||
|
||||
# Bug Fixes
|
||||
|
||||
A number of bug fixes have been applied between the 0.6 alpha and this alpha release. The table below lists the bugs that have been resolved. The details for each of these may be found in bugs.skysql.com.
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<td>ID</td>
|
||||
<td>Summary</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>443</td>
|
||||
<td>mysql/galera monitors hang when backend fails</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>424</td>
|
||||
<td>Read/Write Splitter closes connection without sending COM_QUIT</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>438</td>
|
||||
<td>Internal thread deadlock</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>436</td>
|
||||
<td>Sessions in invalid state</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>359</td>
|
||||
<td>Router options for Read/Write Split module</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>435</td>
|
||||
<td>Some automated tests have invalid SQL syntax</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>431</td>
|
||||
<td>rwsplit.sh test script has incorrect bash syntax</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>425</td>
|
||||
<td>MaxScale crashes after prolonged use</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
|
||||
# Linking
|
||||
|
||||
Following reported issues with incompatibilities between MaxScale and the shared library used by MySQL this version of MaxScale will be statically linked with the MariaDB 5.5 embedded library that it requires. This library is used for internal purposes only and does not result in MaxScale support for other versions of MySQL or MariaDB being affected.
|
||||
|
124
Documentation/Release-Notes/MaxScale-1.0-Release-Notes.md
Normal file
@ -0,0 +1,124 @@
|
||||
MaxScale Release Notes
|
||||
|
||||
1.0 Beta
|
||||
|
||||
This document details the changes in version 1.0 since the release of the 0.7 alpha of the MaxScale product.
|
||||
|
||||
# New Features
|
||||
|
||||
## Complex Replication Structures
|
||||
|
||||
The MaxScale monitor module for Master/Slave replication is now able to correctly identify tree structured replication environments and route write statements to the master server at the root level of the tree. Isolated database instances are now also correctly identified as external to the replication tree.
|
||||
|
||||
## Read/Write Splitter Enhancements
|
||||
|
||||
### Support For Prepared Statements
|
||||
|
||||
Prepared statements are now correctly recognised by MaxScale, with the prepare stage being sent to all the eligible servers that could eventually run the statement. Statements are then executed on a single server.
|
||||
|
||||
### Slave Failure Resilience
|
||||
|
||||
The Read/Write splitter can now be used to establish multiple connections to different slave servers. The read load will be distributed across these slaves and slave failure will be masked from the application as MaxScale will automatically failover to another slave when one fails.
|
||||
|
||||
### Configurable Load Balancing Options
|
||||
|
||||
It is now possible to configure the criteria that the Read/Write Splitter uses for load balancing, the options are:
|
||||
|
||||
* The total number of connections to the servers, from this MaxScale instance
|
||||
|
||||
* The number of connections to the server for this particular MaxScale service
|
||||
|
||||
* The number of statements currently being executed on the server on behalf of this MaxScale instance
|
||||
|
||||
* Route statements to the slave that has the least replication lag
|
||||
|
||||
### Replication Consistency
|
||||
|
||||
The Read/Write splitter may now be configured to exclude nodes that are currently showing a replication lag greater than a configurable threshold. The replication lag is measured using the MySQL Monitor module of MaxScale.
|
||||
|
||||
Alternatively it is possible to define that read operations should be routed to the slave that has the least measured replication lag.
|
||||
|
||||
## Weighted Routing Options
|
||||
|
||||
The distribution of connections and statement across the set of nodes can be controlled by attaching arbitrary parameters to the servers and then configuring the router to use that parameter value as a weighting factor when deciding which of the valid servers to which to connect or route queries.
|
||||
|
||||
Several parameters may be used on each host and different routers may choose to use different parameters as the weighting parameter for that router. The use of weighting is optional, if no weighting parameter is given in the service definition then all eligible servers will have an equal distribution applied.
|
||||
|
||||
Server weighting is supported by both the Read/Write Splitter and the connection router.
|
||||
|
||||
## MaxAdmin Client
|
||||
|
||||
A new administrative interface has been added that uses a MaxScale specific client application to interact with MaxScale to control and monitor the MaxScale activities. This client application may be used interactively or within scripts, passing commands to MaxScale via command line arguments. Command scripts are available, allowing sets of commands to be stored in script files.
|
||||
|
||||
MaxAdmin also supports command history via libedit on those distributions that support the libedit library. This allows for the use of the up and down cursor keys or selection of previous commands and editing of lines using vi or emacs style editing commands.
|
||||
|
||||
## Pacemaker Support
|
||||
|
||||
MaxScale now ships with an init.d script that is compatible with the use of Pacemaker and Heartbeat to provide for a highly available implementation of MaxScale. A tutorial on setting up MaxScale under Pacemaker control is included in the Documentation directory.
|
||||
|
||||
## Filter API Enhancements
|
||||
|
||||
The filter API has now been enhanced to operate not just on downstream query filtering but also upstream result set filtering.
|
||||
|
||||
## Enhanced and New Filters
|
||||
|
||||
Addition of new filters and enhancements to those existing filters that appeared in 0.7 of MaxScale.
|
||||
|
||||
### Top Filter
|
||||
|
||||
A new filter to capture and log the longest running queries within a client session. The filter can be configured to capture a specific number of queries that take the longest time between the query being submitted to the database server and the first result being returned.
|
||||
|
||||
The queries captured can be defined using regular expressions to include and exclude queries that match these expressions. In addition the inclusion of a session may be based on the user name used to connect to the database or the source address of the client session.
|
||||
|
||||
### Tee Filter
|
||||
|
||||
A filter to optionally duplicate requests received from the client and send them to other services within MaxScale. This allows a single statement sent by a client to be routed to multiple storage backends via MaxScale.
|
||||
|
||||
The queries duplicated can be defined using regular expressions to include and exclude queries that match these expressions. In addition the inclusion of a session may be based on the user name used to connect to the database or the source client session.
|
||||
|
||||
### QLA and Regex Filter Improvements
|
||||
|
||||
These filters have been enhanced to provide for the inclusion of sessions by specifying the username used to connect to the database or the source of the client connection as a criteria to trigger the use of these filters for particular sessions connected to the MaxScale service.
|
||||
|
||||
# Bug Fixes
|
||||
|
||||
A number of bug fixes have been applied between the 0.7 alpha and this beta release. The table below lists the bugs that have been resolved. The details for each of these may be found in bugs.skysql.com.
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<td>ID</td>
|
||||
<td>Summary</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>441</td>
|
||||
<td>Possible failure to return a value in setipaddress</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>396</td>
|
||||
<td>Build instruction suggest forcing install of RPM’s</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>452</td>
|
||||
<td>Make install copies the modules to an incorrect directory</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>450</td>
|
||||
<td>Read/Write splitter does not balance load between multiple slaves</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>449</td>
|
||||
<td>The router clientReply function does not handle GWBUF structures correctly</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
|
||||
# Packaging
|
||||
|
||||
Both RPM and Debian packages are available for MaxScale. In addition to the tar based releases previously distributed, we now provide:
|
||||
|
||||
* CentOS/RedHat 5 RPM
|
||||
|
||||
* CentOS/RedHat 6 RPM
|
||||
|
||||
* Ubuntu 14.04 package
|
||||
|
332
Documentation/Release-Notes/MaxScale-1.0.1-Release-Notes.md
Normal file
@ -0,0 +1,332 @@
|
||||
MaxScale Release Notes
|
||||
|
||||
1.0.1 Beta
|
||||
|
||||
This document details the changes in version 1.0.1 since the release of the 1.0 beta of the MaxScale product.
|
||||
|
||||
# New Features
|
||||
|
||||
## CMake build system
|
||||
|
||||
Building MaxScale is now easier than ever thanks to the introduction of CMake into the build process. Building with CMake removes the need to edit files, specify directory locations or change build flags, in all but the rarest of the cases, and building with non-standard configurations is a lot easier thanks to the easy configuration of all the build parameters.
|
||||
|
||||
Here’s a short list of the most common build parameters,their functions and default values.
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<td>Variable</td>
|
||||
<td>Purpose</td>
|
||||
<td>Default value</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>INSTALL_DIR</td>
|
||||
<td>Root location of the MaxScale install</td>
|
||||
<td>/usr/local/skysql/maxscale</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>STATIC_EMBEDDED</td>
|
||||
<td>Whether to use the static or the dynamic version of the embedded library</td>
|
||||
<td>No</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>OLEVEL</td>
|
||||
<td>Level of optimization used when compiling</td>
|
||||
<td>No optimization</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>INSTALL_SYSTEM_FILES</td>
|
||||
<td>If startup scripts should be installed into /etc/init.d and ldconfig configuration files to /etc/ld.so.conf.d</td>
|
||||
<td>Yes</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>BUILD_TYPE</td>
|
||||
<td>The type of the build. ‘None’ for normal, ‘Debug’ for debugging and ‘Optimized’ for an optimized build.</td>
|
||||
<td>None</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
|
||||
Details on all the configurable parameters and instructions on how to use CMake can be found in the README file.
|
||||
|
||||
## Enhancements
|
||||
|
||||
The polling mechanism in MaxScale has been modified to overcome a flaw which meant that connections with a heavy I/O load could starve other connections within MaxScale and prevent query execution. This has been resolved with a fairer event scheduling mechanism within the MaxScale polling subsystem. This has led to improved overall performance in high load situations.
|
||||
|
||||
# Bug Fixes
|
||||
|
||||
A number of bug fixes have been applied between the 1.0 beta release and this release candidate. The table below lists the bugs that have been resolved. The details for each of these may be found in bugs.skysql.com.
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<td>ID</td>
|
||||
<td>Summary</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>462</td>
|
||||
<td>Testall target fails in server/test to invalid MAXSCALE_HOME path specification</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>467</td>
|
||||
<td>max_slave_replication lag is not effective after session creation</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>468</td>
|
||||
<td>query_classifier : if parsing fails, parse tree and thread context are freed but used</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>469</td>
|
||||
<td>rwsplit counts every connection twice in master - connection counts leak</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>466</td>
|
||||
<td>hint_next_token doesn't detect <param>=<value> pair if there are no spaces around '='</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>470</td>
|
||||
<td>Maxscale crashes after a normal query if a query with named hint was used before</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>473</td>
|
||||
<td>Entering a hint with route server target as '=(' causes a crash</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>472</td>
|
||||
<td>Using a named hint after its initial use causes a crash</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>471</td>
|
||||
<td>Routing Hints route to server sometimes doesn't work</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>463</td>
|
||||
<td>MaxScale hangs receiving more than 16K in input</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>476</td>
|
||||
<td>mysql_common.c:protocol_archive_srv_command leaks memory and accesses freed memory</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>479</td>
|
||||
<td>Undefined filter reference in MaxScale.cnf causes a crash</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>410</td>
|
||||
<td>MaxScale.cnf server option is not parsed for spaces</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>417</td>
|
||||
<td>Galera monitor freezes on network failure of a server</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>488</td>
|
||||
<td>SHOW VARIABLES randomly failing with "Lost connection to MySQL server"</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>484</td>
|
||||
<td>Hashtable does not always release write lock during add</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>485</td>
|
||||
<td>Hashtable not locked soon enough in iterator get next item</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>493</td>
|
||||
<td>Can have same section name multiple times without warning</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>510</td>
|
||||
<td>Embedded library crashes on a call to free_embedded_thd</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>511</td>
|
||||
<td>Format strings in log_manager.cc should be const char*</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>509</td>
|
||||
<td>rw-split sensitive to order of terms in field list of SELECT</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>507</td>
|
||||
<td>rw-split router does not send last_insert_id() to master</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>490</td>
|
||||
<td>session handling for non-determinstic user variables broken</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>489</td>
|
||||
<td>@@hostname and @@server_id treated differently from @@wsrep_node_address</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>528</td>
|
||||
<td>Wrong service name in tee filter crashes maxscale on connect</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>530</td>
|
||||
<td>MaxScale socket permission</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>536</td>
|
||||
<td>log_manager doesn't write buffers to disk in the order they are written</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>447</td>
|
||||
<td>Error log is flooded with same warning if there are no slaves present</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>475</td>
|
||||
<td>The end comment tag in hints isn't properly detected.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>181</td>
|
||||
<td>Missing log entry if server not reachable</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>486</td>
|
||||
<td>Hashtable problems when created with size less than one</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>516</td>
|
||||
<td>maxadmin CLI client sessions are not closed?</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>495</td>
|
||||
<td>Referring to a nonexisting server in servers=... doesn't even raise a warning</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>538</td>
|
||||
<td>maxscale should expose details of "Down" server</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>539</td>
|
||||
<td>MaxScale crashes in session_setup_filters</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>494</td>
|
||||
<td>The service 'CLI' is missing a definition of the servers that provide the service</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>180</td>
|
||||
<td>Documentation: No information found in the documentation about firewall settings</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>524</td>
|
||||
<td>Connecting to MaxScale from localhost tries matching @127.0.0.1 grant</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>481</td>
|
||||
<td>MySQL monitor doesn't set master server if the replication is broken</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>437</td>
|
||||
<td>Failure to detect MHA master switch</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>541</td>
|
||||
<td>Long queries cause MaxScale to block</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>492</td>
|
||||
<td>In dcb.c switch fallthrough appears to be used without comment</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>439</td>
|
||||
<td>Memory leak in getUsers</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>545</td>
|
||||
<td>RWSplit: session modification commands weren't routed to all if executed inside open transaction</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>543</td>
|
||||
<td>RWSplit router statistics counters are not updated correctly</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>544</td>
|
||||
<td>server with weight=0 gets one connection</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>525</td>
|
||||
<td>Crash when saving post in Wordpress</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>533</td>
|
||||
<td>Drupal installer hangs</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>497</td>
|
||||
<td>Can’t enable debug/trace logs in configuration file</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>430</td>
|
||||
<td>Temporary tables not working in MaxScale</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>527</td>
|
||||
<td>No signal handler for segfault etc</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>546</td>
|
||||
<td>Use of weightby router parameter causes error log write</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>506</td>
|
||||
<td>Don’t write shm/tmpfs by default without telling the user or giving a way to override it</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>552</td>
|
||||
<td>Long argument options to maxadmin and maxscale broke maxadmin commands</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>521</td>
|
||||
<td>Many commands in maxadmin client simply hang</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>478</td>
|
||||
<td>Parallel session command processing fails</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>499</td>
|
||||
<td>make clean leavessoem .o files behind</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>500</td>
|
||||
<td>"depend: no such file warnings during make</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>501</td>
|
||||
<td>log_manager, query classifier rebuilds unconditionally</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>502</td>
|
||||
<td>log_manager and query_classifier builds always rebuild utils</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>504</td>
|
||||
<td>clean rule for Documentation directory in wrong makefile</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>505</td>
|
||||
<td>utils/makefile builds stuff unconditionally, misses "depend" target</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>548</td>
|
||||
<td>MaxScale accesses freed client DCB and crashes</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>550</td>
|
||||
<td>modutil functions process length incorrectly</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
|
||||
# Packaging
|
||||
|
||||
Both RPM and Debian packages are available for MaxScale. In addition to the tar based releases previously distributed, we now provide:
|
||||
|
||||
* CentOS/RedHat 5 RPM
|
||||
|
||||
* CentOS/RedHat 6 RPM
|
||||
|
||||
* Ubuntu 14.04 package
|
||||
|
136
Documentation/Release-Notes/MaxScale-1.0.3-Release-Notes.md
Normal file
@ -0,0 +1,136 @@
|
||||
MaxScale Release Notes
|
||||
|
||||
1.0.3 GA
|
||||
|
||||
This document details the changes in version 1.0.3 since the release of the 1.0.2 Release Candidate of the MaxScale product.
|
||||
|
||||
# New Features
|
||||
|
||||
No new features have been introduced since the release candidate was released.
|
||||
|
||||
# Bug Fixes
|
||||
|
||||
A number of bug fixes have been applied between the 1.0.2 release candidate and this release. The table below lists the bugs that have been resolved. The details for each of these may be found in bugs.mariadb.com.
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<td>ID</td>
|
||||
<td>Summary</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>644</td>
|
||||
<td>Buffered that were cloned using the gwbuf_clone routine failed to initialise the buffer lock structure correctly.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>643</td>
|
||||
<td>Recursive filter definitions in the configuration file could cause MaxScale to loop</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>665</td>
|
||||
<td>An access to memory that had already been freed could be made within the MaxScale core</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>664</td>
|
||||
<td>MySQL Authentication code could access memory that had already been freed.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>673</td>
|
||||
<td>MaxScale could crash if it had an empty user table and the MaxAdmin show dbusers command was run</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>670</td>
|
||||
<td>The tee filter could lose statement on the branch service if the branch service was significantly slower at executing statements compared with the main service.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>653</td>
|
||||
<td>Memory corruption could occur with extremely long hostnames in the mysql.user table.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>657</td>
|
||||
<td>If the branch service of a tee filter shutdown unexpectedly then MaxScale could fail</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>654</td>
|
||||
<td>Missing quotes in MaxAdmin show dbusers command could cause MaxAdmin to crash</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>677</td>
|
||||
<td>A race condition existed in the tee filter client reply handling</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>658</td>
|
||||
<td>The readconnroute router did not correctly close sessions when a backend database failed</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>662</td>
|
||||
<td>MaxScale startup hangs if no backend servers respond</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>676</td>
|
||||
<td>MaxScale writes a log entry, "Write to backend failed. Session closed." when changing default database via readwritesplit with max_slave_connections != 100%</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>650</td>
|
||||
<td>Tee filter does not correctly detect missing branch service</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>645</td>
|
||||
<td>Tee filter can hang MaxScale if the read/write splitter is used</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>678</td>
|
||||
<td>Tee filter does not always send full query to branch service</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>679</td>
|
||||
<td>A shared pointer in the service was leading to misleading service states</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>680</td>
|
||||
<td>The Read/Write Splitter can not load users if there are no databases available at startup</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>681</td>
|
||||
<td>The Read/Write Splitter could crash is the value of max_slave_connections was set to a low percentage and only a small number of backend servers are available</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
|
||||
# Known Issues
|
||||
|
||||
There are a number bugs and known limitations within this version of MaxScale, the most serious of this are listed below.
|
||||
|
||||
* The SQL construct "LOAD DATA LOCAL INFILE" is not fully supported.
|
||||
|
||||
* The Read/Write Splitter is a little too strict when it receives errors from slave servers during execution of session commands. This can result in sessions being terminated in situation in which MaxScale could recover without terminating the sessions.
|
||||
|
||||
* MaxScale can not manage authentication that uses wildcard matching in hostnames in the mysql.user table of the backend database. The only wildcards that can be used are in IP address entries.
|
||||
|
||||
* When users have different passwords based on the host from which they connect MaxScale is unable to determine which password it should use to connect to the backend database. This results in failed connections and unusable usernames in MaxScale.
|
||||
|
||||
# Packaging
|
||||
|
||||
Both RPM and Debian packages are available for MaxScale. In addition to the tar based releases previously distributed, we now provide:
|
||||
|
||||
* CentOS/RedHat 5
|
||||
|
||||
* CentOS/RedHat 6
|
||||
|
||||
* CentOS/RedHat 7
|
||||
|
||||
* Debian 6
|
||||
|
||||
* Debian 7
|
||||
|
||||
* Ubuntu 12.04 LTS
|
||||
|
||||
* Ubuntu 13.10
|
||||
|
||||
* Ubuntu 14.04 LTS
|
||||
|
||||
* Fedora 19
|
||||
|
||||
* Fedora 20
|
||||
|
||||
* OpenSuSE 13
|
||||
|
209
Documentation/Tutorials/Administration-Tutorial.md
Normal file
@ -0,0 +1,209 @@
|
||||
# MaxScale Administration Tutorial
|
||||
|
||||
## Common Administration Tasks
|
||||
|
||||
The purpose of this tutorial is to introduce the MaxScale Administrator to a few of the common administration tasks that need to be performed with MaxScale. It is not intended as a reference to all the tasks that may be performed, more this is aimed as an introduction for administrators who are new to MaxScale.
|
||||
|
||||
[Starting MaxScale](#starting)
|
||||
[Stopping MaxScale](#stopping)
|
||||
[Checking The Status Of The MaxScale Services](#checking)
|
||||
[What Clients Are Connected To MaxScale](#clients)
|
||||
[Rotating Log Files](#rotating)
|
||||
[Taking A Database Server Out Of Use](#outofuse)
|
||||
|
||||
<a name="starting"></a>
|
||||
### Starting MaxScale
|
||||
|
||||
There are several ways to start MaxScale, the most convenient mechanism is probably using the Linux service interface. When a MaxScale package is installed the package manager will also install a script in /etc/init.d which may be used to start and stop MaxScale either directly or via the service interface.
|
||||
|
||||
$ service maxscale start
|
||||
|
||||
or
|
||||
|
||||
$ /etc/init.d/maxscale start
|
||||
|
||||
It is also possible to start MaxScale by executing the maxscale command itself, in this case you must ensure that the environment is correctly setup or command line options are passed. The major elements to consider are the correct setting of the MAXSCALE\_HOME directory and to ensure that LD\_LIBRARY\_PATH. The LD\_LIBRARY\_PATH should include the lib directory that was installed as part of the MaxScale installation, the MAXSCALE\_HOME should point to /usr/local/skysql/maxscale if a default installation has been created or to the directory this was relocated to. Running the executable $MAXSCALE\_HOME/bin/maxscale will result in MaxScale running as a daemon process, unattached to the terminal in which it was started and using configuration files that it finds in the $MAXSCALE\_HOME directory.
|
||||
|
||||
Options may be passed to the MaxScale binary that alter this default behaviour; these options are documented in the table below.
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<td>Switch</td>
|
||||
<td>Long Option</td>
|
||||
<td>Description</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>-d</td>
|
||||
<td>--nodaemon</td>
|
||||
<td>Run MaxScale attached to the terminal rather than as a daemon process. This is useful for debugging purposes.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>-c</td>
|
||||
<td>--homedir=</td>
|
||||
<td>Ignore the environment variable MAXSCALE_HOME and use the supplied argument instead.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>-f</td>
|
||||
<td>--config=</td>
|
||||
<td>Use the filename passed as an argument instead of looking in $MAXSCALE_HOME/etc/MaxScale.cnf</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>-l<file>|<shm></td>
|
||||
<td>--log=</td>
|
||||
<td>Control where logs are written for the debug and trace level log messages. The default is to write these to a shared memory device, however using the -lfile or --log=file option will force these to be written to regular files.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>-v</td>
|
||||
<td>--version</td>
|
||||
<td>Print version information for MaxScale</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>-?</td>
|
||||
<td>--help</td>
|
||||
<td>Print usage information for MaxScale</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
<a name="stopping"></a>
|
||||
### Stopping MaxScale
|
||||
|
||||
There are numerous ways in which MaxScale can be stopped; using the service interface, killing the process or by use of the maxadmin utility.
|
||||
|
||||
Stopping MaxScale with the service interface is simply a case of using the service stop command or calling the init.d script with the stop argument.
|
||||
|
||||
$ service maxscale stop
|
||||
|
||||
or
|
||||
|
||||
$ /etc/init.d/maxscale stop
|
||||
|
||||
MaxScale will also stop gracefully if it receives a hangup signal. To find the process id of the MaxScale server use the ps command or read the contents of the maxscale.pid file located in the same directory as the logs.
|
||||
|
||||
$ kill -HUP `cat $MAXSCALE_HOME/log/maxscale.pid`
|
||||
|
||||
In order to shutdown MaxScale using the maxadmin command you may either connect with maxadmin in interactive mode or pass the "shutdown maxscale" command you wish to execute as an argument to maxadmin.
|
||||
|
||||
$ maxadmin -pskysql shutdown maxscale
|
||||
|
||||
<a name="checking"></a>
|
||||
### Checking The Status Of The MaxScale Services
|
||||
|
||||
It is possible to use the maxadmin command to obtain statistics regarding the services that are configured within your MaxScale configuration file. The maxadmin command "list services" will give very basic information regarding the services that are defined. This command may be either run in interactive mode or passed on the maxadmin command line.
|
||||
|
||||
$ maxadmin -pskysql
|
||||
MaxScale> list services
|
||||
|
||||
Services.
|
||||
|
||||
--------------------------+----------------------+--------+---------------
|
||||
|
||||
Service Name | Router Module | #Users | Total Sessions
|
||||
|
||||
--------------------------+----------------------+--------+---------------
|
||||
|
||||
RWSplitter | readwritesplit | 2 | 4
|
||||
|
||||
Cassandra | readconnroute | 1 | 1
|
||||
|
||||
CLI | cli | 2 | 2
|
||||
|
||||
--------------------------+----------------------+--------+---------------
|
||||
|
||||
MaxScale>
|
||||
|
||||
It should be noted that network listeners count as a user of the service, therefore there will always be one user per network port in which the service listens. More detail can be obtained by use of the "show service" command which is passed a service name.
|
||||
|
||||
<a name="clients"></a>
|
||||
### What Clients Are Connected To MaxScale
|
||||
|
||||
To determine what clients are currently connected to MaxScale you can use the "list clients" command within maxadmin. This will give you the IP address and the IDs of the DCB and session for that connection. As with any maxadmin command this can be passed on the command line or typed interactively in maxadmin.
|
||||
|
||||
$ maxadmin -pskysql list clients
|
||||
|
||||
Client Connections
|
||||
|
||||
-----------------+------------------+----------------------+------------
|
||||
|
||||
Client | DCB | Service | Session
|
||||
|
||||
-----------------+------------------+----------------------+------------
|
||||
|
||||
127.0.0.1 | 0x7fe694013410 | CLI | 0x7fe69401ac10
|
||||
|
||||
-----------------+------------------+----------------------+------------
|
||||
|
||||
$
|
||||
|
||||
<a name="rotating"></a>
|
||||
### Rotating Log Files
|
||||
|
||||
MaxScale writes log data into four log files with varying degrees of detail. With the exception of the error log, which can not be disabled, these log files may be enabled and disabled via the maxadmin interface or in the configuration file. The default behaviour of MaxScale is to grow the log files indefinitely, the administrator must take action to prevent this.
|
||||
|
||||
It is possible to rotate either a single log file or all the log files with a single command. When the logfile is rotated, the current log file is closed and a new log file, with an increased sequence number in its name, is created. Log file rotation is achieved by use of the "flush log" or “flush logs” command in maxadmin.
|
||||
|
||||
$ maxadmin -pskysql flush logs
|
||||
|
||||
Flushes all of the logs, whereas an individual log may be flushed with the "flush log" command.
|
||||
|
||||
$ maxadmin -pskysql
|
||||
MaxScale> flush log error
|
||||
MaxScale> flush log trace
|
||||
MaxScale>
|
||||
|
||||
This may be integrated into the Linux logrotate mechanism by adding a configuration file to the /etc/logrotate.d directory. If we assume we want to rotate the log files once per month and wish to keep 5 log files worth of history, the configuration file would look like the following.
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<td>/usr/local/skysql/maxscale/log/*.log {
|
||||
monthly
|
||||
rotate 5
|
||||
missingok
|
||||
nocompress
|
||||
sharedscripts
|
||||
postrotate
|
||||
\# run if maxscale is running
|
||||
if test -n "`ps acx|grep maxscale`"; then
|
||||
/usr/local/skysql/maxscale/bin/maxadmin -pskysql flush logs
|
||||
fi
|
||||
endscript
|
||||
}</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
|
||||
One disadvantage with this is that the password used for the maxadmin command has to be embedded in the log rotate configuration file. MaxScale will also rotate all of its log files if it receives the USR1 signal. Using this the logrotate configuration script can be rewritten as
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<td>/usr/local/skysql/maxscale/log/*.log {
|
||||
monthly
|
||||
rotate 5
|
||||
missingok
|
||||
nocompress
|
||||
sharedscripts
|
||||
postrotate
|
||||
kill -USR1 `cat /usr/local/skysql/maxscale/log/maxscale.pid`
|
||||
endscript
|
||||
}</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
<a name="outofuse"></a>
|
||||
### Taking A Database Server Out Of Use
|
||||
|
||||
MaxScale supports the concept of maintenance mode for servers within a cluster. This allows for planned, temporary removal of a database from the cluster without the need to change the MaxScale configuration.
|
||||
|
||||
To achieve the removal of a database server you can use the set server command in the maxadmin utility to set the maintenance mode flag for the server. This may be done interactively within maxadmin or by passing the command on the command line.
|
||||
|
||||
MaxScale> set server dbserver3 maintenance
|
||||
MaxScale>
|
||||
|
||||
This will cause MaxScale to stop routing any new requests to the server, however if there are currently requests executing on the server these will not be interrupted.
|
||||
|
||||
To bring the server back into service use the "clear server" command to clear the maintenance mode bit for that server.
|
||||
|
||||
MaxScale> clear server dbserver3 maintenance
|
||||
MaxScale>
|
||||
|
||||
Note that maintenance mode is not persistent; if MaxScale restarts when a node is in maintenance mode a new instance of MaxScale will not honour this mode. If multiple MaxScale instances are configured to use the node then maintenance mode must be set within each MaxScale instance. However if multiple services within one MaxScale instance are using the server then you only need set the maintenance mode once on the server for all services to take note of the mode change.
|
||||
|
178
Documentation/Tutorials/Filter-Tutorial.md
Normal file
@ -0,0 +1,178 @@
|
||||
# Filters
|
||||
|
||||
## What Are Filters?
|
||||
|
||||
The filter mechanism in MaxScale is a means by which processing can be inserted into the flow of requests and responses between the client connection to MaxScale and the MaxScale connection to the backend database servers. The path from the client side of MaxScale out to the actual database servers can be considered a pipeline, filters can then be placed in that pipeline to monitor, modify, copy or block the content that flows through that pipeline.
|
||||
|
||||
## Types Of Filter
|
||||
|
||||
Filters can be divided into a number of categories
|
||||
|
||||
### Logging filters
|
||||
|
||||
Logging filters do not in any way alter the statement or results of the statements that are passed through MaxScale. They merely log some information about some or all of the statements and/or result sets.
|
||||
|
||||
Two examples of logging filters are contained within the MaxScale GA, a filter that will log all statements and another that will log only a number of statements, based on the duration of the execution of the query.
|
||||
|
||||
### Statement rewriting filters
|
||||
|
||||
Statement rewriting filters modify the statements that are passed through the filter. This allows a filter to be used as a mechanism to alter the statements that are seen by the database, an example of the use of this might be to allow an application to remain unchanged when the underlying database changes or to compensate for the migration from one database schema to another.
|
||||
|
||||
The MaxScale GA includes a filter that can modify statements by use of regular expressions to match statements and replace the matched text.
|
||||
|
||||
### Result set manipulation filters
|
||||
|
||||
A result set manipulation filter is very similar to a statement rewriting but applies to the result set returned rather than the statement executed. An example of this may be obfuscating the values in a column.
|
||||
|
||||
The MaxScale 1.0 GA release does not contain any result set manipulation filters.
|
||||
|
||||
### Routing hint filters
|
||||
|
||||
Routing hint filters are filters that embed hints in the request that can be used by the router onto which the query is passed. These hints include suggested destinations as well as metrics that may be used by the routing process.
|
||||
|
||||
The MaxScale 1.0 GA release does not contain any hint filters.
|
||||
|
||||
### Firewall filters
|
||||
|
||||
A firewall filter is a mechanism that allows queries to be blocked within MaxScale before they are sent on to the database server for execution. They allow constructs or individual queries to be intercepted and give a level of access control that is more flexible than the traditional database grant mechanism.
|
||||
|
||||
The 1.0 GA release of MaxScale does not include any firewall filters.
|
||||
|
||||
### Pipeline control filters
|
||||
|
||||
A pipeline filter is one that has an effect on how the requests are routed within the internal MaxScale components. The most obvious version of this is the ability to add a "tee" connector in the pipeline, duplicating the request and sending it to a second MaxScale service for processing.
|
||||
|
||||
The MaxScale 1.0 GA release contains an implementation of a tee filter that allows statements to be matched using a regular expression and passed to a second service within MaxScale.
|
||||
|
||||
## Filter Definition
|
||||
|
||||
Filters are defined in the configuration file, MaxScale.ini, using a section for each filter instance. The content of the filter sections in the configuration file varies from filter to filter; however, there are always two entries present for every filter, the type and module.
|
||||
|
||||
[MyFilter]
|
||||
type=filter
|
||||
module=xxxfilter
|
||||
|
||||
The type is used by the configuration manager within MaxScale to determine what this section is defining and the module is the name of the plugin that implements the filter.
|
||||
|
||||
When a filter is used within a service in MaxScale the entry filters= is added to the service definition in the ini file section for the service. Multiple filters can be defined using a syntax akin to the Linux shell pipe syntax.
|
||||
|
||||
[Split Service]
|
||||
type=service
|
||||
router=readwritesplit
|
||||
servers=dbserver1,dbserver2,dbserver3,dbserver4
|
||||
user=massi
|
||||
passwd=6628C50E07CCE1F0392EDEEB9D1203F3
|
||||
filters=hints | top10
|
||||
|
||||
The names used in the filters= parameter are the names of the filter definition sections in the ini file. The same filter definition can be used in multiple services and the same filter module can have multiple instances, each with its own section in the ini file.
|
||||
|
||||
## Filter Examples
|
||||
|
||||
The filters that are bundled with the MaxScale 1.0 GA release are documented separately, in this section a short overview of how these might be used for some simple tasks will be discussed. These are just examples of how these filters might be used, other filters may also be easily added that will enhance the MaxScale functionality still further.
|
||||
|
||||
### Log The 30 Longest Running Queries
|
||||
|
||||
The top filter can be used to measure the execution time of every statement within a connection and log the details of the longest running statements.
|
||||
|
||||
The first thing to do is to define a filter entry in the ini file for the top filter. In this case we will call it "top30". The type is filter and the module that implements the filter is called topfilter.
|
||||
|
||||
[top30]
|
||||
type=filter
|
||||
module=topfilter
|
||||
count=30
|
||||
filebase=/var/log/DBSessions/top30
|
||||
|
||||
In the definition above we have defined two filter specific parameters, the count of the number of statements to be logged and a filebase that is used to define where to log the information. This filename is a stem to which a session id is added for each new connection that uses the filter.
|
||||
|
||||
The filter keeps track of every statement that is executed, monitors the time it takes for a response to come back and uses this as the measure of execution time for the statement. If the time is longer than the other statements that have been recorded, then this is added to the ordered list within the filter. Once 30 statements have been recorded those statements that have been recorded with the least time are discarded from the list. The result is that at any time the filter has a list of the 30 longest running statements in each session.
|
||||
|
||||
It is possible to see what is in the current list by using the maxadmin tool to view the state of the filter by looking at the session data. First you need to find the session id for the session of interest, this can be done using commands such as list sessions. You can then use the show session command to see the details for a particular session.
|
||||
|
||||
MaxScale> show session 0x736680
|
||||
|
||||
Session 0x736680
|
||||
State: Session ready for routing
|
||||
Service: Split Service (0x719f60)
|
||||
Client DCB: 0x7361a0
|
||||
Client Address: 127.0.0.1
|
||||
Connected: Thu Jun 26 10:10:44 2014
|
||||
|
||||
Filter: top30
|
||||
Report size 30
|
||||
Logging to file /var/log/DBSessions/top30.1.
|
||||
Current Top 30:
|
||||
|
||||
1 place:
|
||||
Execution time: 23.826 seconds
|
||||
SQL: select sum(salary), year(from_date) from salaries s, (select distinct year(from_date) as y1 from salaries) y where (makedate(y.y1, 1) between s.from_date and s.to_date) group by y.y1 ("1988-08-01?
|
||||
|
||||
2 place:
|
||||
Execution time: 5.251 seconds
|
||||
SQL: select d.dept_name as "Department", y.y1 as "Year", count(*) as "Count" from departments d, dept_emp de, (select distinct year(from_date) as y1 from dept_emp order by 1) y where d.dept_no = de.dept_no and (makedate(y.y1, 1) between de.from_date and de.to_date) group by y.y1, d.dept_name order by 1, 2
|
||||
|
||||
3 place:
|
||||
Execution time: 2.903 seconds
|
||||
SQL: select year(now()) - year(birth_date) as age, gender, avg(salary) as "Average Salary" from employees e, salaries s where e.emp_no = s.emp_no and ("1988-08-01" between from_date AND to_date) group by year(now()) - year(birth_date), gender order by 1,2
|
||||
|
||||
...
|
||||
|
||||
When the session ends a report will be written for the session into the logfile defined. That report will include the top 30 longest running statements, plus summary data for the session;
|
||||
|
||||
* The time the connection was opened.
|
||||
|
||||
* The host the connection was from.
|
||||
|
||||
* The username used in the connection.
|
||||
|
||||
* The duration of the connection.
|
||||
|
||||
* The total number of statements executed in the connection.
|
||||
|
||||
* The average execution time for a statement in this connection.
|
||||
|
||||
### Duplicate Data From Your Application Into Cassandra
|
||||
|
||||
The scenario we are using in this example is one in which you have an online gaming application that is designed to work with a MariaDB/MySQL database. The database schema includes a high score table which you would like to have access to in a Cassandra cluster. The application is already using MaxScale to connect to a MariaDB Galera cluster, using a service names BubbleGame. The definition of that service is as follows
|
||||
|
||||
[BubbleGame]
|
||||
type=service
|
||||
router=readwritesplit
|
||||
servers=dbbubble1,dbbubble2,dbbubble3,dbbubble4,dbbubble5
|
||||
user=maxscale
|
||||
passwd=6628C50E07CCE1F0392EDEEB9D1203F3
|
||||
|
||||
The table you wish to store in Cassandra is called HighScore and will contain the same columns in both the MariaDB table and the Cassandra table. The first step is to install a MariaDB instance with the Cassandra storage engine to act as a bridge server between the relational database and Cassandra. In this bridge server add a table definition for the HighScore table with the engine type set to cassandra. Add this server into the MaxScale configuration and create a service that will connect to this server.
|
||||
|
||||
[CassandraDB]
|
||||
type=server
|
||||
address=192.168.4.28
|
||||
port=3306
|
||||
protocol=MySQLBackend
|
||||
[Cassandra]
|
||||
type=service
|
||||
router=readconnrouter
|
||||
router_options=running
|
||||
servers=CassandraDB
|
||||
user=maxscale
|
||||
passwd=6628C50E07CCE1F0392EDEEB9D1203F3
|
||||
|
||||
Next add a filter definition for the tee filter that will duplicate insert statements that are destined for the HighScore table to this new service.
|
||||
|
||||
[HighScores]
|
||||
type=filter
|
||||
module=teefilter
|
||||
match=insert.*HighScore.*values
|
||||
service=Cassandra
|
||||
|
||||
The above filter definition will cause all statements that match the regular expression insert.*HighScore.*values to be duplicated and sent not just to the original destination, via the router, but also to the service named Cassandra.
|
||||
|
||||
The final step is to add the filter to the BubbleGame service to enable the use of the filter.
|
||||
|
||||
[BubbleGame]
|
||||
type=service
|
||||
router=readwritesplit
|
||||
servers=dbbubble1,dbbubble2,dbbubble3,dbbubble4,dbbubble5
|
||||
user=maxscale
|
||||
passwd=6628C50E07CCE1F0392EDEEB9D1203F3
|
||||
filters=HighScores
|
||||
|
@ -0,0 +1,216 @@
|
||||
# Connection Routing with Galera Cluster
|
||||
|
||||
## Environment & Solution Space
|
||||
|
||||
This document is designed as a quick introduction to setting up MaxScale in an environment in which you have a Galera Cluster within which you wish to balance connections across all the database nodes of the cluster that are active members of the cluster.
|
||||
|
||||
The process of setting and configuring MaxScale will be covered within this document. However the installation and configuration of the Galera Cluster will not be covered.
|
||||
|
||||
This tutorial will assume the user is running from one of the binary distributions available and has installed this in the default location. Building from source code in GitHub is covered in guides elsewhere as is installing to non-default locations.
|
||||
|
||||
## Process
|
||||
|
||||
The steps involved in creating a system from the binary distribution of MaxScale are:
|
||||
|
||||
* Install the package relevant to your distribution
|
||||
|
||||
* Create the required users in your MariaDB or MySQL Galera cluster
|
||||
|
||||
* Create a MaxScale configuration file
|
||||
|
||||
### Installation
|
||||
|
||||
The precise installation process will vary from one distribution to another details of what to do with the RPM and DEB packages can be found on the download site when you select the distribution you are downloading from. The process involves setting up your package manager to include the MariaDB repositories and then running the package manager for your distribution, RPM or apt-get.
|
||||
|
||||
Upon successful completion of the installation command you will have MaxScale installed and ready to be run but without a configuration. You must create a configuration file before you first run MaxScale.
|
||||
|
||||
### Creating Database Users
|
||||
|
||||
MaxScale needs to connect to the backend databases and run queries for two reasons; one to determine the current state of the database and the other to retrieve the user information for the database cluster. This may be done either using two separate usernames or with a single user.
|
||||
|
||||
The first user required must be able to select data from the table mysql.user, to create this user follow the steps below.
|
||||
|
||||
1. Connect to one of the nodes in your Galera cluster as the root user
|
||||
|
||||
2. Create the user, substituting the username, password and host on which maxscale runs within your environment
|
||||
|
||||
MariaDB [(none)]> create user '*username*'@'*maxscalehost*' identified by '*password*';
|
||||
|
||||
**Query OK, 0 rows affected (0.00 sec)**
|
||||
|
||||
3. Grant select privileges on the mysql.user table
|
||||
|
||||
MariaDB [(none)]> grant SELECT on mysql.user to '*username*'@'*maxscalehost*';
|
||||
|
||||
**Query OK, 0 rows affected (0.03 sec)**
|
||||
|
||||
Additionally, GRANT SELECT on the mysql.db table and SHOW DATABASES privileges are required in order to load database names and grants suitable for database name authorization.
|
||||
|
||||
MariaDB [(none)]> GRANT SELECT ON mysql.db TO 'username'@'maxscalehost';
|
||||
|
||||
**Query OK, 0 rows affected (0.00 sec)**
|
||||
|
||||
MariaDB [(none)]> GRANT SHOW DATABASES ON *.* TO 'username'@'maxscalehost';
|
||||
|
||||
**Query OK, 0 rows affected (0.00 sec)**
|
||||
|
||||
The second user is used to monitor the state of the cluster. This user, which may be the same username as the first, requires permissions to access the various sources of monitoring data within the information schema. No special permission need to be granted to the user in order to query the information schema.
|
||||
|
||||
If you wish to use two different usernames for the two different roles of monitoring and collecting user information then create a different username using the first two steps from above.
|
||||
|
||||
### Creating Your MaxScale Configuration
|
||||
|
||||
MaxScale configuration is held in an ini file that is located in the file MaxScale.cnf in the directory $MAXSCALE_HOME/etc; if you have installed in the default location then this file is available in /usr/local/skysql/maxscale/etc/MaxScale.cnf. This is not created as part of the installation process and must be manually created. A template file does exist within this directory that may be used as a basis for your configuration.
|
||||
|
||||
A global, maxscale, section is included within every MaxScale configuration file; this is used to set the values of various MaxScale wide parameters, perhaps the most important of these is the number of threads that MaxScale will use to execute the code that forwards requests and handles responses for clients.
|
||||
|
||||
[maxscale]
|
||||
threads=4
|
||||
|
||||
Since we are using Galera Cluster and connection routing we want a single port to which the client application can connect; MaxScale will then route connections to this port onwards to the various nodes within the Galera Cluster. To achieve this within MaxScale we need to define a service in the ini file. Create a section for each in your MaxScale.ini file and set the type to service; the section name is the name of the service and should be meaningful to the administrator. Names may contain whitespace.
|
||||
|
||||
[Galera Service]
|
||||
type=service
|
||||
|
||||
The router for this section is the readconnroute module; the service should also be provided with the list of servers that will be part of the cluster. The server names given here are actually the names of server sections in the configuration file and not the physical hostnames or addresses of the servers.
|
||||
|
||||
[Galera Service]
|
||||
type=service
|
||||
router=readconnroute
|
||||
servers=dbserv1, dbserv2, dbserv3
|
||||
|
||||
In order to instruct the router to which servers it should route we must add router options to the service. The router options are compared to the status that the monitor collects from the servers and used to restrict the eligible set of servers to which that service may route. In our case we use the option that restricts us to servers that are fully functional members of the Galera cluster which are able to support SQL operations on the cluster. To achieve this we use the router option synced.
|
||||
|
||||
[Galera Service]
|
||||
type=service
|
||||
router=readconnroute
|
||||
router_options=synced
|
||||
servers=dbserv1, dbserv2, dbserv3
|
||||
|
||||
The final step in the service section is to add the username and password that will be used to populate the user data from the database cluster. There are two options for representing the password, either plain text or encrypted passwords may be used. In order to use encrypted passwords a set of keys must be generated that will be used by the encryption and decryption process. To generate the keys use the maxkeys command and pass the name of the secrets file in which the keys are stored.
|
||||
|
||||
% maxkeys /usr/local/skysql/maxscale/etc/.secrets
|
||||
%
|
||||
|
||||
Once the keys have been created the maxpasswd command can be used to generate the encrypted password.
|
||||
|
||||
% maxpasswd plainpassword
|
||||
96F99AA1315BDC3604B006F427DD9484
|
||||
%
|
||||
|
||||
The username and password, either encrypted or plain text, are stored in the service section using the user and passwd parameters.
|
||||
|
||||
[Galera Service]
|
||||
type=service
|
||||
router=readconnroute
|
||||
router_options=synced
|
||||
servers=dbserv1, dbserv2, dbserv3
|
||||
user=maxscale
|
||||
passwd=96F99AA1315BDC3604B006F427DD9484
|
||||
|
||||
This completes the definitions required by the service, however listening ports must be associated with a service in order to allow network connections. This is done by creating a series of listener sections. These sections again are named for the convenience of the administrator and should be of type listener with an entry labelled service which contains the name of the service to associate the listener with. Each service may have multiple listeners.
|
||||
|
||||
[Galera Listener]
|
||||
type=listener
|
||||
service=Galera Service
|
||||
|
||||
A listener must also define the protocol module it will use for the incoming network protocol, currently this should be the MySQLClient protocol for all database listeners. The listener may then supply a network port to listen on and/or a socket within the file system.
|
||||
|
||||
[Galera Listener]
|
||||
type=listener
|
||||
service=Galera Service
|
||||
protocol=MySQLClient
|
||||
port=4306
|
||||
socket=/tmp/DB.Cluster
|
||||
|
||||
An address parameter may be given if the listener is required to bind to a particular network address when using hosts with multiple network addresses. The default behaviour is to listen on all network interfaces.
|
||||
|
||||
The next stage of the configuration is to define the server information. This defines how to connect to each of the servers within the cluster; again a section is created for each server, with the type set to server, the network address and port to connect to and the protocol to use to connect to the server. Currently the protocol for all database connections is MySQLBackend.
|
||||
|
||||
[dbserv1]
|
||||
type=server
|
||||
address=192.168.2.1
|
||||
port=3306
|
||||
protocol=MySQLBackend
|
||||
[dbserv2]
|
||||
type=server
|
||||
address=192.168.2.2
|
||||
port=3306
|
||||
protocol=MySQLBackend
|
||||
[dbserv3]
|
||||
type=server
|
||||
address=192.168.2.3
|
||||
port=3306
|
||||
protocol=MySQLBackend
|
||||
|
||||
In order for MaxScale to monitor the servers using the correct monitoring mechanisms a section should be provided that defines the monitor to use and the servers to monitor. Once again a section is created with a symbolic name for the monitor, with the type set to monitor. Parameters are added for the module to use, the list of servers to monitor and the username and password to use when connecting to the the servers with the monitor.
|
||||
|
||||
[Galera Monitor]
|
||||
type=monitor
|
||||
module=galeramon
|
||||
servers=dbserv1, dbserv2, dbserv3
|
||||
user=maxscale
|
||||
passwd=96F99AA1315BDC3604B006F427DD9484
|
||||
|
||||
As with the password definition in the server either plain text or encrypted passwords may be used.
|
||||
|
||||
The final stage in the configuration is to add the option service which is used by the maxadmin command to connect to MaxScale for monitoring and administration purposes. This creates a service section and a listener section.
|
||||
|
||||
[CLI]
|
||||
type=service
|
||||
router=cli
|
||||
[CLI Listener]
|
||||
type=listener
|
||||
service=CLI
|
||||
protocol=maxscaled
|
||||
address=localhost
|
||||
port=6603
|
||||
|
||||
In the case of the example above it should be noted that an address parameter has been given to the listener, this limits connections to maxadmin commands that are executed on the same machine that hosts MaxScale.
|
||||
|
||||
## Starting MaxScale
|
||||
|
||||
Upon completion of the configuration process MaxScale is ready to be started for the first time. This may either be done manually by running the maxscale command or via the service interface.
|
||||
|
||||
% maxscale
|
||||
|
||||
or
|
||||
|
||||
% service maxscale start
|
||||
|
||||
Check the error log in /usr/local/skysql/maxscale/log to see if any errors are detected in the configuration file and to confirm MaxScale has been started. Also the maxadmin command may be used to confirm that MaxScale is running and the services, listeners etc have been correctly configured.
|
||||
|
||||
% maxadmin -pskysql list services
|
||||
|
||||
Services.
|
||||
--------------------------+----------------------+--------+---------------
|
||||
Service Name | Router Module | #Users | Total Sessions
|
||||
--------------------------+----------------------+--------+---------------
|
||||
Galera Service | readconnroute | 1 | 1
|
||||
CLI | cli | 2 | 2
|
||||
--------------------------+----------------------+--------+---------------
|
||||
% maxadmin -pskysql list servers
|
||||
Servers.
|
||||
-------------------+-----------------+-------+-------------+-------------------
|
||||
Server | Address | Port | Connections | Status
|
||||
-------------------+-----------------+-------+-------------+--------------------
|
||||
dbserv1 | 192.168.2.1 | 3306 | 0 | Running, Synced, Master
|
||||
dbserv2 | 192.168.2.2 | 3306 | 0 | Running, Synced, Slave
|
||||
dbserv3 | 192.168.2.3 | 3306 | 0 | Running, Synced, Slave
|
||||
-------------------+-----------------+-------+-------------+--------------------
|
||||
|
||||
A Galera Cluster is a multi-master clustering technology, however the monitor is able to impose false notions of master and slave roles within a Galera Cluster in order to facilitate the use of Galera as if it were a standard MySQL Replication setup. This is merely an internal MaxScale convenience and has no impact on the behaviour of the cluster.
|
||||
|
||||
% maxadmin -pskysql list listeners
|
||||
|
||||
Listeners.
|
||||
---------------------+--------------------+-----------------+-------+--------
|
||||
Service Name | Protocol Module | Address | Port | State
|
||||
---------------------+--------------------+-----------------+-------+--------
|
||||
Galera Service | MySQLClient | * | 4306 | Running
|
||||
CLI | maxscaled | localhost | 6603 | Running
|
||||
---------------------+--------------------+-----------------+-------+--------
|
||||
%
|
||||
|
||||
MaxScale is now ready to start accepting client connections and routing them to the master or slaves within your cluster. Other configuration options are available that can alter the criteria used for routing, such as using weights to obtain unequal balancing operations. These options may be found in the MaxScale Configuration Guide. More detail on the use of maxadmin can be found in the document ["MaxAdmin - The MaxScale Administration & Monitoring Client Application"](../Reference/MaxAdmin.md).
|
||||
|
@ -0,0 +1,222 @@
|
||||
# Read/Write Splitting with Galera Cluster
|
||||
|
||||
## Environment & Solution Space
|
||||
|
||||
This document is designed as a quick introduction to setting up MaxScale in an environment in which you have a Galera Cluster which you wish to use as a single database node for update and one or more read only nodes. The object of this tutorial is to have a system that appears to the clients of MaxScale as if there is a single database behind MaxScale. MaxScale will split the statements such that write statements will be sent to only one server in the cluster and read statements will be balanced across the remainder of the servers.
|
||||
|
||||
The reason for a configuration like this, with all the updates being directed to a single node within what is a multi-master cluster, is to prevent any possible conflict between updates that may run on multiple nodes. Galera is built to provide the mechanism for this situation, however issues have been known to occur when conflicting transactions are committed on multiple nodes. Some applications are unable to deal with the resulting errors that may be created in this situation.
|
||||
|
||||
The process of setting and configuring MaxScale will be covered within this document. However the installation and configuration of the Galera Cluster will not be covered in this tutorial.
|
||||
|
||||
This tutorial will assume the user is running from one of the binary distributions available and has installed this in the default location. Building from source code in GitHub is covered in guides elsewhere as is installing to non-default locations.
|
||||
|
||||
## Process
|
||||
|
||||
The steps involved in creating a system from the binary distribution of MaxScale are:
|
||||
|
||||
* Install the package relevant to your distribution
|
||||
|
||||
* Create the required users in your Galera Cluster
|
||||
|
||||
* Create a MaxScale configuration file
|
||||
|
||||
### Installation
|
||||
|
||||
The precise installation process will vary from one distribution to another; details of what to do with the RPM and DEB packages can be found on the download site when you select the distribution you are downloading from. The process involves setting up your package manager to include the MariaDB repositories and then running the package manager for your distribution, RPM or apt-get.
|
||||
|
||||
Upon successful completion of the installation command you will have MaxScale installed and ready to be run but without a configuration. You must create a configuration file before you first run MaxScale.
|
||||
|
||||
### Creating Database Users
|
||||
|
||||
MaxScale needs to connect to the backend databases and run queries for two reasons; one to determine the current state of the database and the other to retrieve the user information for the database cluster. This may be done either using two separate usernames or with a single user.
|
||||
|
||||
The first user required must be able to select data from the mysql.user table. To create this user, follow the steps below.
|
||||
|
||||
1. Connect to Galera Cluster as the root user
|
||||
|
||||
2. Create the user, substituting the username, password and host on which maxscale runs within your environment
|
||||
|
||||
MariaDB [(none)]> create user '*username*'@'*maxscalehost*' identified by '*password*';
|
||||
|
||||
**Query OK, 0 rows affected (0.00 sec)**
|
||||
|
||||
3. Grant select privileges on the mysql.user table.
|
||||
|
||||
MariaDB [(none)]> grant SELECT on mysql.user to '*username*'@'*maxscalehost*';
|
||||
|
||||
**Query OK, 0 rows affected (0.03 sec)**
|
||||
|
||||
Additionally, GRANT SELECT on the mysql.db table and SHOW DATABASES privileges are required in order to load database names and grants suitable for database name authorization.
|
||||
|
||||
MariaDB [(none)]> GRANT SELECT ON mysql.db TO 'username'@'maxscalehost';
|
||||
|
||||
**Query OK, 0 rows affected (0.00 sec)**
|
||||
|
||||
MariaDB [(none)]> GRANT SHOW DATABASES ON *.* TO 'username'@'maxscalehost';
|
||||
|
||||
**Query OK, 0 rows affected (0.00 sec)**
|
||||
|
||||
The second user is used to monitor the state of the cluster. This user, which may be the same username as the first, requires permissions to access the various sources of monitoring data within the information schema. No special permissions need to be granted to the user in order to query the information schema.
|
||||
|
||||
If you wish to use two different usernames for the two different roles of monitoring and collecting user information then create a different username using the first two steps from above.
|
||||
|
||||
### Creating Your MaxScale Configuration
|
||||
|
||||
MaxScale configuration is held in an ini file that is located in the file MaxScale.cnf in the directory $MAXSCALE_HOME/etc, if you have installed in the default location then this file is available in /usr/local/skysql/maxscale/etc/MaxScale.cnf. This is not created as part of the installation process and must be manually created. A template file does exist within this directory that may be use as a basis for your configuration.
|
||||
|
||||
A global, maxscale, section is included within every MaxScale configuration file; this is used to set the values of various MaxScale wide parameters, perhaps the most important of these is the number of threads that MaxScale will use to execute the code that forwards requests and handles responses for clients.
|
||||
|
||||
[maxscale]
|
||||
threads=4
|
||||
|
||||
The first step is to create a service for our Read/Write Splitter. Create a section in your MaxScale.ini file and set the type to service, the section names are the names of the services themselves and should be meaningful to the administrator. Names may contain whitespace.
|
||||
|
||||
[Splitter Service]
|
||||
type=service
|
||||
|
||||
The router we need to use for this configuration is the readwritesplit module; the service should also be provided with the list of servers that will be part of the cluster. The server names given here are actually the names of server sections in the configuration file and not the physical hostnames or addresses of the servers.
|
||||
|
||||
[Splitter Service]
|
||||
type=service
|
||||
router=readwritesplit
|
||||
servers=dbserv1, dbserv2, dbserv3
|
||||
|
||||
The final step in the service sections is to add the username and password that will be used to populate the user data from the database cluster. There are two options for representing the password, either plain text or encrypted passwords may be used. In order to use encrypted passwords a set of keys must be generated that will be used by the encryption and decryption process. To generate the keys use the maxkeys command and pass the name of the secrets file in which the keys are stored.
|
||||
|
||||
% maxkeys /usr/local/skysql/maxscale/etc/.secrets
|
||||
%
|
||||
|
||||
Once the keys have been created the maxpasswd command can be used to generate the encrypted password.
|
||||
|
||||
% maxpasswd plainpassword
|
||||
96F99AA1315BDC3604B006F427DD9484
|
||||
%
|
||||
|
||||
The username and password, either encrypted or plain text, are stored in the service section using the user and passwd parameters.
|
||||
|
||||
[Splitter Service]
|
||||
type=service
|
||||
router=readwritesplit
|
||||
servers=dbserv1, dbserv2, dbserv3
|
||||
user=maxscale
|
||||
passwd=96F99AA1315BDC3604B006F427DD9484
|
||||
|
||||
This completes the definitions required by the service, however listening ports must be associated with the service in order to allow network connections. This is done by creating a series of listener sections. This section again is named for the convenience of the administrator and should be of type listener with an entry labelled service which contains the name of the service to associate the listener with. A service may have multiple listeners.
|
||||
|
||||
[Splitter Listener]
|
||||
type=listener
|
||||
service=Splitter Service
|
||||
|
||||
A listener must also define the protocol module it will use for the incoming network protocol, currently this should be the MySQLClient protocol for all database listeners. The listener may then supply a network port to listen on and/or a socket within the file system.
|
||||
|
||||
[Splitter Listener]
|
||||
type=listener
|
||||
service=Splitter Service
|
||||
protocol=MySQLClient
|
||||
port=3306
|
||||
socket=/tmp/ClusterMaster
|
||||
|
||||
An address parameter may be given if the listener is required to bind to a particular network address when using hosts with multiple network addresses. The default behaviour is to listen on all network interfaces.
|
||||
|
||||
The next stage in the configuration is to define the server information. This defines how to connect to each of the servers within the cluster; again a section is created for each server, with the type set to server, the network address and port to connect to and the protocol to use to connect to the server. Currently the protocol module for all database connections is MySQLBackend.
|
||||
|
||||
[dbserv1]
|
||||
type=server
|
||||
address=192.168.2.1
|
||||
port=3306
|
||||
protocol=MySQLBackend
|
||||
[dbserv2]
|
||||
type=server
|
||||
address=192.168.2.2
|
||||
port=3306
|
||||
protocol=MySQLBackend
|
||||
[dbserv3]
|
||||
type=server
|
||||
address=192.168.2.3
|
||||
port=3306
|
||||
protocol=MySQLBackend
|
||||
|
||||
In order for MaxScale to monitor the servers using the correct monitoring mechanisms a section should be provided that defines the monitor to use and the servers to monitor. Once again a section is created with a symbolic name for the monitor, with the type set to monitor. Parameters are added for the module to use, the list of servers to monitor and the username and password to use when connecting to the servers with the monitor.
|
||||
|
||||
[Galera Monitor]
|
||||
type=monitor
|
||||
module=galeramon
|
||||
servers=dbserv1, dbserv2, dbserv3
|
||||
user=maxscale
|
||||
passwd=96F99AA1315BDC3604B006F427DD9484
|
||||
|
||||
As with the password definition in the server either plain text or encrypted passwords may be used.
|
||||
|
||||
This monitor module will assign one node within the Galera Cluster as the current master and other nodes as slave. Only those nodes that are active members of the cluster are considered when making the choice of master node. Normally the master node will be the node with the lowest value of the status variable, WSREP_LOCAL_INDEX. When cluster membership changes a new master may be elected. In order to prevent changes of the node that is currently master, a parameter can be added to the monitor that will result in the current master remaining as master even if a node with a lower value of WSREP_LOCAL_INDEX joins the cluster. This parameter is called disable_master_failback.
|
||||
|
||||
[Galera Monitor]
|
||||
type=monitor
|
||||
module=galeramon
|
||||
disable_master_failback=1
|
||||
servers=dbserv1, dbserv2, dbserv3
|
||||
user=maxscale
|
||||
passwd=96F99AA1315BDC3604B006F427DD9484
|
||||
|
||||
Using this option the master node will only change if there is a problem with the current master and never because other nodes have joined the cluster.
|
||||
|
||||
The final stage in the configuration is to add the administration service which is used by the maxadmin command to connect to MaxScale for monitoring and administration purposes. This creates a service section and a listener section.
|
||||
|
||||
[CLI]
|
||||
type=service
|
||||
router=cli
|
||||
[CLI Listener]
|
||||
type=listener
|
||||
service=CLI
|
||||
protocol=maxscaled
|
||||
address=localhost
|
||||
port=6603
|
||||
|
||||
In the case of the example above it should be noted that an address parameter has been given to the listener, this limits connections to maxadmin commands that are executed on the same machine that hosts MaxScale.
|
||||
|
||||
## Starting MaxScale
|
||||
|
||||
Upon completion of the configuration process MaxScale is ready to be started for the first time. This may either be done manually by running the maxscale command or via the service interface.
|
||||
|
||||
% maxscale
|
||||
|
||||
or
|
||||
|
||||
% service maxscale start
|
||||
|
||||
Check the error log in /usr/local/skysql/maxscale/log to see if any errors are detected in the configuration file and to confirm MaxScale has been started. Also the maxadmin command may be used to confirm that MaxScale is running and the services, listeners etc have been correctly configured.
|
||||
|
||||
% maxadmin -pskysql list services
|
||||
|
||||
Services.
|
||||
--------------------------+----------------------+--------+---------------
|
||||
Service Name | Router Module | #Users | Total Sessions
|
||||
--------------------------+----------------------+--------+---------------
|
||||
Splitter Service | readwritesplit | 1 | 1
|
||||
CLI | cli | 2 | 2
|
||||
--------------------------+----------------------+--------+---------------
|
||||
|
||||
% maxadmin -pskysql list servers
|
||||
Servers.
|
||||
-------------------+-----------------+-------+-------------+--------------------
|
||||
Server | Address | Port | Connections | Status
|
||||
-------------------+-----------------+-------+-------------+--------------------
|
||||
dbserv1 | 192.168.2.1 | 3306 | 0 | Running, Synced, Master
|
||||
dbserv2 | 192.168.2.2 | 3306 | 0 | Running, Synced, Slave
|
||||
dbserv3 | 192.168.2.3 | 3306 | 0 | Running, Synced, Slave
|
||||
-------------------+-----------------+-------+-------------+--------------------
|
||||
|
||||
A Galera Cluster is a multi-master clustering technology, however the monitor is able to impose false notions of master and slave roles within a Galera Cluster in order to facilitate the use of Galera as if it were a standard MySQL Replication setup. This is merely an internal MaxScale convenience and has no impact on the behaviour of the cluster but does allow the monitor to create these pseudo roles which are utilised by the Read/Write Splitter.
|
||||
|
||||
% maxadmin -pskysql list listeners
|
||||
|
||||
Listeners.
|
||||
---------------------+--------------------+-----------------+-------+--------
|
||||
Service Name | Protocol Module | Address | Port | State
|
||||
---------------------+--------------------+-----------------+-------+--------
|
||||
Splitter Service | MySQLClient | * | 3306 | Running
|
||||
CLI | maxscaled | localhost | 6603 | Running
|
||||
---------------------+--------------------+-----------------+-------+--------
|
||||
%
|
||||
|
||||
MaxScale is now ready to start accepting client connections and routing them to the master or slaves within your cluster. Other configuration options are available that can alter the criteria used for routing, these include monitoring the replication lag within the cluster and routing only to slaves that are within a predetermined delay from the current master or using weights to obtain unequal balancing operations. These options may be found in the MaxScale Configuration Guide. More detail on the use of maxadmin can be found in the document "MaxAdmin - The MaxScale Administration & Monitoring Client Application".
|
||||
|
342
Documentation/Tutorials/MySQL-Cluster-Setup.md
Normal file
@ -0,0 +1,342 @@
|
||||
# MySQL Cluster setup and MaxScale configuration
|
||||
|
||||
Massimiliano Pinto
|
||||
|
||||
Last Updated: 1st August 2014
|
||||
|
||||
## Contents
|
||||
|
||||
## Document History
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<td>Date</td>
|
||||
<td>Change</td>
|
||||
<td>Who</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>31st July 2014</td>
|
||||
<td>Initial version</td>
|
||||
<td>Massimiliano Pinto</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
|
||||
## Overview
|
||||
|
||||
The document covers the MySQL Cluster 7.2.17 setup and the MaxScale configuration required in order to load balance access to the SQL nodes.
|
||||
|
||||
## MySQL Cluster setup
|
||||
|
||||
The MySQL Cluster 7.2.17 setup is based on two virtual servers with Linux Centos 6.5
|
||||
|
||||
- server1:
|
||||
|
||||
NDB Manager process
|
||||
|
||||
SQL data node1
|
||||
|
||||
MySQL 5.5.38 as SQL node1
|
||||
|
||||
- server2:
|
||||
|
||||
SQL data node2
|
||||
|
||||
MySQL 5.5.38 as SQL node2
|
||||
|
||||
Cluster configuration file is /var/lib/mysql-cluster/config.ini, copied on all servers
|
||||
|
||||
[ndbd default]
|
||||
NoOfReplicas=2
|
||||
DataMemory=60M
|
||||
IndexMemory=16M
|
||||
|
||||
[ndb_mgmd]
|
||||
hostname=178.62.38.199
|
||||
id=21
|
||||
datadir=/var/lib/mysql-cluster
|
||||
|
||||
[mysqld]
|
||||
hostname=178.62.38.199
|
||||
|
||||
[mysqld]
|
||||
hostname=162.243.90.81
|
||||
|
||||
[ndbd]
|
||||
hostname=178.62.38.199
|
||||
|
||||
[ndbd]
|
||||
hostname=162.243.90.81
|
||||
|
||||
Note, it’s possible to specify all node ids and datadir as well for each cluster component
|
||||
|
||||
Example:
|
||||
|
||||
[ndbd]
|
||||
hostname=162.243.90.81
|
||||
id=43
|
||||
datadir=/usr/local/mysql/data
|
||||
|
||||
and /etc/my.cnf, copied as well in all servers
|
||||
|
||||
[mysqld]
|
||||
ndbcluster
|
||||
ndb-connectstring=178.62.38.199
|
||||
innodb_buffer_pool_size=16M
|
||||
|
||||
[mysql_cluster]
|
||||
ndb-connectstring=178.62.38.199
|
||||
|
||||
## Startup of MySQL Cluster
|
||||
|
||||
Each cluster node process must be started separately, and on the host where it resides. The management node should be started first, followed by the data nodes, and then finally by any SQL nodes:
|
||||
|
||||
- On the management host, server1, issue the following command from the system shell to start the management node process:
|
||||
|
||||
[root@server1 ~]# ndb_mgmd -f /var/lib/mysql-cluster/config.ini
|
||||
|
||||
- On each of the data node hosts, run this command to start the ndbd process:
|
||||
|
||||
[root@server1 ~]# ndbd --initial --initial-start
|
||||
|
||||
[root@server2 ~]# ndbd --initial --initial-start
|
||||
|
||||
- On each SQL node start the MySQL server process:
|
||||
|
||||
[root@server1 ~]# /etc/init.d/mysql start
|
||||
|
||||
[root@server2 ~]# /etc/init.d/mysql start
|
||||
|
||||
## Check the cluster status
|
||||
|
||||
If all has gone well, and the cluster has been set up correctly, the cluster should now be operational.
|
||||
|
||||
It’s possible to test this by invoking the ndb_mgm management node client.
|
||||
|
||||
The output should look like that shown here, although you might see some slight differences in the output depending upon the exact version of MySQL that you are using:
|
||||
|
||||
[root@server1 ~]# ndb_mgm
|
||||
|
||||
-- NDB Cluster -- Management Client --
|
||||
|
||||
ndb_mgm> show
|
||||
|
||||
Connected to Management Server at: 178.62.38.199:1186
|
||||
|
||||
Cluster Configuration
|
||||
|
||||
---------------------
|
||||
|
||||
[ndbd(NDB)] 2 node(s)
|
||||
|
||||
id=24 @178.62.38.199 (mysql-5.5.38 ndb-7.2.17, Nodegroup: 0, *)
|
||||
|
||||
id=25 @162.243.90.81 (mysql-5.5.38 ndb-7.2.17, Nodegroup: 0)
|
||||
|
||||
[ndb_mgmd(MGM)] 1 node(s)
|
||||
|
||||
id=21 @178.62.38.199 (mysql-5.5.38 ndb-7.2.17)
|
||||
|
||||
[mysqld(API)] 2 node(s)
|
||||
|
||||
id=22 @178.62.38.199 (mysql-5.5.38 ndb-7.2.17)
|
||||
|
||||
id=23 @162.243.90.81 (mysql-5.5.38 ndb-7.2.17)
|
||||
|
||||
ndb_mgm>
|
||||
|
||||
The SQL node is referenced here as [mysqld(API)], which reflects the fact that the mysqld process is acting as a MySQL Cluster API node.
|
||||
|
||||
## Working with NDBCLUSTER engine in MySQL
|
||||
|
||||
- First create a table with NDBCLUSTER engine:
|
||||
|
||||
[root@server1 ~]# mysql
|
||||
|
||||
mysql> CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL ) ENGINE=NDBCLUSTER;
|
||||
|
||||
Query OK, 0 rows affected (3.28 sec)
|
||||
|
||||
mysql> show create table t1;
|
||||
|
||||
+------- +-------------------------------------------------------------------------------------------+
|
||||
| Table | Create Table |
|
||||
|
||||
+-------+-------------------------------------------------------------------------------------------+
|
||||
|
||||
| t1 | CREATE TABLE `t1` (
|
||||
`a` int(11) DEFAULT NULL
|
||||
|
||||
) ENGINE=ndbcluster DEFAULT CHARSET=latin1 |
|
||||
|
||||
+-------+-------------------------------------------------------------------------------------------+
|
||||
|
||||
1 row in set (0.01 sec)
|
||||
|
||||
- Just add a row in the table:
|
||||
|
||||
mysql> insert into test.t1 values(11);
|
||||
|
||||
Query OK, 1 row affected (0.15 sec)
|
||||
|
||||
- Select the current number of rows:
|
||||
|
||||
mysql> select count(1) from t1;
|
||||
|
||||
+----------+
|
||||
| count(1) |
|
||||
+----------+
|
||||
| 1 |
|
||||
+----------+
|
||||
|
||||
1 row in set (0.07 sec)
|
||||
|
||||
- The same from the MySQL client pointing to SQL node on server2
|
||||
|
||||
[root@server2 ~]# mysql
|
||||
|
||||
mysql> select count(1) from test.t1;
|
||||
|
||||
+----------+
|
||||
| count(1) |
|
||||
+----------+
|
||||
| 1 |
|
||||
+----------+
|
||||
|
||||
1 row in set (0.08 sec)
|
||||
|
||||
## Configuring MaxScale for connection load balancing of SQL nodes
|
||||
|
||||
Add these sections in MaxScale.cnf config file:
|
||||
|
||||
[Cluster Service]
|
||||
type=service
|
||||
router=readconnroute
|
||||
router_options=ndb
|
||||
servers=server1,server2
|
||||
user=test
|
||||
passwd=test
|
||||
version_string=5.5.37-CLUSTER
|
||||
|
||||
[Cluster Listener]
|
||||
type=listener
|
||||
service=Cluster Service
|
||||
protocol=MySQLClient
|
||||
port=4906
|
||||
|
||||
[NDB Cluster Monitor]
|
||||
type=monitor
|
||||
module=ndbclustermon
|
||||
servers=server1,server2
|
||||
user=monitor
|
||||
passwd=monitor
|
||||
monitor_interval=8000
|
||||
|
||||
[server1]
|
||||
|
||||
#SQL node1
|
||||
type=server
|
||||
address=127.0.0.1
|
||||
port=3306
|
||||
protocol=MySQLBackend
|
||||
|
||||
[server2]
|
||||
#SQL node2
|
||||
type=server
|
||||
address=162.243.90.81
|
||||
port=3306
|
||||
protocol=MySQLBackend
|
||||
|
||||
Assuming MaxScale is installed in server1, start it
|
||||
|
||||
[root@server1 ~]# cd /usr/local/skysql/maxscale/bin
|
||||
|
||||
[root@server1 bin]# ./maxscale -c ../
|
||||
|
||||
Using the debug interface it’s possible to check the status of monitored servers
|
||||
|
||||
MaxScale> show monitors
|
||||
|
||||
Monitor: 0x387b880
|
||||
|
||||
Name: NDB Cluster Monitor
|
||||
Monitor running
|
||||
Sampling interval: 8000 milliseconds
|
||||
Monitored servers: 127.0.0.1:3306, 162.243.90.81:3306
|
||||
|
||||
MaxScale> show servers
|
||||
|
||||
Server 0x3873b40 (server1)
|
||||
|
||||
Server: 127.0.0.1
|
||||
Status: NDB, Running
|
||||
Protocol: MySQLBackend
|
||||
Port: 3306
|
||||
Server Version: 5.5.38-ndb-7.2.17-cluster-gpl
|
||||
Node Id: 22
|
||||
Master Id: -1
|
||||
Repl Depth: 0
|
||||
Number of connections: 0
|
||||
Current no. of conns: 0
|
||||
Current no. of operations: 0
|
||||
|
||||
Server 0x3873a40 (server2)
|
||||
|
||||
Server: 162.243.90.81
|
||||
Status: NDB, Running
|
||||
Protocol: MySQLBackend
|
||||
Port: 3306
|
||||
Server Version: 5.5.38-ndb-7.2.17-cluster-gpl
|
||||
Node Id: 23
|
||||
Master Id: -1
|
||||
Repl Depth: 0
|
||||
Number of connections: 0
|
||||
Current no. of conns: 0
|
||||
Current no. of operations: 0
|
||||
|
||||
It’s now possible to run basic tests with the read connection load balancing for the two configured SQL nodes
|
||||
|
||||
(1) test MaxScale load balancing requesting the Ndb_cluster_node_id variable:
|
||||
|
||||
[root@server1 ~]# mysql -h 127.0.0.1 -P 4906 -u test -ptest -e "SHOW STATUS LIKE 'Ndb_cluster_node_id'"
|
||||
|
||||
+---------------------+-------+
|
||||
| Variable_name | Value |
|
||||
+---------------------+-------+
|
||||
| Ndb_cluster_node_id | 23 |
|
||||
+---------------------+-------+
|
||||
|
||||
[root@server1 ~]# mysql -h 127.0.0.1 -P 4906 -u test -ptest -e "SHOW STATUS LIKE 'Ndb_cluster_node_id'"
|
||||
|
||||
+---------------------+-------+
|
||||
| Variable_name | Value |
|
||||
+---------------------+-------+
|
||||
| Ndb_cluster_node_id | 22 |
|
||||
+---------------------+-------+
|
||||
|
||||
The MaxScale connection load balancing is working.
|
||||
|
||||
(2) test a select statement on an NDBCLUSTER table, database test and table t1 created before:
|
||||
|
||||
[root@server1 ~] mysql -h 127.0.0.1 -P 4906 -utest -ptest -e "SELECT COUNT(1) FROM test.t1"
|
||||
|
||||
+----------+
|
||||
| COUNT(1) |
|
||||
+----------+
|
||||
| 1 |
|
||||
+----------+
|
||||
|
||||
(3) test an insert statement
|
||||
|
||||
mysql -h 127.0.0.1 -P 4906 -utest -ptest -e "INSERT INTO test.t1 VALUES (19)"
|
||||
|
||||
(4) test again the select and check the number of rows
|
||||
|
||||
[root@server1 ~] mysql -h 127.0.0.1 -P 4906 -utest -ptest -e "SELECT COUNT(1) FROM test.t1"
|
||||
|
||||
+----------+
|
||||
| COUNT(1) |
|
||||
+----------+
|
||||
| 2 |
|
||||
+----------+
|
||||
|
@ -0,0 +1,354 @@
|
||||
Getting Started With MariaDB MaxScale
|
||||
|
||||
Connection Routing with MySQL Replication
|
||||
|
||||
# Environment & Solution Space
|
||||
|
||||
This document is designed as a quick introduction to setting up MaxScale in an environment in which you have a MySQL Replication Cluster with one master and multiple slave servers. The object of this tutorial is to have a system that has two ports available, one for write connections to the database cluster and the other for read connections to the database.
|
||||
|
||||
The process of setting and configuring MaxScale will be covered within this document. However the installation and configuration of the MySQL Replication subsystem will not be covered nor will any discussion of installation management tools to handle automated or semi-automated failover of the replication cluster.
|
||||
|
||||
This tutorial will assume the user is running from one of the binary distributions available and has installed this in the default location. Building from source code in GitHub is covered in guides elsewhere as is installing to non-default locations.
|
||||
|
||||
# Process
|
||||
|
||||
The steps involved in creating a system from the binary distribution of MaxScale are:
|
||||
|
||||
* Install the package relevant to your distribution
|
||||
|
||||
* Create the required users in your MariaDB or MySQL Replication cluster
|
||||
|
||||
* Create a MaxScale configuration file
|
||||
|
||||
## Installation
|
||||
|
||||
The precise installation process will vary from one distribution to another; details of what to do with the RPM and DEB packages can be found on the download site when you select the distribution you are downloading from. The process involves setting up your package manager to include the MariaDB repositories and then running the package manager for your distribution, RPM or apt-get.
|
||||
|
||||
Upon successful completion of the installation command you will have MaxScale installed and ready to be run but without a configuration. You must create a configuration file before you first run MaxScale.
|
||||
|
||||
## Creating Database Users
|
||||
|
||||
MaxScale needs to connect to the backend databases and run queries for two reasons; one to determine the current state of the database and the other to retrieve the user information for the database cluster. This may be done either using two separate usernames or with a single user.
|
||||
|
||||
The first user required must be able to select data from the table mysql.user, to create this user follow the steps below.
|
||||
|
||||
1. Connect to the current master server in your replication tree as the root user
|
||||
|
||||
2. Create the user, substituting the username, password and host on which maxscale runs within your environment
|
||||
|
||||
MariaDB [(none)]> create user '*username*'@'*maxscalehost*' identified by '*password*';
|
||||
|
||||
**Query OK, 0 rows affected (0.00 sec)**
|
||||
|
||||
3. Grant select privileges on the mysql.user table
|
||||
|
||||
MariaDB [(none)]> grant SELECT on mysql.user to '*username*'@'*maxscalehost*';
|
||||
|
||||
**Query OK, 0 rows affected (0.03 sec)**
|
||||
|
||||
Additionally, GRANT SELECT on the mysql.db table and SHOW DATABASES privileges are required in order to load databases name and grants suitable for database name authorization.
|
||||
|
||||
MariaDB [(none)]> GRANT SELECT ON mysql.db TO 'username'@'maxscalehost';
|
||||
|
||||
**Query OK, 0 rows affected (0.00 sec)**
|
||||
|
||||
MariaDB [(none)]> GRANT SHOW DATABASES ON *.* TO 'username'@'maxscalehost';
|
||||
|
||||
**Query OK, 0 rows affected (0.00 sec)**
|
||||
|
||||
The second user is used to monitor the state of the cluster. This user, which may be the same username as the first, requires permissions to access the various sources of monitoring data. In order to monitor a replication cluster this user must be granted the roles REPLICATION SLAVE and REPLICATION CLIENT
|
||||
|
||||
MariaDB [(none)]> grant REPLICATION SLAVE on *.* to '*username*'@'*maxscalehost*';
|
||||
|
||||
**Query OK, 0 rows affected (0.00 sec)**
|
||||
|
||||
MariaDB [(none)]> grant REPLICATION CLIENT on *.* to '*username*'@'*maxscalehost*';
|
||||
|
||||
**Query OK, 0 rows affected (0.00 sec)**
|
||||
|
||||
If you wish to use two different usernames for the two different roles of monitoring and collecting user information then create a different username using the first two steps from above.
|
||||
|
||||
## Creating Your MaxScale Configuration
|
||||
|
||||
MaxScale configuration is held in an ini file that is located in the file MaxScale.cnf in the directory $MAXSCALE_HOME/etc, if you have installed in the default location then this file is available in /usr/local/skysql/maxscale/etc/MaxScale.cnf. This is not created as part of the installation process and must be manually created. A template file does exist within this directory that may be used as a basis for your configuration.
|
||||
|
||||
A global, maxscale, section is included within every MaxScale configuration file; this is used to set the values of various MaxScale wide parameters, perhaps the most important of these is the number of threads that MaxScale will use to execute the code that forwards requests and handles responses for clients.
|
||||
|
||||
[maxscale]
|
||||
|
||||
threads=4
|
||||
|
||||
Since we are using MySQL Replication and connection routing we want two different ports to which the client application can connect; one that will be directed to the current master within the replication cluster and another that will load balance between the slaves. To achieve this within MaxScale we need to define two services in the ini file; one for the read/write operations that should be executed on the master server and another for connections to one of the slaves. Create a section for each in your MaxScale.ini file and set the type to service, the section names are the names of the services themselves and should be meaningful to the administrator. Names may contain whitespace.
|
||||
|
||||
[Write Service]
|
||||
|
||||
type=service
|
||||
|
||||
[Read Service]
|
||||
|
||||
type=service
|
||||
|
||||
The router for these two sections is identical, the readconnroute module; the services should also be provided with the list of servers that will be part of the cluster. The server names given here are actually the names of server sections in the configuration file and not the physical hostnames or addresses of the servers.
|
||||
|
||||
[Write Service]
|
||||
|
||||
type=service
|
||||
|
||||
router=readconnroute
|
||||
|
||||
servers=dbserv1, dbserv2, dbserv3
|
||||
|
||||
[Read Service]
|
||||
|
||||
type=service
|
||||
|
||||
router=readconnroute
|
||||
|
||||
servers=dbserv1, dbserv2, dbserv3
|
||||
|
||||
In order to instruct the router to which servers it should route we must add router options to the service. The router options are compared to the status that the monitor collects from the servers and used to restrict the eligible set of servers to which that service may route. In our case we use the two options master and slave for our two services.
|
||||
|
||||
[Write Service]
|
||||
|
||||
type=service
|
||||
|
||||
router=readconnroute
|
||||
|
||||
router_options=master
|
||||
|
||||
servers=dbserv1, dbserv2, dbserv3
|
||||
|
||||
[Read Service]
|
||||
|
||||
type=service
|
||||
|
||||
router=readconnroute
|
||||
|
||||
router_options=slave
|
||||
|
||||
servers=dbserv1, dbserv2, dbserv3
|
||||
|
||||
The final step in the service sections is to add the username and password that will be used to populate the user data from the database cluster. There are two options for representing the password, either plain text or encrypted passwords may be used. In order to use encrypted passwords a set of keys must be generated that will be used by the encryption and decryption process. To generate the keys use the maxkeys command and pass the name of the secrets file in which the keys are stored.
|
||||
|
||||
% maxkeys /usr/local/skysql/maxscale/etc/.secrets
|
||||
|
||||
%
|
||||
|
||||
Once the keys have been created the maxpasswd command can be used to generate the encrypted password.
|
||||
|
||||
% maxpasswd plainpassword
|
||||
|
||||
96F99AA1315BDC3604B006F427DD9484
|
||||
|
||||
%
|
||||
|
||||
The username and password, either encrypted or plain text, are stored in the service section using the user and passwd parameters.
|
||||
|
||||
[Write Service]
|
||||
|
||||
type=service
|
||||
|
||||
router=readconnroute
|
||||
|
||||
router_options=master
|
||||
|
||||
servers=dbserv1, dbserv2, dbserv3
|
||||
|
||||
user=maxscale
|
||||
|
||||
passwd=96F99AA1315BDC3604B006F427DD9484
|
||||
|
||||
[Read Service]
|
||||
|
||||
type=service
|
||||
|
||||
router=readconnroute
|
||||
|
||||
router_options=slave
|
||||
|
||||
servers=dbserv1, dbserv2, dbserv3
|
||||
|
||||
user=maxscale
|
||||
|
||||
passwd=96F99AA1315BDC3604B006F427DD9484
|
||||
|
||||
This completes the definitions required by the services, however listening ports must be associated with the services in order to allow network connections. This is done by creating a series of listener sections. These sections again are named for the convenience of the administrator and should be of type listener with an entry labelled service which contains the name of the service to associate the listener with. Each service may have multiple listeners.
|
||||
|
||||
[Write Listener]
|
||||
|
||||
type=listener
|
||||
|
||||
service=Write Service
|
||||
|
||||
[Read Listener]
|
||||
|
||||
type=listener
|
||||
|
||||
service=Read Service
|
||||
|
||||
A listener must also define the protocol module it will use for the incoming network protocol, currently this should be the MySQLClient protocol for all database listeners. The listener may then supply a network port to listen on and/or a socket within the file system.
|
||||
|
||||
[Write Listener]
|
||||
|
||||
type=listener
|
||||
|
||||
service=Write Service
|
||||
|
||||
protocol=MySQLClient
|
||||
|
||||
port=4306
|
||||
|
||||
socket=/tmp/ClusterMaster
|
||||
|
||||
[Read Listener]
|
||||
|
||||
type=listener
|
||||
|
||||
service=Read Service
|
||||
|
||||
protocol=MySQLClient
|
||||
|
||||
port=4307
|
||||
|
||||
An address parameter may be given if the listener is required to bind to a particular network address when using hosts with multiple network addresses. The default behaviour is to listen on all network interfaces.
|
||||
|
||||
The next stage in the configuration is to define the server information. This defines how to connect to each of the servers within the cluster, again a section is created for each server, with the type set to server, the network address and port to connect to and the protocol to use to connect to the server. Currently the protocol for all database connections is MySQLBackend.
|
||||
|
||||
[dbserv1]
|
||||
|
||||
type=server
|
||||
|
||||
address=192.168.2.1
|
||||
|
||||
port=3306
|
||||
|
||||
protocol=MySQLBackend
|
||||
|
||||
[dbserv2]
|
||||
|
||||
type=server
|
||||
|
||||
address=192.168.2.2
|
||||
|
||||
port=3306
|
||||
|
||||
protocol=MySQLBackend
|
||||
|
||||
[dbserv3]
|
||||
|
||||
type=server
|
||||
|
||||
address=192.168.2.3
|
||||
|
||||
port=3306
|
||||
|
||||
protocol=MySQLBackend
|
||||
|
||||
In order for MaxScale to monitor the servers using the correct monitoring mechanisms a section should be provided that defines the monitor to use and the servers to monitor. Once again a section is created with a symbolic name for the monitor, with the type set to monitor. Parameters are added for the module to use, the list of servers to monitor and the username and password to use when connecting to the servers with the monitor.
|
||||
|
||||
[Replication Monitor]
|
||||
|
||||
type=monitor
|
||||
|
||||
module=mysqlmon
|
||||
|
||||
servers=dbserv1, dbserv2, dbserv3
|
||||
|
||||
user=maxscale
|
||||
|
||||
passwd=96F99AA1315BDC3604B006F427DD9484
|
||||
|
||||
As with the password definition in the server either plain text or encrypted passwords may be used.
|
||||
|
||||
The final stage in the configuration is to add the option service which is used by the maxadmin command to connect to MaxScale for monitoring and administration purposes. This creates a service section and a listener section.
|
||||
|
||||
[CLI]
|
||||
|
||||
type=service
|
||||
|
||||
router=cli
|
||||
|
||||
[CLI Listener]
|
||||
|
||||
type=listener
|
||||
|
||||
service=CLI
|
||||
|
||||
protocol=maxscaled
|
||||
|
||||
address=localhost
|
||||
|
||||
port=6603
|
||||
|
||||
In the case of the example above it should be noted that an address parameter has been given to the listener, this limits connections to maxadmin commands that are executed on the same machine that hosts MaxScale.
|
||||
|
||||
# Starting MaxScale
|
||||
|
||||
Upon completion of the configuration process MaxScale is ready to be started for the first time. This may either be done manually by running the maxscale command or via the service interface.
|
||||
|
||||
% maxscale
|
||||
|
||||
or
|
||||
|
||||
% service maxscale start
|
||||
|
||||
Check the error log in /usr/local/skysql/maxscale/log to see if any errors are detected in the configuration file and to confirm MaxScale has been started. Also the maxadmin command may be used to confirm that MaxScale is running and the services, listeners etc have been correctly configured.
|
||||
|
||||
% maxadmin -pskysql list services
|
||||
|
||||
Services.
|
||||
|
||||
--------------------------+----------------------+--------+---------------
|
||||
|
||||
Service Name | Router Module | #Users | Total Sessions
|
||||
|
||||
--------------------------+----------------------+--------+---------------
|
||||
|
||||
Read Service | readconnroute | 1 | 1
|
||||
|
||||
Write Service | readconnroute | 1 | 1
|
||||
|
||||
CLI | cli | 2 | 2
|
||||
|
||||
--------------------------+----------------------+--------+---------------
|
||||
|
||||
% maxadmin -pskysql list servers
|
||||
|
||||
Servers.
|
||||
|
||||
-------------------+-----------------+-------+-------------+--------------------
|
||||
|
||||
Server | Address | Port | Connections | Status
|
||||
|
||||
-------------------+-----------------+-------+-------------+--------------------
|
||||
|
||||
dbserv1 | 192.168.2.1 | 3306 | 0 | Running, Slave
|
||||
|
||||
dbserv2 | 192.168.2.2 | 3306 | 0 | Running, Master
|
||||
|
||||
dbserv3 | 192.168.2.3 | 3306 | 0 | Running, Slave
|
||||
|
||||
-------------------+-----------------+-------+-------------+--------------------
|
||||
|
||||
% maxadmin -pskysql list listeners
|
||||
|
||||
Listeners.
|
||||
|
||||
---------------------+--------------------+-----------------+-------+--------
|
||||
|
||||
Service Name | Protocol Module | Address | Port | State
|
||||
|
||||
---------------------+--------------------+-----------------+-------+--------
|
||||
|
||||
Read Service | MySQLClient | * | 4307 | Running
|
||||
|
||||
Write Service | MySQLClient | * | 4306 | Running
|
||||
|
||||
CLI | maxscaled | localhost | 6603 | Running
|
||||
|
||||
---------------------+--------------------+-----------------+-------+--------
|
||||
|
||||
%
|
||||
|
||||
MaxScale is now ready to start accepting client connections and routing them to the master or slaves within your cluster. Other configuration options are available that can alter the criteria used for routing, these include monitoring the replication lag within the cluster and routing only to slaves that are within a predetermined delay from the current master or using weights to obtain unequal balancing operations. These options may be found in the MaxScale Configuration Guide. More detail on the use of maxadmin can be found in the document "MaxAdmin - The MaxScale Administration & Monitoring Client Application".
|
||||
|
@ -0,0 +1,284 @@
|
||||
Getting Started With MariaDB MaxScale
|
||||
|
||||
Read/Write Splitting with MySQL Replication
|
||||
|
||||
# Environment & Solution Space
|
||||
|
||||
This document is designed as a quick introduction to setting up MaxScale in an environment in which you have a MySQL Replication Cluster with one master and multiple slave servers. The object of this tutorial is to have a system that appears to the clients of MaxScale as if there is a single database behind MaxScale. MaxScale will split the statements such that write statements will be sent to the current master server in the replication cluster and read statements will be balanced across a number of the slave servers.
|
||||
|
||||
The process of setting and configuring MaxScale will be covered within this document. However the installation and configuration of the MySQL Replication subsystem will not be covered nor will any discussion of installation management tools to handle automated or semi-automated failover of the replication cluster.
|
||||
|
||||
This tutorial will assume the user is running from one of the binary distributions available and has installed this in the default location. Building from source code in GitHub is covered in guides elsewhere as is installing to non-default locations.
|
||||
|
||||
# Process
|
||||
|
||||
The steps involved in creating a system from the binary distribution of MaxScale are:
|
||||
|
||||
* Install the package relevant to your distribution
|
||||
|
||||
* Create the required users in your MariaDB or MySQL Replication cluster
|
||||
|
||||
* Create a MaxScale configuration file
|
||||
|
||||
## Installation
|
||||
|
||||
The precise installation process will vary from one distribution to another; details of what to do with the RPM and DEB packages can be found on the download site when you select the distribution you are downloading from. The process involves setting up your package manager to include the MariaDB repositories and then running the package manager for your distribution, RPM or apt-get.
|
||||
|
||||
Upon successful completion of the installation command you will have MaxScale installed and ready to be run but without a configuration. You must create a configuration file before you first run MaxScale.
|
||||
|
||||
## Creating Database Users
|
||||
|
||||
MaxScale needs to connect to the backend databases and run queries for two reasons; one to determine the current state of the database and the other to retrieve the user information for the database cluster. This may be done either using two separate usernames or with a single user.
|
||||
|
||||
The first user required must be able to select data from the table mysql.user, to create this user follow the steps below.
|
||||
|
||||
1. Connect to the current master server in your replication tree as the root user
|
||||
|
||||
2. Create the user, substituting the username, password and host on which maxscale runs within your environment
|
||||
|
||||
MariaDB [(none)]> create user '*username*'@'*maxscalehost*' identified by '*password*';
|
||||
|
||||
**Query OK, 0 rows affected (0.00 sec)**
|
||||
|
||||
3. Grant select privileges on the mysql.user table.
|
||||
|
||||
MariaDB [(none)]> grant SELECT on mysql.user to '*username*'@'*maxscalehost*';
|
||||
|
||||
**Query OK, 0 rows affected (0.03 sec)**
|
||||
|
||||
Additionally, GRANT SELECT on the mysql.db table and SHOW DATABASES privileges are required in order to load databases name and grants suitable for database name authorization.
|
||||
|
||||
MariaDB [(none)]> GRANT SELECT ON mysql.db TO 'username'@'maxscalehost';
|
||||
|
||||
**Query OK, 0 rows affected (0.00 sec)**
|
||||
|
||||
MariaDB [(none)]> GRANT SHOW DATABASES ON *.* TO 'username'@'maxscalehost';
|
||||
|
||||
**Query OK, 0 rows affected (0.00 sec)**
|
||||
|
||||
The second user is used to monitor the state of the cluster. This user, which may be the same username as the first, requires permissions to access the various sources of monitoring data. In order to monitor a replication cluster this user must be granted the roles REPLICATION SLAVE and REPLICATION CLIENT
|
||||
|
||||
MariaDB [(none)]> grant REPLICATION SLAVE on *.* to '*username*'@'*maxscalehost*';
|
||||
|
||||
**Query OK, 0 rows affected (0.00 sec)**
|
||||
|
||||
MariaDB [(none)]> grant REPLICATION CLIENT on *.* to '*username*'@'*maxscalehost*';
|
||||
|
||||
**Query OK, 0 rows affected (0.00 sec)**
|
||||
|
||||
If you wish to use two different usernames for the two different roles of monitoring and collecting user information then create a different username using the first two steps from above.
|
||||
|
||||
## Creating Your MaxScale Configuration
|
||||
|
||||
MaxScale configuration is held in an ini file that is located in the file MaxScale.cnf in the directory $MAXSCALE_HOME/etc, if you have installed in the default location then this file is available in /usr/local/skysql/maxscale/etc/MaxScale.cnf. This is not created as part of the installation process and must be manually created. A template file does exist within this directory that may be used as a basis for your configuration.
|
||||
|
||||
A global, maxscale, section is included within every MaxScale configuration file; this is used to set the values of various MaxScale wide parameters, perhaps the most important of these is the number of threads that MaxScale will use to execute the code that forwards requests and handles responses for clients.
|
||||
|
||||
[maxscale]
|
||||
|
||||
threads=4
|
||||
|
||||
The first step is to create a service for our Read/Write Splitter. Create a section in your MaxScale.ini file and set the type to service, the section names are the names of the services themselves and should be meaningful to the administrator. Names may contain whitespace.
|
||||
|
||||
[Splitter Service]
|
||||
|
||||
type=service
|
||||
|
||||
The router we need to use for this configuration is the readwritesplit module; the services should also be provided with the list of servers that will be part of the cluster. The server names given here are actually the names of server sections in the configuration file and not the physical hostnames or addresses of the servers.
|
||||
|
||||
[Splitter Service]
|
||||
|
||||
type=service
|
||||
|
||||
router=readwritesplit
|
||||
|
||||
servers=dbserv1, dbserv2, dbserv3
|
||||
|
||||
The final step in the service sections is to add the username and password that will be used to populate the user data from the database cluster. There are two options for representing the password, either plain text or encrypted passwords may be used. In order to use encrypted passwords a set of keys must be generated that will be used by the encryption and decryption process. To generate the keys use the maxkeys command and pass the name of the secrets file in which the keys are stored.
|
||||
|
||||
% maxkeys /usr/local/skysql/maxscale/etc/.secrets
|
||||
|
||||
%
|
||||
|
||||
Once the keys have been created the maxpasswd command can be used to generate the encrypted password.
|
||||
|
||||
% maxpasswd plainpassword
|
||||
|
||||
96F99AA1315BDC3604B006F427DD9484
|
||||
|
||||
%
|
||||
|
||||
The username and password, either encrypted or plain text, are stored in the service section using the user and passwd parameters.
|
||||
|
||||
[Splitter Service]
|
||||
|
||||
type=service
|
||||
|
||||
router=readwritesplit
|
||||
|
||||
servers=dbserv1, dbserv2, dbserv3
|
||||
|
||||
user=maxscale
|
||||
|
||||
passwd=96F99AA1315BDC3604B006F427DD9484
|
||||
|
||||
This completes the definitions required by the service, however listening ports must be associated with the service in order to allow network connections. This is done by creating a series of listener sections. This section again is named for the convenience of the administrator and should be of type listener with an entry labelled service which contains the name of the service to associate the listener with. A service may have multiple listeners.
|
||||
|
||||
[Splitter Listener]
|
||||
|
||||
type=listener
|
||||
|
||||
service=Splitter Service
|
||||
|
||||
A listener must also define the protocol module it will use for the incoming network protocol, currently this should be the MySQLClient protocol for all database listeners. The listener may then supply a network port to listen on and/or a socket within the file system.
|
||||
|
||||
[Splitter Listener]
|
||||
|
||||
type=listener
|
||||
|
||||
service=Splitter Service
|
||||
|
||||
protocol=MySQLClient
|
||||
|
||||
port=3306
|
||||
|
||||
socket=/tmp/ClusterMaster
|
||||
|
||||
An address parameter may be given if the listener is required to bind to a particular network address when using hosts with multiple network addresses. The default behaviour is to listen on all network interfaces.
|
||||
|
||||
The next stage in the configuration is to define the server information. This defines how to connect to each of the servers within the cluster, again a section is created for each server, with the type set to server, the network address and port to connect to and the protocol to use to connect to the server. Currently the protocol module for all database connections is MySQLBackend.
|
||||
|
||||
[dbserv1]
|
||||
|
||||
type=server
|
||||
|
||||
address=192.168.2.1
|
||||
|
||||
port=3306
|
||||
|
||||
protocol=MySQLBackend
|
||||
|
||||
[dbserv2]
|
||||
|
||||
type=server
|
||||
|
||||
address=192.168.2.2
|
||||
|
||||
port=3306
|
||||
|
||||
protocol=MySQLBackend
|
||||
|
||||
[dbserv3]
|
||||
|
||||
type=server
|
||||
|
||||
address=192.168.2.3
|
||||
|
||||
port=3306
|
||||
|
||||
protocol=MySQLBackend
|
||||
|
||||
In order for MaxScale to monitor the servers using the correct monitoring mechanisms a section should be provided that defines the monitor to use and the servers to monitor. Once again a section is created with a symbolic name for the monitor, with the type set to monitor. Parameters are added for the module to use, the list of servers to monitor and the username and password to use when connecting to the servers with the monitor.
|
||||
|
||||
[Replication Monitor]
|
||||
|
||||
type=monitor
|
||||
|
||||
module=mysqlmon
|
||||
|
||||
servers=dbserv1, dbserv2, dbserv3
|
||||
|
||||
user=maxscale
|
||||
|
||||
passwd=96F99AA1315BDC3604B006F427DD9484
|
||||
|
||||
As with the password definition in the server either plain text or encrypted passwords may be used.
|
||||
|
||||
The final stage in the configuration is to add the option service which is used by the maxadmin command to connect to MaxScale for monitoring and administration purposes. This creates a service section and a listener section.
|
||||
|
||||
[CLI]
|
||||
|
||||
type=service
|
||||
|
||||
router=cli
|
||||
|
||||
[CLI Listener]
|
||||
|
||||
type=listener
|
||||
|
||||
service=CLI
|
||||
|
||||
protocol=maxscaled
|
||||
|
||||
address=localhost
|
||||
|
||||
port=6603
|
||||
|
||||
In the case of the example above it should be noted that an address parameter has been given to the listener, this limits connections to maxadmin commands that are executed on the same machine that hosts MaxScale.
|
||||
|
||||
# Starting MaxScale
|
||||
|
||||
Upon completion of the configuration process MaxScale is ready to be started for the first time. This may either be done manually by running the maxscale command or via the service interface.
|
||||
|
||||
% maxscale
|
||||
|
||||
or
|
||||
|
||||
% service maxscale start
|
||||
|
||||
Check the error log in /usr/local/skysql/maxscale/log to see if any errors are detected in the configuration file and to confirm MaxScale has been started. Also the maxadmin command may be used to confirm that MaxScale is running and the services, listeners etc have been correctly configured.
|
||||
|
||||
% maxadmin -pskysql list services
|
||||
|
||||
Services.
|
||||
|
||||
--------------------------+----------------------+--------+---------------
|
||||
|
||||
Service Name | Router Module | #Users | Total Sessions
|
||||
|
||||
--------------------------+----------------------+--------+---------------
|
||||
|
||||
Splitter Service | readwritesplit | 1 | 1
|
||||
|
||||
CLI | cli | 2 | 2
|
||||
|
||||
--------------------------+----------------------+--------+---------------
|
||||
|
||||
% maxadmin -pskysql list servers
|
||||
|
||||
Servers.
|
||||
|
||||
-------------------+-----------------+-------+-------------+--------------------
|
||||
|
||||
Server | Address | Port | Connections | Status
|
||||
|
||||
-------------------+-----------------+-------+-------------+--------------------
|
||||
|
||||
dbserv1 | 192.168.2.1 | 3306 | 0 | Running, Slave
|
||||
|
||||
dbserv2 | 192.168.2.2 | 3306 | 0 | Running, Master
|
||||
|
||||
dbserv3 | 192.168.2.3 | 3306 | 0 | Running, Slave
|
||||
|
||||
-------------------+-----------------+-------+-------------+--------------------
|
||||
|
||||
% maxadmin -pskysql list listeners
|
||||
|
||||
Listeners.
|
||||
|
||||
---------------------+--------------------+-----------------+-------+--------
|
||||
|
||||
Service Name | Protocol Module | Address | Port | State
|
||||
|
||||
---------------------+--------------------+-----------------+-------+--------
|
||||
|
||||
Splitter Service | MySQLClient | * | 3306 | Running
|
||||
|
||||
CLI | maxscaled | localhost | 6603 | Running
|
||||
|
||||
---------------------+--------------------+-----------------+-------+--------
|
||||
|
||||
%
|
||||
|
||||
MaxScale is now ready to start accepting client connections and routing them to the master or slaves within your cluster. Other configuration options are available that can alter the criteria used for routing, these include monitoring the replication lag within the cluster and routing only to slaves that are within a predetermined delay from the current master or using weights to obtain unequal balancing operations. These options may be found in the MaxScale Configuration Guide. More detail on the use of maxadmin can be found in the document "MaxAdmin - The MaxScale Administration & Monitoring Client Application".
|
||||
|
@ -0,0 +1,375 @@
|
||||
# Rabbit MQ setup and MaxScale Integration
|
||||
## Introduction
|
||||
A step by step guide helps installing a RabbitMQ server and testing it before MaxScale integration.
|
||||
|
||||
New plugin filter and a message consumer application need to be compiled and linked with an external C library, RabbitMQ-c, that provides AMQP protocol integration.
|
||||
Custom configuration, with TCP/IP and Queue parameters, is also detailed here.
|
||||
The software install setup provides RPM and DEB packaging and traditional compilation steps.
|
||||
|
||||
## Step 1 - Get the RabbitMQ binaries
|
||||
|
||||
On Centos 6.5 using fedora / RHEL rpm get the rpm from [http://www.rabbitmq.com/](http://www.rabbitmq.com/ "RabbitMQ")
|
||||
|
||||
rabbitmq-server-3.3.4-1.noarch.rpm
|
||||
|
||||
Please note, before installing RabbitMQ, you must install Erlang.
|
||||
|
||||
Example:
|
||||
|
||||
yum install erlang
|
||||
Package erlang-R14B-04.3.el6.x86_64 already installed and latest version
|
||||
|
||||
## Step 2 - Install and Start the Server
|
||||
|
||||
Install the packages using your distribution's package manager and start the server:
|
||||
|
||||
yum install rabbitmq-server-3.3.4-1.noarch.rpm
|
||||
systemctl start rabbitmq-server.service
|
||||
|
||||
To configure your RabbitMQ server, please refer to the RabbitMQ website: [http://www.rabbitmq.com/](http://www.rabbitmq.com/ "RabbitMQ website").
|
||||
|
||||
rabbitmqctl is a command line tool for managing a RabbitMQ broker. It performs all actions by connecting to one of the broker's nodes.
|
||||
|
||||
rabbitmqctl list_queues
|
||||
rabbitmqctl list_queues | list_exchanges| cluster_status | list_bindings | list_connections | list_consumers | status
|
||||
|
||||
Example output:
|
||||
|
||||
[root@maxscale-02 MaxScale]# rabbitmqctl status
|
||||
Status of node 'rabbit@maxscale-02' ...
|
||||
[{pid,12251},
|
||||
{running_applications,[{rabbit,"RabbitMQ","3.3.4"},
|
||||
{os_mon,"CPO CXC 138 46","2.2.7"},
|
||||
{xmerl,"XML parser","1.2.10"},
|
||||
{mnesia,"MNESIA CXC 138 12","4.5"},
|
||||
{sasl,"SASL CXC 138 11","2.1.10"},
|
||||
{stdlib,"ERTS CXC 138 10","1.17.5"},
|
||||
{kernel,"ERTS CXC 138 10","2.14.5"}]},
|
||||
{os,{unix,linux}},
|
||||
{erlang_version,"Erlang R14B04 (erts-5.8.5) [source] [64-bit] [smp:2:2] [rq:2] [async-threads:30] [kernel-poll:true]\n"},
|
||||
...
|
||||
{listeners,[{clustering,25672,"::"},{amqp,5672,"::"}]},
|
||||
...
|
||||
...done.
|
||||
|
||||
|
||||
[root@maxscale-02 MaxScale]# rabbitmqctl list_bindings
|
||||
Listing bindings ...
|
||||
x1 exchange q1 queue k1 []
|
||||
...done.
|
||||
|
||||
Interaction with the server may require stop & reset at some point:
|
||||
|
||||
rabbitmqctl stop_app
|
||||
rabbitmqctl reset
|
||||
rabbitmqctl start_app
|
||||
|
||||
## Step 3 - Install and test the client libraries
|
||||
|
||||
The selected library for MaxScale integration of RabbitMQ is:
|
||||
[https://github.com/alanxz/rabbitmq-c](https://github.com/alanxz/rabbitmq-c "RabbitMQ-C")
|
||||
|
||||
### Manual software compilation
|
||||
|
||||
To compile the RabbitMQ-C libraries manually:
|
||||
|
||||
git clone https://github.com/alanxz/rabbitmq-c.git
|
||||
cd rabbitmq-c
|
||||
cmake -DCMAKE_INSTALL_PREFIX=/usr .
|
||||
make
|
||||
make install
|
||||
|
||||
Please note, this will install the packages to /usr. If you do not wish to install them to this location, provide a different value for the CMAKE_INSTALL_PREFIX variable.
|
||||
|
||||
|
||||
### Setup using the EPEL repository
|
||||
|
||||
Check how to configure your distribution for the EPEL repository: [https://fedoraproject.org/wiki/EPEL](https://fedoraproject.org/wiki/EPEL "EPEL")
|
||||
|
||||
Configure your repositories and install the software:
|
||||
|
||||
yum install librabbitmq.x86_64
|
||||
|
||||
you might also like to install:
|
||||
|
||||
librabbitmq-tools.x86_64, librabbitmq-devel.x86_64
|
||||
|
||||
Please note you may also install the rabbitmq server from the EPEL repository:
|
||||
|
||||
yum install rabbitmq-server
|
||||
|
||||
|
||||
|
||||
|
||||
### Basic tests with library
|
||||
|
||||
The required library librabbitmq-c is now installed and we continue with basic operations with amqp_* tools, located in the examples/ folder of the build directory, testing client server interaction.
|
||||
|
||||
Please note, those example applications may not be included in the RPM library packages.
|
||||
|
||||
#### Test 1 - create the exchange
|
||||
|
||||
[root@maxscale-02 examples]# ./amqp_exchange_declare
|
||||
Usage: amqp_exchange_declare host port exchange exchangetype
|
||||
|
||||
Declare the exchange:
|
||||
|
||||
[root@maxscale-02 examples]# ./amqp_exchange_declare 127.0.0.1 5672 foo direct
|
||||
|
||||
#### Test 2 - Listen to exchange with selected binding key
|
||||
|
||||
[root@maxscale-02 examples]# ./amqp_listen
|
||||
Usage: amqp_listen host port exchange bindingkey
|
||||
|
||||
Start the listener:
|
||||
|
||||
[root@maxscale-02 examples]# ./amqp_listen 127.0.0.1 5672 foo k1 &
|
||||
|
||||
#### Test 3 - Send a message …
|
||||
|
||||
[root@maxscale-02 examples]# ./amqp_sendstring
|
||||
Usage: amqp_sendstring host port exchange routingkey messagebody
|
||||
|
||||
[root@maxscale-02 examples]# ./amqp_sendstring 127.0.0.1 5672 foo k1 "This is a new message"
|
||||
|
||||
... and watch the listener output
|
||||
|
||||
Delivery 1, exchange foo routingkey k1
|
||||
Content-type: text/plain
|
||||
|
||||
|
||||
## Step 4 - MaxScale integration with librabbitmq-c
|
||||
|
||||
A new filter (mqfilter.c) is implemented in order to send messages to the rabbitmq server and a message consumer (rabbitmq_consumer/consumer.c) program will get messages and store them into a MySQL/MariaDB database.
|
||||
A quick way to install MaxScale with the RabbitMQ filter is to go to the MaxScale source directory and run the following commands:
|
||||
|
||||
mkdir build
|
||||
cd build
|
||||
cmake .. -DBUILD_RABBITMQ=Y
|
||||
make
|
||||
make install
|
||||
|
||||
To build the RabbitMQ filter CMake needs an additional parameter:
|
||||
|
||||
-DBUILD_RABBITMQ=Y
|
||||
|
||||
If the librabbitmq-c library is manually compiled it may be necessary to manually pass the location of the libraries and header files to CMake.
|
||||
|
||||
Libraries:
|
||||
|
||||
-DRABBITMQ_LIBRARIES=<path to RabbitMQ-c libraries>
|
||||
|
||||
Headers:
|
||||
|
||||
-DRABBITMQ_HEADERS=<path to RabbitMQ-c headers>
|
||||
|
||||
|
||||
Please note, Message Queue Consumer (consumer.c) also needs to be compiled with MySQL/MariaDB client libraries in addition to the RabbitMQ-c libraries. If you have your MySQL/MariaDB client libraries and headers in non-standard locations, you can pass them manually to CMake:
|
||||
|
||||
Libraries:
|
||||
|
||||
-DMYSQLCLIENT_LIBRARIES=<path to libraries>
|
||||
|
||||
Headers:
|
||||
|
||||
-DMYSQLCLIENT_HEADERS=<path to headers>
|
||||
|
||||
The message queue consumer must be also built as a separate task, it’s not built as part of MaxScale build system. To build it, run the following commands in the rabbitmq_consumer directory in the MaxScale source folder:
|
||||
|
||||
mkdir build
|
||||
cd build
|
||||
cmake ..
|
||||
make
|
||||
|
||||
To install it:
|
||||
|
||||
make install
|
||||
|
||||
To build packages:
|
||||
|
||||
make package
|
||||
|
||||
This generates RPM or DEB packages based on your system. These packages can then be installed on remote systems for easy access to the data generated by the consumer client.
|
||||
|
||||
## Step 5 - Configure new applications
|
||||
|
||||
The new filter needs to be configured in MaxScale.cnf.
|
||||
|
||||
[Test Service]
|
||||
type=service
|
||||
router=readconnroute
|
||||
router_options=slave
|
||||
servers=server1,server2,server3,server5,server4
|
||||
user=massi
|
||||
passwd=massi
|
||||
filters=MQ
|
||||
|
||||
[MQ]
|
||||
type=filter
|
||||
module=mqfilter
|
||||
exchange=x1
|
||||
key=k1
|
||||
queue=q1
|
||||
hostname=127.0.0.1
|
||||
port=5672
|
||||
logging_trigger=all
|
||||
|
||||
|
||||
|
||||
Logging triggers define whether to log all or a subset of the incoming queries using these options:
|
||||
|
||||
# log only some elements or all
|
||||
logging_trigger=[all,source,schema,object]
|
||||
|
||||
# Whether to log only SELECT, UPDATE, INSERT and DELETE queries or all possible queries
|
||||
logging_log_all=true|false
|
||||
|
||||
|
||||
|
||||
# Log only when any of the trigger parameters match or only if all parameters match
|
||||
logging_strict=true|false
|
||||
|
||||
# specify objects
|
||||
logging_object=mytable,another_table
|
||||
|
||||
# specify logged users
|
||||
logging_source_user=testuser,testuser
|
||||
|
||||
|
||||
# specify source addresses
|
||||
logging_source_host=127.0.0.1,192.168.10.14
|
||||
|
||||
# specify schemas
|
||||
logging_schema=employees,orders,catalog
|
||||
|
||||
|
||||
Example:
|
||||
|
||||
logging_trigger=object,schema,source
|
||||
logging_strict=false
|
||||
logging_log_all=false
|
||||
logging_object=my1
|
||||
logging_schema=test
|
||||
logging_source_user=maxtest
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
The logging result of the example is:
|
||||
|
||||
if user maxtest does something, it's logged
|
||||
and all queries in test schema are logged
|
||||
anything targeting my1 table is logged
|
||||
SELECT NOW() and SELECT MD5("xyz") are not logged
|
||||
|
||||
Please note that if we want to log only the user ‘maxtest’ accessing the schema ‘test’ with target ‘my1’ the option logging_strict must be set to TRUE and if we want to include those selects without schema name the option logging_log_all must be set to TRUE.
|
||||
|
||||
The mqfilter logs into the MaxScale TRACE log information about the matched logging triggers and the message delivering:
|
||||
|
||||
2014 09/03 06:22:04 Trigger is TRG_SOURCE: user: testuser = testuser
|
||||
2014 09/03 06:22:04 Trigger is TRG_SCHEMA: test = test
|
||||
2014 09/03 06:22:04 Trigger is TRG_OBJECT: test.t1 = t1
|
||||
2014 09/03 06:22:04 Routing message to: 127.0.0.1:5672 / as guest/guest, exchange: x1<direct> key:k1 queue:q1
|
||||
|
||||
The consumer application needs to be configured as well:
|
||||
|
||||
|
||||
#The options for the consumer are:
|
||||
#hostname RabbitMQ hostname
|
||||
#port RabbitMQ port
|
||||
#vhost RabbitMQ virtual host
|
||||
#user RabbitMQ username
|
||||
#passwd RabbitMQ password
|
||||
|
||||
|
||||
#queue Name of the queue to use
|
||||
#dbserver SQL server name
|
||||
#dbport SQL server port
|
||||
#dbname Name of the database to use
|
||||
#dbuser SQL server username
|
||||
#dbpasswd SQL server password
|
||||
#logfile Message log filename
|
||||
|
||||
[consumer]
|
||||
hostname=127.0.0.1
|
||||
port=5672
|
||||
vhost=/
|
||||
user=guest
|
||||
passwd=guest
|
||||
queue=q1
|
||||
dbserver=127.0.0.1
|
||||
dbport=3308
|
||||
dbname=mqpairs
|
||||
dbuser=xxx
|
||||
dbpasswd=yyy
|
||||
|
||||
We may need to modify LD_LIBRARY_PATH before launching the consumer:
|
||||
|
||||
# export LD_LIBRARY_PATH=/packages/rabbitmq-c/rabbitmq-c/librabbitmq:/packages/mariadb_client-2.0.0-Linux/lib/mariadb:/usr/lib64
|
||||
|
||||
and finally we can launch it:
|
||||
|
||||
# ./consumer
|
||||
|
||||
If the consumer.cnf file is not in the same directory as the binary file is, you can provide the location of the folder that it is in by passing it the -c flag followed by the path:
|
||||
|
||||
# ./consumer -c path/to/file
|
||||
|
||||
and start MaxScale as well
|
||||
|
||||
## Step 6 - Test the filter and check collected data
|
||||
Assuming that MaxScale and the message consumer are successfully running let’s connect to the service with an active mqfilter:
|
||||
|
||||
[root@maxscale-02 MaxScale]# mysql -h 127.0.0.1 -P 4506 -uxxx -pyyy
|
||||
...
|
||||
MariaDB [(none)]> select RAND(3), RAND(5);
|
||||
+--------------------+---------------------+
|
||||
| RAND(3) | RAND(5) |
|
||||
+--------------------+---------------------+
|
||||
| 0.9057697559760601 | 0.40613597483014313 |
|
||||
+--------------------+---------------------+
|
||||
1 row in set (0.01 sec)
|
||||
|
||||
…
|
||||
MariaDB [(none)]> select RAND(3544), RAND(11);
|
||||
|
||||
|
||||
|
||||
we can check the consumer output in the terminal where it was started:
|
||||
|
||||
--------------------------------------------------------------
|
||||
Received: 1409671452|select @@version_comment limit ?
|
||||
Received: 1409671452|Columns: 1
|
||||
...
|
||||
Received: 1409671477|select RAND(?), RAND(?)
|
||||
Received: 1409671477|Columns: 2
|
||||
|
||||
We query now the database for the content collected so far:
|
||||
|
||||
MariaDB [(none)]> use mqpairs;
|
||||
Database changed
|
||||
|
||||
|
||||
|
||||
MariaDB [mqpairs]> select * from pairs;
|
||||
|
||||
+-------------------------------------+----------------------------------+------------+---------------------+---------------------+---------+
|
||||
| tag | query | reply | date_in | date_out | counter |
|
||||
+-------------------------------------+----------------------------------+------------+---------------------+---------------------+---------+
|
||||
| 006c006d006e006f007000710072007374 | select @@version_comment limit ? | Columns: 1 | 2014-09-02 11:14:51 | 2014-09-02 11:26:38 | 3 |
|
||||
| 00750076007700780079007a007b007c7d | SELECT DATABASE() | Columns: 1 | 2014-09-02 11:14:56 | 2014-09-02 11:27:06 | 3 |
|
||||
| 007e007f00800081008200830084008586 | show databases | Columns: 1 | 2014-09-02 11:14:56 | 2014-09-02 11:27:06 | 3 |
|
||||
| 008700880089008a008b008c008d008e8f | show tables | Columns: 1 | 2014-09-02 11:14:56 | 2014-09-02 11:27:06 | 3 |
|
||||
| 0090009100920093009400950096009798 | select * from mqpairs.pairs | Columns: 6 | 2014-09-02 11:15:00 | 2014-09-02 11:27:00 | 12 |
|
||||
| 00fc00fd00fe00ff0100010101020103104 | select NOW() | Columns: 1 | 2014-09-02 11:24:23 | 2014-09-02 11:24:23 | 1 |
|
||||
| 01050106010701080109010a010b010c10d | select RAND(?), RAND(?) | Columns: 2 | 2014-09-02 11:24:37 | 2014-09-02 11:24:37 | 1 |
|
||||
+-------------------------------------+----------------------------------+------------+---------------------+---------------------+---------+
|
||||
7 rows in set (0.01 sec)
|
||||
|
||||
The filter sends queries to the RabbitMQ server in the canonical format, i.e. select RAND(?), RAND(?).
|
||||
The queries the Message Queue Consumer application gets from the server are stored with a counter that quickly shows how many times that normalized query was received:
|
||||
|
||||
| 01050106010701080109010a010b010c10d | select RAND(?), RAND(?) | Columns: 2 | 2014-09-02 11:24:37 | 2014-09-02 11:29:15 | 3 |
|
@ -0,0 +1,209 @@
|
||||
# MaxScale as a replication proxy
|
||||
MaxScale was designed as a highly configurable proxy that sits between a database layer and the clients of that database, the binlog router described here is somewhat different to that original concept, moving MaxScale down to play a role within the database layer itself.
|
||||
|
||||
In a traditional MySQL replication setup a single master server is created and a set of slaves MySQL instances are configured to pull the binlog files from that master to the slaves. There are some problems however in this setup; when the number of slaves servers starts to increase an increasing load is placed on the master to serve the binlogs to each slave. When a master server fails every slave server requires some action to be performed before a new server can become the master server.
|
||||
|
||||
Introducing a proxy layer between the master server and the slave servers can improve the situation by reducing the load on the master to simply serving the proxy layer rather than all of the slaves and the slaves only need to be aware of the proxy layer and not the real master server. Removing this requirement for the slaves to have knowledge of the master greatly simplifies the process of replacing a failed master within a replication environment.
|
||||
|
||||
## MariaDB/MySQL as a replication proxy
|
||||
The most obvious solution to the requirement for a proxy layer within a replication environment is to use a MariaDB or MySQL database instance. The database server is designed to allow this, since a slave server is able to be configured such that it will produce binary logs for updates it has itself received via replication from the master server. This is done with the log_slave_updates configuration option of the server. In this case the server is known as an intermediate master, it is both a slave to the real master and a master to the other slaves in the configuration.
|
||||
|
||||
Using an intermediate master does not however solve all the problems and introduces some due to the way replication is implemented. A slave server reads the binary log data and creates a relay log from that binary log. This then provides a source of SQL statements which are executed within the slave in order to make the same changes to the databases on the slaves as were made on the master. If the log_slave_updates option has been enabled new binary log entries are created for the statements executed from the relay log. This means that the data in the binary log of the intermediate master is not a direct copy of the data that was received from the binary log of the real master. The resultant changes to the database will be the same, provided no updates have been performed on the intermediate master that did not originate on the real master, but the steps to achieve those changes may be different. In particular if group commit functionality is used, to allow multiple transactions to commit in parallel, these may well be different on the intermediate master. This can cause a reduction in the parallelism of the commits and a subsequent reduction in the performance of the slave servers.
|
||||
|
||||
This re-execution of the SQL statements also adds latency to the intermediate master solution, since the full process of parsing, optimisation and execution must be performed in the intermediate master for every statement that is replicated from the master to the slaves. This latency introduces lag in the replication chain, with a greater delay being introduced from the time a transaction is committed on the master until the data is available on the slaves.
|
||||
|
||||
Use of an intermediate master does improve the process of failover of the master server, since the slaves are only aware of the intermediate master the process of promoting one of the existing slaves to become the new master only involves that slave and the intermediate master. A slave can become the new master as soon as all the changes from the intermediate master have been processed. The intermediate master then needs to be reset to the correct point in the binary log of the new master and replication can continue.
|
||||
|
||||
An added complexity that needs to be dealt with is the failure of the intermediate master itself. If this occurs then the same problem as described earlier exists, all slaves must be updated when a new intermediate master is created. If multiple intermediate masters are used there is also a restriction that slaves cannot be moved from the failed intermediate master to another intermediate master due to the fact that the binlogs on the different intermediate nodes are not guaranteed to be the same.
|
||||
|
||||
## MaxScale's approach
|
||||
MaxScale takes a much simpler approach to the process of being a replication proxy. It acts as a slave to the real master and as a master to the slaves in the same way as an intermediate master does, however it does not implement any re-execution of the statements within the binary log. MaxScale creates a local cache of the binary logs it receives from the master and it will serve binary log events to the slaves from this cache of the master's binary log. This means that the slaves will always get binary log events that have a one-to-one correlation to those written by the master. Parallelism in the binary log events of the master is maintained in the events that are observed by the slaves.
|
||||
|
||||
In the MaxScale approach the latency that is introduced is mostly the added network latency associated with adding the extra network hop. There is no appreciable processing performed at the MaxScale level, other than for managing the local cache of the binlog files.
|
||||
|
||||
In addition every MaxScale that is acting as a proxy of the master will have exactly the same binlog events as the master itself. This means that a slave can be moved between any of the MaxScale servers or to the real master without the need to perform any special processing. The result is much simpler behaviour for failure recovery and the ability to have a very simple, redundant proxy layer with slaves free to move between the proxies.
|
||||
|
||||
# Configuring MaxScale as a replication proxy
|
||||
Using MaxScale as a replication proxy is much the same as using MaxScale as a proxy between the clients and the database servers. In this case the master server should be considered as the database backend and the slave servers as the clients of MaxScale.
|
||||
|
||||
## Service Configuration
|
||||
|
||||
As with any MaxScale configuration a good starting point is with the service definition with the MaxScale.cnf file. The service requires a name which is the section name in the ini file, a type parameter with a value of service and the name of the router plugin that should be loaded. In the case of replication proxies this router name is binlogrouter.
|
||||
|
||||
|
||||
[Replication]
|
||||
type=service
|
||||
router=binlogrouter
|
||||
|
||||
Other standard service parameters need to be given in the configuration section that are used to retrieve the set of users from the backend (master) database, also a version string can be given such that the MaxScale instance will report this version string to the slave servers that connect to MaxScale. The master server entry must also be given. In the current implementation of the router only a single server can be given.
|
||||
|
||||
[Replication]
|
||||
type=service
|
||||
router=binlogrouter
|
||||
servers=masterdb
|
||||
version_string=5.6.17-log
|
||||
user=maxscale
|
||||
passwd=Mhu87p2D
|
||||
|
||||
The user and passwd entries in the above example are used in order for MaxScale to populate the credential information that is required to allow the slaves to connect to MaxScale. This user should be configured in exactly the same way as for any other MaxScale service, i.e. the user needs access to the mysql.user table and the mysql.db table as well as having the ability to perform a SHOW DATABASES command.
|
||||
|
||||
The final configuration requirement is the router specific options. The binlog router requires a set of parameters to be passed, these are passed in the router_options parameter of the service definition as a comma separated list of name value pairs.
|
||||
|
||||
### uuid
|
||||
|
||||
This is used to set the unique uuid that the router uses when it connects to the master server. It is a requirement of replication that each slave have a unique UUID value. The MaxScale router will identify itself to the slaves using the uuid of the real master and not this uuid. If no explicit value is given for the uuid in the configuration file then a uuid will be generated.
|
||||
|
||||
### server-id
|
||||
|
||||
As with uuid, MaxScale must have a unique server-id for the connection it makes to the master, this parameter provides the value of server-id that MaxScale will use when connecting to the master.
|
||||
|
||||
### user
|
||||
|
||||
This is the user name that MaxScale uses when it connects to the master. This user name must have the rights required for replication as with any other user that a slave uses for replication purposes.
|
||||
|
||||
### password
|
||||
|
||||
The password of the above user.
|
||||
|
||||
### master-id
|
||||
|
||||
The server-id value that MaxScale should use to report to the slaves that connect to MaxScale. This may either be the same as the server-id of the real master or can be chosen to be different if the slaves need to be aware of the proxy layer.
|
||||
|
||||
### filestem
|
||||
|
||||
This parameter is used to provide the stem of the file names that are used to store the binlog events. If this parameter is not given then the events are stored in the default name of mysql-bin followed by a sequence number.
|
||||
|
||||
### initialfile
|
||||
|
||||
This optional parameter allows for the administrator to define the number of the first binlog file to download. In normal circumstances MaxScale will use any existing binlog file to determine what to request from the master. If there are no files it will then ask for the binlog file with the index number defined in the initialfile parameter. If this parameter is not set then MaxScale will ask the master for binlog events from file 1.
|
||||
|
||||
### binlogdir
|
||||
|
||||
This parameter allows the location that MaxScale uses to store binlog files to be set. If this parameter is not set to a directory name then MaxScale will store the binlog files in the directory $MAXSCALE_HOME/<Service Name>.
|
||||
|
||||
### heartbeat
|
||||
|
||||
This defines the value of the heartbeat interval in seconds for the connection to the master. MaxScale requests the master to ensure that a binlog event is sent at least every heartbeat period. If there are no real binlog events to send the master will send a special heartbeat event. The default value for the heartbeat period is every 5 minutes.
|
||||
|
||||
### burstsize
|
||||
|
||||
This parameter is used to define the maximum amount of data that will be sent to a slave by MaxScale when that slave is lagging behind the master. In this situation the slave is said to be in "catchup mode", this parameter is designed to both prevent flooding of that slave and also to prevent threads within MaxScale spending disproportionate amounts of time with slaves that are lagging behind the master. The burst size can be defined in Kb, Mb or Gb by adding the qualifier K, M or G to the number given.
|
||||
|
||||
A complete example of a service entry for a binlog router service would be as follows.
|
||||
|
||||
[Replication]
|
||||
type=service
|
||||
router=binlogrouter
|
||||
servers=masterdb
|
||||
version_string=5.6.17-log
|
||||
router_options=uuid=f12fcb7f-b97b-11e3-bc5e-0401152c4c22,server-id=3,user=repl,password=slavepass,master-id=1,filestem=mybin,heartbeat=30,binlogdir=/home/mriddoch/binlogs
|
||||
user=maxscale
|
||||
passwd=Mhu87p2D
|
||||
|
||||
## Listener Section
|
||||
|
||||
As per any service in MaxScale a listener section is required to define the address, port and protocol that is used to listen for incoming connections. In this case those incoming connections will originate from the slave servers.
|
||||
|
||||
[Replication Listener]
|
||||
type=listener
|
||||
service=Replication
|
||||
protocol=MySQLClient
|
||||
port=5308
|
||||
|
||||
The protocol used by slaves for connection to MaxScale is the same MySQLClient protocol that is used for client applications to connect to databases, therefore the same MaxScale protocol module can be used.
|
||||
|
||||
## Master Server Section
|
||||
|
||||
The master server is defined in a section within the MaxScale configuration file in the same way as any other server. The protocol that is used is the same backend protocol as is used in other configurations.
|
||||
|
||||
[masterdb]
|
||||
type=server
|
||||
address=178.62.50.70
|
||||
port=3306
|
||||
protocol=MySQLBackend
|
||||
|
||||
# MaxScale replication diagnostics
|
||||
|
||||
The binlog router module of MaxScale produces diagnostic output that can be viewed via the `maxadmin` client application. Running the maxadmin command and issuing a show service command will produce a considerable amount of output that will show both the master connection status and statistics and also a block for each of the slaves currently connected.
|
||||
|
||||
-bash-4.1$ maxadmin show service Replication
|
||||
Service 0x1567ef0
|
||||
Service: Replication
|
||||
Router: binlogrouter (0x7f4ceb96a820)
|
||||
State: Started
|
||||
Master connection DCB: 0x15693c0
|
||||
Master connection state: Binlog Dump
|
||||
Binlog directory: /home/mriddoch/binlogs
|
||||
Number of master connects: 1
|
||||
Number of delayed reconnects: 0
|
||||
Current binlog file: mybin.000061
|
||||
Current binlog position: 120
|
||||
Number of slave servers: 0
|
||||
No. of binlog events received this session: 1002705
|
||||
Total no. of binlog events received: 2005410
|
||||
No. of bad CRC received from master: 0
|
||||
Number of binlog events per minute
|
||||
Current 5 10 15 30 Min Avg
|
||||
4 4.0 4.0 4.0 4.0
|
||||
Number of fake binlog events: 0
|
||||
Number of artificial binlog events: 61
|
||||
Number of binlog events in error: 0
|
||||
Number of binlog rotate events: 60
|
||||
Number of heartbeat events: 69
|
||||
Number of packets received: 599
|
||||
Number of residual data packets: 379
|
||||
Average events per packet 3347.9
|
||||
Last event from master at: Thu Jan 29 16:41:53 2015
|
||||
(1 seconds ago)
|
||||
Last event from master: 0x1b (Heartbeat Event)
|
||||
Events received:
|
||||
Invalid 0
|
||||
Start Event V3 0
|
||||
Query Event 703307
|
||||
Stop Event 55
|
||||
Rotate Event 65
|
||||
Integer Session Variable 0
|
||||
Load Event 0
|
||||
Slave Event 0
|
||||
Create File Event 0
|
||||
Append Block Event 0
|
||||
Exec Load Event 0
|
||||
Delete File Event 0
|
||||
New Load Event 0
|
||||
Rand Event 0
|
||||
User Variable Event 0
|
||||
Format Description Event 61
|
||||
Transaction ID Event (2 Phase Commit) 299148
|
||||
Begin Load Query Event 0
|
||||
Execute Load Query Event 0
|
||||
Table Map Event 0
|
||||
Write Rows Event (v0) 0
|
||||
Update Rows Event (v0) 0
|
||||
Delete Rows Event (v0) 0
|
||||
Write Rows Event (v1) 0
|
||||
Update Rows Event (v1) 0
|
||||
Delete Rows Event (v1) 0
|
||||
Incident Event 0
|
||||
Heartbeat Event 69
|
||||
Ignorable Event 0
|
||||
Rows Query Event 0
|
||||
Write Rows Event (v2) 0
|
||||
Update Rows Event (v2) 0
|
||||
Delete Rows Event (v2) 0
|
||||
GTID Event 0
|
||||
Anonymous GTID Event 0
|
||||
Previous GTIDS Event 0
|
||||
Started: Thu Jan 29 16:06:11 2015
|
||||
Root user access: Disabled
|
||||
Backend databases
|
||||
178.62.50.70:3306 Protocol: MySQLBackend
|
||||
Users data: 0x156c030
|
||||
Total connections: 2
|
||||
Currently connected: 2
|
||||
-bash-4.1$
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
@ -1,999 +0,0 @@
|
||||
|
||||
|
||||
MaxScale
|
||||
|
||||
Configuration & Usage Scenarios
|
||||
|
||||
|
||||
Mark Riddoch
|
||||
|
||||
Last Updated: 2nd July 2014
|
||||
|
||||
|
||||
== Contents
|
||||
|
||||
Contents
|
||||
Document History
|
||||
Introduction
|
||||
Terms
|
||||
Configuration
|
||||
Global Settings
|
||||
Threads
|
||||
Service
|
||||
Router
|
||||
Filters
|
||||
Servers
|
||||
User
|
||||
Passwd
|
||||
weightby
|
||||
Server
|
||||
Address
|
||||
Port
|
||||
Protocol
|
||||
Monitoruser
|
||||
MonitorPw
|
||||
Listener
|
||||
Service
|
||||
Protocol
|
||||
Address
|
||||
Port
|
||||
Filter
|
||||
Module
|
||||
Options
|
||||
Other Parameters
|
||||
Monitor
|
||||
Module
|
||||
Servers
|
||||
User
|
||||
Passwd
|
||||
Protocol Modules
|
||||
MySQLClient
|
||||
MySQLBackend
|
||||
Telnetd
|
||||
maxscaled
|
||||
HTTPD
|
||||
Router Modules
|
||||
Connection Based Routing
|
||||
Statement Based Routing
|
||||
Available Routing Modules
|
||||
Readconnroute
|
||||
Master/Slave Replication Setup
|
||||
Galera Cluster Configuration
|
||||
Readwritesplit
|
||||
Master/Slave Replication Setup
|
||||
Debugcli
|
||||
Debug CLI Configuration
|
||||
CLI
|
||||
CLI Configuration
|
||||
Monitor Modules
|
||||
Mysqlmon
|
||||
Galeramon
|
||||
Filter Modules
|
||||
Statement Counting Filter
|
||||
Query Log All Filter
|
||||
Regular Expression Filter
|
||||
Tee Filter
|
||||
Encrypting Passwords
|
||||
Creating Encrypted Passwords
|
||||
Configuration Updates
|
||||
Limitations
|
||||
Authentication
|
||||
Wildcard Hosts
|
||||
Limitations
|
||||
Error Reporting
|
||||
|
||||
|
||||
==
|
||||
==
|
||||
== Document History
|
||||
|
||||
|===
|
||||
|*Date*|*Change*|*Who*
|
||||
|
||||
|21st July 2013|Initial version|Mark Riddoch
|
||||
|23rd July 2013|Addition of default user and password for a monitor and discussion of monitor user requirements|Mark Riddoch
|
||||
|13th November 2013|state for Galera Monitor is “synced”|Massimiliano Pinto
|
||||
|2nd December 2013|Updated the description of the command line arguments to match the code updates.|Mark Riddoch
|
||||
|6th February 2014|Added “enable_root_user” as a service parameter|Massimiliano Pinto
|
||||
|7th February 2014|Addition of bind address information|Mark Riddoch
|
||||
|3rd March 2014|MySQL authentication with hostnames|Massimiliano Pinto
|
||||
|3rd March 2014|Addition of section that describes authentication requirements and the rules for creating user credentials|Mark Riddoch
|
||||
|28th March 2014|Unix socket support|Massimiliano Pinto
|
||||
|8th May 2014|Added “version_string” parameter in service|Massimiliano Pinto
|
||||
|29th May 2014|Added troubleshooting section|Massimiliano Pinto
|
||||
|2nd June 2014|Correction of some typos, clarification of the meaning of session modification statements and the default user for the CLI.|Mark Riddoch
|
||||
|4th June 2014|Addition of “monitor_interval” for monitors|Massimiliano Pinto
|
||||
|6th June 2014|Addition of filters sections|Mark Riddoch
|
||||
|27th June 2014|Addition of server weighting, the configuration for the maxadmin client|Mark Riddoch
|
||||
|2nd July 2014|Addition of new readwritesplit router options with description and examples.|Vilho Raatikka
|
||||
|===
|
||||
==
|
||||
== Introduction
|
||||
|
||||
The purpose of this document is to describe how to configure MaxScale and to discuss some possible usage scenarios for MaxScale. MaxScale is designed with flexibility in mind, and consists of an event processing core with various support functions and plugin modules that tailor the behaviour of the MaxScale itself.
|
||||
|
||||
=== Terms
|
||||
|
||||
|===
|
||||
|*Term*|*Description*
|
||||
|
||||
|service|A service represents a set of databases with a specific access mechanism that is offered to clients of MaxScale. The access mechanism defines the algorithm that MaxScale will use to direct particular requests to the individual databases.
|
||||
|server|A server represents an individual database server to which a client can be connected via MaxScale.
|
||||
|router|A router is a module within MaxScale that will route client requests to the various database servers which MaxScale provides a service interface to.
|
||||
|connection routing|Connection routing is a method of handling requests in which MaxScale will accept connections from a client and route data on that connection to a single database using a single connection. Connection based routing will not examine individual requests on a connection and it will not move that connection once it is established.
|
||||
|statement routing|Statement routing is a method of handling requests in which each request within a connection will be handled individually. Requests may be sent to one or more servers and connections may be dynamically added or removed from the session.
|
||||
|protocol|A protocol is a module of software that is used to communicate with another software entity within the system. MaxScale supports the dynamic loading of protocol modules to allow for increased flexibility.
|
||||
|module|A module is a separate code entity that may be loaded dynamically into MaxScale to increase the available functionality. Modules are implemented as run-time loadable shared objects.
|
||||
|monitor|A monitor is a module that can be executed within MaxScale to monitor the state of a set of databases. The use of an internal monitor is optional, monitoring may be performed externally to MaxScale.
|
||||
|listener|A listener is the network endpoint that is used to listen for connections to MaxScale from the client applications. A listener is associated to a single service, however a service may have many listeners.
|
||||
|connection failover|When a connection currently being used between MaxScale and the database server fails a replacement will be automatically created to another server by MaxScale without client intervention
|
||||
|backend database|A term used to refer to a database that sits behind MaxScale and is accessed by applications via MaxScale.
|
||||
|filter|A module that can be placed between the client and the MaxScale router module. All client data passes through the filter module and may be examined or modified by the filter modules.
|
||||
|===
|
||||
|
||||
|
||||
==
|
||||
==
|
||||
== Configuration
|
||||
|
||||
The MaxScale configuration is read from a file which can be located in a number of places; MaxScale will search for the configuration file in a number of locations.
|
||||
|
||||
. If the environment variable MAXSCALE_HOME is set then MaxScale will look for a configuration file called MaxScale.cnf in the directory $MAXSCALE_HOME/etc
|
||||
. If MAXSCALE_HOME is not set or the configuration file is not in the location above MaxScale will look for a file in /etc/MaxScale.cnf
|
||||
|
||||
Alternatively MaxScale can be started with the -c flag and the path of the MaxScale home directory tree.
|
||||
|
||||
An explicit path to a configuration file can be passed by using the -f option to MaxScale.
|
||||
|
||||
The configuration file itself is based on the “ini” file format and consists of various sections that are used to build the configuration, these sections define services, servers, listeners, monitors and global settings.
|
||||
|
||||
=== Global Settings
|
||||
The global settings, in a section named [MaxScale], allow various parameters that affect MaxScale as a whole to be tuned. Currently the only setting that is supported is the number of threads to use to handle the network traffic. MaxScale will also accept the section name of [gateway] for global settings. This is for backward compatibility with versions prior to the naming of MaxScale.
|
||||
|
||||
==== Threads
|
||||
To control the number of threads that poll for network traffic set the parameter threads to a number. It is recommended that you start with a single thread and add more as you find the performance is not satisfactory. MaxScale is implemented to be very thread efficient, so a small number of threads is usually adequate to support reasonably heavy workloads. Adding more threads may not improve performance and can consume resources needlessly.
|
||||
|
||||
----
|
||||
# Valid options are:
|
||||
# threads=<number of epoll threads>
|
||||
[MaxScale]
|
||||
threads=1
|
||||
----
|
||||
|
||||
It should be noted that additional threads will be created to execute other internal services within MaxScale, this setting is merely used to configure the number of threads that will be used to manage the user connections.
|
||||
|
||||
=== Service
|
||||
A service represents the database service that MaxScale offers to the clients. In general a service consists of a set of backend database servers and a routing algorithm that determines how MaxScale decides to send statements or route connections to those backend servers.
|
||||
|
||||
A service may be considered as a virtual database server that MaxScale makes available to its clients.
|
||||
|
||||
Several different services may be defined using the same set of backend servers. For example a connection based routing service might be used by clients that already performed internal read/write splitting, whilst a different statement based router may be used by clients that are not written with this functionality in place. Both sets of applications could access the same data in the same databases.
|
||||
|
||||
A service is identified by a service name, which is the name of the configuration file section and a type parameter of service
|
||||
|
||||
----
|
||||
[Test Service]
|
||||
type=service
|
||||
----
|
||||
|
||||
In order for MaxScale to forward any requests it must have at least one service defined within the configuration file. The definition of a service alone is not enough to allow MaxScale to forward requests however, the service is merely present to link together the other configuration elements.
|
||||
|
||||
==== Router
|
||||
The router parameter of a service defines the name of the router module that will be used to implement the routing algorithm between the client of MaxScale and the backend databases. Additionally routers may also be passed a comma separated list of options that are used to control the behaviour of the routing algorithm. The two parameters that control the routing choice are router and router_options. The router options are specific to a particular router and are used to modify the behaviour of the router. The read connection router can be passed options of master, slave or synced, an example of configuring a service to use this router and limiting the choice of servers to those in slave state would be as follows.
|
||||
|
||||
----
|
||||
router=readconnroute
|
||||
router_options=slave
|
||||
----
|
||||
|
||||
To change the router to connect on to servers in the master state as well as slave servers, the router options can be modified to include the master state.
|
||||
|
||||
----
|
||||
router=readconnroute
|
||||
router_options=master,slave
|
||||
----
|
||||
|
||||
A more complete description of router options and what is available for a given router is included with the documentation of the router itself.
|
||||
|
||||
==== Filters
|
||||
The filters option allow a set of filters to be defined for a service; requests from the client are passed through these filters before being sent to the router for dispatch to the backend server. The filters parameter takes one or more filter names, as defined within the filter definition section of the configuration file. Multiple filters are separated using the | character.
|
||||
|
||||
+filters=counter | QLA+
|
||||
|
||||
The requests pass through the filters from left to right in the order defined in the configuration parameter.
|
||||
|
||||
==== Servers
|
||||
The servers parameter in a service definition provides a comma separated list of the backend servers that comprise the service. The server names are those used in the name section of a block with a type parameter of server (see below).
|
||||
|
||||
+servers=server1,server2,server3+
|
||||
|
||||
==== User
|
||||
The user parameter, along with the passwd parameter are used to define the credentials used to connect to the backend servers to extract the list of database users from the backend database that is used for the client authentication.
|
||||
|
||||
----
|
||||
user=maxscale
|
||||
passwd=Mhu87p2D
|
||||
----
|
||||
|
||||
Authentication of incoming connections is performed by MaxScale itself rather than by the database server to which the client is connected. The client will authenticate itself with MaxScale, using the username, hostname and password information that MaxScale has extracted from the backend database servers. For a detailed discussion of how this impacts the authentication process please see the “Authentication” section below.
|
||||
|
||||
The host matching criteria is restricted to IPv4, IPv6 will be added in a future release.
|
||||
|
||||
Existing user configuration in the backend databases must be checked and may be updated before successful MaxScale authentication:
|
||||
|
||||
|
||||
In order for MaxScale to obtain all the data it must be given a username it can use to connect to the database and retrieve that data. This is the parameter that gives MaxScale the username to use for this purpose.
|
||||
|
||||
The account used must be able to select from the mysql.user table, the following is an example showing how to create this user.
|
||||
|
||||
----
|
||||
MariaDB [mysql]> create user 'maxscale'@'maxscalehost' identified by 'Mhu87p2D';
|
||||
Query OK, 0 rows affected (0.01 sec)
|
||||
|
||||
MariaDB [mysql]> grant SELECT on mysql.user to 'maxscale'@'maxscalehost';
|
||||
----
|
||||
Query OK, 0 rows affected (0.00 sec)
|
||||
|
||||
==== Passwd
|
||||
The passwd parameter provides the password information for the above user and may be either a plain text password or it may be an encrypted password. See the section on encrypting passwords for use in the MaxScale.cnf file. This user must be capable of connecting to the backend database and executing the SQL statement “SELECT user, host, password FROM mysql.user”.
|
||||
|
||||
*enable_root_user* +
|
||||
This parameter controls the ability of the root user to connect to MaxScale and hence onwards to the backend servers via MaxScale.
|
||||
|
||||
The default value is 0, disabling the ability of the root user to connect to MaxScale.
|
||||
|
||||
Example for enabling root user: +
|
||||
enable_root_user=1
|
||||
|
||||
Values of “on” or “true” may also be given to enable the root user and “off” or “false” may be given to disable the use of the root user. +
|
||||
+enable_root_user=true+
|
||||
|
||||
*version_string* +
|
||||
This parameter sets a custom version string that is sent in the MySQL Handshake from MaxScale to clients.
|
||||
|
||||
Example: +
|
||||
version_string=5.5.37-MariaDB-RWsplit
|
||||
|
||||
If not set, the default value is the server version of the embedded MySQL/MariaDB library. Example: 5.5.35-MariaDB
|
||||
|
||||
==== weightby
|
||||
The weightby parameter is used in conjunction with server parameters in order to control the load balancing applied in the router in use by the service. This allows varying weights to be applied to each server to create a non-uniform distribution of the load amongst the servers.
|
||||
|
||||
An example of this might be to define a parameter for each server that represents the amount of resource available on the server, we could call this serversize. Every server should then have a serversize parameter set for the server.
|
||||
|
||||
+serversize=10+
|
||||
|
||||
The service would then have the parameter weightby set. If there are 4 servers defined in the service, serverA, serverB, serverC and serverD, with the serversize set as shown in the table below, the connections would be balanced using the percentages in this table.
|
||||
|
||||
|===
|
||||
|Server|serversize|% connections
|
||||
|
||||
|serverA|10|18%
|
||||
|serverB|15|27%
|
||||
|serverC|10|18%
|
||||
|serverD|20|36%
|
||||
|===
|
||||
|
||||
=== Server
|
||||
|
||||
Server sections are used to define the backend database servers that can be formed into a service. A server may be a member of one or more services within MaxScale. Servers are identified by a server name which is the section name in the configuration file. Servers have a type parameter of server, plus address port and protocol parameters.
|
||||
|
||||
----
|
||||
[server1]
|
||||
type=server
|
||||
address=127.0.0.1
|
||||
port=3000
|
||||
protocol=MySQLBackend
|
||||
----
|
||||
==== Address
|
||||
The IP address or hostname of the machine running the database server that is being defined. MaxScale will use this address to connect to the backend database server.
|
||||
|
||||
==== Port
|
||||
The port on which the database listens for incoming connections. MaxScale will use this port to connect to the database server.
|
||||
|
||||
==== Protocol
|
||||
The name for the protocol module to use to connect MaxScale to the database. Currently only one backend protocol is supported, the MySQLBackend module.
|
||||
|
||||
==== Monitoruser
|
||||
The monitor has a username and password that is used to connect to all servers for monitoring purposes, this may be overridden by supplying a monitoruser statement for each individual server
|
||||
|
||||
+monitoruser=mymonitoruser+
|
||||
|
||||
==== MonitorPw
|
||||
The monitor has a username and password that is used to connect to all servers for monitoring purposes, this may be overridden by supplying a monpasswd statement for the individual servers
|
||||
|
||||
----
|
||||
monitorpw=mymonitorpasswd
|
||||
|
||||
----
|
||||
The monpasswd parameter may be either a plain text password or it may be an encrypted password. See the section on encrypting passwords for use in the MaxScale.cnf file.
|
||||
|
||||
=== Listener
|
||||
|
||||
The listener defines a port and protocol pair that is used to listen for connections to a service. A service may have multiple listeners associated with it, either to support multiple protocols or multiple ports. As with other elements of the configuration the section name is the listener name and a type parameter is used to identify the section as a listener definition.
|
||||
|
||||
----
|
||||
[Test Listener]
|
||||
type=listener
|
||||
service=Test Service
|
||||
protocol=MySQLClient
|
||||
address=localhost
|
||||
port=4008
|
||||
socket=/tmp/testlistener.sock
|
||||
----
|
||||
==== Service
|
||||
The service to which the listener is associated. This is the name of a service that is defined elsewhere in the configuration file.
|
||||
|
||||
==== Protocol
|
||||
The name of the protocol module that is used for the communication between the client and MaxScale itself.
|
||||
|
||||
==== Address
|
||||
The address option sets the address that will be used to bind the listening socket. The address may be specified as an IP address in ‘dot notation’ or as a hostname. If the address option is not included in the listener definition the listener will bind to all network interfaces.
|
||||
|
||||
==== Port
|
||||
The port to use to listen for incoming connections to MaxScale from the clients. If the port is omitted from the configuration a default port for the protocol will be used.
|
||||
|
||||
*Socket* +
|
||||
The socket option may be included in a listener definition, this configures the listener to use Unix domain sockets to listen for incoming connections. The parameter value given is the name of the socket to use.
|
||||
|
||||
If a socket option and an address option is given then the listener will listen on both the specific IP address and the Unix socket.
|
||||
|
||||
=== Filter
|
||||
Filters provide a means to manipulate or process requests as they pass through MaxScale between the client side protocol and the query router. A filter should be defined in a section with a type of filter.
|
||||
|
||||
----
|
||||
[QLA]
|
||||
type=filter
|
||||
module=qlafilter
|
||||
options=/tmp/QueryLog
|
||||
----
|
||||
|
||||
The section name may then be used in one or more services by using the filters= parameter in the service section. In order to use the above filter for a service called “QLA Service”, an entry of the following form would exist for that service.
|
||||
|
||||
----
|
||||
[QLA Service]
|
||||
type=service
|
||||
router=readconnroute
|
||||
router_options=slave
|
||||
servers=server1,server2,server3,server4
|
||||
user=massi
|
||||
passwd=6628C50E07CCE1F0392EDEEB9D1203F3
|
||||
filters=QLA
|
||||
----
|
||||
|
||||
See the Services section for more details on how to configure the various options of a service.
|
||||
|
||||
==== Module
|
||||
The module parameter defines the name of the loadable module that implements the filter.
|
||||
|
||||
==== Options
|
||||
The options parameter is used to pass options to the filter to control the actions the filter will perform. The values that can be passed differ between filter implementation, the inclusion of an options parameter is optional.
|
||||
|
||||
==== Other Parameters
|
||||
Any other parameters present in the filters section will be passed to the filter to be interpreted by the filter. An example of this is the regexfilter that requires the two parameters match and replace
|
||||
|
||||
----
|
||||
[regex]
|
||||
type=filter
|
||||
module=regexfilter
|
||||
match=form
|
||||
replace=from
|
||||
----
|
||||
|
||||
=== Monitor
|
||||
|
||||
In order for the various router modules to function correctly they require information about the state of the servers that are part of the service they provide. MaxScale has the ability to internally monitor the state of the back-end database servers or that state may be fed into MaxScale from external monitoring systems. If automated monitoring and failover of services is required this is achieved by running a monitor module that is designed for the particular database architecture that is in use.
|
||||
|
||||
Monitors are defined in much the same way as other elements in the configuration file, with the section name being the name of the monitor instance and the type being set to monitor.
|
||||
|
||||
----
|
||||
[MySQL Monitor]
|
||||
type=monitor
|
||||
module=mysqlmon
|
||||
servers=server1,server2,server3
|
||||
user=dbmonitoruser
|
||||
passwd=dbmonitorpwd
|
||||
monitor_interval=8000
|
||||
|
||||
----
|
||||
==== Module
|
||||
The module parameter defines the name of the loadable module that implements the monitor. This module is loaded and executed on a separate thread within MaxScale.
|
||||
|
||||
==== Servers
|
||||
The servers parameter is a comma separated list of server names to monitor, these are the names defined elsewhere in the configuration file. The set of servers monitored by a single monitor need not be the same as the set of servers used within any particular service, a single monitor instance may monitor servers in multiple services.
|
||||
|
||||
==== User
|
||||
The user parameter defines the username that the monitor will use to connect to the monitored databases. Depending on the monitoring module used this user will require specific privileges in order to determine the state of the nodes, details of those privileges can be found in the sections on each of the monitor modules.
|
||||
|
||||
Individual servers may define override values for the user and password the monitor uses by setting the monuser and monpasswd parameters in the server section.
|
||||
|
||||
==== Passwd
|
||||
The password parameter may be either a plain text password or it may be an encrypted password. See the section on encrypting passwords for use in the MaxScale.cnf file.
|
||||
|
||||
*Monitor_interval* +
|
||||
The monitor_interval parameter sets the sampling interval in milliseconds for each monitor, the default value is 10000 milliseconds.
|
||||
==
|
||||
==
|
||||
== Protocol Modules
|
||||
The protocols supported by MaxScale are implemented as external modules that are loaded dynamically into the MaxScale core. These modules reside in the directory $MAXSCALE_HOME/module, if the environment variable $MAXSCALE_HOME is not set it defaults to /usr/local/skysql/MaxScale. It may also be set by passing the -c option on the MaxScale command line.
|
||||
|
||||
=== MySQLClient
|
||||
|
||||
This is the implementation of the MySQL protocol that is used by clients of MaxScale to connect to MaxScale.
|
||||
|
||||
=== MySQLBackend
|
||||
|
||||
The MySQLBackend protocol module is the implementation of the protocol that MaxScale uses to connect to the backend MySQL, MariaDB and Percona Server databases. This implementation is tailored for the MaxScale to MySQL Database traffic and is not a general purpose implementation of the MySQL protocol.
|
||||
|
||||
=== Telnetd
|
||||
|
||||
The telnetd protocol module is used for connections to MaxScale itself for the purposes of creating interactive user sessions with the MaxScale instance itself. Currently this is used in conjunction with a special router implementation, the debugcli.
|
||||
|
||||
=== maxscaled
|
||||
The protocol used by the maxadmin client application in order to connect to MaxScale and access the command line interface.
|
||||
|
||||
=== HTTPD
|
||||
|
||||
This protocol module is currently still under development, it provides a means to create HTTP connections to MaxScale for use by web browsers or RESTful API clients.
|
||||
==
|
||||
==
|
||||
== Router Modules
|
||||
The main task of MaxScale is to accept database connections from client applications and route the connections or the statements sent over those connections to the various services supported by MaxScale.
|
||||
|
||||
There are two flavours of routing that MaxScale can perform, connection based routing and statement based routing. These each have their own characteristics and costs associated with them.
|
||||
|
||||
=== Connection Based Routing
|
||||
|
||||
Connection based routing is a mechanism by which MaxScale will, for each incoming connection decide on an appropriate outbound server and will forward all statements to that server without examining the internals of the statement. Once an inbound connection is associated to a particular backend database it will remain connected to that server until the connection is closed or the server fails.
|
||||
|
||||
=== Statement Based Routing
|
||||
|
||||
Statement based routing is somewhat different, the routing modules examine every statement the client sends and determine, on a per statement basis, which of the set of backend servers in the service is best to execute the statement. This gives better dynamic balancing of the load within the cluster but comes at a cost. The query router must understand the statement that is being routed and will typically need to parse the statement in order to achieve this. This parsing within the router adds a significant overhead to the cost of routing and makes this type of router only really suitable for loads in which the gains outweigh this added cost.
|
||||
|
||||
=== Available Routing Modules
|
||||
|
||||
Currently a small number of query routers are available, these are in different stages of completion and offer different facilities.
|
||||
|
||||
==== Readconnroute
|
||||
This is a connection based router that was originally targeted at environments in which the clients already performed splitting of read and write queries into separate connections.
|
||||
|
||||
Whenever a new connection is received the router will examine the state of all the servers that form part of the service and route the connection to the server with least connections currently that matches the filter constraints given in the router options. This results in a balancing of the active connections, however different connections may have different lifetimes and the connections may become unbalanced when later viewed.
|
||||
|
||||
The readconnroute router can be configured to balance the connections from the clients across all the backend servers that are running, just those backend servers that are currently replication slaves or those that are replication masters when routing to a master slave replication environment. When a Galera cluster environment is in use the servers can be filtered to just the set that are part of the cluster and in the ‘synced’ state. These options are configurable via the router_options that can be set within a service. The router_option strings supported are “master”, “slave” and “synced”.
|
||||
|
||||
===== Master/Slave Replication Setup
|
||||
|
||||
To setup MaxScale to route connections evenly between all the current slave servers in a replication cluster, a service entry of the form shown below is required.
|
||||
|
||||
----
|
||||
[Read Service]
|
||||
type=service
|
||||
router=readconnroute
|
||||
router_options=slave
|
||||
servers=server1,server2,server3,server4
|
||||
user=maxscale
|
||||
auth=thepasswd
|
||||
----
|
||||
|
||||
With the addition of a listener for this service, which defines the port and protocol that MaxScale uses +
|
||||
----
|
||||
[Read Listener]
|
||||
type=listener
|
||||
service=Read Service
|
||||
protocol=MySQLClient
|
||||
port=4006
|
||||
----
|
||||
|
||||
the client can now connect to port 4006 on the host which is running MaxScale. Statements sent using this connection will then be routed to one of the slaves in the server set defined in the Read Service. Exactly which is selected will be determined by balancing the number of connections to each of those whose current state is “slave”.
|
||||
|
||||
Altering the router options to be slave, master would result in the connections being balanced between all the servers within the cluster.
|
||||
|
||||
It is assumed that the client will have a separate connection to the master server, however this can be routed via MaxScale, allowing MaxScale to manage the determination of which server is master. To do this you would add a second service and listener definition for the master server.
|
||||
|
||||
----
|
||||
[Write Service]
|
||||
type=service
|
||||
router=readconnroute
|
||||
router_options=master
|
||||
servers=server1,server2,server3,server4
|
||||
user=maxscale
|
||||
auth=thepasswd
|
||||
|
||||
[Write Listener]
|
||||
type=listener
|
||||
service=Write Service
|
||||
protocol=MySQLClient
|
||||
port=4007
|
||||
----
|
||||
|
||||
This allows the clients to direct write requests to port 4007 and read requests to port 4006 of the MaxScale host without the clients needing to understand the configuration of the Master/Slave replication cluster.
|
||||
|
||||
Connections to port 4007 would automatically be directed to the server that is the master for replication at the time connection is opened. Whilst this is a simple mapping to a single server it does give the advantage that the clients have no requirement to track which server is currently the master, devolving responsibility for managing the failover to MaxScale.
|
||||
|
||||
In order for MaxScale to be able to determine the state of these servers the mysqlmon monitor module should be run against the set of servers that comprise the service.
|
||||
|
||||
===== Galera Cluster Configuration
|
||||
|
||||
Although not primarily designed for a multi-master replication setup, it is possible to use the readconnroute in this situation. The readconnroute connection router can be used to balance the connections across a Galera cluster. A special monitor is available that detects if nodes are joined to a Galera Cluster, with the addition of a router option to only route connections to nodes marked as synced. MaxScale can ensure that users are never connected to a node that is not a full cluster member.
|
||||
|
||||
----
|
||||
[Galera Service]
|
||||
type=service
|
||||
router=readconnroute
|
||||
router_options=synced
|
||||
servers=server1,server2,server3,server4
|
||||
user=maxscale
|
||||
auth=thepasswd
|
||||
|
||||
[Galera Listener]
|
||||
type=listener
|
||||
service=Galera Service
|
||||
protocol=MySQLClient
|
||||
port=3336
|
||||
----
|
||||
|
||||
----
|
||||
[Galera Monitor]
|
||||
type=monitor
|
||||
module=galeramon
|
||||
servers=server1,server2,server3,server4
|
||||
user=galeramon
|
||||
passwd=galeramon
|
||||
|
||||
|
||||
----
|
||||
The specialized Galera monitor can also select one of the node in the cluster as master, the others will be marked as slave. +
|
||||
These roles are only assigned to synced nodes.
|
||||
|
||||
It then possible to have services/listeners with router_options=master or slave accessing a subset of all galera nodes. +
|
||||
The “synced” simply means: access all nodes.
|
||||
|
||||
Examples:
|
||||
|
||||
----
|
||||
[Galera Master Service]
|
||||
type=service
|
||||
router=readconnroute
|
||||
router_options=master
|
||||
|
||||
[Galera Slave Service]
|
||||
type=service
|
||||
router=readconnroute
|
||||
router_options=slave
|
||||
|
||||
----
|
||||
The Master and Slave roles are also available for the Read/Write Split router operation
|
||||
|
||||
==== Readwritesplit
|
||||
|
||||
The readwritesplit is a statement based router that has been designed for use within Master/Slave replication environments. It examines every statement, parsing it to determine if the statement falls into one of three categories;
|
||||
* read only statement
|
||||
* possible write statement
|
||||
* session modification statement
|
||||
Each of these three categories has a different action associated with it. Read only statements are sent to a slave server in the replication cluster. Possible write statements, which may include read statements that have an undeterminable side effect, are sent to the current replication master. Statements that modify the session are sent to all the servers, with the result that is generated by the master server being returned to the user.
|
||||
|
||||
Session modification statements must be replicated as they affect the future results of read and write operations, so they must be executed on all servers that could execute statements on behalf of this client.
|
||||
|
||||
Currently the readwritesplit router module is under development and has the following limitations:
|
||||
* Connection failover support has not yet been implemented. Client connections will fail if the master server fails over.
|
||||
===== Master/Slave Replication Setup
|
||||
|
||||
To setup the readwritesplit connection router in a master/slave failover environment is extremely simple, a service definition is required with the router defined for the service and an associated listener. +
|
||||
The router_options parameter is not required but it can be used to specify how slave(s) are selected. The available option is slave_selection_criteria and the possible values are LEAST_BEHIND_MASTER and LEAST_CURRENT_OPERATIONS. +
|
||||
max_slave_connections is a readwritesplit-only option, which sets the upper limit for the number of slaves a router session can use. max_slave_replication_lag is (currently) another readwritesplit-specific option, which sets maximum allowed lag for slave in seconds. The criteria is checked when router chooses slaves and only slaves having smaller lag are eligible for selection. The lag is not checked after connection phase.
|
||||
|
||||
----
|
||||
[Split Service]
|
||||
type=service
|
||||
router=readwritesplit
|
||||
router_options=slave_selection_criteria=LEAST_BEHIND_MASTER
|
||||
max_slave_connections=50%
|
||||
max_slave_replication_lag=30
|
||||
servers=server1,server2,server3,server4
|
||||
user=maxscale
|
||||
auth=thepasswd
|
||||
|
||||
[Split Listener]
|
||||
type=listener
|
||||
service=Split Service
|
||||
protocol=MySQLClient
|
||||
port=3336
|
||||
----
|
||||
|
||||
The client would merely connect to port 3336 on the MaxScale host and statements would be directed to the master or slave as appropriate. Determination of the master or slave status may be done via a monitor module within MaxScale or externally. In this latter case the server flags would need to be set via the MaxScale debug interface, in future versions an API will be available for this purpose.
|
||||
|
||||
+++<u>Galera Cluster Configuration</u>+++ +
|
||||
Master and Slave roles that galera monitor assign to nodes make possible the Read Write split approach to Galera Cluster as well.
|
||||
|
||||
Simply configure a Split Service with galera nodes:
|
||||
|
||||
----
|
||||
[Galera Split Service]
|
||||
type=service
|
||||
router=readwritesplit
|
||||
servers=galera_node1,galera_node2,galera_node3
|
||||
----
|
||||
|
||||
|
||||
==== Debugcli
|
||||
|
||||
The debugcli is a special case of a statement based router. Rather than direct the statements at an external data source they are handled internally. These statements are simple text commands and the results are the output of debug commands within MaxScale. The service and listener definitions for a debug cli service only differ from other services in that they require no backend server definitions.
|
||||
|
||||
===== Debug CLI Configuration
|
||||
|
||||
The definition of the debug cli service is illustrated below
|
||||
|
||||
----
|
||||
[Debug Service]
|
||||
type=service
|
||||
router=debugcli
|
||||
|
||||
[Debug Listener]
|
||||
type=listener
|
||||
service=Debug Service
|
||||
protocol=telnetd
|
||||
port=4442
|
||||
----
|
||||
|
||||
Connections using the telnet protocol to port 4442 of the MaxScale host will result in a new debug CLI session. A default username and password are used for this module, new users may be created using the add user command. As soon as any users are explicitly created the default username will no longer continue to work. The default username is admin with a password of skysql.
|
||||
|
||||
The debugcli supports two modes of operation, developer mode and user mode. The mode is set via the router_options parameter of the debugcli. The user mode is more suited to end-users and administrators, whilst the developer mode is explicitly targeted at software developers adding or maintaining the MaxScale code base. Details of the differences between the modes can be found in the debugging guide for MaxScale. The default mode for the debugcli is user mode. The following service definition would enable a developer version of the debugcli.
|
||||
|
||||
----
|
||||
[Debug Service]
|
||||
type=service
|
||||
router=debugcli
|
||||
router_options=developer
|
||||
----
|
||||
|
||||
It should be noted that both a user and a developer version of the debugcli may be defined within the same instance of MaxScale, however they must be defined as two distinct services, each with a distinct listener.
|
||||
----
|
||||
|
||||
[Debug Service]
|
||||
type=service
|
||||
router=debugcli
|
||||
router_options=developer
|
||||
|
||||
[Debug Listener]
|
||||
type=listener
|
||||
service=Debug Service
|
||||
protocol=telnetd
|
||||
port=4442
|
||||
|
||||
[Admin Service]
|
||||
type=service
|
||||
router=debugcli
|
||||
|
||||
[Admin Listener]
|
||||
type=listener
|
||||
service=Debug Service
|
||||
protocol=telnetd
|
||||
port=4242
|
||||
----
|
||||
|
||||
==== CLI
|
||||
The command line interface as used by maxadmin. This is a variant of the debugcli that is built slightly differently so that it may be accessed by the client application maxadmin. The CLI requires the use of the maxscaled protocol.
|
||||
|
||||
===== CLI Configuration
|
||||
There are two components to the definition required in order to run the command line interface to use with MaxAdmin; a service and a listener. +
|
||||
The default entries required are shown below.
|
||||
----
|
||||
|
||||
[CLI]
|
||||
type=service
|
||||
router=cli
|
||||
|
||||
[CLI Listener]
|
||||
type=listener
|
||||
service=CLI
|
||||
protocol=maxscaled
|
||||
address=localhost
|
||||
port=6603
|
||||
----
|
||||
|
||||
Note that this uses the default port of 6603 and confines the connections to localhost connections only. Remove the address= entry to allow connections from any machine on your network. Changing the port from 6603 will mean that you must always pass a -p option to the MaxAdmin command.
|
||||
==
|
||||
==
|
||||
== Monitor Modules
|
||||
Monitor modules are used by MaxScale to internally monitor the state of the backend databases in order to set the server flags for each of those servers. The router modules then use these flags to determine if the particular server is a suitable destination for routing connections for particular query classifications. The monitors are run within separate threads of MaxScale and do not affect the MaxScale performance.
|
||||
|
||||
The use of monitors is optional, it is possible to run MaxScale with external monitoring, in which case arrangements must be made for an external entity to set the status of each of the servers that MaxScale can route to.
|
||||
|
||||
=== Mysqlmon
|
||||
|
||||
The MySQLMon monitor is a simple monitor designed for use with MySQL Master/Slave replication cluster. To execute the mysqlmon monitor an entry as shown below should be added to the MaxScale configuration file.
|
||||
|
||||
----
|
||||
[MySQL Monitor]
|
||||
type=monitor
|
||||
module=mysqlmon
|
||||
servers=server1,server2,server3,server4
|
||||
----
|
||||
|
||||
This will monitor the 4 servers; server1, server2, server3 and server4. It will set the status of running or failed and master or slave for each of the servers.
|
||||
|
||||
The monitor uses the username given in the monitor section or the server specific user that is given in the server section to connect to the server. This user must have sufficient permissions on the database to determine the state of replication. The privileges that must be granted to this user are REPLICATION SLAVE and REPLICATION CLIENT.
|
||||
|
||||
To create a user that can be used to monitor the state of the cluster, the following commands could be used.
|
||||
|
||||
----
|
||||
MariaDB [mysql]> create user 'maxscalemon'@'maxscalehost' identified by 'Ha79hjds';
|
||||
Query OK, 0 rows affected (0.01 sec)
|
||||
|
||||
MariaDB [mysql]> grant REPLICATION SLAVE on *.* to 'maxscalemon'@'maxscalehost';
|
||||
Query OK, 0 rows affected (0.00 sec)
|
||||
|
||||
MariaDB [mysql]> grant REPLICATION CLIENT on *.* to 'maxscalemon'@'maxscalehost';
|
||||
Query OK, 0 rows affected (0.00 sec)
|
||||
|
||||
MariaDB [mysql]>
|
||||
----
|
||||
|
||||
Assuming that MaxScale is running on the host maxscalehost.
|
||||
|
||||
=== Galeramon
|
||||
|
||||
The Galeramon monitor is a simple monitor designed for use with MySQL Galera cluster. To execute the galeramon monitor an entry as shown below should be added to the MaxScale configuration file.
|
||||
|
||||
----
|
||||
[Galera Monitor]
|
||||
type=monitor
|
||||
module=galeramon
|
||||
servers=server1,server2,server3,server4
|
||||
----
|
||||
|
||||
This will monitor the 4 servers; server1, server2, server3 and server4. It will set the status of running or failed and joined for those servers that reported the Galera JOINED status.
|
||||
|
||||
The user that is configured for use with the Galera monitor must have sufficient privileges to select from the information_schema database and GLOBAL_STATUS table within that database.
|
||||
|
||||
To create a user that can be used to monitor the state of the cluster, the following commands could be used.
|
||||
|
||||
----
|
||||
MariaDB [mysql]> create user 'maxscalemon'@'maxscalehost' identified by 'Ha79hjds';
|
||||
Query OK, 0 rows affected (0.01 sec)
|
||||
|
||||
MariaDB [mysql]> grant SELECT on INFORMATION_SCHEMA.GLOBAL_STATUS to 'maxscalemon'@'maxscalehost';
|
||||
Query OK, 0 rows affected (0.00 sec)
|
||||
|
||||
MariaDB [mysql]>
|
||||
----
|
||||
|
||||
Assuming that MaxScale is running on the host maxscalehost.
|
||||
|
||||
|
||||
The Galera monitor can also assign Master and Slave roles to the configured nodes:
|
||||
|
||||
among the set of synced servers, the one with the lowest value of ‘wsrep_local_index’ is selected as the current master while the others are slaves.
|
||||
|
||||
This way it is possible to configure node access based not only on the ‘synced’ state but also on the Master and Slave roles, enabling the use of Read Write split operation on a Galera cluster and avoiding any possible write conflict.
|
||||
|
||||
Example status for a Galera server node is:
|
||||
|
||||
----
|
||||
Server 0x261fe50 (server2)
|
||||
Server: 192.168.1.101
|
||||
----
|
||||
Status: Master, Synced, Running
|
||||
|
||||
|
||||
==
|
||||
==
|
||||
== Filter Modules
|
||||
Currently four example filters are included in the MaxScale distribution
|
||||
|
||||
|===
|
||||
|*Module*|*Description*
|
||||
|
||||
|testfilter|Statement counting Filter - a simple filter that counts the number of SQL statements executed within a session. Results may be viewed via the debug interface.
|
||||
|qlafilter|Query Logging Filter - a simple query logging filter that write all statements for a session into a log file for that session.
|
||||
|regexfilter|Query Rewrite Filter - an example of how filters can alter the query contents. This filter allows a regular expression to be defined, along with replacement text that should be substituted for every match of that regular expression.
|
||||
|tee|A filter that duplicates SQL requests and sends the duplicates to another service within MaxScale.
|
||||
|===
|
||||
|
||||
These filters are merely examples of what may be achieved with the filter API and are not sophisticated nor considered suitable for production use; they merely illustrate the functionality possible.
|
||||
|
||||
=== Statement Counting Filter
|
||||
The statement counting filter is implemented in the module names testfilter and merely keeps a count of the number of SQL statements executed. The filter requires no options to be passed and takes no parameters. The statement count can be viewed via the diagnostic and debug interface of MaxScale.
|
||||
|
||||
In order to add this filter to an existing service create a filter section to name the filter as follows
|
||||
|
||||
----
|
||||
[counter]
|
||||
type=filter
|
||||
module=testfilter
|
||||
----
|
||||
|
||||
Then add the filter to your service by including the filters= parameter in the service section.
|
||||
|
||||
+filters=counter+
|
||||
|
||||
=== Query Log All Filter
|
||||
The QLA filter simply writes all SQL statements to a log file along with a timestamp for the statement. An example of the file produced by the QLA filter is shown below
|
||||
|
||||
----
|
||||
00:36:04.922 5/06/2014, select @@version_comment limit 1
|
||||
00:36:12.663 5/06/2014, SELECT DATABASE()
|
||||
00:36:12.664 5/06/2014, show databases
|
||||
00:36:12.665 5/06/2014, show tables
|
||||
----
|
||||
|
||||
A new file is created for each client connection, the name of the logfile can be controlled by the use of the router options. No parameters are used by the QLA filter. The filter is implemented by the loadable module qlafilter.
|
||||
|
||||
To add the QLA filter to a service you must create a filter section to name the filter, associate the loadable module and define the filename option.
|
||||
|
||||
----
|
||||
[QLA]
|
||||
type=filter
|
||||
module=qlafilter
|
||||
options=/tmp/QueryLog
|
||||
----
|
||||
|
||||
Then add the filters= parameter into the service that you wish to log by adding this parameter to the service section
|
||||
|
||||
+filters=QLA+
|
||||
|
||||
A log file will be created for each client connection, the name of that log file will be /tmp/QueryLog.<number>
|
||||
|
||||
=== Regular Expression Filter
|
||||
The regular expression filter is a simple text based query rewriting filter. It allows a regular expression to be used to match text in a SQL query and then a string replacement to be made against that match. The filter is implemented by the regexfilter loadable module and is passed two parameters, a match string and a replacement string.
|
||||
|
||||
To add the filter to your service you must first create a filter section to name the filter and give the match and replacement strings. Here we define a filter that will convert to MariaDB 10 command show all slaves status to the older form of show slave status for MariaDB 5.5.
|
||||
|
||||
----
|
||||
[slavestatus]
|
||||
type=filter
|
||||
module=regexfilter
|
||||
match=show *all *slaves
|
||||
replace=show slave
|
||||
----
|
||||
|
||||
You must then add this filter to your service by adding the filters= option
|
||||
|
||||
+filters=slavestatus+
|
||||
|
||||
Another example would be a filter to convert from the MySQL 5.1 create table syntax that used the TYPE keyword to the newer ENGINE keyword.
|
||||
|
||||
----
|
||||
[EnginerFilter]
|
||||
type=filter
|
||||
module=regexfilter
|
||||
match=TYPE
|
||||
replace=ENGINE
|
||||
----
|
||||
|
||||
This would then change the SQL sent by a client application written to work with MySQL 5.1 into SQL that was compliant with MySQL 5.5. The statement
|
||||
|
||||
+create table supplier(id integer, name varchar(80)) type=innodb+
|
||||
|
||||
would be replaced with
|
||||
|
||||
+create table supplier(id integer, name varchar(80)) ENGINE=innodb+
|
||||
|
||||
before being sent to the server. Note that the text in the match string is case independent.
|
||||
|
||||
=== Tee Filter
|
||||
The tee filter is a filter module for MaxScale that acts as a “plumbing” fitting in the MaxScale filter toolkit. It can be used in a filter pipeline of a service to make a copy of requests from the client and dispatch a copy of the request to another service within MaxScale.
|
||||
|
||||
The configuration block for the TEE filter requires the minimal filter parameters in its section within the MaxScale.cnf file that defines the filter to load and the service to send the duplicates to.
|
||||
|
||||
----
|
||||
[ArchieveFilter]
|
||||
type=filter
|
||||
module=tee
|
||||
service=Archieve
|
||||
|
||||
----
|
||||
In addition parameters may be added to define patterns to match against to either include or exclude particular SQL statements to be duplicated. You may also define that the filter is only active for connections from a particular source or when a particular user is connected.
|
||||
==
|
||||
==
|
||||
== Encrypting Passwords
|
||||
|
||||
Passwords stored in the MaxScale.cnf file may optionally be encrypted for added security. This is done by creation of an encryption key on installation of MaxScale. Encryption keys may be created manually by executing the maxkeys utility with the argument of the filename to store the key.
|
||||
|
||||
+maxkeys $MAXSCALE_HOME/etc/.secrets+
|
||||
|
||||
Changing the encryption key for MaxScale will invalidate any currently encrypted keys stored in the MaxScale.cnf file.
|
||||
|
||||
=== Creating Encrypted Passwords
|
||||
|
||||
Encrypted passwords are created by executing the maxpasswd command with the password you require to encrypt as an argument. The environment variable MAXSCALE_HOME must be set, or MaxScale must be installed in the default location before maxpasswd can be executed.
|
||||
|
||||
----
|
||||
maxpasswd MaxScalePw001
|
||||
61DD955512C39A4A8BC4BB1E5F116705
|
||||
----
|
||||
|
||||
The output of the maxpasswd command is a hexadecimal string, this should be inserted into the MaxScale.cnf file in place of the ordinary, plain text, password. MaxScale will determine this as an encrypted password and automatically decrypt it before sending it the database server.
|
||||
|
||||
----
|
||||
[Split Service]
|
||||
type=service
|
||||
router=readwritesplit
|
||||
servers=server1,server2,server3,server4
|
||||
user=maxscale
|
||||
password=61DD955512C39A4A8BC4BB1E5F116705
|
||||
----
|
||||
==
|
||||
==
|
||||
== Configuration Updates
|
||||
The current MaxScale configuration may be updated by editing the configuration file and then forcing MaxScale to reread the configuration file. To force MaxScale to reread the configuration file a SIGTERM signal is sent to the MaxScale process.
|
||||
|
||||
Some changes in configuration can not be dynamically changed and require a complete restart of MaxScale, whilst others will take some time to be applied.
|
||||
|
||||
=== Limitations
|
||||
Services that are removed via the configuration update mechanism can not be physically removed from MaxScale until there are no longer any connections using the service.
|
||||
|
||||
When the number of threads is decreased the threads will not actually be terminated until such time as they complete the current operation of that thread.
|
||||
|
||||
Monitors can not be completely removed from the running MaxScale.
|
||||
==
|
||||
==
|
||||
== Authentication
|
||||
MySQL uses username, passwords and the client host in order to authenticate a user, so a typical user would be defined as user X at host Y and would be given a password to connect. MaxScale uses exactly the same rules as MySQL when users connect to the MaxScale instance, i.e. it will check the address from which the client is connecting and treat this in exactly the same way that MySQL would. MaxScale will pull the authentication data from one of the backend servers and use this to match the incoming connections, the assumption being that all the backend servers for a particular service will share the same set of user credentials.
|
||||
|
||||
It is important to understand, however, that when MaxScale itself makes connections to the backend servers the backend server will see all connections as originating from the host that runs MaxScale and not the original host from which the client connected to MaxScale. Therefore the backend servers should be configured to allow connections from the MaxScale host for every user that can connect from any host. Since there is only a single password within the database server for a given host, this limits the configuration such that a given user name must have the same password for every host from which they can connect.
|
||||
|
||||
To clarify, if a user X is defined as using password _pass1_ from host a and _pass2_ from host b then there must be an entry in the user table for user X from the MaxScale host, say _pass1_.
|
||||
|
||||
This would result in rows in the user table as follows
|
||||
|===
|
||||
|*Username*|*Password*|*Client Host*
|
||||
|
||||
|X|pass1|a
|
||||
|X|pass2|b
|
||||
|X|pass1|MaxScale
|
||||
|===
|
||||
|
||||
|
||||
In this case the user X would be able to connect to MaxScale from host a giving the password of _pass1_. In addition MaxScale would be able to create connections for this user to the backend servers using the username X and password _pass1_, since the MaxScale host is also defined to have password _pass1_. User X would not however be able to connect from host b since they would need to provide the password _pass2_ in order to connect to MaxScale, but then MaxScale would not be able to connect to the backends as it would also use the password _pass2_ for these connections.
|
||||
|
||||
=== Wildcard Hosts
|
||||
|
||||
Hostname mapping in MaxScale works in exactly the same way as for MySQL, if the wildcard is used for the host then any host other than the localhost (127.0.0.1) will match. It is important to consider that the localhost check will be performed at the MaxScale level and at the MySQL server level.
|
||||
|
||||
If MaxScale and the databases are on separate hosts there are two important changes in behaviour to consider:
|
||||
|
||||
. Clients running on the same machine as the backend database now may access the database using the wildcard entry. The localhost check between the client and MaxScale will allow the use of the wildcard, since the client is not running on the MaxScale host. Also the wildcard entry can be used on the database host as MaxScale is making that connection and it is not running on the same host as the database.
|
||||
. Clients running on the same host as MaxScale can not access the database via MaxScale using the wildcard entry since the connection to MaxScale will be from the localhost. These clients are able to access the database directly, as they will use the wildcard entry.
|
||||
|
||||
If MaxScale is running on the same host as one or more of the database nodes to which it is acting as a proxy then the wildcard host entries can be used to connect to MaxScale but not to connect onwards to the database running on the same node.
|
||||
|
||||
In all these cases the issue may be solved by adding an explicit entry for the localhost address that has the same password as the wildcard entry. This may be done using a statement as below for each of the databases that are required:
|
||||
|
||||
+MariaDB [mysql]> GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, DROP ON employee.* TO 'user1'@'localhost' IDENTIFIED BY 'xxx';+ +
|
||||
Query OK, 0 rows affected (0.00 sec)
|
||||
|
||||
=== Limitations
|
||||
|
||||
At the time of writing the authentication mechanism within MaxScale does not support IPV6 address matching in connections rules. This is also in line with the current protocol modules that do not support IPV6.
|
||||
|
||||
Partial address matching, such as 10.% is also not supported in the current version of MaxScale.
|
||||
==
|
||||
== Error Reporting
|
||||
MaxScale is designed to be executed as a service, therefore all error reports, including configuration errors, are written to the MaxScale error log file. MaxScale will log to a set of files in the directory $MAXSCALE_HOME/log, the only exception to this is if the log directory is not writable, in which case a message is sent to the standard error descriptor.
|
||||
|
||||
|
||||
|
||||
Troubleshooting +
|
||||
MaxScale binds on TCP ports and UNIX sockets as well.
|
||||
|
||||
If there is a local firewall in the server where MaxScale is installed, the IP and port must be configured in order to receive connections from outside.
|
||||
|
||||
If the firewall is a network facility among all the involved servers, a configuration update is required as well.
|
||||
|
||||
Example: +
|
||||
----
|
||||
[Galera Listener]
|
||||
type=listener
|
||||
address=192.168.3.33
|
||||
|
||||
port=4408
|
||||
socket=/servers/maxscale/galera.sock
|
||||
----
|
||||
|
||||
TCP/IP traffic must be permitted to 192.168.3.33 port 4408
|
||||
|
||||
For Unix socket, the socket file path (example: /servers/maxscale/galera.sock) must be writable by the Unix user MaxScale runs as.
|
||||
|
||||
|
108
Documentation/filters/Firewall-Filter.md
Normal file
@ -0,0 +1,108 @@
|
||||
#Firewall filter
|
||||
|
||||
## Overview
|
||||
The firewall filter is used to block queries that match a set of rules. It can be used to prevent harmful queries into the database or to limit the access to the database based on a more defined set of rules compared to the traditional GRANT-based rights management.
|
||||
|
||||
## Configuration
|
||||
|
||||
The firewall filter only requires a minimal set of configurations in the MaxScale.cnf file. The actual rules of the firewall filter are located in a separate text file. The following is an example of a firewall filter configuration in the MaxScale.cnf file.
|
||||
|
||||
|
||||
[Firewall]
|
||||
type=filter
|
||||
module=fwfilter
|
||||
rules=/home/user/rules.txt
|
||||
|
||||
### Filter Options
|
||||
|
||||
The firewall filter does not support any filter options.
|
||||
|
||||
### Filter Parameters
|
||||
|
||||
The firewall filter has one mandatory parameter that defines the location of the rule file. This is the 'rules' parameter and it expects an absolute path to the rule file.
|
||||
|
||||
## Rule syntax
|
||||
|
||||
The rules are defined by using the following syntax.
|
||||
|
||||
` rule NAME deny [wildcard | columns VALUE ... |
|
||||
regex REGEX | limit_queries COUNT TIMEPERIOD HOLDOFF |
|
||||
no_where_clause] [at_times VALUE...] [on_queries [select|update|insert|delete]]`
|
||||
|
||||
Rules always define a blocking action so the basic mode for the firewall filter is to allow all queries that do not match a given set of rules. Rules are identified by their name and have a mandatory part and optional parts.
|
||||
|
||||
The first step of defining a rule is to start with the keyword 'rule' which identifies this line of text as a rule. The second token is identified as the name of the rule. After that the mandatory token 'deny' is required to mark the start of the actual rule definition.
|
||||
|
||||
### Mandatory rule parameters
|
||||
|
||||
The firewall filter's rules expect a single mandatory parameter for a rule. You can define multiple rules to cover situations where you would like to apply multiple mandatory rules to a query.
|
||||
|
||||
#### Wildcard
|
||||
|
||||
This rule blocks all queries that use the wildcard character *.
|
||||
|
||||
#### Columns
|
||||
|
||||
This rule expects a list of values after the 'columns' keyword. These values are interpreted as column names and if a query targets any of these, it is blocked.
|
||||
|
||||
#### Regex
|
||||
|
||||
This rule blocks all queries matching a regex enclosed in single or double quotes.
|
||||
|
||||
#### Limit_queries
|
||||
|
||||
The limit_queries rule expects three parameters. The first parameter is the number of allowed queries during the time period. The second is the time period in seconds and the third is the amount of time for which the rule is considered active and blocking.
|
||||
|
||||
#### No_where_clause
|
||||
|
||||
This rule inspects the query and blocks it if it has no where clause. This way you can't do a DELETE FROM ... query without having the where clause. This does not prevent wrongful usage of the where clause e.g. DELETE FROM ... WHERE 1=1.
|
||||
|
||||
### Optional rule parameters
|
||||
|
||||
Each mandatory rule accepts one or more optional parameters. These are to be defined after the mandatory part of the rule.
|
||||
|
||||
#### At_times
|
||||
|
||||
This rule expects a list of time ranges that define the times when the rule in question is active. The time formats are expected to be ISO-8601 compliant and to be separated by a single dash (the - character). For example defining the active period of a rule to be 17:00 to 19:00 you would add 'at_times 17:00:00-19:00:00' to the end of the rule.
|
||||
|
||||
#### On_queries
|
||||
|
||||
This limits the rule to be active only on certain types of queries.
|
||||
|
||||
### Applying rules to users
|
||||
|
||||
To apply the defined rules to users use the following syntax.
|
||||
|
||||
`users NAME ... match [any|all] rules RULE ...`
|
||||
|
||||
The first keyword is users which identifies this line as a user definition line. After this a list of user names and network addresses in the format 'user@0.0.0.0' is expected. The first part is the user name and the second part is the network address. You can use the '%' character as the wildcard to enable user name matching from any address or network matching for all users. After the list of users and networks the keyword match is expected. After this either the keyword 'any' or 'all' is expected. This defines how the rules are matched. If 'any' is used, the query is considered blocked as soon as the first rule matches and the rest of the rules are skipped. If instead the 'all' keyword is used all rules must match for the query to be blocked.
|
||||
|
||||
After the matching part comes the rules keyword after which a list of rule names is expected. This allows reusing of the rules and enables varying levels of query restriction.
|
||||
|
||||
## Examples
|
||||
|
||||
### Example rule file
|
||||
|
||||
The following is an example of a rule file which defines six rules and applies them to three sets of users. This rule file is used in all of the examples.
|
||||
|
||||
rule block_wildcard deny wildcard at_times 8:00:00-17:00:00
|
||||
rule no_personal_info deny columns phone salary address on_queries select|delete at_times 12:00:00-18:00:00
|
||||
rule simple_regex deny regex '.*insert.*into.*select.*'
|
||||
rule dos_block deny limit_queries 10000 1.0 500.0 at_times 12:00:00-18:00:00
|
||||
rule safe_delete deny no_where_clause on_queries delete
|
||||
rule managers_table deny regex '.*from.*managers.*'
|
||||
users John@% Jane@% match any rules no_personal_info block_wildcard
|
||||
users %@80.120.% match any rules block_wildcard dos_block
|
||||
users %@% match all rules safe_delete managers_table
|
||||
|
||||
### Example 1 - Deny access to personal information and prevent huge queries during peak hours
|
||||
|
||||
Assume that a database cluster with tables that have a large number of columns is under heavy load during certain times of the day. Now also assume that large selects and querying of personal information creates unwanted stress on the cluster. Now we wouldn't want to completely prevent all the users from accessing personal information or performing large select queries, we only want to block the users John and Jane.
|
||||
|
||||
This can be achieved by creating two rules. One that blocks the usage of the wildcard and one that prevents queries that target a set of columns. To apply these rules to the users we define a users line into the rule file with both the rules and all the users we want to apply the rules to. The rules are defined in the example rule file on line 1 and 2 and the users line is defined on line 7.
|
||||
|
||||
### Example 2 - Only safe deletes into the managers table
|
||||
|
||||
We want to prevent accidental deletes into the managers table where the where clause is missing. This poses a problem, we don't want to require all the delete queries to have a where clause. We only want to prevent the data in the managers table from being deleted without a where clause.
|
||||
|
||||
To achieve this, we need two rules. The first rule can be seen on line 5 in the example rule file. This defines that all delete operations must have a where clause. This rule alone does us no good so we need a second one. The second rule is defined on line 6 and it blocks all queries that match the provided regular expression. When we combine these two rules we get the result we want. You can see the application of these rules on line 9 of the example rule file. The usage of the 'all' matching mode requires that all the rules must match for the query to be blocked. This in effect combines the two rules into a more complex rule.
|
79
Documentation/filters/Query-Log-All-Filter.md
Normal file
@ -0,0 +1,79 @@
|
||||
# Query Log All Filter
|
||||
|
||||
## Overview
|
||||
|
||||
The Query Log All (QLA) filter is a filter module for MaxScale that is able to log all query content on a per client session basis. Logs are written in a csv format file that lists the time submitted and the SQL statement text.
|
||||
|
||||
## Configuration
|
||||
|
||||
The configuration block for the QLA filter requires the minimal filter options in its section within the MaxScale.cnf file, stored in $MAXSCALE_HOME/etc/MaxScale.cnf.
|
||||
|
||||
[MyLogFilter]
|
||||
type=filter
|
||||
module=qlafilter
|
||||
|
||||
## Filter Options
|
||||
|
||||
The QLA filter accepts one option value, this is the name that is used for the log files that are written. The file that is created appends the session number to the name given in the options entry. For example:
|
||||
|
||||
options=/tmp/QueryLog
|
||||
|
||||
would create log files /tmp/QueryLog.1 etc.
|
||||
|
||||
Note, this is included for backward compatibility with the version of the QLA filter that was provided in the initial filters implementation preview in 0.7 of MaxScale. The filebase parameter can now be used and will take precedence over the filter option.
|
||||
|
||||
## Filter Parameters
|
||||
|
||||
The QLA filter accepts a number of optional parameters, these were introduced in the 1.0 release of MaxScale.
|
||||
|
||||
### Filebase
|
||||
|
||||
The basename of the output file created for each session. A session index is added to the filename for each file written.
|
||||
|
||||
filebase=/tmp/SqlQueryLog
|
||||
|
||||
The filebase may also be set as the filter option; the mechanism to set the filebase via the filter option is superseded by the parameter. If both are set, the parameter setting will be used and the filter option ignored.
|
||||
|
||||
### Match
|
||||
|
||||
An optional parameter that can be used to limit the queries that will be logged by the QLA filter. The parameter value is a regular expression that is used to match against the SQL text. Only SQL statements that matches the text passed as the value of this parameter will be logged.
|
||||
|
||||
match=select.*from.*customer.*where
|
||||
|
||||
All regular expressions are evaluated with the option to ignore the case of the text, therefore a match option of select will match both select, SELECT and any form of the word with upper or lowercase characters.
|
||||
|
||||
### Exclude
|
||||
|
||||
An optional parameter that can be used to limit the queries that will be logged by the QLA filter. The parameter value is a regular expression that is used to match against the SQL text. SQL statements that match the text passed as the value of this parameter will be excluded from the log output.
|
||||
|
||||
exclude=where
|
||||
|
||||
All regular expressions are evaluated with the option to ignore the case of the text, therefore an exclude option of select will exclude statements that contain both select, SELECT or any form of the word with upper or lowercase characters.
|
||||
|
||||
### Source
|
||||
|
||||
The optional source parameter defines an address that is used to match against the address from which the client connection to MaxScale originates. Only sessions that originate from this address will be logged.
|
||||
|
||||
source=127.0.0.1
|
||||
|
||||
### User
|
||||
|
||||
The optional user parameter defines a user name that is used to match against the user from which the client connection to MaxScale originates. Only sessions that are connected using this username are logged.
|
||||
|
||||
user=john
|
||||
|
||||
## Examples
|
||||
|
||||
### Example 1 - Query without primary key
|
||||
|
||||
Imagine you have observed an issue with a particular table and you want to determine if there are queries that are accessing that table but not using the primary key of the table. Let's assume the table name is PRODUCTS and the primary key is called PRODUCT_ID. Add a filter with the following definition:
|
||||
|
||||
[ProductsSelectLogger]
|
||||
type=filter
|
||||
module=qlafilter
|
||||
match=SELECT.*from.*PRODUCTS .*
|
||||
exclude=WHERE.*PRODUCT_ID.*
|
||||
filebase=/var/logs/qla/SelectProducts
|
||||
|
||||
The result of then putting this filter into the service used by the application would be a log file of all select queries that mentioned the table but did not mention the PRODUCT_ID primary key in the predicates for the query.
|
||||
|
72
Documentation/filters/Regex-Filter.md
Normal file
@ -0,0 +1,72 @@
|
||||
Regex Filter
|
||||
|
||||
# Overview
|
||||
|
||||
The regex filter is a filter module for MaxScale that is able to rewrite query content using regular expression matches and text substitution.
|
||||
|
||||
# Configuration
|
||||
|
||||
The configuration block for the Regex filter requires the minimal filter options in its section within the MaxScale.cnf file, stored in $MAXSCALE_HOME/etc/MaxScale.cnf.
|
||||
|
||||
[MyRegexFilter]
|
||||
|
||||
type=filter
|
||||
|
||||
module=regexfilter
|
||||
|
||||
match=some string
|
||||
|
||||
replace=replacement string
|
||||
|
||||
## Filter Options
|
||||
|
||||
The regex filter accepts the options ignorecase or case. These define if the pattern text should take the case of the string it is matching against into consideration or not.
|
||||
|
||||
## Filter Parameters
|
||||
|
||||
The Regex filter requires two mandatory parameters to be defined.
|
||||
|
||||
### Match
|
||||
|
||||
A parameter that can be used to match text in the SQL statement which should be replaced.
|
||||
|
||||
match=TYPE[ ]*=
|
||||
|
||||
If the filter option ignorecase is used all regular expressions are evaluated with the option to ignore the case of the text, therefore a match option of type will match both type, TYPE and any form of the word with upper or lowercase characters.
|
||||
|
||||
### Replace
|
||||
|
||||
The replace parameter defines the text that should replace the text in the SQL text which matches the match.
|
||||
|
||||
replace=ENGINE =
|
||||
|
||||
### Source
|
||||
|
||||
The optional source parameter defines an address that is used to match against the address from which the client connection to MaxScale originates. Only sessions that originate from this address will have the match and replacement applied to them.
|
||||
|
||||
source=127.0.0.1
|
||||
|
||||
### User
|
||||
|
||||
The optional user parameter defines a user name that is used to match against the user from which the client connection to MaxScale originates. Only sessions that are connected using this username will have the match and replacement applied to them.
|
||||
|
||||
user=john
|
||||
|
||||
## Examples
|
||||
|
||||
### Example 1 - Replace MySQL 5.1 create table syntax with that for later versions
|
||||
|
||||
MySQL 5.1 used the parameter TYPE = to set the storage engine that should be used for a table. In later versions this changed to be ENGINE =. Imagine you have an application that you can not change for some reason, but you wish to migrate to a newer version of MySQL. The regexfilter can be used to transform the create table statements into the form that could be used by MySQL 5.5
|
||||
|
||||
[CreateTableFilter]
|
||||
|
||||
type=filter
|
||||
|
||||
module=regexfilter
|
||||
|
||||
options=ignorecase
|
||||
|
||||
match=TYPE[ ]*=
|
||||
|
||||
replace=ENGINE=
|
||||
|
128
Documentation/filters/Tee-Filter.md
Normal file
@ -0,0 +1,128 @@
|
||||
TEE Filter
|
||||
|
||||
# Overview
|
||||
|
||||
The tee filter is a filter module for MaxScale that acts as a "plumbing" fitting in the MaxScale filter toolkit. It can be used in a filter pipeline of a service to make a copy of requests from the client and dispatch a copy of the request to another service within MaxScale.
|
||||
|
||||
# Configuration
|
||||
|
||||
The configuration block for the TEE filter requires the minimal filter parameters in its section within the MaxScale.cnf file, stored in $MAXSCALE_HOME/etc/MaxScale.cnf, that defines the filter to load and the service to send the duplicates to.
|
||||
|
||||
[DataMartFilter]
|
||||
|
||||
type=filter
|
||||
|
||||
module=tee
|
||||
|
||||
service=DataMart
|
||||
|
||||
## Filter Options
|
||||
|
||||
The tee filter does not support any filter options.
|
||||
|
||||
## Filter Parameters
|
||||
|
||||
The tee filter requires a mandatory parameter to define the service to replicate statements to and accepts a number of optional parameters.
|
||||
|
||||
### Match
|
||||
|
||||
An optional parameter that can be used to limit the queries that will be replicated by the tee filter. The parameter value is a regular expression that is used to match against the SQL text. Only SQL statements that matches the text passed as the value of this parameter will be sent to the service defined in the filter section.
|
||||
|
||||
match=insert.*into.*order*
|
||||
|
||||
All regular expressions are evaluated with the option to ignore the case of the text, therefore a match option of insert will match both insert, INSERT and any form of the word with upper or lowercase characters.
|
||||
|
||||
### Exclude
|
||||
|
||||
An optional parameter that can be used to limit the queries that will be replicated by the tee filter. The parameter value is a regular expression that is used to match against the SQL text. SQL statements that match the text passed as the value of this parameter will be excluded from the replication stream.
|
||||
|
||||
exclude=select
|
||||
|
||||
All regular expressions are evaluated with the option to ignore the case of the text, therefore an exclude option of select will exclude statements that contain both select, SELECT or any form of the word with upper or lowercase characters.
|
||||
|
||||
### Source
|
||||
|
||||
The optional source parameter defines an address that is used to match against the address from which the client connection to MaxScale originates. Only sessions that originate from this address will be replicated.
|
||||
|
||||
source=127.0.0.1
|
||||
|
||||
### User
|
||||
|
||||
The optional user parameter defines a user name that is used to match against the user from which the client connection to MaxScale originates. Only sessions that are connected using this username are replicated.
|
||||
|
||||
user=john
|
||||
|
||||
## Examples
|
||||
|
||||
### Example 1 - Replicate all inserts into the orders table
|
||||
|
||||
Assume an order processing system that has a table called orders. You also have another database server, the datamart server, that requires all inserts into orders to be replicated to it. Deletes and updates are not however required.
|
||||
|
||||
Set up a service in MaxScale, called Orders, to communicate with the order processing system with the tee filter applied to it. Also set up a service to talk to the datamart server, using the DataMart service. The tee filter would have as its service entry the DataMart service; by adding a match parameter of "insert into orders" this would then result in all requests being sent to the order processing system, and insert statements that include the orders table being additionally sent to the datamart server.
|
||||
|
||||
[Orders]
|
||||
|
||||
type=service
|
||||
|
||||
router=readconnroute
|
||||
|
||||
servers=server1, server2, server3, server4
|
||||
|
||||
user=massi
|
||||
|
||||
passwd=6628C50E07CCE1F0392EDEEB9D1203F3
|
||||
|
||||
filters=ReplicateOrders
|
||||
|
||||
[ReplicateOrders]
|
||||
|
||||
type=filter
|
||||
|
||||
module=tee
|
||||
|
||||
service=DataMart
|
||||
|
||||
match=insert[ ]*into[ ]*orders
|
||||
|
||||
[DataMart]
|
||||
|
||||
type=service
|
||||
|
||||
router=readconnroute
|
||||
|
||||
servers=datamartserver
|
||||
|
||||
user=massi
|
||||
|
||||
passwd=6628C50E07CCE1F0392EDEEB9D1203F3
|
||||
|
||||
filters=QLA_DataMart
|
||||
|
||||
[QLA_DataMart]
|
||||
|
||||
type=filter
|
||||
|
||||
module=qlafilter
|
||||
|
||||
options=/var/log/DataMart/InsertsLog
|
||||
|
||||
[Orders Listener]
|
||||
|
||||
type=listener
|
||||
|
||||
service=Orders
|
||||
|
||||
protocol=MySQLClient
|
||||
|
||||
port=4011
|
||||
|
||||
[DataMart Listener]
|
||||
|
||||
type=listener
|
||||
|
||||
service=DataMart
|
||||
|
||||
protocol=MySQLClient
|
||||
|
||||
port=4012
|
||||
|
182
Documentation/filters/Top-N-Filter.md
Normal file
@ -0,0 +1,182 @@
|
||||
Top Filter
|
||||
|
||||
# Overview
|
||||
|
||||
The top filter is a filter module for MaxScale that monitors every SQL statement that passes through the filter. It measures the duration of that statement, the time between the statement being sent and the first result being returned. The top N times are kept, along with the SQL text itself and a list sorted on the execution times of the query is written to a file upon closure of the client session.
|
||||
|
||||
# Configuration
|
||||
|
||||
The configuration block for the TOP filter requires the minimal filter options in its section within the MaxScale.cnf file, stored in $MAXSCALE_HOME/etc/MaxScale.cnf.
|
||||
|
||||
[MyLogFilter]
|
||||
|
||||
type=filter
|
||||
|
||||
module=topfilter
|
||||
|
||||
## Filter Options
|
||||
|
||||
The top filter does not support any filter options currently.
|
||||
|
||||
## Filter Parameters
|
||||
|
||||
The top filter accepts a number of optional parameters.
|
||||
|
||||
### Filebase
|
||||
|
||||
The basename of the output file created for each session. A session index is added to the filename for each file written.
|
||||
|
||||
filebase=/tmp/SqlQueryLog
|
||||
|
||||
The filebase may also be set as the filter option; the mechanism to set the filebase via the filter option is superseded by the parameter. If both are set, the parameter setting will be used and the filter option ignored.
|
||||
|
||||
### Count
|
||||
|
||||
The number of SQL statements to store and report upon.
|
||||
|
||||
count=30
|
||||
|
||||
The default value for the number of statements recorded is 10.
|
||||
|
||||
### Match
|
||||
|
||||
An optional parameter that can be used to limit the queries that will be logged by the top filter. The parameter value is a regular expression that is used to match against the SQL text. Only SQL statements that matches the text passed as the value of this parameter will be logged.
|
||||
|
||||
match=select.*from.*customer.*where
|
||||
|
||||
All regular expressions are evaluated with the option to ignore the case of the text, therefore a match option of select will match both select, SELECT and any form of the word with upper or lowercase characters.
|
||||
|
||||
### Exclude
|
||||
|
||||
An optional parameter that can be used to limit the queries that will be logged by the top filter. The parameter value is a regular expression that is used to match against the SQL text. SQL statements that match the text passed as the value of this parameter will be excluded from the log output.
|
||||
|
||||
exclude=where
|
||||
|
||||
All regular expressions are evaluated with the option to ignore the case of the text, therefore an exclude option of where will exclude statements that contain where, WHERE or any form of the word with upper or lowercase characters.
|
||||
|
||||
### Source
|
||||
|
||||
The optional source parameter defines an address that is used to match against the address from which the client connection to MaxScale originates. Only sessions that originate from this address will be logged.
|
||||
|
||||
source=127.0.0.1
|
||||
|
||||
### User
|
||||
|
||||
The optional user parameter defines a user name that is used to match against the user from which the client connection to MaxScale originates. Only sessions that are connected using this username will result in results being generated.
|
||||
|
||||
user=john
|
||||
|
||||
## Examples
|
||||
|
||||
### Example 1 - Heavily Contended Table
|
||||
|
||||
You have an order system and believe the updates of the PRODUCTS table is causing some performance issues for the rest of your application. You would like to know which of the many updates in your application is causing the issue.
|
||||
|
||||
Add a filter with the following definition;
|
||||
|
||||
[ProductsUpdateTop20]
|
||||
|
||||
type=filter
|
||||
|
||||
module=topfilter
|
||||
|
||||
count=20
|
||||
|
||||
match=UPDATE.*PRODUCTS.*WHERE
|
||||
|
||||
exclude=UPDATE.*PRODUCTS_STOCK.*WHERE
|
||||
|
||||
filebase=/var/logs/top/ProductsUpdate
|
||||
|
||||
Note the exclude entry, this is to prevent updates to the PRODUCTS_STOCK table from being included in the report.
|
||||
|
||||
### Example 2 - One Application Server is Slow
|
||||
|
||||
One of your application servers is slower than the rest, you believe it is related to database access but you are not sure what is taking the time.
|
||||
|
||||
Add a filter with the following definition;
|
||||
|
||||
[SlowAppServer]
|
||||
|
||||
type=filter
|
||||
|
||||
module=topfilter
|
||||
|
||||
count=20
|
||||
|
||||
source=192.168.0.32
|
||||
|
||||
filebase=/var/logs/top/SlowAppServer
|
||||
|
||||
In order to produce a comparison with an unaffected application server you can also add a second filter as a control.
|
||||
|
||||
[ControlAppServer]
|
||||
|
||||
type=filter
|
||||
|
||||
module=topfilter
|
||||
|
||||
count=20
|
||||
|
||||
source=192.168.0.42
|
||||
|
||||
filebase=/var/logs/top/ControlAppServer
|
||||
|
||||
In the router definition add both filters
|
||||
|
||||
filters=SlowAppServer | ControlAppServer
|
||||
|
||||
You will then have two sets of log files written, one which profiles the top 20 queries of the slow application server and another that gives you the top 20 queries of your control application server. These two sets of files can then be compared to determine what if anything is different between the two.
|
||||
|
||||
# Output Report
|
||||
|
||||
The following is an example report for a number of fictitious queries executed against the employees example database available for MySQL.
|
||||
|
||||
-bash-4.1$ cat /var/logs/top/Employees-top-10.137
|
||||
|
||||
Top 10 longest running queries in session.
|
||||
|
||||
==========================================
|
||||
|
||||
Time (sec) | Query
|
||||
|
||||
-----------+-----------------------------------------------------------------
|
||||
|
||||
22.985 | select sum(salary), year(from_date) from salaries s, (select distinct year(from_date) as y1 from salaries) y where (makedate(y.y1, 1) between s.from_date and s.to_date) group by y.y1
|
||||
|
||||
5.304 | select d.dept_name as "Department", y.y1 as "Year", count(*) as "Count" from departments d, dept_emp de, (select distinct year(from_date) as y1 from dept_emp order by 1) y where d.dept_no = de.dept_no and (makedate(y.y1, 1) between de.from_date and de.to_date) group by y.y1, d.dept_name order by 1, 2
|
||||
|
||||
2.896 | select year(now()) - year(birth_date) as age, gender, avg(salary) as "Average Salary" from employees e, salaries s where e.emp_no = s.emp_no and ("1988-08-01" between from_date AND to_date) group by year(now()) - year(birth_date), gender order by 1,2
|
||||
|
||||
2.160 | select dept_name as "Department", sum(salary) / 12 as "Salary Bill" from employees e, departments d, dept_emp de, salaries s where e.emp_no = de.emp_no and de.dept_no = d.dept_no and ("1988-08-01" between de.from_date AND de.to_date) and ("1988-08-01" between s.from_date AND s.to_date) and s.emp_no = e.emp_no group by dept_name order by 1
|
||||
|
||||
0.845 | select dept_name as "Department", avg(year(now()) - year(birth_date)) as "Average Age", gender from employees e, departments d, dept_emp de where e.emp_no = de.emp_no and de.dept_no = d.dept_no and ("1988-08-01" between from_date AND to_date) group by dept_name, gender
|
||||
|
||||
0.668 | select year(hire_date) as "Hired", d.dept_name, count(*) as "Count" from employees e, departments d, dept_emp de where de.emp_no = e.emp_no and de.dept_no = d.dept_no group by d.dept_name, year(hire_date)
|
||||
|
||||
0.249 | select moves.n_depts As "No. of Departments", count(moves.emp_no) as "No. of Employees" from (select de1.emp_no as emp_no, count(de1.emp_no) as n_depts from dept_emp de1 group by de1.emp_no) as moves group by moves.n_depts order by 1
|
||||
|
||||
0.245 | select year(now()) - year(birth_date) as age, gender, count(*) as "Count" from employees group by year(now()) - year(birth_date), gender order by 1,2
|
||||
|
||||
0.179 | select year(hire_date) as "Hired", count(*) as "Count" from employees group by year(hire_date)
|
||||
|
||||
0.160 | select year(hire_date) - year(birth_date) as "Age", count(*) as Count from employees group by year(hire_date) - year(birth_date) order by 1
|
||||
|
||||
-----------+-----------------------------------------------------------------
|
||||
|
||||
Session started Wed Jun 18 18:41:03 2014
|
||||
|
||||
Connection from 127.0.0.1
|
||||
|
||||
Username massi
|
||||
|
||||
Total of 24 statements executed.
|
||||
|
||||
Total statement execution time 35.701 seconds
|
||||
|
||||
Average statement execution time 1.488 seconds
|
||||
|
||||
Total connection time 46.500 seconds
|
||||
|
||||
-bash-4.1$
|
||||
|
@ -63,6 +63,7 @@ static void DoSource(int so, char *cmd);
|
||||
static void DoUsage();
|
||||
static int isquit(char *buf);
|
||||
static void PrintVersion(const char *progname);
|
||||
static void read_inifile(char **hostname, char **port, char **user, char **passwd);
|
||||
|
||||
#ifdef HISTORY
|
||||
static char *
|
||||
@ -112,6 +113,8 @@ int so;
|
||||
int option_index = 0;
|
||||
char c;
|
||||
|
||||
read_inifile(&hostname, &port, &user, &passwd);
|
||||
|
||||
while ((c = getopt_long(argc, argv, "h:p:P:u:v?",
|
||||
long_options, &option_index))
|
||||
>= 0)
|
||||
@ -240,7 +243,7 @@ char c;
|
||||
*/
|
||||
el_source(el, NULL);
|
||||
|
||||
while ((buf = el_gets(el, &num)) != NULL && num != 0)
|
||||
while ((buf = (char *)el_gets(el, &num)) != NULL && num != 0)
|
||||
{
|
||||
#else
|
||||
while (printf("MaxScale> ") && fgets(buf, 1024, stdin) != NULL)
|
||||
@ -562,3 +565,77 @@ char *ptr = buf;
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Trim whitespace from the right hand end of the string
|
||||
*
|
||||
* @param str String to trim
|
||||
*/
|
||||
static void
|
||||
rtrim(char *str)
|
||||
{
|
||||
char *ptr = str + strlen(str);
|
||||
|
||||
if (ptr > str) // step back from the terminating null
|
||||
ptr--; // If the string has more characters
|
||||
while (ptr >= str && isspace(*ptr))
|
||||
*ptr-- = 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Read defaults for hostname, port, user and password from
|
||||
* the .maxadmin file in the users home directory.
|
||||
*
|
||||
* @param hostname Pointer the hostname to be updated
|
||||
* @param port Pointer to the port to be updated
|
||||
* @param user Pointer to the user to be updated
|
||||
* @param passwd Pointer to the password to be updated
|
||||
*/
|
||||
static void
|
||||
read_inifile(char **hostname, char **port, char **user, char **passwd)
|
||||
{
|
||||
char pathname[400];
|
||||
char *home, *brkt;
|
||||
char *name, *value;
|
||||
FILE *fp;
|
||||
char line[400];
|
||||
|
||||
if ((home = getenv("HOME")) == NULL)
|
||||
return;
|
||||
snprintf(pathname, 400, "%s/.maxadmin", home);
|
||||
if ((fp = fopen(pathname, "r")) == NULL)
|
||||
return;
|
||||
while (fgets(line, 400, fp) != NULL)
|
||||
{
|
||||
rtrim(line);
|
||||
if (line[0] == 0)
|
||||
continue;
|
||||
if (line[0] == '#')
|
||||
continue;
|
||||
name = strtok_r(line, "=", &brkt);
|
||||
value = strtok_r(NULL, "=", &brkt);
|
||||
if (name && value)
|
||||
{
|
||||
if (strcmp(name, "hostname") == 0)
|
||||
*hostname = strdup(value);
|
||||
else if (strcmp(name, "port") == 0)
|
||||
*port = strdup(value);
|
||||
else if (strcmp(name, "user") == 0)
|
||||
*user = strdup(value);
|
||||
else if (strcmp(name, "passwd") == 0)
|
||||
*passwd = strdup(value);
|
||||
else
|
||||
{
|
||||
fprintf(stderr, "WARNING: Unrecognised "
|
||||
"parameter '%s' in .maxadmin file\n", name);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
fprintf(stderr, "WARNING: Expected name=value "
|
||||
"parameters in .maxadmin file but found "
|
||||
"'%s'.\n", line);
|
||||
}
|
||||
}
|
||||
fclose(fp);
|
||||
}
|
||||
|
@ -98,6 +98,7 @@ macro(check_dirs)
|
||||
|
||||
if(DEFINED MYSQL_DIR)
|
||||
debugmsg("Searching for MySQL headers at: ${MYSQL_DIR}")
|
||||
list(APPEND CMAKE_INCLUDE_PATH ${MYSQL_DIR})
|
||||
find_path(MYSQL_DIR_LOC mysql.h PATHS ${MYSQL_DIR} PATH_SUFFIXES mysql mariadb NO_DEFAULT_PATH)
|
||||
else()
|
||||
find_path(MYSQL_DIR_LOC mysql.h PATH_SUFFIXES mysql mariadb)
|
||||
|
@ -180,6 +180,7 @@ HASHENTRIES *entry, *ptr;
|
||||
}
|
||||
free(table->entries);
|
||||
|
||||
hashtable_write_unlock(table);
|
||||
if (!table->ht_isflat)
|
||||
{
|
||||
free(table);
|
||||
|
@ -1091,8 +1091,8 @@ int service_refresh_users(SERVICE *service) {
|
||||
if (! spinlock_acquire_nowait(&service->users_table_spin)) {
|
||||
LOGIF(LD, (skygw_log_write_flush(
|
||||
LOGFILE_DEBUG,
|
||||
"%lu [service_refresh_users] failed to get get lock for loading new users' table: another thread is loading users",
|
||||
pthread_self())));
|
||||
"%s: [service_refresh_users] failed to get get lock for loading new users' table: another thread is loading users",
|
||||
service->name)));
|
||||
|
||||
return 1;
|
||||
}
|
||||
@ -1100,12 +1100,12 @@ int service_refresh_users(SERVICE *service) {
|
||||
|
||||
/* check if refresh rate limit has exceeded */
|
||||
if ( (time(NULL) < (service->rate_limit.last + USERS_REFRESH_TIME)) || (service->rate_limit.nloads > USERS_REFRESH_MAX_PER_TIME)) {
|
||||
spinlock_release(&service->users_table_spin);
|
||||
LOGIF(LE, (skygw_log_write_flush(
|
||||
LOGFILE_ERROR,
|
||||
"Refresh rate limit exceeded for load of users' table for service '%s'.",
|
||||
"%s: Refresh rate limit exceeded for load of users' table.",
|
||||
service->name)));
|
||||
|
||||
spinlock_release(&service->users_table_spin);
|
||||
return 1;
|
||||
}
|
||||
|
||||
@ -1427,4 +1427,4 @@ void service_shutdown()
|
||||
svc = svc->next;
|
||||
}
|
||||
spinlock_release(&service_spin);
|
||||
}
|
||||
}
|
||||
|
@ -168,6 +168,9 @@ typedef struct gwbuf {
|
||||
/*< Consume a number of bytes in the buffer */
|
||||
#define GWBUF_CONSUME(b, bytes) ((b)->start = bytes > ((char *)(b)->end - (char *)(b)->start) ? (b)->end : (void *)((char *)(b)->start + (bytes)));
|
||||
|
||||
/*< Consume a complete buffer */
|
||||
#define GWBUF_CONSUME_ALL(b) gwbuf_consume((b), GWBUF_LENGTH((b)))
|
||||
|
||||
#define GWBUF_RTRIM(b, bytes) ((b)->end = bytes > ((char *)(b)->end - (char *)(b)->start) ? (b)->start : (void *)((char *)(b)->end - (bytes)));
|
||||
|
||||
#define GWBUF_TYPE(b) (b)->gwbuf_type
|
||||
|
@ -207,6 +207,7 @@ typedef struct {
|
||||
time_t lastReply;
|
||||
uint64_t n_fakeevents; /*< Fake events not written to disk */
|
||||
uint64_t n_artificial; /*< Artificial events not written to disk */
|
||||
int n_badcrc; /*< No. of bad CRC's from master */
|
||||
uint64_t events[0x24]; /*< Per event counters */
|
||||
uint64_t lastsample;
|
||||
int minno;
|
||||
@ -230,6 +231,7 @@ typedef struct {
|
||||
GWBUF *selectver; /*< select version() */
|
||||
GWBUF *selectvercom; /*< select @@version_comment */
|
||||
GWBUF *selecthostname;/*< select @@hostname */
|
||||
GWBUF *map; /*< select @@max_allowed_packet */
|
||||
uint8_t *fde_event; /*< Format Description Event */
|
||||
int fde_len; /*< Length of fde_event */
|
||||
} MASTER_RESPONSES;
|
||||
@ -305,17 +307,18 @@ typedef struct router_instance {
|
||||
#define BLRM_SELECTVER 0x000E
|
||||
#define BLRM_SELECTVERCOM 0x000F
|
||||
#define BLRM_SELECTHOSTNAME 0x0010
|
||||
#define BLRM_REGISTER 0x0011
|
||||
#define BLRM_BINLOGDUMP 0x0012
|
||||
#define BLRM_MAP 0x0011
|
||||
#define BLRM_REGISTER 0x0012
|
||||
#define BLRM_BINLOGDUMP 0x0013
|
||||
|
||||
#define BLRM_MAXSTATE 0x0012
|
||||
#define BLRM_MAXSTATE 0x0013
|
||||
|
||||
static char *blrm_states[] = { "Unconnected", "Connecting", "Authenticated", "Timestamp retrieval",
|
||||
"Server ID retrieval", "HeartBeat Period setup", "binlog checksum config",
|
||||
"binlog checksum rerieval", "GTID Mode retrieval", "Master UUID retrieval",
|
||||
"Set Slave UUID", "Set Names latin1", "Set Names utf8", "select 1",
|
||||
"select version()", "select @@version_comment", "select @@hostname",
|
||||
"Register slave", "Binlog Dump" };
|
||||
"select @@mx_allowed_packet", "Register slave", "Binlog Dump" };
|
||||
|
||||
#define BLRS_CREATED 0x0000
|
||||
#define BLRS_UNREGISTERED 0x0001
|
||||
|
@ -514,6 +514,13 @@ static int gw_mysql_do_authentication(DCB *dcb, GWBUF *queue) {
|
||||
username,
|
||||
stage1_hash);
|
||||
}
|
||||
else
|
||||
{
|
||||
LOGIF(LM, (skygw_log_write(LOGFILE_MESSAGE,
|
||||
"%s: login attempt for user %s, user not "
|
||||
"found.",
|
||||
dcb->service->name, username)));
|
||||
}
|
||||
}
|
||||
|
||||
/* Do again the database check */
|
||||
|
@ -165,6 +165,7 @@ createInstance(SERVICE *service, char **options)
|
||||
ROUTER_INSTANCE *inst;
|
||||
char *value, *name;
|
||||
int i;
|
||||
unsigned char *defuuid;
|
||||
|
||||
if ((inst = calloc(1, sizeof(ROUTER_INSTANCE))) == NULL) {
|
||||
return NULL;
|
||||
@ -191,6 +192,21 @@ int i;
|
||||
inst->binlogdir = NULL;
|
||||
inst->heartbeat = 300; // Default is every 5 minutes
|
||||
|
||||
inst->user = strdup(service->credentials.name);
|
||||
inst->password = strdup(service->credentials.authdata);
|
||||
|
||||
my_uuid_init((ulong)rand()*12345,12345);
|
||||
if ((defuuid = (char *)malloc(20)) != NULL)
|
||||
{
|
||||
my_uuid(defuuid);
|
||||
if ((inst->uuid = (char *)malloc(38)) != NULL)
|
||||
sprintf(inst->uuid, "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
|
||||
defuuid[0], defuuid[1], defuuid[2], defuuid[3],
|
||||
defuuid[4], defuuid[5], defuuid[6], defuuid[7],
|
||||
defuuid[8], defuuid[9], defuuid[10], defuuid[11],
|
||||
defuuid[12], defuuid[13], defuuid[14], defuuid[15]);
|
||||
}
|
||||
|
||||
/*
|
||||
* We only support one server behind this router, since the server is
|
||||
* the master from which we replicate binlog records. Therefore check
|
||||
@ -328,10 +344,16 @@ int i;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (inst->fileroot == NULL)
|
||||
inst->fileroot = strdup(BINLOG_NAME_ROOT);
|
||||
}
|
||||
else
|
||||
{
|
||||
LOGIF(LE, (skygw_log_write(
|
||||
LOGFILE_ERROR, "%s: No router options supplied for binlogrouter",
|
||||
service->name)));
|
||||
}
|
||||
|
||||
if (inst->fileroot == NULL)
|
||||
inst->fileroot = strdup(BINLOG_NAME_ROOT);
|
||||
inst->active_logs = 0;
|
||||
inst->reconnect_pending = 0;
|
||||
inst->handling_threads = 0;
|
||||
@ -340,6 +362,24 @@ int i;
|
||||
inst->slaves = NULL;
|
||||
inst->next = NULL;
|
||||
|
||||
/*
|
||||
* Read any cached response messages
|
||||
*/
|
||||
inst->saved_master.server_id = blr_cache_read_response(inst, "serverid");
|
||||
inst->saved_master.heartbeat = blr_cache_read_response(inst, "heartbeat");
|
||||
inst->saved_master.chksum1 = blr_cache_read_response(inst, "chksum1");
|
||||
inst->saved_master.chksum2 = blr_cache_read_response(inst, "chksum2");
|
||||
inst->saved_master.gtid_mode = blr_cache_read_response(inst, "gtidmode");
|
||||
inst->saved_master.uuid = blr_cache_read_response(inst, "uuid");
|
||||
inst->saved_master.setslaveuuid = blr_cache_read_response(inst, "ssuuid");
|
||||
inst->saved_master.setnames = blr_cache_read_response(inst, "setnames");
|
||||
inst->saved_master.utf8 = blr_cache_read_response(inst, "utf8");
|
||||
inst->saved_master.select1 = blr_cache_read_response(inst, "select1");
|
||||
inst->saved_master.selectver = blr_cache_read_response(inst, "selectver");
|
||||
inst->saved_master.selectvercom = blr_cache_read_response(inst, "selectvercom");
|
||||
inst->saved_master.selecthostname = blr_cache_read_response(inst, "selecthostname");
|
||||
inst->saved_master.map = blr_cache_read_response(inst, "map");
|
||||
|
||||
/*
|
||||
* Initialise the binlog file and position
|
||||
*/
|
||||
@ -702,6 +742,8 @@ struct tm tm;
|
||||
router_inst->stats.n_binlogs_ses);
|
||||
dcb_printf(dcb, "\tTotal no. of binlog events received: %u\n",
|
||||
router_inst->stats.n_binlogs);
|
||||
dcb_printf(dcb, "\tNo. of bad CRC received from master: %u\n",
|
||||
router_inst->stats.n_badcrc);
|
||||
minno = router_inst->stats.minno - 1;
|
||||
if (minno == -1)
|
||||
minno = 30;
|
||||
|
@ -290,7 +290,7 @@ blr_file_flush(ROUTER_INSTANCE *router)
|
||||
BLFILE *
|
||||
blr_open_binlog(ROUTER_INSTANCE *router, char *binlog)
|
||||
{
|
||||
char *ptr, path[1024];
|
||||
char path[1024];
|
||||
BLFILE *file;
|
||||
|
||||
spinlock_acquire(&router->fileslock);
|
||||
@ -613,3 +613,85 @@ struct stat statb;
|
||||
return statb.st_size;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Write the response packet to a cache file so that MaxScale can respond
|
||||
* even if there is no master running when MaxScale starts.
|
||||
*
|
||||
* @param router The instance of the router
|
||||
* @param response The name of the response, used to name the cached file
|
||||
* @param buf The buffer to written to the cache
|
||||
*/
|
||||
void
|
||||
blr_cache_response(ROUTER_INSTANCE *router, char *response, GWBUF *buf)
|
||||
{
|
||||
char path[4096], *ptr;
|
||||
int fd;
|
||||
|
||||
strcpy(path, "/usr/local/skysql/MaxScale");
|
||||
if ((ptr = getenv("MAXSCALE_HOME")) != NULL)
|
||||
{
|
||||
strncpy(path, ptr, 4096);
|
||||
}
|
||||
strncat(path, "/", 4096);
|
||||
strncat(path, router->service->name, 4096);
|
||||
|
||||
if (access(path, R_OK) == -1)
|
||||
mkdir(path, 0777);
|
||||
strncat(path, "/.cache", 4096);
|
||||
if (access(path, R_OK) == -1)
|
||||
mkdir(path, 0777);
|
||||
strncat(path, "/", 4096);
|
||||
strncat(path, response, 4096);
|
||||
|
||||
if ((fd = open(path, O_WRONLY|O_CREAT|O_TRUNC, 0666)) == -1)
|
||||
return;
|
||||
write(fd, GWBUF_DATA(buf), GWBUF_LENGTH(buf));
|
||||
close(fd);
|
||||
}
|
||||
|
||||
/**
|
||||
* Read a cached copy of a master response message. This allows
|
||||
* the router to start and serve any binlogs it already has on disk
|
||||
* if the master is not available.
|
||||
*
|
||||
* @param router The router instance structure
|
||||
* @param response The name of the response
|
||||
* @return A pointer to a GWBUF structure
|
||||
*/
|
||||
GWBUF *
|
||||
blr_cache_read_response(ROUTER_INSTANCE *router, char *response)
|
||||
{
|
||||
struct stat statb;
|
||||
char path[4096], *ptr;
|
||||
int fd;
|
||||
GWBUF *buf;
|
||||
|
||||
strcpy(path, "/usr/local/skysql/MaxScale");
|
||||
if ((ptr = getenv("MAXSCALE_HOME")) != NULL)
|
||||
{
|
||||
strncpy(path, ptr, 4096);
|
||||
}
|
||||
strncat(path, "/", 4096);
|
||||
strncat(path, router->service->name, 4096);
|
||||
strncat(path, "/.cache/", 4096);
|
||||
strncat(path, response, 4096);
|
||||
|
||||
if ((fd = open(path, O_RDONLY)) == -1)
|
||||
return NULL;
|
||||
|
||||
if (fstat(fd, &statb) != 0)
|
||||
{
|
||||
close(fd);
|
||||
return NULL;
|
||||
}
|
||||
if ((buf = gwbuf_alloc(statb.st_size)) == NULL)
|
||||
{
|
||||
close(fd);
|
||||
return NULL;
|
||||
}
|
||||
read(fd, GWBUF_DATA(buf), statb.st_size);
|
||||
close(fd);
|
||||
return buf;
|
||||
}
|
||||
|
@ -48,6 +48,7 @@
|
||||
#include <dcb.h>
|
||||
#include <spinlock.h>
|
||||
#include <housekeeper.h>
|
||||
#include <buffer.h>
|
||||
|
||||
#include <sys/types.h>
|
||||
#include <sys/socket.h>
|
||||
@ -104,6 +105,15 @@ GWBUF *buf;
|
||||
return;
|
||||
}
|
||||
router->master_state = BLRM_CONNECTING;
|
||||
|
||||
/* Discard the queued residual data */
|
||||
buf = router->residual;
|
||||
while (buf)
|
||||
{
|
||||
buf = gwbuf_consume(buf, GWBUF_LENGTH(buf));
|
||||
}
|
||||
router->residual = NULL;
|
||||
|
||||
spinlock_release(&router->lock);
|
||||
if ((client = dcb_alloc(DCB_ROLE_INTERNAL)) == NULL)
|
||||
{
|
||||
@ -141,7 +151,7 @@ GWBUF *buf;
|
||||
router->master->remote = strdup(router->service->dbref->server->name);
|
||||
LOGIF(LM,(skygw_log_write(
|
||||
LOGFILE_MESSAGE,
|
||||
"%s: atempting to connect to master server %s.",
|
||||
"%s: attempting to connect to master server %s.",
|
||||
router->service->name, router->master->remote)));
|
||||
router->connect_time = time(0);
|
||||
|
||||
@ -361,7 +371,10 @@ char query[128];
|
||||
break;
|
||||
case BLRM_SERVERID:
|
||||
// Response to fetch of master's server-id
|
||||
if (router->saved_master.server_id)
|
||||
GWBUF_CONSUME_ALL(router->saved_master.server_id);
|
||||
router->saved_master.server_id = buf;
|
||||
blr_cache_response(router, "serverid", buf);
|
||||
// TODO: Extract the value of server-id and place in router->master_id
|
||||
{
|
||||
char str[80];
|
||||
@ -373,35 +386,50 @@ char query[128];
|
||||
break;
|
||||
case BLRM_HBPERIOD:
|
||||
// Response to set the heartbeat period
|
||||
if (router->saved_master.heartbeat)
|
||||
GWBUF_CONSUME_ALL(router->saved_master.heartbeat);
|
||||
router->saved_master.heartbeat = buf;
|
||||
blr_cache_response(router, "heartbeat", buf);
|
||||
buf = blr_make_query("SET @master_binlog_checksum = @@global.binlog_checksum");
|
||||
router->master_state = BLRM_CHKSUM1;
|
||||
router->master->func.write(router->master, buf);
|
||||
break;
|
||||
case BLRM_CHKSUM1:
|
||||
// Response to set the master binlog checksum
|
||||
if (router->saved_master.chksum1)
|
||||
GWBUF_CONSUME_ALL(router->saved_master.chksum1);
|
||||
router->saved_master.chksum1 = buf;
|
||||
blr_cache_response(router, "chksum1", buf);
|
||||
buf = blr_make_query("SELECT @master_binlog_checksum");
|
||||
router->master_state = BLRM_CHKSUM2;
|
||||
router->master->func.write(router->master, buf);
|
||||
break;
|
||||
case BLRM_CHKSUM2:
|
||||
// Response to the master_binlog_checksum, should be stored
|
||||
if (router->saved_master.chksum2)
|
||||
GWBUF_CONSUME_ALL(router->saved_master.chksum2);
|
||||
router->saved_master.chksum2 = buf;
|
||||
blr_cache_response(router, "chksum2", buf);
|
||||
buf = blr_make_query("SELECT @@GLOBAL.GTID_MODE");
|
||||
router->master_state = BLRM_GTIDMODE;
|
||||
router->master->func.write(router->master, buf);
|
||||
break;
|
||||
case BLRM_GTIDMODE:
|
||||
// Response to the GTID_MODE, should be stored
|
||||
if (router->saved_master.gtid_mode)
|
||||
GWBUF_CONSUME_ALL(router->saved_master.gtid_mode);
|
||||
router->saved_master.gtid_mode = buf;
|
||||
blr_cache_response(router, "gtidmode", buf);
|
||||
buf = blr_make_query("SHOW VARIABLES LIKE 'SERVER_UUID'");
|
||||
router->master_state = BLRM_MUUID;
|
||||
router->master->func.write(router->master, buf);
|
||||
break;
|
||||
case BLRM_MUUID:
|
||||
// Response to the SERVER_UUID, should be stored
|
||||
if (router->saved_master.uuid)
|
||||
GWBUF_CONSUME_ALL(router->saved_master.uuid);
|
||||
router->saved_master.uuid = buf;
|
||||
blr_cache_response(router, "uuid", buf);
|
||||
sprintf(query, "SET @slave_uuid='%s'", router->uuid);
|
||||
buf = blr_make_query(query);
|
||||
router->master_state = BLRM_SUUID;
|
||||
@ -409,49 +437,80 @@ char query[128];
|
||||
break;
|
||||
case BLRM_SUUID:
|
||||
// Response to the SET @server_uuid, should be stored
|
||||
if (router->saved_master.setslaveuuid)
|
||||
GWBUF_CONSUME_ALL(router->saved_master.setslaveuuid);
|
||||
router->saved_master.setslaveuuid = buf;
|
||||
blr_cache_response(router, "ssuuid", buf);
|
||||
buf = blr_make_query("SET NAMES latin1");
|
||||
router->master_state = BLRM_LATIN1;
|
||||
router->master->func.write(router->master, buf);
|
||||
break;
|
||||
case BLRM_LATIN1:
|
||||
// Response to the SET NAMES latin1, should be stored
|
||||
if (router->saved_master.setnames)
|
||||
GWBUF_CONSUME_ALL(router->saved_master.setnames);
|
||||
router->saved_master.setnames = buf;
|
||||
blr_cache_response(router, "setnames", buf);
|
||||
buf = blr_make_query("SET NAMES utf8");
|
||||
router->master_state = BLRM_UTF8;
|
||||
router->master->func.write(router->master, buf);
|
||||
break;
|
||||
case BLRM_UTF8:
|
||||
// Response to the SET NAMES utf8, should be stored
|
||||
if (router->saved_master.utf8)
|
||||
GWBUF_CONSUME_ALL(router->saved_master.utf8);
|
||||
router->saved_master.utf8 = buf;
|
||||
blr_cache_response(router, "utf8", buf);
|
||||
buf = blr_make_query("SELECT 1");
|
||||
router->master_state = BLRM_SELECT1;
|
||||
router->master->func.write(router->master, buf);
|
||||
break;
|
||||
case BLRM_SELECT1:
|
||||
// Response to the SELECT 1, should be stored
|
||||
if (router->saved_master.select1)
|
||||
GWBUF_CONSUME_ALL(router->saved_master.select1);
|
||||
router->saved_master.select1 = buf;
|
||||
blr_cache_response(router, "select1", buf);
|
||||
buf = blr_make_query("SELECT VERSION();");
|
||||
router->master_state = BLRM_SELECTVER;
|
||||
router->master->func.write(router->master, buf);
|
||||
break;
|
||||
case BLRM_SELECTVER:
|
||||
// Response to SELECT VERSION should be stored
|
||||
if (router->saved_master.selectver)
|
||||
GWBUF_CONSUME_ALL(router->saved_master.selectver);
|
||||
router->saved_master.selectver = buf;
|
||||
blr_cache_response(router, "selectver", buf);
|
||||
buf = blr_make_query("SELECT @@version_comment limit 1;");
|
||||
router->master_state = BLRM_SELECTVERCOM;
|
||||
router->master->func.write(router->master, buf);
|
||||
break;
|
||||
case BLRM_SELECTVERCOM:
|
||||
// Response to SELECT @@version_comment should be stored
|
||||
if (router->saved_master.selectvercom)
|
||||
GWBUF_CONSUME_ALL(router->saved_master.selectvercom);
|
||||
router->saved_master.selectvercom = buf;
|
||||
blr_cache_response(router, "selectvercom", buf);
|
||||
buf = blr_make_query("SELECT @@hostname;");
|
||||
router->master_state = BLRM_SELECTHOSTNAME;
|
||||
router->master->func.write(router->master, buf);
|
||||
break;
|
||||
case BLRM_SELECTHOSTNAME:
|
||||
// Response to SELECT @@hostname should be stored
|
||||
if (router->saved_master.selecthostname)
|
||||
GWBUF_CONSUME_ALL(router->saved_master.selecthostname);
|
||||
router->saved_master.selecthostname = buf;
|
||||
blr_cache_response(router, "selecthostname", buf);
|
||||
buf = blr_make_query("SELECT @@max_allowed_packet;");
|
||||
router->master_state = BLRM_MAP;
|
||||
router->master->func.write(router->master, buf);
|
||||
break;
|
||||
case BLRM_MAP:
|
||||
// Response to SELECT @@max_allowed_packet should be stored
|
||||
if (router->saved_master.map)
|
||||
GWBUF_CONSUME_ALL(router->saved_master.map);
|
||||
router->saved_master.map = buf;
|
||||
blr_cache_response(router, "map", buf);
|
||||
buf = blr_make_registration(router);
|
||||
router->master_state = BLRM_REGISTER;
|
||||
router->master->func.write(router->master, buf);
|
||||
@ -622,6 +681,11 @@ static REP_HEADER phdr;
|
||||
}
|
||||
|
||||
pkt_length = gwbuf_length(pkt);
|
||||
/*
|
||||
* Loop over all the packets while we still have some data
|
||||
* and the packet length is enough to hold a replication event
|
||||
* header.
|
||||
*/
|
||||
while (pkt && pkt_length > 24)
|
||||
{
|
||||
reslen = GWBUF_LENGTH(pkt);
|
||||
@ -649,6 +713,7 @@ static REP_HEADER phdr;
|
||||
{
|
||||
len = EXTRACT24(pdata) + 4;
|
||||
}
|
||||
/* len is now the payload length for the packet we are working on */
|
||||
|
||||
if (reslen < len && pkt_length >= len)
|
||||
{
|
||||
@ -728,10 +793,17 @@ static REP_HEADER phdr;
|
||||
n_bufs = 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* ptr now points at the current message in a contiguous buffer,
|
||||
* this buffer is either within the GWBUF or in a malloc'd
|
||||
* copy if the message straddles GWBUF's.
|
||||
*/
|
||||
|
||||
if (len < BINLOG_EVENT_HDR_LEN)
|
||||
{
|
||||
char *msg = "";
|
||||
|
||||
/* Packet is too small to be a binlog event */
|
||||
if (ptr[4] == 0xfe) /* EOF Packet */
|
||||
{
|
||||
msg = "end of file";
|
||||
@ -753,7 +825,7 @@ static REP_HEADER phdr;
|
||||
|
||||
blr_extract_header(ptr, &hdr);
|
||||
|
||||
if (hdr.event_size != len - 5)
|
||||
if (hdr.event_size != len - 5) /* Sanity check */
|
||||
{
|
||||
LOGIF(LE,(skygw_log_write(
|
||||
LOGFILE_ERROR,
|
||||
@ -784,6 +856,35 @@ static REP_HEADER phdr;
|
||||
phdr = hdr;
|
||||
if (hdr.ok == 0)
|
||||
{
|
||||
/*
|
||||
* First check that the checksum we calculate matches the
|
||||
* checksum in the packet we received.
|
||||
*/
|
||||
uint32_t chksum, pktsum;
|
||||
|
||||
chksum = crc32(0L, NULL, 0);
|
||||
chksum = crc32(chksum, ptr + 5, hdr.event_size - 4);
|
||||
pktsum = EXTRACT32(ptr + hdr.event_size + 1);
|
||||
if (pktsum != chksum)
|
||||
{
|
||||
router->stats.n_badcrc++;
|
||||
if (msg)
|
||||
{
|
||||
free(msg);
|
||||
msg = NULL;
|
||||
}
|
||||
LOGIF(LE,(skygw_log_write(LOGFILE_ERROR,
|
||||
"%s: Checksum error in event "
|
||||
"from master, "
|
||||
"binlog %s @ %d. "
|
||||
"Closing master connection.",
|
||||
router->service->name,
|
||||
router->binlog_name,
|
||||
router->binlog_position)));
|
||||
blr_master_close(router);
|
||||
blr_master_delayed_connect(router);
|
||||
return;
|
||||
}
|
||||
router->stats.n_binlogs++;
|
||||
router->lastEventReceived = hdr.event_type;
|
||||
|
||||
|
@ -52,6 +52,7 @@
|
||||
#include <skygw_types.h>
|
||||
#include <skygw_utils.h>
|
||||
#include <log_manager.h>
|
||||
#include <version.h>
|
||||
|
||||
static uint32_t extract_field(uint8_t *src, int bits);
|
||||
static void encode_value(unsigned char *data, unsigned int value, int len);
|
||||
@ -66,6 +67,14 @@ uint8_t *blr_build_header(GWBUF *pkt, REP_HEADER *hdr);
|
||||
int blr_slave_callback(DCB *dcb, DCB_REASON reason, void *data);
|
||||
static int blr_slave_fake_rotate(ROUTER_INSTANCE *router, ROUTER_SLAVE *slave);
|
||||
static void blr_slave_send_fde(ROUTER_INSTANCE *router, ROUTER_SLAVE *slave);
|
||||
static int blr_slave_send_maxscale_version(ROUTER_INSTANCE *router, ROUTER_SLAVE *slave);
|
||||
static int blr_slave_send_maxscale_variables(ROUTER_INSTANCE *router, ROUTER_SLAVE *slave);
|
||||
static int blr_slave_send_master_status(ROUTER_INSTANCE *router, ROUTER_SLAVE *slave);
|
||||
static int blr_slave_send_slave_status(ROUTER_INSTANCE *router, ROUTER_SLAVE *slave);
|
||||
static int blr_slave_send_slave_hosts(ROUTER_INSTANCE *router, ROUTER_SLAVE *slave);
|
||||
static int blr_slave_send_fieldcount(ROUTER_INSTANCE *router, ROUTER_SLAVE *slave, int count);
|
||||
static int blr_slave_send_columndef(ROUTER_INSTANCE *router, ROUTER_SLAVE *slave, char *name, int type, int len, uint8_t seqno);
|
||||
static int blr_slave_send_eof(ROUTER_INSTANCE *router, ROUTER_SLAVE *slave, int seqno);
|
||||
|
||||
extern int lm_enabled_logfiles_bitmask;
|
||||
extern size_t log_ses_count[];
|
||||
@ -141,7 +150,11 @@ blr_slave_request(ROUTER_INSTANCE *router, ROUTER_SLAVE *slave, GWBUF *queue)
|
||||
* when MaxScale registered as a slave. The exception to the rule is the
|
||||
* request to obtain the current timestamp value of the server.
|
||||
*
|
||||
* Seven select statements are currently supported:
|
||||
* The original set added for the registration process has been enhanced in
|
||||
* order to support some commands that are useful for monitoring the binlog
|
||||
* router.
|
||||
*
|
||||
* Eight select statements are currently supported:
|
||||
* SELECT UNIX_TIMESTAMP();
|
||||
* SELECT @master_binlog_checksum
|
||||
* SELECT @@GLOBAL.GTID_MODE
|
||||
@ -149,10 +162,15 @@ blr_slave_request(ROUTER_INSTANCE *router, ROUTER_SLAVE *slave, GWBUF *queue)
|
||||
* SELECT 1
|
||||
* SELECT @@version_comment limit 1
|
||||
* SELECT @@hostname
|
||||
* SELECT @@max_allowed_packet
|
||||
* SELECT @@maxscale_version
|
||||
*
|
||||
* Two show commands are supported:
|
||||
* Five show commands are supported:
|
||||
* SHOW VARIABLES LIKE 'SERVER_ID'
|
||||
* SHOW VARIABLES LIKE 'SERVER_UUID'
|
||||
* SHOW VARIABLES LIKE 'MAXSCALE%
|
||||
* SHOW MASTER STATUS
|
||||
* SHOW SLAVE HOSTS
|
||||
*
|
||||
* Five set commands are supported:
|
||||
* SET @master_binlog_checksum = @@global.binlog_checksum
|
||||
@ -189,11 +207,20 @@ int query_len;
|
||||
* own interaction with the real master. We simply replay these saved responses
|
||||
* to the slave.
|
||||
*/
|
||||
word = strtok_r(query_text, sep, &brkb);
|
||||
if (strcasecmp(word, "SELECT") == 0)
|
||||
if ((word = strtok_r(query_text, sep, &brkb)) == NULL)
|
||||
{
|
||||
word = strtok_r(NULL, sep, &brkb);
|
||||
if (strcasecmp(word, "UNIX_TIMESTAMP()") == 0)
|
||||
|
||||
LOGIF(LE, (skygw_log_write(LOGFILE_ERROR, "%s: Incomplete query.",
|
||||
router->service->name)));
|
||||
}
|
||||
else if (strcasecmp(word, "SELECT") == 0)
|
||||
{
|
||||
if ((word = strtok_r(NULL, sep, &brkb)) == NULL)
|
||||
{
|
||||
LOGIF(LE, (skygw_log_write(LOGFILE_ERROR, "%s: Incomplete select query.",
|
||||
router->service->name)));
|
||||
}
|
||||
else if (strcasecmp(word, "UNIX_TIMESTAMP()") == 0)
|
||||
{
|
||||
free(query_text);
|
||||
return blr_slave_send_timestamp(router, slave);
|
||||
@ -228,17 +255,41 @@ int query_len;
|
||||
free(query_text);
|
||||
return blr_slave_replay(router, slave, router->saved_master.selecthostname);
|
||||
}
|
||||
else if (strcasecmp(word, "@@max_allowed_packet") == 0)
|
||||
{
|
||||
free(query_text);
|
||||
return blr_slave_replay(router, slave, router->saved_master.map);
|
||||
}
|
||||
else if (strcasecmp(word, "@@maxscale_version") == 0)
|
||||
{
|
||||
free(query_text);
|
||||
return blr_slave_send_maxscale_version(router, slave);
|
||||
}
|
||||
}
|
||||
else if (strcasecmp(word, "SHOW") == 0)
|
||||
{
|
||||
word = strtok_r(NULL, sep, &brkb);
|
||||
if (strcasecmp(word, "VARIABLES") == 0)
|
||||
if ((word = strtok_r(NULL, sep, &brkb)) == NULL)
|
||||
{
|
||||
word = strtok_r(NULL, sep, &brkb);
|
||||
if (strcasecmp(word, "LIKE") == 0)
|
||||
LOGIF(LE, (skygw_log_write(LOGFILE_ERROR, "%s: Incomplete show query.",
|
||||
router->service->name)));
|
||||
}
|
||||
else if (strcasecmp(word, "VARIABLES") == 0)
|
||||
{
|
||||
if ((word = strtok_r(NULL, sep, &brkb)) == NULL)
|
||||
{
|
||||
word = strtok_r(NULL, sep, &brkb);
|
||||
if (strcasecmp(word, "'SERVER_ID'") == 0)
|
||||
LOGIF(LE, (skygw_log_write(LOGFILE_ERROR,
|
||||
"%s: Expected LIKE clause in SHOW VARIABLES.",
|
||||
router->service->name)));
|
||||
}
|
||||
else if (strcasecmp(word, "LIKE") == 0)
|
||||
{
|
||||
if ((word = strtok_r(NULL, sep, &brkb)) == NULL)
|
||||
{
|
||||
LOGIF(LE, (skygw_log_write(LOGFILE_ERROR,
|
||||
"%s: Missing LIKE clause in SHOW VARIABLES.",
|
||||
router->service->name)));
|
||||
}
|
||||
else if (strcasecmp(word, "'SERVER_ID'") == 0)
|
||||
{
|
||||
free(query_text);
|
||||
return blr_slave_replay(router, slave, router->saved_master.server_id);
|
||||
@ -248,13 +299,55 @@ int query_len;
|
||||
free(query_text);
|
||||
return blr_slave_replay(router, slave, router->saved_master.uuid);
|
||||
}
|
||||
else if (strcasecmp(word, "'MAXSCALE%'") == 0)
|
||||
{
|
||||
free(query_text);
|
||||
return blr_slave_send_maxscale_variables(router, slave);
|
||||
}
|
||||
}
|
||||
}
|
||||
else if (strcasecmp(word, "MASTER") == 0)
|
||||
{
|
||||
if ((word = strtok_r(NULL, sep, &brkb)) == NULL)
|
||||
{
|
||||
LOGIF(LE, (skygw_log_write(LOGFILE_ERROR,
|
||||
"%s: Expected SHOW MASTER STATUS command",
|
||||
router->service->name)));
|
||||
}
|
||||
else if (strcasecmp(word, "STATUS") == 0)
|
||||
{
|
||||
free(query_text);
|
||||
return blr_slave_send_master_status(router, slave);
|
||||
}
|
||||
}
|
||||
else if (strcasecmp(word, "SLAVE") == 0)
|
||||
{
|
||||
if ((word = strtok_r(NULL, sep, &brkb)) == NULL)
|
||||
{
|
||||
LOGIF(LE, (skygw_log_write(LOGFILE_ERROR,
|
||||
"%s: Expected SHOW MASTER STATUS command",
|
||||
router->service->name)));
|
||||
}
|
||||
else if (strcasecmp(word, "STATUS") == 0)
|
||||
{
|
||||
free(query_text);
|
||||
return blr_slave_send_slave_status(router, slave);
|
||||
}
|
||||
else if (strcasecmp(word, "HOSTS") == 0)
|
||||
{
|
||||
free(query_text);
|
||||
return blr_slave_send_slave_hosts(router, slave);
|
||||
}
|
||||
}
|
||||
}
|
||||
else if (strcasecmp(query_text, "SET") == 0)
|
||||
{
|
||||
word = strtok_r(NULL, sep, &brkb);
|
||||
if (strcasecmp(word, "@master_heartbeat_period") == 0)
|
||||
if ((word = strtok_r(NULL, sep, &brkb)) == NULL)
|
||||
{
|
||||
LOGIF(LE, (skygw_log_write(LOGFILE_ERROR, "%s: Incomplete set command.",
|
||||
router->service->name)));
|
||||
}
|
||||
else if (strcasecmp(word, "@master_heartbeat_period") == 0)
|
||||
{
|
||||
free(query_text);
|
||||
return blr_slave_replay(router, slave, router->saved_master.heartbeat);
|
||||
@ -262,7 +355,7 @@ int query_len;
|
||||
else if (strcasecmp(word, "@master_binlog_checksum") == 0)
|
||||
{
|
||||
word = strtok_r(NULL, sep, &brkb);
|
||||
if (strcasecmp(word, "'none'") == 0)
|
||||
if (word && (strcasecmp(word, "'none'") == 0))
|
||||
slave->nocrc = 1;
|
||||
else
|
||||
slave->nocrc = 0;
|
||||
@ -278,8 +371,12 @@ int query_len;
|
||||
}
|
||||
else if (strcasecmp(word, "NAMES") == 0)
|
||||
{
|
||||
word = strtok_r(NULL, sep, &brkb);
|
||||
if (strcasecmp(word, "latin1") == 0)
|
||||
if ((word = strtok_r(NULL, sep, &brkb)) == NULL)
|
||||
{
|
||||
LOGIF(LE, (skygw_log_write(LOGFILE_ERROR, "%s: Truncated SET NAMES command.",
|
||||
router->service->name)));
|
||||
}
|
||||
else if (strcasecmp(word, "latin1") == 0)
|
||||
{
|
||||
free(query_text);
|
||||
return blr_slave_replay(router, slave, router->saved_master.setnames);
|
||||
@ -412,6 +509,480 @@ int len, ts_len;
|
||||
return slave->dcb->func.write(slave->dcb, pkt);
|
||||
}
|
||||
|
||||
/**
|
||||
* Send a response the the SQL command SELECT @@MAXSCALE_VERSION
|
||||
*
|
||||
* @param router The binlog router instance
|
||||
* @param slave The slave server to which we are sending the response
|
||||
* @return Non-zero if data was sent
|
||||
*/
|
||||
static int
|
||||
blr_slave_send_maxscale_version(ROUTER_INSTANCE *router, ROUTER_SLAVE *slave)
|
||||
{
|
||||
GWBUF *pkt;
|
||||
char version[40];
|
||||
uint8_t *ptr;
|
||||
int len, vers_len;
|
||||
|
||||
sprintf(version, "%s", MAXSCALE_VERSION);
|
||||
vers_len = strlen(version);
|
||||
blr_slave_send_fieldcount(router, slave, 1);
|
||||
blr_slave_send_columndef(router, slave, "MAXSCALE_VERSION", 0xf, vers_len, 2);
|
||||
blr_slave_send_eof(router, slave, 3);
|
||||
|
||||
len = 5 + vers_len;
|
||||
if ((pkt = gwbuf_alloc(len)) == NULL)
|
||||
return 0;
|
||||
ptr = GWBUF_DATA(pkt);
|
||||
encode_value(ptr, vers_len + 1, 24); // Add length of data packet
|
||||
ptr += 3;
|
||||
*ptr++ = 0x04; // Sequence number in response
|
||||
*ptr++ = vers_len; // Length of result string
|
||||
strncpy((char *)ptr, version, vers_len); // Result string
|
||||
ptr += vers_len;
|
||||
slave->dcb->func.write(slave->dcb, pkt);
|
||||
return blr_slave_send_eof(router, slave, 5);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Send the response to the SQL command "SHOW VARIABLES LIKE 'MAXSCALE%'
|
||||
*
|
||||
* @param router The binlog router instance
|
||||
* @param slave The slave server to which we are sending the response
|
||||
* @return Non-zero if data was sent
|
||||
*/
|
||||
static int
|
||||
blr_slave_send_maxscale_variables(ROUTER_INSTANCE *router, ROUTER_SLAVE *slave)
|
||||
{
|
||||
GWBUF *pkt;
|
||||
char name[40];
|
||||
char version[40];
|
||||
uint8_t *ptr;
|
||||
int len, vers_len, seqno = 2;
|
||||
|
||||
blr_slave_send_fieldcount(router, slave, 2);
|
||||
blr_slave_send_columndef(router, slave, "Variable_name", 0xf, 40, seqno++);
|
||||
blr_slave_send_columndef(router, slave, "value", 0xf, 40, seqno++);
|
||||
blr_slave_send_eof(router, slave, seqno++);
|
||||
|
||||
sprintf(version, "%s", MAXSCALE_VERSION);
|
||||
vers_len = strlen(version);
|
||||
strcpy(name, "MAXSCALE_VERSION");
|
||||
len = 5 + vers_len + strlen(name) + 1;
|
||||
if ((pkt = gwbuf_alloc(len)) == NULL)
|
||||
return 0;
|
||||
ptr = GWBUF_DATA(pkt);
|
||||
encode_value(ptr, vers_len + 2 + strlen(name), 24); // Add length of data packet
|
||||
ptr += 3;
|
||||
*ptr++ = seqno++; // Sequence number in response
|
||||
*ptr++ = strlen(name); // Length of result string
|
||||
strncpy((char *)ptr, name, strlen(name)); // Result string
|
||||
ptr += strlen(name);
|
||||
*ptr++ = vers_len; // Length of result string
|
||||
strncpy((char *)ptr, version, vers_len); // Result string
|
||||
ptr += vers_len;
|
||||
slave->dcb->func.write(slave->dcb, pkt);
|
||||
|
||||
return blr_slave_send_eof(router, slave, seqno++);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Send the response to the SQL command "SHOW MASTER STATUS"
|
||||
*
|
||||
* @param router The binlog router instance
|
||||
* @param slave The slave server to which we are sending the response
|
||||
* @return Non-zero if data was sent
|
||||
*/
|
||||
static int
|
||||
blr_slave_send_master_status(ROUTER_INSTANCE *router, ROUTER_SLAVE *slave)
|
||||
{
|
||||
GWBUF *pkt;
|
||||
char file[40];
|
||||
char position[40];
|
||||
uint8_t *ptr;
|
||||
int len, file_len;
|
||||
|
||||
blr_slave_send_fieldcount(router, slave, 5);
|
||||
blr_slave_send_columndef(router, slave, "File", 0xf, 40, 2);
|
||||
blr_slave_send_columndef(router, slave, "Position", 0xf, 40, 3);
|
||||
blr_slave_send_columndef(router, slave, "Binlog_Do_DB", 0xf, 40, 4);
|
||||
blr_slave_send_columndef(router, slave, "Binlog_Ignore_DB", 0xf, 40, 5);
|
||||
blr_slave_send_columndef(router, slave, "Execute_Gtid_Set", 0xf, 40, 6);
|
||||
blr_slave_send_eof(router, slave, 7);
|
||||
|
||||
sprintf(file, "%s", router->binlog_name);
|
||||
file_len = strlen(file);
|
||||
sprintf(position, "%d", router->binlog_position);
|
||||
len = 5 + file_len + strlen(position) + 1 + 3;
|
||||
if ((pkt = gwbuf_alloc(len)) == NULL)
|
||||
return 0;
|
||||
ptr = GWBUF_DATA(pkt);
|
||||
encode_value(ptr, len - 4, 24); // Add length of data packet
|
||||
ptr += 3;
|
||||
*ptr++ = 0x08; // Sequence number in response
|
||||
*ptr++ = strlen(file); // Length of result string
|
||||
strncpy((char *)ptr, file, strlen(file)); // Result string
|
||||
ptr += strlen(file);
|
||||
*ptr++ = strlen(position); // Length of result string
|
||||
strncpy((char *)ptr, position, strlen(position)); // Result string
|
||||
ptr += strlen(position);
|
||||
*ptr++ = 0; // Send 3 empty values
|
||||
*ptr++ = 0;
|
||||
*ptr++ = 0;
|
||||
slave->dcb->func.write(slave->dcb, pkt);
|
||||
return blr_slave_send_eof(router, slave, 9);
|
||||
}
|
||||
|
||||
/*
|
||||
* Columns to send for a "SHOW SLAVE STATUS" command
|
||||
*/
|
||||
static char *slave_status_columns[] = {
|
||||
"Slave_IO_State", "Master_Host", "Master_User", "Master_Port", "Connect_Retry",
|
||||
"Master_Log_File", "Read_Master_Log_Pos", "Relay_Log_File", "Relay_Log_Pos",
|
||||
"Relay_Master_Log_File", "Slave_IO_Running", "Slave_SQL_Running", "Replicate_Do_DB",
|
||||
"Replicate_Ignore_DB", "Replicate_Do_Table",
|
||||
"Replicate_Ignore_Table", "Replicate_Wild_Do_Table", "Replicate_Wild_Ignore_Table",
|
||||
"Last_Errno", "Last_Error", "Skip_Counter", "Exec_Master_Log_Pos", "Relay_Log_Space",
|
||||
"Until_Condition", "Until_Log_File", "Until_Log_Pos", "Master_SSL_Allowed",
|
||||
"Master_SSL_CA_File", "Master_SSL_CA_Path", "Master_SSL_Cert", "Master_SSL_Cipher",
|
||||
"Master_SSL_Key",
|
||||
"Seconds_Behind_Master", "Last_IO_Errno", "Last_IO_Error", "Last_SQL_Errno",
|
||||
"Last_SQL_Error", "Replicate_Ignore_Server_Ids", "Master_Server_Id", "Master_UUID",
|
||||
"Master_Info_File", "SQL_Delay", "SQL_Remaining_Delay", "Slave_SQL_Running_State",
|
||||
"Master_Retry_Count", "Master_Bind", "Last_IO_Error_TimeStamp",
|
||||
"Last_SQL_Error_Timestamp", "Master_SSL_Crl", "Master_SSL_Crlpath",
|
||||
"Retrieved_Gtid_Set", "Executed_Gtid_Set", "Auto_Position", NULL
|
||||
};
|
||||
|
||||
/**
|
||||
* Send the response to the SQL command "SHOW SLAVE STATUS"
|
||||
*
|
||||
* @param router The binlog router instance
|
||||
* @param slave The slave server to which we are sending the response
|
||||
* @return Non-zero if data was sent
|
||||
*/
|
||||
static int
|
||||
blr_slave_send_slave_status(ROUTER_INSTANCE *router, ROUTER_SLAVE *slave)
|
||||
{
|
||||
GWBUF *pkt;
|
||||
char column[42];
|
||||
uint8_t *ptr;
|
||||
int len, actual_len, col_len, seqno, ncols, i;
|
||||
|
||||
/* Count the columns */
|
||||
for (ncols = 0; slave_status_columns[ncols]; ncols++);
|
||||
|
||||
blr_slave_send_fieldcount(router, slave, ncols);
|
||||
seqno = 2;
|
||||
for (i = 0; slave_status_columns[i]; i++)
|
||||
blr_slave_send_columndef(router, slave, slave_status_columns[i], 0xf, 40, seqno++);
|
||||
blr_slave_send_eof(router, slave, seqno++);
|
||||
|
||||
len = 5 + (ncols * 41); // Max length
|
||||
if ((pkt = gwbuf_alloc(len)) == NULL)
|
||||
return 0;
|
||||
ptr = GWBUF_DATA(pkt);
|
||||
encode_value(ptr, len - 4, 24); // Add length of data packet
|
||||
ptr += 3;
|
||||
*ptr++ = seqno++; // Sequence number in response
|
||||
|
||||
sprintf(column, "%s", blrm_states[router->master_state]);
|
||||
col_len = strlen(column);
|
||||
*ptr++ = col_len; // Length of result string
|
||||
strncpy((char *)ptr, column, col_len); // Result string
|
||||
ptr += col_len;
|
||||
|
||||
sprintf(column, "%s", router->master->remote ? router->master->remote : "");
|
||||
col_len = strlen(column);
|
||||
*ptr++ = col_len; // Length of result string
|
||||
strncpy((char *)ptr, column, col_len); // Result string
|
||||
ptr += col_len;
|
||||
|
||||
sprintf(column, "%s", router->user ? router->user : "");
|
||||
col_len = strlen(column);
|
||||
*ptr++ = col_len; // Length of result string
|
||||
strncpy((char *)ptr, column, col_len); // Result string
|
||||
ptr += col_len;
|
||||
|
||||
sprintf(column, "%d", router->service->databases->port);
|
||||
col_len = strlen(column);
|
||||
*ptr++ = col_len; // Length of result string
|
||||
strncpy((char *)ptr, column, col_len); // Result string
|
||||
ptr += col_len;
|
||||
|
||||
sprintf(column, "%d", 60); // Connect retry
|
||||
col_len = strlen(column);
|
||||
*ptr++ = col_len; // Length of result string
|
||||
strncpy((char *)ptr, column, col_len); // Result string
|
||||
ptr += col_len;
|
||||
|
||||
sprintf(column, "%s", router->binlog_name);
|
||||
col_len = strlen(column);
|
||||
*ptr++ = col_len; // Length of result string
|
||||
strncpy((char *)ptr, column, col_len); // Result string
|
||||
ptr += col_len;
|
||||
|
||||
sprintf(column, "%ld", router->binlog_position);
|
||||
col_len = strlen(column);
|
||||
*ptr++ = col_len; // Length of result string
|
||||
strncpy((char *)ptr, column, col_len); // Result string
|
||||
ptr += col_len;
|
||||
|
||||
/* We have no relay log, we relay the binlog, so we will send the same data */
|
||||
sprintf(column, "%s", router->binlog_name);
|
||||
col_len = strlen(column);
|
||||
*ptr++ = col_len; // Length of result string
|
||||
strncpy((char *)ptr, column, col_len); // Result string
|
||||
ptr += col_len;
|
||||
|
||||
sprintf(column, "%ld", router->binlog_position);
|
||||
col_len = strlen(column);
|
||||
*ptr++ = col_len; // Length of result string
|
||||
strncpy((char *)ptr, column, col_len); // Result string
|
||||
ptr += col_len;
|
||||
|
||||
/* We have no relay log, we relay the binlog, so we will send the same data */
|
||||
sprintf(column, "%s", router->binlog_name);
|
||||
col_len = strlen(column);
|
||||
*ptr++ = col_len; // Length of result string
|
||||
strncpy((char *)ptr, column, col_len); // Result string
|
||||
ptr += col_len;
|
||||
|
||||
strcpy(column, "Yes");
|
||||
col_len = strlen(column);
|
||||
*ptr++ = col_len; // Length of result string
|
||||
strncpy((char *)ptr, column, col_len); // Result string
|
||||
ptr += col_len;
|
||||
|
||||
strcpy(column, "Yes");
|
||||
col_len = strlen(column);
|
||||
*ptr++ = col_len; // Length of result string
|
||||
strncpy((char *)ptr, column, col_len); // Result string
|
||||
ptr += col_len;
|
||||
|
||||
*ptr++ = 0; // Send 6 empty values
|
||||
*ptr++ = 0;
|
||||
*ptr++ = 0;
|
||||
*ptr++ = 0;
|
||||
*ptr++ = 0;
|
||||
*ptr++ = 0;
|
||||
|
||||
/* Last error information */
|
||||
sprintf(column, "%d", 0);
|
||||
col_len = strlen(column);
|
||||
*ptr++ = col_len; // Length of result string
|
||||
strncpy((char *)ptr, column, col_len); // Result string
|
||||
ptr += col_len;
|
||||
|
||||
*ptr++ = 0;
|
||||
|
||||
/* Skip_Counter */
|
||||
sprintf(column, "%d", 0);
|
||||
col_len = strlen(column);
|
||||
*ptr++ = col_len; // Length of result string
|
||||
strncpy((char *)ptr, column, col_len); // Result string
|
||||
ptr += col_len;
|
||||
|
||||
sprintf(column, "%ld", router->binlog_position);
|
||||
col_len = strlen(column);
|
||||
*ptr++ = col_len; // Length of result string
|
||||
strncpy((char *)ptr, column, col_len); // Result string
|
||||
ptr += col_len;
|
||||
|
||||
sprintf(column, "%ld", router->binlog_position);
|
||||
col_len = strlen(column);
|
||||
*ptr++ = col_len; // Length of result string
|
||||
strncpy((char *)ptr, column, col_len); // Result string
|
||||
ptr += col_len;
|
||||
|
||||
strcpy(column, "None");
|
||||
col_len = strlen(column);
|
||||
*ptr++ = col_len; // Length of result string
|
||||
strncpy((char *)ptr, column, col_len); // Result string
|
||||
ptr += col_len;
|
||||
|
||||
*ptr++ = 0;
|
||||
|
||||
/* Until_Log_Pos */
|
||||
sprintf(column, "%d", 0);
|
||||
col_len = strlen(column);
|
||||
*ptr++ = col_len; // Length of result string
|
||||
strncpy((char *)ptr, column, col_len); // Result string
|
||||
ptr += col_len;
|
||||
|
||||
/* Master_SSL_Allowed */
|
||||
strcpy(column, "No");
|
||||
col_len = strlen(column);
|
||||
*ptr++ = col_len; // Length of result string
|
||||
strncpy((char *)ptr, column, col_len); // Result string
|
||||
ptr += col_len;
|
||||
|
||||
*ptr++ = 0; // Empty SSL columns
|
||||
*ptr++ = 0;
|
||||
*ptr++ = 0;
|
||||
*ptr++ = 0;
|
||||
*ptr++ = 0;
|
||||
|
||||
/* Seconds_Behind_Master */
|
||||
sprintf(column, "%d", 0);
|
||||
col_len = strlen(column);
|
||||
*ptr++ = col_len; // Length of result string
|
||||
strncpy((char *)ptr, column, col_len); // Result string
|
||||
ptr += col_len;
|
||||
|
||||
/* Master_SSL_Verify_Server_Cert */
|
||||
strcpy(column, "No");
|
||||
col_len = strlen(column);
|
||||
*ptr++ = col_len; // Length of result string
|
||||
strncpy((char *)ptr, column, col_len); // Result string
|
||||
ptr += col_len;
|
||||
|
||||
/* Last_IO_Error */
|
||||
sprintf(column, "%d", 0);
|
||||
col_len = strlen(column);
|
||||
*ptr++ = col_len; // Length of result string
|
||||
strncpy((char *)ptr, column, col_len); // Result string
|
||||
ptr += col_len;
|
||||
|
||||
*ptr++ = 0;
|
||||
|
||||
/* Last_SQL_Error */
|
||||
sprintf(column, "%d", 0);
|
||||
col_len = strlen(column);
|
||||
*ptr++ = col_len; // Length of result string
|
||||
strncpy((char *)ptr, column, col_len); // Result string
|
||||
ptr += col_len;
|
||||
|
||||
*ptr++ = 0;
|
||||
|
||||
*ptr++ = 0;
|
||||
|
||||
sprintf(column, "%s", router->uuid);
|
||||
col_len = strlen(column);
|
||||
*ptr++ = col_len; // Length of result string
|
||||
strncpy((char *)ptr, column, col_len); // Result string
|
||||
ptr += col_len;
|
||||
|
||||
*ptr++ = 0;
|
||||
|
||||
/* SQL_Delay*/
|
||||
sprintf(column, "%d", 0);
|
||||
col_len = strlen(column);
|
||||
*ptr++ = col_len; // Length of result string
|
||||
strncpy((char *)ptr, column, col_len); // Result string
|
||||
ptr += col_len;
|
||||
|
||||
*ptr++ = 0xfb; // NULL value
|
||||
|
||||
/* Slave_Running_State */
|
||||
strcpy(column, "Slave running");
|
||||
col_len = strlen(column);
|
||||
*ptr++ = col_len; // Length of result string
|
||||
strncpy((char *)ptr, column, col_len); // Result string
|
||||
ptr += col_len;
|
||||
|
||||
/* Master_Retry_Count */
|
||||
sprintf(column, "%d", 1000);
|
||||
col_len = strlen(column);
|
||||
*ptr++ = col_len; // Length of result string
|
||||
strncpy((char *)ptr, column, col_len); // Result string
|
||||
ptr += col_len;
|
||||
|
||||
*ptr++ = 0; // Send 5 empty values
|
||||
*ptr++ = 0;
|
||||
*ptr++ = 0;
|
||||
*ptr++ = 0;
|
||||
*ptr++ = 0;
|
||||
|
||||
// No GTID support send empty values
|
||||
*ptr++ = 0;
|
||||
*ptr++ = 0;
|
||||
*ptr++ = 0;
|
||||
*ptr++ = 0;
|
||||
|
||||
actual_len = ptr - (uint8_t *)GWBUF_DATA(pkt);
|
||||
ptr = GWBUF_DATA(pkt);
|
||||
encode_value(ptr, actual_len - 4, 24); // Add length of data packet
|
||||
|
||||
pkt = gwbuf_rtrim(pkt, len - actual_len); // Trim the buffer to the actual size
|
||||
|
||||
slave->dcb->func.write(slave->dcb, pkt);
|
||||
return blr_slave_send_eof(router, slave, seqno++);
|
||||
}
|
||||
|
||||
/**
|
||||
* Send the response to the SQL command "SHOW SLAVE HOSTS"
|
||||
*
|
||||
* @param router The binlog router instance
|
||||
* @param slave The slave server to which we are sending the response
|
||||
* @return Non-zero if data was sent
|
||||
*/
|
||||
static int
|
||||
blr_slave_send_slave_hosts(ROUTER_INSTANCE *router, ROUTER_SLAVE *slave)
|
||||
{
|
||||
GWBUF *pkt;
|
||||
char server_id[40];
|
||||
char host[40];
|
||||
char port[40];
|
||||
char master_id[40];
|
||||
char slave_uuid[40];
|
||||
uint8_t *ptr;
|
||||
int len, seqno;
|
||||
ROUTER_SLAVE *sptr;
|
||||
|
||||
blr_slave_send_fieldcount(router, slave, 5);
|
||||
blr_slave_send_columndef(router, slave, "Server_id", 0xf, 40, 2);
|
||||
blr_slave_send_columndef(router, slave, "Host", 0xf, 40, 3);
|
||||
blr_slave_send_columndef(router, slave, "Port", 0xf, 40, 4);
|
||||
blr_slave_send_columndef(router, slave, "Master_id", 0xf, 40, 5);
|
||||
blr_slave_send_columndef(router, slave, "Slave_UUID", 0xf, 40, 6);
|
||||
blr_slave_send_eof(router, slave, 7);
|
||||
|
||||
seqno = 8;
|
||||
spinlock_acquire(&router->lock);
|
||||
sptr = router->slaves;
|
||||
while (sptr)
|
||||
{
|
||||
if (sptr->state != 0)
|
||||
{
|
||||
sprintf(server_id, "%d", sptr->serverid);
|
||||
sprintf(host, "%s", sptr->hostname ? sptr->hostname : "");
|
||||
sprintf(port, "%d", sptr->port);
|
||||
sprintf(master_id, "%d", router->serverid);
|
||||
sprintf(slave_uuid, "%s", sptr->uuid ? sptr->uuid : "");
|
||||
len = 5 + strlen(server_id) + strlen(host) + strlen(port)
|
||||
+ strlen(master_id) + strlen(slave_uuid) + 5;
|
||||
if ((pkt = gwbuf_alloc(len)) == NULL)
|
||||
return 0;
|
||||
ptr = GWBUF_DATA(pkt);
|
||||
encode_value(ptr, len - 4, 24); // Add length of data packet
|
||||
ptr += 3;
|
||||
*ptr++ = seqno++; // Sequence number in response
|
||||
*ptr++ = strlen(server_id); // Length of result string
|
||||
strncpy((char *)ptr, server_id, strlen(server_id)); // Result string
|
||||
ptr += strlen(server_id);
|
||||
*ptr++ = strlen(host); // Length of result string
|
||||
strncpy((char *)ptr, host, strlen(host)); // Result string
|
||||
ptr += strlen(host);
|
||||
*ptr++ = strlen(port); // Length of result string
|
||||
strncpy((char *)ptr, port, strlen(port)); // Result string
|
||||
ptr += strlen(port);
|
||||
*ptr++ = strlen(master_id); // Length of result string
|
||||
strncpy((char *)ptr, master_id, strlen(master_id)); // Result string
|
||||
ptr += strlen(master_id);
|
||||
*ptr++ = strlen(slave_uuid); // Length of result string
|
||||
strncpy((char *)ptr, slave_uuid, strlen(slave_uuid)); // Result string
|
||||
ptr += strlen(slave_uuid);
|
||||
slave->dcb->func.write(slave->dcb, pkt);
|
||||
}
|
||||
sptr = sptr->next;
|
||||
}
|
||||
spinlock_release(&router->lock);
|
||||
return blr_slave_send_eof(router, slave, seqno);
|
||||
}
|
||||
|
||||
/**
|
||||
* Process a slave replication registration message.
|
||||
*
|
||||
@ -685,7 +1256,7 @@ uint8_t *ptr;
|
||||
* call. The paramter "long" control the number of events in the burst. The
|
||||
* short burst is intended to be used when the master receive an event and
|
||||
* needs to put the slave into catchup mode. This prevents the slave taking
|
||||
* too much tiem away from the thread that is processing the master events.
|
||||
* too much time away from the thread that is processing the master events.
|
||||
*
|
||||
* At the end of the burst a fake EPOLLOUT event is added to the poll event
|
||||
* queue. This ensures that the slave callback for processing DCB write drain
|
||||
@ -880,9 +1451,10 @@ if (hkheartbeat - beat1 > 1) LOGIF(LE, (skygw_log_write(
|
||||
* we ignore these issues during the rotate processing.
|
||||
*/
|
||||
LOGIF(LE, (skygw_log_write(LOGFILE_ERROR,
|
||||
"Slave reached end of file for binlong file %s at %u "
|
||||
"Slave reached end of file for binlog file %s at %u "
|
||||
"which is not the file currently being downloaded. "
|
||||
"Master binlog is %s, %lu.",
|
||||
"Master binlog is %s, %lu. This may be caused by a "
|
||||
"previous failure of the master.",
|
||||
slave->binlogfile, slave->binlog_pos,
|
||||
router->binlog_name, router->binlog_position)));
|
||||
if (blr_slave_fake_rotate(router, slave))
|
||||
@ -1008,11 +1580,7 @@ uint32_t chksum;
|
||||
return 0;
|
||||
|
||||
binlognamelen = strlen(slave->binlogfile);
|
||||
|
||||
if (slave->nocrc)
|
||||
len = 19 + 8 + binlognamelen;
|
||||
else
|
||||
len = 19 + 8 + 4 + binlognamelen;
|
||||
len = 19 + 8 + 4 + binlognamelen;
|
||||
|
||||
// Build a fake rotate event
|
||||
resp = gwbuf_alloc(len + 5);
|
||||
@ -1031,20 +1599,17 @@ uint32_t chksum;
|
||||
memcpy(ptr, slave->binlogfile, binlognamelen);
|
||||
ptr += binlognamelen;
|
||||
|
||||
if (!slave->nocrc)
|
||||
{
|
||||
/*
|
||||
* Now add the CRC to the fake binlog rotate event.
|
||||
*
|
||||
* The algorithm is first to compute the checksum of an empty buffer
|
||||
* and then the checksum of the event portion of the message, ie we do not
|
||||
* include the length, sequence number and ok byte that makes up the first
|
||||
* 5 bytes of the message. We also do not include the 4 byte checksum itself.
|
||||
*/
|
||||
chksum = crc32(0L, NULL, 0);
|
||||
chksum = crc32(chksum, GWBUF_DATA(resp) + 5, hdr.event_size - 4);
|
||||
encode_value(ptr, chksum, 32);
|
||||
}
|
||||
/*
|
||||
* Now add the CRC to the fake binlog rotate event.
|
||||
*
|
||||
* The algorithm is first to compute the checksum of an empty buffer
|
||||
* and then the checksum of the event portion of the message, ie we do not
|
||||
* include the length, sequence number and ok byte that makes up the first
|
||||
* 5 bytes of the message. We also do not include the 4 byte checksum itself.
|
||||
*/
|
||||
chksum = crc32(0L, NULL, 0);
|
||||
chksum = crc32(chksum, GWBUF_DATA(resp) + 5, hdr.event_size - 4);
|
||||
encode_value(ptr, chksum, 32);
|
||||
|
||||
slave->dcb->func.write(slave->dcb, resp);
|
||||
return 1;
|
||||
@ -1153,7 +1718,7 @@ uint8_t *ptr;
|
||||
*ptr++ = 'e';
|
||||
*ptr++ = 'f';
|
||||
*ptr++ = 0; // Schema name length
|
||||
*ptr++ = 0; // virtal table name length
|
||||
*ptr++ = 0; // virtual table name length
|
||||
*ptr++ = 0; // Table name length
|
||||
*ptr++ = strlen(name); // Column name length;
|
||||
while (*name)
|
||||
@ -1175,3 +1740,31 @@ uint8_t *ptr;
|
||||
*ptr++= 0;
|
||||
return slave->dcb->func.write(slave->dcb, pkt);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Send an EOF packet in a response packet sequence.
|
||||
*
|
||||
* @param router The router
|
||||
* @param slave The slave connection
|
||||
* @param seqno The sequence number of the EOF packet
|
||||
* @return Non-zero on success
|
||||
*/
|
||||
static int
|
||||
blr_slave_send_eof(ROUTER_INSTANCE *router, ROUTER_SLAVE *slave, int seqno)
|
||||
{
|
||||
GWBUF *pkt;
|
||||
uint8_t *ptr;
|
||||
|
||||
if ((pkt = gwbuf_alloc(9)) == NULL)
|
||||
return 0;
|
||||
ptr = GWBUF_DATA(pkt);
|
||||
encode_value(ptr, 5, 24); // Add length of data packet
|
||||
ptr += 3;
|
||||
*ptr++ = seqno; // Sequence number in response
|
||||
*ptr++ = 0xfe; // Length of result string
|
||||
encode_value(ptr, 0, 16); // No errors
|
||||
ptr += 2;
|
||||
encode_value(ptr, 2, 16); // Autocommit enabled
|
||||
return slave->dcb->func.write(slave->dcb, pkt);
|
||||
}
|
||||
|