Merge branch 'develop' into MAX-324

Author: Markus Makela
Date:   2015-01-29 20:36:26 +02:00
17 changed files with 384 additions and 158 deletions


@ -20,7 +20,7 @@ check_deps()
check_dirs()
find_package(Valgrind)
find_package(MySQLClient)
find_package(MySQLConfig)
find_package(MySQL)
set(CMAKE_INSTALL_RPATH ${CMAKE_INSTALL_RPATH}:${CMAKE_INSTALL_PREFIX}/lib:${CMAKE_INSTALL_PREFIX}/modules)
@ -30,7 +30,7 @@ configure_file(${CMAKE_SOURCE_DIR}/maxscale.conf.in ${CMAKE_BINARY_DIR}/maxscale
configure_file(${CMAKE_SOURCE_DIR}/etc/init.d/maxscale.in ${CMAKE_BINARY_DIR}/etc/init.d/maxscale.prep @ONLY)
configure_file(${CMAKE_SOURCE_DIR}/etc/ubuntu/init.d/maxscale.in ${CMAKE_BINARY_DIR}/etc/ubuntu/init.d/maxscale.prep @ONLY)
configure_file(${CMAKE_SOURCE_DIR}/server/test/maxscale_test.h.in ${CMAKE_BINARY_DIR}/server/include/maxscale_test.h)
configure_file(${CMAKE_SOURCE_DIR}/postinstall.sh.in ${CMAKE_BINARY_DIR}/postinstall.sh)
set(CMAKE_C_FLAGS "-Wall -fPIC")
set(CMAKE_CXX_FLAGS "-Wall -fPIC")
@ -110,11 +110,11 @@ endif()
# Install startup scripts and ldconfig files
if( NOT ( (DEFINED INSTALL_SYSTEM_FILES) AND ( NOT ( INSTALL_SYSTEM_FILES ) ) ) )
install(FILES ${CMAKE_BINARY_DIR}/maxscale.conf.prep RENAME maxscale.conf DESTINATION /etc/ld.so.conf.d/ PERMISSIONS WORLD_EXECUTE WORLD_READ)
install(FILES ${CMAKE_BINARY_DIR}/maxscale.conf.prep RENAME maxscale.conf DESTINATION .)
if(DEB_BASED)
install(FILES ${CMAKE_BINARY_DIR}/etc/ubuntu/init.d/maxscale.prep RENAME maxscale DESTINATION /etc/init.d/ PERMISSIONS WORLD_EXECUTE)
install(FILES ${CMAKE_BINARY_DIR}/etc/ubuntu/init.d/maxscale.prep RENAME maxscale DESTINATION .)
else()
install(FILES ${CMAKE_BINARY_DIR}/etc/init.d/maxscale.prep RENAME maxscale DESTINATION /etc/init.d/ PERMISSIONS WORLD_EXECUTE)
install(FILES ${CMAKE_BINARY_DIR}/etc/init.d/maxscale.prep RENAME maxscale DESTINATION .)
endif()
message(STATUS "Installing maxscale.conf to: /etc/ld.so.conf.d")
message(STATUS "Installing startup scripts to: /etc/init.d")
@ -126,10 +126,10 @@ message(STATUS "Installing MaxScale to: ${CMAKE_INSTALL_PREFIX}/")
install(FILES server/MaxScale_template.cnf DESTINATION etc)
install(FILES ${ERRMSG} DESTINATION mysql)
install(FILES ${DOCS} DESTINATION Documentation)
install(FILES ${CMAKE_SOURCE_DIR}/COPYRIGHT DESTINATION ${CMAKE_INSTALL_PREFIX}/)
install(FILES ${CMAKE_SOURCE_DIR}/README DESTINATION ${CMAKE_INSTALL_PREFIX}/)
install(FILES ${CMAKE_SOURCE_DIR}/LICENSE DESTINATION ${CMAKE_INSTALL_PREFIX}/)
install(FILES ${CMAKE_SOURCE_DIR}/SETUP DESTINATION ${CMAKE_INSTALL_PREFIX}/)
install(FILES ${CMAKE_SOURCE_DIR}/COPYRIGHT DESTINATION .)
install(FILES ${CMAKE_SOURCE_DIR}/README DESTINATION .)
install(FILES ${CMAKE_SOURCE_DIR}/LICENSE DESTINATION .)
install(FILES ${CMAKE_SOURCE_DIR}/SETUP DESTINATION .)
install(DIRECTORY DESTINATION log)
if(${CMAKE_VERSION} VERSION_LESS 2.8.12)
@ -162,7 +162,9 @@ else()
set(CPACK_PACKAGE_VENDOR "MariaDB Corporation Ab")
set(CPACK_PACKAGE_DESCRIPTION_FILE ${CMAKE_SOURCE_DIR}/README)
set(CPACK_PACKAGING_INSTALL_PREFIX "${CMAKE_INSTALL_PREFIX}")
set(CPACK_RPM_SPEC_INSTALL_POST "/sbin/ldconfig")
set(CPACK_DEBIAN_PACKAGE_CONTROL_EXTRA ${CMAKE_BINARY_DIR}/postinstall.sh)
set(CPACK_RPM_POST_INSTALL_SCRIPT_FILE ${CMAKE_BINARY_DIR}/postinstall.sh)
set(CPACK_RPM_POST_UNINSTALL_SCRIPT_FILE ${CMAKE_SOURCE_DIR}/postuninstall.sh)
set(CPACK_RPM_PACKAGE_NAME "maxscale")
set(CPACK_RPM_PACKAGE_VENDOR "MariaDB Corporation Ab")
set(CPACK_RPM_PACKAGE_LICENSE "GPLv2")
@ -185,9 +187,7 @@ add_custom_target(testall
COMMAND ${CMAKE_COMMAND} -DDEPS_OK=Y -DBUILD_TESTS=Y -DBUILD_TYPE=Debug -DCMAKE_INSTALL_PREFIX=${CMAKE_BINARY_DIR} -DINSTALL_SYSTEM_FILES=N ${CMAKE_SOURCE_DIR}
COMMAND make install
COMMAND ${CMAKE_COMMAND} -E copy_if_different ${CMAKE_SOURCE_DIR}/server/test/MaxScale_test.cnf ${CMAKE_BINARY_DIR}/etc/MaxScale.cnf
COMMAND /bin/sh -c "${CMAKE_BINARY_DIR}/bin/maxscale -c ${CMAKE_BINARY_DIR} &>/dev/null"
COMMAND /bin/sh -c "make test || echo \"Test results written to: ${CMAKE_BINARY_DIR}/Testing/Temporary/\""
COMMAND killall maxscale
COMMAND ${CMAKE_COMMAND} -P ${CMAKE_SOURCE_DIR}/testall.cmake
COMMENT "Running full test suite..." VERBATIM)
# uninstall target

FindMySQL.cmake (new file, +37)

@ -0,0 +1,37 @@
# This CMake file tries to find the mysql_version.h header
# and to parse it for version and provider strings
# The following variables are set:
# MYSQL_VERSION - The MySQL version number
# MYSQL_PROVIDER - The MySQL provider e.g. MariaDB
find_file(MYSQL_VERSION_H mysql_version.h PATH_SUFFIXES mysql)
if(MYSQL_VERSION_H MATCHES "MYSQL_VERSION_H-NOTFOUND")
message(FATAL_ERROR "Cannot find the mysql_version.h header")
else()
message(STATUS "Found mysql_version.h: ${MYSQL_VERSION_H}")
endif()
file(READ ${MYSQL_VERSION_H} MYSQL_VERSION_CONTENTS)
string(REGEX REPLACE ".*MYSQL_SERVER_VERSION[^0-9.]+([0-9.]+).*" "\\1" MYSQL_VERSION ${MYSQL_VERSION_CONTENTS})
string(REGEX REPLACE ".*MYSQL_COMPILATION_COMMENT.+\"(.+)\".*" "\\1" MYSQL_PROVIDER ${MYSQL_VERSION_CONTENTS})
string(TOLOWER ${MYSQL_PROVIDER} MYSQL_PROVIDER)
if(MYSQL_PROVIDER MATCHES "mariadb")
set(MYSQL_PROVIDER "MariaDB" CACHE INTERNAL "The MySQL provider")
elseif(MYSQL_PROVIDER MATCHES "mysql")
set(MYSQL_PROVIDER "MySQL" CACHE INTERNAL "The MySQL provider")
elseif(MYSQL_PROVIDER MATCHES "percona")
set(MYSQL_PROVIDER "Percona" CACHE INTERNAL "The MySQL provider")
else()
set(MYSQL_PROVIDER "Unknown" CACHE INTERNAL "The MySQL provider")
endif()
message(STATUS "MySQL version: ${MYSQL_VERSION}")
message(STATUS "MySQL provider: ${MYSQL_PROVIDER}")
if(NOT MYSQL_PROVIDER STREQUAL "MariaDB")
message(WARNING "Not using MariaDB server.")
endif()
if(MYSQL_VERSION VERSION_LESS 5.5.41)
message(WARNING "MySQL version is ${MYSQL_VERSION}. Minimum supported version is 5.5.41")
endif()
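
For reference, a minimal sketch of the kind of mysql_version.h content the two regular expressions above are written against; the values are illustrative only and vary by distribution and server version:

/* Illustrative excerpt of mysql_version.h (example values, not from this commit) */
#define MYSQL_SERVER_VERSION       "5.5.41-MariaDB"
#define MYSQL_COMPILATION_COMMENT  "MariaDB Server"
/* The first regex captures "5.5.41" into MYSQL_VERSION; the second captures
 * "MariaDB Server" into MYSQL_PROVIDER, which is then lower-cased and matched
 * against "mariadb", "mysql" or "percona" as shown above. */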


@ -1,20 +0,0 @@
# This CMake file tries to find the the MySQL configuration tool
# The following variables are set:
# MYSQLCONFIG_FOUND - System has MySQL and the tool was found
# MYSQLCONFIG_EXECUTABLE - The MySQL configuration tool executable
# MYSQL_VERSION - The MySQL version number
find_program(MYSQLCONFIG_EXECUTABLE mysql_config)
if(MYSQLCONFIG_EXECUTABLE MATCHES "MYSQLCONFIG_EXECUTABLE-NOTFOUND")
message(FATAL_ERROR "Cannot find mysql_config.")
set(MYSQLCONFIG_FOUND FALSE CACHE INTERNAL "")
unset(MYSQLCONFIG_EXECUTABLE)
else()
execute_process(COMMAND ${MYSQLCONFIG_EXECUTABLE} --version OUTPUT_VARIABLE MYSQL_VERSION)
string(REPLACE "\n" "" MYSQL_VERSION ${MYSQL_VERSION})
message(STATUS "mysql_config found: ${MYSQLCONFIG_EXECUTABLE}")
message(STATUS "MySQL version: ${MYSQL_VERSION}")
if(MYSQL_VERSION VERSION_LESS 5.5.40)
message(WARNING "Required MySQL version is 5.5.40 or greater.")
endif()
set(MYSQLCONFIG_FOUND TRUE CACHE INTERNAL "")
endif()

postinstall.sh.in (new executable file, +5)

@ -0,0 +1,5 @@
#!/bin/sh
install @CMAKE_INSTALL_PREFIX@/maxscale /etc/init.d/
install @CMAKE_INSTALL_PREFIX@/maxscale.conf /etc/ld.so.conf.d/
/sbin/ldconfig

postuninstall.sh (new executable file, +3)

@ -0,0 +1,3 @@
#!/bin/sh
rm /etc/init.d/maxscale
rm /etc/ld.so.conf.d/maxscale.conf


@ -87,7 +87,7 @@
#define LOAD_MYSQL_USERS_WITH_DB_QUERY_NO_ROOT "SELECT * FROM (" LOAD_MYSQL_USERS_WITH_DB_QUERY ") AS t1 WHERE user NOT IN ('root')" MYSQL_USERS_WITH_DB_ORDER
#define LOAD_MYSQL_DATABASE_NAMES "SELECT * FROM ( (SELECT COUNT(1) AS ndbs FROM INFORMATION_SCHEMA.SCHEMATA) AS tbl1, (SELECT GRANTEE,PRIVILEGE_TYPE from INFORMATION_SCHEMA.USER_PRIVILEGES WHERE privilege_type='SHOW DATABASES' AND REPLACE(GRANTEE, \"\'\",\"\")=CURRENT_USER()) AS tbl2)"
#define LOAD_MYSQL_DATABASE_NAMES "SELECT * FROM ( (SELECT COUNT(1) AS ndbs FROM INFORMATION_SCHEMA.SCHEMATA) AS tbl1, (SELECT GRANTEE,PRIVILEGE_TYPE from INFORMATION_SCHEMA.USER_PRIVILEGES WHERE privilege_type='SHOW DATABASES' AND REPLACE(GRANTEE, \'\\'\',\'\')=CURRENT_USER()) AS tbl2)"
/** Defined in log_manager.cc */
extern int lm_enabled_logfiles_bitmask;


@ -651,7 +651,7 @@ int rc;
}
memcpy(&(dcb->func), funcs, sizeof(GWPROTOCOL));
/*<
/**
* Link dcb to session. Unlink is called in dcb_final_free
*/
if (!session_link_dcb(session, dcb))
@ -693,7 +693,7 @@ int rc;
session->client->fd)));
}
ss_dassert(dcb->fd == DCBFD_CLOSED); /*< must be uninitialized at this point */
/*<
/**
* Successfully connected to backend. Assign file descriptor to dcb
*/
dcb->fd = fd;
@ -701,14 +701,14 @@ int rc;
dcb->dcb_server_status = server->status;
ss_debug(dcb->dcb_port = server->port;)
/*<
/**
* backend_dcb is connected to backend server, and once backend_dcb
* is added to poll set, authentication takes place as part of
* EPOLLOUT event that will be received once the connection
* is established.
*/
/*<
/**
* Add the dcb in the poll set
*/
rc = poll_add_dcb(dcb);
@ -718,7 +718,7 @@ int rc;
dcb_final_free(dcb);
return NULL;
}
/*<
/**
* The dcb will be addded into poll set by dcb->func.connect
*/
atomic_add(&server->stats.n_connections, 1);
@ -1218,7 +1218,7 @@ dcb_close(DCB *dcb)
"%lu [dcb_close]",
pthread_self())));
/*<
/**
* dcb_close may be called for freshly created dcb, in which case
* it only needs to be freed.
*/


@ -1177,6 +1177,7 @@ int main(int argc, char **argv)
case 'v':
rc = EXIT_SUCCESS;
printf("%s\n",MAXSCALE_VERSION);
goto return_main;
case 'l':


@ -980,6 +980,7 @@ SERVICE *ptr;
}
while (ptr)
{
ss_dassert(ptr->stats.n_current >= 0);
dcb_printf(dcb, "%-25s | %-20s | %6d | %5d\n",
ptr->name, ptr->routerModule,
ptr->stats.n_current, ptr->stats.n_sessions);


@ -1578,7 +1578,7 @@ routeQuery(FILTER *instance, void *session, GWBUF *queue)
bool accept = my_instance->def_op;
char *msg = NULL, *fullquery = NULL,*ipaddr;
char uname_addr[128];
DCB* dcb = my_session->session->client;
DCB* dcb = my_session->session->client;
USER* user = NULL;
GWBUF* forward;
ipaddr = strdup(dcb->remote);
@ -1620,7 +1620,7 @@ routeQuery(FILTER *instance, void *session, GWBUF *queue)
goto queryresolved;
}
queryresolved:
queryresolved:
free(ipaddr);
free(fullquery);
@ -1628,7 +1628,7 @@ routeQuery(FILTER *instance, void *session, GWBUF *queue)
if(accept){
return my_session->down.routeQuery(my_session->down.instance,
my_session->down.session, queue);
my_session->down.session, queue);
}else{
gwbuf_free(queue);
@ -1636,7 +1636,7 @@ routeQuery(FILTER *instance, void *session, GWBUF *queue)
if(my_session->errmsg){
msg = my_session->errmsg;
}
forward = gen_dummy_error(my_session,msg);
forward = gen_dummy_error(my_session,msg);
if(my_session->errmsg){
free(my_session->errmsg);


@ -0,0 +1,108 @@
Firewall filter
# Overview
The firewall filter is used to block queries that match a set of rules. It can be used to prevent harmful queries from reaching the database or to limit access to the database with a finer-grained set of rules than traditional GRANT-based rights management provides.
# Configuration
The firewall filter requires only a minimal configuration in the MaxScale.cnf file; the actual rules are located in a separate text file. The following is an example of a firewall filter configuration in the MaxScale.cnf file.
[Firewall]
type=filter
module=fwfilter
rules=/home/user/rules.txt
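For the filter to take effect it also has to be attached to a service. A minimal sketch of that wiring, assuming the usual MaxScale 'filters' service parameter and using placeholder service, server and credential names (this part is not shown in the rule documentation itself):
[Query Service]
type=service
router=readconnroute
servers=server1
user=maxscale
passwd=maxscalepw
filters=Firewall
The value of 'filters' refers to the name of the filter section, here [Firewall].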
## Filter Options
The firewall filter does not support any filter options.
## Filter Parameters
The firewall filter has one mandatory parameter that defines the location of the rule file. This is the 'rules' parameter and it expects an absolute path to the rule file.
# Rule syntax
The rules are defined by using the following syntax.
` rule NAME deny [wildcard | columns VALUE ... |
regex REGEX | limit_queries COUNT TIMEPERIOD HOLDOFF |
no_where_clause] [at_times VALUE...] [on_queries [select|update|insert|delete]]`
Rules always define a blocking action so the basic mode for the firewall filter is to allow all queries that do not match a given set of rules. Rules are identified by their name and have a mandatory part and optional parts.
A rule definition starts with the keyword 'rule', which identifies the line of text as a rule. The second token is the name of the rule. After that, the mandatory token 'deny' marks the start of the actual rule definition.
## Mandatory rule parameters
Each rule expects a single mandatory parameter. You can define multiple rules to cover situations where you would like to apply more than one of these restrictions to a query.
### Wildcard
This rule blocks all queries that use the wildcard character *.
### Columns
This rule expects a list of values after the 'columns' keyword. These values are interpreted as column names and if a query targets any of these, it is blocked.
### Regex
This rule blocks all queries matching a regex enclosed in single or double quotes.
### Limit_queries
The limit_queries rule expects three parameters. The first parameter is the number of allowed queries during the time period. The second is the time period in seconds and the third is the amount of time for which the rule is considered active and blocking.
### No_where_clause
This rule inspects the query and blocks it if it has no WHERE clause. For example, it prevents a DELETE FROM ... query that has no WHERE clause. It does not prevent wrongful usage of the WHERE clause, e.g. DELETE FROM ... WHERE 1=1.
## Optional rule parameters
Each mandatory rule accepts one or more optional parameters. These are to be defined after the mandatory part of the rule.
### At_times
This rule expects a list of time ranges that define the times when the rule in question is active. The time formats are expected to be ISO-8601 compliant and to be separated by a single dash (the - character). For example, to define the active period of a rule as 17:00 to 19:00, you would add 'at_times 17:00:00-19:00:00' to the end of the rule.
### On_queries
This limits the rule to be active only on certain types of queries.
## Applying rules to users
To apply the defined rules to users use the following syntax.
`users NAME ... match [any|all] rules RULE ...`
The first keyword is 'users', which identifies this line as a user definition line. It is followed by a list of usernames and network addresses in the format 'user@0.0.0.0', where the first part is the username and the second part is the network address. You can use the '%' character as a wildcard to match a username from any address or to match all users from a network. After the list of users and networks the keyword 'match' is expected, followed by either 'any' or 'all'. This defines how the rules are matched: with 'any', the query is blocked as soon as the first rule matches and the rest of the rules are skipped; with 'all', every listed rule must match for the query to be blocked.
After the matching part comes the 'rules' keyword, followed by a list of rule names. This allows rules to be reused and enables varying levels of query restriction.
# Examples
## Example rule file
The following is an example of a rule file which defines six rules and applies them to three sets of users. This rule file is used in all of the examples.
rule block_wildcard deny wildcard at_times 8:00:00-17:00:00
rule no_personal_info deny columns phone salary address on_queries select|delete at_times 12:00:00-18:00:00
rule simple_regex deny regex '.*insert.*into.*select.*'
rule dos_block deny limit_queries 10000 1.0 500.0 at_times 12:00:00-18:00:00
rule safe_delete deny no_where_clause on_queries delete
rule managers_table deny regex '.*from.*managers.*'
users John@% Jane@% match any rules no_personal_info block_wildcard
users %@80.120.% match any rules block_wildcard dos_block
users %@% match all rules safe_delete managers_table
## Example 1 - Deny access to personal information and prevent huge queries during peak hours
Assume that a database cluster with tables that have a large number of columns is under heavy load during certain times of the day, and that large selects and queries for personal information create unwanted stress on the cluster. We don't want to completely prevent all users from accessing personal information or performing large select queries; we only want to block the users John and Jane.
This can be achieved by creating two rules: one that blocks the usage of the wildcard and one that blocks queries targeting a set of columns. To apply these rules to the users, we define a users line in the rule file listing both rules and all the users we want them applied to. The rules are defined on lines 1 and 2 of the example rule file and the users line is defined on line 7.
## Example 2 - Only safe deletes in the managers table
We want to prevent accidental deletes from the managers table when the WHERE clause is missing. This poses a problem: we don't want to require all delete queries to have a WHERE clause; we only want to prevent the data in the managers table from being deleted without one.
To achieve this, we need two rules. The first rule, on line 5 of the example rule file, requires all delete operations to have a WHERE clause. On its own this rule is too broad, so we need a second one. The second rule, on line 6, blocks all queries that match the provided regular expression. Combining these two rules gives us the result we want; their application can be seen on line 9 of the example rule file. The 'all' matching mode requires that all of the listed rules match for the query to be blocked, which in effect combines the two rules into a single, more specific rule.


@ -359,7 +359,7 @@ char *server_string;
int rc;
int connect_timeout = handle->connect_timeout;
int read_timeout = handle->read_timeout;
int write_timeout = handle->write_timeout;;
int write_timeout = handle->write_timeout;
database->con = mysql_init(NULL);
@ -720,7 +720,7 @@ MYSQL_MONITOR *handle = (MYSQL_MONITOR *)arg;
}
/**
* Set the default id to use in the monitor.
* Set the timeouts to use in the monitor.
*
* @param arg The handle allocated by startMonitor
* @param type The connect timeout type


@ -24,6 +24,7 @@
*
* Date Who Description
* 25/07/14 Massimiliano Pinto Initial implementation
* 10/11/14 Massimiliano Pinto Added setNetworkTimeout for connect,read,write
*
* @endverbatim
*/
@ -49,7 +50,7 @@ extern __thread log_info_t tls_log_info;
static void monitorMain(void *);
static char *version_str = "V1.0.0";
static char *version_str = "V1.1.0";
MODULE_INFO info = {
MODULE_API_MONITOR,
@ -65,6 +66,7 @@ static void unregisterServer(void *, SERVER *);
static void defaultUsers(void *, char *, char *);
static void diagnostics(DCB *, void *);
static void setInterval(void *, size_t);
static void setNetworkTimeout(void *arg, int type, int value);
static MONITOR_OBJECT MyObject = {
startMonitor,
@ -74,7 +76,7 @@ static MONITOR_OBJECT MyObject = {
defaultUsers,
diagnostics,
setInterval,
NULL,
setNetworkTimeout,
NULL,
NULL,
NULL,
@ -146,6 +148,9 @@ MYSQL_MONITOR *handle;
handle->defaultPasswd = NULL;
handle->id = MONITOR_DEFAULT_ID;
handle->interval = MONITOR_INTERVAL;
handle->connect_timeout=DEFAULT_CONNECT_TIMEOUT;
handle->read_timeout=DEFAULT_READ_TIMEOUT;
handle->write_timeout=DEFAULT_WRITE_TIMEOUT;
spinlock_init(&handle->lock);
}
handle->tid = (THREAD)thread_start(monitorMain, handle);
@ -263,6 +268,9 @@ char *sep;
}
dcb_printf(dcb,"\tSampling interval:\t%lu milliseconds\n", handle->interval);
dcb_printf(dcb,"\tConnect Timeout:\t%i seconds\n", handle->connect_timeout);
dcb_printf(dcb,"\tRead Timeout:\t\t%i seconds\n", handle->read_timeout);
dcb_printf(dcb,"\tWrite Timeout:\t\t%i seconds\n", handle->write_timeout);
dcb_printf(dcb, "\tMonitored servers: ");
db = handle->databases;
@ -303,7 +311,7 @@ MYSQL_MONITOR *handle = (MYSQL_MONITOR *)arg;
* @param database The database to probe
*/
static void
monitorDatabase(MONITOR_SERVERS *database, char *defaultUser, char *defaultPasswd)
monitorDatabase(MONITOR_SERVERS *database, char *defaultUser, char *defaultPasswd, MYSQL_MONITOR *handle)
{
MYSQL_ROW row;
MYSQL_RES *result;
@ -329,10 +337,15 @@ char *server_string;
{
char *dpwd = decryptPassword(passwd);
int rc;
int read_timeout = 1;
int connect_timeout = handle->connect_timeout;
int read_timeout = handle->read_timeout;
int write_timeout = handle->write_timeout;
database->con = mysql_init(NULL);
rc = mysql_options(database->con, MYSQL_OPT_READ_TIMEOUT, (void *)&read_timeout);
database->con = mysql_init(NULL);
rc = mysql_options(database->con, MYSQL_OPT_CONNECT_TIMEOUT, (void *)&connect_timeout);
rc = mysql_options(database->con, MYSQL_OPT_READ_TIMEOUT, (void *)&read_timeout);
rc = mysql_options(database->con, MYSQL_OPT_WRITE_TIMEOUT, (void *)&write_timeout);
if (mysql_real_connect(database->con, database->server->name,
uname, dpwd, NULL, database->server->port, NULL, 0) == NULL)
@ -470,7 +483,7 @@ size_t nrounds = 0;
while (ptr)
{
unsigned int prev_status = ptr->server->status;
monitorDatabase(ptr, handle->defaultUser, handle->defaultPasswd);
monitorDatabase(ptr, handle->defaultUser, handle->defaultPasswd,handle);
if (ptr->server->status != prev_status ||
SERVER_IS_DOWN(ptr->server))
@ -500,3 +513,65 @@ setInterval(void *arg, size_t interval)
MYSQL_MONITOR *handle = (MYSQL_MONITOR *)arg;
memcpy(&handle->interval, &interval, sizeof(unsigned long));
}
/**
* Set the timeouts to use in the monitor.
*
* @param arg The handle allocated by startMonitor
* @param type The connect timeout type
* @param value The timeout value to set
*/
static void
setNetworkTimeout(void *arg, int type, int value)
{
MYSQL_MONITOR *handle = (MYSQL_MONITOR *)arg;
int max_timeout = (int)(handle->interval/1000);
int new_timeout = max_timeout -1;
if (new_timeout <= 0)
new_timeout = DEFAULT_CONNECT_TIMEOUT;
switch(type) {
case MONITOR_CONNECT_TIMEOUT:
if (value < max_timeout) {
memcpy(&handle->connect_timeout, &value, sizeof(int));
} else {
memcpy(&handle->connect_timeout, &new_timeout, sizeof(int));
LOGIF(LE, (skygw_log_write_flush(
LOGFILE_ERROR,
"warning : Monitor Connect Timeout %i is greater than monitor interval ~%i seconds"
", lowering to %i seconds", value, max_timeout, new_timeout)));
}
break;
case MONITOR_READ_TIMEOUT:
if (value < max_timeout) {
memcpy(&handle->read_timeout, &value, sizeof(int));
} else {
memcpy(&handle->read_timeout, &new_timeout, sizeof(int));
LOGIF(LE, (skygw_log_write_flush(
LOGFILE_ERROR,
"warning : Monitor Read Timeout %i is greater than monitor interval ~%i seconds"
", lowering to %i seconds", value, max_timeout, new_timeout)));
}
break;
case MONITOR_WRITE_TIMEOUT:
if (value < max_timeout) {
memcpy(&handle->write_timeout, &value, sizeof(int));
} else {
memcpy(&handle->write_timeout, &new_timeout, sizeof(int));
LOGIF(LE, (skygw_log_write_flush(
LOGFILE_ERROR,
"warning : Monitor Write Timeout %i is greater than monitor interval ~%i seconds"
", lowering to %i seconds", value, max_timeout, new_timeout)));
}
break;
default:
LOGIF(LE, (skygw_log_write_flush(
LOGFILE_ERROR,
"Error : Monitor setNetworkTimeout received an unsupported action type %i", type)));
break;
}
}
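
A self-contained sketch of the clamping rule that setNetworkTimeout() applies, useful for reasoning about the warning messages above. The program below is illustrative only and not part of MaxScale; it reimplements just the arithmetic, with the fallback parameter standing in for DEFAULT_CONNECT_TIMEOUT.

/* clamp_timeout.c - standalone illustration of the timeout clamping rule:
 * a timeout must be smaller than the monitor interval (given in milliseconds),
 * otherwise it is lowered to interval/1000 - 1 seconds, falling back to a
 * default when that would be zero or negative. */
#include <stdio.h>

static int clamp_timeout(int value, unsigned long interval_ms, int fallback)
{
        int max_timeout = (int)(interval_ms / 1000);
        int new_timeout = max_timeout - 1;

        if (new_timeout <= 0)
                new_timeout = fallback; /* stands in for DEFAULT_CONNECT_TIMEOUT */

        return value < max_timeout ? value : new_timeout;
}

int main(void)
{
        /* With a 10 second interval, a 30 second timeout is lowered to 9 seconds
         * (and the real code logs a warning), while a 5 second timeout is kept. */
        printf("%d\n", clamp_timeout(30, 10000, 3)); /* prints 9 */
        printf("%d\n", clamp_timeout(5, 10000, 3));  /* prints 5 */
        return 0;
}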


@ -344,7 +344,9 @@ static int gw_read_backend_event(DCB *dcb) {
if (backend_protocol->protocol_auth_state == MYSQL_AUTH_FAILED ||
backend_protocol->protocol_auth_state == MYSQL_HANDSHAKE_FAILED)
{
/**
GWBUF* errbuf;
bool succp;
/**
* protocol state won't change anymore,
* lock can be freed
*/
@ -357,54 +359,52 @@ static int gw_read_backend_event(DCB *dcb) {
dcb->delayq,
GWBUF_LENGTH(dcb->delayq))) != NULL);
}
spinlock_release(&dcb->delayqlock);
{
GWBUF* errbuf;
bool succp;
spinlock_release(&dcb->delayqlock);
/* try reload users' table for next connection */
if (backend_protocol->protocol_auth_state == MYSQL_AUTH_FAILED) {
service_refresh_users(dcb->session->service);
}
#if defined(SS_DEBUG)
LOGIF(LD, (skygw_log_write(
LOGFILE_DEBUG,
"%lu [gw_read_backend_event] "
"calling handleError. Backend "
"DCB %p, session %p",
pthread_self(),
dcb,
dcb->session)));
/* try reload users' table for next connection */
if (backend_protocol->protocol_auth_state ==
MYSQL_AUTH_FAILED)
{
service_refresh_users(dcb->session->service);
}
#if defined(SS_DEBUG)
LOGIF(LD, (skygw_log_write(
LOGFILE_DEBUG,
"%lu [gw_read_backend_event] "
"calling handleError. Backend "
"DCB %p, session %p",
pthread_self(),
dcb,
dcb->session)));
#endif
errbuf = mysql_create_custom_error(
1,
0,
"Authentication with backend failed. "
"Session will be closed.");
errbuf = mysql_create_custom_error(
1,
0,
"Authentication with backend failed. "
"Session will be closed.");
router->handleError(router_instance,
rsession,
errbuf,
dcb,
ERRACT_REPLY_CLIENT,
&succp);
gwbuf_free(errbuf);
LOGIF(LD, (skygw_log_write(
LOGFILE_DEBUG,
"%lu [gw_read_backend_event] "
"after calling handleError. Backend "
"DCB %p, session %p",
pthread_self(),
dcb,
dcb->session)));
spinlock_acquire(&session->ses_lock);
session->state = SESSION_STATE_STOPPING;
spinlock_release(&session->ses_lock);
ss_dassert(dcb->dcb_errhandle_called);
dcb_close(dcb);
}
router->handleError(router_instance,
rsession,
errbuf,
dcb,
ERRACT_REPLY_CLIENT,
&succp);
gwbuf_free(errbuf);
LOGIF(LD, (skygw_log_write(
LOGFILE_DEBUG,
"%lu [gw_read_backend_event] "
"after calling handleError. Backend "
"DCB %p, session %p",
pthread_self(),
dcb,
dcb->session)));
spinlock_acquire(&session->ses_lock);
session->state = SESSION_STATE_STOPPING;
spinlock_release(&session->ses_lock);
ss_dassert(dcb->dcb_errhandle_called);
dcb_close(dcb);
rc = 1;
goto return_rc;
}


@ -660,7 +660,7 @@ int gw_read_client_event(
*/
mysql_send_ok(dcb, 2, 0, NULL);
}
else
else
{
protocol->protocol_auth_state = MYSQL_AUTH_FAILED;
LOGIF(LD, (skygw_log_write(
@ -738,7 +738,7 @@ int gw_read_client_event(
ss_dassert(nbytes_read >= 5);
session = dcb->session;
ss_dassert( session!= NULL);
ss_dassert(session!= NULL);
if (session != NULL)
{
@ -749,54 +749,9 @@ int gw_read_client_event(
rsession = session->router_session;
ss_dassert(rsession != NULL);
}
/* Now, we are assuming in the first buffer there is
* the information form mysql command */
payload = GWBUF_DATA(read_buffer);
/**
* Without rsession there is no access to backend.
* COM_QUIT : close client dcb
* else : write custom error to client dcb.
*/
if(rsession == NULL)
{
/** COM_QUIT */
if (MYSQL_IS_COM_QUIT(payload))
{
LOGIF(LD, (skygw_log_write_flush(
LOGFILE_DEBUG,
"%lu [gw_read_client_event] Client read "
"COM_QUIT and rsession == NULL. Closing "
"client dcb %p.",
pthread_self(),
dcb)));
/**
* close router session and that closes
* backends
*/
dcb_close(dcb);
}
else
{
#if defined(SS_DEBUG)
LOGIF(LE, (skygw_log_write_flush(
LOGFILE_ERROR,
"Client read error handling.")));
#endif
/* Send a custom error as MySQL command reply */
mysql_send_custom_error(
dcb,
1,
0,
"Can't route query. Connection to "
"backend lost");
}
rc = 1;
/** Free buffer */
read_buffer = gwbuf_consume(read_buffer, nbytes_read);
goto return_rc;
}
/** Ask what type of input the router expects */
cap = router->getCapabilities(router_instance, rsession);
@ -811,18 +766,47 @@ int gw_read_client_event(
/** Mark buffer to as MySQL type */
gwbuf_set_type(read_buffer, GWBUF_TYPE_MYSQL);
}
/**
* If router doesn't implement getCapabilities correctly we end
* up here.
*/
else
{
GWBUF* errbuf;
bool succp;
LOGIF(LD, (skygw_log_write_flush(
LOGFILE_DEBUG,
"%lu [gw_read_client_event] Reading router "
"capabilities failed.",
pthread_self())));
mysql_send_custom_error(dcb,
1,
0,
"Operation failed. Router "
"session is closed.");
errbuf = mysql_create_custom_error(
1,
0,
"Read invalid router capabilities. Routing failed. "
"Session will be closed.");
router->handleError(
router_instance,
rsession,
errbuf,
dcb,
ERRACT_REPLY_CLIENT,
&succp);
gwbuf_free(errbuf);
/**
* If there are not enough backends close
* session
*/
if (!succp)
{
LOGIF(LE, (skygw_log_write_flush(
LOGFILE_ERROR,
"Error : Routing the query failed. "
"Session will be closed.")));
dcb_close(dcb);
}
rc = 1;
goto return_rc;
}
@ -843,6 +827,7 @@ int gw_read_client_event(
}
else
{
/** Reset error handler when routing of the new query begins */
router->handleError(NULL, NULL, NULL, dcb, ERRACT_RESET, NULL);
if (stmt_input)
@ -1525,12 +1510,17 @@ retblock:
* It is assumed readbuf includes at least one complete packet.
* Return 1 in success. If the last packet is incomplete return success but
* leave incomplete packet to readbuf.
*
* @param session Session pointer
* @param p_readbuf Pointer to the address of GWBUF including the query
*
* @return 1 if succeed,
*/
static int route_by_statement(
SESSION* session,
GWBUF** p_readbuf)
{
int rc = -1;
int rc;
GWBUF* packetbuf;
#if defined(SS_DEBUG)
GWBUF* tmpbuf;


@ -1402,17 +1402,19 @@ static route_target_t get_route_target (
QUERY_IS_TYPE(qtype, QUERY_TYPE_GSYSVAR_READ))) /*< read global sys var */
{
/** First set expected targets before evaluating hints */
if (QUERY_IS_TYPE(qtype, QUERY_TYPE_READ) ||
if (!QUERY_IS_TYPE(qtype, QUERY_TYPE_MASTER_READ) &&
(QUERY_IS_TYPE(qtype, QUERY_TYPE_READ) ||
QUERY_IS_TYPE(qtype, QUERY_TYPE_SHOW_TABLES) || /*< 'SHOW TABLES' */
/** Configured to allow reading variables from slaves */
(use_sql_variables_in == TYPE_ALL &&
(QUERY_IS_TYPE(qtype, QUERY_TYPE_USERVAR_READ) ||
QUERY_IS_TYPE(qtype, QUERY_TYPE_SYSVAR_READ) ||
QUERY_IS_TYPE(qtype, QUERY_TYPE_GSYSVAR_READ))))
QUERY_IS_TYPE(qtype, QUERY_TYPE_GSYSVAR_READ)))))
{
target = TARGET_SLAVE;
}
else if (QUERY_IS_TYPE(qtype, QUERY_TYPE_EXEC_STMT) ||
else if (QUERY_IS_TYPE(qtype, QUERY_TYPE_MASTER_READ) ||
QUERY_IS_TYPE(qtype, QUERY_TYPE_EXEC_STMT) ||
/** Configured not to allow reading variables from slaves */
(use_sql_variables_in == TYPE_MASTER &&
(QUERY_IS_TYPE(qtype, QUERY_TYPE_USERVAR_READ) ||
@ -2198,17 +2200,32 @@ static bool route_single_stmt(
}
if (bref != NULL && BREF_IS_IN_USE(bref))
{
{
/** Create and add MySQL error to eventqueue */
modutil_reply_parse_error(
bref->bref_dcb,
strdup("Routing query to backend failed. "
"See the error log for further "
"details."),
0);
}
succp = true;
}
else
{
/**
* If there were no available backend references
* available return false - session will be closed
*/
LOGIF(LE, (skygw_log_write_flush(
LOGFILE_ERROR,
"Error : Sending error message to client "
"failed. Router doesn't have any "
"available backends. Session will be "
"closed.")));
succp = false;
}
if (query_str) free (query_str);
if (qtype_str) free(qtype_str);
succp = true;
goto retblock;
}
/**
@ -4411,7 +4428,8 @@ static void rwsplit_process_router_options(
* @param errmsgbuf The error message to reply
* @param backend_dcb The backend DCB
* @param action The action: REPLY, REPLY_AND_CLOSE, NEW_CONNECTION
* @param succp Result of action.
* @param succp Result of action. True if there is at least master
* and enough slaves to continue session. Otherwise false.
*
* Even if succp == true connecting to new slave may have failed. succp is to
* tell whether router has enough master/slave connections to continue work.

testall.cmake (new file, +8)

@ -0,0 +1,8 @@
execute_process(COMMAND /bin/sh -c "${CMAKE_BINARY_DIR}/bin/maxscale -c ${CMAKE_BINARY_DIR} &>/dev/null")
execute_process(COMMAND make test RESULT_VARIABLE RVAL)
execute_process(COMMAND killall maxscale)
if(NOT RVAL EQUAL 0)
message(FATAL_ERROR "Test suite failed with status: ${RVAL}")
else()
message(STATUS "Test exited with status: ${RVAL}")
endif()