Merge branch '2.2' into 2.2-mrm

Johan Wikman 2017-09-28 15:19:20 +03:00
commit 89d1f81e37
70 changed files with 748 additions and 705 deletions

View File

@ -62,9 +62,6 @@ typedef struct
#define MODULECMD_ARG_DCB 8 /**< DCB */
#define MODULECMD_ARG_MONITOR 9 /**< Monitor */
#define MODULECMD_ARG_FILTER 10 /**< Filter */
#define MODULECMD_ARG_OUTPUT 11 /**< DCB suitable for writing results to.
This should always be the first argument
if the function requires an output DCB. */
/** What type of an action does the command perform? */
enum modulecmd_type
@ -196,7 +193,6 @@ const MODULECMD* modulecmd_find_command(const char *domain, const char *identifi
* | MODULECMD_ARG_STRING | String |
* | MODULECMD_ARG_BOOLEAN | Boolean value |
* | MODULECMD_ARG_DCB | Raw DCB pointer |
* | MODULECMD_ARG_OUTPUT | DCB for output |
*
* @param cmd Command for which the parameters are parsed
* @param argc Number of arguments
@ -223,15 +219,6 @@ void modulecmd_arg_free(MODULECMD_ARG *arg);
*/
bool modulecmd_arg_is_present(const MODULECMD_ARG *arg, int idx);
/**
* @brief Check if module command requires an output DCB
*
* @param cmd Command to check
*
* @return True if module requires a DCB for printing output
*/
bool modulecmd_requires_output_dcb(const MODULECMD* cmd);
/**
* @brief Call a registered command
*

View File

@ -44,14 +44,16 @@ exports.handler = function() {}
exports.builder = function(yargs) {
yargs
.command('server <server> <key> <value>', 'Alter server parameters', function(yargs) {
return yargs.epilog('To display the server parameters, execute `show server <server>`');
return yargs.epilog('To display the server parameters, execute `show server <server>`')
.usage('Usage: alter server <server> <key> <value>')
}, function(argv) {
maxctrl(argv, function(host) {
return updateValue(host, 'servers/' + argv.server, 'data.attributes.parameters.' + argv.key, argv.value)
})
})
.command('monitor <monitor> <key> <value>', 'Alter monitor parameters', function(yargs) {
return yargs.epilog('To display the monitor parameters, execute `show monitor <monitor>`');
return yargs.epilog('To display the monitor parameters, execute `show monitor <monitor>`')
.usage('Usage: alter monitor <monitor> <key> <value>')
}, function(argv) {
maxctrl(argv, function(host) {
return updateValue(host, 'monitors/' + argv.monitor, 'data.attributes.parameters.' + argv.key, argv.value)
@ -59,14 +61,16 @@ exports.builder = function(yargs) {
})
.command('service <service> <key> <value>', 'Alter service parameters', function(yargs) {
return yargs.epilog('To display the service parameters, execute `show service <service>`. ' +
'The following list of parameters can be altered at runtime:\n\n' + JSON.stringify(service_params, null, 4));
'The following list of parameters can be altered at runtime:\n\n' + JSON.stringify(service_params, null, 4))
.usage('Usage: alter service <service> <key> <value>')
}, function(argv) {
maxctrl(argv, function(host) {
return updateValue(host, 'services/' + argv.service, 'data.attributes.parameters.' + argv.key, argv.value)
})
})
.command('logging <key> <value>', 'Alter logging parameters', function(yargs) {
return yargs.epilog('To display the logging parameters, execute `show logging`');
return yargs.epilog('To display the logging parameters, execute `show logging`')
.usage('Usage: alter logging <key> <value>')
}, function(argv) {
maxctrl(argv, function(host) {
return updateValue(host, 'maxscale/logs', 'data.attributes.parameters.' + argv.key, argv.value)
@ -74,7 +78,8 @@ exports.builder = function(yargs) {
})
.command('maxscale <key> <value>', 'Alter MaxScale parameters', function(yargs) {
return yargs.epilog('To display the MaxScale parameters, execute `show maxscale`. ' +
'The following list of parameters can be altered at runtime:\n\n' + JSON.stringify(maxscale_params, null, 4));
'The following list of parameters can be altered at runtime:\n\n' + JSON.stringify(maxscale_params, null, 4))
.usage('Usage: alter maxscale <key> <value>')
}, function(argv) {
maxctrl(argv, function(host) {
return updateValue(host, 'maxscale', 'data.attributes.parameters.' + argv.key, argv.value)

View File

@ -18,7 +18,8 @@ exports.handler = function() {}
exports.builder = function(yargs) {
yargs
.command('command <module> <command> [params...]', 'Call a module command', function(yargs) {
return yargs.epilog('To inspect the list of module commands, execute `list commands`');
return yargs.epilog('To inspect the list of module commands, execute `list commands`')
.usage('Usage: call command <module> <command> [params...]')
}, function(argv) {
// First we have to find the correct method to use
maxctrl(argv, function(host) {

View File

@ -18,7 +18,8 @@ exports.handler = function() {}
exports.builder = function(yargs) {
yargs
.command('server <server> <state>', 'Clear server state', function(yargs) {
return yargs.epilog('This command clears a server state set by the `set server <server> <state>` command');
return yargs.epilog('This command clears a server state set by the `set server <server> <state>` command')
.usage('Usage: clear server <server> <state>')
}, function(argv) {
var target = 'servers/' + argv.server + '/clear?state=' + argv.state
maxctrl(argv, function(host) {

View File

@ -128,7 +128,8 @@ exports.builder = function(yargs) {
.command('diff <target>', 'Show difference between host servers and <target>.', function(yargs) {
return yargs.epilog('The list of host servers is controlled with the --hosts option. ' +
'The target server should not be in the host list. Value of <target> ' +
'must be in HOST:PORT format');
'must be in HOST:PORT format')
.usage('Usage: cluster diff <target>')
}, function(argv) {
maxctrl(argv, function(host) {
@ -182,7 +183,8 @@ exports.builder = function(yargs) {
'command on that instance. Synchronization can be attempted again if a previous ' +
'attempt failed due to a network failure or some other ephemeral error. Any other ' +
'errors require manual synchronization of the MaxScale configuration files and a ' +
'restart of the failed MaxScale.');
'restart of the failed MaxScale.')
.usage('Usage: cluster sync <target>')
}, function(argv) {
maxctrl(argv, function(host) {
return getDiffs(argv.target, host)

View File

@ -68,7 +68,8 @@ exports.builder = function(yargs) {
return yargs.epilog('The created server will not be used by any services or monitors ' +
'unless the --services or --monitors options are given. The list ' +
'of servers a service or a monitor uses can be altered with the ' +
'`link` and `unlink` commands.');
'`link` and `unlink` commands.')
.usage('Usage: create server <name> <host> <port>')
}, function(argv) {
var server = {
'data': {
@ -124,7 +125,8 @@ exports.builder = function(yargs) {
})
.command('monitor <name> <module>', 'Create a new monitor', function(yargs) {
return yargs.epilog('The list of servers given with the --servers option should not ' +
'contain any servers that are already monitored by another monitor.');
'contain any servers that are already monitored by another monitor.')
.usage('Usage: create monitor <name> <module>')
}, function(argv) {
var monitor = {
@ -162,7 +164,8 @@ exports.builder = function(yargs) {
default: '::'
})
.command('listener <service> <name> <port>', 'Create a new listener', function(yargs) {
return yargs.epilog('The new listener will be taken into use immediately.');
return yargs.epilog('The new listener will be taken into use immediately.')
.usage('Usage: create listener <service> <name> <port>')
}, function(argv) {
var listener = {
@ -201,7 +204,8 @@ exports.builder = function(yargs) {
return yargs.epilog('The created user can be used with the MaxScale REST API as ' +
'well as the MaxAdmin network interface. By default the created ' +
'user will have read-only privileges. To make the user an ' +
'administrative user, use the `--type=admin` option.');
'administrative user, use the `--type=admin` option.')
.usage('Usage: create user <name> <password>')
}, function(argv) {
var user = {

View File

@ -18,14 +18,16 @@ exports.handler = function() {}
exports.builder = function(yargs) {
yargs
.command('server <name>', 'Destroy an unused server', function(yargs) {
return yargs.epilog('The server must be unlinked from all services and monitors before it can be destroyed.');
return yargs.epilog('The server must be unlinked from all services and monitors before it can be destroyed.')
.usage('Usage: destroy server <name>')
}, function(argv) {
maxctrl(argv, function(host) {
return doRequest(host, 'servers/' + argv.name, null, {method: 'DELETE'})
})
})
.command('monitor <name>', 'Destroy an unused monitor', function(yargs) {
return yargs.epilog('The monitor must be unlinked from all servers before it can be destroyed.');
return yargs.epilog('The monitor must be unlinked from all servers before it can be destroyed.')
.usage('Usage: destroy monitor <name>')
}, function(argv) {
maxctrl(argv, function(host) {
return doRequest(host, 'monitors/' + argv.name, null, {method: 'DELETE'})
@ -36,7 +38,8 @@ exports.builder = function(yargs) {
'Destroying a listener at runtime stops it from accepting new ' +
'connections but it will still be bound to the listening socket. This ' +
'means that new listeners cannot be created to replace destroyed listeners ' +
'without restarting MaxScale.');
'without restarting MaxScale.')
.usage('Usage: destroy listener <service> <name>')
}, function(argv) {
maxctrl(argv, function(host) {
return doRequest(host, 'services/' + argv.service + '/listeners/' + argv.name, null, {method: 'DELETE'})
@ -45,7 +48,8 @@ exports.builder = function(yargs) {
.command('user <name>', 'Remove a network user', function(yargs) {
return yargs.epilog('The last remaining administrative user cannot be removed. ' +
'Create a replacement administrative user before attempting ' +
'to remove the last administrative user.');
'to remove the last administrative user.')
.usage('Usage: destroy user <name>')
}, function(argv) {
maxctrl(argv, function(host) {
return doRequest(host, 'users/inet/' + argv.name, null, {method: 'DELETE'})

View File

@ -25,7 +25,8 @@ exports.handler = function() {}
exports.builder = function(yargs) {
yargs
.command('log-priority <log>', 'Disable log priority [warning|notice|info|debug]', function(yargs) {
return yargs.epilog('The `debug` log priority is only available for debug builds of MaxScale.');
return yargs.epilog('The `debug` log priority is only available for debug builds of MaxScale.')
.usage('Usage: disable log-priority <log>')
}, function(argv) {
if (log_levels.indexOf(argv.log) != -1) {
maxctrl(argv, function(host) {
@ -38,7 +39,8 @@ exports.builder = function(yargs) {
}
})
.command('account <name>', 'Disable a Linux user account from administrative use', function(yargs) {
return yargs.epilog('The Linux user accounts are used by the MaxAdmin UNIX Domain Socket interface');
return yargs.epilog('The Linux user accounts are used by the MaxAdmin UNIX Domain Socket interface')
.usage('Usage: disable account <name>')
}, function(argv) {
maxctrl(argv, function(host) {
return doRequest(host, 'users/unix/' + argv.name, null, { method: 'DELETE'})

View File

@ -25,7 +25,8 @@ exports.handler = function() {}
exports.builder = function(yargs) {
yargs
.command('log-priority <log>', 'Enable log priority [warning|notice|info|debug]', function(yargs) {
return yargs.epilog('The `debug` log priority is only available for debug builds of MaxScale.');
return yargs.epilog('The `debug` log priority is only available for debug builds of MaxScale.')
.usage('Usage: enable log-priority <log>')
}, function(argv) {
if (log_levels.indexOf(argv.log) != -1) {
maxctrl(argv, function(host) {
@ -45,7 +46,8 @@ exports.builder = function(yargs) {
choices: ['admin', 'basic']
})
.command('account <name>', 'Activate a Linux user account for administrative use', function(yargs) {
return yargs.epilog('The Linux user accounts are used by the MaxAdmin UNIX Domain Socket interface');
return yargs.epilog('The Linux user accounts are used by the MaxAdmin UNIX Domain Socket interface')
.usage('Usage: enable account <name>')
}, function(argv) {
var req_body = {
data: {

View File

@ -41,14 +41,16 @@ exports.builder = function(yargs) {
'linked to a service, it should be linked to a monitor so that ' +
'the server state is up to date. Newly linked servers are only ' +
'available to new connections, existing connections will use the ' +
'old list of servers.');
'old list of servers.')
.usage('Usage: link service <name> <server...>')
}, function(argv) {
addServer(argv, 'services/' + argv.name, argv.server)
})
.command('monitor <name> <server...>', 'Link servers to a monitor', function(yargs) {
return yargs.epilog('Linking a server to a monitor will add it to the list of servers ' +
'that are monitored by that monitor. A server can be monitored by ' +
'only one monitor at a time.');
'only one monitor at a time.')
.usage('Usage: link monitor <name> <server...>')
}, function(argv) {
addServer(argv, 'monitors/' + argv.name, argv.server)
})

View File

@ -19,7 +19,8 @@ exports.handler = function() {}
exports.builder = function(yargs) {
yargs
.command('servers', 'List servers', function(yargs) {
return yargs.epilog('List all servers in MaxScale.');
return yargs.epilog('List all servers in MaxScale.')
.usage('Usage: list servers')
}, function(argv) {
maxctrl(argv, function(host) {
return getCollection(host, 'servers', [
@ -32,7 +33,8 @@ exports.builder = function(yargs) {
})
})
.command('services', 'List services', function(yargs) {
return yargs.epilog('List all services and the servers they use.');
return yargs.epilog('List all services and the servers they use.')
.usage('Usage: list services')
}, function(argv) {
maxctrl(argv, function(host) {
return getCollection(host, 'services',[
@ -45,7 +47,8 @@ exports.builder = function(yargs) {
})
})
.command('listeners <service>', 'List listeners of a service', function(yargs) {
return yargs.epilog('List listeners for a service.');
return yargs.epilog('List listeners for a service.')
.usage('Usage: list listeners <service>')
}, function(argv) {
maxctrl(argv, function(host) {
return getSubCollection(host, 'services/' + argv.service, 'attributes.listeners', [
@ -56,7 +59,8 @@ exports.builder = function(yargs) {
})
})
.command('monitors', 'List monitors', function(yargs) {
return yargs.epilog('List all monitors in MaxScale.');
return yargs.epilog('List all monitors in MaxScale.')
.usage('Usage: list monitors')
}, function(argv) {
maxctrl(argv, function(host) {
return getCollection(host, 'monitors', [
@ -67,7 +71,8 @@ exports.builder = function(yargs) {
})
})
.command('sessions', 'List sessions', function(yargs) {
return yargs.epilog('List all client sessions.');
return yargs.epilog('List all client sessions.')
.usage('Usage: list sessions')
}, function(argv) {
maxctrl(argv, function(host) {
return getCollection(host, 'sessions',[
@ -79,7 +84,8 @@ exports.builder = function(yargs) {
})
})
.command('filters', 'List filters', function(yargs) {
return yargs.epilog('List all filters in MaxScale.');
return yargs.epilog('List all filters in MaxScale.')
.usage('Usage: list filters')
}, function(argv) {
maxctrl(argv, function(host) {
return getCollection(host, 'filters', [
@ -90,7 +96,8 @@ exports.builder = function(yargs) {
})
})
.command('modules', 'List loaded modules', function(yargs) {
return yargs.epilog('List all currently loaded modules.');
return yargs.epilog('List all currently loaded modules.')
.usage('Usage: list modules')
}, function(argv) {
maxctrl(argv, function(host) {
return getCollection(host, 'maxscale/modules',[
@ -101,7 +108,8 @@ exports.builder = function(yargs) {
})
})
.command('users', 'List created network users', function(yargs) {
return yargs.epilog('List the users that can be used to connect to the MaxScale REST API.');
return yargs.epilog('List the users that can be used to connect to the MaxScale REST API.')
.usage('Usage: list users')
}, function(argv) {
maxctrl(argv, function(host) {
return getCollection(host, 'users/inet',[
@ -110,7 +118,8 @@ exports.builder = function(yargs) {
})
})
.command('commands', 'List module commands', function(yargs) {
return yargs.epilog('List all available module commands.');
return yargs.epilog('List all available module commands.')
.usage('Usage: list commands')
}, function(argv) {
maxctrl(argv, function(host) {
return getCollection(host, 'maxscale/modules',[

View File

@ -18,7 +18,8 @@ exports.handler = function() {}
exports.builder = function(yargs) {
yargs
.command('logs', 'Rotate log files by closing and reopening the files', function(yargs) {
return yargs.epilog('This command is intended to be used with the `logrotate` command.');
return yargs.epilog('This command is intended to be used with the `logrotate` command.')
.usage('Usage: rotate logs')
}, function(argv) {
maxctrl(argv, function(host){
return doRequest(host, 'maxscale/logs/flush/', null, {method: 'POST'})

View File

@ -23,7 +23,8 @@ exports.builder = function(yargs) {
'Any other states will be overridden by the monitor on the next ' +
'monitoring interval. To manually control server states, use the ' +
'`stop monitor <name>` command to stop the monitor before setting ' +
'the server states manually.');
'the server states manually.')
.usage('Usage: set server <server> <state>')
}, function(argv) {
var target = 'servers/' + argv.server + '/set?state=' + argv.state
maxctrl(argv, function(host) {

View File

@ -22,7 +22,8 @@ exports.builder = function(yargs) {
return yargs.epilog('Show detailed information about a server. The `Parameters` ' +
'field contains the currently configured parameters for this ' +
'server. See `help alter server` for more details about altering ' +
'server parameters.');
'server parameters.')
.usage('Usage: show server <server>')
}, function(argv) {
maxctrl(argv, function(host) {
return getResource(host, 'servers/' + argv.server, [
@ -46,7 +47,8 @@ exports.builder = function(yargs) {
return yargs.epilog('Show detailed information about a service. The `Parameters` ' +
'field contains the currently configured parameters for this ' +
'service. See `help alter service` for more details about altering ' +
'service parameters.');
'service parameters.')
.usage('Usage: show service <service>')
}, function(argv) {
maxctrl(argv, function(host) {
return getResource(host, 'services/' + argv.service, [
@ -66,7 +68,8 @@ exports.builder = function(yargs) {
return yargs.epilog('Show detailed information about a monitor. The `Parameters` ' +
'field contains the currently configured parameters for this ' +
'monitor. See `help alter monitor` for more details about altering ' +
'monitor parameters.');
'monitor parameters.')
.usage('Usage: show monitor <monitor>')
}, function(argv) {
maxctrl(argv, function(host) {
return getResource(host, 'monitors/' + argv.monitor, [
@ -82,7 +85,8 @@ exports.builder = function(yargs) {
return yargs.epilog('Show detailed information about a single session. ' +
'The list of sessions can be retrieved with the ' +
'`list sessions` command. The <session> is the session ' +
'ID of a particular session.');
'ID of a particular session.')
.usage('Usage: show session <session>')
}, function(argv) {
maxctrl(argv, function(host) {
return getResource(host, 'sessions/' + argv.session, [
@ -97,7 +101,8 @@ exports.builder = function(yargs) {
})
})
.command('filter <filter>', 'Show filter', function(yargs) {
return yargs.epilog('The list of services that use this filter is shown in the `Services` field.');
return yargs.epilog('The list of services that use this filter is shown in the `Services` field.')
.usage('Usage: show filter <filter>')
}, function(argv) {
maxctrl(argv, function(host) {
return getResource(host, 'filters/' + argv.filter, [
@ -110,7 +115,8 @@ exports.builder = function(yargs) {
})
.command('module <module>', 'Show loaded module', function(yargs) {
return yargs.epilog('This command shows all available parameters as well as ' +
'detailed version information of a loaded module.');
'detailed version information of a loaded module.')
.usage('Usage: show module <module>')
}, function(argv) {
maxctrl(argv, function(host) {
return getResource(host, 'maxscale/modules/' + argv.module, [
@ -126,7 +132,8 @@ exports.builder = function(yargs) {
})
.command('maxscale', 'Show MaxScale information', function(yargs) {
return yargs.epilog('See `help alter maxscale` for more details about altering ' +
'MaxScale parameters.');
'MaxScale parameters.')
.usage('Usage: show maxscale')
}, function(argv) {
maxctrl(argv, function(host) {
return getResource(host, 'maxscale', [
@ -141,7 +148,8 @@ exports.builder = function(yargs) {
})
.command('logging', 'Show MaxScale logging information', function(yargs) {
return yargs.epilog('See `help alter logging` for more details about altering ' +
'logging parameters.');
'logging parameters.')
.usage('Usage: show logging')
}, function(argv) {
maxctrl(argv, function(host) {
return getResource(host, 'maxscale/logs', [
@ -153,7 +161,8 @@ exports.builder = function(yargs) {
})
.command('commands <module>', 'Show module commands of a module', function(yargs) {
return yargs.epilog('This command shows the parameters the command expects with ' +
'the parameter descriptions.');
'the parameter descriptions.')
.usage('Usage: show commands <module>')
}, function(argv) {
maxctrl(argv, function(host) {
return getSubCollection(host, 'maxscale/modules/' + argv.module, 'attributes.commands', [

View File

@ -18,14 +18,16 @@ exports.handler = function() {}
exports.builder = function(yargs) {
yargs
.command('service <name>', 'Start a service', function(yargs) {
return yargs.epilog('This starts a service stopped by `stop service <name>`');
return yargs.epilog('This starts a service stopped by `stop service <name>`')
.usage('Usage: start service <name>')
}, function(argv) {
maxctrl(argv, function(host) {
return doRequest(host, 'services/' + argv.name + '/start', null, {method: 'PUT'})
})
})
.command('monitor <name>', 'Start a monitor', function(yargs) {
return yargs.epilog('This starts a monitor stopped by `stop monitor <name>`');
return yargs.epilog('This starts a monitor stopped by `stop monitor <name>`')
.usage('Usage: start monitor <name>')
}, function(argv) {
maxctrl(argv, function(host) {
return doRequest(host, 'monitors/' + argv.name + '/start', null, {method: 'PUT'})
@ -33,7 +35,8 @@ exports.builder = function(yargs) {
})
.command('maxscale', 'Start MaxScale by starting all services', function(yargs) {
return yargs.epilog('This command will execute the `start service` command for ' +
'all services in MaxScale.');
'all services in MaxScale.')
.usage('Usage: start maxscale')
}, function(argv) {
maxctrl(argv, function(host) {
return doRequest(host, 'services/', function(res) {

View File

@ -20,7 +20,8 @@ exports.builder = function(yargs) {
.command('service <name>', 'Stop a service', function(yargs) {
return yargs.epilog('Stopping a service will prevent all the listeners for that service ' +
'from accepting new connections. Existing connections will still be ' +
'handled normally until they are closed.');
'handled normally until they are closed.')
.usage('Usage: stop service <name>')
}, function(argv) {
maxctrl(argv, function(host) {
return doRequest(host, 'services/' + argv.name + '/stop', null, {method: 'PUT'})
@ -29,7 +30,8 @@ exports.builder = function(yargs) {
.command('monitor <name>', 'Stop a monitor', function(yargs) {
return yargs.epilog('Stopping a monitor will pause the monitoring of the servers. ' +
'This can be used to manually control server states with the ' +
'`set server` command.');
'`set server` command.')
.usage('Usage: stop monitor <name>')
}, function(argv) {
maxctrl(argv, function(host) {
return doRequest(host, 'monitors/' + argv.name + '/stop', null, {method: 'PUT'})
@ -37,7 +39,8 @@ exports.builder = function(yargs) {
})
.command('maxscale', 'Stop MaxScale by stopping all services', function(yargs) {
return yargs.epilog('This command will execute the `stop service` command for ' +
'all services in MaxScale.');
'all services in MaxScale.')
.usage('Usage: stop maxscale')
}, function(argv) {
maxctrl(argv, function(host) {
return doRequest(host, 'services/', function(res) {

View File

@ -39,14 +39,16 @@ exports.builder = function(yargs) {
return yargs.epilog('This command unlinks servers from a service, removing them from ' +
'the list of available servers for that service. New connections to ' +
'the service will not use the unlinked servers but existing ' +
'connections can still use the servers.');
'connections can still use the servers.')
.usage('Usage: unlink service <name> <server...>')
}, function(argv) {
removeServer(argv, 'services/' + argv.name, argv.server)
})
.command('monitor <name> <server...>', 'Unlink servers from a monitor', function(yargs) {
return yargs.epilog('This command unlinks servers from a monitor, removing them from ' +
'the list of monitored servers. The servers will be left in their ' +
'current state when they are unlinked from a monitor.');
'current state when they are unlinked from a monitor.')
.usage('Usage: unlink monitor <name> <server...>')
}, function(argv) {
removeServer(argv, 'monitors/' + argv.name, argv.server)
})

View File

@ -82,7 +82,7 @@ int main(int argc, char *argv[])
Test->set_timeout(30);
Test->tprintf("Trying query to RWSplit, expecting failure, but not a crash\n");
test.try_query(Test->conn_rwsplit, "show processlist;");
Test->add_result(execute_query(Test->conn_rwsplit, "show processlist;") == 0, "Query should fail");
Test->tprintf("Setup firewall back to allow mysql\n");
Test->repl->unblock_node(0);
fflush(stdout);

View File

@ -1203,7 +1203,7 @@ bool config_get_compiled_regexes(const MXS_CONFIG_PARAMETER *params,
{
ss_dassert(out_codes[i]);
*out_codes[i] = config_get_compiled_regex(params, keys[i], options,
&ovec_size_temp);
&ovec_size_temp);
if (*out_codes[i])
{
if (ovec_size_temp > max_ovec_size)

View File

@ -833,8 +833,8 @@ bool runtime_create_listener(SERVICE *service, const char *name, const char *add
if (ssl_key && ssl_cert && ssl_ca &&
(ssl = create_ssl(name, ssl_key, ssl_cert, ssl_ca, ssl_version, ssl_depth)) == NULL)
{
MXS_ERROR("SSL initialization for listener '%s' failed.", name);
runtime_error("SSL initialization for listener '%s' failed.", name);
MXS_ERROR("SSL initialization for listener '%s' failed.", name);
runtime_error("SSL initialization for listener '%s' failed.", name);
}
else
{
@ -1214,10 +1214,10 @@ static std::string json_int_to_string(json_t* json)
static inline bool have_ssl_json(json_t* params)
{
return mxs_json_pointer(params, CN_SSL_KEY) ||
mxs_json_pointer(params, CN_SSL_CERT) ||
mxs_json_pointer(params, CN_SSL_CA_CERT) ||
mxs_json_pointer(params, CN_SSL_VERSION) ||
mxs_json_pointer(params, CN_SSL_CERT_VERIFY_DEPTH);
mxs_json_pointer(params, CN_SSL_CERT) ||
mxs_json_pointer(params, CN_SSL_CA_CERT) ||
mxs_json_pointer(params, CN_SSL_VERSION) ||
mxs_json_pointer(params, CN_SSL_CERT_VERIFY_DEPTH);
}
static bool validate_ssl_json(json_t* params)
@ -1230,9 +1230,12 @@ static bool validate_ssl_json(json_t* params)
is_string_or_null(params, CN_SSL_VERSION) &&
is_count_or_null(params, CN_SSL_CERT_VERIFY_DEPTH))
{
if (!mxs_json_pointer(params, CN_SSL_KEY) ||
!mxs_json_pointer(params, CN_SSL_CERT) ||
!mxs_json_pointer(params, CN_SSL_CA_CERT))
if ((mxs_json_pointer(params, CN_SSL_KEY) ||
mxs_json_pointer(params, CN_SSL_CERT) ||
mxs_json_pointer(params, CN_SSL_CA_CERT)) &&
(!mxs_json_pointer(params, CN_SSL_KEY) ||
!mxs_json_pointer(params, CN_SSL_CERT) ||
!mxs_json_pointer(params, CN_SSL_CA_CERT)))
{
runtime_error("SSL configuration requires '%s', '%s' and '%s' parameters",
CN_SSL_KEY, CN_SSL_CERT, CN_SSL_CA_CERT);
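
The rewritten condition makes the SSL parameters an all-or-none group: supplying only some of ssl_key, ssl_cert and ssl_ca_cert is rejected, while supplying none of them simply means no SSL is configured. The same predicate in isolation, as a minimal sketch rather than MaxScale code:

// True when the three SSL parameters are consistent: either none of them
// is given, or all of them are.
static bool ssl_params_consistent(bool have_key, bool have_cert, bool have_ca)
{
    bool any = have_key || have_cert || have_ca;
    bool all = have_key && have_cert && have_ca;
    return !any || all;
}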

View File

@ -1158,7 +1158,7 @@ void dcb_final_close(DCB* dcb)
if (dcb->dcb_role == DCB_ROLE_BACKEND_HANDLER && // Backend DCB
dcb->state == DCB_STATE_POLLING && // Being polled
dcb->persistentstart == 0 && // Not already in (> 0) or being evicted from (-1)
// the persistent pool.
// the persistent pool.
dcb->server) // And has a server
{
/* May be a candidate for persistence, so save user name */

View File

@ -189,7 +189,7 @@ static void log_output(const char* cmd, const std::string& str)
MXS_ALERT("%s: %s", cmd, skip_prefix(str.c_str()));
}
else if (mxs_pcre2_simple_match("(?i)^[[:space:]]*error[[:space:]]*[:]",
str.c_str(), 0, &err) == MXS_PCRE2_MATCH)
str.c_str(), 0, &err) == MXS_PCRE2_MATCH)
{
MXS_ERROR("%s: %s", cmd, skip_prefix(str.c_str()));
}

View File

@ -210,7 +210,7 @@ const DEBUG_ARGUMENT debug_arguments[] =
},
{
"redirect-output-to-file", redirect_output_to_file,
"redirect stdout and stderr to the file given as an argument"
"redirect stdout and stderr to the file given as an argument"
},
{NULL, NULL, NULL}
};
@ -2734,7 +2734,7 @@ static int set_user(const char* user)
}
if (prctl(PR_GET_DUMPABLE) == 0)
{
if (prctl(PR_SET_DUMPABLE , 1) == -1)
if (prctl(PR_SET_DUMPABLE, 1) == -1)
{
printf("Error: Failed to set dumpable flag on for the process '%s': %d %s\n",
pwname->pw_name, errno, mxs_strerror(errno));

View File

@ -146,9 +146,9 @@ struct ValueFormatter
}
static int value_combine_cb(void *cls,
enum MHD_ValueKind kind,
const char *key,
const char *value)
enum MHD_ValueKind kind,
const char *key,
const char *value)
{
ValueFormatter& cnf = *(ValueFormatter*)cls;

View File

@ -360,12 +360,6 @@ struct cb_param
bool modulecmd_cb(const MODULECMD *cmd, void *data)
{
if (modulecmd_requires_output_dcb(cmd))
{
/** Module requires an output DCB, don't print it */
return true;
}
cb_param* d = static_cast<cb_param*>(data);
json_t* obj = json_object();

View File

@ -548,7 +548,7 @@ private:
// Significantly faster than library version.
static char toupper(char c)
{
return (c >= 'a' && c <='z') ? c - ('a' - 'A') : c;
return (c >= 'a' && c <= 'z') ? c - ('a' - 'A') : c;
}
token_t expect_token(const char* zWord, int len, token_t token)

View File

@ -61,8 +61,8 @@ struct WORKER_STATISTICS
};
class Worker : public MXS_WORKER
, private MessageQueue::Handler
, private MXS_POLL_DATA
, private MessageQueue::Handler
, private MXS_POLL_DATA
{
Worker(const Worker&);
Worker& operator = (const Worker&);

View File

@ -358,12 +358,6 @@ static bool process_argument(const MODULECMD *cmd, modulecmd_arg_type_t *type, c
}
break;
case MODULECMD_ARG_OUTPUT:
arg->type.type = MODULECMD_ARG_OUTPUT;
arg->value.dcb = (DCB*)value;
rval = true;
break;
default:
ss_dassert(false);
MXS_ERROR("Undefined argument type: %0lx", type->type);
@ -713,10 +707,6 @@ const char* modulecmd_argtype_to_str(modulecmd_arg_type_t *type)
rval = format_type(type, "FILTER");
break;
case MODULECMD_ARG_OUTPUT:
rval = format_type(type, "OUTPUT");
break;
default:
ss_dassert(false);
MXS_ERROR("Unknown type");
@ -731,17 +721,3 @@ bool modulecmd_arg_is_present(const MODULECMD_ARG *arg, int idx)
return arg->argc > idx &&
MODULECMD_GET_TYPE(&arg->argv[idx].type) != MODULECMD_ARG_NONE;
}
bool modulecmd_requires_output_dcb(const MODULECMD* cmd)
{
for (int i = 0; i < cmd->arg_count_max; i++)
{
if (cmd->arg_types[i].type == MODULECMD_ARG_OUTPUT)
{
/** We can't call this as it requires a DCB for output so don't show it */
return true;
}
}
return false;
}
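
With MODULECMD_ARG_OUTPUT and modulecmd_requires_output_dcb() removed, module commands report their results through the json_t** output parameter of the command callback instead of printing to a DCB. A minimal sketch of the resulting pattern, reusing the callback and registration signatures visible elsewhere in this diff; the "myfilter" domain and "status" command name are hypothetical:

#include <maxscale/modulecmd.h>
#include <maxscale/jansson.h>

// Results go into *output as JSON; maxctrl and the REST API serialize it.
static bool my_status_cb(const MODULECMD_ARG* args, json_t** output)
{
    json_t* obj = json_object();
    json_object_set_new(obj, "status", json_string("ok"));
    *output = obj;
    return true;
}

// No MODULECMD_ARG_OUTPUT entry any more; only the real arguments remain.
static modulecmd_arg_type_t my_status_argv[] =
{
    { MODULECMD_ARG_FILTER | MODULECMD_ARG_NAME_MATCHES_DOMAIN, "Filter to inspect" }
};

static void register_my_status_command()
{
    modulecmd_register_command("myfilter", "status", MODULECMD_TYPE_PASSIVE,
                               my_status_cb, 1, my_status_argv,
                               "Show filter status as JSON");
}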

View File

@ -1334,66 +1334,66 @@ const char* STRPACKETTYPE(int p)
{
switch (p)
{
case MXS_COM_SLEEP:
return "COM_SLEEP";
case MXS_COM_QUIT:
return "COM_QUIT";
case MXS_COM_INIT_DB:
return "COM_INIT_DB";
case MXS_COM_QUERY:
return "COM_QUERY";
case MXS_COM_FIELD_LIST:
return "COM_FIELD_LIST";
case MXS_COM_CREATE_DB:
return "COM_CREATE_DB";
case MXS_COM_DROP_DB:
return "COM_DROP_DB";
case MXS_COM_REFRESH:
return "COM_REFRESH";
case MXS_COM_SHUTDOWN:
return "COM_SHUTDOWN";
case MXS_COM_STATISTICS:
return "COM_STATISTICS";
case MXS_COM_PROCESS_INFO:
return "COM_PROCESS_INFO";
case MXS_COM_CONNECT:
return "COM_CONNECT";
case MXS_COM_PROCESS_KILL:
return "COM_PROCESS_KILL";
case MXS_COM_DEBUG:
return "COM_DEBUG";
case MXS_COM_PING:
return "COM_PING";
case MXS_COM_TIME:
return "COM_TIME";
case MXS_COM_DELAYED_INSERT:
return "COM_DELAYED_INSERT";
case MXS_COM_CHANGE_USER:
return "COM_CHANGE_USER";
case MXS_COM_BINLOG_DUMP:
return "COM_BINLOG_DUMP";
case MXS_COM_TABLE_DUMP:
return "COM_TABLE_DUMP";
case MXS_COM_CONNECT_OUT:
return "COM_CONNECT_OUT";
case MXS_COM_REGISTER_SLAVE:
return "COM_REGISTER_SLAVE";
case MXS_COM_STMT_PREPARE:
return "COM_STMT_PREPARE";
case MXS_COM_STMT_EXECUTE:
return "COM_STMT_EXECUTE";
case MXS_COM_STMT_SEND_LONG_DATA:
return "COM_STMT_SEND_LONG_DATA";
case MXS_COM_STMT_CLOSE:
return "COM_STMT_CLOSE";
case MXS_COM_STMT_RESET:
return "COM_STMT_RESET";
case MXS_COM_SET_OPTION:
return "COM_SET_OPTION";
case MXS_COM_STMT_FETCH:
return "COM_STMT_FETCH";
case MXS_COM_DAEMON:
return "COM_DAEMON";
case MXS_COM_SLEEP:
return "COM_SLEEP";
case MXS_COM_QUIT:
return "COM_QUIT";
case MXS_COM_INIT_DB:
return "COM_INIT_DB";
case MXS_COM_QUERY:
return "COM_QUERY";
case MXS_COM_FIELD_LIST:
return "COM_FIELD_LIST";
case MXS_COM_CREATE_DB:
return "COM_CREATE_DB";
case MXS_COM_DROP_DB:
return "COM_DROP_DB";
case MXS_COM_REFRESH:
return "COM_REFRESH";
case MXS_COM_SHUTDOWN:
return "COM_SHUTDOWN";
case MXS_COM_STATISTICS:
return "COM_STATISTICS";
case MXS_COM_PROCESS_INFO:
return "COM_PROCESS_INFO";
case MXS_COM_CONNECT:
return "COM_CONNECT";
case MXS_COM_PROCESS_KILL:
return "COM_PROCESS_KILL";
case MXS_COM_DEBUG:
return "COM_DEBUG";
case MXS_COM_PING:
return "COM_PING";
case MXS_COM_TIME:
return "COM_TIME";
case MXS_COM_DELAYED_INSERT:
return "COM_DELAYED_INSERT";
case MXS_COM_CHANGE_USER:
return "COM_CHANGE_USER";
case MXS_COM_BINLOG_DUMP:
return "COM_BINLOG_DUMP";
case MXS_COM_TABLE_DUMP:
return "COM_TABLE_DUMP";
case MXS_COM_CONNECT_OUT:
return "COM_CONNECT_OUT";
case MXS_COM_REGISTER_SLAVE:
return "COM_REGISTER_SLAVE";
case MXS_COM_STMT_PREPARE:
return "COM_STMT_PREPARE";
case MXS_COM_STMT_EXECUTE:
return "COM_STMT_EXECUTE";
case MXS_COM_STMT_SEND_LONG_DATA:
return "COM_STMT_SEND_LONG_DATA";
case MXS_COM_STMT_CLOSE:
return "COM_STMT_CLOSE";
case MXS_COM_STMT_RESET:
return "COM_STMT_RESET";
case MXS_COM_SET_OPTION:
return "COM_SET_OPTION";
case MXS_COM_STMT_FETCH:
return "COM_STMT_FETCH";
case MXS_COM_DAEMON:
return "COM_DAEMON";
}
snprintf(unknow_type, sizeof(unknow_type), format_str, p);

View File

@ -803,7 +803,7 @@ qc_sql_mode_t qc_get_sql_mode()
qc_sql_mode_t sql_mode;
ss_debug(int32_t rv =) classifier->qc_get_sql_mode(&sql_mode);
ss_debug(int32_t rv = ) classifier->qc_get_sql_mode(&sql_mode);
ss_dassert(rv == QC_RESULT_OK);
return sql_mode;
@ -814,6 +814,6 @@ void qc_set_sql_mode(qc_sql_mode_t sql_mode)
QC_TRACE();
ss_dassert(classifier);
ss_debug(int32_t rv =) classifier->qc_set_sql_mode(sql_mode);
ss_debug(int32_t rv = ) classifier->qc_set_sql_mode(sql_mode);
ss_dassert(rv == QC_RESULT_OK);
}

View File

@ -638,7 +638,7 @@ HttpResponse cb_modulecmd(const HttpRequest& request)
const MODULECMD* cmd = modulecmd_find_command(module.c_str(), identifier.c_str());
if (cmd && !modulecmd_requires_output_dcb(cmd))
if (cmd)
{
if ((!MODULECMD_MODIFIES_DATA(cmd) && verb == MHD_HTTP_METHOD_GET) ||
(MODULECMD_MODIFIES_DATA(cmd) && verb == MHD_HTTP_METHOD_POST))

View File

@ -26,7 +26,7 @@ void Semaphore::get_current_timespec(time_t seconds,
timespec& ts = *pTs;
ss_debug(int rc=) clock_gettime(CLOCK_REALTIME, &ts);
ss_debug(int rc = ) clock_gettime(CLOCK_REALTIME, &ts);
ss_dassert(rc == 0);
ts.tv_sec += seconds;

View File

@ -566,7 +566,8 @@ dprintServer(DCB *dcb, const SERVER *server)
dcb_printf(dcb, "\tPersistent pool size limit: %ld\n", server->persistpoolmax);
dcb_printf(dcb, "\tPersistent max time (secs): %ld\n", server->persistmaxtime);
dcb_printf(dcb, "\tConnections taken from pool: %lu\n", server->stats.n_from_pool);
double d = (double)server->stats.n_from_pool / (double)(server->stats.n_connections + server->stats.n_from_pool + 1);
double d = (double)server->stats.n_from_pool / (double)(server->stats.n_connections +
server->stats.n_from_pool + 1);
dcb_printf(dcb, "\tPool availability: %0.2lf%%\n", d * 100.0);
}
if (server->server_ssl)

View File

@ -393,7 +393,7 @@ int serviceStartAllPorts(SERVICE* service)
/** Service failed to start any ports. Try again later. */
service->stats.n_failed_starts++;
char taskname[strlen(service->name) + strlen("_start_retry_") +
(int) ceil(log10(INT_MAX)) + 1];
(int) ceil(log10(INT_MAX)) + 1];
int retry_after = MXS_MIN(service->stats.n_failed_starts * 10, service->max_retry_interval);
snprintf(taskname, sizeof(taskname), "%s_start_retry_%d",
service->name, service->stats.n_failed_starts);
@ -788,9 +788,9 @@ bool serviceHasListener(SERVICE* service, const char* name, const char* protocol
// Listener with same name exists
(strcmp(listener->name, name) == 0 ||
// Listener listening on the same interface and port exists
((strcmp(listener->protocol, protocol) == 0 && listener->port == port &&
((address && listener->address && strcmp(listener->address, address) == 0) ||
(address == NULL && listener->address == NULL))))))
((strcmp(listener->protocol, protocol) == 0 && listener->port == port &&
((address && listener->address && strcmp(listener->address, address) == 0) ||
(address == NULL && listener->address == NULL))))))
{
return true;
}

View File

@ -473,7 +473,8 @@ test1()
ss_info_dassert(hint == buffer->hint, "Buffer should point to first and only hint");
ss_dfprintf(stderr, "\t..done\nSet a property for the buffer");
gwbuf_add_property(buffer, (char*)"name", (char*)"value");
ss_info_dassert(0 == strcmp("value", gwbuf_get_property(buffer, (char*)"name")), "Should now have correct property");
ss_info_dassert(0 == strcmp("value", gwbuf_get_property(buffer, (char*)"name")),
"Should now have correct property");
strcpy((char*)GWBUF_DATA(buffer), "The quick brown fox jumps over the lazy dog");
ss_dfprintf(stderr, "\t..done\nLoad some data into the buffer");
ss_info_dassert('q' == GWBUF_DATA_CHAR(buffer, 4), "Fourth character of buffer must be 'q'");

View File

@ -62,7 +62,8 @@ test1()
ss_dfprintf(stderr, "\t..done\nExtract SQL from buffer different way?");
ss_info_dassert(0 == modutil_MySQL_Query(buffer, &sql, &length, &residual), "Default buffer should fail");
ss_dfprintf(stderr, "\t..done\nReplace SQL in buffer");
ss_info_dassert(0 == modutil_replace_SQL(buffer, (char*)"select * from some_table;"), "Default buffer should fail");
ss_info_dassert(0 == modutil_replace_SQL(buffer, (char*)"select * from some_table;"),
"Default buffer should fail");
ss_dfprintf(stderr, "\t..done\nTidy up.");
gwbuf_free(buffer);
ss_dfprintf(stderr, "\t..done\n");

View File

@ -89,21 +89,21 @@ struct test_case
{ "START TRANSACTION WITH CONSISTENT SNAPSHOT, READ ONLY", QUERY_TYPE_BEGIN_TRX | QUERY_TYPE_READ },
{ "SET AUTOCOMMIT=true", QUERY_TYPE_COMMIT|QUERY_TYPE_ENABLE_AUTOCOMMIT },
{ "SET AUTOCOMMIT=true", QUERY_TYPE_COMMIT | QUERY_TYPE_ENABLE_AUTOCOMMIT },
{ "SET AUTOCOMMIT=1", QUERY_TYPE_COMMIT|QUERY_TYPE_ENABLE_AUTOCOMMIT },
{ "SET AUTOCOMMIT=1", QUERY_TYPE_COMMIT | QUERY_TYPE_ENABLE_AUTOCOMMIT },
{ "SET AUTOCOMMIT=false", QUERY_TYPE_BEGIN_TRX|QUERY_TYPE_DISABLE_AUTOCOMMIT },
{ "SET AUTOCOMMIT=false", QUERY_TYPE_BEGIN_TRX | QUERY_TYPE_DISABLE_AUTOCOMMIT },
{ "SET AUTOCOMMIT=0", QUERY_TYPE_BEGIN_TRX|QUERY_TYPE_DISABLE_AUTOCOMMIT },
{ "SET @@AUTOCOMMIT=0", QUERY_TYPE_BEGIN_TRX|QUERY_TYPE_DISABLE_AUTOCOMMIT },
{ "SET GLOBAL AUTOCOMMIT=0", QUERY_TYPE_BEGIN_TRX|QUERY_TYPE_DISABLE_AUTOCOMMIT },
{ "SET SESSION AUTOCOMMIT=0", QUERY_TYPE_BEGIN_TRX|QUERY_TYPE_DISABLE_AUTOCOMMIT },
{ "SET @@SESSION . AUTOCOMMIT=0", QUERY_TYPE_BEGIN_TRX|QUERY_TYPE_DISABLE_AUTOCOMMIT },
{ "SET @@GLOBAL . AUTOCOMMIT=0", QUERY_TYPE_BEGIN_TRX|QUERY_TYPE_DISABLE_AUTOCOMMIT },
{ "SET AUTOCOMMIT=0", QUERY_TYPE_BEGIN_TRX | QUERY_TYPE_DISABLE_AUTOCOMMIT },
{ "SET @@AUTOCOMMIT=0", QUERY_TYPE_BEGIN_TRX | QUERY_TYPE_DISABLE_AUTOCOMMIT },
{ "SET GLOBAL AUTOCOMMIT=0", QUERY_TYPE_BEGIN_TRX | QUERY_TYPE_DISABLE_AUTOCOMMIT },
{ "SET SESSION AUTOCOMMIT=0", QUERY_TYPE_BEGIN_TRX | QUERY_TYPE_DISABLE_AUTOCOMMIT },
{ "SET @@SESSION . AUTOCOMMIT=0", QUERY_TYPE_BEGIN_TRX | QUERY_TYPE_DISABLE_AUTOCOMMIT },
{ "SET @@GLOBAL . AUTOCOMMIT=0", QUERY_TYPE_BEGIN_TRX | QUERY_TYPE_DISABLE_AUTOCOMMIT },
};
const size_t N_TEST_CASES = sizeof(test_cases)/sizeof(test_cases[0]);
const size_t N_TEST_CASES = sizeof(test_cases) / sizeof(test_cases[0]);
bool test(uint32_t (*getter)(GWBUF*), const char* zStmt, uint32_t expected_type_mask)

View File

@ -989,7 +989,8 @@ static void set_port(struct sockaddr_storage *addr, uint16_t port)
}
}
int open_network_socket(enum mxs_socket_type type, struct sockaddr_storage *addr, const char *host, uint16_t port)
int open_network_socket(enum mxs_socket_type type, struct sockaddr_storage *addr, const char *host,
uint16_t port)
{
ss_dassert(type == MXS_SOCKET_NETWORK || type == MXS_SOCKET_LISTENER);
struct addrinfo *ai = NULL, hint = {};

View File

@ -330,7 +330,7 @@ static void merge_netmask(char *host)
*/
*delimiter_loc = '/';
MXS_ERROR("Unrecognized IP-bytes in host/mask-combination. "
"Merge incomplete: %s", host);
"Merge incomplete: %s", host);
return;
}
@ -346,7 +346,7 @@ static void merge_netmask(char *host)
{
*delimiter_loc = '/';
MXS_ERROR("Unequal number of IP-bytes in host/mask-combination. "
"Merge incomplete: %s", host);
"Merge incomplete: %s", host);
}
}
@ -463,21 +463,21 @@ static int gw_mysql_set_timeouts(MYSQL* handle)
MXS_CONFIG* cnf = config_get_global_options();
if ((rc = mysql_optionsv(handle, MYSQL_OPT_READ_TIMEOUT,
(void *) &cnf->auth_read_timeout)))
(void *) &cnf->auth_read_timeout)))
{
MXS_ERROR("Failed to set read timeout for backend connection.");
goto retblock;
}
if ((rc = mysql_optionsv(handle, MYSQL_OPT_CONNECT_TIMEOUT,
(void *) &cnf->auth_conn_timeout)))
(void *) &cnf->auth_conn_timeout)))
{
MXS_ERROR("Failed to set connect timeout for backend connection.");
goto retblock;
}
if ((rc = mysql_optionsv(handle, MYSQL_OPT_WRITE_TIMEOUT,
(void *) &cnf->auth_write_timeout)))
(void *) &cnf->auth_write_timeout)))
{
MXS_ERROR("Failed to set write timeout for backend connection.");
goto retblock;
@ -537,7 +537,7 @@ static bool check_server_permissions(SERVICE *service, SERVER* server,
const char *template = "SELECT user, host, %s, Select_priv FROM mysql.user limit 1";
const char* query_pw = strstr(server->version_string, "5.7.") ?
MYSQL57_PASSWORD : MYSQL_PASSWORD;
MYSQL57_PASSWORD : MYSQL_PASSWORD;
char query[strlen(template) + strlen(query_pw) + 1];
bool rval = true;
sprintf(query, template, query_pw);

View File

@ -250,7 +250,7 @@ void PamClientSession::get_pam_user_services(const DCB* dcb, const MYSQL_session
if (i == 0 || service_refresh_users(dcb->service) == 0)
{
if (sqlite3_exec(m_dbhandle, services_query.c_str(), user_services_cb,
services_out, &err) != SQLITE_OK)
services_out, &err) != SQLITE_OK)
{
MXS_ERROR("Failed to execute query: '%s'", err);
sqlite3_free(err);

View File

@ -16,14 +16,14 @@
namespace
{
/**
* Check that the AuthSwitchRequest packet is as expected. Inverse of
* create_auth_change_packet() in pam_auth.cc.
*
* @param dcb Backend DCB
* @param buffer Buffer containing an AuthSwitchRequest packet
* @return True on success, false on error
*/
/**
* Check that the AuthSwitchRequest packet is as expected. Inverse of
* create_auth_change_packet() in pam_auth.cc.
*
* @param dcb Backend DCB
* @param buffer Buffer containing an AuthSwitchRequest packet
* @return True on success, false on error
*/
bool check_auth_switch_request(DCB *dcb, GWBUF *buffer)
{
/**

View File

@ -90,18 +90,14 @@ void cache_config_reset(CACHE_CONFIG& config)
*/
bool cache_command_show(const MODULECMD_ARG* pArgs, json_t** output)
{
ss_dassert(pArgs->argc == 2);
ss_dassert(MODULECMD_GET_TYPE(&pArgs->argv[0].type) == MODULECMD_ARG_OUTPUT);
ss_dassert(pArgs->argc == 1);
ss_dassert(MODULECMD_GET_TYPE(&pArgs->argv[1].type) == MODULECMD_ARG_FILTER);
DCB* pDcb = pArgs->argv[0].value.dcb;
ss_dassert(pDcb);
const MXS_FILTER_DEF* pFilterDef = pArgs->argv[1].value.filter;
ss_dassert(pFilterDef);
CacheFilter* pFilter = reinterpret_cast<CacheFilter*>(filter_def_get_instance(pFilterDef));
MXS_EXCEPTION_GUARD(pFilter->cache().show(pDcb));
MXS_EXCEPTION_GUARD(*output = pFilter->cache().show_json());
return true;
}
@ -146,7 +142,6 @@ extern "C" MXS_MODULE* MXS_CREATE_MODULE()
{
static modulecmd_arg_type_t show_argv[] =
{
{ MODULECMD_ARG_OUTPUT, "The output dcb" },
{ MODULECMD_ARG_FILTER | MODULECMD_ARG_NAME_MATCHES_DOMAIN, "Cache name" }
};

View File

@ -79,8 +79,8 @@ const char* NON_CACHEABLE_VARIABLES[] =
"localtimestamp",
};
const size_t N_NON_CACHEABLE_FUNCTIONS = sizeof(NON_CACHEABLE_FUNCTIONS)/sizeof(NON_CACHEABLE_FUNCTIONS[0]);
const size_t N_NON_CACHEABLE_VARIABLES = sizeof(NON_CACHEABLE_VARIABLES)/sizeof(NON_CACHEABLE_VARIABLES[0]);
const size_t N_NON_CACHEABLE_FUNCTIONS = sizeof(NON_CACHEABLE_FUNCTIONS) / sizeof(NON_CACHEABLE_FUNCTIONS[0]);
const size_t N_NON_CACHEABLE_VARIABLES = sizeof(NON_CACHEABLE_VARIABLES) / sizeof(NON_CACHEABLE_VARIABLES[0]);
int compare_name(const void* pLeft, const void* pRight)
{
@ -146,7 +146,7 @@ bool is_select_statement(GWBUF* pStmt)
char* pSql;
int len;
ss_debug(int rc =) modutil_extract_SQL(pStmt, &pSql, &len);
ss_debug(int rc = ) modutil_extract_SQL(pStmt, &pSql, &len);
ss_dassert(rc == 1);
char* pSql_end = pSql + len;

View File

@ -66,14 +66,14 @@ struct user_test_case
const struct user_test_case user_test_cases[] =
{
USER_TEST_CASE(=, bob, CACHE_OP_LIKE, bob@.*),
USER_TEST_CASE(=, 'bob', CACHE_OP_LIKE, bob@.*),
USER_TEST_CASE(=, bob@%, CACHE_OP_LIKE, bob@.*),
USER_TEST_CASE(=, 'bob'@'%.52', CACHE_OP_LIKE, bob@.*\\.52),
USER_TEST_CASE(=, bob@127.0.0.1, CACHE_OP_EQ, bob@127.0.0.1),
USER_TEST_CASE(=, b*b@127.0.0.1, CACHE_OP_EQ, b*b@127.0.0.1),
USER_TEST_CASE(=, b*b@%.0.0.1, CACHE_OP_LIKE, b\\*b@.*\\.0\\.0\\.1),
USER_TEST_CASE(=, b*b@%.0.%.1, CACHE_OP_LIKE, b\\*b@.*\\.0\\..*\\.1),
USER_TEST_CASE( =, bob, CACHE_OP_LIKE, bob@.*),
USER_TEST_CASE( =, 'bob', CACHE_OP_LIKE, bob@.*),
USER_TEST_CASE( =, bob@ %, CACHE_OP_LIKE, bob@.*),
USER_TEST_CASE( =, 'bob'@'%.52', CACHE_OP_LIKE, bob@.*\\.52),
USER_TEST_CASE( =, bob@127.0.0.1, CACHE_OP_EQ, bob@127.0.0.1),
USER_TEST_CASE( =, b*b@127.0.0.1, CACHE_OP_EQ, b*b@127.0.0.1),
USER_TEST_CASE( =, b*b@ % .0.0.1, CACHE_OP_LIKE, b\\*b@.*\\.0\\.0\\.1),
USER_TEST_CASE( =, b*b@ % .0. % .1, CACHE_OP_LIKE, b\\*b@.*\\.0\\..*\\.1),
};
const size_t n_user_test_cases = sizeof(user_test_cases) / sizeof(user_test_cases[0]);

View File

@ -209,9 +209,9 @@ createInstance(const char *name, char **options, MXS_CONFIG_PARAMETER *params)
const char* keys[] = {PARAM_MATCH, PARAM_IGNORE};
pcre2_code** code_arr[] = {&my_instance->re, &my_instance->nore};
if (!config_get_compiled_regexes(params, keys, sizeof(keys)/sizeof(char*),
cflags, &my_instance->ovector_size,
code_arr))
if (!config_get_compiled_regexes(params, keys, sizeof(keys) / sizeof(char*),
cflags, &my_instance->ovector_size,
code_arr))
{
MXS_FREE(my_instance->match);
MXS_FREE(my_instance->nomatch);

View File

@ -78,6 +78,7 @@
#include <maxscale/pcre2.h>
#include <maxscale/alloc.h>
#include <maxscale/spinlock.hh>
#include <maxscale/utils.h>
#include "rules.hh"
#include "user.hh"
@ -99,7 +100,7 @@ namespace
struct DbfwThread
{
DbfwThread():
rule_version(0)
rule_version(0)
{
}
@ -499,27 +500,17 @@ MXS_MODULE* MXS_CREATE_MODULE()
};
modulecmd_register_command(MXS_MODULE_NAME, "rules/reload", MODULECMD_TYPE_ACTIVE,
dbfw_reload_rules, 2, args_rules_reload,
"Reload dbfwfilter rules");
modulecmd_arg_type_t args_rules_show[] =
{
{MODULECMD_ARG_OUTPUT, "DCB where result is written"},
{MODULECMD_ARG_FILTER | MODULECMD_ARG_NAME_MATCHES_DOMAIN, "Filter to inspect"}
};
modulecmd_register_command(MXS_MODULE_NAME, "rules", MODULECMD_TYPE_PASSIVE,
dbfw_show_rules, 2, args_rules_show,
"(deprecated) Show dbfwfilter rule statistics");
dbfw_reload_rules, MXS_ARRAY_NELEMS(args_rules_reload),
args_rules_reload, "Reload dbfwfilter rules");
modulecmd_arg_type_t args_rules_show_json[] =
{
{MODULECMD_ARG_FILTER | MODULECMD_ARG_NAME_MATCHES_DOMAIN, "Filter to inspect"}
};
modulecmd_register_command(MXS_MODULE_NAME, "rules/json", MODULECMD_TYPE_PASSIVE,
dbfw_show_rules_json, 1, args_rules_show_json,
"Show dbfwfilter rule statistics as JSON");
modulecmd_register_command(MXS_MODULE_NAME, "rules", MODULECMD_TYPE_PASSIVE,
dbfw_show_rules_json, MXS_ARRAY_NELEMS(args_rules_show_json),
args_rules_show_json, "Show dbfwfilter rule statistics");
static MXS_MODULE info =
{
@ -689,7 +680,7 @@ void dbfw_yyerror(void* scanner, const char* error)
*/
static SRule find_rule_by_name(const RuleList& rules, std::string name)
{
for (RuleList::const_iterator it = rules.begin(); it != rules.end(); it++)
for (RuleList::const_iterator it = rules.begin(); it != rules.end(); it++)
{
if ((*it)->name() == name)
{
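
Passing MXS_ARRAY_NELEMS(...) instead of a hard-coded count ties the argument count handed to modulecmd_register_command() to the argument array itself, so editing the array cannot leave a stale count behind. The macro, pulled in through the <maxscale/utils.h> include added above, presumably reduces to the usual sizeof-based element count; a small sketch of the idea with hypothetical data:

// Compile-time element count of a C array (assumed equivalent to MXS_ARRAY_NELEMS).
#define ARRAY_NELEMS(array) (sizeof(array) / sizeof((array)[0]))

static const char* rule_files[] = { "rules-a.txt", "rules-b.txt" };
static const size_t n_rule_files = ARRAY_NELEMS(rule_files);   // evaluates to 2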

View File

@ -14,6 +14,7 @@
#define MXS_MODULE_NAME "masking"
#include "maskingfilter.hh"
#include <maxscale/json_api.h>
#include <maxscale/modulecmd.h>
#include <maxscale/paths.h>
#include <maxscale/utils.h>
@ -37,20 +38,23 @@ char VERSION_STRING[] = "V1.0.0";
*/
bool masking_command_reload(const MODULECMD_ARG* pArgs, json_t** output)
{
ss_dassert(pArgs->argc == 2);
ss_dassert(MODULECMD_GET_TYPE(&pArgs->argv[0].type) == MODULECMD_ARG_OUTPUT);
ss_dassert(pArgs->argc == 1);
ss_dassert(MODULECMD_GET_TYPE(&pArgs->argv[1].type) == MODULECMD_ARG_FILTER);
DCB* pDcb = pArgs->argv[0].value.dcb;
ss_dassert(pDcb);
const MXS_FILTER_DEF* pFilterDef = pArgs->argv[1].value.filter;
ss_dassert(pFilterDef);
MaskingFilter* pFilter = reinterpret_cast<MaskingFilter*>(filter_def_get_instance(pFilterDef));
MXS_EXCEPTION_GUARD(pFilter->reload(pDcb));
bool rv = false;
MXS_EXCEPTION_GUARD(rv = pFilter->reload());
return true;
if (!rv)
{
modulecmd_set_error("Could not reload the rules. Check the log file "
"for more detailed information.");
}
return rv;
}
}
@ -63,7 +67,6 @@ extern "C" MXS_MODULE* MXS_CREATE_MODULE()
{
static modulecmd_arg_type_t reload_argv[] =
{
{ MODULECMD_ARG_OUTPUT, "The output dcb" },
{ MODULECMD_ARG_FILTER | MODULECMD_ARG_NAME_MATCHES_DOMAIN, "Masking name" }
};
@ -91,12 +94,16 @@ extern "C" MXS_MODULE* MXS_CREATE_MODULE()
NULL, /* Thread finish. */
{
{ Config::rules_name, MXS_MODULE_PARAM_STRING, NULL, MXS_MODULE_OPT_REQUIRED },
{ Config::warn_type_mismatch_name,
MXS_MODULE_PARAM_ENUM, Config::warn_type_mismatch_default,
MXS_MODULE_OPT_NONE, Config::warn_type_mismatch_values },
{ Config::large_payload_name,
MXS_MODULE_PARAM_ENUM, Config::large_payload_default,
MXS_MODULE_OPT_NONE, Config::large_payload_values },
{
Config::warn_type_mismatch_name,
MXS_MODULE_PARAM_ENUM, Config::warn_type_mismatch_default,
MXS_MODULE_OPT_NONE, Config::warn_type_mismatch_values
},
{
Config::large_payload_name,
MXS_MODULE_PARAM_ENUM, Config::large_payload_default,
MXS_MODULE_OPT_NONE, Config::large_payload_values
},
{ MXS_END_MODULE_PARAMS }
}
};
@ -164,19 +171,16 @@ std::tr1::shared_ptr<MaskingRules> MaskingFilter::rules() const
return m_sRules;
}
void MaskingFilter::reload(DCB* pOut)
bool MaskingFilter::reload()
{
bool rval = false;
auto_ptr<MaskingRules> sRules = MaskingRules::load(m_config.rules().c_str());
if (sRules.get())
{
m_sRules = sRules;
rval = true;
}
dcb_printf(pOut, "Rules reloaded.\n");
}
else
{
dcb_printf(pOut, "Could not reload the rules. Check the log file for more "
"detailed information.\n");
}
return rval;
}

View File

@ -38,7 +38,7 @@ public:
uint64_t getCapabilities();
void reload(DCB* pOut);
bool reload();
const Config& config() const
{

View File

@ -131,7 +131,7 @@ void MaskingFilterSession::handle_response(GWBUF* pPacket)
switch (response.type())
{
case ComResponse::OK_PACKET:
// We'll end up here also in the case of a multi-result.
// We'll end up here also in the case of a multi-result.
case ComResponse::LOCAL_INFILE_PACKET: // GET_MORE_CLIENT_DATA/SEND_MORE_CLIENT_DATA
m_state = EXPECTING_NOTHING;
break;

View File

@ -601,8 +601,8 @@ static bool rule_check_database_options(json_t* pColumn,
rule_type,
KEY_TABLE,
KEY_DATABASE);
}
return false;
}
return false;
}
}
@ -1189,11 +1189,11 @@ void MaskingRules::ObfuscateRule::rewrite(LEncString& s) const
for (LEncString::iterator i = s.begin(); i <= s.end(); i++)
{
// ASCII 32 is first printable char
unsigned char d = abs((char)(*i ^ c)) + 32;
c += d << 3;
// ASCII 126 is last printable char
*i = d <= 126 ? d : 126;
// ASCII 32 is first printable char
unsigned char d = abs((char)(*i ^ c)) + 32;
c += d << 3;
// ASCII 126 is last printable char
*i = d <= 126 ? d : 126;
}
}

View File

@ -62,14 +62,14 @@ static void diagnostics(MXS_FILTER *instance,
MXS_FILTER_SESSION *sdata,
DCB *dcb);
static json_t* diagnostics_json(const MXS_FILTER *instance,
const MXS_FILTER_SESSION *sdata);
const MXS_FILTER_SESSION *sdata);
static uint64_t getCapabilities(MXS_FILTER *instance);
enum maxrows_return_mode
{
MAXROWS_RETURN_EMPTY = 0,
MAXROWS_RETURN_ERR,
MAXROWS_RETURN_OK
MAXROWS_RETURN_EMPTY = 0,
MAXROWS_RETURN_ERR,
MAXROWS_RETURN_OK
};
static const MXS_ENUM_VALUE return_option_values[] =
@ -844,7 +844,7 @@ static int handle_rows(MAXROWS_SESSION_DATA *csdata, GWBUF* buffer, size_t extra
*/
if (pending_large_data &&
(packetlen >= MYSQL_HEADER_LEN &&
packetlen < MYSQL_EOF_PACKET_LEN))
packetlen < MYSQL_EOF_PACKET_LEN))
{
// Update offset, number of rows and break
offset += packetlen;
@ -1152,12 +1152,13 @@ static int send_ok_upstream(MAXROWS_SESSION_DATA *csdata)
{
/* Note: sequence id is always 01 (4th byte) */
const static uint8_t ok[MYSQL_OK_PACKET_MIN_LEN] = { 07, 00, 00, 01, 00, 00,
00, 02, 00, 00, 00 };
00, 02, 00, 00, 00
};
ss_dassert(csdata->res.data != NULL);
GWBUF *packet = gwbuf_alloc(MYSQL_OK_PACKET_MIN_LEN);
if(!packet)
if (!packet)
{
/* Abort client connection */
poll_fake_hangup_event(csdata->session->client_dcb);
@ -1275,21 +1276,21 @@ static int send_error_upstream(MAXROWS_SESSION_DATA *csdata)
*/
static int send_maxrows_reply_limit(MAXROWS_SESSION_DATA *csdata)
{
switch(csdata->instance->config.m_return)
switch (csdata->instance->config.m_return)
{
case MAXROWS_RETURN_EMPTY:
return send_eof_upstream(csdata);
break;
case MAXROWS_RETURN_OK:
return send_ok_upstream(csdata);
break;
case MAXROWS_RETURN_ERR:
return send_error_upstream(csdata);
break;
default:
MXS_ERROR("MaxRows config value not expected!");
ss_dassert(!true);
return 0;
break;
case MAXROWS_RETURN_EMPTY:
return send_eof_upstream(csdata);
break;
case MAXROWS_RETURN_OK:
return send_ok_upstream(csdata);
break;
case MAXROWS_RETURN_ERR:
return send_error_upstream(csdata);
break;
default:
MXS_ERROR("MaxRows config value not expected!");
ss_dassert(!true);
return 0;
break;
}
}

View File

@ -41,7 +41,7 @@ const MXS_ENUM_VALUE capability_values[] =
};
size_t RCAP_TYPE_NAME_MAXLEN = 30; // strlen(RCAP_TYPE_TRANSACTION_TRACKING)
size_t RCAP_TYPE_COUNT = sizeof(capability_values)/sizeof(capability_values[0]);
size_t RCAP_TYPE_COUNT = sizeof(capability_values) / sizeof(capability_values[0]);
}

View File

@ -44,7 +44,7 @@ LocalClient::~LocalClient()
{
if (m_state != VC_ERROR)
{
close(m_sock);
close();
}
}
@ -65,10 +65,21 @@ bool LocalClient::queue_query(GWBUF* buffer)
return my_buf != NULL;
}
void LocalClient::close()
{
mxs::Worker* worker = mxs::Worker::get_current();
ss_dassert(worker);
worker->remove_fd(m_sock);
::close(m_sock);
}
void LocalClient::error()
{
close(m_sock);
m_state = VC_ERROR;
if (m_state != VC_ERROR)
{
close();
m_state = VC_ERROR;
}
}
void LocalClient::process(uint32_t events)
@ -228,7 +239,7 @@ LocalClient* LocalClient::create(MXS_SESSION* session, SERVICE* service)
int fd = open_network_socket(MXS_SOCKET_NETWORK, &addr, "127.0.0.1",
service->ports->port);
if (connect(fd, (struct sockaddr*)&addr, sizeof(addr)) == 0 || errno == EINPROGRESS)
if (fd > 0 && (connect(fd, (struct sockaddr*)&addr, sizeof(addr)) == 0 || errno == EINPROGRESS))
{
LocalClient* relay = new (std::nothrow) LocalClient(session, fd);
@ -242,12 +253,17 @@ LocalClient* LocalClient::create(MXS_SESSION* session, SERVICE* service)
}
else
{
relay->m_state = VC_ERROR;
delete rval;
rval = NULL;
}
}
}
if (rval == NULL && fd > 0)
{
::close(fd);
}
break;
}
}
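
The new LocalClient::close() removes the socket from the owning worker before closing it, error() now only runs the teardown once, and create() closes the descriptor itself if the relay could not be set up. A generic sketch of the same deregister-then-close order, assuming the Worker is an epoll-based loop as its remove_fd() call suggests:

#include <sys/epoll.h>
#include <unistd.h>

// Deregister the fd from the event loop first, then close it, so no further
// events can be delivered for it and the loop's bookkeeping stays consistent.
static void close_tracked_fd(int epoll_fd, int fd)
{
    epoll_ctl(epoll_fd, EPOLL_CTL_DEL, fd, nullptr);
    ::close(fd);
}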

View File

@ -55,6 +55,7 @@ private:
GWBUF* read_complete_packet();
void drain_queue();
void error();
void close();
/** Client states */
enum vc_state

View File

@ -1,4 +1,4 @@
add_library(mysqlmon SHARED mysql_mon.c)
add_library(mysqlmon SHARED mysql_mon.cc)
target_link_libraries(mysqlmon maxscale-common)
add_dependencies(mysqlmon pcre2)
set_target_properties(mysqlmon PROPERTIES VERSION "1.4.0")
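
Renaming mysql_mon.c to mysql_mon.cc means the monitor is now compiled as C++, and the hunks below show what that entails: the exported MXS_CREATE_MODULE entry point is wrapped in extern "C" so it keeps an unmangled name for the module loader, void* results from C APIs such as hashtable_fetch() and MXS_MALLOC() gain explicit static_casts, and the designated-initializer macro MYSQL_SERVER_INFO_INIT gives way to plain value initialization. A generic sketch of the first two points, with placeholder names:

#include <cstdlib>

// Entry points resolved with dlsym() must keep C linkage (no C++ name mangling).
extern "C" void* my_module_entry_point()
{
    // In C++, void* no longer converts implicitly to other pointer types, so
    // results from C allocators and containers need an explicit cast.
    int* value = static_cast<int*>(std::malloc(sizeof(int)));
    return value;
}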

View File

@ -78,6 +78,9 @@ static const char CN_SWITCHOVER_TIMEOUT[] = "switchover_timeout";
*
* @return The module object
*/
extern "C"
{
MXS_MODULE* MXS_CREATE_MODULE()
{
MXS_NOTICE("Initialise the MySQL Monitor module.");
@ -137,6 +140,8 @@ MXS_MODULE* MXS_CREATE_MODULE()
return &info;
}
}
/**
* Monitor specific information about a server
*/
@ -155,14 +160,11 @@ typedef struct mysql_server_info
bool binlog_relay; /** Server is a Binlog Relay */
} MYSQL_SERVER_INFO;
/** Other values are implicitly zero initialized */
#define MYSQL_SERVER_INFO_INIT {.binlog_name = ""}
void* info_copy_func(const void *val)
{
ss_dassert(val);
MYSQL_SERVER_INFO *old_val = (MYSQL_SERVER_INFO*)val;
MYSQL_SERVER_INFO *new_val = MXS_MALLOC(sizeof(MYSQL_SERVER_INFO));
MYSQL_SERVER_INFO *new_val = static_cast<MYSQL_SERVER_INFO*>(MXS_MALLOC(sizeof(MYSQL_SERVER_INFO)));
char *binlog_name = MXS_STRDUP(old_val->binlog_name);
if (new_val && binlog_name)
@ -200,9 +202,11 @@ void info_free_func(void *val)
*/
bool init_server_info(MYSQL_MONITOR *handle, MXS_MONITOR_SERVERS *database)
{
MYSQL_SERVER_INFO info = MYSQL_SERVER_INFO_INIT;
bool rval = true;
MYSQL_SERVER_INFO info = {};
info.binlog_name = const_cast<char*>("");
while (database)
{
/** Delete any existing structures and replace them with empty ones */
@ -348,7 +352,8 @@ static void diagnostics(DCB *dcb, const MXS_MONITOR *mon)
for (MXS_MONITOR_SERVERS *db = mon->databases; db; db = db->next)
{
MYSQL_SERVER_INFO *serv_info = hashtable_fetch(handle->server_info, db->server->unique_name);
MYSQL_SERVER_INFO *serv_info =
static_cast<MYSQL_SERVER_INFO*>(hashtable_fetch(handle->server_info, db->server->unique_name));
dcb_printf(dcb, "Server: %s\n", db->server->unique_name);
dcb_printf(dcb, "Server ID: %d\n", serv_info->server_id);
dcb_printf(dcb, "Read only: %s\n", serv_info->read_only ? "ON" : "OFF");
@ -404,7 +409,9 @@ static json_t* diagnostics_json(const MXS_MONITOR *mon)
for (MXS_MONITOR_SERVERS *db = mon->databases; db; db = db->next)
{
json_t* srv = json_object();
MYSQL_SERVER_INFO *serv_info = hashtable_fetch(handle->server_info, db->server->unique_name);
MYSQL_SERVER_INFO *serv_info =
static_cast<MYSQL_SERVER_INFO*>(hashtable_fetch(handle->server_info,
db->server->unique_name));
json_object_set_new(srv, "name", json_string(db->server->unique_name));
json_object_set_new(srv, "server_id", json_integer(serv_info->server_id));
json_object_set_new(srv, "master_id", json_integer(serv_info->master_id));
@ -441,7 +448,8 @@ enum mysql_server_version
static inline void monitor_mysql_db(MXS_MONITOR_SERVERS* database, MYSQL_SERVER_INFO *serv_info,
enum mysql_server_version server_version)
{
int columns, i_io_thread, i_sql_thread, i_binlog_pos, i_master_id, i_binlog_name;
unsigned int columns;
int i_io_thread, i_sql_thread, i_binlog_pos, i_master_id, i_binlog_name;
const char *query;
if (server_version == MYSQL_SERVER_VERSION_100)
@ -478,7 +486,7 @@ static inline void monitor_mysql_db(MXS_MONITOR_SERVERS* database, MYSQL_SERVER_
{
mysql_free_result(result);
MXS_ERROR("\"%s\" returned less than the expected amount of columns. "
"Expected %d columns.", query, columns);
"Expected %u columns.", query, columns);
return;
}
@ -577,7 +585,7 @@ static MXS_MONITOR_SERVERS *build_mysql51_replication_tree(MXS_MONITOR *mon)
MXS_MONITOR_SERVERS* database = mon->databases;
MXS_MONITOR_SERVERS *ptr, *rval = NULL;
int i;
MYSQL_MONITOR *handle = mon->handle;
MYSQL_MONITOR *handle = static_cast<MYSQL_MONITOR*>(mon->handle);
while (database)
{
@ -689,7 +697,7 @@ static MXS_MONITOR_SERVERS *build_mysql51_replication_tree(MXS_MONITOR *mon)
static void
monitorDatabase(MXS_MONITOR *mon, MXS_MONITOR_SERVERS *database)
{
MYSQL_MONITOR* handle = mon->handle;
MYSQL_MONITOR* handle = static_cast<MYSQL_MONITOR*>(mon->handle);
MYSQL_ROW row;
MYSQL_RES *result;
unsigned long int server_version = 0;
@ -761,7 +769,8 @@ monitorDatabase(MXS_MONITOR *mon, MXS_MONITOR_SERVERS *database)
mxs_mysql_set_server_version(database->con, database->server);
server_string = database->server->version_string;
MYSQL_SERVER_INFO *serv_info = hashtable_fetch(handle->server_info, database->server->unique_name);
MYSQL_SERVER_INFO *serv_info =
static_cast<MYSQL_SERVER_INFO*>(hashtable_fetch(handle->server_info, database->server->unique_name));
ss_dassert(serv_info);
/* Check whether current server is MaxScale Binlog Server */
@ -968,7 +977,8 @@ void find_graph_cycles(MYSQL_MONITOR *handle, MXS_MONITOR_SERVERS *database, int
for (MXS_MONITOR_SERVERS *db = database; db; db = db->next)
{
graph[nodes].info = hashtable_fetch(handle->server_info, db->server->unique_name);
graph[nodes].info =
static_cast<MYSQL_SERVER_INFO*>(hashtable_fetch(handle->server_info, db->server->unique_name));
graph[nodes].db = db;
ss_dassert(graph[nodes].info);
graph[nodes].index = graph[nodes].lowest_index = 0;
@ -1076,7 +1086,9 @@ bool failover_required(MYSQL_MONITOR *handle, MXS_MONITOR_SERVERS *db)
if (SERVER_IS_RUNNING(db->server))
{
candidates++;
MYSQL_SERVER_INFO *server_info = hashtable_fetch(handle->server_info, db->server->unique_name);
MYSQL_SERVER_INFO *server_info =
static_cast<MYSQL_SERVER_INFO*>(hashtable_fetch(handle->server_info,
db->server->unique_name));
if (server_info->read_only || server_info->slave_configured || candidates > 1)
{
@ -1303,7 +1315,9 @@ monitorMain(void *arg)
ptr = mon->databases;
while (ptr)
{
MYSQL_SERVER_INFO *serv_info = hashtable_fetch(handle->server_info, ptr->server->unique_name);
MYSQL_SERVER_INFO *serv_info =
static_cast<MYSQL_SERVER_INFO*>(hashtable_fetch(handle->server_info,
ptr->server->unique_name));
ss_dassert(serv_info);
if (ptr->server->node_id > 0 && ptr->server->master_id > 0 &&
@ -1332,7 +1346,9 @@ monitorMain(void *arg)
{
if (!SERVER_IN_MAINT(ptr->server))
{
MYSQL_SERVER_INFO *serv_info = hashtable_fetch(handle->server_info, ptr->server->unique_name);
MYSQL_SERVER_INFO *serv_info =
static_cast<MYSQL_SERVER_INFO*>(hashtable_fetch(handle->server_info,
ptr->server->unique_name));
ss_dassert(serv_info);
/** If "detect_stale_master" option is On, let's use the previous master.
@ -1367,7 +1383,7 @@ monitorMain(void *arg)
if (handle->detectStaleSlave)
{
int bits = SERVER_SLAVE | SERVER_RUNNING;
unsigned bits = SERVER_SLAVE | SERVER_RUNNING;
if ((ptr->mon_prev_status & bits) == bits &&
root_master && SERVER_IS_MASTER(root_master->server))
@ -1482,9 +1498,9 @@ monitorMain(void *arg)
while (ptr)
{
MYSQL_SERVER_INFO *serv_info;
serv_info = hashtable_fetch(handle->server_info,
ptr->server->unique_name);
MYSQL_SERVER_INFO *serv_info =
static_cast<MYSQL_SERVER_INFO*>(hashtable_fetch(handle->server_info,
ptr->server->unique_name));
ss_dassert(serv_info);
if ((!SERVER_IN_MAINT(ptr->server)) && SERVER_IS_RUNNING(ptr->server))
@ -1901,8 +1917,9 @@ static MXS_MONITOR_SERVERS *get_replication_tree(MXS_MONITOR *mon, int num_serve
monitor_clear_pending_status(handle->master, SERVER_MASTER);
}
MYSQL_SERVER_INFO* info = hashtable_fetch(handle->server_info,
master->server->unique_name);
MYSQL_SERVER_INFO* info =
static_cast<MYSQL_SERVER_INFO*>(hashtable_fetch(handle->server_info,
master->server->unique_name));
ss_dassert(info);
if (SERVER_IS_RUNNING(master->server))
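
Renaming mysql_mon.c to mysql_mon.cc (the CMakeLists change above) means the monitor is now compiled as C++, which explains most of the edits in this file: the module entry point must keep C linkage, and every void* coming back from hashtable_fetch() or MXS_MALLOC() needs an explicit cast, because C++ forbids the implicit void*-to-T* conversion that C allows. A minimal sketch of both patterns, with a stand-in lookup function rather than the real hashtable API:

    struct ServerInfo
    {
        long server_id;
    };

    // Stand-in for hashtable_fetch(): like the C API, it returns an untyped pointer.
    static void* lookup_server_info(const char* /*name*/)
    {
        return nullptr;
    }

    // In C, `ServerInfo* info = lookup_server_info(name);` compiles as-is.
    // In C++ the conversion from void* has to be spelled out:
    ServerInfo* fetch_info(const char* name)
    {
        return static_cast<ServerInfo*>(lookup_server_info(name));
    }

    // Module entry points are resolved by name with dlsym(), so they must keep
    // C linkage once the file is compiled as C++ (hypothetical symbol name here).
    extern "C" void* create_module_sketch()
    {
        return nullptr;
    }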

View File

@ -670,12 +670,12 @@ static inline bool not_ok_packet(const GWBUF* buffer)
uint8_t* data = GWBUF_DATA(buffer);
return data[4] != MYSQL_REPLY_OK ||
// Should be more than 7 bytes of payload
gw_mysql_get_byte3(data) < MYSQL_OK_PACKET_MIN_LEN - MYSQL_HEADER_LEN ||
// Should have no affected rows
data[5] != 0 ||
// Should not generate an insert ID
data[6] != 0;
// Should be more than 7 bytes of payload
gw_mysql_get_byte3(data) < MYSQL_OK_PACKET_MIN_LEN - MYSQL_HEADER_LEN ||
// Should have no affected rows
data[5] != 0 ||
// Should not generate an insert ID
data[6] != 0;
}
static inline bool not_err_packet(const GWBUF* buffer)
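
not_ok_packet() reads the first payload byte at offset 4 because every MySQL packet starts with a 4-byte header: a 3-byte little-endian payload length followed by a 1-byte sequence number. A hedged sketch of that decoding, assuming gw_mysql_get_byte3() simply assembles the little-endian length:

    #include <stdint.h>

    // Assemble the 3-byte little-endian payload length from a raw packet;
    // this is the value gw_mysql_get_byte3() is assumed to return.
    static inline uint32_t payload_length(const uint8_t* packet)
    {
        return packet[0] | (packet[1] << 8) | (packet[2] << 16);
    }

    static inline bool looks_like_ok_packet(const uint8_t* packet)
    {
        return payload_length(packet) >= 7      // OK payload is at least 7 bytes
               && packet[4] == 0x00;            // first payload byte: OK header
    }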

View File

@ -96,46 +96,46 @@ static bool parse_kill_query(char *query, uint64_t *thread_id_out, kill_type_t *
extern "C"
{
MXS_MODULE* MXS_CREATE_MODULE()
{
static MXS_PROTOCOL MyObject =
MXS_MODULE* MXS_CREATE_MODULE()
{
gw_read_client_event, /* Read - EPOLLIN handler */
gw_MySQLWrite_client, /* Write - data from gateway */
gw_write_client_event, /* WriteReady - EPOLLOUT handler */
gw_error_client_event, /* Error - EPOLLERR handler */
gw_client_hangup_event, /* HangUp - EPOLLHUP handler */
gw_MySQLAccept, /* Accept */
NULL, /* Connect */
gw_client_close, /* Close */
gw_MySQLListener, /* Listen */
NULL, /* Authentication */
NULL, /* Session */
gw_default_auth, /* Default authenticator */
gw_connection_limit, /* Send error connection limit */
NULL
};
static MXS_MODULE info =
{
MXS_MODULE_API_PROTOCOL,
MXS_MODULE_GA,
MXS_PROTOCOL_VERSION,
"The client to MaxScale MySQL protocol implementation",
"V1.1.0",
MXS_NO_MODULE_CAPABILITIES,
&MyObject,
process_init,
process_finish,
thread_init,
thread_finish,
static MXS_PROTOCOL MyObject =
{
{MXS_END_MODULE_PARAMS}
}
};
gw_read_client_event, /* Read - EPOLLIN handler */
gw_MySQLWrite_client, /* Write - data from gateway */
gw_write_client_event, /* WriteReady - EPOLLOUT handler */
gw_error_client_event, /* Error - EPOLLERR handler */
gw_client_hangup_event, /* HangUp - EPOLLHUP handler */
gw_MySQLAccept, /* Accept */
NULL, /* Connect */
gw_client_close, /* Close */
gw_MySQLListener, /* Listen */
NULL, /* Authentication */
NULL, /* Session */
gw_default_auth, /* Default authenticator */
gw_connection_limit, /* Send error connection limit */
NULL
};
return &info;
}
static MXS_MODULE info =
{
MXS_MODULE_API_PROTOCOL,
MXS_MODULE_GA,
MXS_PROTOCOL_VERSION,
"The client to MaxScale MySQL protocol implementation",
"V1.1.0",
MXS_NO_MODULE_CAPABILITIES,
&MyObject,
process_init,
process_finish,
thread_init,
thread_finish,
{
{MXS_END_MODULE_PARAMS}
}
};
return &info;
}
}
/*lint +e14 */
@ -1384,7 +1384,7 @@ static int gw_client_close(DCB *dcb)
{
ss_dassert(target->state == SESSION_STATE_ROUTER_READY ||
target->state == SESSION_STATE_STOPPING);
ss_debug(bool removed =) mxs_worker_deregister_session(target->ses_id);
ss_debug(bool removed = ) mxs_worker_deregister_session(target->ses_id);
ss_dassert(removed);
session_close(target);
}
@ -1777,6 +1777,8 @@ static bool parse_kill_query(char *query, uint64_t *thread_id_out, kill_type_t *
{
const char WORD_CONNECTION[] = "CONNECTION";
const char WORD_QUERY[] = "QUERY";
const char WORD_HARD[] = "HARD";
const char WORD_SOFT[] = "SOFT";
const char DELIM[] = " \n\t";
kill_type_t kill_type = KT_CONNECTION;
@ -1822,9 +1824,19 @@ static bool parse_kill_query(char *query, uint64_t *thread_id_out, kill_type_t *
{
get_next = true;
}
/* Move to next state regardless of comparison result. The current
* part is optional and the process id may already be in the token. */
state = ID;
if (strncasecmp(token, WORD_HARD, sizeof(WORD_HARD) - 1) == 0 ||
strncasecmp(token, WORD_SOFT, sizeof(WORD_SOFT) - 1) == 0)
{
/* This is an optional token and needs to be ignored */
get_next = true;
}
else
{
/* Move to next state regardless of comparison result. The current
* part is optional and the process id may already be in the token. */
state = ID;
}
break;
case ID:
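
The parse_kill_query() change above teaches the tokenizer that MariaDB accepts an optional HARD or SOFT modifier, i.e. `KILL [HARD|SOFT] [CONNECTION|QUERY] <id>`: the modifier is simply consumed and the state machine stays in the same state. A minimal sketch of the same idea built on the standard library instead of the strtok_r() loop used above:

    #include <cstdint>
    #include <cstdlib>
    #include <sstream>
    #include <string>
    #include <strings.h>

    // Returns true and fills `id` if `sql` matches
    // KILL [HARD|SOFT] [CONNECTION|QUERY] <thread-id>.
    bool parse_kill_sketch(const std::string& sql, uint64_t& id)
    {
        std::istringstream in(sql);
        std::string tok;

        if (!(in >> tok) || strcasecmp(tok.c_str(), "KILL") != 0 || !(in >> tok))
        {
            return false;
        }

        // Optional HARD/SOFT modifier: consume it and read the next token.
        if (strcasecmp(tok.c_str(), "HARD") == 0 || strcasecmp(tok.c_str(), "SOFT") == 0)
        {
            if (!(in >> tok))
            {
                return false;
            }
        }

        // Optional CONNECTION/QUERY keyword: consume it as well.
        if (strcasecmp(tok.c_str(), "CONNECTION") == 0 || strcasecmp(tok.c_str(), "QUERY") == 0)
        {
            if (!(in >> tok))
            {
                return false;
            }
        }

        char* end = nullptr;
        id = strtoull(tok.c_str(), &end, 10);
        return end != tok.c_str() && *end == '\0';
    }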

View File

@ -172,7 +172,7 @@ struct TEST_CASE
},
};
const int N_TEST_CASES = sizeof(test_cases)/sizeof(test_cases[0]);
const int N_TEST_CASES = sizeof(test_cases) / sizeof(test_cases[0]);
int test(GWBUF** ppStmt,
SetSqlModeParser::sql_mode_t expected_sql_mode,

View File

@ -728,7 +728,7 @@ void protocol_remove_srv_command(MySQLProtocol* p)
}
mxs_mysql_cmd_t protocol_get_srv_command(MySQLProtocol* p,
bool removep)
bool removep)
{
mxs_mysql_cmd_t cmd;
@ -1095,7 +1095,7 @@ int mxs_mysql_send_ok(DCB *dcb, int sequence, uint8_t affected_rows, const char*
* @return The length of the response packet
*/
static int response_length(bool with_ssl, bool ssl_established, char *user, uint8_t *passwd,
char *dbname, const char *auth_module)
char *dbname, const char *auth_module)
{
long bytes;
@ -1379,7 +1379,7 @@ int send_mysql_native_password_response(DCB* dcb)
gw_get_shared_session_auth_info(dcb, &local_session);
uint8_t *curr_passwd = memcmp(local_session.client_sha1, null_client_sha1, MYSQL_SCRAMBLE_LEN) ?
local_session.client_sha1 : null_client_sha1;
local_session.client_sha1 : null_client_sha1;
GWBUF* buffer = gwbuf_alloc(MYSQL_HEADER_LEN + GW_MYSQL_SCRAMBLE_SIZE);
uint8_t* data = GWBUF_DATA(buffer);

View File

@ -99,7 +99,8 @@ void avro_close_binlog(int fd)
* @param filepath Path to the created file
* @param json_schema The schema of the table in JSON format
*/
AVRO_TABLE* avro_table_alloc(const char* filepath, const char* json_schema, const char *codec, size_t block_size)
AVRO_TABLE* avro_table_alloc(const char* filepath, const char* json_schema, const char *codec,
size_t block_size)
{
AVRO_TABLE *table = MXS_CALLOC(1, sizeof(AVRO_TABLE));
if (table)

View File

@ -69,15 +69,15 @@ static const char* codec_to_string(enum mxs_avro_codec_type type)
{
switch (type)
{
case MXS_AVRO_CODEC_NULL:
return "null";
case MXS_AVRO_CODEC_DEFLATE:
return "deflate";
case MXS_AVRO_CODEC_SNAPPY:
return "snappy";
default:
ss_dassert(false);
return "null";
case MXS_AVRO_CODEC_NULL:
return "null";
case MXS_AVRO_CODEC_DEFLATE:
return "deflate";
case MXS_AVRO_CODEC_SNAPPY:
return "snappy";
default:
ss_dassert(false);
return "null";
}
}

View File

@ -87,11 +87,11 @@ static char *avro_client_ouput[] = { "Undefined", "JSON", "Avro" };
static inline bool is_reserved_word(const char* word)
{
return strcasecmp(word, avro_domain) == 0 ||
strcasecmp(word, avro_server_id) == 0 ||
strcasecmp(word, avro_sequence) == 0 ||
strcasecmp(word, avro_event_number) == 0 ||
strcasecmp(word, avro_event_type) == 0 ||
strcasecmp(word, avro_timestamp) == 0;
strcasecmp(word, avro_server_id) == 0 ||
strcasecmp(word, avro_sequence) == 0 ||
strcasecmp(word, avro_event_number) == 0 ||
strcasecmp(word, avro_event_type) == 0 ||
strcasecmp(word, avro_timestamp) == 0;
}
static inline void fix_reserved_word(char *tok)

View File

@ -185,13 +185,17 @@ MXS_MODULE* MXS_CREATE_MODULE()
{"transaction_safety", MXS_MODULE_PARAM_BOOL, "false"},
{"semisync", MXS_MODULE_PARAM_BOOL, "false"},
{"encrypt_binlog", MXS_MODULE_PARAM_BOOL, "false"},
{"encryption_algorithm", MXS_MODULE_PARAM_ENUM, "aes_cbc",
MXS_MODULE_OPT_NONE, enc_algo_values},
{
"encryption_algorithm", MXS_MODULE_PARAM_ENUM, "aes_cbc",
MXS_MODULE_OPT_NONE, enc_algo_values
},
{"encryption_key_file", MXS_MODULE_PARAM_PATH, NULL, MXS_MODULE_OPT_PATH_R_OK},
{"mariadb10_slave_gtid", MXS_MODULE_PARAM_BOOL, "false"},
{"mariadb10_master_gtid", MXS_MODULE_PARAM_BOOL, "false"},
{"binlog_structure", MXS_MODULE_PARAM_ENUM, "flat",
MXS_MODULE_OPT_NONE, binlog_storage_values},
{
"binlog_structure", MXS_MODULE_PARAM_ENUM, "flat",
MXS_MODULE_OPT_NONE, binlog_storage_values
},
{"shortburst", MXS_MODULE_PARAM_COUNT, DEF_SHORT_BURST},
{"longburst", MXS_MODULE_PARAM_COUNT, DEF_LONG_BURST},
{"burstsize", MXS_MODULE_PARAM_SIZE, DEF_BURST_SIZE},
@ -788,9 +792,9 @@ createInstance(SERVICE *service, char **options)
inst->storage_type == BLR_BINLOG_STORAGE_TREE)
{
MXS_ERROR("%s: binlog_structure 'tree' mode can be enabled only"
" with MariaDB Master GTID registration feature."
" Please enable it with option"
" 'mariadb10_master_gtid = on'",
" with MariaDB Master GTID registration feature."
" Please enable it with option"
" 'mariadb10_master_gtid = on'",
service->name);
free_instance(inst);
return NULL;
@ -1117,9 +1121,9 @@ createInstance(SERVICE *service, char **options)
" Please issue SET @@GLOBAL.GTID_SLAVE_POS =''"
" and START SLAVE."
" Existing binlogs might be overwritten.");
MXS_ERROR("%s: %s",
inst->service->name,
inst->m_errmsg);
MXS_ERROR("%s: %s",
inst->service->name,
inst->m_errmsg);
return (MXS_ROUTER *)inst;
}
@ -1239,7 +1243,7 @@ newSession(MXS_ROUTER *instance, MXS_SESSION *session)
slave->encryption_ctx = NULL;
slave->mariadb_gtid = NULL;
slave->gtid_maps = NULL;
memset(&slave->f_info, 0 , sizeof (MARIADB_GTID_INFO));
memset(&slave->f_info, 0, sizeof (MARIADB_GTID_INFO));
/**
* Add this session to the list of active sessions.
@ -1975,10 +1979,12 @@ static json_t* diagnostics_json(const MXS_ROUTER *router)
{
json_t* obj = json_object();
json_object_set_new(obj, "ssl_ca_cert", json_string(router_inst->service->dbref->server->server_ssl->ssl_ca_cert));
json_object_set_new(obj, "ssl_ca_cert",
json_string(router_inst->service->dbref->server->server_ssl->ssl_ca_cert));
json_object_set_new(obj, "ssl_cert", json_string(router_inst->service->dbref->server->server_ssl->ssl_cert));
json_object_set_new(obj, "ssl_key", json_string(router_inst->service->dbref->server->server_ssl->ssl_key));
json_object_set_new(obj, "ssl_version", json_string(router_inst->ssl_version ? router_inst->ssl_version : "MAX"));
json_object_set_new(obj, "ssl_version",
json_string(router_inst->ssl_version ? router_inst->ssl_version : "MAX"));
json_object_set_new(rval, "master_ssl", obj);
}
@ -1989,9 +1995,9 @@ static json_t* diagnostics_json(const MXS_ROUTER *router)
json_t* obj = json_object();
json_object_set_new(obj, "key", json_string(
router_inst->encryption.key_management_filename));
router_inst->encryption.key_management_filename));
json_object_set_new(obj, "algorithm", json_string(
blr_get_encryption_algorithm(router_inst->encryption.encryption_algorithm)));
blr_get_encryption_algorithm(router_inst->encryption.encryption_algorithm)));
json_object_set_new(obj, "key_length",
json_integer(8 * router_inst->encryption.key_len));
@ -2050,7 +2056,7 @@ static json_t* diagnostics_json(const MXS_ROUTER *router)
json_object_set_new(rval, "residual_packets", json_integer(router_inst->stats.n_residuals));
double average_packets = router_inst->stats.n_reads != 0 ?
((double)router_inst->stats.n_binlogs / router_inst->stats.n_reads) : 0;
((double)router_inst->stats.n_binlogs / router_inst->stats.n_reads) : 0;
json_object_set_new(rval, "average_events_per_packets", json_real(average_packets));
@ -2424,11 +2430,11 @@ errorReply(MXS_ROUTER *instance,
{
/* Stopped state, no reconnection */
MXS_INFO("%s: Master connection has been closed. State is '%s', "
"%snot retrying a new connection to master [%s]:%d",
router->service->name,
blrm_states[router->master_state], msg,
router->service->dbref->server->name,
router->service->dbref->server->port);
"%snot retrying a new connection to master [%s]:%d",
router->service->name,
blrm_states[router->master_state], msg,
router->service->dbref->server->name,
router->service->dbref->server->port);
}
if (errmsg)
@ -2853,9 +2859,9 @@ blr_handle_config_item(const char *name, const char *value, ROUTER_INSTANCE *ins
if (new_val < 0)
{
MXS_WARNING("Found invalid 'master_heartbeat_period' value"
" for service '%s': %s, ignoring it.",
inst->service->name,
value);
" for service '%s': %s, ignoring it.",
inst->service->name,
value);
}
else
{
@ -2872,9 +2878,9 @@ blr_handle_config_item(const char *name, const char *value, ROUTER_INSTANCE *ins
if (new_val <= 0)
{
MXS_WARNING("Found invalid 'master_connect_retry' value"
" for service '%s': %s, ignoring it.",
inst->service->name,
value);
" for service '%s': %s, ignoring it.",
inst->service->name,
value);
}
else
{
@ -3350,15 +3356,15 @@ static bool blr_open_gtid_maps_storage(ROUTER_INSTANCE *inst)
int rc = sqlite3_exec(inst->gtid_maps,
"BEGIN;"
"CREATE TABLE IF NOT EXISTS gtid_maps("
"id INTEGER PRIMARY KEY AUTOINCREMENT, "
"rep_domain INT, "
"server_id INT, "
"sequence BIGINT, "
"binlog_file VARCHAR(255), "
"start_pos BIGINT, "
"end_pos BIGINT);"
"id INTEGER PRIMARY KEY AUTOINCREMENT, "
"rep_domain INT, "
"server_id INT, "
"sequence BIGINT, "
"binlog_file VARCHAR(255), "
"start_pos BIGINT, "
"end_pos BIGINT);"
"CREATE UNIQUE INDEX IF NOT EXISTS gtid_index "
"ON gtid_maps(rep_domain, server_id, sequence, binlog_file);"
"ON gtid_maps(rep_domain, server_id, sequence, binlog_file);"
"COMMIT;",
NULL, NULL, &errmsg);
if (rc != SQLITE_OK)

View File

@ -158,10 +158,10 @@ static uint8_t *blr_create_ignorable_event(uint32_t event_size,
uint32_t event_pos,
bool do_checksum);
int blr_write_special_event(ROUTER_INSTANCE *router,
uint32_t file_offset,
uint32_t hole_size,
REP_HEADER *hdr,
int type);
uint32_t file_offset,
uint32_t hole_size,
REP_HEADER *hdr,
int type);
static uint8_t *blr_create_start_encryption_event(ROUTER_INSTANCE *router,
uint32_t event_pos,
bool do_checksum);
@ -481,13 +481,13 @@ blr_file_create(ROUTER_INSTANCE *router, char *orig_file)
strcpy(path, router->binlogdir);
strcat(path, "/");
/**
* Create file using domain and server_id prefix
*/
if (router->mariadb10_compat &&
router->mariadb10_master_gtid &&
router->storage_type == BLR_BINLOG_STORAGE_TREE)
{
/**
* Create file using domain and server_id prefix
*/
if (router->mariadb10_compat &&
router->mariadb10_master_gtid &&
router->storage_type == BLR_BINLOG_STORAGE_TREE)
{
char prefix[BINLOG_FILE_EXTRA_INFO];
// Add prefix
sprintf(prefix,
@ -1585,17 +1585,17 @@ blr_file_next_exists(ROUTER_INSTANCE *router,
char bigbuf[PATH_MAX + 1];
char select_query[GTID_SQL_BUFFER_SIZE];
const char select_tpl[] = "SELECT "
"(rep_domain || '/' || server_id || '/' || binlog_file) AS file, "
"rep_domain, "
"server_id, "
"binlog_file "
"(rep_domain || '/' || server_id || '/' || binlog_file) AS file, "
"rep_domain, "
"server_id, "
"binlog_file "
"FROM gtid_maps "
"WHERE id = "
"(SELECT MAX(id) "
"FROM gtid_maps "
"WHERE (binlog_file='%s' AND "
"rep_domain = %" PRIu32 " AND "
"server_id = %" PRIu32 ")) + 1;";
"WHERE id = "
"(SELECT MAX(id) "
"FROM gtid_maps "
"WHERE (binlog_file='%s' AND "
"rep_domain = %" PRIu32 " AND "
"server_id = %" PRIu32 ")) + 1;";
MARIADB_GTID_INFO result = {};
MARIADB_GTID_ELEMS gtid_elms = {};
@ -2603,8 +2603,8 @@ blr_read_events_all_events(ROUTER_INSTANCE *router,
n_sequence);
MXS_DEBUG("GTID List Event has %lu GTIDs, first one is %s",
n_gtids,
mariadb_gtid);
n_gtids,
mariadb_gtid);
if (router->storage_type == BLR_BINLOG_STORAGE_TREE)
{
@ -3945,27 +3945,27 @@ bool blr_save_mariadb_gtid(ROUTER_INSTANCE *inst)
{
int sql_ret;
static const char insert_tpl[] = "INSERT OR FAIL INTO gtid_maps("
"rep_domain, "
"server_id, "
"sequence, "
"binlog_file, "
"start_pos, "
"end_pos) "
"rep_domain, "
"server_id, "
"sequence, "
"binlog_file, "
"start_pos, "
"end_pos) "
"VALUES ( "
"%" PRIu32 ", "
"%" PRIu32 ", "
"%" PRIu64 ", "
"\"%s\", "
"%" PRIu64 ", "
"%" PRIu64 ");";
"%" PRIu32 ", "
"%" PRIu32 ", "
"%" PRIu64 ", "
"\"%s\", "
"%" PRIu64 ", "
"%" PRIu64 ");";
static const char update_tpl[] = "UPDATE gtid_maps SET "
"start_pos = %" PRIu64 ", "
"end_pos = %" PRIu64 " "
"start_pos = %" PRIu64 ", "
"end_pos = %" PRIu64 " "
"WHERE rep_domain = %" PRIu32 " AND "
"server_id = %" PRIu32 " AND "
"sequence = %" PRIu64 " AND "
"binlog_file = \"%s\";";
"server_id = %" PRIu32 " AND "
"sequence = %" PRIu64 " AND "
"binlog_file = \"%s\";";
char *errmsg;
char sql_stmt[GTID_SQL_BUFFER_SIZE];
MARIADB_GTID_INFO gtid_info;
@ -4132,19 +4132,19 @@ bool blr_fetch_mariadb_gtid(ROUTER_SLAVE *slave,
* with old content.
*/
static const char select_tpl[] = "SELECT "
"(rep_domain ||"
" '-' || server_id ||"
" '-' || sequence) AS gtid, "
"binlog_file, "
"start_pos, "
"end_pos, "
"rep_domain, "
"server_id, "
"sequence "
"(rep_domain ||"
" '-' || server_id ||"
" '-' || sequence) AS gtid, "
"binlog_file, "
"start_pos, "
"end_pos, "
"rep_domain, "
"server_id, "
"sequence "
"FROM gtid_maps "
"WHERE (rep_domain = %" PRIu32 " AND "
"server_id = %" PRIu32 " AND "
"sequence = %" PRIu64 ") "
"WHERE (rep_domain = %" PRIu32 " AND "
"server_id = %" PRIu32 " AND "
"sequence = %" PRIu64 ") "
"ORDER BY id DESC LIMIT 1;";
ss_dassert(gtid != NULL);
@ -4295,20 +4295,20 @@ bool blr_load_last_mariadb_gtid(ROUTER_INSTANCE *router,
char *errmsg = NULL;
MARIADB_GTID_ELEMS gtid_elms = {};
static const char last_gtid[] = "SELECT "
"(rep_domain ||"
" '-' || server_id ||"
" '-' || sequence) AS gtid, "
"binlog_file, "
"start_pos, "
"end_pos, "
"rep_domain, "
"server_id, "
"sequence "
"FROM gtid_maps "
"WHERE id = "
"(SELECT MAX(id) "
"FROM gtid_maps "
"WHERE start_pos > 4);";
"(rep_domain ||"
" '-' || server_id ||"
" '-' || sequence) AS gtid, "
"binlog_file, "
"start_pos, "
"end_pos, "
"rep_domain, "
"server_id, "
"sequence "
"FROM gtid_maps "
"WHERE id = "
"(SELECT MAX(id) "
"FROM gtid_maps "
"WHERE start_pos > 4);";
/* Find the last GTID */
if (sqlite3_exec(router->gtid_maps,
@ -4376,18 +4376,18 @@ bool blr_get_last_file(ROUTER_INSTANCE *router,
char *errmsg = NULL;
MARIADB_GTID_ELEMS gtid_elms = {};
static const char last_gtid[] = "SELECT "
"(rep_domain ||"
" '-' || server_id ||"
" '-' || sequence) AS gtid, "
"binlog_file, "
"start_pos, "
"end_pos, "
"rep_domain, "
"server_id, "
"sequence "
"FROM gtid_maps "
"WHERE id = "
"(SELECT MAX(id) FROM gtid_maps);";
"(rep_domain ||"
" '-' || server_id ||"
" '-' || sequence) AS gtid, "
"binlog_file, "
"start_pos, "
"end_pos, "
"rep_domain, "
"server_id, "
"sequence "
"FROM gtid_maps "
"WHERE id = "
"(SELECT MAX(id) FROM gtid_maps);";
/* Find the last file */
if (sqlite3_exec(router->gtid_maps,
@ -4428,10 +4428,10 @@ bool blr_compare_binlogs(ROUTER_INSTANCE *router,
}
else
{
// domain_id, server_id and strcmp()
return ((router->mariadb10_gtid_domain == info->domain_id) &&
(router->orig_masterid == info->server_id) &&
strcmp(r_file, s_file) == 0);
// domain_id, server_id and strcmp()
return ((router->mariadb10_gtid_domain == info->domain_id) &&
(router->orig_masterid == info->server_id) &&
strcmp(r_file, s_file) == 0);
}
}
@ -4489,7 +4489,7 @@ bool blr_binlog_file_exists(ROUTER_INSTANCE *router,
"%" PRIu32 "/%" PRIu32 "/",
router->mariadb10_gtid_domain,
router->orig_masterid);
strcat(path, prefix);
strcat(path, prefix);
}
// Set final file name full path
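
The GTID-map SQL templates above mix string literals with %" PRIu32 " and %" PRIu64 " because adjacent literals are concatenated by the preprocessor: the <inttypes.h> macros expand to the correct printf length modifier for each fixed-width integer type. A small sketch of the same pattern with an invented table name:

    #include <cinttypes>
    #include <cstddef>
    #include <cstdio>

    // Builds e.g. "INSERT INTO maps(rep_domain, sequence) VALUES (4, 12345678901);"
    // The PRIu32/PRIu64 macros expand to the right conversion specifiers for the
    // platform, and the adjacent string literals are merged at compile time.
    int build_stmt(char* buf, size_t len, uint32_t domain, uint64_t sequence)
    {
        static const char tpl[] = "INSERT INTO maps(rep_domain, sequence) "
                                  "VALUES (%" PRIu32 ", %" PRIu64 ");";
        return snprintf(buf, len, tpl, domain, sequence);
    }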

View File

@ -172,8 +172,8 @@ static void blr_start_master(void* data)
else
{
MXS_NOTICE("%s: Master Connect: binlog current state is [%s]\n",
router->service->name,
blrm_states[router->master_state]);
router->service->name,
blrm_states[router->master_state]);
}
/* Return only if state is not BLRM_CONNECTING */
@ -3187,10 +3187,10 @@ static void blr_start_master_registration(ROUTER_INSTANCE *router, GWBUF *buf)
*/
if (router->master_state == BLRM_CHECK_SEMISYNC)
{
if (blr_register_setsemisync(router, buf))
{
break;
}
if (blr_register_setsemisync(router, buf))
{
break;
}
}
case BLRM_REQUEST_SEMISYNC:
/**
@ -3284,9 +3284,9 @@ static void blr_register_mariadb_gtid_request(ROUTER_INSTANCE *router,
format_gtid_val,
router->last_mariadb_gtid);
MXS_INFO("%s: Requesting GTID (%s) from master server.",
router->service->name,
router->last_mariadb_gtid);
MXS_INFO("%s: Requesting GTID (%s) from master server.",
router->service->name,
router->last_mariadb_gtid);
// Send the request
blr_register_send_command(router,
set_gtid,
@ -3412,27 +3412,27 @@ static void blr_handle_fake_gtid_list(ROUTER_INSTANCE *router,
*/
if (hdr->next_pos && (hdr->next_pos > binlog_file_eof))
{
uint64_t hole_size = hdr->next_pos - binlog_file_eof;
uint64_t hole_size = hdr->next_pos - binlog_file_eof;
MXS_INFO("Detected hole while processing"
" a Fake GTID_LIST Event: hole size will be %"
PRIu64 " bytes",
hole_size);
MXS_INFO("Detected hole while processing"
" a Fake GTID_LIST Event: hole size will be %"
PRIu64 " bytes",
hole_size);
/* Set the offset for the write routine */
spinlock_acquire(&router->binlog_lock);
/* Set the offset for the write routine */
spinlock_acquire(&router->binlog_lock);
router->last_written = binlog_file_eof;
router->last_written = binlog_file_eof;
spinlock_release(&router->binlog_lock);
spinlock_release(&router->binlog_lock);
// Write One Hole
// TODO: write small holes
blr_write_special_event(router,
binlog_file_eof,
hole_size,
hdr,
BLRM_IGNORABLE);
// Write One Hole
// TODO: write small holes
blr_write_special_event(router,
binlog_file_eof,
hole_size,
hdr,
BLRM_IGNORABLE);
}
else
{
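
blr_handle_fake_gtid_list() above pads a "hole" when the fake GTID_LIST event announces a next position beyond the current end of the binlog file, so that later event offsets still line up. A tiny sketch of the guard and the size computation, with hypothetical names:

    #include <stdint.h>

    // Number of padding bytes needed so the local binlog file reaches the
    // position announced by the master, or 0 if no hole exists.
    static inline uint64_t binlog_hole_size(uint64_t announced_next_pos, uint64_t file_eof)
    {
        return announced_next_pos > file_eof ? announced_next_pos - file_eof : 0;
    }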

View File

@ -100,13 +100,13 @@
*/
typedef struct
{
int seq_no; /* Output sequence in result set */
char *last_file; /* Last binlog file found in GTID repo */
const char *binlogdir; /* Binlog files cache dir */
DCB *client; /* Connected client DCB */
bool use_tree; /* Binlog structure type */
size_t n_files; /* How many files */
uint64_t rowid; /* ROWID of router current file*/
int seq_no; /* Output sequence in result set */
char *last_file; /* Last binlog file found in GTID repo */
const char *binlogdir; /* Binlog files cache dir */
DCB *client; /* Connected client DCB */
bool use_tree; /* Binlog structure type */
size_t n_files; /* How many files */
uint64_t rowid; /* ROWID of router current file*/
} BINARY_LOG_DATA_RESULT;
extern void poll_fake_write_event(DCB *dcb);
@ -486,7 +486,8 @@ blr_skip_leading_sql_comments(const char *sql_query)
{
const char *p = sql_query;
while (*p) {
while (*p)
{
if (*p == '/' && p[1] == '*')
{
++p; // skip '/'
@ -709,15 +710,16 @@ blr_slave_query(ROUTER_INSTANCE *router, ROUTER_SLAVE *slave, GWBUF *queue)
}
}
else
{ /* Handle ADMIN commands */
if (blr_handle_admin_stmt(router,
slave,
word,
brkb))
{
MXS_FREE(query_text);
return 1;
}
{
/* Handle ADMIN commands */
if (blr_handle_admin_stmt(router,
slave,
word,
brkb))
{
MXS_FREE(query_text);
return 1;
}
}
/* - 3 - Handle unsupported statements from client */
@ -829,7 +831,8 @@ static uint8_t timestamp_def[] =
0x00, 0x00, 0x03, 0xfe, 0x00, 0x00, 0x02, 0x00
};
static uint8_t timestamp_eof[] = { 0x05, 0x00, 0x00, 0x05,
0xfe, 0x00, 0x00, 0x02, 0x00 };
0xfe, 0x00, 0x00, 0x02, 0x00
};
/**
* Send a response to a "SELECT UNIX_TIMESTAMP()" request.
@ -1206,8 +1209,8 @@ blr_slave_send_slave_status(ROUTER_INSTANCE *router,
/* Get the right GTID columns array */
const char **gtid_status_columns = router->mariadb10_gtid ?
mariadb10_gtid_status_columns :
mysql_gtid_status_columns;
mariadb10_gtid_status_columns :
mysql_gtid_status_columns;
/* Increment ncols with the right GTID columns */
while (gtid_status_columns[gtid_cols++])
{
@ -1664,7 +1667,7 @@ blr_slave_send_slave_status(ROUTER_INSTANCE *router,
* Send the response to the SQL command "SHOW SLAVE HOSTS"
*
* @param router The binlog router instance
* @param slave The connected slave server
* @param slave The connected slave server
* @return Non-zero if data was sent
*/
static int
@ -1898,10 +1901,10 @@ blr_slave_binlog_dump(ROUTER_INSTANCE *router, ROUTER_SLAVE *slave, GWBUF *queue
binlognamelen > 0,
requested_pos))
{
// ERROR
slave->state = BLRS_ERRORED;
dcb_close(slave->dcb);
return 1;
// ERROR
slave->state = BLRS_ERRORED;
dcb_close(slave->dcb);
return 1;
}
}
else
@ -2602,7 +2605,7 @@ blr_slave_catchup(ROUTER_INSTANCE *router, ROUTER_SLAVE *slave, bool large)
* 1) Same name and pos as current router file: aka Up To Date
*/
if (slave->binlog_pos == router->binlog_position &&
blr_is_current_binlog(router, slave))
blr_is_current_binlog(router, slave))
{
spinlock_acquire(&router->binlog_lock);
spinlock_acquire(&slave->catch_lock);
@ -3241,7 +3244,7 @@ blr_slave_send_disconnected_server(ROUTER_INSTANCE *router,
* and close the connection to that server
*
* @param router The binlog router instance
* @param slave The connected slave server
* @param slave The connected slave server
* @param server_id The slave server_id to disconnect
* @return Non-zero if data was sent to the client
*/
@ -3262,7 +3265,7 @@ blr_slave_disconnect_server(ROUTER_INSTANCE *router,
{
/* don't examine slaves with state = 0 */
if ((sptr->state == BLRS_REGISTERED ||
sptr->state == BLRS_DUMPING) &&
sptr->state == BLRS_DUMPING) &&
sptr->serverid == server_id)
{
/* server_id found */
@ -3752,7 +3755,7 @@ blr_start_slave(ROUTER_INSTANCE* router, ROUTER_SLAVE* slave)
/* A new binlog file has been created and opened
* by CHANGE MASTER TO: use it
*/
blr_file_append(router, router->binlog_name);
blr_file_append(router, router->binlog_name);
}
}
}
@ -3810,9 +3813,9 @@ blr_start_slave(ROUTER_INSTANCE* router, ROUTER_SLAVE* slave)
*/
static void
blr_slave_send_error_packet(ROUTER_SLAVE *slave,
char *msg,
unsigned int err_num,
char *status)
char *msg,
unsigned int err_num,
char *status)
{
GWBUF *pkt;
unsigned char *data;
@ -3948,7 +3951,7 @@ int blr_handle_change_master(ROUTER_INSTANCE* router,
* router->mariadb10_master_gtid is not set
*/
if (!router->mariadb10_master_gtid &&
change_master.use_mariadb10_gtid)
change_master.use_mariadb10_gtid)
{
snprintf(error,
BINLOG_ERROR_MSG_LEN,
@ -3978,11 +3981,11 @@ int blr_handle_change_master(ROUTER_INSTANCE* router,
h_val > BLR_HEARTBEAT_MAX_INTERVAL)
{
snprintf(error,
BINLOG_ERROR_MSG_LEN,
"The requested value for the heartbeat period is "
"either negative or exceeds the maximum allowed "
"(%d seconds).",
BLR_HEARTBEAT_MAX_INTERVAL);
BINLOG_ERROR_MSG_LEN,
"The requested value for the heartbeat period is "
"either negative or exceeds the maximum allowed "
"(%d seconds).",
BLR_HEARTBEAT_MAX_INTERVAL);
blr_abort_change_master(router,
current_master,
@ -4053,7 +4056,7 @@ int blr_handle_change_master(ROUTER_INSTANCE* router,
!change_master.ssl_key))
{
if (change_master.ssl_enabled &&
atoi(change_master.ssl_enabled))
atoi(change_master.ssl_enabled))
{
snprintf(error,
BINLOG_ERROR_MSG_LEN,
@ -4083,8 +4086,8 @@ int blr_handle_change_master(ROUTER_INSTANCE* router,
* or empty if router->mariadb10_master_gtid is set.
*/
if (!blr_binlog_change_check(router,
change_master,
error) ||
change_master,
error) ||
!blr_change_binlog_name(router,
change_master.binlog_file,
&master_logfile,
@ -6299,10 +6302,10 @@ static bool blr_handle_simple_select_stmt(ROUTER_INSTANCE *router,
else if (strcasecmp(word, "@@version_comment") == 0)
{
if (!router->saved_master.selectvercom)
/**
* This allows mysql client to get in when
* @@version_comment is not available
*/
/**
* This allows mysql client to get in when
* @@version_comment is not available
*/
{
blr_slave_send_ok(router, slave);
return true;
@ -6310,8 +6313,8 @@ static bool blr_handle_simple_select_stmt(ROUTER_INSTANCE *router,
else
{
blr_slave_replay(router,
slave,
router->saved_master.selectvercom);
slave,
router->saved_master.selectvercom);
return true;
}
}
@ -6494,10 +6497,10 @@ static GWBUF *blr_build_fake_rotate_event(ROUTER_SLAVE *slave,
return NULL;
}
/* Add 1 byte to payload for status indicator */
/* Add 1 byte to payload for status indicator */
hdr.payload_len = len + 1;
/* Add sequence and increment it */
/* Add sequence and increment it */
hdr.seqno = slave->seqno++;
/* Set status indicator byte to OK */
@ -6768,7 +6771,7 @@ static bool blr_slave_gtid_request(ROUTER_INSTANCE *router,
blr_get_file_fullpath(slave->binlogfile,
router->binlogdir,
file_path,
t_prefix[0] ? t_prefix: NULL);
t_prefix[0] ? t_prefix : NULL);
// File size is >=4 read: set pos.
if (blr_slave_get_file_size(file_path) >= 4)
{
@ -6947,7 +6950,7 @@ static bool blr_handle_maxwell_stmt(ROUTER_INSTANCE *router,
static const char maxwell_lower_case_tables_query[] = "select @@lower_case_table_names";
if (strcmp(blr_skip_leading_sql_comments(maxwell_stmt),
MYSQL_CONNECTOR_SERVER_VARS_QUERY) == 0)
MYSQL_CONNECTOR_SERVER_VARS_QUERY) == 0)
{
int rc = blr_slave_replay(router,
slave,
@ -7075,8 +7078,8 @@ static bool blr_handle_show_stmt(ROUTER_INSTANCE *router,
errmsg,
1198,
NULL);
}
return true;
}
return true;
}
else if (strcasecmp(word, "GLOBAL") == 0)
{
@ -7480,11 +7483,11 @@ static bool blr_handle_set_stmt(ROUTER_INSTANCE *router,
* Set the GTID string, it could be an empty
* in case of a fresh new setup.
*/
MXS_FREE(slave->mariadb_gtid);
slave->mariadb_gtid = MXS_STRDUP_A(heading);
MXS_FREE(slave->mariadb_gtid);
slave->mariadb_gtid = MXS_STRDUP_A(heading);
blr_slave_send_ok(router, slave);
return true;
blr_slave_send_ok(router, slave);
return true;
}
else
{
@ -7894,7 +7897,7 @@ static bool blr_handle_admin_stmt(ROUTER_INSTANCE *router,
if (!router->mariadb10_master_gtid &&
(strlen(router->prevbinlog) &&
strcmp(router->prevbinlog, router->binlog_name) != 0))
strcmp(router->prevbinlog, router->binlog_name) != 0))
{
if (blr_file_new_binlog(router, router->binlog_name))
{
@ -7985,7 +7988,7 @@ static void blr_slave_skip_empty_files(ROUTER_INSTANCE *router,
blr_get_file_fullpath(binlog_file,
router->binlogdir,
file_path,
t_prefix[0] ? t_prefix: NULL);
t_prefix[0] ? t_prefix : NULL);
/**
* Get the next file in sequence or next by GTID maps
@ -8012,7 +8015,7 @@ static void blr_slave_skip_empty_files(ROUTER_INSTANCE *router,
blr_get_file_fullpath(binlog_file,
router->binlogdir,
file_path,
t_prefix[0] ? t_prefix: NULL);
t_prefix[0] ? t_prefix : NULL);
skipped_files = true;
}
@ -8069,20 +8072,20 @@ blr_show_binary_logs(ROUTER_INSTANCE *router,
char current_file[BINLOG_FNAMELEN];
uint64_t current_pos = 0;
static const char select_query[] = "SELECT binlog_file, "
"MAX(end_pos) AS size, "
"rep_domain, "
"server_id "
"MAX(end_pos) AS size, "
"rep_domain, "
"server_id "
"FROM gtid_maps "
"GROUP BY binlog_file "
"GROUP BY binlog_file "
"ORDER BY id ASC;";
static const char select_query_full[] = "SELECT binlog_file, "
"MAX(end_pos) AS size, "
"rep_domain, "
"server_id "
"MAX(end_pos) AS size, "
"rep_domain, "
"server_id "
"FROM gtid_maps "
"GROUP BY rep_domain, "
"server_id, "
"binlog_file "
"GROUP BY rep_domain, "
"server_id, "
"binlog_file "
"ORDER BY id ASC;";
int seqno;
char *errmsg = NULL;
@ -8186,8 +8189,8 @@ blr_show_binary_logs(ROUTER_INSTANCE *router,
char t_prefix[BINLOG_FILE_EXTRA_INFO];
sprintf(t_prefix,
"%" PRIu32 "/%" PRIu32 "/",
router->mariadb10_gtid_domain,
router->orig_masterid);
router->mariadb10_gtid_domain,
router->orig_masterid);
// Add prefix before filename
sprintf(last_filename,
@ -8287,8 +8290,8 @@ static int binary_logs_select_cb(void *data,
GWBUF *pkt;
char file_path[PATH_MAX + 1];
char filename[1 +
strlen(values[0]) +
BINLOG_FILE_EXTRA_INFO];
strlen(values[0]) +
BINLOG_FILE_EXTRA_INFO];
char t_prefix[BINLOG_FILE_EXTRA_INFO] = "";
sprintf(t_prefix,
@ -8412,7 +8415,7 @@ static int blr_slave_send_id_ro(ROUTER_INSTANCE *router,
"0", // o = OFF
seqno++)) != NULL)
{
/* Write packet to client */
/* Write packet to client */
MXS_SESSION_ROUTE_REPLY(slave->dcb->session, pkt);
}
@ -8438,15 +8441,15 @@ static bool blr_handle_complex_select(ROUTER_INSTANCE *router,
const char *coln)
{
/* Strip leading spaces */
while(isspace(*coln))
while (isspace(*coln))
{
coln++;
}
if ((strcasecmp(col1, "@@server_id") == 0 ||
strcasecmp(col1, "@@global.server_id") == 0) &&
(strcasecmp(coln, "@@read_only") == 0 ||
strcasecmp(coln, "@@global.read_only") == 0))
strcasecmp(col1, "@@global.server_id") == 0) &&
(strcasecmp(coln, "@@read_only") == 0 ||
strcasecmp(coln, "@@global.read_only") == 0))
{
blr_slave_send_id_ro(router, slave);
return true;
@ -8567,7 +8570,7 @@ static const char *blr_purge_getfile(char *purge_command)
return NULL;
}
else
// Check for TO 'file'
// Check for TO 'file'
{
if (strcasecmp(word, "TO") != 0)
{
@ -8620,24 +8623,24 @@ blr_purge_binary_logs(ROUTER_INSTANCE *router,
size_t n_delete = 0;
// Select first ROWID of user specified file
static const char find_file_tpl[] = "SELECT MIN(id) AS min_id, "
"(rep_domain || '/' || "
"server_id || '/' || "
"binlog_file) AS file "
"(rep_domain || '/' || "
"server_id || '/' || "
"binlog_file) AS file "
"FROM gtid_maps "
"WHERE binlog_file = '%s' "
"WHERE binlog_file = '%s' "
"GROUP BY binlog_file "
"ORDER BY id ASC;";
// SELECT files with ROWID < given one and DELETE
static const char delete_list_tpl[] = "SELECT binlog_file, "
"(rep_domain || '/' || "
"server_id || '/' || "
"binlog_file) AS file "
"(rep_domain || '/' || "
"server_id || '/' || "
"binlog_file) AS file "
"FROM gtid_maps "
"WHERE id < %" PRIu64 " "
"WHERE id < %" PRIu64 " "
"GROUP BY file "
"ORDER BY id ASC; "
"DELETE FROM gtid_maps "
"WHERE id < %" PRIu64 ";";
"WHERE id < %" PRIu64 ";";
static char sql_stmt[GTID_SQL_BUFFER_SIZE];
BINARY_LOG_DATA_RESULT result;
static const char *selected_file;
@ -8696,10 +8699,10 @@ blr_purge_binary_logs(ROUTER_INSTANCE *router,
/* Purge all files with ROWID < result.rowid */
if (sqlite3_exec(router->gtid_maps,
sql_stmt,
binary_logs_purge_cb,
&result,
&errmsg) != SQLITE_OK)
sql_stmt,
binary_logs_purge_cb,
&result,
&errmsg) != SQLITE_OK)
{
MXS_ERROR("Failed to select list of files to purge"
"from GTID maps DB: %s, select [%s]",
@ -8830,13 +8833,13 @@ static bool blr_check_connecting_slave(const ROUTER_INSTANCE *router,
int err_code = BINLOG_FATAL_ERROR_READING;
char *msg_detail = "";
switch(check)
switch (check)
{
case BLR_SLAVE_CONNECTING: // (1)
if (router->master_state == BLRM_UNCONFIGURED)
{
err_msg = "Binlog router is not yet configured"
" for replication.";
" for replication.";
rv = false;
}
break;
@ -8979,10 +8982,10 @@ static bool blr_binlog_change_check(const ROUTER_INSTANCE *router,
}
}
else
/**
* If binlog file is set in CHANGE MASTER TO
* and MASTER_USE_GTID option is on, then return an error.
*/
/**
* If binlog file is set in CHANGE MASTER TO
* and MASTER_USE_GTID option is on, then return an error.
*/
{
/**
* Check first MASTER_USE_GTID option:
@ -9048,16 +9051,16 @@ static bool blr_change_binlog_name(ROUTER_INSTANCE *router,
* - current router file
* - empty if router->mariadb10_master_gtid is set.
*/
*new_logfile = blr_set_master_logfile(router,
binlog_file,
error);
if (*new_logfile == NULL)
{
if (!router->mariadb10_master_gtid ||
strlen(binlog_file) > 1)
{
/* Binlog name can not be changed */
ret = false;
*new_logfile = blr_set_master_logfile(router,
binlog_file,
error);
if (*new_logfile == NULL)
{
if (!router->mariadb10_master_gtid ||
strlen(binlog_file) > 1)
{
/* Binlog name can not be changed */
ret = false;
}
else
{
@ -9065,9 +9068,9 @@ static bool blr_change_binlog_name(ROUTER_INSTANCE *router,
// Blank the error message
error[0] = 0;
}
}
}
return ret;
}
}
return ret;
}
/**
@ -9116,25 +9119,25 @@ static bool blr_apply_changes(ROUTER_INSTANCE *router,
/* MariaDB 10 GTID request */
if (router->mariadb10_master_gtid)
{
if (change_master.use_mariadb10_gtid)
{
if (change_master.use_mariadb10_gtid)
{
/* MASTER_USE_GTID=Slave_pos is set */
MXS_INFO("%s: MASTER_USE_GTID is [%s]",
router->service->name,
change_master.use_mariadb10_gtid);
}
}
/* Always log the current GTID value with CHANGE_MASTER TO */
MXS_INFO("%s: CHANGE MASTER TO, current GTID value is [%s]",
router->service->name,
router->last_mariadb_gtid);
/* Always log the current GTID value with CHANGE_MASTER TO */
MXS_INFO("%s: CHANGE MASTER TO, current GTID value is [%s]",
router->service->name,
router->last_mariadb_gtid);
/* Always set empty filename at pos 4 with CHANGE_MASTER TO */
strcpy(router->binlog_name, "");
/* Always set empty filename at pos 4 with CHANGE_MASTER TO */
strcpy(router->binlog_name, "");
router->current_pos = 4;
router->binlog_position = 4;
router->current_safe_event = 4;
router->current_pos = 4;
router->binlog_position = 4;
router->current_safe_event = 4;
}
/* The new filename is not the current one */
else if (strcmp(new_logfile, router->binlog_name) != 0 &&
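
Most of the GTID bookkeeping in this file goes through sqlite3_exec() with a per-row callback (binary_logs_select_cb, binary_logs_purge_cb and friends). A minimal, self-contained sketch of that callback pattern against an in-memory database; the table and column names here are invented for the example:

    #include <sqlite3.h>
    #include <cstdio>

    // Called once per result row: argv[i] holds the text of column i.
    static int row_cb(void* user, int ncols, char** argv, char** colnames)
    {
        int* rows = static_cast<int*>(user);

        for (int i = 0; i < ncols; i++)
        {
            printf("%s=%s ", colnames[i], argv[i] ? argv[i] : "NULL");
        }

        printf("\n");
        ++*rows;
        return 0;   // returning non-zero would abort the query
    }

    int main()
    {
        sqlite3* db = nullptr;
        char* errmsg = nullptr;
        int rows = 0;

        if (sqlite3_open(":memory:", &db) != SQLITE_OK)
        {
            return 1;
        }

        if (sqlite3_exec(db,
                         "CREATE TABLE maps(binlog_file TEXT, end_pos INT);"
                         "INSERT INTO maps VALUES('binlog.000001', 4);"
                         "SELECT binlog_file, MAX(end_pos) AS size FROM maps GROUP BY binlog_file;",
                         row_cb, &rows, &errmsg) != SQLITE_OK)
        {
            fprintf(stderr, "sqlite error: %s\n", errmsg ? errmsg : "unknown");
            sqlite3_free(errmsg);
        }

        sqlite3_close(db);
        return rows > 0 ? 0 : 1;
    }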

View File

@ -262,7 +262,8 @@ printUsage(const char *progname)
printVersion(progname);
printf("The MaxScale binlog check utility.\n\n");
printf("Usage: %s [-f] [-M] [-d] [-V] [-H] [-K file] [-A algo] [-R pos] [-T pos] [<binlog file>]\n\n", progname);
printf("Usage: %s [-f] [-M] [-d] [-V] [-H] [-K file] [-A algo] [-R pos] [-T pos] [<binlog file>]\n\n",
progname);
printf(" -f|--fix Fix binlog file, require write permissions (truncate)\n");
printf(" -d|--debug Print debug messages\n");
printf(" -M|--mariadb10 MariaDB 10 binlog compatibility\n");

View File

@ -773,8 +773,8 @@ int main(int argc, char **argv)
inst->current_pos = 328;
strcpy(inst->fileroot, "file");
strcpy(query,
"CHANGE MASTER TO MASTER_LOG_POS=1991, MASTER_USE_GTID=Slave_pos, "
"MASTER_LOG_FILE=''");
"CHANGE MASTER TO MASTER_LOG_POS=1991, MASTER_USE_GTID=Slave_pos, "
"MASTER_LOG_FILE=''");
rc = blr_test_handle_change_master(inst, query, error_string);
@ -791,7 +791,7 @@ int main(int argc, char **argv)
printf("Test %d PASSED, GTID set MASTER_USE_GTID=Slave_pos "
"for [%s]: %s\n",
tests,
query,error_string);
query, error_string);
}
tests++;
@ -799,8 +799,10 @@ int main(int argc, char **argv)
/**
* Verify SQL query initial comment skipping function works on a real use case.
*/
const char *mysql_connector_j_actual = blr_skip_leading_sql_comments("/* mysql-connector-java-5.1.39 ( Revision: 3289a357af6d09ecc1a10fd3c26e95183e5790ad ) */SELECT @@session.auto_increment_increment AS auto_increment_increment, @@character_set_client AS character_set_client, @@character_set_connection AS character_set_connection, @@character_set_results AS character_set_results, @@character_set_server AS character_set_server, @@init_connect AS init_connect, @@interactive_timeout AS interactive_timeout, @@license AS license, @@lower_case_table_names AS lower_case_table_names, @@max_allowed_packet AS max_allowed_packet, @@net_buffer_length AS net_buffer_length, @@net_write_timeout AS net_write_timeout, @@query_cache_size AS query_cache_size, @@query_cache_type AS query_cache_type, @@sql_mode AS sql_mode, @@system_time_zone AS system_time_zone, @@time_zone AS time_zone, @@tx_isolation AS tx_isolation, @@wait_timeout AS wait_timeout");
const char *mysql_connector_j_expected = "SELECT @@session.auto_increment_increment AS auto_increment_increment, @@character_set_client AS character_set_client, @@character_set_connection AS character_set_connection, @@character_set_results AS character_set_results, @@character_set_server AS character_set_server, @@init_connect AS init_connect, @@interactive_timeout AS interactive_timeout, @@license AS license, @@lower_case_table_names AS lower_case_table_names, @@max_allowed_packet AS max_allowed_packet, @@net_buffer_length AS net_buffer_length, @@net_write_timeout AS net_write_timeout, @@query_cache_size AS query_cache_size, @@query_cache_type AS query_cache_type, @@sql_mode AS sql_mode, @@system_time_zone AS system_time_zone, @@time_zone AS time_zone, @@tx_isolation AS tx_isolation, @@wait_timeout AS wait_timeout";
const char *mysql_connector_j_actual =
blr_skip_leading_sql_comments("/* mysql-connector-java-5.1.39 ( Revision: 3289a357af6d09ecc1a10fd3c26e95183e5790ad ) */SELECT @@session.auto_increment_increment AS auto_increment_increment, @@character_set_client AS character_set_client, @@character_set_connection AS character_set_connection, @@character_set_results AS character_set_results, @@character_set_server AS character_set_server, @@init_connect AS init_connect, @@interactive_timeout AS interactive_timeout, @@license AS license, @@lower_case_table_names AS lower_case_table_names, @@max_allowed_packet AS max_allowed_packet, @@net_buffer_length AS net_buffer_length, @@net_write_timeout AS net_write_timeout, @@query_cache_size AS query_cache_size, @@query_cache_type AS query_cache_type, @@sql_mode AS sql_mode, @@system_time_zone AS system_time_zone, @@time_zone AS time_zone, @@tx_isolation AS tx_isolation, @@wait_timeout AS wait_timeout");
const char *mysql_connector_j_expected =
"SELECT @@session.auto_increment_increment AS auto_increment_increment, @@character_set_client AS character_set_client, @@character_set_connection AS character_set_connection, @@character_set_results AS character_set_results, @@character_set_server AS character_set_server, @@init_connect AS init_connect, @@interactive_timeout AS interactive_timeout, @@license AS license, @@lower_case_table_names AS lower_case_table_names, @@max_allowed_packet AS max_allowed_packet, @@net_buffer_length AS net_buffer_length, @@net_write_timeout AS net_write_timeout, @@query_cache_size AS query_cache_size, @@query_cache_type AS query_cache_type, @@sql_mode AS sql_mode, @@system_time_zone AS system_time_zone, @@time_zone AS time_zone, @@tx_isolation AS tx_isolation, @@wait_timeout AS wait_timeout";
if (strcmp(mysql_connector_j_actual, mysql_connector_j_expected) == 0)
{
printf("Test %d PASSED\n", tests);

View File

@ -303,13 +303,9 @@ bool listfuncs_cb(const MODULECMD *cmd, void *data)
for (int i = 0; i < cmd->arg_count_max; i++)
{
modulecmd_arg_type_t *type = &cmd->arg_types[i];
if (MODULECMD_GET_TYPE(type) != MODULECMD_ARG_OUTPUT)
{
dcb_printf(dcb, "%s%s",
modulecmd_argtype_to_str(&cmd->arg_types[i]),
i < cmd->arg_count_max - 1 ? " " : "");
}
dcb_printf(dcb, "%s%s",
modulecmd_argtype_to_str(&cmd->arg_types[i]),
i < cmd->arg_count_max - 1 ? " " : "");
}
dcb_printf(dcb, "\n\n");
@ -317,13 +313,9 @@ bool listfuncs_cb(const MODULECMD *cmd, void *data)
for (int i = 0; i < cmd->arg_count_max; i++)
{
modulecmd_arg_type_t *type = &cmd->arg_types[i];
if (MODULECMD_GET_TYPE(type) != MODULECMD_ARG_OUTPUT)
{
dcb_printf(dcb, " %s - %s\n",
modulecmd_argtype_to_str(&cmd->arg_types[i]),
cmd->arg_types[i].description);
}
dcb_printf(dcb, " %s - %s\n",
modulecmd_argtype_to_str(&cmd->arg_types[i]),
cmd->arg_types[i].description);
}
dcb_printf(dcb, "\n");
@ -931,7 +923,7 @@ struct subcommand addoptions[] =
{
{
"user", 2, 2, inet_add_admin_user,
"Add an administrative account for using maxadmin over the network",
"Add an administrative account for using maxadmin over the network",
"Usage: add user USER PASSWORD\n"
"\n"
"Parameters:\n"
@ -1664,12 +1656,6 @@ struct subcommand alteroptions[] =
}
};
static inline bool requires_output_dcb(const MODULECMD *cmd)
{
modulecmd_arg_type_t *type = &cmd->arg_types[0];
return cmd->arg_count_max > 0 && MODULECMD_GET_TYPE(type) == MODULECMD_ARG_OUTPUT;
}
static void callModuleCommand(DCB *dcb, char *domain, char *id, char *v3,
char *v4, char *v5, char *v6, char *v7, char *v8, char *v9,
char *v10, char *v11, char *v12)
@ -1687,18 +1673,6 @@ static void callModuleCommand(DCB *dcb, char *domain, char *id, char *v3,
if (cmd)
{
if (requires_output_dcb(cmd))
{
/** The command requires a DCB for output, add the client DCB
* as the first argument */
for (int i = valuelen - 1; i > 0; i--)
{
values[i] = values[i - 1];
}
values[0] = dcb;
numargs += numargs + 1 < valuelen - 1 ? 1 : 0;
}
MODULECMD_ARG *arg = modulecmd_arg_parse(cmd, numargs, values);
if (arg)
@ -1707,11 +1681,15 @@ static void callModuleCommand(DCB *dcb, char *domain, char *id, char *v3,
if (!modulecmd_call_command(cmd, arg, &output))
{
dcb_printf(dcb, "Error: %s\n", modulecmd_get_error());
const char* err = modulecmd_get_error();
dcb_printf(dcb, "Error: %s\n", *err ? err :
"Call to module command failed, see log file for more details");
}
else if (output)
{
dcb_printf(dcb, "%s\n", json_dumps(output, JSON_INDENT(4)));
char* js = json_dumps(output, JSON_INDENT(4));
dcb_printf(dcb, "%s\n", js);
MXS_FREE(js);
}
json_decref(output);
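
This last hunk fixes two small issues in callModuleCommand(): modulecmd_get_error() can return an empty string, in which case a generic message is printed instead, and json_dumps() returns a heap-allocated string that must be freed after printing. A minimal sketch of the jansson ownership rules involved (plain jansson API, outside MaxScale's MXS_FREE wrapper):

    #include <jansson.h>
    #include <cstdio>
    #include <cstdlib>

    int main()
    {
        json_t* output = json_pack("{s:s, s:i}", "status", "ok", "rows", 3);

        // json_dumps() allocates the string with malloc(); the caller frees it.
        char* js = json_dumps(output, JSON_INDENT(4));

        if (js)
        {
            printf("%s\n", js);
            free(js);           // the diff uses MXS_FREE(); plain free() works here
        }

        json_decref(output);    // drop our reference to the JSON object
        return 0;
    }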