Merge branch '2.3' of github.com:mariadb-corporation/MaxScale into 2.3

Timofey Turenko 2019-04-23 12:29:33 +03:00
commit 0f8b7450b1
22 changed files with 129 additions and 74 deletions

View File

@@ -679,7 +679,9 @@ _qc_sqlite_.
 #### `query_classifier_cache_size`
 
 Specifies the maximum size of the query classifier cache. The default limit is
-40% of total system memory.
+15% of total system memory starting with MaxScale 2.3.7. In older versions the
+default limit was 40% of total system memory. This feature was added in MaxScale
+2.3.0.
 
 When the query classifier cache has been enabled, MaxScale will, after a
 statement has been parsed, store the classification result using the
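
A minimal configuration sketch for overriding the new default, assuming the
standard `[maxscale]` section; the `512Mi` value is an arbitrary illustration,
not a recommendation from this commit:

    [maxscale]
    query_classifier_cache_size=512Mi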
@@ -855,7 +857,7 @@ than `0`, this configuration setting will not have an effect.
 #### `writeq_high_water`
 
 High water mark for network write buffer. Controls when network traffic
-throttling is started. The parameter accepts size type values.
+throttling is started. The parameter accepts [size type values](#sizes).
 
 More specifically, if the client side write queue is above this value, it will
 block traffic coming from backend servers. If the backend side write queue is
@@ -869,7 +871,7 @@ throttling is enabled. By default, traffic throttling is disabled.
 Low water mark for network write buffer. Once the traffic throttling is enabled,
 it will only be disabled when the write queue is below `writeq_low_water`. The
-parameter accepts size type values. The minimum allowed size is 512
+parameter accepts [size type values](#sizes). The minimum allowed size is 512
 bytes. `writeq_high_water` must always be greater than `writeq_low_water`.
 
 #### `load_persisted_configs`
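
A hedged example of the two watermarks together (the values are hypothetical);
per the documentation above they must satisfy `writeq_high_water` >
`writeq_low_water` >= 512 bytes:

    [maxscale]
    writeq_high_water=16Mi
    writeq_low_water=8Mi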

View File

@@ -1,4 +1,4 @@
-# MariaDB MaxScale 2.3.6 Release Notes
+# MariaDB MaxScale 2.3.6 Release Notes -- 2019-04-23
 
 Release 2.3.6 is a GA release.

View File

@@ -5,7 +5,7 @@
 set(MAXSCALE_VERSION_MAJOR "2" CACHE STRING "Major version")
 set(MAXSCALE_VERSION_MINOR "3" CACHE STRING "Minor version")
-set(MAXSCALE_VERSION_PATCH "6" CACHE STRING "Patch version")
+set(MAXSCALE_VERSION_PATCH "7" CACHE STRING "Patch version")
 
 # This should only be incremented if a package is rebuilt
 set(MAXSCALE_BUILD_NUMBER 1 CACHE STRING "Release number")

View File

@@ -135,6 +135,28 @@ exports.builder = function(yargs) {
             return updateValue(host, 'maxscale', 'data.attributes.parameters.' + argv.key, argv.value)
         })
     })
+    .command('user <name> <password>', 'Alter admin user passwords', function(yargs) {
+        return yargs.epilog('Changes the password for a user. To change the user type, destroy the user and then create it again.')
+            .usage('Usage: alter user <name> <password>')
+    }, function(argv) {
+        maxctrl(argv, function(host) {
+            var user = {
+                'data': {
+                    'id': argv.name,
+                    'type': 'inet',
+                    'attributes': {
+                        'password': argv.password
+                    }
+                }
+            }
+            return getJson(host, 'users/inet/' + argv.name)
+                .then((res) => user.data.attributes.account = res.data.attributes.account)
+                .then(() => doRequest(host, 'users/inet/' + argv.name, null, {method: 'DELETE'}))
+                .then(() => doRequest(host, 'users/inet', null, {method: 'POST', body: user}))
        })
+    })
     .usage('Usage: alter <command>')
     .help()
     .command('*', 'the default command', {}, function(argv) {
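
A hedged usage sketch of the new subcommand; the user name and password are
hypothetical:

    maxctrl alter user admin mariadb2

As the implementation shows, the password is changed by reading the user's
account type, destroying the user, and recreating it with the new password.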

View File

@@ -159,12 +159,10 @@ exports.builder = function(yargs) {
         }
     }
 
-    if (argv.params) {
-        var err = validateParams(argv, argv.params)
-        if (err) {
-            return Promise.reject(err)
-        }
+    var err = false;
+
+    if (argv.params) {
+        err = validateParams(argv, argv.params)
         monitor.data.attributes.parameters = argv.params.reduce(to_obj, {})
     }
@@ -182,6 +180,9 @@
     }
 
     maxctrl(argv, function(host) {
+        if (err) {
+            return Promise.reject(err)
+        }
         return doRequest(host, 'monitors', null, {method: 'POST', body: monitor})
     })
 })

View File

@@ -108,5 +108,17 @@ describe("Alter Commands", function() {
             .should.be.rejected
     })
+
+    it('creates user', function() {
+        return verifyCommand('create user testuser test', 'users/inet/testuser')
+    })
+
+    it('alters the password of a user', function() {
+        return verifyCommand('alter user testuser test2', 'users/inet/testuser')
+    })
+
+    it('destroys the altered user', function() {
+        return doCommand('destroy user testuser')
+    })
 
     after(stopMaxScale)
 });

View File

@@ -18,6 +18,18 @@ describe("Create/Destroy Commands", function() {
             .should.be.rejected
     })
+
+    it('monitor without parameters fails due to missing user parameter', function() {
+        return verifyCommand('create monitor my-monitor mysqlmon', 'monitors/my-monitor')
+            .should.be.rejected
+    })
+
+    it('destroy monitor created without parameters', function() {
+        return doCommand('destroy monitor my-monitor')
+            .should.be.fulfilled
+            .then(() => doCommand('show monitor my-monitor'))
+            .should.be.rejected
+    })
 
     it('will not destroy the same monitor again', function() {
         return doCommand('destroy monitor my-monitor')
             .should.be.rejected
@@ -38,6 +50,11 @@ describe("Create/Destroy Commands", function() {
             .should.be.rejected
     })
+
+    it('will not create monitor with malformed parameters', function() {
+        return doCommand('create monitor my-monitor mariadbmon not-a-param')
+            .should.be.rejected
+    })
 
     it('create monitor with options', function() {
         return doCommand('unlink monitor MariaDB-Monitor server4')
             .then(() => verifyCommand('create monitor my-monitor mysqlmon --servers server4 --monitor-user maxuser --monitor-password maxpwd',

View File

@@ -15,5 +15,10 @@ describe("Draining servers", function() {
             .should.eventually.have.string("Maintenance")
     })
+
+    it('does not drain non-existent server', function() {
+        return doCommand('drain server not-a-server')
+            .should.be.rejected
+    })
 
     after(stopMaxScale)
 });

View File

@@ -19,7 +19,8 @@ describe("Unknown Commands", function() {
         'alter',
         'rotate',
         'call',
-        'cluster'
+        'cluster',
+        'drain'
     ]
 
     endpoints.forEach(function (i) {
View File

@@ -10,6 +10,7 @@ user=maxskysql
 password= skysql
 detect_stale_master=0
 monitor_interval=1000
+assume_unique_hostnames=false
 
 [RW Split Router]
 type=service
@@ -18,6 +19,7 @@ servers=server1, server2, server3, server4
 user=maxskysql
 password=skysql
 slave_selection_criteria=LEAST_ROUTER_CONNECTIONS
+max_slave_replication_lag=1
 
 [Read Connection Router Slave]
 type=service

View File

@@ -7,7 +7,7 @@ wsrep_on=ON
 # Row binary log format is required by Galera
 binlog_format=ROW
-log-bin
+log-bin=mar-bin
 
 # InnoDB is currently the only storage engine supported by Galera
 default-storage-engine=innodb
@@ -16,9 +16,6 @@ innodb_file_per_table
 # To avoid issues with 'bulk mode inserts' using autoincrement fields
 innodb_autoinc_lock_mode=2
 
-# Required to prevent deadlocks on parallel transaction execution
-innodb_locks_unsafe_for_binlog=1
-
 # Query Cache is not supported by Galera wsrep replication
 query_cache_size=0
 query_cache_type=0
@@ -87,9 +84,6 @@ wsrep_auto_increment_control=1
 # Retry autoinc insert, when the insert failed for "duplicate key error"
 wsrep_drupal_282555_workaround=0
 
-# Enable "strictly synchronous" semantics for read operations
-wsrep_causal_reads=1
-
 # Command to call when node status or cluster membership changes.
 # Will be passed all or some of the following options:
 # --status - new status of this node

View File

@@ -7,7 +7,7 @@ wsrep_on=ON
 # Row binary log format is required by Galera
 binlog_format=ROW
-log-bin
+log-bin=mar-bin
 
 # InnoDB is currently the only storage engine supported by Galera
 default-storage-engine=innodb
@@ -16,9 +16,6 @@ innodb_file_per_table
 # To avoid issues with 'bulk mode inserts' using autoincrement fields
 innodb_autoinc_lock_mode=2
 
-# Required to prevent deadlocks on parallel transaction execution
-innodb_locks_unsafe_for_binlog=1
-
 # Query Cache is not supported by Galera wsrep replication
 query_cache_size=0
 query_cache_type=0
@@ -87,9 +84,6 @@ wsrep_auto_increment_control=1
 # Retry autoinc insert, when the insert failed for "duplicate key error"
 wsrep_drupal_282555_workaround=0
 
-# Enable "strictly synchronous" semantics for read operations
-wsrep_causal_reads=1
-
 # Command to call when node status or cluster membership changes.
 # Will be passed all or some of the following options:
 # --status - new status of this node

View File

@@ -7,7 +7,7 @@ wsrep_on=ON
 # Row binary log format is required by Galera
 binlog_format=ROW
-log-bin
+log-bin=mar-bin
 
 # InnoDB is currently the only storage engine supported by Galera
 default-storage-engine=innodb
@@ -16,9 +16,6 @@ innodb_file_per_table
 # To avoid issues with 'bulk mode inserts' using autoincrement fields
 innodb_autoinc_lock_mode=2
 
-# Required to prevent deadlocks on parallel transaction execution
-innodb_locks_unsafe_for_binlog=1
-
 # Query Cache is not supported by Galera wsrep replication
 query_cache_size=0
 query_cache_type=0
@@ -87,9 +84,6 @@ wsrep_auto_increment_control=1
 # Retry autoinc insert, when the insert failed for "duplicate key error"
 wsrep_drupal_282555_workaround=0
 
-# Enable "strictly synchronous" semantics for read operations
-wsrep_causal_reads=1
-
 # Command to call when node status or cluster membership changes.
 # Will be passed all or some of the following options:
 # --status - new status of this node

View File

@@ -7,7 +7,7 @@ wsrep_on=ON
 # Row binary log format is required by Galera
 binlog_format=ROW
-log-bin
+log-bin=mar-bin
 
 # InnoDB is currently the only storage engine supported by Galera
 default-storage-engine=innodb
@@ -16,9 +16,6 @@ innodb_file_per_table
 # To avoid issues with 'bulk mode inserts' using autoincrement fields
 innodb_autoinc_lock_mode=2
 
-# Required to prevent deadlocks on parallel transaction execution
-innodb_locks_unsafe_for_binlog=1
-
 # Query Cache is not supported by Galera wsrep replication
 query_cache_size=0
 query_cache_type=0
@@ -87,9 +84,6 @@ wsrep_auto_increment_control=1
 # Retry autoinc insert, when the insert failed for "duplicate key error"
 wsrep_drupal_282555_workaround=0
 
-# Enable "strictly synchronous" semantics for read operations
-wsrep_causal_reads=1
-
 # Command to call when node status or cluster membership changes.
 # Will be passed all or some of the following options:
 # --status - new status of this node

View File

@@ -2850,7 +2850,7 @@ void config_set_global_defaults()
     gateway.peer_password[0] = '\0';
     gateway.log_target = MXB_LOG_TARGET_DEFAULT;
-    gateway.qc_cache_properties.max_size = get_total_memory() * 0.4;
+    gateway.qc_cache_properties.max_size = get_total_memory() * 0.15;
 
     if (gateway.qc_cache_properties.max_size == 0)
     {

View File

@@ -565,6 +565,7 @@ int dcb_read(DCB* dcb,
              GWBUF** head,
              int maxbytes)
 {
+    mxb_assert(dcb->poll.owner == RoutingWorker::get_current());
     int nsingleread = 0;
     int nreadtotal = 0;
@@ -904,6 +905,7 @@ static int dcb_log_errors_SSL(DCB* dcb, int ret)
  */
 int dcb_write(DCB* dcb, GWBUF* queue)
 {
+    mxb_assert(dcb->poll.owner == RoutingWorker::get_current());
     dcb->writeqlen += gwbuf_length(queue);
 
     // The following guarantees that queue is not NULL
     if (!dcb_write_parameter_check(dcb, queue))
@@ -3301,6 +3303,7 @@ public:
         RoutingWorker& rworker = static_cast<RoutingWorker&>(worker);
         if (dcb_is_still_valid(m_dcb, rworker.id()) && m_dcb->m_uid == m_uid)
         {
+            mxb_assert(m_dcb->poll.owner == RoutingWorker::get_current());
             m_dcb->fakeq = m_buffer;
             dcb_handler(m_dcb, m_ev);
         }
@@ -3321,6 +3324,7 @@ static void poll_add_event_to_dcb(DCB* dcb, GWBUF* buf, uint32_t ev)
 {
     if (dcb == this_thread.current_dcb)
     {
+        mxb_assert(dcb->poll.owner == RoutingWorker::get_current());
         // If the fake event is added to the current DCB, we arrange for
         // it to be handled immediately in dcb_handler() when the handling
         // of the current events are done...
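
All four added asserts encode the same invariant; a minimal distillation with a
hypothetical helper name (the assertion itself is taken verbatim from the diff):

    // Every DCB is owned by exactly one RoutingWorker; any read, write or
    // fake-event path must already be running on that worker's thread.
    static inline void assert_on_owner_thread(const DCB* dcb)
    {
        mxb_assert(dcb->poll.owner == RoutingWorker::get_current());
    }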

View File

@@ -176,10 +176,15 @@ public:
         mxb_assert(peek(canonical_stmt) == nullptr);
         mxb_assert(this_unit.classifier);
 
+        // 0xffffff is the maximum packet size, 4 is for packet header and 1 is for command byte. These are
+        // MariaDB/MySQL protocol specific values that are also defined in <maxscale/protocol/mysql.h> but
+        // should not be exposed to the core.
+        constexpr int64_t max_entry_size = 0xffffff - 5;
+
         int64_t cache_max_size = this_unit.cache_max_size() / config_get_global_options()->n_threads;
         int64_t size = canonical_stmt.size();
 
-        if (size <= cache_max_size)
+        if (size < max_entry_size && size <= cache_max_size)
         {
             int64_t required_space = (m_stats.size + size) - cache_max_size;
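
In concrete terms, the arithmetic implied by the new comment: max_entry_size =
0xffffff - 5 = 16 777 215 - 5 = 16 777 210 bytes, so a canonical statement is
cached only if it fits in a single MariaDB packet after the 4-byte header and
the 1-byte command are accounted for.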

View File

@@ -56,17 +56,22 @@ static std::string do_query(MXS_MONITORED_SERVER* srv, const char* query)
 // Returns a numeric version similar to mysql_get_server_version
 int get_cs_version(MXS_MONITORED_SERVER* srv)
 {
-    // GCC 4.8 appears to have a broken std::regex_constants::ECMAScript that doesn't support brackets
-    std::regex re("Columnstore \\([0-9]*\\)[.]\\([0-9]*\\)[.]\\([0-9]*\\)-[0-9]*",
-                  std::regex_constants::basic);
-    std::string result = do_query(srv, "SELECT @@version_comment");
-    std::smatch match;
     int rval = 0;
+    std::string prefix = "Columnstore ";
+    std::string result = do_query(srv, "SELECT @@version_comment");
+    auto pos = result.find(prefix);
 
-    if (std::regex_match(result, match, re) && match.size() == 4)
+    if (pos != std::string::npos)
     {
-        rval = atoi(match[1].str().c_str()) * 10000 + atoi(match[2].str().c_str()) * 100
-               + atoi(match[3].str().c_str());
+        std::istringstream os(result.substr(pos + prefix.length()));
+        int major = 0, minor = 0, patch = 0;
+        char dot;
+        os >> major;
+        os >> dot;
+        os >> minor;
+        os >> dot;
+        os >> patch;
+        rval = major * 10000 + minor * 100 + patch;
     }
 
     return rval;
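
A worked example of the encoding (the version string is hypothetical): a
@@version_comment containing "Columnstore 1.2.3-1" parses to major 1, minor 2,
patch 3, giving rval = 1 * 10000 + 2 * 100 + 3 = 10203, the same scheme used by
mysql_get_server_version.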

View File

@@ -32,6 +32,8 @@ LocalClient::LocalClient(MYSQL_session* session, MySQLProtocol* proto, int fd)
     , m_self_destruct(false)
 {
     MXB_POLL_DATA::handler = LocalClient::poll_handler;
+    m_protocol.owner_dcb = nullptr;
+    m_protocol.stored_query = nullptr;
 }
 
 LocalClient::~LocalClient()

View File

@@ -34,6 +34,7 @@
 #include <maxscale/alloc.h>
 #include <maxscale/buffer.hh>
 #include <maxscale/utils.hh>
+#include <maxscale/routingworker.hh>
 
 std::pair<std::string, std::string> get_avrofile_and_gtid(std::string file);
@@ -239,22 +240,14 @@ bool file_in_dir(const char* dir, const char* file)
 }
 
 /**
- * @brief The client callback for sending data
- *
- * @param dcb Client DCB
- * @param reason Why the callback was called
- * @param userdata Data provided when the callback was added
- * @return Always 0
+ * Queue the client callback for execution
  */
-int avro_client_callback(DCB* dcb, DCB_REASON reason, void* userdata)
+void AvroSession::queue_client_callback()
 {
-    if (reason == DCB_REASON_DRAINED)
-    {
-        AvroSession* client = static_cast<AvroSession*>(userdata);
-        client->client_callback();
-    }
-
-    return 0;
+    auto worker = mxs::RoutingWorker::get(mxs::RoutingWorker::MAIN);
+    worker->execute([this]() {
+        client_callback();
+    }, mxs::RoutingWorker::EXECUTE_QUEUED);
 }
 
 /**
@@ -338,11 +331,7 @@ void AvroSession::process_command(GWBUF* queue)
     if (file_in_dir(router->avrodir.c_str(), avro_binfile.c_str()))
     {
-        /* set callback routine for data sending */
-        dcb_add_callback(dcb, DCB_REASON_DRAINED, avro_client_callback, this);
-
-        /* Add fake event that will call the avro_client_callback() routine */
-        poll_fake_write_event(dcb);
+        queue_client_callback();
     }
     else
     {
@@ -734,7 +723,7 @@ void AvroSession::client_callback()
     if (next_file || read_more)
     {
-        poll_fake_write_event(dcb);
+        queue_client_callback();
     }
 }

View File

@@ -170,11 +170,6 @@ public:
      */
     int routeQuery(GWBUF* buffer);
 
-    /**
-     * Handler for the EPOLLOUT event
-     */
-    void client_callback();
-
 private:
     AvroSession(Avro* instance, MXS_SESSION* session);
@@ -187,6 +182,8 @@ private:
     bool seek_to_gtid();
     bool stream_data();
     void rotate_avro_file(std::string fullname);
+    void client_callback();
+    void queue_client_callback();
 };
 
 void read_table_info(uint8_t* ptr,

View File

@@ -12,6 +12,21 @@ then
     exit 1
 fi
 
+# Prevent failures in case Docker is not available
+command -v docker
+if [ $? != 0 ]
+then
+    echo "Docker is not available, skipping the test"
+    exit 0
+fi
+
+command -v docker-compose
+if [ $? != 0 ]
+then
+    echo "docker-compose is not available, skipping the test"
+    exit 0
+fi
+
 srcdir=$1
 testsrc=$2
 testdir=$3