Merge branch '2.1' into develop

This commit is contained in:
Markus Mäkelä 2017-03-22 15:20:21 +02:00
commit 7bd05d4581
107 changed files with 3365 additions and 3638 deletions

View File

@ -6,7 +6,7 @@
## About MariaDB MaxScale
- [About MariaDB MaxScale](About/About-MaxScale.md)
- [Release Notes](Release-Notes/MaxScale-2.0.0-Release-Notes.md)
- [Release Notes](Release-Notes/MaxScale-2.1.1-Release-Notes.md)
- [Changelog](Changelog.md)
- [Limitations](About/Limitations.md)
@ -19,10 +19,6 @@
## Upgrading MariaDB MaxScale
- [Upgrading MariaDB MaxScale from 1.4 to 2.0](Upgrading/Upgrading-To-MaxScale-2.0.md)
- [Upgrading MariaDB MaxScale from 1.3 to 1.4](Upgrading/Upgrading-To-MaxScale-1.4.md)
- [Upgrading MariaDB MaxScale from 1.2 to 1.3](Upgrading/Upgrading-To-MaxScale-1.3.md)
- [Upgrading MariaDB MaxScale from 1.1.1 to 1.2](Upgrading/Upgrading-To-MaxScale-1.2.md)
- [Upgrading MariaDB MaxScale from 1.0.5 to 1.1.0](Upgrading/Upgrading-To-MaxScale-1.1.0.md)
## Reference
@ -129,20 +125,3 @@ document.
- [DCB States (to be replaced in StarUML)](Design-Documents/DCB-States.pdf)
- [Schema Sharding Router Technical Documentation](Design-Documents/SchemaRouter-technical.md)
- [Plugin development guide](Design-Documents/Plugin-development-guide.md)
## Earlier Release Notes
- [MariaDB MaxScale 1.4.3 Release Notes](Release-Notes/MaxScale-1.4.3-Release-Notes.md)
- [MariaDB MaxScale 1.4.2 Release Notes](Release-Notes/MaxScale-1.4.2-Release-Notes.md)
- [MariaDB MaxScale 1.4.1 Release Notes](Release-Notes/MaxScale-1.4.1-Release-Notes.md)
- [MariaDB MaxScale 1.4.0 Release Notes](Release-Notes/MaxScale-1.4.0-Release-Notes.md)
- [MariaDB MaxScale 1.3.0 Release Notes](Release-Notes/MaxScale-1.3.0-Release-Notes.md)
- [MariaDB MaxScale 1.2.0 Release Notes](Release-Notes/MaxScale-1.2.0-Release-Notes.md)
- [MariaDB MaxScale 1.1.1 Release Notes](Release-Notes/MaxScale-1.1.1-Release-Notes.md)
- [MariaDB MaxScale 1.1.0 Release Notes](Release-Notes/MaxScale-1.1-Release-Notes.md)
- [MariaDB MaxScale 1.0.3 Release Notes](Release-Notes/MaxScale-1.0.3-Release-Notes.md)
- [MariaDB MaxScale 1.0.1 Release Notes](Release-Notes/MaxScale-1.0.1-Release-Notes.md)
- [MariaDB MaxScale 1.0 Release Notes](Release-Notes/MaxScale-1.0-Release-Notes.md)
- [MariaDB MaxScale 0.7 Release Notes](Release-Notes/MaxScale-0.7-Release-Notes.md)
- [MariaDB MaxScale 0.6 Release Notes](Release-Notes/MaxScale-0.6-Release-Notes.md)
- [MariaDB MaxScale 0.5 Release Notes](Release-Notes/MaxScale-0.5-Release-Notes.md)

View File

@ -64,6 +64,10 @@ An optional parameter that can be used to control which statements trigger the
statement re-routing. The parameter value is a regular expression that is used to
match against the SQL text. Only non-SELECT statements are inspected.
```
match=.*INSERT.*
```
### `ignore`
An optional parameter that can be used to control which statements don't trigger
@ -71,6 +75,10 @@ the statement re-routing. This does the opposite of the _match_ parameter. The
parameter value is a regular expression that is used to match against the SQL
text. Only non-SELECT statements are inspected.
```
ignore=.*UPDATE.*
```
## Example Configuration
Here is a minimal filter configuration for the CCRFilter which should solve most

View File

@ -10,17 +10,36 @@ The cache filter is a simple cache that is capable of caching the result of
SELECTs, so that subsequent identical SELECTs are served directly by MaxScale,
without the queries being routed to any server.
SELECTs using the following functions will not be cached: `BENCHMARK`,
`CONNECTION_ID`, `CONVERT_TZ`, `CURDATE`, `CURRENT_DATE`, `CURRENT_TIMESTAMP`,
`CURTIME`, `DATABASE`, `ENCRYPT`, `FOUND_ROWS`, `GET_LOCK`, `IS_FREE_LOCK`,
`IS_USED_LOCK`, `LAST_INSERT_ID`, `LOAD_FILE`, `LOCALTIME`, `LOCALTIMESTAMP`,
`MASTER_POS_WAIT`, `NOW`, `RAND`, `RELEASE_LOCK`, `SESSION_USER`, `SLEEP`,
`SYSDATE`, `SYSTEM_USER`, `UNIX_TIMESTAMP`, `USER`, `UUID`, `UUID_SHORT`.
The cache will be used and populated in the following circumstances:
* There is _no_ explicit transaction active, that is, _autocommit_ is used,
* there is an _explicitly_ read-only transaction (that is, `START TRANSACTION
READ ONLY`) active, or
* there is a transaction active and _no_ statement that modifies the database
has been performed.
Note that installing the cache causes all statements to be parsed. The
implication of that is that unless statements _already_ need to be parsed,
e.g. due to the presence of another filter or the chosen router, then adding
the cache will not necessarily improve the performance, but may decrease it.
In practice, the last bullet point basically means that if a transaction has
been started with `BEGIN`, `START TRANSACTION` or `START TRANSACTION READ
WRITE`, then the cache will be used and populated until the first `UPDATE`,
`INSERT` or `DELETE` statement is encountered.
By default, it is *ensured* that the cache is **not** used in the following
circumstances:
* The `SELECT` uses any of the following functions: `BENCHMARK`,
`CONNECTION_ID`, `CONVERT_TZ`, `CURDATE`, `CURRENT_DATE`, `CURRENT_TIMESTAMP`,
`CURTIME`, `DATABASE`, `ENCRYPT`, `FOUND_ROWS`, `GET_LOCK`, `IS_FREE_LOCK`,
`IS_USED_LOCK`, `LAST_INSERT_ID`, `LOAD_FILE`, `LOCALTIME`, `LOCALTIMESTAMP`,
`MASTER_POS_WAIT`, `NOW`, `RAND`, `RELEASE_LOCK`, `SESSION_USER`, `SLEEP`,
`SYSDATE`, `SYSTEM_USER`, `UNIX_TIMESTAMP`, `USER`, `UUID`, `UUID_SHORT`.
* The `SELECT` accesses any of the following fields: `CURRENT_DATE`,
`CURRENT_TIMESTAMP`, `LOCALTIME`, `LOCALTIMESTAMP`
* The `SELECT` uses system or user variables.
In order to ensure that, all `SELECT` statements have to be parsed, which
carries a _significant_ performance cost. If it is known that there are no
such statements or that it does not matter even if they are cached, that
safety measure can be turned off. Please read [performance](#performance)
for more details.
## Limitations
@ -32,24 +51,6 @@ Currently there is **no** cache invalidation, apart from _time-to-live_.
### Prepared Statements
Resultsets of prepared statements are **not** cached.
### Transactions
The cache will be used and populated in the following circumstances:
* There is _no_ explicit transaction active, that is, _autocommit_ is used,
* there is an _explicitly_ read-only transaction (that is, `START TRANSACTION
READ ONLY`) active, or
* there is a transaction active and _no_ statement that modifies the database
has been performed.
In practice, the last bullet point basically means that if a transaction has
been started with `BEGIN` or `START TRANSACTION READ WRITE`, then the cache
will be used and populated until the first `UPDATE`, `INSERT` or `DELETE`
statement is encountered.
### Variables
If user or system variables are used in the _SELECT_ statement, the result
will not be cached.
### Security
The cache is **not** aware of grants.
@ -71,8 +72,6 @@ type=filter
module=cache
hard_ttl=30
soft_ttl=20
storage=...
storage_options=...
rules=...
...
@ -95,10 +94,10 @@ sharing.
### Filter Parameters
The cache filter has one mandatory parameter - `storage` - and a few
optional ones. Note that it is advisable to specify `max_size` to prevent
the cache from using up all memory there is, in case there is very little
overlap among the queries.
The cache filter has no mandatory parameters but a range of optional ones.
Note that it is advisable to specify `max_size` to prevent the cache from
using up all memory there is, in case there is very little overlap among the
queries.
#### `storage`
@ -108,6 +107,8 @@ argument. For instance:
```
storage=storage_inmemory
```
The default is `storage_inmemory`.
See [Storage](#storage-1) for what storage modules are available.
#### `storage_options`
@ -227,6 +228,29 @@ cached_data=thread_specific
Default is `shared`. See `max_count` and `max_size` what implication changing
this setting to `thread_specific` has.
#### `selects`
An enumeration option specifying what approach the cache should take with
respect to `SELECT` statements. The allowed values are:
* `assume_cacheable`: The cache can assume that all `SELECT` statements,
without exceptions, are cacheable.
* `verify_cacheable`: The cache can *not* assume that all `SELECT`
statements are cacheable, but must verify that.
```
selects=assume_cacheable
```
Default is `verify_cacheable`. In this case, the `SELECT` statements will be
parsed and only those that are safe for caching - e.g. do *not* call any
non-cacheable functions or access any non-cacheable variables - will be
subject to caching.
If `assume_cacheable` is specified, then all `SELECT` statements are
assumed to be cacheable and will be parsed *only* if some specific rule
requires that.
#### `debug`
An integer value, using which the level of debug logging made by the cache
@ -678,3 +702,99 @@ The rules specify that the data of the table `sbtest` should be cached.
]
}
```
# Performance
Perhaps the most significant factor affecting the performance of the cache is
whether the statements need to be parsed or not. By default, all statements are
parsed in order to exclude `SELECT` statements that use non-cacheable functions,
access non-cacheable variables or refer to system or user variables.
If it is known that no such statements are used or if it does not matter if the
results are cached, that safety measure can be turned off. To do that, add the
following line to the cache configuration:
```
[MyCache]
...
selects=assume_cacheable
```
With that configuration, the cache itself will not cause the statements to be
parsed.
But note that even with `assume_cacheable` configured, a rule referring
specifically to a _database_, _table_ or _column_ will still cause the
statement to be parsed.
For instance, a simple rule like
```
{
"store": [
{
"attribute": "database",
"op": "=",
"value": "db1"
}
]
}
```
cannot be fulfilled without parsing the statement.
If the rule is instead expressed using a regular expression
```
{
"store": [
{
"attribute": "query",
"op": "like",
"value": "FROM db1\\..*"
}
]
}
```
then the statement will again not be parsed.
However, even though regular expression matching is performance-wise cheaper
than parsing, it still carries a cost. The following table gives a rough
picture of the relative cost of the different approaches.
In the table, _regexp match_ means that the cacheable statements
were picked out using a rule like
```
{
"attribute": "query",
"op": "like",
"value": "FROM dbname"
}
```
while _exact match_ means that the cacheable statements were picked out using a
rule like
```
{
"attribute": "database",
"op": "=",
"value": "dbname"
}
```
The exact match rule requires all statements to be parsed.
Note that the qps figures are only indicative.
| `selects` | Rule | qps |
| -------------------| ---------------|-----|
| `assume_cacheable` | none | 100 |
| `assume_cacheable` | _regexp match_ | 98 |
| `assume_cacheable` | _exact match_ | 60 |
| `verify_cacheable` | none | 60 |
| `verify_cacheable` | _regexp match_ | 58 |
| `verify_cacheable` | _exact match_ | 58 |
## Summary
For maximum performance:
* Arrange the situation so that `selects=assume_cacheable` can be
configured, and use _no_ rules.
* If `selects=assume_cacheable` has been configured, use _only_
regexp based rules.
* If `selects=verify_cacheable` has been configured, non-regex based
  matching can be used.

View File

@ -5,7 +5,7 @@ requirements are as follows:
* CMake version 2.8 or later (Packaging requires version 2.8.12 or later)
* GCC version 4.4.7 or later
* libaio
* SQLite3 version 3.3 or later
* libcurl
* OpenSSL
* Bison 2.7 or later
@ -20,33 +20,35 @@ The following packages are required on CentOS/RHEL 7. Older releases may require
other packages in addition to these.
```
git gcc gcc-c++ ncurses-devel bison flex glibc-devel cmake libgcc perl make libtool \
openssl-devel libaio libaio-devel libcurl-devel pcre-devel tcl tcl-devel systemtap-sdt-devel libuuid libuuid-devel
git gcc gcc-c++ ncurses-devel bison flex glibc-devel cmake libgcc perl make \
libtool openssl openssl-devel libcurl-devel pcre-devel tcl tcl-devel \
systemtap-sdt-devel libuuid libuuid-devel sqlite sqlite-devel
```
You can install the packages with the following commands.
```
sudo yum install git gcc gcc-c++ ncurses-devel bison flex glibc-devel cmake libgcc perl \
make libtool openssl-devel libaio libaio-devel librabbitmq-devel \
libcurl-devel pcre-devel tcl tcl-devel systemtap-sdt-devel libuuid libuuid-devel
sudo yum install git gcc gcc-c++ ncurses-devel bison flex glibc-devel cmake \
libgcc perl make libtool openssl openssl-devel libcurl-devel pcre-devel \
tcl tcl-devel systemtap-sdt-devel libuuid libuuid-devel sqlite sqlite-devel
```
### Required packages on Ubuntu and Debian systems
The following packages are required on Ubuntu 14.04. Different releases may require
other packages in addition to these.
The following packages are required on Ubuntu 16.04. Different releases may
require other packages in addition to these.
```
git build-essential libssl-dev libaio-dev ncurses-dev bison flex \
cmake perl libtool libcurl4-openssl-dev libpcre3-dev tlc tcl-dev uuid uuid-dev
git build-essential libssl-dev ncurses-dev bison flex cmake perl libtool \
libcurl4-openssl-dev libpcre3-dev tcl tcl-dev uuid uuid-dev libsqlite3-dev
```
You can install the packages with the following command.
```
sudo apt-get install git build-essential libssl-dev libaio-dev ncurses-dev \
bison flex cmake perl libtool libcurl4-openssl-dev libpcre3-dev tcl tcl-dev uuid uuid-dev
sudo apt-get install git build-essential libssl-dev ncurses-dev bison flex \
cmake perl libtool libcurl4-openssl-dev libpcre3-dev tcl tcl-dev uuid \
uuid-dev libsqlite3-dev
```
## Preparing the MariaDB MaxScale build
@ -106,6 +108,11 @@ sudo make install
Other useful targets for Make are `documentation`, which generates the Doxygen documentation, and `uninstall` which uninstall MariaDB MaxScale binaries after an install.
**Note**: If you configure CMake multiple times, it's possible that you will run
into problems when building MaxScale. Most of the time this manifests as a
missing _pcre2.h_ header file. When this happens, delete everything in the
build directory and run the CMake command again.
# Building MariaDB MaxScale packages
In addition to the packages needed to build MariaDB MaxScale, you will need the

View File

@ -442,6 +442,17 @@ files.
execdir=/usr/local/bin/
```
#### `connector_plugindir`
Location of the MariaDB Connector-C plugin directory. The MariaDB Connector-C
used in MaxScale can use this directory to load authentication plugins. The
versions of the plugins must be binary compatible with the connector version
that MaxScale was built with.
```
connector_plugindir=/usr/lib/plugin/
```
#### `persistdir`
Configure the directory where persisted configurations are stored. When a new

View File

@ -1,336 +0,0 @@
# MariaDB MaxScale 0.5 Alpha Release Notes
0.5 Alpha
This document details the changes in version 0.5 since the release of the 0.4 alpha of the MaxScale product.
# New Features
## Read/Write Splitter Routing Module
In previous versions the read/write splitter routing module has had a number of limitations on its use; in the alpha release the router now removes the most important restrictions.
### Session Commands
Session commands are those statements that make some change to the user’s login session that may cause different effects from subsequent statements executed. Since the read/write splitter executes statements on either a master server or a slave server, depending upon the statement to execute, it is important that these session modifications are executed on all connections to both slave and master servers. This is resolved in release 0.5 such that session modification commands are executed on all active connections and a single return is forward back to the client that made the request.
### Transaction Support
Transaction support has been added into this version of the read/write splitter, there is one known outstanding limitation. If autocommit is enabled inside an active transaction it is not considered as commit in read/write splitter. Once a transaction has started all statements are routed to a master until the transaction is committed or rolled back.
## Authentication
A number of issues and shortcomings in the authentication performed by MaxScale have been resolved by this release.
### Host Considered in Authentication
Previously MaxScale did not follow the same rules as MySQL when authenticating a login request, it would always use the wildcard password entries and would not check the incoming host was allowed to connect. MaxScale now checks the incoming IP address for a connection request and verifies this against the authentication data loaded from the backend servers. The same rules are applied when choosing the password entry to authenticate with. Note however that authentication from MaxScale to the backend database will fail if the MaxScale host is not allowed to login using the matching password for the user.
### Stale Authentication Data
In previous releases of MaxScale the authentication data would be read at startup time only and would not be refreshed. Therefore if a user was added or modified in the backend server this will not be picked up by MaxScale and that user would be unable to connect via MaxScale. MaxScale now reloads user authentication data when a failure occurs and will refresh its internal tables if the data has changed in the backend. Please note that this reload process is rate limited to prevent incorrect logins to MaxScale being used for a denial of service attack on the backend servers.
### Enable Use Of "root" User
Previously MaxScale would prevent the use of the root user to login to the backend servers via MaxScale. This may be enabled on a per service basis by adding an "enable_root_user" options in the service entry to enable it in the MaxScale configuration file. This allows the use of root to be controlled on a per service basis.
## Network Support
### Unix Domain Sockets
MaxScale now supports Unix domain sockets for connecting to a local MaxScale server. The use of a Unix domain socket is controlled by adding a "socket" entry in the listener configuration entry for a service.
### Network Interface Binding
MaxScale has added the ability to bind a listener for a service to a network address via an "address" entry in the configuration file.
# Server Version
The server version reported when connected to a database via MaxScale has now been altered. This now shows the MaxScale name and version together with the backend server name. An example of this can be seen below for the 0.5 release.
-bash-4.1$ mysql -h 127.0.0.1 -P 4006 -uxxxx -pxxxx Welcome to the MariaDB monitor. Commands end with ; or \\g. Your MySQL connection id is 22320 Server version: MaxScale 0.5.0 MariaDB Server Copyright (c) 2000, 2012, Oracle, Monty Program Ab and others. Type 'help;' or '\\h' for help. Type '\\c' to clear the current input statement. MySQL [(none)]> \\ys -------------- mysql Ver 15.1 Distrib 5.5.28a-MariaDB, for Linux (i686) using readline 5.1 ... Server: MySQL Server version: MaxScale 0.5.0 MariaDB Server ... -------------- MySQL [(none)]>
# Bug Fixes
A number of bug fixes have been applied between the 0.4 alpha and this alpha release. The table below lists the bugs that have been resolved. The details for each of these may be found in bugs.skysql.com.
<table>
<tr>
<td>ID</td>
<td>Summary</td>
</tr>
<tr>
<td>141</td>
<td>No "delete user" command in debugcli</td>
</tr>
<tr>
<td>175</td>
<td>Buffer leak in dcb_read from Coverity run</td>
</tr>
<tr>
<td>178</td>
<td>Uninitialised variables from Coverity run</td>
</tr>
<tr>
<td>179</td>
<td>open with O_CREAT in second argument needs 3 arguments</td>
</tr>
<tr>
<td>363</td>
<td>simple_mutex "name" memory handling ...</td>
</tr>
<tr>
<td>126</td>
<td>"reload config" in debug interface causes maxscale server to segfault</td>
</tr>
<tr>
<td>149</td>
<td>It is possible to delete all maxscale users</td>
</tr>
<tr>
<td>218</td>
<td>there is no way to understand what is going on if MAXSCALE_HOME is incorrect</td>
</tr>
<tr>
<td>137</td>
<td>"show users" and "reload users" refer to very different things in debugcli</td>
</tr>
<tr>
<td>154</td>
<td>readwritesplit does not use router_options</td>
</tr>
<tr>
<td>160</td>
<td>telnetd leaks memory</td>
</tr>
<tr>
<td>169</td>
<td>Galera monitor is actually never compiled ....</td>
</tr>
<tr>
<td>172</td>
<td>Several compile errors in galera_mon.c</td>
</tr>
<tr>
<td>174</td>
<td>Resource leak in server.c</td>
</tr>
<tr>
<td>176</td>
<td>Resource leak in gw_utils.c</td>
</tr>
<tr>
<td>362</td>
<td>possible datadir_cleanup() problems ...</td>
</tr>
<tr>
<td>124</td>
<td>readconnroute does not validate router_options</td>
</tr>
<tr>
<td>153</td>
<td>MaxScale fails when max connections are exceeded</td>
</tr>
<tr>
<td>133</td>
<td>MaxScale leaves lots of "data<pid>" directories sitting around $MAXSCALE_HOME</td>
</tr>
<tr>
<td>166</td>
<td>readwritesplit causes MaxScale segfault when starting up</td>
</tr>
<tr>
<td>207</td>
<td>Quitting telnet session causes maxscale to fail</td>
</tr>
<tr>
<td>161</td>
<td>Memory leak in load_mysql_users.</td>
</tr>
<tr>
<td>177</td>
<td>Resource leak in secrets.c</td>
</tr>
<tr>
<td>182</td>
<td>On Startup logfiles are empty</td>
</tr>
<tr>
<td>135</td>
<td>MaxScale unsafely handles empty passwords in getUsers</td>
</tr>
<tr>
<td>145</td>
<td>.secret file for encrypted passwords cyclicly searched</td>
</tr>
<tr>
<td>171</td>
<td>ifndef logic in build_gateway.inc doesn't work, MARIADB_SRC_PATH from env not picked up</td>
</tr>
<tr>
<td>173</td>
<td>Resource leak in adminusers.c found by Coverity</td>
</tr>
<tr>
<td>376</td>
<td>Confusing Server Version</td>
</tr>
<tr>
<td>370</td>
<td>maxscale binary returns zero exit status on failures</td>
</tr>
<tr>
<td>150</td>
<td>telnetd listener should bind to 127.0.0.1 by default</td>
</tr>
<tr>
<td>152</td>
<td>listener configuration should support bind address</td>
</tr>
<tr>
<td>373</td>
<td>Documentation: it's not clear what privileges the maxscale user needs</td>
</tr>
<tr>
<td>128</td>
<td>Maxscale prints debug information to terminal session when run in background</td>
</tr>
<tr>
<td>129</td>
<td>MaxScale refuses to connect to server and reports nonsense error as a result</td>
</tr>
<tr>
<td>147</td>
<td>Maxscale's hashtable fails to handle deletion of entries.</td>
</tr>
<tr>
<td>148</td>
<td>users data structure's stats have incorrect values.</td>
</tr>
<tr>
<td>384</td>
<td>MaxScale crashes if backend authentication fails</td>
</tr>
<tr>
<td>210</td>
<td>Bad timing in freeing readconnrouter's dcbs cause maxscale crash</td>
</tr>
<tr>
<td>403</td>
<td>gwbuf_free doesn't protect freeing shared buffer</td>
</tr>
<tr>
<td>371</td>
<td>If router module load fails, MaxScale goes to inifinite loop</td>
</tr>
<tr>
<td>385</td>
<td>MaxScale (DEBUG-version) dasserts if backend dcb is closed in the middle of client dcb performing close_dcb</td>
</tr>
<tr>
<td>386</td>
<td>Starting MaxScale with -c pointing at existing file causes erroneous behavior</td>
</tr>
<tr>
<td>209</td>
<td>Error in backend hangs client connection</td>
</tr>
<tr>
<td>194</td>
<td>maxscale crashes at start if module load fails</td>
</tr>
<tr>
<td>369</td>
<td>typo in "QUERY_TYPE_UNKNWON"</td>
</tr>
<tr>
<td>163</td>
<td>MaxScale crashes with multiple threads</td>
</tr>
<tr>
<td>162</td>
<td>threads parameter in configuration file is not effective</td>
</tr>
<tr>
<td>400</td>
<td>hastable_get_stats returns value of uninitialized value in 'nelems'</td>
</tr>
<tr>
<td>212</td>
<td>Failing write causes maxscale to fail</td>
</tr>
<tr>
<td>222</td>
<td>Double freeing mutex corrupts log</td>
</tr>
<tr>
<td>208</td>
<td>current_connection_count is decreased multiple times per session, thus breaking load balancing logic</td>
</tr>
<tr>
<td>378</td>
<td>Misspelling maxscale section name in config file crashes maxscale</td>
</tr>
<tr>
<td>399</td>
<td>Every row in log starts with 0x0A00</td>
</tr>
<tr>
<td>205</td>
<td>MaxScale crashes due SEGFAULT because return value of dcb_read is not checked</td>
</tr>
<tr>
<td>220</td>
<td>Maxscale crash if socket listening fails in startup</td>
</tr>
<tr>
<td>372</td>
<td>Log manager hangs MaxScale if log string (mostly query length) exceeds block size</td>
</tr>
<tr>
<td>397</td>
<td>Free of uninitialised pointer if MAXSCALE_HOME is not set</td>
</tr>
<tr>
<td>402</td>
<td>gw_decode_mysql_server_handshake asserts with mysql 5.1 backend</td>
</tr>
<tr>
<td>345</td>
<td>MaxScale don't find backend servers if they are started after MaxScale</td>
</tr>
<tr>
<td>406</td>
<td>Memory leak in dcb_alloc()</td>
</tr>
<tr>
<td>360</td>
<td>MaxScale passwd option</td>
</tr>
<tr>
<td>151</td>
<td>Get parse_sql failed on array INSERT</td>
</tr>
<tr>
<td>216</td>
<td>Backend error handling doesn't update server's connection counter</td>
</tr>
<tr>
<td>127</td>
<td>MaxScale should handle out-of-date backend auth data more gracefully</td>
</tr>
<tr>
<td>146</td>
<td>"show dbusers" argument not documented</td>
</tr>
<tr>
<td>125</td>
<td>readconnroute causes maxscale server crash if no slaves are available</td>
</tr>
<tr>
<td>375</td>
<td>Tarball contains UID and maxscale base dir</td>
</tr>
</table>

View File

@ -1,31 +0,0 @@
# MariaDB MaxScale 0.6 Alpha Release Notes
0.6 Alpha
This document details the changes in version 0.6 since the release of the 0.5 alpha of the MaxScale product. The 0.6 version is merely a set of bug fixes based on the previous 0.5 version.
# Bug Fixes
A number of bug fixes have been applied between the 0.5 alpha and this alpha release. The table below lists the bugs that have been resolved. The details for each of these may be found in bugs.skysql.com.
<table>
<tr>
<td>ID</td>
<td>Summary</td>
</tr>
<tr>
<td>423</td>
<td>The new "version_string" parameter has been added to service section.
This allows a specific version string to be set for each service, this version string is used in the MySQL handshake from MaxScale to clients and is reported as the server version to clients.
The version_string is optional, the default value will be taken from the embedded MariaDB library which supplies the parser to MaxScale.</td>
</tr>
<tr>
<td>418</td>
<td>Statements are not routed to master if a transaction is started implicitly by setting autocommit=0. In such cases statements were previously routed as if they were not part of a transaction.
This fix changes the behavior so that if autocommit is disabled, all statements are routed to the master and, in case of session variable updates, to both master and slave.</td>
</tr>
</table>

View File

@ -1,158 +0,0 @@
# MariaDB MaxScale 0.7 Alpha Release Notes
0.7 Alpha
This document details the changes in version 0.7 since the release of the 0.6 alpha of the MaxScale product.
# New Features
## Galera Support
Enhanced support for Galera cluster to allow Galera to be used as a High Available Cluster with no write contention between the nodes.
MaxScale will control access to a Galera Cluster such that one node is designated as the master node to which all write operations will be sent. Read operations will be sent to any of the remaining nodes that are part of the cluster. Should the currently elected master node fail MaxScale will automatically promote one of the remaining nodes to become the new master node.
## Multiple Slave Connections
The Read/Write Split query router has been enhanced to allow multiple slaves connections to be created. The number of slave connections is configurable via a parameter in the MaxScale configuration file.
Adding multiple connections allows for better load balancing between the slaves and is a pre-requisite for providing improved fault tolerance within the Read/Write Splitter. The selection of which slave to use for a particular read operation can be controlled via options in the router configuration.
## Debug Interface Enhancements
A number of new list commands have been added to the debug interface to allow more concise tabular output of certain object types within the interface.
**MaxScale>** help list
Available options to the list command:
filters List all the filters defined within MaxScale
listeners List all the listeners defined within MaxScale
modules Show all currently loaded modules
services List all the services defined within MaxScale
servers List all the servers defined within MaxScale
sessions List all the active sessions within MaxScale
**MaxScale>**
Those objects that are defined in the configuration file can now be referenced by the names used in the configuration file rather than by using memory addresses. This means that services, servers, monitors and filters can all now be referenced using meaningful names provided by the user. Internal objects such as DCB’s and sessions, which are not named in the configuration file still require the use of memory addresses.
Two modes of operation of the interface are now available, user mode and developer mode. The user mode restricts access to the features that allow arbitrary structures to be examined and checks all memory addresses for validity before allowing access.
## Maintenance Mode for Servers
MaxScale now provides a maintenance mode for servers, this mode allows servers to be set such that no new connections will be opened to that server. Also, servers in maintenance mode are not monitored by MaxScale. This allows an administrator to set a server into maintenance mode when it is required to be taken out of use. The connections will then diminish over time and since no new connections are created, the administrator can remove the node from use to perform some maintenance activities.
Nodes are placed into maintenance mode via the debug interface using the set server command.
**MaxScale>** set server datanode3 maintenance
Nodes are taken out of maintenance using the clear server command.
**MaxScale>** clear server datanode3 maintenance
## Configurable Monitoring Interval
All monitor plugins now provide a configuration parameter that can be set to control how frequently the MaxScale monitoring is performed.
## Replication Lag Heartbeat Monitor
The mysqlmon monitor module now implements a replication heartbeat protocol that is used to determine the lag between updates to the master and those updates being applied to the slave. This information is then made available to routing modules and may be used to determine if a particular slave node may be used or which slave node is most up to date.
## Filters API
The first phase of the filter API is available as part of this release. This provides filtering for the statements from the client application to the router. Filtering for the returned results has not yet been implemented and will be available in a future version.
Three example filters are included in the release
1. Statement counting Filter - a simple filter that counts the number of SQL statements executed within a session. Results may be viewed via the debug interface.
2. Query Logging Filter - a simple query logging filter that writes all statements for a session into a log file for that session.
3. Query Rewrite Filter - an example of how filters can alter the query contents. This filter allows a regular expression to be defined, along with replacement text that should be substituted for every match of that regular expression.
## MariaDB 10 Replication Support
The mysqlmon monitor module has been updated to support the new syntax for show all slaves status in MariaDB in order to correctly determine the master and slave state of each server being monitored. Determination of MariaDB 10 is automatically performed by the monitor and no configuration is required.
## API Versioning
The module interface has been enhanced to allow the API version in use to be reported, along with the status of the module and a short description of the module. The status allows for differentiation of the release status of a plugin to be identified independently of the core of MaxScale. Plugins may be designated as "in development", “alpha”, “beta” or “GA”.
**MaxScale>** list modules
Module Name | Module Type | Version | API | Status
----------------------------------------------------------------
regexfilter | Filter | V1.0.0 | 1.0.0 | Alpha
MySQLBackend | Protocol | V2.0.0 | 1.0.0 | Alpha
telnetd | Protocol | V1.0.1 | 1.0.0 | Alpha
MySQLClient | Protocol | V1.0.0 | 1.0.0 | Alpha
mysqlmon | Monitor | V1.2.0 | 1.0.0 | Alpha
readwritesplit | Router | V1.0.2 | 1.0.0 | Alpha
readconnroute | Router | V1.0.2 | 1.0.0 | Alpha
debugcli | Router | V1.1.1 | 1.0.0 | Alpha
**MaxScale>**
# Bug Fixes
A number of bug fixes have been applied between the 0.6 alpha and this alpha release. The table below lists the bugs that have been resolved. The details for each of these may be found in bugs.skysql.com.
<table>
<tr>
<td>ID</td>
<td>Summary</td>
</tr>
<tr>
<td>443</td>
<td>mysql/galera monitors hang when backend fails</td>
</tr>
<tr>
<td>424</td>
<td>Read/Write Splitter closes connection without sending COM_QUIT</td>
</tr>
<tr>
<td>438</td>
<td>Internal thread deadlock</td>
</tr>
<tr>
<td>436</td>
<td>Sessions in invalid state</td>
</tr>
<tr>
<td>359</td>
<td>Router options for Read/Write Split module</td>
</tr>
<tr>
<td>435</td>
<td>Some automated tests have invalid SQL syntax</td>
</tr>
<tr>
<td>431</td>
<td>rwsplit.sh test script has incorrect bash syntax</td>
</tr>
<tr>
<td>425</td>
<td>MaxScale crashes after prolonged use</td>
</tr>
</table>
# Linking
Following reported issues with incompatibilities between MaxScale and the shared library used by MySQL this version of MaxScale will be statically linked with the MariaDB 5.5 embedded library that it requires. This library is used for internal purposes only and does not result in MaxScale support for other versions of MySQL or MariaDB being affected.

View File

@ -1,124 +0,0 @@
# MariaDB MaxScale 1.0 Beta Release Notes
1.0 Beta
This document details the changes in version 1.0 since the release of the 0.7 alpha of the MaxScale product.
# New Features
## Complex Replication Structures
The MaxScale monitor module for Master/Slave replication is now able to correctly identify tree structured replication environments and route write statements to the master server at the root level of the tree. Isolated database instances are now also correctly identified as external to the replication tree.
## Read/Write Splitter Enhancements
### Support For Prepared Statements
Prepared statements are now correctly recognized by MaxScale, with the prepare stage being sent to all the eligible servers that could eventually run the statement. Statements are then executed on a single server.
### Slave Failure Resilience
The Read/Write splitter can now be used to establish multiple connections to different slave servers. The read load will be distributed across these slaves and slave failure will be masked from the application as MaxScale will automatically failover to another slave when one fails.
### Configurable Load Balancing Options
It is now possible to configure the criteria that the Read/Write Splitter uses for load balancing, the options are:
* The total number of connections to the servers, from this MaxScale instance
* The number of connections to the server for this particular MaxScale service
* The number of statements currently being executed on the server on behalf of this MaxScale instance
* Route statements to the slave that has the least replication lag
### Replication Consistency
The Read/Write splitter may now be configured to exclude nodes that are currently showing a replication lag greater than a configurable threshold. The replication lag is measured using the MySQL Monitor module of MaxScale.
Alternatively it is possible to define that read operations should be routed to the slave that has the least measured replication lag.
## Weighted Routing Options
The distribution of connections and statement across the set of nodes can be controlled by attaching arbitrary parameters to the servers and then configuring the router to use that parameter value as a weighting factor when deciding which of the valid servers to which to connect or route queries.
Several parameters may be used on each host and different routers may choose to use different parameters as the weighting parameter for that router. The use of weighting is optional, if no weighting parameter is given in the service definition then all eligible servers will have an equal distribution applied.
Server weighting is supported by both the Read/Write Splitter and the connection router.
## MaxAdmin Client
A new administrative interface has been added that uses a MaxScale specific client application to interact with MaxScale to control and monitor the MaxScale activities. This client application may be used interactively or within scripts, passing commands to MaxScale via command line arguments. Command scripts are available, allowing command sets of commands to be stored in script files.
MaxAdmin also supports command history via libedit on those distributions that support the libedit library. This allows for the use of the up and down cursor keys or selection of previous commands and editing of lines using vi or emacs style editing commands.
## Pacemaker Support
MaxScale now ships with an init.d script that is compatible with the use of Pacemaker and Heartbeat to provide for a highly available implementation of MaxScale. A tutorial on setting up MaxScale under Pacemaker control is included in the Documentation directory.
## Filter API Enhancements
The filter API has now been enhanced to operate not just on downstream query filtering but also upstream result set filtering.
## Enhanced and New Filters
Addition of new filters and enhancements to those existing filters that appeared in 0.7 of MaxScale.
### Top Filter
A new filter to capture and log the longest running queries within a client session. The filter can be configured to capture a specific number of queries that take the longest time between the query being submitted to the database server and the first result being returned.
The queries captured can be defined using regular expressions to include and exclude queries that match these expressions. In addition the inclusion of a session may be based on the user name used to connect to the database or the source address of the client session.
### Tee Filter
A filter to optionally duplicate requests received from the client and send them to other services within MaxScale. This allows a single statement sent by a client to be routed to multiple storage backends via MaxScale.
The queries duplicated can be defined using regular expressions to include and exclude queries that match these expressions. In addition the inclusion of a session may be based on the user name used to connect to the database or the source client session.
### QLA and Regex Filter Improvements
These filters have been enhanced to provide for the inclusion of sessions by specifying the username used to connect to the database or the source of the client connection as a criteria to trigger the use of these filters for particular sessions connected to the MaxScale service.
# Bug Fixes
A number of bug fixes have been applied between the 0.7 alpha and this beta release. The table below lists the bugs that have been resolved. The details for each of these may be found in bugs.skysql.com.
<table>
<tr>
<td>ID</td>
<td>Summary</td>
</tr>
<tr>
<td>441</td>
<td>Possible failure to return a value in setipaddress</td>
</tr>
<tr>
<td>396</td>
<td>Build instruction suggest forcing install of RPM’s</td>
</tr>
<tr>
<td>452</td>
<td>Make install copies the modules to an incorrect directory</td>
</tr>
<tr>
<td>450</td>
<td>Read/Write splitter does not balance load between multiple slaves</td>
</tr>
<tr>
<td>449</td>
<td>The router clientReply function does not handle GWBUF structures correctly</td>
</tr>
</table>
# Packaging
Both RPM and Debian packages are available for MaxScale in addition to the tar based releases previously distributed we now provide
* CentOS/RedHat 5 RPM
* CentOS/RedHat 6 RPM
* Ubuntu 14.04 package

View File

@ -1,332 +0,0 @@
# MariaDB MaxScale 1.0.1 Beta Release Notes
1.0.1 Beta
This document details the changes in version 1.0.1 since the release of the 1.0 beta of the MaxScale product.
# New Features
## CMake build system
Building MaxScale is now easier than ever thanks to the introduction of CMake into the build process. Building with CMake removes the need to edit files, specify directory locations or change build flags, in all but the rarest of the cases, and building with non-standard configurations is a lot easier thanks to the easy configuration of all the build parameters.
Here’s a short list of the most common build parameters,their functions and default values.
<table>
<tr>
<td>Variable</td>
<td>Purpose</td>
<td>Default value</td>
</tr>
<tr>
<td>INSTALL_DIR</td>
<td>Root location of the MaxScale install</td>
<td>/usr/local/skysql/maxscale</td>
</tr>
<tr>
<td>STATIC_EMBEDDED</td>
<td>Whether to use the static or the dynamic version of the embedded library</td>
<td>No</td>
</tr>
<tr>
<td>OLEVEL</td>
<td>Level of optimization used when compiling</td>
<td>No optimization</td>
</tr>
<tr>
<td>INSTALL_SYSTEM_FILES</td>
<td>If startup scripts should be installed into /etc/init.d and ldconfig configuration files to /etc/ld.so.conf.d</td>
<td>Yes</td>
</tr>
<tr>
<td>BUILD_TYPE</td>
<td>The type of the build. ‘None’ for normal, ‘Debug’ for debugging and ‘Optimized’ for an optimized build.</td>
<td>None</td>
</tr>
</table>
Details on all the configurable parameters and instructions on how to use CMake can be found in the README file.
## Enhancements
The polling mechanism in MaxScale has been modified to overcome a flaw which meant that connections with a heavy I/O load could starve other connections within MaxScale and prevent query execution. This has been resolved with a fairer event scheduling mechanism within the MaxScale polling subsystem. This has led to improved overall performance in high load situations.
# Bug Fixes
A number of bug fixes have been applied between the 1.0 beta release and this release candidate. The table below lists the bugs that have been resolved. The details for each of these may be found in bugs.skysql.com.
<table>
<tr>
<td>ID</td>
<td>Summary</td>
</tr>
<tr>
<td>462</td>
<td>Testall target fails in server/test due to invalid MAXSCALE_HOME path specification</td>
</tr>
<tr>
<td>467</td>
<td>max_slave_replication lag is not effective after session creation</td>
</tr>
<tr>
<td>468</td>
<td>query_classifier : if parsing fails, parse tree and thread context are freed but used</td>
</tr>
<tr>
<td>469</td>
<td>rwsplit counts every connection twice in master - connection counts leak</td>
</tr>
<tr>
<td>466</td>
<td>hint_next_token doesn't detect <param>=<value> pair if there are no spaces around '='</td>
</tr>
<tr>
<td>470</td>
<td>Maxscale crashes after a normal query if a query with named hint was used before</td>
</tr>
<tr>
<td>473</td>
<td>Entering a hint with route server target as '=(' causes a crash</td>
</tr>
<tr>
<td>472</td>
<td>Using a named hint after its initial use causes a crash</td>
</tr>
<tr>
<td>471</td>
<td>Routing Hints route to server sometimes doesn't work</td>
</tr>
<tr>
<td>463</td>
<td>MaxScale hangs receiving more than 16K in input</td>
</tr>
<tr>
<td>476</td>
<td>mysql_common.c:protocol_archive_srv_command leaks memory and accesses freed memory</td>
</tr>
<tr>
<td>479</td>
<td>Undefined filter reference in maxscale.cnf causes a crash</td>
</tr>
<tr>
<td>410</td>
<td>maxscale.cnf server option is not parsed for spaces</td>
</tr>
<tr>
<td>417</td>
<td>Galera monitor freezes on network failure of a server</td>
</tr>
<tr>
<td>488</td>
<td>SHOW VARIABLES randomly failing with "Lost connection to MySQL server"</td>
</tr>
<tr>
<td>484</td>
<td>Hashtable does not always release write lock during add</td>
</tr>
<tr>
<td>485</td>
<td>Hashtable not locked soon enough in iterator get next item</td>
</tr>
<tr>
<td>493</td>
<td>Can have same section name multiple times without warning</td>
</tr>
<tr>
<td>510</td>
<td>Embedded library crashes on a call to free_embedded_thd</td>
</tr>
<tr>
<td>511</td>
<td>Format strings in log_manager.cc should be const char*</td>
</tr>
<tr>
<td>509</td>
<td>rw-split sensitive to order of terms in field list of SELECT</td>
</tr>
<tr>
<td>507</td>
<td>rw-split router does not send last_insert_id() to master</td>
</tr>
<tr>
<td>490</td>
<td>session handling for non-determinstic user variables broken</td>
</tr>
<tr>
<td>489</td>
<td>@@hostname and @@server_id treated differently from @@wsrep_node_address</td>
</tr>
<tr>
<td>528</td>
<td>Wrong service name in tee filter crashes maxscale on connect</td>
</tr>
<tr>
<td>530</td>
<td>MaxScale socket permission</td>
</tr>
<tr>
<td>536</td>
<td>log_manager doesn't write buffers to disk in the order they are written</td>
</tr>
<tr>
<td>447</td>
<td>Error log is flooded with same warning if there are no slaves present</td>
</tr>
<tr>
<td>475</td>
<td>The end comment tag in hints isn't properly detected.</td>
</tr>
<tr>
<td>181</td>
<td>Missing log entry if server not reachable</td>
</tr>
<tr>
<td>486</td>
<td>Hashtable problems when created with size less than one</td>
</tr>
<tr>
<td>516</td>
<td>maxadmin CLI client sessions are not closed?</td>
</tr>
<tr>
<td>495</td>
<td>Referring to a nonexisting server in servers=... doesn't even raise a warning</td>
</tr>
<tr>
<td>538</td>
<td>maxscale should expose details of "Down" server</td>
</tr>
<tr>
<td>539</td>
<td>MaxScale crashes in session_setup_filters</td>
</tr>
<tr>
<td>494</td>
<td>The service 'CLI' is missing a definition of the servers that provide the service</td>
</tr>
<tr>
<td>180</td>
<td>Documentation: No information found in the documentation about firewall settings</td>
</tr>
<tr>
<td>524</td>
<td>Connecting to MaxScale from localhost tries matching @127.0.0.1 grant</td>
</tr>
<tr>
<td>481</td>
<td>MySQL monitor doesn't set master server if the replication is broken</td>
</tr>
<tr>
<td>437</td>
<td>Failure to detect MHA master switch</td>
</tr>
<tr>
<td>541</td>
<td>Long queries cause MaxScale to block</td>
</tr>
<tr>
<td>492</td>
<td>In dcb.c switch fallthrough appears to be used without comment</td>
</tr>
<tr>
<td>439</td>
<td>Memory leak in getUsers</td>
</tr>
<tr>
<td>545</td>
<td>RWSplit: session modification commands weren't routed to all if executed inside open transaction</td>
</tr>
<tr>
<td>543</td>
<td>RWSplit router statistics counters are not updated correctly</td>
</tr>
<tr>
<td>544</td>
<td>server with weight=0 gets one connection</td>
</tr>
<tr>
<td>525</td>
<td>Crash when saving post in Wordpress</td>
</tr>
<tr>
<td>533</td>
<td>Drupal installer hangs</td>
</tr>
<tr>
<td>497</td>
<td>Can’t enable debug/trace logs in configuration file</td>
</tr>
<tr>
<td>430</td>
<td>Temporary tables not working in MaxScale</td>
</tr>
<tr>
<td>527</td>
<td>No signal handler for segfault etc</td>
</tr>
<tr>
<td>546</td>
<td>Use of weightby router parameter causes error log write</td>
</tr>
<tr>
<td>506</td>
<td>Don’t write shm/tmpfs by default without telling the user or giving a way to override it</td>
</tr>
<tr>
<td>552</td>
<td>Long argument options to maxadmin and maxscale broke maxadmin commands</td>
</tr>
<tr>
<td>521</td>
<td>Many commands in maxadmin client simply hang</td>
</tr>
<tr>
<td>478</td>
<td>Parallel session command processing fails</td>
</tr>
<tr>
<td>499</td>
<td>make clean leaves some .o files behind</td>
</tr>
<tr>
<td>500</td>
<td>"depend: no such file warnings during make</td>
</tr>
<tr>
<td>501</td>
<td>log_manager, query classifier rebuilds unconditionally</td>
</tr>
<tr>
<td>502</td>
<td>log_manager and query_classifier builds always rebuild utils</td>
</tr>
<tr>
<td>504</td>
<td>clean rule for Documentation directory in wrong makefile</td>
</tr>
<tr>
<td>505</td>
<td>utils/makefile builds stuff unconditionally, misses "depend" target</td>
</tr>
<tr>
<td>548</td>
<td>MaxScale accesses freed client DCB and crashes</td>
</tr>
<tr>
<td>550</td>
<td>modutil functions process length incorrectly</td>
</tr>
</table>
# Packaging
Both RPM and Debian packages are available for MaxScale in addition to the tar based releases previously distributed we now provide
* CentOS/RedHat 5 RPM
* CentOS/RedHat 6 RPM
* Ubuntu 14.04 package

View File

@ -1,136 +0,0 @@
# MariaDB MaxScale 1.0.3 Release Notes
1.0.3 GA
This document details the changes in version 1.0.3 since the release of the 1.0.2 Release Candidate of the MaxScale product.
# New Features
No new features have been introduced since the release candidate was released.
# Bug Fixes
A number of bug fixes have been applied between the 1.0.2 Release Candidate and this release. The table below lists the bugs that have been resolved. The details for each of these may be found in bugs.mariadb.com.
<table>
<tr>
<td>ID</td>
<td>Summary</td>
</tr>
<tr>
<td>644</td>
<td>Buffers that were cloned using the gwbuf_clone routine failed to initialise the buffer lock structure correctly.</td>
</tr>
<tr>
<td>643</td>
<td>Recursive filter definitions in the configuration file could cause MaxScale to loop</td>
</tr>
<tr>
<td>665</td>
<td>An access to memory that had already been freed could be made within the MaxScale core</td>
</tr>
<tr>
<td>664</td>
<td>MySQL Authentication code could access memory that had already been freed.</td>
</tr>
<tr>
<td>673</td>
<td>MaxScale could crash if it had an empty user table and the MaxAdmin show dbusers command was run</td>
</tr>
<tr>
<td>670</td>
<td>The tee filter could lose statement on the branch service if the branch service was significantly slower at executing statements compared with the main service.</td>
</tr>
<tr>
<td>653</td>
<td>Memory corruption could occur with extremely long hostnames in the mysql.user table.</td>
</tr>
<tr>
<td>657</td>
<td>If the branch service of a tee filter shutdown unexpectedly then MaxScale could fail</td>
</tr>
<tr>
<td>654</td>
<td>Missing quotes in MaxAdmin show dbusers command could cause MaxAdmin to crash</td>
</tr>
<tr>
<td>677</td>
<td>A race condition existed in the tee filter client reply handling</td>
</tr>
<tr>
<td>658</td>
<td>The readconnroute router did not correctly close sessions when a backend database failed</td>
</tr>
<tr>
<td>662</td>
<td>MaxScale startup hangs if no backend servers respond</td>
</tr>
<tr>
<td>676</td>
<td>MaxScale writes a log entry, "Write to backend failed. Session closed." when changing default database via readwritesplit with max_slave_connections != 100%</td>
</tr>
<tr>
<td>650</td>
<td>Tee filter does not correctly detect missing branch service</td>
</tr>
<tr>
<td>645</td>
<td>Tee filter can hang MaxScale if the read/write splitter is used</td>
</tr>
<tr>
<td>678</td>
<td>Tee filter does not always send full query to branch service</td>
</tr>
<tr>
<td>679</td>
<td>A shared pointer in the service was leading to misleading service states</td>
</tr>
<tr>
<td>680</td>
<td>The Read/Write Splitter can not load users if there are no databases available at startup</td>
</tr>
<tr>
<td>681</td>
<td>The Read/Write Splitter could crash if the value of max_slave_connections was set to a low percentage and only a small number of backend servers are available</td>
</tr>
</table>
# Known Issues
There are a number of bugs and known limitations within this version of MaxScale, the most serious of these are listed below.
* The SQL construct "LOAD DATA LOCAL INFILE" is not fully supported.
* The Read/Write Splitter is a little too strict when it receives errors from slave servers during execution of session commands. This can result in sessions being terminated in situations in which MaxScale could recover without terminating the sessions.
* MaxScale can not manage authentication that uses wildcard matching in hostnames in the mysql.user table of the backend database. The only wildcards that can be used are in IP address entries.
* When users have different passwords based on the host from which they connect MaxScale is unable to determine which password it should use to connect to the backend database. This results in failed connections and unusable usernames in MaxScale.
# Packaging
Both RPM and Debian packages are available for MaxScale in addition to the tar based releases previously distributed we now provide
* CentOS/RedHat 5
* CentOS/RedHat 6
* CentOS/RedHat 7
* Debian 6
* Debian 7
* Ubuntu 12.04 LTS
* Ubuntu 13.10
* Ubuntu 14.04 LTS
* Fedora 19
* Fedora 20
* OpenSuSE 13

View File

@ -1,140 +0,0 @@
# MariaDB MaxScale 1.0.4 Release Notes
1.0.4 GA
This document details the changes in version 1.0.4 since the release of the 1.0.2 Release Candidate of the MaxScale product.
## New Features
No new features have been introduced since the release candidate was released.
## Bug Fixes
A number of bug fixes have been applied between the 1.0.3 release and this release. The table below lists the bugs that have been resolved. The details for each of these may be found in bugs.mariadb.com.
<table>
<tr>
<td>ID</td>
<td>Summary</td>
</tr>
<tr>
<td>644</td>
<td>Buffers that were cloned using the gwbuf_clone routine failed to initialise the buffer lock structure correctly.</td>
</tr>
<tr>
<td>643</td>
<td>Recursive filter definitions in the configuration file could cause MaxScale to loop</td>
</tr>
<tr>
<td>665</td>
<td>An access to memory that had already been freed could be made within the MaxScale core</td>
</tr>
<tr>
<td>664</td>
<td>MySQL Authentication code could access memory that had already been freed.</td>
</tr>
<tr>
<td>673</td>
<td>MaxScale could crash if it had an empty user table and the MaxAdmin show dbusers command was run</td>
</tr>
<tr>
<td>670</td>
<td>The tee filter could lose statement on the branch service if the branch service was significantly slower at executing statements compared with the main service.</td>
</tr>
<tr>
<td>653</td>
<td>Memory corruption could occur with extremely long hostnames in the mysql.user table.</td>
</tr>
<tr>
<td>657</td>
<td>If the branch service of a tee filter shutdown unexpectedly then MaxScale could fail</td>
</tr>
<tr>
<td>654</td>
<td>Missing quotes in MaxAdmin show dbusers command could cause MaxAdmin to crash</td>
</tr>
<tr>
<td>677</td>
<td>A race condition existed in the tee filter client reply handling</td>
</tr>
<tr>
<td>658</td>
<td>The readconnroute router did not correctly close sessions when a backend database failed</td>
</tr>
<tr>
<td>662</td>
<td>MaxScale startup hangs if no backend servers respond</td>
</tr>
<tr>
<td>676</td>
<td>MaxScale writes a log entry, "Write to backend failed. Session closed." when changing default database via readwritesplit with max_slave_connections != 100%</td>
</tr>
<tr>
<td>650</td>
<td>Tee filter does not correctly detect missing branch service</td>
</tr>
<tr>
<td>645</td>
<td>Tee filter can hang MaxScale if the read/write splitter is used</td>
</tr>
<tr>
<td>678</td>
<td>Tee filter does not always send full query to branch service</td>
</tr>
<tr>
<td>679</td>
<td>A shared pointer in the service was leading to misleading service states</td>
</tr>
<tr>
<td>680</td>
<td>The Read/Write Splitter can not load users if there are no databases available at startup</td>
</tr>
<tr>
<td>681</td>
<td>The Read/Write Splitter could crash if the value of max_slave_connections was set to a low percentage and only a small number of backend servers are available</td>
</tr>
</table>
## Known Issues
There are a number of bugs and known limitations within this version of MaxScale, the most serious of these are listed below.
* The SQL construct "LOAD DATA LOCAL INFILE" is not fully supported.
* The Read/Write Splitter is a little too strict when it receives errors from slave servers during execution of session commands. This can result in sessions being terminated in situations in which MaxScale could recover without terminating the sessions.
* MaxScale can not manage authentication that uses wildcard matching in hostnames in the mysql.user table of the backend database. The only wildcards that can be used are in IP address entries.
* When users have different passwords based on the host from which they connect MaxScale is unable to determine which password it should use to connect to the backend database. This results in failed connections and unusable usernames in MaxScale.
# Packaging
Both RPM and Debian packages are available for MaxScale in addition to the tar based releases previously distributed we now provide
* CentOS/RedHat 5
* CentOS/RedHat 6
* CentOS/RedHat 7
* Debian 6
* Debian 7
* Ubuntu 12.04 LTS
* Ubuntu 13.10
* Ubuntu 14.04 LTS
* Fedora 19
* Fedora 20
* OpenSuSE 13
# MaxScale Home Default Value
The installation assumes that the default value for the environment variable MAXSCALE_HOME is set to /usr/local/mariadb/maxscale. This is hard coded in the service startup file that is placed in /etc/init.d/maxscale by the installation process.

View File

@ -1,113 +0,0 @@
# MariaDB MaxScale 1.0.5 Release Notes
This document details the changes in version 1.0.5 since the release of the 1.0.4 GA of the MaxScale product.
# New Features
No new features have been introduced since the GA version was released. SuSE Enterprise 11 and 12 packages are now also supplied.
# Bug Fixes
A number of bug fixes have been applied between the 1.0.4 initial GA release and this GA release. The table below lists the bugs that have been resolved. The details for each of these may be found in bugs.mariadb.com.
<table>
<tr>
<td>ID</td>
<td>Summary</td>
</tr>
<tr>
<td>519</td>
<td>LOAD DATA LOCAL INFILE not handled?</td>
</tr>
<tr>
<td>714</td>
<td>Error log flooded when too many connect errors causes the MaxScale host to be blocked</td>
</tr>
<tr>
<td>711</td>
<td>Some MySQL Workbench Management actions hang with R/W split router</td>
</tr>
<tr>
<td>710</td>
<td>make package install files in /etc/init.d</td>
</tr>
<tr>
<td>683</td>
<td>Check for unsupported version of MariaDB</td>
</tr>
<tr>
<td>684</td>
<td>Use mysql_config to determine include/lib directory paths and compiler options</td>
</tr>
<tr>
<td>689</td>
<td>cmake ­DCMAKE_INSTALL_PREFIX has no effect</td>
</tr>
<tr>
<td>701</td>
<td>set server <svr> maint fails on the command line</td>
</tr>
<tr>
<td>705</td>
<td>Authentication fails when the user connects to a database with the SQL mode including ANSI_QUOTES</td>
</tr>
<tr>
<td>507</td>
<td>R/W split does not send last_insert_id() to the master</td>
</tr>
<tr>
<td>700</td>
<td>maxscale ­­version has no output</td>
</tr>
<tr>
<td>694</td>
<td>RWSplit SELECT @a:=@a+1 as a, test.b from test breaks client session</td>
</tr>
<tr>
<td>685</td>
<td>SELECT against readconnrouter fails when large volumes of data are returned and the tee filter is used</td>
</tr>
</table>
# Known Issues
There are a number of bugs and known limitations within this version of MaxScale, the most serious of these are listed below.
* The Read/Write Splitter is a little too strict when it receives errors from slave servers during execution of session commands. This can result in sessions being terminated in situations in which MaxScale could recover without terminating the sessions.
* MaxScale can not manage authentication that uses wildcard matching in hostnames in the mysql.user table of the backend database. The only wildcards that can be used are in IP address entries.
* When users have different passwords based on the host from which they connect MaxScale is unable to determine which password it should use to connect to the backend database. This results in failed connections and unusable usernames in MaxScale.
# Packaging
Both RPM and Debian packages are available for MaxScale in addition to the tar based releases previously distributed we now provide
* CentOS/RedHat 5
* CentOS/RedHat 6
* CentOS/RedHat 7
* Debian 6
* Debian 7
* Ubuntu 12.04 LTS
* Ubuntu 13.10
* Ubuntu 14.04 LTS
* Fedora 19
* Fedora 20
* OpenSuSE 13
* SuSE Enterprise 11
* SuSE Enterprise 12
# MaxScale Home Default Value
The installation assumes that the default value for the environment variable MAXSCALE_HOME is set to /usr/local/skysql/maxscale. This is hard coded in the service startup file that is placed in /etc/init.d/maxscale by the installation process.

View File

@ -1,284 +0,0 @@
# MariaDB MaxScale 1.1 Release Notes
## 1.1 GA
This document details the changes in version 1.1 since the release of the 1.0.5 GA Release of the MaxScale product.
## New Features
### High Performance Binlog Relay
Replicate Binlog from the master to slave through MaxScale as simplified relay server for reduced network load and disaster recovery
### Database Firewall Filter
Block queries based on columns in the query, where condition, query type (select, insert, delete, update), presence of wildcard in column selection, regular expression match and time of the query
### Schema Sharding Router
Route to databases sharded by schema without application level knowledge of shard configuration
### Hint based routing
Pass hints in the SQL statement to influence the routing decision based on replication lag or time out
### Named Server Routing
Routing to a named server if incoming query matches a regular expression
### Canonical Query logging
Convert incoming queries to canonical form and push the query and response into RabbitMQ Broker for a RabbitMQ Client to later retrieve from
### Nagios Plugin
Plugin scripts for monitoring MaxScale status and performance from a Nagios Server
### Notification Service
Receive notification of security update and patches tailored to your MaxScale configuration
### MySQL NDB cluster support
Connection based routing to MySQL NDB clusters
### Updated installation path
MaxScale is now installed into `/usr/local/mariadb-maxscale`
## Bug Fixes
A number of bug fixes have been applied between the 1.0.5 GA and this GA release. The table below lists the bugs that have been resolved. The details for each of these may be found in https://jira.mariadb.org/projects/MXS or in the former http://bugs.mariadb.com Bug database
<table>
<tr>
<td>ID</td>
<td>Summary</td>
</tr>
<tr>
<td>MXS-80</td>
<td>"show sessions" can crash MaxScale</td>
</tr>
<tr>
<td>MXS-79</td>
<td>schemarouter hangs if client connects with empty database</td>
</tr>
<tr>
<td>MXS-78</td>
<td>"USE" statement gives unpredictable/unexpected results</td>
</tr>
<tr>
<td>MXS-76</td>
<td>core/dbusers.c needs better error messages</td>
</tr>
<tr>
<td>MXS-74</td>
<td>Crash when no arguments given to on_queries clause</td>
</tr>
<tr>
<td>MXS-72</td>
<td>dbfwfilter on_queries clause appears to be ignored</td>
</tr>
<tr>
<td>MXS-71</td>
<td>dbfwfilter at_times clause seems to erroneously block user</td>
</tr>
<tr>
<td>MXS-68</td>
<td>Wrong rule name in dbfwfilter leads to MaxScale crash</td>
</tr>
<tr>
<td>MXS-65</td>
<td>Omitting <any|all|strict_all> in users directive causes crash in libdbfwfilter.so(link_rules)</td>
</tr>
<tr>
<td>MXS-63</td>
<td>Maxkeys and Maxpasswd log to /tpm</td>
</tr>
<tr>
<td>MXS-57</td>
<td>MaxScale should write a message to the error log when config is not found</td>
</tr>
<tr>
<td>MXS-54</td>
<td>Write failed auth attempt to trace log</td>
</tr>
<tr>
<td>MXS-50</td>
<td>Removing 1.0.5 RPM gives error about /etc/ld.so.conf.d/maxscale.conf</td>
</tr>
<tr>
<td>MXS-47</td>
<td>Session freeze when small tail packet</td>
</tr>
<tr>
<td>MXS-5</td>
<td>Possible memory leak in readwritesplit router</td>
</tr>
<tr>
<td>736</td>
<td>Memory leak while doing read/write splitting</td>
</tr>
<tr>
<td>733</td>
<td>Init-script deletes bin/maxscale</td>
</tr>
<tr>
<td>732</td>
<td>Build is broken: CentOS/RHEL 5 and SLES 11</td>
</tr>
<tr>
<td>730</td>
<td>Regex filter and shorter than original replacement queries MaxScale</td>
</tr>
<tr>
<td>729</td>
<td>PDO prepared statements bug introduced in Maxscale 1.0.5</td>
</tr>
<tr>
<td>721</td>
<td>Documentation suggests SIGTERM to re-read config file</td>
</tr>
<tr>
<td>716</td>
<td>$this->getReadConnection()->query('SET @id = 0;');</td>
</tr>
<tr>
<td>709</td>
<td>"COPYRIGHT LICENSE README SETUP" files go to /usr/local/mariadb-maxscale/ after 'make package'</td>
</tr>
<tr>
<td>704</td>
<td>"make testall" returns success status (exit code 0) even on failures</td>
</tr>
<tr>
<td>698</td>
<td>Using invalid parameter in many maxadmin commands causes MaxScale to fail</td>
</tr>
<tr>
<td>693</td>
<td>Freeing tee filter's orphaned sessions causes a segfault when embedded server closes</td>
</tr>
<tr>
<td>690</td>
<td>CPU/architecture is hardcoded into debian/rules</td>
</tr>
<tr>
<td>686</td>
<td>TestService fails because of the modules used in it aren't meant for actual use</td>
</tr>
<tr>
<td>677</td>
<td>Race condition in tee filter clientReply</td>
</tr>
<tr>
<td>676</td>
<td>"Write to backend failed. Session closed." when changing default database via readwritesplit with max_slave_connections != 100%</td>
</tr>
<tr>
<td>673</td>
<td>MaxScale crashes if "Users table data" is empty and "show dbusers" is executed in maxadmin</td>
</tr>
<tr>
<td>670</td>
<td>Tee filter: statement router loses statements when other router gets enough ahead</td>
</tr>
<tr>
<td>665</td>
<td>Core: accessing freed memory when session is closed</td>
</tr>
<tr>
<td>659</td>
<td>MaxScale doesn't shutdown if none of the configured services start</td>
</tr>
<tr>
<td>648</td>
<td>use database is sent forever with tee filter to a readwrite split service</td>
</tr>
<tr>
<td>620</td>
<td>enable_root_user=true generates errors to error log</td>
</tr>
<tr>
<td>612</td>
<td>Service was started although no users could be loaded from database</td>
</tr>
<tr>
<td>600</td>
<td>RWSplit: if session command fails in some backend, it is not dropped from routing session</td>
</tr>
<tr>
<td>587</td>
<td>Hint filter don't work if listed before regex filter in configuration file</td>
</tr>
<tr>
<td>579</td>
<td>serviceStartProtocol test crashes</td>
</tr>
<tr>
<td>506</td>
<td>Don't write to shm/tmpfs by default without telling and without a way to override it</td>
</tr>
<tr>
<td>503</td>
<td>TOC in the bundled PDFs doesn't link to actual sections</td>
</tr>
<tr>
<td>457</td>
<td>Please provide a list of build dependencies for building MaxScale</td>
</tr>
<tr>
<td>361</td>
<td>file_exists() *modifies* the file it checks for???</td>
</tr>
<tr>
<td>338</td>
<td>Log manager spread down feature is disabled</td>
</tr>
<tr>
<td>159</td>
<td>Memory leak. Dbusers are loaded into memory but not unloaded</td>
</tr>
</table>
## Known Issues
There are a number of bugs and known limitations within this version of MaxScale; the most serious of these are listed below.
* The Read/Write Splitter is a little too strict when it receives errors from slave servers during execution of session commands. This can result in sessions being terminated in situations from which MaxScale could recover without terminating the sessions.
* MaxScale can not manage authentication that uses wildcard matching in hostnames in the mysql.user table of the backend database. The only wildcards that can be used are in IP address entries.
* When users have different passwords based on the host from which they connect MaxScale is unable to determine which password it should use to connect to the backend database. This results in failed connections and unusable usernames in MaxScale.
* Service init script is missing after upgrade from 1.0 in RPM-based system. Can be fixed by reinstalling the package ('yum reinstall maxscale' or 'rpm -i --force /maxscale-1.1.rpm')
* Binlog Router Plugin is compatible with MySQL 5.6
Binlog Router Plugin currently does not work for MariaDB 5.5 and MariaDB 10.0
* LONGBLOB is currently not supported.
* Galera Cluster variables, such as @@wsrep_node_name, are not resolved by the embedded MariaDB parser.
* The Database Firewall filter does not support multi-statements. Using them will result in an error being sent to the client.
## Packaging
Both RPM and Debian packages are available for MaxScale. In addition to the tar based releases previously distributed, we now provide:
* CentOS/RedHat 5
* CentOS/RedHat 6
* CentOS/RedHat 7
* Debian 6
* Debian 7
* Ubuntu 12.04 LTS
* Ubuntu 13.10
* Ubuntu 14.04 LTS
* Fedora 19
* Fedora 20
* OpenSuSE 13
* SuSE Linux Enterprise 11
* SuSE Linux Enterprise 12

View File

@ -1,94 +0,0 @@
# MariaDB MaxScale 1.1.1 Release Notes
## 1.1.1 GA
MaxScale 1.1 is the current stable (GA) release of MaxScale. Version 1.1.1 is mainly a bug fix release, but it also introduces some improvements to existing functionality.
## New Features
### Improved memory management options
Readwritesplit and schemarouter now both support upper limits to session state modifying commands. They both also allow the complete disabling of the history, making the sessions consume the smallest amount of memory while still making sure all slaves keep identical session states.
### Improved trace logging
The process of the user authentication data retrieval is logged into the trace log and the readconnroute router also outputs more information into the trace log. This allows for easier problem detection and configuration tuning.
### More informative output from maxkeys and maxpasswd
Using the password functionality in MaxScale is now a lot easier. Both programs now produce verbose and exact error messages.
## Bug Fixes
Here is a list of bugs fixed since the release of the 1.1.0 version of MaxScale. The bug IDs are from the **[MariaDB Jira](https://jira.mariadb.org/)**.
* [MXS-99](https://jira.mariadb.org/browse/MXS-99): /etc/init.d/maxscale reload doesn't do anything
* [MXS-83](https://jira.mariadb.org/browse/MXS-83): linkage fails when system pcre library is recent
* [MXS-112](https://jira.mariadb.org/browse/MXS-112): Disable saving of session commands in the readwritesplit and schemarouter modules
* [MXS-114](https://jira.mariadb.org/browse/MXS-114): Disable recovery of disconnected slaves
* [MXS-73](https://jira.mariadb.org/browse/MXS-73): MaxScale uses nearly 100% CPU
* [MXS-36](https://jira.mariadb.org/browse/MXS-36): bugzillaId-671: wrong message if SHOW DATABASES privilege is missing
* [MXS-39](https://jira.mariadb.org/browse/MXS-39): bugzillaId-731:Boolean configuration parameters accept inconsistent parameters
* [MXS-64](https://jira.mariadb.org/browse/MXS-64): maxkeys and Maxpasswd do not produce informative error output
* [MXS-25](https://jira.mariadb.org/browse/MXS-25): bugzillaId-656: MySQL Monitor: claims that Master is available after master failure
* [MXS-82](https://jira.mariadb.org/browse/MXS-82): cmake warns when mariadb is compiled without mysql_release
* [MXS-69](https://jira.mariadb.org/browse/MXS-69): dbfwfilter should be pessimistic about rule syntax errors
* [MXS-98](https://jira.mariadb.org/browse/MXS-98): regexfilter log
* [MXS-28](https://jira.mariadb.org/browse/MXS-28): bugzillaId-433: Logging don't include assert information
* [MXS-75](https://jira.mariadb.org/browse/MXS-75): "wildcard" rule also blocks COUNT(*)
* [MXS-118](https://jira.mariadb.org/browse/MXS-118): Two monitors loaded at the same time result into not working installation
* [MXS-33](https://jira.mariadb.org/browse/MXS-33): bugzillaId-702: CLI: list services command shows negative values for the number of users of a service (Read Service).
* [MXS-17](https://jira.mariadb.org/browse/MXS-17): bugzillaId-736: Memory leak while doing read/write splitting
* [MXS-30](https://jira.mariadb.org/browse/MXS-30): bugzillaId-487: Buffer manager should not use pointer arithmetic on void*
* [MXS-81](https://jira.mariadb.org/browse/MXS-81): cmake fails when init scripts are missing
* [MXS-127](https://jira.mariadb.org/browse/MXS-127): disable_sescmd_history causes MaxScale to crash under load
## Known Issues
There are a number of bugs and known limitations within this version of MaxScale; the most serious of these are listed below.
* The Read/Write Splitter is a little too strict when it receives errors from slave servers during execution of session commands. This can result in sessions being terminated in situations from which MaxScale could recover without terminating the sessions.
* MaxScale cannot manage authentication that uses wildcard matching in hostnames in the mysql.user table of the backend database. The only wildcards that can be used are in IP address entries.
* When users have different passwords based on the host from which they connect MaxScale is unable to determine which password it should use to connect to the backend database. This results in failed connections and unusable usernames in MaxScale.
* Binlog Router Plugin is compatible with MySQL 5.6
Binlog Router Plugin currently does not work for MariaDB 5.5 and MariaDB 10.0
* LONGBLOB is currently not supported.
* Galera Cluster variables, such as @@wsrep_node_name, are not resolved by the embedded MariaDB parser.
* The Database Firewall filter does not support multi-statements. Using them will result in an error being sent to the client.
## Packaging
Both RPM and Debian packages are available for MaxScale in addition to the tar based releases. Packages are now provided for:
* CentOS/RedHat 5
* CentOS/RedHat 6
* CentOS/RedHat 7
* Debian 6
* Debian 7
* Ubuntu 12.04 LTS
* Ubuntu 14.04 LTS
* Fedora 19
* Fedora 20
* Fedora 21
* OpenSuSE 13
* SuSE Linux Enterprise 11
* SuSE Linux Enterprise 12

View File

@ -1,99 +0,0 @@
# MariaDB MaxScale 1.2 Release Notes
## 1.2 GA
This document details the changes in version 1.2 since the release of the 1.1.1 GA Release of the MaxScale product.
### ***PLEASE NOTICE: MaxScale installation directories have changed in this version***
The 1.2 version of MaxScale differs from previous versions in its installation layout. Please take great care when upgrading MaxScale from previous versions to version 1.2. An automatic upgrade will not work due to the severe changes in the installation layout.
## New Features
### Non-root MaxScale
You can now run MaxScale as any user. The standard installation of a MaxScale package now creates the maxscale user and the maxscale group.
### FHS-compliant installation
The 1.2 version of MaxScale now complies to the Filesystem Hierarchy Standard. This means that MAXSCALE_HOME is no longer necessary and directories can be moved to different locations.
A quick list of changes in installation directories and file names:
* Binaries go into `/usr/bin`
* Configuration files to `/etc` and the configuration file is now lower case: `maxscale.cnf`
* Logs to `/var/log/maxscale`
* The module and library directory have been combined into a single directory in `/usr/lib64/maxscale`. If you have custom modules please make sure they are located there.
* Data directory is `/var/lib/maxscale`. This is the default location for MaxScale-specific data.
* PID file can be found at `/var/run/maxscale`
### Client side SSL encryption
MaxScale now supports SSL/TLS encrypted connections to MaxScale.
### Launchable scripts
Now you can configure MaxScale monitor module to automatically launch a script when it detects change in the state of a backend server. The script can be any customer script defined by you to take diagnostic or reporting action. With this you can easily customize MaxScale's behavior.
### Lsyncd configuration guide
A new tutorial has been added which helps you keep MaxScale's configuration files in sync across multiple hosts. This allows for easier HA setups with MaxScale and guarantees up-to-date configuration files on all nodes. The tutorial can be found [here](../Tutorials/MaxScale-HA-with-lsyncd.md).
## Bug fixes
Here is a list of bugs fixed since the release of MaxScale 1.1.1.
* [MXS-24](https://jira.mariadb.org/browse/MXS-24): bugzillaId-604: Module load path documentation issues ...
* [MXS-40](https://jira.mariadb.org/browse/MXS-40): Display logged in users
* [MXS-113](https://jira.mariadb.org/browse/MXS-113): MaxScale seems to fail if built against MariaDB 10.0 libraries
* [MXS-116](https://jira.mariadb.org/browse/MXS-116): Do not run maxscale as root.
* [MXS-117](https://jira.mariadb.org/browse/MXS-117): Allow configuration of the log file directory
* [MXS-125](https://jira.mariadb.org/browse/MXS-125): inconsistency in maxkeys/maxpassword output and parameters
* [MXS-128](https://jira.mariadb.org/browse/MXS-128): cyclic dependency utils -> log_manager -> utils
* [MXS-136](https://jira.mariadb.org/browse/MXS-136): Check for MaxScale replication heartbeat table existence before creating
* [MXS-137](https://jira.mariadb.org/browse/MXS-137): cannot get sql for queries with length >= 0x80
* [MXS-139](https://jira.mariadb.org/browse/MXS-139): Schemarouter authentication for wildcard grants fails without optimize_wildcard
* [MXS-140](https://jira.mariadb.org/browse/MXS-140): strip_db_esc does not work without auth_all_servers
* [MXS-162](https://jira.mariadb.org/browse/MXS-162): Fix Incorrect info in Configuration Guide
* [MXS-165](https://jira.mariadb.org/browse/MXS-165): Concurrency issue while incrementing sessions in qlafilter
* [MXS-166](https://jira.mariadb.org/browse/MXS-166): Memory leak when creating a new event
* [MXS-171](https://jira.mariadb.org/browse/MXS-171): Allow reads on master for readwritesplit
* [MXS-176](https://jira.mariadb.org/browse/MXS-176): Missing dependencies in documentation
* [MXS-179](https://jira.mariadb.org/browse/MXS-179): Keep configuration changes in synch across MaxScale Mate Nodes
* [MXS-180](https://jira.mariadb.org/browse/MXS-180): MariaDB10 binlog router compatibilty
* [MXS-181](https://jira.mariadb.org/browse/MXS-181): Poor performance on TCP connection due to Nagle's algoritm
* [MXS-182](https://jira.mariadb.org/browse/MXS-182): SHOW SLAVE STATUS and maxadmin "show services" for binlog router needs updated when used with MariaDB 10 Master
* [MXS-212](https://jira.mariadb.org/browse/MXS-212): Stopped services accept connections
* [MXS-225](https://jira.mariadb.org/browse/MXS-225): RPM Debug build packages have no debugging symbols
* [MXS-227](https://jira.mariadb.org/browse/MXS-227): Memory leak in Galera Monitor
* [MXS-244](https://jira.mariadb.org/browse/MXS-244): Memory leak when using prepared statements without arguments
## Known Issues and Limitations
There are a number of bugs and known limitations within this version of MaxScale; the most serious of these are listed below.
* MaxScale cannot manage authentication that uses wildcard matching in hostnames in the mysql.user table of the backend database. The only wildcards that can be used are in IP address entries.
* When users have different passwords based on the host from which they connect MaxScale is unable to determine which password it should use to connect to the backend database. This results in failed connections and unusable usernames in MaxScale.
* LONGBLOB are currently not supported.
* Galera Cluster variables, such as @@wsrep_node_name, are not resolved by the embedded MariaDB parser.
* The Database Firewall filter does not support multi-statements. Using them will result in an error being sent to the client.
## Packaging
Both RPM and Debian packages are available for MaxScale. In addition to the tar based releases previously distributed, we now provide:
* CentOS/RedHat 5
* CentOS/RedHat 6
* CentOS/RedHat 7
* Debian 6
* Debian 7
* Ubuntu 12.04 LTS
* Ubuntu 14.04 LTS
* SuSE Linux Enterprise 11
* SuSE Linux Enterprise 12

View File

@ -1,267 +0,0 @@
# MariaDB MaxScale 1.3 Release Notes
This document describes the changes in release 1.3, when compared to
release 1.2.1.
## 1.3.0
For any problems you encounter, please consider submitting a bug
report at [Jira](https://jira.mariadb.org).
## New Features
### Persistent Connections
MaxScale 1.3.0 introduces the concept of *Persistent Connections*. With
that is meant that the connection from MaxScale to the backend server is
not terminated even if the connection from the client to MaxScale is.
If a client makes frequent short connections, there may be a benefit from
using the *Persistent Connection* feature as it may reduce the time it
takes from establishing a connection from the client through MaxScale to
the backend server.
**NOTE**: The persistent connections do not track session state. This means
that changing the default database or modifying the session state will cause
those changes to be active even for new connections. If you use queries with
implicit databases or use connections with different client settings, you
should take great care when using persistent connections.
Additional information is available in the following document:
* [Administration Tutorial](../Tutorials/Administration-Tutorial.md#persistent-connections)
### Binlog Server
There are new administrative commands: STOP SLAVE, START SLAVE, RESET SLAVE
and CHANGE MASTER TO. The master server details are now provided by a
master.ini file located in binlog directory and could be changed via
CHANGE MASTER TO command issued via MySQL connection to MaxScale.
Before migrating to 1.3.0 it is necessary to put a writable master.ini file
into binlog directory, containing these parameters:
```
[binlog_configuration]
master_host=127.0.0.1
master_port=3308
master_user=repl
master_password=somepass
filestem=repl-bin
```
Users may change parameters according to their configuration.
**Note**: the "servers" parameter is no longer required in the service
definition.
Additional information is available in the following documents:
* [Binlogrouter Tutorial](../Tutorials/Replication-Proxy-Binlog-Router-Tutorial.md)
* [Upgrading Binlogrouter to 1.3](../Upgrading/Upgrading-BinlogRouter-To-Maxscale-1.3.md)
* [Binlogrouter Documentation](../Routers/Binlogrouter.md)
### Logging Changes
Before 1.3, MaxScale logged data to four different log files; *error*,
*message*, *trace* and *debug*. Complementary and/or alternatively, MaxScale
could also log to syslog, in which case messages intended for the error and
message file were logged there. What files were enabled and written to was
controlled by entries in the MaxScale configuration file.
This has now been changed so that MaxScale logs to a single
file - *maxscale.log* - and each logged entry is prepended with *error*,
*warning*, *notice*, *info* or *debug*, depending on the seriousness or
priority of the message. The levels are the same as those of syslog.
MaxScale is still capable of complementary or alternatively logging to syslog.
What used to be logged to the *message* file is now logged as a *notice*
message and what used to be written to the *trace* file, is logged as an
*info* message.
By default, *notice*, *warning* and *error* messages are logged, while
*info* and *debug* messages are not. Exactly what kind of messages are
logged can be controlled via the MaxScale configuration file, but enabling
and disabling different kinds of messages can also be performed at runtime
from maxadmin.
Earlier, the *error* and *message* files were written to the filesystem,
while the *trace* and *debug* files were written to shared memory. The
one and only log file of MaxScale is now by default written to the filesystem.
This will have performance implications if *info* and *debug* messages are
enabled.
If you want to retain the possibility of turning on *info* and *debug*
messages, without it impacting the performance too much, the recommended
approach is to add the following entries to the MaxScale configuration file:
```
[maxscale]
syslog=1
maxlog=0
log_to_shm=1
```
This will have the effect of MaxScale creating the *maxscale.log* into
shared memory, but not logging anything to it. However, all *notice*,
*warning* and *error* messages will be logged to syslog.
Then, if there is a need to turn on *info* messages that can be done via
the maxadmin interface:
```
MaxScale> enable log-priority info
MaxScale> enable maxlog
```
Note that *info* and *debug* messages are never logged to syslog.
### PCRE2 integration
MaxScale now uses the PCRE2 library for regular expressions. This has been
integrated into the core configuration processing and most of the modules.
The main module which uses this is the regexfilter which now fully supports
the PCRE2 syntax with proper substitutions. For a closer look at how this
differs from the POSIX regular expression syntax take a look at the
[PCRE2 documentation](http://www.pcre.org/current/doc/html/pcre2syntax.html).
**Please note**, that the substitution string follows different rules than
the traditional substitution strings. The usual way of referring to capture
groups in the substitution string is with the backslash character followed
by the capture group reference e.g. `\1` but the PCRE2 library uses the dollar
character followed by the group reference. To quote the PCRE2 native API manual:
```
In the replacement string, which is interpreted as a UTF string in UTF mode, and is checked for UTF validity unless the PCRE2_NO_UTF_CHECK option is set, a dollar character is an escape character that can specify the insertion of characters from capturing groups in the pattern. The following forms are recognized:
$$ insert a dollar character
$<n> insert the contents of group <n>
${<n>} insert the contents of group <n>
```
### Improved launchable scripts
The launchable scripts were modified to allow usage without wrapper scripts.
The scripts are now executed as they are in the configuration files with certain
keywords being replaced with the initiator, event and node list. For more
details, please read the [Monitor Common](../Monitors/Monitor-Common.md) document.
## Bug fixes
[Here is a list of bugs fixed since the release of MaxScale 1.2.1.](https://jira.mariadb.org/browse/MXS-550?jql=project%20%3D%20MXS%20AND%20issuetype%20%3D%20Bug%20AND%20resolution%20in%20(Fixed%2C%20Done)%20AND%20fixVersion%20%3D%201.3.0)
* [MXS-559](https://jira.mariadb.org/browse/MXS-559): Crash due to debug assertion in readwritesplit
* [MXS-551](https://jira.mariadb.org/browse/MXS-551): Maxscale BETA 1.3.0 running as root
* [MXS-548](https://jira.mariadb.org/browse/MXS-548): Maxscale 1.2.1 crash on Ubuntu 4.04.3 x86_64
* [MXS-508](https://jira.mariadb.org/browse/MXS-508): regex filter ignores username
* [MXS-505](https://jira.mariadb.org/browse/MXS-505): if Maxscale fails to start it goes to infinite "try-to-start and fail" loop
* [MXS-501](https://jira.mariadb.org/browse/MXS-501): USE <db> hangs when Tee filter uses matching
* [MXS-500](https://jira.mariadb.org/browse/MXS-500): Tee filter hangs when statements aren't duplicated.
* [MXS-499](https://jira.mariadb.org/browse/MXS-499): Init script error on Debian Wheezy
* [MXS-494](https://jira.mariadb.org/browse/MXS-494): Weight calculation favors servers without connections
* [MXS-493](https://jira.mariadb.org/browse/MXS-493): SIGFPE when weightby parameter is 0 and using LEAST_GLOBAL_CONNECTIONS
* [MXS-492](https://jira.mariadb.org/browse/MXS-492): Segfault if server is missing weighting parameter
* [MXS-491](https://jira.mariadb.org/browse/MXS-491): MaxScale can time out systemd if startup of services takes too long
* [MXS-480](https://jira.mariadb.org/browse/MXS-480): Readwritesplit defaults cause connection pileup
* [MXS-479](https://jira.mariadb.org/browse/MXS-479): localtime must not be used in the multi-threaded program.
* [MXS-472](https://jira.mariadb.org/browse/MXS-472): Monitors update status in multiple steps
* [MXS-464](https://jira.mariadb.org/browse/MXS-464): Upgrade 1.2.0 to 1.2.1 blocking start of `maxscale` service
* [MXS-450](https://jira.mariadb.org/browse/MXS-450): Syslog default prefix is MaxScale not maxscale
* [MXS-447](https://jira.mariadb.org/browse/MXS-447): Monitors are started before they have been fully configured
* [MXS-436](https://jira.mariadb.org/browse/MXS-436): Invalid threads argument is ignored and MaxScale starts with one thread
* [MXS-431](https://jira.mariadb.org/browse/MXS-431): Backend authentication fails with schemarouter
* [MXS-429](https://jira.mariadb.org/browse/MXS-429): Binlog Router crashes due to segmentation fault with no meaningful error if no listener is configured
* [MXS-428](https://jira.mariadb.org/browse/MXS-428): Maxscale crashes at startup.
* [MXS-427](https://jira.mariadb.org/browse/MXS-427): Logging a large string causes a segmentation fault
* [MXS-417](https://jira.mariadb.org/browse/MXS-417): Single character wildcard doesn't work in MaxScale
* [MXS-416](https://jira.mariadb.org/browse/MXS-416): Orphan sessions appear after many network errors
* [MXS-415](https://jira.mariadb.org/browse/MXS-415): MaxScale 1.2.1 crashed with Signal 6 and 11
* [MXS-414](https://jira.mariadb.org/browse/MXS-414): Maxscale crashed every day!
* [MXS-413](https://jira.mariadb.org/browse/MXS-413): MaxAdmin hangs with show session
* [MXS-412](https://jira.mariadb.org/browse/MXS-412): show dbusers segmentation fault
* [MXS-409](https://jira.mariadb.org/browse/MXS-409): prepare should not hit all servers
* [MXS-408](https://jira.mariadb.org/browse/MXS-408): Connections to backend databases do not clear promptly
* [MXS-407](https://jira.mariadb.org/browse/MXS-407): Maxscale binlogrouter binlog names are unncessarily length-limited
* [MXS-405](https://jira.mariadb.org/browse/MXS-405): Maxscale bin router crash
* [MXS-403](https://jira.mariadb.org/browse/MXS-403): Monitor callback to DCBs evades thread control causing crashes
* [MXS-394](https://jira.mariadb.org/browse/MXS-394): Faults in regex_replace function of regexfilter.c
* [MXS-392](https://jira.mariadb.org/browse/MXS-392): Update to "Rabbit MQ setup and MaxScale Integration" document
* [MXS-386](https://jira.mariadb.org/browse/MXS-386): max_sescmd_history should not close connections
* [MXS-385](https://jira.mariadb.org/browse/MXS-385): disable_sescmd_history can cause false data to be read.
* [MXS-379](https://jira.mariadb.org/browse/MXS-379): Incorrect handing of a GWBUF may cause SIGABRT.
* [MXS-376](https://jira.mariadb.org/browse/MXS-376): MaxScale terminates with SIGABRT.
* [MXS-373](https://jira.mariadb.org/browse/MXS-373): If config file is non-existent, maxscale crashes.
* [MXS-366](https://jira.mariadb.org/browse/MXS-366): Multi-source slave servers are not detected.
* [MXS-365](https://jira.mariadb.org/browse/MXS-365): Load data local infile connection abort when loading certain files
* [MXS-363](https://jira.mariadb.org/browse/MXS-363): rpm building seems to do something wrong with maxscale libraries
* [MXS-361](https://jira.mariadb.org/browse/MXS-361): crash on backend restart if persistent connections are in use
* [MXS-360](https://jira.mariadb.org/browse/MXS-360): Persistent connections: maxadmin reports 0 all the time even if connections are created
* [MXS-358](https://jira.mariadb.org/browse/MXS-358): Crash, Error in `/usr/bin/maxscale': free(): invalid next size (fast)
* [MXS-352](https://jira.mariadb.org/browse/MXS-352): With no backend connection, services aren't started
* [MXS-351](https://jira.mariadb.org/browse/MXS-351): Router error handling can cause crash by leaving dangling DCB pointer
* [MXS-345](https://jira.mariadb.org/browse/MXS-345): maxscale.conf in /etc/init.d prevents puppet from starting maxscale
* [MXS-342](https://jira.mariadb.org/browse/MXS-342): When ini_parse fails to parse config file, no log messages are printed.
* [MXS-333](https://jira.mariadb.org/browse/MXS-333): use_sql_variables_in=master doesn't work
* [MXS-329](https://jira.mariadb.org/browse/MXS-329): The session pointer in a DCB can be null unexpectedly
* [MXS-323](https://jira.mariadb.org/browse/MXS-323): mysql_client readwritesplit handleError seems using wrong dcb and cause wrong behavior
* [MXS-321](https://jira.mariadb.org/browse/MXS-321): Incorrect number of connections in maxadmin list view
* [MXS-310](https://jira.mariadb.org/browse/MXS-310): MaxScale 1.2 does not completely cleanly change to the maxscale user
* [MXS-297](https://jira.mariadb.org/browse/MXS-297): postinstall on debian copies wrong file in /etc/init.d
* [MXS-293](https://jira.mariadb.org/browse/MXS-293): Bug in init script, and maxscale --user=maxscale does run as root
* [MXS-291](https://jira.mariadb.org/browse/MXS-291): Random number generation has flaws
* [MXS-289](https://jira.mariadb.org/browse/MXS-289): Corrupted memory or empty value are in Master_host field of SHOW SLAVE STATUS when master connection is broken
* [MXS-286](https://jira.mariadb.org/browse/MXS-286): Fix the content and format of MaxScale-HA-with-Corosync-Pacemaker document
* [MXS-283](https://jira.mariadb.org/browse/MXS-283): SSL connections leak memory
* [MXS-282](https://jira.mariadb.org/browse/MXS-282): Add example to "Routing Hints" document
* [MXS-281](https://jira.mariadb.org/browse/MXS-281): SELECT INTO OUTFILE query goes several times to one slave
* [MXS-280](https://jira.mariadb.org/browse/MXS-280): SELECT INTO OUTFILE query succeeds even if backed fails
* [MXS-276](https://jira.mariadb.org/browse/MXS-276): Memory leak of buffer in connection router readQuery
* [MXS-274](https://jira.mariadb.org/browse/MXS-274): Memory Leak
* [MXS-271](https://jira.mariadb.org/browse/MXS-271): Schemarouter and unknown databases
* [MXS-269](https://jira.mariadb.org/browse/MXS-269): Crash in MySQL backend protocol
* [MXS-260](https://jira.mariadb.org/browse/MXS-260): Multiple MaxScale processes
* [MXS-258](https://jira.mariadb.org/browse/MXS-258): ERR_error_string could overflow in future
* [MXS-254](https://jira.mariadb.org/browse/MXS-254): Failure to read configuration file results in no error log messages
* [MXS-251](https://jira.mariadb.org/browse/MXS-251): Non-thread safe strerror
* [MXS-220](https://jira.mariadb.org/browse/MXS-220): LAST_INSERT_ID() query is redirect to slave if function call is in where clause
* [MXS-210](https://jira.mariadb.org/browse/MXS-210): Check MaxScale user privileges
* [MXS-202](https://jira.mariadb.org/browse/MXS-202): User password not handled correctly
* [MXS-197](https://jira.mariadb.org/browse/MXS-197): Incorrect sequence of operations with DCB
* [MXS-196](https://jira.mariadb.org/browse/MXS-196): DCB state is changed prior to polling operation
* [MXS-195](https://jira.mariadb.org/browse/MXS-195): maxscaled.c ineffective DCB disposal
* [MXS-184](https://jira.mariadb.org/browse/MXS-184): init script issues in CentOS 7
* [MXS-183](https://jira.mariadb.org/browse/MXS-183): MaxScale crash after 'reload config'
* [MXS-111](https://jira.mariadb.org/browse/MXS-111): maxscale binlog events shown in show services seems to be double-counted for the master connection
* [MXS-54](https://jira.mariadb.org/browse/MXS-54): Write failed auth attempt to trace log
* [MXS-35](https://jira.mariadb.org/browse/MXS-35): bugzillaId-451: maxscale main() exit code is always 0 after it daemonizes
* [MXS-29](https://jira.mariadb.org/browse/MXS-29): bugzillaId-589: detect if MAXSCALE_SCHEMA.HEARTBEAT table is not replicated
* [MXS-3](https://jira.mariadb.org/browse/MXS-3): Remove code for atomic_add in skygw_utils.cc
## Known Issues and Limitations
There are a number of bugs and known limitations within this version of MaxScale;
the most serious of these are listed below.
* MaxScale can not manage authentication that uses wildcard matching in hostnames in the mysql.user table of the backend database. The only wildcards that can be used are in IP address entries.
* When users have different passwords based on the host from which they connect MaxScale is unable to determine which password it should use to connect to the backend database. This results in failed connections and unusable usernames in MaxScale.
* The readconnroute module does not support sending of LONGBLOB data.
* Galera Cluster variables, such as @@wsrep_node_name, are not resolved by the embedded MariaDB parser.
* The Database Firewall filter does not support multi-statements. Using them will result in an error being sent to the client.
* The SSL support is known to be unstable.
## Packaging
RPM and Debian packages are provided for the Linux distributions supported
by MariaDB Enterprise.
Packages can be downloaded [here](https://mariadb.com/resources/downloads).
## Source Code
The source code of MaxScale is tagged at GitHub with a tag, which is identical
with the version of MaxScale. For instance, the tag of version 1.2.1 of MaxScale
is 1.2.1. Further, *master* always refers to the latest released non-beta version.
The source code is available [here](https://github.com/mariadb-corporation/MaxScale).

View File

@ -1,112 +0,0 @@
# MariaDB MaxScale 1.4.0 (Beta) Release Notes
Release 1.4.0 is a beta release.
This document describes the changes in release 1.4.0, when compared to
release 1.3.0.
## 1.4.0
For any problems you encounter, please consider submitting a bug
report at [Jira](https://jira.mariadb.org).
## New Features
### Firewall Filter
The firewall filter now supports different actions when a rule is matched.
Currently possible actions are to either block the query, allow it or
ignore the match and allow it.
Matching and non-matching queries can now be logged and by combining this new
logging functionality with the _ignore_ action, you can set up the filter in
a dry-run mode. For more information about the firewall filter, please refer to
[Database Firewall Filter](../Filters/Database-Firewall-Filter.md).
### SSL
Client-side SSL support has been in MaxScale for some time, but has
been known to have been unstable. In 1.4.0, client side SSL is now
believed to be stable and fully usable.
The SSL configuration is now done on a per listener basis which
allows both SSL and non-SSL connections to a service. For more details
on how to configure this, please refer to the
[MaxScale Configuration Guide](../Getting-Started/Configuration-Guide.md#listener-and-ssl).
### POSIX Extended Regular Expression Syntax
The _qlafilter_, the _topfilter_ and the _namedserverfilter_ now
accept _extended_ as a filter option, which enables the POSIX Extended
Regular Expression syntax.
### Improved user grant detection
MaxScale now allows users with only table level access to connect with
a default database. The service users will require SELECT privileges on
the `mysql.tables_priv` table:
```
GRANT SELECT ON mysql.tables_priv TO 'maxscale'@'maxscalehost'
```
For more information, refer to the configuration guide:
[MaxScale Configuration Guide](../Getting-Started/Configuration-Guide.md#service).
### Improved password encryption
MaxScale 1.4.0 uses the MD5 version of the crypt function which is more secure
than the non-MD5 version. This means that a new password file needs to be
created with `maxkeys`. The configuration file should be updated to use the new
passwords. This can be done with the help of the `maxpasswd` utility. For more
details about how to do this, please refer to the installation guide:
[MariaDB MaxScale Installation Guide](../Getting-Started/MariaDB-MaxScale-Installation-Guide.md)
## Removed Features
* MaxScale no longer supports SSLv3.
* The `enabled` mode, which allows both SSL and non-SSL connections on the same port, has been removed.
## Bug fixes
[Here is a list of bugs fixed since the release of MaxScale 1.3.0.](https://jira.mariadb.org/browse/MXS-600?jql=project%20%3D%20MXS%20AND%20issuetype%20%3D%20Bug%20AND%20resolution%20in%20(Fixed%2C%20Done)%20AND%20fixVersion%20%3D%201.4.0)
* [MXS-400](https://jira.mariadb.org/browse/MXS-400): readwritesplit router doesn't allow connect when the only remaining server is master and slave
* [MXS-497](https://jira.mariadb.org/browse/MXS-497): MaxScale does not contemplate client multiple statements (CLIENT_MULTI_STATEMENTS)
* [MXS-504](https://jira.mariadb.org/browse/MXS-504): SSL connection handling needs work
* [MXS-511](https://jira.mariadb.org/browse/MXS-511): ReadWriteSplit router won't choose node as master and logs confusing "RUNNING MASTER" error message
* [MXS-563](https://jira.mariadb.org/browse/MXS-563): Maxscale fails to start
* [MXS-565](https://jira.mariadb.org/browse/MXS-565): Binlog Router doesn't handle 16MB larger transmissions
* [MXS-573](https://jira.mariadb.org/browse/MXS-573): Write permission to systemd service file
* [MXS-574](https://jira.mariadb.org/browse/MXS-574): Wrong parameter name in systemd service file
* [MXS-575](https://jira.mariadb.org/browse/MXS-575): Nagios scripts lack execute permissions
* [MXS-577](https://jira.mariadb.org/browse/MXS-577): Don't install systemd files and init.d scipts at the same time
* [MXS-581](https://jira.mariadb.org/browse/MXS-581): Only the first 8 characters of passwords are used
* [MXS-582](https://jira.mariadb.org/browse/MXS-582): crypt is not thread safe
* [MXS-585](https://jira.mariadb.org/browse/MXS-585): Intermittent connection failure with MaxScale 1.2/1.3 using MariaDB/J 1.3
* [MXS-589](https://jira.mariadb.org/browse/MXS-589): Password encryption looks for the file in the wrong directory
* [MXS-592](https://jira.mariadb.org/browse/MXS-592): Build failure with MariaDB 10.1 when doing a debug build
* [MXS-594](https://jira.mariadb.org/browse/MXS-594): Binlog name gets trunkated
* [MXS-600](https://jira.mariadb.org/browse/MXS-600): Threads=auto parameter configuration fails
## Known Issues and Limitations
There are some limitations and known issues within this version of MaxScale.
For more information, please refer to the [Limitations](../About/Limitations.md) document.
## Packaging
RPM and Debian packages are provided for the Linux distributions supported
by MariaDB Enterprise.
Packages can be downloaded [here](https://mariadb.com/resources/downloads).
## Source Code
The source code of MaxScale is tagged at GitHub with a tag, which is identical
with the version of MaxScale. For instance, the tag of version X.Y.Z of MaxScale
is X.Y.Z. Further, *master* always refers to the latest released non-beta version.
The source code is available [here](https://github.com/mariadb-corporation/MaxScale).

View File

@ -1,41 +0,0 @@
# MariaDB MaxScale 1.4.1 Release Notes
Release 1.4.1 is a GA release.
This document describes the changes in release 1.4.1, when compared to
release [1.4.0](MaxScale-1.4.0-Release-Notes.md).
For any problems you encounter, please consider submitting a bug
report at [Jira](https://jira.mariadb.org).
## Bug fixes
[Here is a list of bugs fixed since the release of MaxScale 1.4.0.](https://jira.mariadb.org/browse/MXS-646?jql=project%20%3D%20MXS%20AND%20issuetype%20%3D%20Bug%20AND%20resolution%20in%20(Fixed%2C%20Done)%20AND%20fixVersion%20%3D%201.4.1)
* [MXS-646](https://jira.mariadb.org/browse/MXS-646): Namedserverfilter ignores user and source parameters
* [MXS-632](https://jira.mariadb.org/browse/MXS-632): Replace or update VERSION
* [MXS-630](https://jira.mariadb.org/browse/MXS-630): Requirement of tables_priv access not documented in "Upgrading" guide
* [MXS-629](https://jira.mariadb.org/browse/MXS-629): Lack of tables_priv privilege causes confusing error message
* [MXS-627](https://jira.mariadb.org/browse/MXS-627): Failure to connect to MaxScale with MariaDB Connector/J
* [MXS-585](https://jira.mariadb.org/browse/MXS-585): Intermittent connection failure with MaxScale 1.2/1.3 using MariaDB/J 1.3
## Known Issues and Limitations
There are some limitations and known issues within this version of MaxScale.
For more information, please refer to the [Limitations](../About/Limitations.md) document.
## Packaging
RPM and Debian packages are provided for the Linux distributions supported
by MariaDB Enterprise.
Packages can be downloaded [here](https://mariadb.com/resources/downloads).
## Source Code
The source code of MaxScale is tagged at GitHub with a tag, which is identical
with the version of MaxScale. For instance, the tag of version X.Y.Z of MaxScale
is X.Y.Z. Further, *master* always refers to the latest released non-beta version.
The source code is available [here](https://github.com/mariadb-corporation/MaxScale).

View File

@ -1,44 +0,0 @@
# MariaDB MaxScale 1.4.2 Release Notes
Release 1.4.2 is a GA release.
This document describes the changes in release 1.4.2, when compared to
release 1.4.1.
For any problems you encounter, please consider submitting a bug
report at [Jira](https://jira.mariadb.org).
## Bug fixes
[Here is a list of bugs fixed since the release of MaxScale 1.4.1.](https://jira.mariadb.org/browse/MXS-683?jql=project%20%3D%20MXS%20AND%20issuetype%20%3D%20Bug%20AND%20resolution%20in%20(Fixed%2C%20Done)%20AND%20fixVersion%20%3D%201.4.2)
* [MXS-684](https://jira.mariadb.org/browse/MXS-684): Password field still used with MySQL 5.7
* [MXS-683](https://jira.mariadb.org/browse/MXS-683): qc_mysqlembedded reports as-name instead of original-name.
* [MXS-681](https://jira.mariadb.org/browse/MXS-681): Loading service users error
* [MXS-680](https://jira.mariadb.org/browse/MXS-680): qc_mysqlembedded fails to look into function when reporting affected fields
* [MXS-679](https://jira.mariadb.org/browse/MXS-679): qc_mysqlembedded excludes some fields, when reporting affected fields
* [MXS-662](https://jira.mariadb.org/browse/MXS-662): No Listener on different IPs but same port since 1.4.0
* [MXS-661](https://jira.mariadb.org/browse/MXS-661): Log fills with 'Length (0) is 0 or query string allocation failed'
* [MXS-656](https://jira.mariadb.org/browse/MXS-656): after upgrade from 1.3 to 1.4, selecting master isn't working as expected
* [MXS-616](https://jira.mariadb.org/browse/MXS-616): Duplicated binlog event under heavy load.
## Known Issues and Limitations
There are some limitations and known issues within this version of MaxScale.
For more information, please refer to the [Limitations](../About/Limitations.md) document.
## Packaging
RPM and Debian packages are provided for the Linux distributions supported
by MariaDB Enterprise.
Packages can be downloaded [here](https://mariadb.com/resources/downloads).
## Source Code
The source code of MaxScale is tagged at GitHub with a tag, which is identical
with the version of MaxScale. For instance, the tag of version X.Y.Z of MaxScale
is X.Y.Z. Further, *master* always refers to the latest released non-beta version.
The source code is available [here](https://github.com/mariadb-corporation/MaxScale).

View File

@ -1,37 +0,0 @@
# MariaDB MaxScale 1.4.3 Release Notes
Release 1.4.3 is a GA release.
This document describes the changes in release 1.4.3, when compared to
release 1.4.2.
For any problems you encounter, please consider submitting a bug
report at [Jira](https://jira.mariadb.org).
## Bug fixes
[Here is a list of bugs fixed since the release of MaxScale 1.4.2.](https://jira.mariadb.org/browse/MXS-700?jql=project%20%3D%20MXS%20AND%20issuetype%20%3D%20Bug%20AND%20resolution%20in%20(Fixed%2C%20Done)%20AND%20fixVersion%20%3D%201.4.3)
* [MXS-700](https://jira.mariadb.org/browse/MXS-700): Segfault on startup
* [MXS-699](https://jira.mariadb.org/browse/MXS-699): qc_mysqlembedded fails to return fields in comma expression
## Known Issues and Limitations
There are some limitations and known issues within this version of MaxScale.
For more information, please refer to the [Limitations](../About/Limitations.md) document.
## Packaging
RPM and Debian packages are provided for the Linux distributions supported
by MariaDB Enterprise.
Packages can be downloaded [here](https://mariadb.com/resources/downloads).
## Source Code
The source code of MaxScale is tagged at GitHub with a tag, which is identical
with the version of MaxScale. For instance, the tag of version X.Y.Z of MaxScale
is X.Y.Z. Further, *master* always refers to the latest released non-beta version.
The source code is available [here](https://github.com/mariadb-corporation/MaxScale).

View File

@ -1,129 +0,0 @@
# MariaDB MaxScale 2.0.0 Release Notes
Release 2.0.0 is a Beta release.
This document describes the changes in release 2.0.0, when compared to
release 1.4.3.
For any problems you encounter, please consider submitting a bug
report at [Jira](https://jira.mariadb.org).
## License
The license of MaxScale has been changed from GPLv2 to MariaDB BSL.
For more information about MariaDB BSL, please refer to
[MariaDB BSL](https://www.mariadb.com/bsl).
## New Features
### Binlog-to-Avro Translator
The 2.0 release of MaxScale contains the beta release of the binlog-to-Avro
conversion and distribution modules. These modules allow MaxScale to connect to
a MariaDB 10.0 master server and convert the binary log events to Avro format
change records. These records can then be queried as a continuous JSON or raw Avro
stream using the new CDC protocol.
The [Avrorouter Tutorial](../Tutorials/Avrorouter-Tutorial.md) contains
information on how to get started with the binlog-to-Avro translation.
The [Avrorouter](../Routers/Avrorouter.md) documentation has more information
on the details of this conversion process and how to configure the module.
The [CDC Protocol](../Protocols/CDC.md) documentation contains the details of
the new protocol.
### Read Continuation upon Master Down
The _readwritesplit_ routing module now supports a high availability read mode
where read queries are allowed even if the master server goes down. The new
functionality supports three modes: disconnection on master failure, disconnection
on first write after master failure and error on write after master failure.
The MySQL monitor module, _mysqlmon_, now supports stale states for both the master
and slave servers. This means that when a slave loses its master, it will retain
the slave state as long as it is running.
For more details about these new modes, please read the [ReadWriteSplit](../Routers/ReadWriteSplit.md)
and [MySQL Monitor](../Monitors/MySQL-Monitor.md) documentation.
### Backend SSL
The configuration for a backend server can now be set for SSL connections from MaxScale. Although loosely referred to as SSL, this is nowadays the TLS security protocol. If, in MaxScale, a server is configured with SSL parameters then MaxScale will only connect to it using a secure protocol. MaxScale supports TLS versions 1.0, 1.1 and 1.2; which can be used will depend on the capability of the backend server. Once configured, if a secure connection cannot be made, attempts to connect to MaxScale that require that server will fail. An alternative that should be considered is the use of SSH tunnels.
For more information about backend SSL, please refer to
[Server and SSL](../Getting-Started/Configuration-Guide.md#server-and-ssl)
### Connection Throttling
The option now exists to set [max_connections](../Getting-Started/Configuration-Guide.md#max_connections) for a service. If a non-zero number is specified, then MaxScale will accept connection requests only up to the specified limit. Further connections will receive the error message "Too many connections" with error number 1040.
### MaxAdmin Security Improvements
The way a user of MaxAdmin is authenticated has been completely changed.
In 2.0, MaxAdmin can only connect to MaxScale using a domain socket, thus
_only when run on the same host_, and authorization is based upon the UNIX
identity. Remote access is no longer supported.
When 2.0 has been installed, MaxAdmin can only be used by `root` and
other users must be added anew. Please consult
[MaxAdmin documentation](../Reference/MaxAdmin.md) for more details.
### Query Classifier
The query classifier component that MaxScale uses when deciding what
to do with a particular query has been changed. It used to be based
upon the MariaDB embedded library, but is now based upon sqlite3.
This change should not cause any changes in the behaviour of MaxScale.
For more information, please refer to
[Configuration Guide](../Getting-Started/Configuration-Guide.md#query_classifier).
## Bug fixes
[Here is a list of bugs fixed since the release of MaxScale 1.4.3.](https://jira.mariadb.org/browse/MXS-739?jql=project%20%3D%20MXS%20AND%20issuetype%20%3D%20Bug%20AND%20resolution%20in%20(Fixed%2C%20Done)%20AND%20fixVersion%20%3D%202.0.0)
* [MXS-821](https://jira.mariadb.org/browse/MXS-821): filestem router option for binlog router is not documented
* [MXS-814](https://jira.mariadb.org/browse/MXS-814): Service and monitor permission checks only use the last available server
* [MXS-813](https://jira.mariadb.org/browse/MXS-813): binlogrouter, mariadb10.0, signal 11, crash
* [MXS-801](https://jira.mariadb.org/browse/MXS-801): strip_db_esc should default to True
* [MXS-790](https://jira.mariadb.org/browse/MXS-790): replication_heartbeat table privilege is not checked / fails silently / is not documented
* [MXS-776](https://jira.mariadb.org/browse/MXS-776): Documentation about limitations of reload config is not clear
* [MXS-772](https://jira.mariadb.org/browse/MXS-772): RPM installation produces errors
* [MXS-766](https://jira.mariadb.org/browse/MXS-766): R/W router sends DEALLOCATE PREPARE to ALL instead of MASTER
* [MXS-739](https://jira.mariadb.org/browse/MXS-739): Maxinfo issuing invalid null's in JSON response
* [MXS-733](https://jira.mariadb.org/browse/MXS-733): MaxScale `list sessions` can report "Invalid State" for some sessions.
* [MXS-720](https://jira.mariadb.org/browse/MXS-720): MaxScale fails to start and doesn't log any useful message when there are spurious characters in the config file
* [MXS-718](https://jira.mariadb.org/browse/MXS-718): qc_mysqlembedded does not report fields for INSERT
* [MXS-704](https://jira.mariadb.org/browse/MXS-704): start/stop scripts use which in a non-silent manner
* [MXS-695](https://jira.mariadb.org/browse/MXS-695): MaxScale does not build on Debian 8 following build from source instructions
* [MXS-685](https://jira.mariadb.org/browse/MXS-685): 1.4.1: ReadWrite Split on Master-Master setup doesn't chose master, logs "RUNNING MASTER" error message instead (related to MXS-511?)
* [MXS-675](https://jira.mariadb.org/browse/MXS-675): QLA Filter Output Log Improvements
* [MXS-658](https://jira.mariadb.org/browse/MXS-658): Crash in embedded library when MariaDB 10.0 is used
* [MXS-653](https://jira.mariadb.org/browse/MXS-653): maxpasswd writes notice message to stdout
* [MXS-652](https://jira.mariadb.org/browse/MXS-652): ssl is configured in a wrong way, but Maxscale can be started and works
* [MXS-633](https://jira.mariadb.org/browse/MXS-633): Galera Monitor should not require the REPLICATION CLIENT privilege
* [MXS-631](https://jira.mariadb.org/browse/MXS-631): Rename and clean up macros.cmake
* [MXS-477](https://jira.mariadb.org/browse/MXS-477): readconnroute misinterprets data as COM_CHANGE_USER
* [MXS-419](https://jira.mariadb.org/browse/MXS-419): Socket creation failed due 24, Too many open files.
## Known Issues and Limitations
There are some limitations and known issues within this version of MaxScale.
For more information, please refer to the [Limitations](../About/Limitations.md) document.
## Packaging
RPM and Debian packages are provided for the Linux distributions supported
by MariaDB Enterprise.
Packages can be downloaded [here](https://mariadb.com/resources/downloads).
## Source Code
The source code of MaxScale is tagged at GitHub with a tag, which is identical
with the version of MaxScale. For instance, the tag of version X.Y.Z of MaxScale
is X.Y.Z. Further, *master* always refers to the latest released non-beta version.
The source code is available [here](https://github.com/mariadb-corporation/MaxScale).

View File

@ -1,146 +0,0 @@
# MariaDB MaxScale 2.0.1 Release Notes
Release 2.0.1 is a GA release.
This document describes the changes in release 2.0.1, when compared to
[release 2.0.0](MaxScale-2.0.0-Release-Notes.md).
If you are upgrading from 1.4.3, please also read the release notes
of [2.0.0](./MaxScale-2.0.0-Release-Notes.md).
For any problems you encounter, please consider submitting a bug
report at [Jira](https://jira.mariadb.org).
## Changed default values
### `strip_db_esc`
The service parameter [_strip_db_esc_](../Getting-Started/Configuration-Guide.md#strip_db_esc)
now defaults to true.
### `detect_stale_master`
The [stale master detection](../Monitors/MySQL-Monitor.md#detect_stale_master)
feature is now enabled by default.
## Updated Features
### Starting MariaDB MaxScale
There is now a new command line parameter `--basedir=PATH` that will
cause all directory paths and the location of the configuration file
to be defined relative to that path.
For instance, invoking MariaDB MaxScale like
$ maxscale --basedir=/path/maxscale
has the same effect as invoking MariaDB MaxScale like
$ maxscale --config=/path/maxscale/etc/maxscale.cnf
--configdir=/path/maxscale/etc
--logdir=/path/maxscale/var/log/maxscale
--cachedir=/path/maxscale/var/cache/maxscale
--libdir=/path/maxscale/lib/maxscale
--datadir=/path/maxscale/var/lib/maxscale
--execdir=/path/maxscale/bin
--language=/path/maxscale/var/lib/maxscale
--piddir=/path/maxscale/var/run/maxscale
### Password parameter
In the configuration entry for a _service_ or _monitor_, the value of
the password to be used can now be specified using `password` in addition
to `passwd`. The use of the latter will be deprecated and removed in later
releases of MaxScale.
[SomeService]
...
password=mypasswd
### Routing hint priority change
Routing hints now have the highest priority when a routing decision is made. If
there is a conflict between the original routing decision made by the
readwritesplit and the routing hint attached to the query, the routing hint
takes higher priority.
What this change means is that, if a query would normally be routed to the
master but the routing hint instructs the router to route it to the slave, it
would be routed to the slave.
**WARNING**: This change can alter the way some statements are routed and could
possibly cause data loss, corruption or inconsistency. Please consult the [Hint
Syntax](../Reference/Hint-Syntax.md) and
[ReadWriteSplit](../Routers/ReadWriteSplit.md) documentation before using
routing hints.
### MaxAdmin Usage
In 2.0.0 (Beta), the authentication mechanism of MaxAdmin was completely
changed, so that MaxAdmin could only connect to MaxScale using a Unix domain
socket, thus _only when run on the same host_, and authorization was based
on the Unix identity. Remote access was no longer supported.
To the user this was visible so that while you in 1.4.3 had to provide
a password when starting _maxadmin_ and when adding a user
```
user@host $ maxadmin -p password
MaxAdmin> add user john johns-password
```
in 2.0.0 (Beta), where only Unix domain sockets could be used, you did not
have to provide a password neither when starting _maxadmin_, nor when adding
users
```
user@host $ maxadmin
MaxAdmin> add user john
```
as the MaxScale user corresponded to a Unix user, provided the Linux user
had been added as a user of MaxScale.
In 2.0.1 (GA) this has been changed so that the 1.4.3 behaviour is intact
but _deprecated_, and the 2.0.0 (Beta) behaviour is exposed using a new set
of commands:
```
MaxAdmin> enable account alice
MaxAdmin> disable account alice
```
Note that the way you need to invoke _maxadmin_ depends upon how MariaDB
MaxScale has been configured.
Please consult
[MaxAdmin documentation](../Reference/MaxAdmin.md) for more details.
## Bug fixes
[Here is a list of bugs fixed since the release of MaxScale 2.0.0.](https://jira.mariadb.org/browse/MXS-860?jql=project%20%3D%20MXS%20AND%20issuetype%20%3D%20Bug%20AND%20status%20%3D%20Closed%20AND%20fixVersion%20in%20(2.0.1)%20AND%20resolved%20%3E%3D%20-21d%20AND%20(resolution%20%3D%20Done%20OR%20resolution%20%3D%20Fixed)%20ORDER%20BY%20priority%20DESC)
* [MXS-860](https://jira.mariadb.org/browse/MXS-860): I want to access the web site if master server is down
* [MXS-870](https://jira.mariadb.org/browse/MXS-870): Assertion of Buffer Overflow
* [MXS-845](https://jira.mariadb.org/browse/MXS-845): "Server down" event is re-triggered after maintenance mode is repeated
* [MXS-836](https://jira.mariadb.org/browse/MXS-836): "Failed to start all MaxScale services" without retrying
* [MXS-835](https://jira.mariadb.org/browse/MXS-835): Please reinstate remote access to maxscaled protocol
* [MXS-773](https://jira.mariadb.org/browse/MXS-773): 100% CPU on idle MaxScale with MaxInfo
* [MXS-812](https://jira.mariadb.org/browse/MXS-812): Number of conns not matching number of operations
* [MXS-856](https://jira.mariadb.org/browse/MXS-856): If config file cannot be accessed and creation of log file fails, MaxScale crashes with SIGSEGV
* [MXS-829](https://jira.mariadb.org/browse/MXS-829): When the config file isn't readable or doesn't exist, maxscale silently ends
## Known Issues and Limitations
There are some limitations and known issues within this version of MaxScale.
For more information, please refer to the [Limitations](../About/Limitations.md) document.
## Packaging
RPM and Debian packages are provided for the Linux distributions supported
by MariaDB Enterprise.
Packages can be downloaded [here](https://mariadb.com/resources/downloads).
## Source Code
The source code of MaxScale is tagged at GitHub with a tag, which is derived
from the version of MaxScale. For instance, the tag of version `X.Y.Z` of MaxScale
is `maxscale-X.Y.Z`.
The source code is available [here](https://github.com/mariadb-corporation/MaxScale).

View File

@ -1,64 +0,0 @@
# MariaDB MaxScale 2.0.2 Release Notes
Release 2.0.2 is a GA release.
This document describes the changes in release 2.0.2, when compared to
release [2.0.1](MaxScale-2.0.1-Release-Notes.md).
If you are upgrading from release 1.4.4, please also read the release
notes of release [2.0.0](./MaxScale-2.0.0-Release-Notes.md) and
release [2.0.1](./MaxScale-2.0.1-Release-Notes.md).
For any problems you encounter, please submit a bug report at
[Jira](https://jira.mariadb.org).
## Updated Features
### [MXS-978] (https://jira.mariadb.org/browse/MXS-978) Support for stale master in case of restart
In the case where the replication monitor assigns the master a stale master status
(because the slaves are down) and MaxScale is then restarted, the master loses its
stale master status and no writes can happen.
To cater for this situation there is now a `set server <name> stale` command.
## Bug fixes
[Here is a list of bugs fixed since the release of MaxScale 2.0.1.](https://jira.mariadb.org/browse/MXS-976?jql=project%20%3D%20MXS%20AND%20issuetype%20%3D%20Bug%20AND%20status%20%3D%20Closed%20AND%20fixVersion%20%3D%202.0.2)
* [MXS-1018](https://jira.mariadb.org/browse/MXS-1018): Internal connections don't use TLS
* [MXS-1008](https://jira.mariadb.org/browse/MXS-1008): MySQL Monitor with scripts leaks memory
* [MXS-976](https://jira.mariadb.org/browse/MXS-976): Crash in libqc_sqlite
* [MXS-975](https://jira.mariadb.org/browse/MXS-975): TCP backlog is capped at 1280
* [MXS-970](https://jira.mariadb.org/browse/MXS-970): A fatal problem with maxscale automatically shut down
* [MXS-969](https://jira.mariadb.org/browse/MXS-969): use_sql_variables_in=master can break functionality of important session variables
* [MXS-967](https://jira.mariadb.org/browse/MXS-967): setting connection_timeout=value cause error : Not a boolean value
* [MXS-965](https://jira.mariadb.org/browse/MXS-965): galeramon erlaubt keine TLS verschlüsselte Verbindung
* [MXS-960](https://jira.mariadb.org/browse/MXS-960): MaxScale Binlog Server does not allow comma to be in password
* [MXS-957](https://jira.mariadb.org/browse/MXS-957): Temporary table creation from another temporary table isn't detected
* [MXS-956](https://jira.mariadb.org/browse/MXS-956): Removing DCB 0x7fbf94016760 but was in state DCB_STATE_DISCONNECTED which is not legal for a call to dcb_close
* [MXS-955](https://jira.mariadb.org/browse/MXS-955): MaxScale 2.0.1 doesn't recognize user and passwd options in .maxadmin file
* [MXS-953](https://jira.mariadb.org/browse/MXS-953): Charset error when server configued in utf8mb4
* [MXS-942](https://jira.mariadb.org/browse/MXS-942): describe table query not routed to shard that contains the schema
* [MXS-917](https://jira.mariadb.org/browse/MXS-917): False error message about master not being in use
## Known Issues and Limitations
There are some limitations and known issues within this version of MaxScale.
For more information, please refer to the [Limitations](../About/Limitations.md) document.
## Packaging
RPM and Debian packages are provided for the Linux distributions supported
by MariaDB Enterprise.
Packages can be downloaded [here](https://mariadb.com/resources/downloads).
## Source Code
The source code of MaxScale is tagged at GitHub with a tag, which is derived
from the version of MaxScale. For instance, the tag of version `X.Y.Z` of MaxScale
is `maxscale-X.Y.Z`.
The source code is available [here](https://github.com/mariadb-corporation/MaxScale).

View File

@ -1,57 +0,0 @@
# MariaDB MaxScale 2.0.3 Release Notes
Release 2.0.3 is a GA release.
This document describes the changes in release 2.0.3, when compared to
release [2.0.2](MaxScale-2.0.2-Release-Notes.md).
If you are upgrading from release 1.4.4, please also read the release
notes of release [2.0.0](./MaxScale-2.0.0-Release-Notes.md),
release [2.0.1](./MaxScale-2.0.1-Release-Notes.md) and
release [2.0.2](./MaxScale-2.0.2-Release-Notes.md).
For any problems you encounter, please submit a bug report at
[Jira](https://jira.mariadb.org).
## Updated Features
### [MXS-1027](https://jira.mariadb.org/browse/MXS-1027) Add Upstart support (including respawn) for MaxScale
MaxScale now provides an Upstart configuration file for systems that do not
support systemd.
## Bug fixes
[Here](https://jira.mariadb.org/issues/?jql=project%20%3D%20MXS%20AND%20issuetype%20%3D%20Bug%20AND%20status%20%3D%20Closed%20AND%20fixVersion%20%3D%202.0.3)
is a list of bugs fixed since the release of MaxScale 2.0.2.
* [MXS-1048](https://jira.mariadb.org/browse/MXS-1048): ShemaRouter can't handle backquoted database names
* [MXS-1047](https://jira.mariadb.org/browse/MXS-1047): Batch inserts through Maxscale with C/J stall
* [MXS-1045](https://jira.mariadb.org/browse/MXS-1045): Defunct processes after maxscale have executed script during failover
* [MXS-1044](https://jira.mariadb.org/browse/MXS-1044): Init-Script for SLES 11 displays error messages when called
* [MXS-1043](https://jira.mariadb.org/browse/MXS-1043): Reading last insert id from @@identity variable does not work with maxscale
* [MXS-1033](https://jira.mariadb.org/browse/MXS-1033): maxscale crushes on maxadmin request
* [MXS-1026](https://jira.mariadb.org/browse/MXS-1026): Crash with NullAuth authenticator
* [MXS-1009](https://jira.mariadb.org/browse/MXS-1009): maxinfo sigsegv in spinlock_release
* [MXS-964](https://jira.mariadb.org/browse/MXS-964): Fatal: MaxScale 2.0.1 received fatal signal 6
* [MXS-956](https://jira.mariadb.org/browse/MXS-956): Maxscale crash: Removing DCB 0x7fbf94016760 but was in state DCB_STATE_DISCONNECTED which is not legal for a call to dcb_close
## Known Issues and Limitations
There are some limitations and known issues within this version of MaxScale.
For more information, please refer to the [Limitations](../About/Limitations.md) document.
## Packaging
RPM and Debian packages are provided for the Linux distributions supported
by MariaDB Enterprise.
Packages can be downloaded [here](https://mariadb.com/resources/downloads).
## Source Code
The source code of MaxScale is tagged at GitHub with a tag, which is derived
from the version of MaxScale. For instance, the tag of version `X.Y.Z` of MaxScale
is `maxscale-X.Y.Z`.
The source code is available [here](https://github.com/mariadb-corporation/MaxScale).

View File

@ -1,57 +0,0 @@
# MariaDB MaxScale 2.0.4 Release Notes -- 2017-02-01
Release 2.0.4 is a GA release.
This document describes the changes in release 2.0.4, when compared to
release [2.0.3](MaxScale-2.0.3-Release-Notes.md).
If you are upgrading from release 1.4, please also read the release
notes of release [2.0.3](./MaxScale-2.0.3-Release-Notes.md),
release [2.0.2](./MaxScale-2.0.2-Release-Notes.md),
release [2.0.1](./MaxScale-2.0.1-Release-Notes.md) and
[2.0.0](./MaxScale-2.0.0-Release-Notes.md).
For any problems you encounter, please submit a bug report at
[Jira](https://jira.mariadb.org).
## Changed Features
- The dbfwfilter now rejects all prepared statements instead of ignoring
them. This affects _wildcard_, _columns_, _on_queries_ and _no_where_clause_
type rules which previously ignored prepared statements.
- The dbfwfilter now allows COM_PING and other commands through when
`action=allow`. See the [documentation](../Filters/Database-Firewall-Filter.md)
for more details.
- The MariaDB Connector-C was upgraded to a preliminary release of version 2.3.3 (fixes MXS-951).
## Bug fixes
[Here](https://jira.mariadb.org/issues/?jql=project%20%3D%20MXS%20AND%20issuetype%20%3D%20Bug%20AND%20status%20%3D%20Closed%20AND%20fixVersion%20%3D%202.0.4)
is a list of bugs fixed since the release of MaxScale 2.0.3.
* [MXS-1111](https://jira.mariadb.org/browse/MXS-1111): Request Ping not allowed
* [MXS-1082](https://jira.mariadb.org/browse/MXS-1082): Block prepared statements
* [MXS-1080](https://jira.mariadb.org/browse/MXS-1080): Readwritesplit (documentation of max_slave_replication_lag)
* [MXS-951](https://jira.mariadb.org/browse/MXS-951): Using utf8mb4 on galera hosts stops maxscale connections
## Known Issues and Limitations
There are some limitations and known issues within this version of MaxScale.
For more information, please refer to the [Limitations](../About/Limitations.md) document.
## Packaging
RPM and Debian packages are provided for the Linux distributions supported
by MariaDB Enterprise.
Packages can be downloaded [here](https://mariadb.com/resources/downloads).
## Source Code
The source code of MaxScale is tagged at GitHub with a tag, which is derived
from the version of MaxScale. For instance, the tag of version `X.Y.Z` of MaxScale
is `maxscale-X.Y.Z`.
The source code is available [here](https://github.com/mariadb-corporation/MaxScale).

View File

@ -1,47 +0,0 @@
# MariaDB MaxScale 2.0.5 Release Notes
Release 2.0.5 is a GA release.
This document describes the changes in release 2.0.5, when compared to
release [2.0.4](MaxScale-2.0.4-Release-Notes.md).
If you are upgrading from release 1.4, please also read the following
release notes:
[2.0.4](./MaxScale-2.0.4-Release-Notes.md),
[2.0.3](./MaxScale-2.0.3-Release-Notes.md),
[2.0.2](./MaxScale-2.0.2-Release-Notes.md),
[2.0.1](./MaxScale-2.0.1-Release-Notes.md) and
[2.0.0](./MaxScale-2.0.0-Release-Notes.md).
For any problems you encounter, please submit a bug report at
[Jira](https://jira.mariadb.org).
## Bug fixes
[Here](https://jira.mariadb.org/issues/?jql=project%20%3D%20MXS%20AND%20issuetype%20%3D%20Bug%20AND%20status%20%3D%20Closed%20AND%20fixVersion%20%3D%202.0.5)
is a list of bugs fixed since the release of MaxScale 2.0.4.
* [MXS-1130](https://jira.mariadb.org/browse/MXS-1130): Unexpected length encoding 'ff' encountered
* [MXS-1123](https://jira.mariadb.org/browse/MXS-1123): connect_timeout setting causes frequent disconnects
* [MXS-1081](https://jira.mariadb.org/browse/MXS-1081): Avro data file corruption
* [MXS-1025](https://jira.mariadb.org/browse/MXS-1025): qc_sqlite always reports " Statement was parsed, but not classified"
## Known Issues and Limitations
There are some limitations and known issues within this version of MaxScale.
For more information, please refer to the [Limitations](../About/Limitations.md) document.
## Packaging
RPM and Debian packages are provided for the Linux distributions supported
by MariaDB Enterprise.
Packages can be downloaded [here](https://mariadb.com/resources/downloads).
## Source Code
The source code of MaxScale is tagged at GitHub with a tag, which is derived
from the version of MaxScale. For instance, the tag of version `X.Y.Z` of MaxScale
is `maxscale-X.Y.Z`.
The source code is available [here](https://github.com/mariadb-corporation/MaxScale).

View File

@ -1,4 +1,4 @@
# MariaDB MaxScale 2.1.1 Release Notes
# MariaDB MaxScale 2.1.1 Release Notes -- 2017-03-14
Release 2.1.1 is a Beta release.

View File

@ -15,11 +15,23 @@ report at [Jira](https://jira.mariadb.org).
## Changed Features
### Cache
* The storage `storage_inmemory` is now the default, so the parameter
`storage` no longer needs to be set explicitly.
### Improved Wildcard Matching
The MySQLAuth module now supports all types of wildcards for both IP
addresses and hostnames.
### Configurable Connector-C Plugin Directory
The Connector-C used by MaxScale can now be configured to load authentication
plugins from a specific directory with the new `connector_plugindir`
parameter. Read the [Configuration Guide](../Getting-Started/Configuration-Guide.md)
for more details about this new parameter.
## New Features
### IPv6 Support
@ -31,6 +43,8 @@ well as being able to listen on IPv6 addresses.
[Here is a list of bugs fixed since the release of MaxScale 2.1.1.](https://jira.mariadb.org/issues/?jql=project%20%3D%20MXS%20AND%20issuetype%20%3D%20Bug%20AND%20resolution%20in%20(Fixed%2C%20Done)%20AND%20fixVersion%20%3D%202.1.2%20AND%20fixVersion%20NOT%20IN%20(2.1.1))
* [MXS-1032](https://jira.mariadb.org/browse/MXS-1032) missing mysql_clear_password.so plugin
## Known Issues and Limitations
There are some limitations and known issues within this version of MaxScale.

View File

@ -136,7 +136,7 @@ uint64_t avro_length_integer(uint64_t val)
*
* @see maxavro_get_error
*/
char* maxavro_read_string(MAXAVRO_FILE* file)
char* maxavro_read_string(MAXAVRO_FILE* file, size_t* size)
{
char *key = NULL;
uint64_t len;
@ -149,6 +149,7 @@ char* maxavro_read_string(MAXAVRO_FILE* file)
memcpy(key, file->buffer_ptr, len);
key[len] = '\0';
file->buffer_ptr += len;
*size = len;
}
else
{
@ -282,19 +283,10 @@ MAXAVRO_MAP* maxavro_read_map_from_file(MAXAVRO_FILE *file)
{
for (long i = 0; i < blocks; i++)
{
size_t size;
MAXAVRO_MAP* val = calloc(1, sizeof(MAXAVRO_MAP));
uint64_t keylen;
uint64_t valuelen;
if (val && maxavro_read_integer_from_file(file, &keylen) &&
(val->key = MXS_MALLOC(keylen + 1)) &&
fread(val->key, 1, keylen, file->file) == keylen &&
maxavro_read_integer_from_file(file, &valuelen) &&
(val->value = MXS_MALLOC(valuelen + 1)) &&
fread(val->value, 1, valuelen, file->file) == valuelen)
if (val && (val->key = maxavro_read_string(file, &size)) && (val->value = maxavro_read_string(file, &size)))
{
val->key[keylen] = '\0';
val->value[valuelen] = '\0';
val->next = rval;
rval = val;
}

View File

@ -22,7 +22,7 @@
/** Reading primitives */
bool maxavro_read_integer(MAXAVRO_FILE *file, uint64_t *val);
char* maxavro_read_string(MAXAVRO_FILE *file);
char* maxavro_read_string(MAXAVRO_FILE *file, size_t *size);
bool maxavro_skip_string(MAXAVRO_FILE* file);
bool maxavro_read_float(MAXAVRO_FILE *file, float *dest);
bool maxavro_read_double(MAXAVRO_FILE *file, double *dest);

View File

@ -98,10 +98,11 @@ static json_t* read_and_pack_value(MAXAVRO_FILE *file, MAXAVRO_SCHEMA_FIELD *fie
case MAXAVRO_TYPE_BYTES:
case MAXAVRO_TYPE_STRING:
{
char *str = maxavro_read_string(file);
size_t len;
char *str = maxavro_read_string(file, &len);
if (str)
{
value = json_string(str);
value = json_stringn(str, len);
MXS_FREE(str);
}
}

View File

@ -44,11 +44,6 @@ check_include_files(time.h HAVE_TIME)
check_include_files(unistd.h HAVE_UNISTD)
# Check for libraries MaxScale depends on
find_library(HAVE_LIBAIO NAMES aio)
if(NOT HAVE_LIBAIO)
message(FATAL_ERROR "Could not find libaio")
endif()
find_library(HAVE_LIBSSL NAMES ssl)
if(NOT HAVE_LIBSSL)
message(FATAL_ERROR "Could not find libssl")

View File

@ -22,6 +22,7 @@ set(DEFAULT_EXEC_SUBPATH "${MAXSCALE_BINDIR}" CACHE PATH "Default executable sub
set(DEFAULT_CONFIG_SUBPATH "etc" CACHE PATH "Default configuration subpath")
set(DEFAULT_CONFIG_PERSIST_SUBPATH "maxscale.cnf.d" CACHE PATH "Default persisted configuration subpath")
set(DEFAULT_MODULE_CONFIG_SUBPATH "${DEFAULT_CONFIG_SUBPATH}/maxscale.modules.d" CACHE PATH "Default configuration subpath")
set(DEFAULT_CONNECTOR_PLUGIN_SUBPATH "lib/plugin" CACHE PATH "Default connector plugin subpath")
set(DEFAULT_PIDDIR ${MAXSCALE_VARDIR}/${DEFAULT_PID_SUBPATH} CACHE PATH "Default PID file directory")
set(DEFAULT_LOGDIR ${MAXSCALE_VARDIR}/${DEFAULT_LOG_SUBPATH} CACHE PATH "Default log directory")
@ -33,6 +34,7 @@ set(DEFAULT_EXECDIR ${CMAKE_INSTALL_PREFIX}/${DEFAULT_EXEC_SUBPATH} CACHE PATH "
set(DEFAULT_CONFIGDIR /${DEFAULT_CONFIG_SUBPATH} CACHE PATH "Default configuration directory")
set(DEFAULT_CONFIG_PERSISTDIR ${DEFAULT_DATADIR}/${DEFAULT_CONFIG_PERSIST_SUBPATH} CACHE PATH "Default persisted configuration directory")
set(DEFAULT_MODULE_CONFIGDIR /${DEFAULT_MODULE_CONFIG_SUBPATH} CACHE PATH "Default module configuration directory")
set(DEFAULT_CONNECTOR_PLUGINDIR ${MAXSCALE_VARDIR}/${DEFAULT_CONNECTOR_PLUGIN_SUBPATH} CACHE PATH "Default connector plugin directory")
# Massage TARGET_COMPONENT into a list
if (TARGET_COMPONENT)

View File

@ -53,9 +53,29 @@ GWBUF* modutil_create_mysql_err_msg(int packet_number,
int merrno,
const char *statemsg,
const char *msg);
int modutil_count_signal_packets(GWBUF*, int, int, int*);
mxs_pcre2_result_t modutil_mysql_wildcard_match(const char* pattern, const char* string);
/**
* Given a buffer containing a MySQL statement, this function will return
* a pointer to the first character that is not whitespace. In this context,
* comments are also counted as whitespace. For instance:
*
* "SELECT" => "SELECT"
* " SELECT => "SELECT"
* " / * A comment * / SELECT" => "SELECT"
* "-- comment\nSELECT" => "SELECT"
*
* @param sql Pointer to buffer containing a MySQL statement
* @param len Length of sql.
*
* @return The first non whitespace (including comments) character. If the
* entire buffer is only whitespace, the returned pointer will point
* to the character following the buffer (i.e. sql + len).
*/
char* modutil_MySQL_bypass_whitespace(char* sql, size_t len);
/** Character and token searching functions */
char* strnchr_esc(char* ptr, char c, int len);
char* strnchr_esc_mysql(char* ptr, char c, int len);

View File

@ -31,6 +31,7 @@ MXS_BEGIN_DECLS
#define MXS_DEFAULT_CONFIG_SUBPATH "@DEFAULT_CONFIG_SUBPATH@"
#define MXS_DEFAULT_CONFIG_PERSIST_SUBPATH "@DEFAULT_CONFIG_PERSIST_SUBPATH@"
#define MXS_DEFAULT_MODULE_CONFIG_SUBPATH "@DEFAULT_MODULE_CONFIG_SUBPATH@"
#define MXS_DEFAULT_CONNECTOR_PLUGIN_SUBPATH "@DEFAULT_CONNECTOR_PLUGIN_SUBPATH@"
/** Default file locations, configured by CMake */
#define MXS_DEFAULT_CONFIGDIR "@DEFAULT_CONFIGDIR@"
@ -43,6 +44,7 @@ MXS_BEGIN_DECLS
#define MXS_DEFAULT_EXECDIR "@DEFAULT_EXECDIR@"
#define MXS_DEFAULT_CONFIG_PERSISTDIR "@DEFAULT_CONFIG_PERSISTDIR@"
#define MXS_DEFAULT_MODULE_CONFIGDIR "@DEFAULT_MODULE_CONFIGDIR@"
#define MXS_DEFAULT_CONNECTOR_PLUGINDIR "@DEFAULT_CONNECTOR_PLUGINDIR@"
static const char* default_cnf_fname = "maxscale.cnf";
static const char* default_configdir = MXS_DEFAULT_CONFIGDIR;
@ -59,6 +61,7 @@ static const char* default_langdir = MXS_DEFAULT_LANGDIR;
static const char* default_execdir = MXS_DEFAULT_EXECDIR;
static const char* default_config_persistdir = MXS_DEFAULT_CONFIG_PERSISTDIR;
static const char* default_module_configdir = MXS_DEFAULT_MODULE_CONFIGDIR;
static const char* default_connector_plugindir = MXS_DEFAULT_CONNECTOR_PLUGINDIR;
static char* configdir = NULL; /*< Where the config file is found e.g. /etc/ */
static char* config_persistdir = NULL;/*< Persisted configs e.g. /var/lib/maxscale/maxscale.cnf.d/ */
@ -71,6 +74,7 @@ static char* processdatadir = NULL; /*< Process specific data directory */
static char* langdir = NULL;
static char* piddir = NULL;
static char* execdir = NULL;
static char* connector_plugindir = NULL;
void set_libdir(char* param);
void set_datadir(char* param);
@ -83,6 +87,7 @@ void set_logdir(char* param);
void set_langdir(char* param);
void set_piddir(char* param);
void set_execdir(char* param);
void set_connector_plugindir(char* param);
char* get_libdir();
char* get_datadir();
char* get_process_datadir();
@ -94,5 +99,6 @@ char* get_piddir();
char* get_logdir();
char* get_langdir();
char* get_execdir();
char* get_connector_plugindir();
MXS_END_DECLS

View File

@ -19,6 +19,29 @@ MXS_BEGIN_DECLS
#define QUERY_CLASSIFIER_VERSION {1, 1, 0}
/**
* qc_init_kind_t specifies what kind of initialization should be performed.
*/
typedef enum qc_init_kind
{
QC_INIT_SELF = 0x01, /*< Initialize/finalize the query classifier itself. */
QC_INIT_PLUGIN = 0x02, /*< Initialize/finalize the plugin. */
QC_INIT_BOTH = 0x03
} qc_init_kind_t;
/**
* @c qc_collect_info_t specifies what information should be collected during parsing.
*/
typedef enum qc_collect_info
{
QC_COLLECT_ESSENTIALS = 0x00, /*< Collect only the base minimum. */
QC_COLLECT_TABLES = 0x01, /*< Collect table names. */
QC_COLLECT_DATABASES = 0x02, /*< Collect database names. */
QC_COLLECT_FIELDS = 0x04, /*< Collect field information. */
QC_COLLECT_FUNCTIONS = 0x08, /*< Collect function information. */
QC_COLLECT_ALL = (QC_COLLECT_TABLES|QC_COLLECT_DATABASES|QC_COLLECT_FIELDS|QC_COLLECT_FUNCTIONS)
} qc_collect_info_t;
/**
* qc_query_type_t defines bits that provide information about a
* particular statement.
@ -186,13 +209,16 @@ typedef struct query_classifier
/**
* Called to explicitly parse a statement.
*
* @param stmt The statement to be parsed.
* @param result On return, the parse result, if @c QC_RESULT_OK is returned.
* @param stmt The statement to be parsed.
* @param collect A bitmask of @c qc_collect_info_t values. Specifies what information
* should be collected. Only a hint and must not restrict what information
* later can be queried.
* @param result On return, the parse result, if @c QC_RESULT_OK is returned.
*
* @return QC_RESULT_OK, if the parsing was not aborted due to resource
* exhaustion or equivalent.
*/
int32_t (*qc_parse)(GWBUF* stmt, int32_t* result);
int32_t (*qc_parse)(GWBUF* stmt, uint32_t collect, int32_t* result);
/**
* Reports the type of the statement.
@ -377,11 +403,14 @@ bool qc_setup(const char* plugin_name, const char* plugin_args);
*
* MaxScale calls this functions, so plugins should not do that.
*
* @param kind What kind of initialization should be performed.
* Combination of qc_init_kind_t.
*
* @return True, if the process wide initialization could be performed.
*
* @see qc_process_end qc_thread_init
*/
bool qc_process_init(void);
bool qc_process_init(uint32_t kind);
/**
* Finalizes the query classifier.
@ -390,9 +419,12 @@ bool qc_process_init(void);
* by a call to this function. MaxScale calls this function, so plugins
* should not do that.
*
* @param kind What kind of finalization should be performed.
* Combination of qc_init_kind_t.
*
* @see qc_process_init qc_thread_end
*/
void qc_process_end(void);
void qc_process_end(uint32_t kind);
/**
* Loads a particular query classifier.
@ -426,11 +458,14 @@ void qc_unload(QUERY_CLASSIFIER* classifier);
*
* MaxScale calls this function, so plugins should not do that.
*
* @param kind What kind of initialization should be performed.
* Combination of qc_init_kind_t.
*
* @return True if the initialization succeeded, false otherwise.
*
* @see qc_thread_end
*/
bool qc_thread_init(void);
bool qc_thread_init(uint32_t kind);
/**
* Performs thread finalization needed by the query classifier.
@ -439,9 +474,12 @@ bool qc_thread_init(void);
*
* MaxScale calls this function, so plugins should not do that.
*
* @param kind What kind of finalization should be performed.
* Combination of qc_init_kind_t.
*
* @see qc_thread_init
*/
void qc_thread_end(void);
void qc_thread_end(uint32_t kind);
/**
* Parses the statement in the provided buffer and returns a value specifying
@ -454,11 +492,17 @@ void qc_thread_end(void);
* already then this function will only return the result of that parsing;
* the statement will not be parsed again.
*
* @param stmt A buffer containing an COM_QUERY or COM_STMT_PREPARE packet.
* @param stmt A buffer containing an COM_QUERY or COM_STMT_PREPARE packet.
* @param collect A bitmask of @c qc_collect_info_t values. Specifies what information
* should be collected.
*
* Note that this is merely a hint and does not restrict what
* information can be queried for. If necessary, the statement
* will transparently be reparsed.
*
* @return To what extent the statement could be parsed.
*/
qc_parse_result_t qc_parse(GWBUF* stmt);
qc_parse_result_t qc_parse(GWBUF* stmt, uint32_t collect);
/**
* Convert a qc_field_usage_t enum to corresponding string.
@ -623,6 +667,30 @@ char** qc_get_table_names(GWBUF* stmt, int* size, bool fullnames);
*/
uint32_t qc_get_type_mask(GWBUF* stmt);
/**
* Returns the type bitmask of transaction related statements.
*
* If the statement starts a transaction, ends a transaction or
* changes the autocommit state, the returned bitmap will be a
* combination of:
*
* QUERY_TYPE_BEGIN_TRX
* QUERY_TYPE_COMMIT
* QUERY_TYPE_ROLLBACK
* QUERY_TYPE_ENABLE_AUTOCOMMIT
* QUERY_TYPE_DISABLE_AUTOCOMMIT
* QUERY_TYPE_READ (explicitly read only transaction)
* QUERY_TYPE_WRITE (explicitly read write transaction)
*
* Otherwise the result will be 0.
*
* @param stmt A COM_QUERY or COM_STMT_PREPARE packet.
*
* @return The relevant type bits if the statement is transaction
* related, otherwise 0.
*/
uint32_t qc_get_trx_type_mask(GWBUF* stmt);
/**
* Returns whether the statement is a DROP TABLE statement.
*

View File

@ -79,21 +79,123 @@ typedef enum error_action
*/
typedef struct mxs_router_object
{
/**
* @brief Create a new instance of the router
*
* This function is called when a new router instance is created. The return
* value of this function will be passed as the first parameter to the
* other API functions.
*
* @param service The service where the instance is created
* @param options Router options
*
* @return New router instance on NULL on error
*/
MXS_ROUTER *(*createInstance)(SERVICE *service, char **options);
/**
* Called to create a new user session within the router
*
* This function is called when a new router session is created for a client.
* The return value of this function will be passed as the second parameter
* to the @c routeQuery, @c clientReply, @c closeSession, @c freeSession,
* and @c handleError functions.
*
* @param instance Router instance
* @param session Client MXS_SESSION object
*
* @return New router session or NULL on error
*/
MXS_ROUTER_SESSION *(*newSession)(MXS_ROUTER *instance, MXS_SESSION *session);
/**
* @brief Called when a session is closed
*
* The router should close all objects (including backend DCBs) but not free any memory.
*
* @param instance Router instance
* @param router_session Router session
*/
void (*closeSession)(MXS_ROUTER *instance, MXS_ROUTER_SESSION *router_session);
/**
* @brief Called when a session is freed
*
* The session should free all allocated memory in this function.
*
* @param instance Router instance
* @param router_session Router session
*/
void (*freeSession)(MXS_ROUTER *instance, MXS_ROUTER_SESSION *router_session);
/**
* @brief Called on each query that requires routing
*
* TODO: Document how routeQuery should be used
*
* @param instance Router instance
* @param router_session Router session
* @param queue Request from the client
*
* @return If successful, the function returns 1. If an error occurs
* and the session should be closed, the function returns 0.
*/
int32_t (*routeQuery)(MXS_ROUTER *instance, MXS_ROUTER_SESSION *router_session, GWBUF *queue);
/**
* @brief Called for diagnostic output
*
* @param instance Router instance
* @param dcb DCB where the diagnostic information should be written
*/
void (*diagnostics)(MXS_ROUTER *instance, DCB *dcb);
void (*clientReply)(MXS_ROUTER* instance, MXS_ROUTER_SESSION *router_session, GWBUF *queue,
DCB *backend_dcb);
/**
* @brief Called for each reply packet
*
* TODO: Document how clientReply should be used
*
* @param instance Router instance
* @param router_session Router session
* @param queue Response from the server
* @param backend_dcb The backend DCB which responded to the query
*/
void (*clientReply)(MXS_ROUTER* instance, MXS_ROUTER_SESSION *router_session,
GWBUF *queue, DCB *backend_dcb);
/**
* @brief Called when a backend DCB has failed
*
* @param instance Router instance
* @param router_session Router session
* @param errmsgbuf Error message buffer
* @param backend_dcb The backend DCB that has failed
* @param action The type of the action (TODO: Remove this parameter)
*
* @param succp Pointer to a `bool` which should be set to true for success or false for error
*/
void (*handleError)(MXS_ROUTER *instance,
MXS_ROUTER_SESSION *router_session,
GWBUF *errmsgbuf,
DCB *backend_dcb,
mxs_error_action_t action,
bool* succp);
/**
* @brief Called to obtain the capabilities of the router
*
* @return Zero or more bitwise-or'd values from the mxs_routing_capability_t enum
*
* @see routing.h
*/
uint64_t (*getCapabilities)(MXS_ROUTER *instance);
/**
* @brief Called for destroying a router instance
*
* @param instance Router instance
*/
void (*destroyInstance)(MXS_ROUTER *instance);
} MXS_ROUTER_OBJECT;

View File

@ -254,6 +254,14 @@ void server_add_parameter(SERVER *server, const char *name, const char *value);
*/
bool server_remove_parameter(SERVER *server, const char *name);
/**
* @brief Check if a server points to a local MaxScale service
*
* @param server Server to check
* @return True if the server points to a local MaxScale service
*/
bool server_is_mxs_service(const SERVER *server);
extern int server_free(SERVER *server);
extern SERVER *server_find_by_unique_name(const char *name);
extern int server_find_by_unique_names(char **server_names, int size, SERVER*** output);

View File

@ -241,6 +241,14 @@ bool serviceHasBackend(SERVICE *service, SERVER *server);
bool serviceHasListener(SERVICE *service, const char *protocol,
const char* address, unsigned short port);
/**
* @brief Check if a MaxScale service listens on a port
*
* @param port The port to check
* @return True if a MaxScale service uses the port
*/
bool service_port_is_used(unsigned short port);
int serviceGetUser(SERVICE *service, char **user, char **auth);
int serviceSetUser(SERVICE *service, char *user, char *auth);
bool serviceSetFilters(SERVICE *service, char *filters);

View File

@ -24,6 +24,7 @@
*/
#include <maxscale/cdefs.h>
#include <stdbool.h>
#include <maxscale/debug.h>
MXS_BEGIN_DECLS
@ -42,37 +43,73 @@ MXS_BEGIN_DECLS
*/
typedef struct spinlock
{
volatile int lock;/*< Is the lock held? */
int lock; /*< Is the lock held? */
#if SPINLOCK_PROFILE
int spins; /*< Number of spins on this lock */
int maxspins; /*< Max no of spins to acquire lock */
int acquired; /*< No. of times lock was acquired */
int waiting; /*< No. of threads acquiring this lock */
int max_waiting; /*< Max no of threads waiting for lock */
int contended; /*< No. of times acquire was contended */
THREAD owner; /*< Last owner of this lock */
uint64_t spins; /*< Number of spins on this lock */
uint64_t maxspins; /*< Max no of spins to acquire lock */
uint64_t acquired; /*< No. of times lock was acquired */
uint64_t waiting; /*< No. of threads acquiring this lock */
uint64_t max_waiting; /*< Max no of threads waiting for lock */
uint64_t contended; /*< No. of times acquire was contended */
THREAD owner; /*< Last owner of this lock */
#endif
} SPINLOCK;
#ifndef TRUE
#define TRUE true
#endif
#ifndef FALSE
#define FALSE false
#endif
#if SPINLOCK_PROFILE
#define SPINLOCK_INIT { 0, 0, 0, 0, 0, 0, 0, 0 }
#else
#define SPINLOCK_INIT { 0 }
#endif
/**
* Debugging macro for testing the state of a spinlock.
*
* @attention ONLY to be used in debugging context.
*/
#define SPINLOCK_IS_LOCKED(l) ((l)->lock != 0 ? true : false)
/**
* Initialise a spinlock.
*
* @param lock The spinlock to initialise.
*/
extern void spinlock_init(SPINLOCK *lock);
/**
* Acquire a spinlock.
*
* @param lock The spinlock to acquire
*/
extern void spinlock_acquire(const SPINLOCK *lock);
extern int spinlock_acquire_nowait(const SPINLOCK *lock);
/**
* Acquire a spinlock if it is not already locked.
*
* @param lock The spinlock to acquire
* @return True if the spinlock was acquired, otherwise false
*/
extern bool spinlock_acquire_nowait(const SPINLOCK *lock);
/*
* Release a spinlock.
*
* @param lock The spinlock to release
*/
extern void spinlock_release(const SPINLOCK *lock);
/**
* Report statistics on a spinlock. This only has an effect if the
* spinlock code has been compiled with the SPINLOCK_PROFILE option set.
*
* NB A callback function is used to return the data rather than
* merely printing to a DCB in order to avoid a dependency on the DCB
* form the spinlock code and also to facilitate other uses of the
* statistics reporting.
*
* @param lock The spinlock to report on
* @param reporter The callback function to pass the statistics to
* @param hdl A handle that is passed to the reporter function
*/
extern void spinlock_stats(const SPINLOCK *lock, void (*reporter)(void *, char *, int), void *hdl);
MXS_END_DECLS

View File

@ -16,7 +16,7 @@
#include "../../server/core/maxscale/config.h"
int32_t qc_dummy_parse(GWBUF* querybuf, int32_t* pResult)
int32_t qc_dummy_parse(GWBUF* querybuf, uint32_t collect, int32_t* pResult)
{
*pResult = QC_QUERY_INVALID;
return QC_RESULT_OK;

View File

@ -136,7 +136,7 @@ bool ensure_query_is_parsed(GWBUF* query)
return parsed;
}
int32_t qc_mysql_parse(GWBUF* querybuf, int32_t* result)
int32_t qc_mysql_parse(GWBUF* querybuf, uint32_t collect, int32_t* result)
{
bool parsed = ensure_query_is_parsed(querybuf);

View File

@ -55,6 +55,8 @@ static inline bool qc_info_was_parsed(qc_parse_result_t status)
typedef struct qc_sqlite_info
{
qc_parse_result_t status; // The validity of the information in this structure.
uint32_t collect; // What information should be collected.
uint32_t collected; // What information has been collected.
const char* query; // The query passed to sqlite.
size_t query_len; // The length of the query.
@ -128,18 +130,18 @@ typedef enum qc_token_position
static void buffer_object_free(void* data);
static char** copy_string_array(char** strings, int* pn);
static void enlarge_string_array(size_t n, size_t len, char*** ppzStrings, size_t* pCapacity);
static bool ensure_query_is_parsed(GWBUF* query);
static bool ensure_query_is_parsed(GWBUF* query, uint32_t collect);
static void free_field_infos(QC_FIELD_INFO* infos, size_t n_infos);
static void free_string_array(char** sa);
static QC_SQLITE_INFO* get_query_info(GWBUF* query);
static QC_SQLITE_INFO* info_alloc(void);
static QC_SQLITE_INFO* get_query_info(GWBUF* query, uint32_t collect);
static QC_SQLITE_INFO* info_alloc(uint32_t collect);
static void info_finish(QC_SQLITE_INFO* info);
static void info_free(QC_SQLITE_INFO* info);
static QC_SQLITE_INFO* info_init(QC_SQLITE_INFO* info);
static QC_SQLITE_INFO* info_init(QC_SQLITE_INFO* info, uint32_t collect);
static void log_invalid_data(GWBUF* query, const char* message);
static bool parse_query(GWBUF* query);
static bool parse_query(GWBUF* query, uint32_t collect);
static void parse_query_string(const char* query, size_t len);
static bool query_is_parsed(GWBUF* query);
static bool query_is_parsed(GWBUF* query, uint32_t collect);
static bool should_exclude(const char* zName, const ExprList* pExclude);
static void update_field_info(QC_SQLITE_INFO* info,
const char* database,
@ -259,13 +261,13 @@ static void enlarge_string_array(size_t n, size_t len, char*** ppzStrings, size_
}
}
static bool ensure_query_is_parsed(GWBUF* query)
static bool ensure_query_is_parsed(GWBUF* query, uint32_t collect)
{
bool parsed = query_is_parsed(query);
bool parsed = query_is_parsed(query, collect);
if (!parsed)
{
parsed = parse_query(query);
parsed = parse_query(query, collect);
}
return parsed;
@ -315,11 +317,11 @@ static void free_string_array(char** sa)
}
}
static QC_SQLITE_INFO* get_query_info(GWBUF* query)
static QC_SQLITE_INFO* get_query_info(GWBUF* query, uint32_t collect)
{
QC_SQLITE_INFO* info = NULL;
if (ensure_query_is_parsed(query))
if (ensure_query_is_parsed(query, collect))
{
info = (QC_SQLITE_INFO*) gwbuf_get_buffer_object_data(query, GWBUF_PARSING_INFO);
ss_dassert(info);
@ -328,12 +330,12 @@ static QC_SQLITE_INFO* get_query_info(GWBUF* query)
return info;
}
static QC_SQLITE_INFO* info_alloc(void)
static QC_SQLITE_INFO* info_alloc(uint32_t collect)
{
QC_SQLITE_INFO* info = MXS_MALLOC(sizeof(*info));
MXS_ABORT_IF_NULL(info);
info_init(info);
info_init(info, collect);
return info;
}
@ -359,11 +361,13 @@ static void info_free(QC_SQLITE_INFO* info)
}
}
static QC_SQLITE_INFO* info_init(QC_SQLITE_INFO* info)
static QC_SQLITE_INFO* info_init(QC_SQLITE_INFO* info, uint32_t collect)
{
memset(info, 0, sizeof(*info));
info->status = QC_QUERY_INVALID;
info->collect = collect;
info->collected = 0;
info->type_mask = QUERY_TYPE_UNKNOWN;
info->operation = QUERY_OP_UNDEFINED;
@ -495,10 +499,10 @@ static void parse_query_string(const char* query, size_t len)
}
}
static bool parse_query(GWBUF* query)
static bool parse_query(GWBUF* query, uint32_t collect)
{
bool parsed = false;
ss_dassert(!query_is_parsed(query));
ss_dassert(!query_is_parsed(query, collect));
if (GWBUF_IS_CONTIGUOUS(query))
{
@ -511,7 +515,29 @@ static bool parse_query(GWBUF* query)
if ((command == MYSQL_COM_QUERY) || (command == MYSQL_COM_STMT_PREPARE))
{
QC_SQLITE_INFO* info = info_alloc();
QC_SQLITE_INFO* info =
(QC_SQLITE_INFO*) gwbuf_get_buffer_object_data(query, GWBUF_PARSING_INFO);
if (info)
{
ss_dassert((~info->collect & collect) != 0);
ss_dassert((~info->collected & collect) != 0);
// If we get here, then the statement has been parsed once, but
// not all needed information was collected. Now we turn on all
// blinkenlights to ensure that a statement is parsed at most twice.
info->collect = QC_COLLECT_ALL;
}
else
{
info = info_alloc(collect);
if (info)
{
// TODO: Add return value to gwbuf_add_buffer_object.
gwbuf_add_buffer_object(query, GWBUF_PARSING_INFO, info, buffer_object_free);
}
}
if (info)
{
@ -532,10 +558,8 @@ static bool parse_query(GWBUF* query)
info->type_mask |= QUERY_TYPE_PREPARE_STMT;
}
// TODO: Add return value to gwbuf_add_buffer_object.
// Always added; also when it was not recognized. If it was not recognized now,
// it won't be if we try a second time.
gwbuf_add_buffer_object(query, GWBUF_PARSING_INFO, info, buffer_object_free);
info->collected = info->collect;
parsed = true;
this_thread.info = NULL;
@ -566,9 +590,24 @@ static bool parse_query(GWBUF* query)
return parsed;
}
static bool query_is_parsed(GWBUF* query)
static bool query_is_parsed(GWBUF* query, uint32_t collect)
{
return query && GWBUF_IS_PARSED(query);
bool rc = query && GWBUF_IS_PARSED(query);
if (rc)
{
QC_SQLITE_INFO* info = (QC_SQLITE_INFO*) gwbuf_get_buffer_object_data(query, GWBUF_PARSING_INFO);
ss_dassert(info);
if ((~info->collected & collect) != 0)
{
// The statement has been parsed once, but the needed information
// was not collected at that time.
rc = false;
}
}
return rc;
}
/**
@ -652,6 +691,13 @@ static void update_field_info(QC_SQLITE_INFO* info,
{
ss_dassert(column);
if (!(info->collect & QC_COLLECT_FIELDS) || (info->collected & QC_COLLECT_FIELDS))
{
// If field information should not be collected, or if field information
// has already been collected, we just return.
return;
}
QC_FIELD_INFO item = { (char*)database, (char*)table, (char*)column, usage };
int i;
@ -737,6 +783,13 @@ static void update_function_info(QC_SQLITE_INFO* info,
{
ss_dassert(name);
if (!(info->collect & QC_COLLECT_FUNCTIONS) || (info->collected & QC_COLLECT_FUNCTIONS))
{
// If function information should not be collected, or if function information
// has already been collected, we just return.
return;
}
QC_FUNCTION_INFO item = { (char*)name, usage };
int i;
@ -1220,37 +1273,46 @@ static void update_database_names(QC_SQLITE_INFO* info, const char* zDatabase)
static void update_names(QC_SQLITE_INFO* info, const char* zDatabase, const char* zTable)
{
char* zCopy = MXS_STRDUP(zTable);
MXS_ABORT_IF_NULL(zCopy);
// TODO: Is this call really needed. Check also sqlite3Dequote.
exposed_sqlite3Dequote(zCopy);
enlarge_string_array(1, info->table_names_len, &info->table_names, &info->table_names_capacity);
info->table_names[info->table_names_len++] = zCopy;
info->table_names[info->table_names_len] = NULL;
if (zDatabase)
if ((info->collect & QC_COLLECT_TABLES) && !(info->collected & QC_COLLECT_TABLES))
{
zCopy = MXS_MALLOC(strlen(zDatabase) + 1 + strlen(zTable) + 1);
char* zCopy = MXS_STRDUP(zTable);
MXS_ABORT_IF_NULL(zCopy);
strcpy(zCopy, zDatabase);
strcat(zCopy, ".");
strcat(zCopy, zTable);
// TODO: Is this call really needed. Check also sqlite3Dequote.
exposed_sqlite3Dequote(zCopy);
update_database_names(info, zDatabase);
}
else
{
zCopy = MXS_STRDUP(zCopy);
MXS_ABORT_IF_NULL(zCopy);
enlarge_string_array(1, info->table_names_len, &info->table_names, &info->table_names_capacity);
info->table_names[info->table_names_len++] = zCopy;
info->table_names[info->table_names_len] = NULL;
if (zDatabase)
{
zCopy = MXS_MALLOC(strlen(zDatabase) + 1 + strlen(zTable) + 1);
MXS_ABORT_IF_NULL(zCopy);
strcpy(zCopy, zDatabase);
strcat(zCopy, ".");
strcat(zCopy, zTable);
exposed_sqlite3Dequote(zCopy);
}
else
{
zCopy = MXS_STRDUP(zCopy);
MXS_ABORT_IF_NULL(zCopy);
}
enlarge_string_array(1, info->table_fullnames_len,
&info->table_fullnames, &info->table_fullnames_capacity);
info->table_fullnames[info->table_fullnames_len++] = zCopy;
info->table_fullnames[info->table_fullnames_len] = NULL;
}
enlarge_string_array(1, info->table_fullnames_len,
&info->table_fullnames, &info->table_fullnames_capacity);
info->table_fullnames[info->table_fullnames_len++] = zCopy;
info->table_fullnames[info->table_fullnames_len] = NULL;
if ((info->collect & QC_COLLECT_DATABASES) && !(info->collected & QC_COLLECT_DATABASES))
{
if (zDatabase)
{
update_database_names(info, zDatabase);
}
}
}
static void update_names_from_srclist(QC_SQLITE_INFO* info, const SrcList* pSrc)
@ -1736,8 +1798,21 @@ void mxs_sqlite3StartTable(Parse *pParse, /* Parser context */
update_names(info, NULL, name);
}
info->created_table_name = MXS_STRDUP(info->table_names[0]);
MXS_ABORT_IF_NULL(info->created_table_name);
if (info->collect & QC_COLLECT_TABLES)
{
// If information is collected in several passes, then we may
// have this information already.
if (!info->created_table_name)
{
info->created_table_name = MXS_STRDUP(info->table_names[0]);
MXS_ABORT_IF_NULL(info->created_table_name);
}
else
{
ss_dassert(info->collect != info->collected);
ss_dassert(strcmp(info->created_table_name, info->table_names[0]) == 0);
}
}
}
else
{
@ -1899,11 +1974,21 @@ void maxscaleDeallocate(Parse* pParse, Token* pName)
info->status = QC_QUERY_PARSED;
info->type_mask = QUERY_TYPE_WRITE;
info->prepare_name = MXS_MALLOC(pName->n + 1);
if (info->prepare_name)
// If information is collected in several passes, then we may
// have this information already.
if (!info->prepare_name)
{
memcpy(info->prepare_name, pName->z, pName->n);
info->prepare_name[pName->n] = 0;
info->prepare_name = MXS_MALLOC(pName->n + 1);
if (info->prepare_name)
{
memcpy(info->prepare_name, pName->z, pName->n);
info->prepare_name[pName->n] = 0;
}
}
else
{
ss_dassert(info->collect != info->collected);
ss_dassert(strncmp(info->prepare_name, pName->z, pName->n) == 0);
}
}
@ -1942,11 +2027,21 @@ void maxscaleExecute(Parse* pParse, Token* pName)
info->status = QC_QUERY_PARSED;
info->type_mask = QUERY_TYPE_WRITE;
info->prepare_name = MXS_MALLOC(pName->n + 1);
if (info->prepare_name)
// If information is collected in several passes, then we may
// have this information already.
if (!info->prepare_name)
{
memcpy(info->prepare_name, pName->z, pName->n);
info->prepare_name[pName->n] = 0;
info->prepare_name = MXS_MALLOC(pName->n + 1);
if (info->prepare_name)
{
memcpy(info->prepare_name, pName->z, pName->n);
info->prepare_name[pName->n] = 0;
}
}
else
{
ss_dassert(info->collect != info->collected);
ss_dassert(strncmp(info->prepare_name, pName->z, pName->n) == 0);
}
}
@ -2312,32 +2407,42 @@ void maxscalePrepare(Parse* pParse, Token* pName, Token* pStmt)
info->status = QC_QUERY_PARSED;
info->type_mask = QUERY_TYPE_PREPARE_NAMED_STMT;
info->prepare_name = MXS_MALLOC(pName->n + 1);
if (info->prepare_name)
// If information is collected in several passes, then we may
// have this information already.
if (!info->prepare_name)
{
memcpy(info->prepare_name, pName->z, pName->n);
info->prepare_name[pName->n] = 0;
info->prepare_name = MXS_MALLOC(pName->n + 1);
if (info->prepare_name)
{
memcpy(info->prepare_name, pName->z, pName->n);
info->prepare_name[pName->n] = 0;
}
size_t preparable_stmt_len = pStmt->n - 2;
size_t payload_len = 1 + preparable_stmt_len;
size_t packet_len = MYSQL_HEADER_LEN + payload_len;
info->preparable_stmt = gwbuf_alloc(packet_len);
if (info->preparable_stmt)
{
uint8_t* ptr = GWBUF_DATA(info->preparable_stmt);
// Payload length
*ptr++ = payload_len;
*ptr++ = (payload_len >> 8);
*ptr++ = (payload_len >> 16);
// Sequence id
*ptr++ = 0x00;
// Command
*ptr++ = MYSQL_COM_QUERY;
memcpy(ptr, pStmt->z + 1, pStmt->n - 2);
}
}
size_t preparable_stmt_len = pStmt->n - 2;
size_t payload_len = 1 + preparable_stmt_len;
size_t packet_len = MYSQL_HEADER_LEN + payload_len;
info->preparable_stmt = gwbuf_alloc(packet_len);
if (info->preparable_stmt)
else
{
uint8_t* ptr = GWBUF_DATA(info->preparable_stmt);
// Payload length
*ptr++ = payload_len;
*ptr++ = (payload_len >> 8);
*ptr++ = (payload_len >> 16);
// Sequence id
*ptr++ = 0x00;
// Command
*ptr++ = MYSQL_COM_QUERY;
memcpy(ptr, pStmt->z + 1, pStmt->n - 2);
ss_dassert(info->collect != info->collected);
ss_dassert(strncmp(info->prepare_name, pName->z, pName->n) == 0);
}
}
@ -2792,7 +2897,7 @@ static int32_t qc_sqlite_process_init(void);
static void qc_sqlite_process_end(void);
static int32_t qc_sqlite_thread_init(void);
static void qc_sqlite_thread_end(void);
static int32_t qc_sqlite_parse(GWBUF* query, int32_t* result);
static int32_t qc_sqlite_parse(GWBUF* query, uint32_t collect, int32_t* result);
static int32_t qc_sqlite_get_type_mask(GWBUF* query, uint32_t* typemask);
static int32_t qc_sqlite_get_operation(GWBUF* query, int32_t* op);
static int32_t qc_sqlite_get_created_table_name(GWBUF* query, char** name);
@ -2952,7 +3057,7 @@ static int32_t qc_sqlite_thread_init(void)
MXS_INFO("In-memory sqlite database successfully opened for thread %lu.",
(unsigned long) pthread_self());
QC_SQLITE_INFO* info = info_alloc();
QC_SQLITE_INFO* info = info_alloc(QC_COLLECT_ALL);
if (info)
{
@ -3010,13 +3115,13 @@ static void qc_sqlite_thread_end(void)
this_thread.initialized = false;
}
static int32_t qc_sqlite_parse(GWBUF* query, int32_t* result)
static int32_t qc_sqlite_parse(GWBUF* query, uint32_t collect, int32_t* result)
{
QC_TRACE();
ss_dassert(this_unit.initialized);
ss_dassert(this_thread.initialized);
QC_SQLITE_INFO* info = get_query_info(query);
QC_SQLITE_INFO* info = get_query_info(query, collect);
if (info)
{
@ -3038,7 +3143,7 @@ static int32_t qc_sqlite_get_type_mask(GWBUF* query, uint32_t* type_mask)
ss_dassert(this_thread.initialized);
*type_mask = QUERY_TYPE_UNKNOWN;
QC_SQLITE_INFO* info = get_query_info(query);
QC_SQLITE_INFO* info = get_query_info(query, QC_COLLECT_ESSENTIALS);
if (info)
{
@ -3068,7 +3173,7 @@ static int32_t qc_sqlite_get_operation(GWBUF* query, int32_t* op)
ss_dassert(this_thread.initialized);
*op = QUERY_OP_UNDEFINED;
QC_SQLITE_INFO* info = get_query_info(query);
QC_SQLITE_INFO* info = get_query_info(query, QC_COLLECT_ESSENTIALS);
if (info)
{
@ -3098,7 +3203,7 @@ static int32_t qc_sqlite_get_created_table_name(GWBUF* query, char** created_tab
ss_dassert(this_thread.initialized);
*created_table_name = NULL;
QC_SQLITE_INFO* info = get_query_info(query);
QC_SQLITE_INFO* info = get_query_info(query, QC_COLLECT_TABLES);
if (info)
{
@ -3132,7 +3237,7 @@ static int32_t qc_sqlite_is_drop_table_query(GWBUF* query, int32_t* is_drop_tabl
ss_dassert(this_thread.initialized);
*is_drop_table = 0;
QC_SQLITE_INFO* info = get_query_info(query);
QC_SQLITE_INFO* info = get_query_info(query, QC_COLLECT_ESSENTIALS);
if (info)
{
@ -3166,7 +3271,7 @@ static int32_t qc_sqlite_get_table_names(GWBUF* query,
*table_names = NULL;
*tblsize = 0;
QC_SQLITE_INFO* info = get_query_info(query);
QC_SQLITE_INFO* info = get_query_info(query, QC_COLLECT_TABLES);
if (info)
{
@ -3227,7 +3332,7 @@ static int32_t qc_sqlite_query_has_clause(GWBUF* query, int32_t* has_clause)
ss_dassert(this_thread.initialized);
*has_clause = false;
QC_SQLITE_INFO* info = get_query_info(query);
QC_SQLITE_INFO* info = get_query_info(query, QC_COLLECT_ESSENTIALS);
if (info)
{
@ -3258,7 +3363,7 @@ static int32_t qc_sqlite_get_database_names(GWBUF* query, char*** database_names
*database_names = NULL;
*sizep = 0;
QC_SQLITE_INFO* info = get_query_info(query);
QC_SQLITE_INFO* info = get_query_info(query, QC_COLLECT_DATABASES);
if (info)
{
@ -3292,7 +3397,7 @@ static int32_t qc_sqlite_get_prepare_name(GWBUF* query, char** prepare_name)
ss_dassert(this_thread.initialized);
*prepare_name = NULL;
QC_SQLITE_INFO* info = get_query_info(query);
QC_SQLITE_INFO* info = get_query_info(query, QC_COLLECT_ESSENTIALS);
if (info)
{
@ -3328,7 +3433,7 @@ int32_t qc_sqlite_get_field_info(GWBUF* query, const QC_FIELD_INFO** infos, uint
*infos = NULL;
*n_infos = 0;
QC_SQLITE_INFO* info = get_query_info(query);
QC_SQLITE_INFO* info = get_query_info(query, QC_COLLECT_FIELDS);
if (info)
{
@ -3362,7 +3467,7 @@ int32_t qc_sqlite_get_function_info(GWBUF* query, const QC_FUNCTION_INFO** infos
*infos = NULL;
*n_infos = 0;
QC_SQLITE_INFO* info = get_query_info(query);
QC_SQLITE_INFO* info = get_query_info(query, QC_COLLECT_FUNCTIONS);
if (info)
{
@ -3395,7 +3500,7 @@ int32_t qc_sqlite_get_preparable_stmt(GWBUF* stmt, GWBUF** preparable_stmt)
*preparable_stmt = NULL;
QC_SQLITE_INFO* info = get_query_info(stmt);
QC_SQLITE_INFO* info = get_query_info(stmt, QC_COLLECT_ESSENTIALS);
if (info)
{

View File

@ -47,7 +47,7 @@ int main(int argc, char** argv)
set_process_datadir(strdup("/tmp"));
qc_setup("qc_sqlite", NULL);
qc_process_init();
qc_process_init(QC_INIT_BOTH);
infile = fopen(argv[1], "rb");
outfile = fopen(argv[2], "wb");
@ -83,6 +83,6 @@ int main(int argc, char** argv)
}
fclose(infile);
fclose(outfile);
qc_process_end();
qc_process_end(QC_INIT_BOTH);
return 0;
}

View File

@ -314,10 +314,10 @@ int main(int argc, char** argv)
if (mxs_log_init(NULL, ".", MXS_LOG_TARGET_DEFAULT))
{
if (qc_setup(lib, NULL) && qc_process_init())
if (qc_setup(lib, NULL) && qc_process_init(QC_INIT_BOTH))
{
rc = run(input_name, expected_name);
qc_process_end();
qc_process_end(QC_INIT_BOTH);
}
else
{

View File

@ -312,13 +312,13 @@ bool compare_parse(QUERY_CLASSIFIER* pClassifier1, GWBUF* pCopy1,
clock_gettime(CLOCK_MONOTONIC_RAW, &start);
int32_t rv1;
pClassifier1->qc_parse(pCopy1, &rv1);
pClassifier1->qc_parse(pCopy1, QC_COLLECT_ESSENTIALS, &rv1);
clock_gettime(CLOCK_MONOTONIC_RAW, &finish);
update_time(&global.time1, start, finish);
clock_gettime(CLOCK_MONOTONIC_RAW, &start);
int32_t rv2;
pClassifier2->qc_parse(pCopy2, &rv2);
pClassifier2->qc_parse(pCopy2, QC_COLLECT_ESSENTIALS, &rv2);
clock_gettime(CLOCK_MONOTONIC_RAW, &finish);
update_time(&global.time2, start, finish);

View File

@ -41,7 +41,7 @@ int main()
set_libdir(strdup("../qc_sqlite"));
if (qc_setup("qc_sqlite", NULL) && qc_process_init())
if (qc_setup("qc_sqlite", NULL) && qc_process_init(QC_INIT_BOTH))
{
const char s[] = "SELECT @@global.max_allowed_packet";
@ -51,9 +51,9 @@ int main()
// being of the opinion that the statement was not the one to be
// classified and hence an alien parse-tree being passed to sqlite3's
// code generator.
qc_parse(stmt);
qc_parse(stmt, QC_COLLECT_ALL);
qc_process_end();
qc_process_end(QC_INIT_BOTH);
rv = EXIT_SUCCESS;
}

View File

@ -1,4 +1,4 @@
add_library(maxscale-common SHARED adminusers.c alloc.c authenticator.c atomic.c buffer.c config.c config_runtime.c dcb.c filter.c filter.cc externcmd.c paths.c hashtable.c hint.c housekeeper.c load_utils.c log_manager.cc maxscale_pcre2.c misc.c mlist.c modutil.c monitor.c queuemanager.c query_classifier.c poll.c random_jkiss.c resultset.c router.cc secrets.c server.c service.c session.c spinlock.c thread.c users.c utils.c skygw_utils.cc statistics.c listener.c ssl.c mysql_utils.c mysql_binlog.c modulecmd.c )
add_library(maxscale-common SHARED adminusers.c alloc.c authenticator.c atomic.c buffer.c config.c config_runtime.c dcb.c filter.c filter.cc externcmd.c paths.c hashtable.c hint.c housekeeper.c load_utils.c log_manager.cc maxscale_pcre2.c misc.c mlist.c modutil.c monitor.c queuemanager.c query_classifier.cc poll.c random_jkiss.c resultset.c router.cc secrets.c server.c service.c session.c spinlock.c thread.c users.c utils.c skygw_utils.cc statistics.c listener.c ssl.c mysql_utils.c mysql_binlog.c modulecmd.c)
if(WITH_JEMALLOC)
target_link_libraries(maxscale-common ${JEMALLOC_LIBRARIES})

View File

@ -2711,8 +2711,7 @@ dcb_accept(DCB *listener)
if (client_conn.ss_family == AF_UNIX)
{
// client address
// Should this be `localhost` like it is in the MariaDB server?
client_dcb->remote = MXS_STRDUP_A("localhost_from_socket");
client_dcb->remote = MXS_STRDUP_A("localhost");
}
else
{

View File

@ -104,7 +104,7 @@ static int pidfd = PIDFD_CLOSED;
/**
* exit flag for log flusher.
*/
static bool do_exit = FALSE;
static bool do_exit = false;
/**
* If MaxScale is started to run in daemon process the value is true.
@ -138,6 +138,7 @@ static struct option long_options[] =
{"version", no_argument, 0, 'v'},
{"version-full", no_argument, 0, 'V'},
{"help", no_argument, 0, '?'},
{"connector_plugindir", required_argument, 0, 'H'},
{0, 0, 0, 0}
};
static bool syslog_configured = false;
@ -920,6 +921,8 @@ static void usage(void)
" -E, --execdir=PATH path to the maxscale and other executable files\n"
" -F, --persistdir=PATH path to persisted configuration directory\n"
" -M, --module_configdir=PATH path to module configuration directory\n"
" -H, --connector_plugindir=PATH\n"
" path to MariaDB Connector-C plugin directory\n"
" -N, --language=PATH path to errmsg.sys file\n"
" -P, --piddir=PATH path to PID file directory\n"
" -R, --basedir=PATH base path for all other paths\n"
@ -967,15 +970,25 @@ static void usage(void)
*/
void worker_thread_main(void* arg)
{
if (modules_thread_init())
if (qc_thread_init(QC_INIT_SELF))
{
poll_waitevents(arg);
if (modules_thread_init())
{
poll_waitevents(arg);
modules_thread_finish();
modules_thread_finish();
}
else
{
MXS_ERROR("Could not perform thread initialization for all modules. Thread exits.");
}
qc_thread_end(QC_INIT_SELF);
}
else
{
MXS_ERROR("Could not perform thread initialization for all modules. Thread exits.");
MXS_ERROR("Could not perform thread initialization for the "
"internal query classifier. Thread exits.");
}
}
@ -1236,6 +1249,12 @@ bool set_dirs(const char *basedir)
set_config_persistdir(path);
}
if (rv && (rv = handle_path_arg(&path, basedir,
"var/" MXS_DEFAULT_CONNECTOR_PLUGIN_SUBPATH, true, true)))
{
set_connector_plugindir(path);
}
return rv;
}
@ -1325,7 +1344,7 @@ int main(int argc, char **argv)
}
}
while ((opt = getopt_long(argc, argv, "dcf:l:vVs:S:?L:D:C:B:U:A:P:G:N:E:F:M:",
while ((opt = getopt_long(argc, argv, "dcf:l:vVs:S:?L:D:C:B:U:A:P:G:N:E:F:M:H:",
long_options, &option_index)) != -1)
{
bool succp = true;
@ -1491,6 +1510,16 @@ int main(int argc, char **argv)
succp = false;
}
break;
case 'H':
if (handle_path_arg(&tmp_path, optarg, NULL, true, false))
{
set_connector_plugindir(tmp_path);
}
else
{
succp = false;
}
break;
case 'F':
if (handle_path_arg(&tmp_path, optarg, NULL, true, true))
{
@ -1899,6 +1928,16 @@ int main(int argc, char **argv)
dcb_global_init();
/* Initialize the internal query classifier. The plugin will be initialized
* via the module initialization below.
*/
if (!qc_process_init(QC_INIT_SELF))
{
MXS_ERROR("Failed to initialize the internal query classifier.");
rc = MAXSCALE_INTERNALERROR;
goto return_main;
}
/* Init MaxScale modules */
if (!modules_process_init())
{
@ -2017,6 +2056,11 @@ int main(int argc, char **argv)
/*< Call finish on all modules. */
modules_process_finish();
/* Finalize the internal query classifier. The plugin was finalized
* via the module finalizarion above.
*/
qc_process_end(QC_INIT_SELF);
log_exit_status();
MXS_NOTICE("MaxScale is shutting down.");
@ -2078,7 +2122,7 @@ int maxscale_shutdown()
static void log_flush_shutdown(void)
{
do_exit = TRUE;
do_exit = true;
}
@ -2525,6 +2569,20 @@ static int cnf_preparser(void* data, const char* section, const char* name, cons
}
}
}
else if (strcmp(name, "connector_plugindir") == 0)
{
if (strcmp(get_connector_plugindir(), default_connector_plugindir) == 0)
{
if (handle_path_arg((char**)&tmp, (char*)value, NULL, true, false))
{
set_connector_plugindir(tmp);
}
else
{
return 0;
}
}
}
else if (strcmp(name, "persistdir") == 0)
{
if (strcmp(get_config_persistdir(), default_config_persistdir) == 0)

View File

@ -0,0 +1,39 @@
#pragma once
/*
* Copyright (c) 2016 MariaDB Corporation Ab
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file and at www.mariadb.com/bsl11.
*
* Change Date: 2019-07-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2 or later of the General
* Public License.
*/
#include <maxscale/cdefs.h>
#include <maxscale/query_classifier.h>
MXS_BEGIN_DECLS
typedef enum qc_trx_parse_using
{
QC_TRX_PARSE_USING_QC, /**< Use the query classifier. */
QC_TRX_PARSE_USING_PARSER, /**< Use custom parser. */
} qc_trx_parse_using_t;
/**
* Returns the type bitmask of transaction related statements.
*
* @param stmt A COM_QUERY or COM_STMT_PREPARE packet.
* @param use What method should be used.
*
* @return The relevant type bits if the statement is transaction
* related, otherwise 0.
*
* @see qc_get_trx_type_mask
*/
uint32_t qc_get_trx_type_mask_using(GWBUF* stmt, qc_trx_parse_using_t use);
MXS_END_DECLS

View File

@ -0,0 +1,841 @@
#pragma once
/*
* Copyright (c) 2016 MariaDB Corporation Ab
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file and at www.mariadb.com/bsl11.
*
* Change Date: 2019-07-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2 or later of the General
* Public License.
*/
#include <maxscale/cppdefs.hh>
#include <ctype.h>
#include <maxscale/modutil.h>
#include <maxscale/query_classifier.h>
namespace maxscale
{
#define TBP_EXPECT_TOKEN(string_literal) string_literal, (sizeof(string_literal) - 1)
/**
* @class TrxBoundaryParser
*
* TrxBoundaryParser is a class capable of parsing and returning the
* correct type mask of statements affecting the transaction state and
* autocommit mode.
*
* The class is intended to be used in context where the performance is
* of utmost importance; consequently it is defined in its entirety
* in the header to allow for aggressive inlining.
*/
class TrxBoundaryParser
{
public:
enum token_t
{
TK_AUTOCOMMIT,
TK_BEGIN,
TK_COMMA,
TK_COMMIT,
TK_CONSISTENT,
TK_DOT,
TK_EQ,
TK_FALSE,
TK_GLOBAL,
TK_GLOBAL_VAR,
TK_ONE,
TK_ONLY,
TK_READ,
TK_ROLLBACK,
TK_SESSION,
TK_SESSION_VAR,
TK_SET,
TK_SNAPSHOT,
TK_START,
TK_TRANSACTION,
TK_TRUE,
TK_WITH,
TK_WORK,
TK_WRITE,
TK_ZERO,
PARSER_UNKNOWN_TOKEN,
PARSER_EXHAUSTED,
};
/**
* TrxBoundaryParser is not thread-safe. As a very lightweight class,
* the intention is that an instance is created on the stack whenever
* parsing needs to be performed.
*
* @code
* void f(GWBUF *pBuf)
* {
* TrxBoundaryParser tbp;
*
* uint32_t type_mask = tbp.parse(pBuf);
* ...
* }
* @endcode
*/
TrxBoundaryParser()
: m_pSql(NULL)
, m_len(0)
, m_pI(NULL)
, m_pEnd(NULL)
{
}
/**
* Return the type mask of a statement, provided the statement affects
* transaction state or autocommit mode.
*
* @param pSql SQL statament.
* @param len Length of pSql.
*
* @return The corresponding type mask or 0, if the statement does not
* affect transaction state or autocommit mode.
*/
uint32_t type_mask_of(const char* pSql, size_t len)
{
    // Record the statement and position the cursor at its beginning;
    // parse() then produces the type mask (0 if the statement does not
    // affect transaction state or autocommit mode).
    m_pSql = pSql;
    m_len = len;
    m_pI = m_pSql;
    m_pEnd = m_pI + m_len;

    return parse();
}
/**
* Return the type mask of a statement, provided the statement affects
* transaction state or autocommit mode.
*
* @param pBuf A COM_QUERY
*
* @return The corresponding type mask or 0, if the statement does not
* affect transaction state or autocommit mode.
*/
uint32_t type_mask_of(GWBUF* pBuf)
{
    uint32_t type_mask = 0;

    char* pSql;
    // Extract the SQL payload from the packet; on failure (e.g. not a
    // COM_QUERY packet) the statement is simply not classified and 0
    // is returned.
    if (modutil_extract_SQL(pBuf, &pSql, &m_len))
    {
        // Position the parsing cursor at the start of the statement.
        m_pSql = pSql;
        m_pI = m_pSql;
        m_pEnd = m_pI + m_len;

        type_mask = parse();
    }

    return type_mask;
}
private:
enum token_required_t
{
TOKEN_REQUIRED,
TOKEN_NOT_REQUIRED,
};
void log_unexpected()
{
#ifdef SS_DEBUG
MXS_NOTICE("Transaction tracking: In statement '%.*s', unexpected token at '%.*s'.",
(int)m_len, m_pSql, (int)(m_pEnd - m_pI), m_pI);
#endif
}
void log_exhausted()
{
#ifdef SS_DEBUG
MXS_NOTICE("Transaction tracking: More tokens expected in statement '%.*s'.", (int)m_len, m_pSql);
#endif
}
uint32_t parse()
{
uint32_t type_mask = 0;
token_t token = next_token();
switch (token)
{
case TK_BEGIN:
type_mask = parse_begin(type_mask);
break;
case TK_COMMIT:
type_mask = parse_commit(type_mask);
break;
case TK_ROLLBACK:
type_mask = parse_rollback(type_mask);
break;
case TK_START:
type_mask = parse_start(type_mask);
break;
case TK_SET:
type_mask = parse_set(0);
break;
default:
;
}
return type_mask;
}
uint32_t parse_begin(uint32_t type_mask)
{
type_mask |= QUERY_TYPE_BEGIN_TRX;
token_t token = next_token();
switch (token)
{
case TK_WORK:
type_mask = parse_work(type_mask);
break;
case PARSER_EXHAUSTED:
break;
default:
type_mask = 0;
log_unexpected();
}
return type_mask;
}
uint32_t parse_commit(uint32_t type_mask)
{
type_mask |= QUERY_TYPE_COMMIT;
token_t token = next_token();
switch (token)
{
case TK_WORK:
type_mask = parse_work(type_mask);
break;
case PARSER_EXHAUSTED:
break;
default:
type_mask = 0;
log_unexpected();
}
return type_mask;
}
uint32_t parse_only(uint32_t type_mask)
{
type_mask |= QUERY_TYPE_READ;
token_t token = next_token();
switch (token)
{
case TK_COMMA:
type_mask = parse_transaction(type_mask);
break;
case PARSER_EXHAUSTED:
break;
default:
type_mask = 0;
log_unexpected();
}
return type_mask;
}
uint32_t parse_read(uint32_t type_mask)
{
token_t token = next_token(TOKEN_REQUIRED);
switch (token)
{
case TK_ONLY:
type_mask = parse_only(type_mask);
break;
case TK_WRITE:
type_mask = parse_write(type_mask);
break;
case PARSER_EXHAUSTED:
type_mask = 0;
break;
default:
type_mask = 0;
log_unexpected();
}
return type_mask;
}
uint32_t parse_rollback(uint32_t type_mask)
{
type_mask |= QUERY_TYPE_ROLLBACK;
token_t token = next_token();
switch (token)
{
case TK_WORK:
type_mask = parse_work(type_mask);
break;
case PARSER_EXHAUSTED:
break;
default:
type_mask = 0;
log_unexpected();
}
return type_mask;
}
uint32_t parse_set_autocommit(uint32_t type_mask)
{
token_t token = next_token(TOKEN_REQUIRED);
switch (token)
{
case TK_EQ:
token = next_token(TOKEN_REQUIRED);
if (token == TK_ONE || token == TK_TRUE)
{
type_mask |= (QUERY_TYPE_COMMIT | QUERY_TYPE_ENABLE_AUTOCOMMIT);
}
else if (token == TK_ZERO || token == TK_FALSE)
{
type_mask = (QUERY_TYPE_BEGIN_TRX | QUERY_TYPE_DISABLE_AUTOCOMMIT);
}
else
{
type_mask = 0;
if (token != PARSER_EXHAUSTED)
{
log_unexpected();
}
}
break;
case PARSER_EXHAUSTED:
type_mask = 0;
break;
default:
type_mask = 0;
log_unexpected();
}
return type_mask;
}
uint32_t parse_set(uint32_t type_mask)
{
token_t token = next_token(TOKEN_REQUIRED);
switch (token)
{
case TK_AUTOCOMMIT:
type_mask = parse_set_autocommit(type_mask);
break;
case TK_GLOBAL:
case TK_SESSION:
token = next_token(TOKEN_REQUIRED);
if (token == TK_AUTOCOMMIT)
{
type_mask = parse_set_autocommit(type_mask);
}
else
{
type_mask = 0;
if (token != PARSER_EXHAUSTED)
{
log_unexpected();
}
}
break;
case TK_GLOBAL_VAR:
case TK_SESSION_VAR:
token = next_token(TOKEN_REQUIRED);
if (token == TK_DOT)
{
token = next_token(TOKEN_REQUIRED);
if (token == TK_AUTOCOMMIT)
{
type_mask = parse_set_autocommit(type_mask);
}
else
{
type_mask = 0;
if (token != PARSER_EXHAUSTED)
{
log_unexpected();
}
}
}
else
{
type_mask = 0;
if (token != PARSER_EXHAUSTED)
{
log_unexpected();
}
}
break;
case PARSER_EXHAUSTED:
type_mask = 0;
break;
default:
type_mask = 0;
log_unexpected();
}
return type_mask;
}
uint32_t parse_start(uint32_t type_mask)
{
token_t token = next_token(TOKEN_REQUIRED);
switch (token)
{
case TK_TRANSACTION:
type_mask = parse_transaction(type_mask);
break;
case PARSER_EXHAUSTED:
type_mask = 0;
break;
default:
type_mask = 0;
log_unexpected();
}
return type_mask;
}
uint32_t parse_transaction(uint32_t type_mask)
{
type_mask |= QUERY_TYPE_BEGIN_TRX;
token_t token = next_token();
switch (token)
{
case TK_READ:
type_mask = parse_read(type_mask);
break;
case TK_WITH:
type_mask = parse_with_consistent_snapshot(type_mask);
break;
case PARSER_EXHAUSTED:
break;
default:
type_mask = 0;
log_unexpected();
}
return type_mask;
}
uint32_t parse_with_consistent_snapshot(uint32_t type_mask)
{
token_t token = next_token(TOKEN_REQUIRED);
if (token == TK_CONSISTENT)
{
token = next_token(TOKEN_REQUIRED);
if (token == TK_SNAPSHOT)
{
token = next_token();
switch (token)
{
case TK_COMMA:
type_mask = parse_transaction(type_mask);
break;
case PARSER_EXHAUSTED:
break;
default:
type_mask = 0;
log_unexpected();
}
}
}
return type_mask;
}
uint32_t parse_work(uint32_t type_mask)
{
token_t token = next_token();
switch (token)
{
case PARSER_EXHAUSTED:
break;
default:
type_mask = 0;
log_unexpected();
}
return type_mask;
}
uint32_t parse_write(uint32_t type_mask)
{
type_mask |= QUERY_TYPE_WRITE;
token_t token = next_token();
switch (token)
{
case TK_COMMA:
type_mask = parse_transaction(type_mask);
break;
case PARSER_EXHAUSTED:
break;
default:
type_mask = 0;
log_unexpected();
}
return type_mask;
}
// Returns true if the character at @c offset from the current parse
// position matches @c uc case-insensitively. @c uc must be an
// upper-case ASCII letter; the lower-case counterpart is derived here.
inline bool is_next_alpha(char uc, int offset = 1) const
{
    ss_dassert(uc >= 'A' && uc <= 'Z');

    char lc = uc + ('a' - 'A');

    // Bounds check first so we never dereference past the end.
    return
        ((m_pI + offset) < m_pEnd) &&
        ((*(m_pI + offset) == uc) || (*(m_pI + offset) == lc));
}
bool is_next_char(char c, int offset = 1) const
{
return ((m_pI + offset) < m_pEnd) && (*(m_pI + offset) == c);
}
bool peek_next_char(char* pC) const
{
bool rc = (m_pI + 1 < m_pEnd);
if (rc)
{
*pC = *(m_pI + 1);
}
return rc;
}
// ASCII-only upper-casing. Significantly faster than the library
// version, which pays for locale handling.
static char toupper(char c)
{
    if (c >= 'a' && c <= 'z')
    {
        c = static_cast<char>(c - ('a' - 'A'));
    }

    return c;
}
// Attempts to match the keyword @c zWord (upper-case, @c len bytes, no
// NUL terminator required) case-insensitively at the current parse
// position. On a full match that is not followed by another alphabetic
// character, the parse position is advanced past the keyword and
// @c token is returned; otherwise the position is left unchanged and
// PARSER_UNKNOWN_TOKEN is returned.
token_t expect_token(const char* zWord, int len, token_t token)
{
    const char* pI = m_pI;
    const char* pEnd = zWord + len;

    // Walk both strings while they agree (input upper-cased on the fly).
    while ((pI < m_pEnd) && (zWord < pEnd) && (toupper(*pI) == *zWord))
    {
        ++pI;
        ++zWord;
    }

    if (zWord == pEnd)
    {
        // The whole keyword matched; it is only accepted if the input is
        // exhausted or the next character cannot extend an identifier.
        if ((pI == m_pEnd) || (!isalpha(*pI))) // Handwritten isalpha not faster than library version.
        {
            m_pI = pI;
        }
        else
        {
            token = PARSER_UNKNOWN_TOKEN;
        }
    }
    else
    {
        token = PARSER_UNKNOWN_TOKEN;
    }

    return token;
}
void bypass_whitespace()
{
m_pI = modutil_MySQL_bypass_whitespace(const_cast<char*>(m_pI), m_pEnd - m_pI);
}
/**
 * Return the next token of the statement being parsed.
 *
 * Leading whitespace and comments are skipped first. A semicolon ends the
 * statement, so it (and any trailing whitespace) yields PARSER_EXHAUSTED;
 * anything that is not a recognized keyword or punctuation yields
 * PARSER_UNKNOWN_TOKEN.
 *
 * @param required If TOKEN_REQUIRED, running out of input is logged.
 *
 * @return The next token.
 */
token_t next_token(token_required_t required = TOKEN_NOT_REQUIRED)
{
    token_t token = PARSER_UNKNOWN_TOKEN;

    bypass_whitespace();

    if (m_pI == m_pEnd)
    {
        token = PARSER_EXHAUSTED;
    }
    else if (*m_pI == ';')
    {
        ++m_pI;

        // Consume whitespace following the semicolon.
        // NOTE(review): isspace() is called with a plain char; a negative
        // value (UTF-8 byte >= 0x80) is undefined behavior -- consider
        // casting to unsigned char.
        while ((m_pI != m_pEnd) && isspace(*m_pI))
        {
            ++m_pI;
        }

        if (m_pI != m_pEnd)
        {
            MXS_WARNING("Non-space data found after semi-colon: '%.*s'.",
                        (int)(m_pEnd - m_pI), m_pI);
        }

        token = PARSER_EXHAUSTED;
    }
    else
    {
        // Dispatch on the first character; expect_token() then verifies
        // the rest of the keyword case-insensitively.
        switch (*m_pI)
        {
        case '@':
            // The character at offset 2 (after "@@") selects the variable.
            if (is_next_alpha('A', 2))
            {
                token = expect_token(TBP_EXPECT_TOKEN("@@AUTOCOMMIT"), TK_AUTOCOMMIT);
            }
            else if (is_next_alpha('S', 2))
            {
                token = expect_token(TBP_EXPECT_TOKEN("@@SESSION"), TK_SESSION_VAR);
            }
            else if (is_next_alpha('G', 2))
            {
                token = expect_token(TBP_EXPECT_TOKEN("@@GLOBAL"), TK_GLOBAL_VAR);
            }
            break;

        case 'a':
        case 'A':
            token = expect_token(TBP_EXPECT_TOKEN("AUTOCOMMIT"), TK_AUTOCOMMIT);
            break;

        case 'b':
        case 'B':
            token = expect_token(TBP_EXPECT_TOKEN("BEGIN"), TK_BEGIN);
            break;

        case ',':
            ++m_pI;
            token = TK_COMMA;
            break;

        case 'c':
        case 'C':
            // COMMIT vs CONSISTENT, disambiguated by the third character.
            if (is_next_alpha('O'))
            {
                if (is_next_alpha('M', 2))
                {
                    token = expect_token(TBP_EXPECT_TOKEN("COMMIT"), TK_COMMIT);
                }
                else if (is_next_alpha('N', 2))
                {
                    token = expect_token(TBP_EXPECT_TOKEN("CONSISTENT"), TK_CONSISTENT);
                }
            }
            break;

        case '.':
            ++m_pI;
            token = TK_DOT;
            break;

        case '=':
            ++m_pI;
            token = TK_EQ;
            break;

        case 'f':
        case 'F':
            token = expect_token(TBP_EXPECT_TOKEN("FALSE"), TK_FALSE);
            break;

        case 'g':
        case 'G':
            token = expect_token(TBP_EXPECT_TOKEN("GLOBAL"), TK_GLOBAL);
            break;

        case '1':
            {
                // A lone "1" only; "10", "123" etc. stay unknown tokens.
                char c;
                if (!peek_next_char(&c) || !isdigit(c))
                {
                    ++m_pI;
                    token = TK_ONE;
                }
            }
            break;

        case 'o':
        case 'O':
            if (is_next_alpha('F'))
            {
                // OFF is equivalent to 0 for autocommit purposes.
                token = expect_token(TBP_EXPECT_TOKEN("OFF"), TK_ZERO);
            }
            else if (is_next_alpha('N'))
            {
                // ONLY vs plain ON (equivalent to 1).
                // NOTE(review): is_next_char() is used here, unlike the
                // is_next_alpha() calls elsewhere; if it compares
                // case-sensitively, lower-case "only" would fall into the
                // ON branch -- confirm.
                if (is_next_char('L', 2))
                {
                    token = expect_token(TBP_EXPECT_TOKEN("ONLY"), TK_ONLY);
                }
                else
                {
                    token = expect_token(TBP_EXPECT_TOKEN("ON"), TK_ONE);
                }
            }
            break;

        case 'r':
        case 'R':
            if (is_next_alpha('E'))
            {
                token = expect_token(TBP_EXPECT_TOKEN("READ"), TK_READ);
            }
            else if (is_next_alpha('O'))
            {
                token = expect_token(TBP_EXPECT_TOKEN("ROLLBACK"), TK_ROLLBACK);
            }
            break;

        case 's':
        case 'S':
            if (is_next_alpha('E'))
            {
                // SESSION vs SET, disambiguated by the third character.
                if (is_next_alpha('S', 2))
                {
                    token = expect_token(TBP_EXPECT_TOKEN("SESSION"), TK_SESSION);
                }
                else
                {
                    token = expect_token(TBP_EXPECT_TOKEN("SET"), TK_SET);
                }
            }
            else if (is_next_alpha('N'))
            {
                token = expect_token(TBP_EXPECT_TOKEN("SNAPSHOT"), TK_SNAPSHOT);
            }
            else if (is_next_char('T'))
            {
                // NOTE(review): the sibling branches use is_next_alpha();
                // if is_next_char() is case-sensitive, lower-case "start"
                // would not be recognized here -- confirm.
                token = expect_token(TBP_EXPECT_TOKEN("START"), TK_START);
            }
            break;

        case 't':
        case 'T':
            // TRANSACTION vs TRUE, disambiguated by the third character.
            if (is_next_alpha('R'))
            {
                if (is_next_alpha('A', 2))
                {
                    token = expect_token(TBP_EXPECT_TOKEN("TRANSACTION"), TK_TRANSACTION);
                }
                else if (is_next_alpha('U', 2))
                {
                    token = expect_token(TBP_EXPECT_TOKEN("TRUE"), TK_TRUE);
                }
            }
            break;

        case 'w':
        case 'W':
            if (is_next_alpha('I'))
            {
                token = expect_token(TBP_EXPECT_TOKEN("WITH"), TK_WITH);
            }
            else if (is_next_alpha('O'))
            {
                token = expect_token(TBP_EXPECT_TOKEN("WORK"), TK_WORK);
            }
            else if (is_next_alpha('R'))
            {
                token = expect_token(TBP_EXPECT_TOKEN("WRITE"), TK_WRITE);
            }
            break;

        case '0':
            {
                // A lone "0" only; longer numbers stay unknown tokens.
                char c;
                if (!peek_next_char(&c) || !isdigit(c))
                {
                    ++m_pI;
                    token = TK_ZERO;
                }
            }
            break;

        default:
            ;
        }
    }

    if ((token == PARSER_EXHAUSTED) && (required == TOKEN_REQUIRED))
    {
        log_exhausted();
    }

    return token;
}
private:
TrxBoundaryParser(const TrxBoundaryParser&);
TrxBoundaryParser& operator = (const TrxBoundaryParser&);
private:
const char* m_pSql;
int m_len;
const char* m_pI;
const char* m_pEnd;
};
}

View File

@ -1213,3 +1213,111 @@ char* modutil_get_canonical(GWBUF* querybuf)
return querystr;
}
/**
 * Skip leading whitespace and comments of a MySQL statement.
 *
 * Whitespace, C-style comments, "-- " comments and "#" comments
 * (the latter two terminated by a newline or the end of the buffer)
 * are all skipped.
 *
 * @param sql Pointer to the start of the statement.
 * @param len Length of the statement.
 *
 * @return Pointer to the first byte that is neither whitespace nor part
 *         of a comment; may be the end of the buffer.
 */
char* modutil_MySQL_bypass_whitespace(char* sql, size_t len)
{
    char *i = sql;
    char *end = i + len;

    while (i != end)
    {
        // Cast to unsigned char: passing a negative plain char to
        // isspace() (e.g. a UTF-8 byte >= 0x80) is undefined behavior.
        if (isspace((unsigned char)*i))
        {
            ++i;
        }
        else if (*i == '/') // Might be a comment
        {
            if ((i + 1 != end) && (*(i + 1) == '*')) // Indeed it was
            {
                i += 2;

                while (i != end)
                {
                    if (*i == '*') // Might be the end of the comment
                    {
                        ++i;

                        if (i != end)
                        {
                            if (*i == '/') // Indeed it was
                            {
                                ++i;
                                break; // Out of this inner while.
                            }
                        }
                    }
                    else
                    {
                        // It was not the end of the comment.
                        ++i;
                    }
                }
            }
            else
            {
                // Was not a comment, so we'll bail out.
                break;
            }
        }
        else if (*i == '-') // Might be the start of a comment to the end of line
        {
            bool is_comment = false;

            if (i + 1 != end)
            {
                if (*(i + 1) == '-') // Might be, yes.
                {
                    if (i + 2 != end)
                    {
                        if (isspace((unsigned char)*(i + 2))) // Yes, it is.
                        {
                            is_comment = true;

                            i += 3;

                            while ((i != end) && (*i != '\n'))
                            {
                                ++i;
                            }

                            if (i != end)
                            {
                                ss_dassert(*i == '\n');
                                ++i;
                            }
                        }
                    }
                }
            }

            if (!is_comment)
            {
                break;
            }
        }
        else if (*i == '#') // A comment to the end of the line.
        {
            ++i;

            while ((i != end) && (*i != '\n'))
            {
                ++i;
            }

            if (i != end)
            {
                ss_dassert(*i == '\n');
                ++i;
            }

            // Keep scanning: just as with "--" comments, further
            // whitespace or comments may follow on subsequent lines.
        }
        else
        {
            // Neither whitespace nor the start of a comment, so we bail out.
            break;
        }
    }

    return i;
}

View File

@ -1181,10 +1181,10 @@ mon_connect_to_db(MXS_MONITOR* mon, MXS_MONITOR_SERVERS *database)
char *dpwd = decrypt_password(passwd);
mysql_options(database->con, MYSQL_OPT_CONNECT_TIMEOUT, (void *) &mon->connect_timeout);
mysql_options(database->con, MYSQL_OPT_READ_TIMEOUT, (void *) &mon->read_timeout);
mysql_options(database->con, MYSQL_OPT_WRITE_TIMEOUT, (void *) &mon->write_timeout);
mysql_optionsv(database->con, MYSQL_OPT_CONNECT_TIMEOUT, (void *) &mon->connect_timeout);
mysql_optionsv(database->con, MYSQL_OPT_READ_TIMEOUT, (void *) &mon->read_timeout);
mysql_optionsv(database->con, MYSQL_OPT_WRITE_TIMEOUT, (void *) &mon->write_timeout);
mysql_optionsv(database->con, MYSQL_PLUGIN_DIR, get_connector_plugindir());
time_t start = time(NULL);
bool result = (mxs_mysql_real_connect(database->con, database->server, uname, dpwd) != NULL);
time_t end = time(NULL);

View File

@ -137,6 +137,17 @@ void set_execdir(char* param)
execdir = param;
}
/**
 * Set the connector plugin directory.
 *
 * Takes ownership of @c param: the string is cleaned up in place and
 * stored as-is, and the previously stored value is freed with MXS_FREE.
 *
 * @param param Path to directory; must be heap-allocated (e.g. strdup'd).
 */
void set_connector_plugindir(char* param)
{
    MXS_FREE(connector_plugindir);
    clean_up_pathname(param);
    connector_plugindir = param;
}
/**
* Get the directory with all the modules.
* @return The module directory
@ -235,3 +246,12 @@ char* get_execdir()
{
return execdir ? execdir : (char*) default_execdir;
}
/**
 * Get the connector plugin directory.
 *
 * @return The directory set with set_connector_plugindir(), or the
 *         compiled-in default if none has been set.
 */
char* get_connector_plugindir()
{
    if (connector_plugindir)
    {
        return connector_plugindir;
    }

    return (char*) default_connector_plugindir;
}

View File

@ -11,11 +11,14 @@
* Public License.
*/
#include <maxscale/query_classifier.h>
#include "maxscale/query_classifier.h"
#include <maxscale/log_manager.h>
#include <maxscale/modutil.h>
#include <maxscale/alloc.h>
#include <maxscale/platform.h>
#include <maxscale/pcre2.h>
#include <maxscale/utils.h>
#include "maxscale/trxboundaryparser.hh"
#include "../core/maxscale/modules.h"
@ -34,10 +37,13 @@ struct type_name_info
size_t name_len;
};
static const char default_qc_name[] = "qc_sqlite";
static const char DEFAULT_QC_NAME[] = "qc_sqlite";
static const char QC_TRX_PARSE_USING[] = "QC_TRX_PARSE_USING";
static QUERY_CLASSIFIER* classifier;
static qc_trx_parse_using_t qc_trx_parse_using = QC_TRX_PARSE_USING_PARSER;
bool qc_setup(const char* plugin_name, const char* plugin_args)
{
@ -46,8 +52,8 @@ bool qc_setup(const char* plugin_name, const char* plugin_args)
if (!plugin_name || (*plugin_name == 0))
{
MXS_NOTICE("No query classifier specified, using default '%s'.", default_qc_name);
plugin_name = default_qc_name;
MXS_NOTICE("No query classifier specified, using default '%s'.", DEFAULT_QC_NAME);
plugin_name = DEFAULT_QC_NAME;
}
int32_t rv = QC_RESULT_ERROR;
@ -60,28 +66,67 @@ bool qc_setup(const char* plugin_name, const char* plugin_args)
if (rv != QC_RESULT_OK)
{
qc_unload(classifier);
classifier = NULL;
}
}
return (rv == QC_RESULT_OK) ? true : false;
}
bool qc_process_init(void)
bool qc_process_init(uint32_t kind)
{
QC_TRACE();
ss_dassert(classifier);
return classifier->qc_process_init() == 0;
const char* parse_using = getenv(QC_TRX_PARSE_USING);
if (parse_using)
{
if (strcmp(parse_using, "QC_TRX_PARSE_USING_QC") == 0)
{
qc_trx_parse_using = QC_TRX_PARSE_USING_QC;
MXS_NOTICE("Transaction detection using QC.");
}
else if (strcmp(parse_using, "QC_TRX_PARSE_USING_PARSER") == 0)
{
qc_trx_parse_using = QC_TRX_PARSE_USING_PARSER;
MXS_NOTICE("Transaction detection using custom PARSER.");
}
else
{
MXS_NOTICE("QC_TRX_PARSE_USING set, but the value %s is not known. "
"Parsing using QC.", parse_using);
}
}
bool rc = qc_thread_init(QC_INIT_SELF);
if (rc)
{
if (kind & QC_INIT_PLUGIN)
{
rc = classifier->qc_process_init() == 0;
if (!rc)
{
qc_thread_end(QC_INIT_SELF);
}
}
}
return rc;
}
void qc_process_end(void)
void qc_process_end(uint32_t kind)
{
QC_TRACE();
ss_dassert(classifier);
classifier->qc_process_end();
classifier = NULL;
if (kind & QC_INIT_PLUGIN)
{
classifier->qc_process_end();
}
qc_thread_end(QC_INIT_SELF);
}
QUERY_CLASSIFIER* qc_load(const char* plugin_name)
@ -104,32 +149,43 @@ void qc_unload(QUERY_CLASSIFIER* classifier)
{
// TODO: The module loading/unloading needs an overhaul before we
// TODO: actually can unload something.
classifier = NULL;
}
bool qc_thread_init(void)
bool qc_thread_init(uint32_t kind)
{
QC_TRACE();
ss_dassert(classifier);
return classifier->qc_thread_init() == 0;
bool rc = true;
if (kind & QC_INIT_PLUGIN)
{
rc = classifier->qc_thread_init() == 0;
}
return rc;
}
void qc_thread_end(void)
void qc_thread_end(uint32_t kind)
{
QC_TRACE();
ss_dassert(classifier);
return classifier->qc_thread_end();
if (kind & QC_INIT_PLUGIN)
{
classifier->qc_thread_end();
}
}
qc_parse_result_t qc_parse(GWBUF* query)
qc_parse_result_t qc_parse(GWBUF* query, uint32_t collect)
{
QC_TRACE();
ss_dassert(classifier);
int32_t result = QC_QUERY_INVALID;
classifier->qc_parse(query, &result);
classifier->qc_parse(query, collect, &result);
return (qc_parse_result_t)result;
}
@ -778,3 +834,70 @@ char* qc_typemask_to_string(uint32_t types)
return s;
}
/**
 * Get the transaction related type mask of a statement using the loaded
 * query classifier plugin.
 *
 * @param stmt A COM_QUERY buffer.
 *
 * @return A mask containing only transaction and autocommit related bits.
 */
static uint32_t qc_get_trx_type_mask_using_qc(GWBUF* stmt)
{
    uint32_t type_mask = qc_get_type_mask(stmt);

    bool implicit_commit = qc_query_is_type(type_mask, QUERY_TYPE_WRITE) &&
                           qc_query_is_type(type_mask, QUERY_TYPE_COMMIT);

    if (implicit_commit)
    {
        // A commit reported for "CREATE TABLE...", "DROP TABLE...", etc.
        // that cause an implicit commit; not an explicit transaction op.
        type_mask = 0;
    }
    else
    {
        if (!(type_mask & QUERY_TYPE_BEGIN_TRX))
        {
            // Only START TRANSACTION can be explicitly READ or WRITE,
            // so strip those bits from everything else.
            type_mask &= ~(QUERY_TYPE_WRITE | QUERY_TYPE_READ);
        }

        // Keep only the bits related to transaction and autocommit state.
        uint32_t relevant_bits = QUERY_TYPE_BEGIN_TRX
                                 | QUERY_TYPE_WRITE
                                 | QUERY_TYPE_READ
                                 | QUERY_TYPE_COMMIT
                                 | QUERY_TYPE_ROLLBACK
                                 | QUERY_TYPE_ENABLE_AUTOCOMMIT
                                 | QUERY_TYPE_DISABLE_AUTOCOMMIT;

        type_mask &= relevant_bits;
    }

    return type_mask;
}
/**
 * Get the transaction related type mask of a statement using the custom
 * transaction boundary parser.
 *
 * @param stmt A COM_QUERY buffer.
 *
 * @return The transaction related type mask.
 */
static uint32_t qc_get_trx_type_mask_using_parser(GWBUF* stmt)
{
    maxscale::TrxBoundaryParser boundary_parser;

    return boundary_parser.type_mask_of(stmt);
}
/**
 * Get the transaction related type mask of a statement using a specific
 * parsing approach.
 *
 * @param stmt A COM_QUERY buffer.
 * @param use  Which approach to use.
 *
 * @return The transaction related type mask, or 0 for an unknown approach.
 */
uint32_t qc_get_trx_type_mask_using(GWBUF* stmt, qc_trx_parse_using_t use)
{
    switch (use)
    {
    case QC_TRX_PARSE_USING_QC:
        return qc_get_trx_type_mask_using_qc(stmt);

    case QC_TRX_PARSE_USING_PARSER:
        return qc_get_trx_type_mask_using_parser(stmt);

    default:
        ss_dassert(!true);
        return 0;
    }
}
// Get the transaction related type mask using the approach selected at
// startup (overridable via the QC_TRX_PARSE_USING environment variable).
uint32_t qc_get_trx_type_mask(GWBUF* stmt)
{
    return qc_get_trx_type_mask_using(stmt, qc_trx_parse_using);
}

View File

@ -1379,3 +1379,22 @@ void server_clear_status(SERVER *server, int bit)
}
spinlock_release(&server->lock);
}
/**
 * Check whether a server actually points back to a local MaxScale service.
 *
 * @param server The server to inspect.
 *
 * @return True if the server address is a local one and some service
 *         listener is bound to the server's port.
 */
bool server_is_mxs_service(const SERVER *server)
{
    /** Do a coarse check for local server pointing to a MaxScale service */
    bool is_local_address = strcmp(server->name, "127.0.0.1") == 0 ||
                            strcmp(server->name, "::1") == 0 ||
                            strcmp(server->name, "localhost") == 0 ||
                            strcmp(server->name, "localhost.localdomain") == 0;

    return is_local_address && service_port_is_used(server->port);
}

View File

@ -2299,3 +2299,29 @@ void service_print_users(DCB *dcb, const SERVICE *service)
}
}
}
/**
 * Check whether any service listener is bound to the given port.
 *
 * Walks all services under the global service lock, taking each service's
 * own lock while its listeners are inspected.
 *
 * @param port The TCP port to look for.
 *
 * @return True if a listener of some service uses the port.
 */
bool service_port_is_used(unsigned short port)
{
    bool rval = false;

    spinlock_acquire(&service_spin);

    SERVICE *service = allServices;

    while (service && !rval)
    {
        spinlock_acquire(&service->spin);

        for (SERV_LISTENER *proto = service->ports; proto && !rval; proto = proto->next)
        {
            rval = (proto->port == port);
        }

        spinlock_release(&service->spin);

        service = service->next;
    }

    spinlock_release(&service_spin);

    return rval;
}

View File

@ -11,48 +11,27 @@
* Public License.
*/
/**
* @file spinlock.c - Spinlock operations for the MariaDB Corporation MaxScale
*
* @verbatim
* Revision History
*
* Date Who Description
* 10/06/13 Mark Riddoch Initial implementation
*
* @endverbatim
*/
#include <maxscale/spinlock.h>
#include <maxscale/atomic.h>
#include <time.h>
#include <maxscale/debug.h>
/**
* Initialise a spinlock.
*
* @param lock The spinlock to initialise.
*/
void
spinlock_init(SPINLOCK *lock)
void spinlock_init(SPINLOCK *lock)
{
lock->lock = 0;
#if SPINLOCK_PROFILE
lock->spins = 0;
lock->maxspins = 0;
lock->acquired = 0;
lock->waiting = 0;
lock->max_waiting = 0;
lock->contended = 0;
lock->owner = 0;
#endif
}
/**
* Acquire a spinlock.
*
* @param lock The spinlock to acquire
*/
void
spinlock_acquire(const SPINLOCK *const_lock)
void spinlock_acquire(const SPINLOCK *const_lock)
{
SPINLOCK *lock = (SPINLOCK*)const_lock;
#if SPINLOCK_PROFILE
@ -61,20 +40,14 @@ spinlock_acquire(const SPINLOCK *const_lock)
atomic_add(&(lock->waiting), 1);
#endif
#ifdef __GNUC__
while (__sync_lock_test_and_set(&(lock->lock), 1))
while (lock->lock)
{
#else
while (atomic_add(&(lock->lock), 1) != 0)
{
atomic_add(&(lock->lock), -1);
#endif
#if SPINLOCK_PROFILE
atomic_add(&(lock->spins), 1);
spins++;
atomic_add(&(lock->spins), 1);
spins++;
#endif
}
}
#if SPINLOCK_PROFILE
if (spins)
{
@ -90,42 +63,24 @@ spinlock_acquire(const SPINLOCK *const_lock)
#endif
}
/**
* Acquire a spinlock if it is not already locked.
*
* @param lock The spinlock to acquire
* @return True if the spinlock was acquired, otherwise false
*/
int
bool
spinlock_acquire_nowait(const SPINLOCK *const_lock)
{
SPINLOCK *lock = (SPINLOCK*)const_lock;
#ifdef __GNUC__
if (__sync_lock_test_and_set(&(lock->lock), 1))
{
return FALSE;
return false;
}
#else
if (atomic_add(&(lock->lock), 1) != 0)
{
atomic_add(&(lock->lock), -1);
return FALSE;
}
#endif
#if SPINLOCK_PROFILE
lock->acquired++;
lock->owner = thread_self();
#endif
return TRUE;
return true;
}
/*
* Release a spinlock.
*
* @param lock The spinlock to release
*/
void
spinlock_release(const SPINLOCK *const_lock)
void spinlock_release(const SPINLOCK *const_lock)
{
SPINLOCK *lock = (SPINLOCK*)const_lock;
ss_dassert(lock->lock != 0);
@ -135,48 +90,32 @@ spinlock_release(const SPINLOCK *const_lock)
lock->max_waiting = lock->waiting;
}
#endif
#ifdef __GNUC__
__sync_synchronize(); /* Memory barrier. */
lock->lock = 0;
#else
atomic_add(&(lock->lock), -1);
#endif
__sync_lock_release(&lock->lock);
}
/**
* Report statistics on a spinlock. This only has an effect if the
* spinlock code has been compiled with the SPINLOCK_PROFILE option set.
*
* NB A callback function is used to return the data rather than
* merely printing to a DCB in order to avoid a dependency on the DCB
* form the spinlock code and also to facilitate other uses of the
* statistics reporting.
*
* @param lock The spinlock to report on
* @param reporter The callback function to pass the statistics to
* @param hdl A handle that is passed to the reporter function
*/
void
spinlock_stats(const SPINLOCK *lock, void (*reporter)(void *, char *, int), void *hdl)
void spinlock_stats(const SPINLOCK *lock, void (*reporter)(void *, char *, int), void *hdl)
{
#if SPINLOCK_PROFILE
reporter(hdl, "Spinlock acquired", lock->acquired);
if (lock->acquired)
{
reporter(hdl, "Total no. of spins", lock->spins);
reporter(hdl, "Average no. of spins (overall)",
lock->spins / lock->acquired);
if (lock->acquired)
{
reporter(hdl, "Average no. of spins (overall)", lock->spins / lock->acquired);
}
if (lock->contended)
{
reporter(hdl, "Average no. of spins (when contended)",
lock->spins / lock->contended);
reporter(hdl, "Average no. of spins (when contended)", lock->spins / lock->contended);
}
reporter(hdl, "Maximum no. of spins", lock->maxspins);
reporter(hdl, "Maximim no. of blocked threads",
lock->max_waiting);
reporter(hdl, "Maximim no. of blocked threads", lock->max_waiting);
reporter(hdl, "Contended locks", lock->contended);
reporter(hdl, "Contention percentage",
(lock->contended * 100) / lock->acquired);
if (lock->acquired)
{
reporter(hdl, "Contention percentage", (lock->contended * 100) / lock->acquired);
}
}
#endif
}

View File

@ -13,11 +13,14 @@ add_executable(test_queuemanager testqueuemanager.c)
add_executable(test_server testserver.c)
add_executable(test_service testservice.c)
add_executable(test_spinlock testspinlock.c)
add_executable(test_trxcompare testtrxcompare.cc ../../../query_classifier/test/testreader.cc)
add_executable(test_trxtracking testtrxtracking.cc)
add_executable(test_users testusers.c)
add_executable(testfeedback testfeedback.c)
add_executable(testmaxscalepcre2 testmaxscalepcre2.c)
add_executable(testmodulecmd testmodulecmd.c)
add_executable(testconfig testconfig.c)
add_executable(trxboundaryparser_profile trxboundaryparser_profile.cc)
target_link_libraries(test_adminusers maxscale-common)
target_link_libraries(test_buffer maxscale-common)
target_link_libraries(test_dcb maxscale-common)
@ -33,11 +36,14 @@ target_link_libraries(test_queuemanager maxscale-common)
target_link_libraries(test_server maxscale-common)
target_link_libraries(test_service maxscale-common)
target_link_libraries(test_spinlock maxscale-common)
target_link_libraries(test_trxcompare maxscale-common)
target_link_libraries(test_trxtracking maxscale-common)
target_link_libraries(test_users maxscale-common)
target_link_libraries(testfeedback maxscale-common)
target_link_libraries(testmaxscalepcre2 maxscale-common)
target_link_libraries(testmodulecmd maxscale-common)
target_link_libraries(testconfig maxscale-common)
target_link_libraries(trxboundaryparser_profile maxscale-common)
add_test(TestAdminUsers test_adminusers)
add_test(TestBuffer test_buffer)
add_test(TestDCB test_dcb)
@ -58,6 +64,16 @@ add_test(TestSpinlock test_spinlock)
add_test(TestUsers test_users)
add_test(TestModulecmd testmodulecmd)
add_test(TestConfig testconfig)
add_test(TestTrxTracking test_trxtracking)
add_test(TestTrxCompare_Create test_trxcompare ${CMAKE_CURRENT_SOURCE_DIR}/../../../query_classifier/test/create.test)
add_test(TestTrxCompare_Delete test_trxcompare ${CMAKE_CURRENT_SOURCE_DIR}/../../../query_classifier/test/delete.test)
add_test(TestTrxCompare_Insert test_trxcompare ${CMAKE_CURRENT_SOURCE_DIR}/../../../query_classifier/test/insert.test)
add_test(TestTrxCompare_Join test_trxcompare ${CMAKE_CURRENT_SOURCE_DIR}/../../../query_classifier/test/join.test)
add_test(TestTrxCompare_Select test_trxcompare ${CMAKE_CURRENT_SOURCE_DIR}/../../../query_classifier/test/select.test)
add_test(TestTrxCompare_Set test_trxcompare ${CMAKE_CURRENT_SOURCE_DIR}/../../../query_classifier/test/set.test)
add_test(TestTrxCompare_Update test_trxcompare ${CMAKE_CURRENT_SOURCE_DIR}/../../../query_classifier/test/update.test)
add_test(TestTrxCompare_MaxScale test_trxcompare ${CMAKE_CURRENT_SOURCE_DIR}/../../../query_classifier/test/maxscale.test)
# This test requires external dependencies and thus cannot be run
# as a part of the core test set

View File

@ -578,6 +578,43 @@ void test_large_packets()
}
}
// Test helper: skip leading whitespace/comments of a NUL-terminated
// statement; returns a pointer to the first significant character.
char* bypass_whitespace(char* sql)
{
    return modutil_MySQL_bypass_whitespace(sql, strlen(sql));
}
// Exercise modutil_MySQL_bypass_whitespace() with the whitespace and
// comment styles MySQL accepts in front of a statement; in every case the
// returned pointer must land on the 'S' of SELECT.
void test_bypass_whitespace()
{
    char* sql;

    sql = bypass_whitespace("SELECT");
    ss_info_dassert(*sql == 'S', "1");

    sql = bypass_whitespace(" SELECT");
    ss_info_dassert(*sql == 'S', "2");

    sql = bypass_whitespace("\tSELECT");
    ss_info_dassert(*sql == 'S', "3");

    sql = bypass_whitespace("\nSELECT");
    ss_info_dassert(*sql == 'S', "4");

    sql = bypass_whitespace("/* comment */SELECT");
    ss_info_dassert(*sql == 'S', "5");

    sql = bypass_whitespace(" /* comment */ SELECT");
    ss_info_dassert(*sql == 'S', "6");

    sql = bypass_whitespace("-- comment\nSELECT");
    ss_info_dassert(*sql == 'S', "7");

    sql = bypass_whitespace("-- comment\n /* comment */ SELECT");
    ss_info_dassert(*sql == 'S', "8");

    sql = bypass_whitespace("# comment\nSELECT");
    ss_info_dassert(*sql == 'S', "9");
}
int main(int argc, char **argv)
{
int result = 0;
@ -591,5 +628,6 @@ int main(int argc, char **argv)
test_strnchr_esc();
test_strnchr_esc_mysql();
test_large_packets();
test_bypass_whitespace();
exit(result);
}

View File

@ -0,0 +1,237 @@
/*
* Copyright (c) 2016 MariaDB Corporation Ab
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file and at www.mariadb.com/bsl11.
*
* Change Date: 2019-07-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2 or later of the General
* Public License.
*/
#include <maxscale/cppdefs.hh>
#include <unistd.h>
#include <fstream>
#include <iostream>
#include <string>
#include "../maxscale/query_classifier.h"
#include <maxscale/alloc.h>
#include <maxscale/paths.h>
#include <maxscale/protocol/mysql.h>
#include "../../../query_classifier/test/testreader.hh"
using namespace std;
namespace
{
char USAGE[] =
"test_trxcompare [-v] (-s stmt)|[file]"
"\n"
"-s test single statement\n"
"-v 0, only return code\n"
" 1, failed cases (default)\n"
" 2, successful transactional cases\n"
" 4, successful cases\n"
" 7, all cases\n";
enum verbosity_t
{
VERBOSITY_NOTHING = 0, // 000
VERBOSITY_FAILED = 1, // 001
VERBOSITY_SUCCESSFUL_TRANSACTIONAL = 2, // 010
VERBOSITY_SUCCESSFUL = 4, // 100
VERBOSITY_ALL = 7, // 111
};
/**
 * Wrap a statement in a GWBUF containing a MySQL COM_QUERY packet.
 *
 * @param zStmt The statement text (not NUL-terminated in the packet).
 *
 * @return A newly allocated GWBUF; the caller must gwbuf_free() it.
 */
GWBUF* create_gwbuf(const char* zStmt)
{
    size_t len = strlen(zStmt);
    size_t payload_len = len + 1; // Command byte + statement text.
    size_t gwbuf_len = MYSQL_HEADER_LEN + payload_len;

    GWBUF* pBuf = gwbuf_alloc(gwbuf_len);
    unsigned char* pData = (unsigned char*)GWBUF_DATA(pBuf);

    // 3-byte little-endian payload length, sequence id 0, COM_QUERY (0x03).
    pData[0] = payload_len;
    pData[1] = payload_len >> 8;
    pData[2] = payload_len >> 16;
    pData[3] = 0x00;
    pData[4] = 0x03;

    memcpy(pData + 5, zStmt, len);

    return pBuf;
}
/**
 * Runs statements through both transaction detection approaches (query
 * classifier and custom parser) and reports any disagreement.
 */
class Tester
{
public:
    /**
     * @param verbosity Bitwise OR of verbosity_t values controlling which
     *                  cases are printed.
     */
    Tester(uint32_t verbosity)
        : m_verbosity(verbosity)
    {
    }

    /**
     * Compare the transaction type masks for a single statement.
     *
     * @return EXIT_SUCCESS if both approaches agree, else EXIT_FAILURE.
     */
    int run(const char* zStmt)
    {
        int rc = EXIT_SUCCESS;

        GWBUF* pStmt = create_gwbuf(zStmt);

        uint32_t type_mask_qc = qc_get_trx_type_mask_using(pStmt, QC_TRX_PARSE_USING_QC);
        uint32_t type_mask_parser = qc_get_trx_type_mask_using(pStmt, QC_TRX_PARSE_USING_PARSER);

        gwbuf_free(pStmt);

        if (type_mask_qc == type_mask_parser)
        {
            // Print successes only if the verbosity asks for them.
            if ((m_verbosity & VERBOSITY_SUCCESSFUL) ||
                ((m_verbosity & VERBOSITY_SUCCESSFUL_TRANSACTIONAL) && (type_mask_qc != 0)))
            {
                char* zType_mask = qc_typemask_to_string(type_mask_qc);
                cout << zStmt << ": " << zType_mask << endl;
                MXS_FREE(zType_mask);
            }
        }
        else
        {
            if (m_verbosity & VERBOSITY_FAILED)
            {
                char* zType_mask_qc = qc_typemask_to_string(type_mask_qc);
                char* zType_mask_parser = qc_typemask_to_string(type_mask_parser);

                cout << zStmt << "\n"
                     << " QC : " << zType_mask_qc << "\n"
                     << " PARSER: " << zType_mask_parser << endl;

                MXS_FREE(zType_mask_qc);
                MXS_FREE(zType_mask_parser);
            }

            rc = EXIT_FAILURE;
        }

        return rc;
    }

    /**
     * Compare the masks for every statement read from @c in.
     *
     * @return EXIT_FAILURE if any statement failed, else EXIT_SUCCESS.
     */
    int run(istream& in)
    {
        int rc = EXIT_SUCCESS;

        maxscale::TestReader reader(in);

        string stmt;

        while (reader.get_statement(stmt) == maxscale::TestReader::RESULT_STMT)
        {
            if (run(stmt.c_str()) == EXIT_FAILURE)
            {
                rc = EXIT_FAILURE;
            }
        }

        return rc;
    }

private:
    Tester(const Tester&);              // Not copyable.
    Tester& operator = (const Tester&); // Not assignable.

private:
    uint32_t m_verbosity; // Bitmask of verbosity_t values.
};
}
// Entry point: compare the QC and custom-parser transaction masks either
// for a single statement (-s), a test file argument, or statements on stdin.
int main(int argc, char* argv[])
{
    int rc = EXIT_SUCCESS;

    int verbosity = VERBOSITY_FAILED;
    const char* zStatement = NULL;

    int c;
    while ((c = getopt(argc, argv, "s:v:")) != -1)
    {
        switch (c)
        {
        case 's':
            zStatement = optarg;
            break;

        case 'v':
            verbosity = atoi(optarg);
            break;

        default:
            rc = EXIT_FAILURE;
        }
    }

    if ((rc == EXIT_SUCCESS) && (verbosity >= VERBOSITY_NOTHING) && (verbosity <= VERBOSITY_ALL))
    {
        // Assume failure until a test run has actually succeeded.
        rc = EXIT_FAILURE;

        set_datadir(strdup("/tmp"));
        set_langdir(strdup("."));
        set_process_datadir(strdup("/tmp"));

        if (mxs_log_init(NULL, ".", MXS_LOG_TARGET_DEFAULT))
        {
            // We have to setup something in order for the regexes to be compiled.
            if (qc_setup("qc_sqlite", NULL) && qc_process_init(QC_INIT_BOTH))
            {
                Tester tester(verbosity);

                // Number of positional arguments remaining after getopt.
                int n = argc - (optind - 1);

                if (zStatement)
                {
                    rc = tester.run(zStatement);
                }
                else if (n == 1)
                {
                    // No file argument; read statements from stdin.
                    rc = tester.run(cin);
                }
                else
                {
                    ss_dassert(n == 2);

                    ifstream in(argv[argc - 1]);

                    if (in)
                    {
                        rc = tester.run(in);
                    }
                    else
                    {
                        cerr << "error: Could not open " << argv[argc - 1] << "." << endl;
                    }
                }

                qc_process_end(QC_INIT_BOTH);
            }
            else
            {
                cerr << "error: Could not initialize qc_sqlite." << endl;
            }

            mxs_log_finish();
        }
        else
        {
            cerr << "error: Could not initialize log." << endl;
        }
    }
    else
    {
        cout << USAGE << endl;
    }

    return rc;
}

View File

@ -0,0 +1,455 @@
/*
* Copyright (c) 2016 MariaDB Corporation Ab
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file and at www.mariadb.com/bsl11.
*
* Change Date: 2019-07-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2 or later of the General
* Public License.
*/
#include <maxscale/cppdefs.hh>
#include <iostream>
#include <maxscale/modutil.h>
#include <maxscale/paths.h>
#include <maxscale/protocol/mysql.h>
#include "../core/maxscale/query_classifier.h"
using namespace std;
namespace
{
enum test_target_t
{
TEST_PARSER = 0x1,
TEST_QC = 0x2,
TEST_ALL = (TEST_PARSER | TEST_QC)
};
/**
 * Package a statement into a GWBUF as a MySQL COM_QUERY packet.
 *
 * @param zStmt The statement text.
 *
 * @return A newly allocated GWBUF; the caller must gwbuf_free() it.
 */
GWBUF* create_gwbuf(const char* zStmt)
{
    size_t len = strlen(zStmt);
    size_t payload_len = len + 1; // Command byte + statement text.

    GWBUF* pBuf = gwbuf_alloc(MYSQL_HEADER_LEN + payload_len);
    unsigned char* pHeader = (unsigned char*)GWBUF_DATA(pBuf);

    // Payload length as three little-endian bytes, then sequence id 0
    // and the COM_QUERY command byte 0x03.
    pHeader[0] = payload_len;
    pHeader[1] = payload_len >> 8;
    pHeader[2] = payload_len >> 16;
    pHeader[3] = 0x00;
    pHeader[4] = 0x03;

    memcpy(pHeader + 5, zStmt, len);

    return pBuf;
}
// Adapter: transaction type mask as computed by the query classifier.
uint32_t get_qc_trx_type_mask(GWBUF* pBuf)
{
    return qc_get_trx_type_mask_using(pBuf, QC_TRX_PARSE_USING_QC);
}

// Adapter: transaction type mask as computed by the custom parser.
uint32_t get_parser_trx_type_mask(GWBUF* pBuf)
{
    return qc_get_trx_type_mask_using(pBuf, QC_TRX_PARSE_USING_PARSER);
}
}
namespace
{
// Statements with their expected transaction related type masks; every
// parsing approach must produce exactly these masks.
struct test_case
{
    const char* zStmt;     // The statement to classify.
    uint32_t type_mask;    // The expected transaction type mask.
} test_cases[] =
{
    { "BEGIN", QUERY_TYPE_BEGIN_TRX },
    { "BEGIN WORK", QUERY_TYPE_BEGIN_TRX },
    { "COMMIT", QUERY_TYPE_COMMIT },
    { "COMMIT WORK", QUERY_TYPE_COMMIT },
    { "ROLLBACK", QUERY_TYPE_ROLLBACK },
    { "ROLLBACK WORK", QUERY_TYPE_ROLLBACK },
    { "START TRANSACTION", QUERY_TYPE_BEGIN_TRX },
    { "START TRANSACTION READ ONLY", QUERY_TYPE_BEGIN_TRX | QUERY_TYPE_READ },
    { "START TRANSACTION READ WRITE", QUERY_TYPE_BEGIN_TRX | QUERY_TYPE_WRITE },
    { "START TRANSACTION WITH CONSISTENT SNAPSHOT", QUERY_TYPE_BEGIN_TRX },
    { "START TRANSACTION WITH CONSISTENT SNAPSHOT, READ ONLY", QUERY_TYPE_BEGIN_TRX | QUERY_TYPE_READ },
    { "SET AUTOCOMMIT=true", QUERY_TYPE_COMMIT|QUERY_TYPE_ENABLE_AUTOCOMMIT },
    { "SET AUTOCOMMIT=1", QUERY_TYPE_COMMIT|QUERY_TYPE_ENABLE_AUTOCOMMIT },
    { "SET AUTOCOMMIT=false", QUERY_TYPE_BEGIN_TRX|QUERY_TYPE_DISABLE_AUTOCOMMIT },
    { "SET AUTOCOMMIT=0", QUERY_TYPE_BEGIN_TRX|QUERY_TYPE_DISABLE_AUTOCOMMIT },
    { "SET @@AUTOCOMMIT=0", QUERY_TYPE_BEGIN_TRX|QUERY_TYPE_DISABLE_AUTOCOMMIT },
    { "SET GLOBAL AUTOCOMMIT=0", QUERY_TYPE_BEGIN_TRX|QUERY_TYPE_DISABLE_AUTOCOMMIT },
    { "SET SESSION AUTOCOMMIT=0", QUERY_TYPE_BEGIN_TRX|QUERY_TYPE_DISABLE_AUTOCOMMIT },
    { "SET @@SESSION . AUTOCOMMIT=0", QUERY_TYPE_BEGIN_TRX|QUERY_TYPE_DISABLE_AUTOCOMMIT },
    { "SET @@GLOBAL . AUTOCOMMIT=0", QUERY_TYPE_BEGIN_TRX|QUERY_TYPE_DISABLE_AUTOCOMMIT },
};

const size_t N_TEST_CASES = sizeof(test_cases)/sizeof(test_cases[0]);
/**
 * Check that @c getter computes the expected type mask for a statement.
 *
 * @param getter             Function computing the type mask of a GWBUF.
 * @param zStmt              The statement to test.
 * @param expected_type_mask The mask @c getter is expected to return.
 *
 * @return True if the masks matched, false otherwise.
 */
bool test(uint32_t (*getter)(GWBUF*), const char* zStmt, uint32_t expected_type_mask)
{
    // Was declared 'int'; use the function's declared return type.
    bool rc = true;

    GWBUF* pBuf = create_gwbuf(zStmt);

    uint32_t type_mask = getter(pBuf);

    if (type_mask != expected_type_mask)
    {
        cerr << "\"" << zStmt << "\""
             << ": expected " << expected_type_mask << ", but got " << type_mask << "." << endl;
        rc = false;
    }

    gwbuf_free(pBuf);

    return rc;
}
// Whitespace/comment prefixes that must not affect classification.
const char* prefixes[] =
{
    " ",
    " ",
    "\n",
    " \n",
    "\n ",
    "-- comment\n"
};

const int N_PREFIXES = sizeof(prefixes) / sizeof(prefixes[0]);

// Prepend every prefix to the base statement; each variant must still
// yield the same type mask.
bool test_with_prefixes(uint32_t (*getter)(GWBUF*), const string& base, uint32_t type_mask)
{
    bool rc = true;

    for (int i = 0; i < N_PREFIXES; ++i)
    {
        string s = prefixes[i] + base;

        if (!test(getter, s.c_str(), type_mask))
        {
            rc = false;
        }
    }

    return rc;
}
// Whitespace/comment/semicolon suffixes that must not affect classification.
const char* suffixes[] =
{
    " ",
    " ",
    "\n",
    " \n",
    "\n ",
    ";",
    " ;",
    " ;",
    " ;",
    " ;",
    " ; ",
    ";\n",
    " ; ",
    "-- comment this, comment that",
    // "# comment this, comment that" /* qc_sqlite does not handle this */
};

const int N_SUFFIXES = sizeof(suffixes) / sizeof(suffixes[0]);

// Append every suffix to the base statement; each variant must still
// yield the same type mask.
bool test_with_suffixes(uint32_t (*getter)(GWBUF*), const string& base, uint32_t type_mask)
{
    bool rc = true;

    for (int i = 0; i < N_SUFFIXES; ++i)
    {
        string s = base + suffixes[i];

        if (!test(getter, s.c_str(), type_mask))
        {
            rc = false;
        }
    }

    return rc;
}
// Alternative whitespace forms to substitute for each space character.
const char* whitespace[] =
{
    " ",
    "\n",
    "/**/",
    "/***/",
    "/****/",
    "/* / * */",
    "-- comment\n"
};

const int N_WHITESPACE = sizeof(whitespace) / sizeof(whitespace[0]);

// For every space in the base statement, substitute each whitespace
// variant in turn; each variant must still yield the same type mask.
bool test_with_whitespace(uint32_t (*getter)(GWBUF*), const string& base, uint32_t type_mask)
{
    bool rc = true;

    string::const_iterator i = base.begin();
    string::const_iterator end = base.end();

    string head; // The part of the statement already scanned.

    while (i != end)
    {
        if (*i == ' ')
        {
            string tail(i + 1, end);

            for (int j = 0; j < N_WHITESPACE; ++j)
            {
                string s = head + whitespace[j] + tail;

                if (!test(getter, s.c_str(), type_mask))
                {
                    rc = false;
                }
            }
        }

        head += *i;
        ++i;
    }

    return rc;
}
// Alternative spellings of "," (with surrounding whitespace) to
// substitute for each comma character.
const char* commas[] =
{
    " ,",
    " ,",
    " , ",
    " , ",
};

const int N_COMMAS = sizeof(commas) / sizeof(commas[0]);

// For every ',' in the base statement, substitute each comma variant in
// turn; each variant must still yield the same type mask.
bool test_with_commas(uint32_t (*getter)(GWBUF*), const string& base, uint32_t type_mask)
{
    bool rc = true;

    string::const_iterator i = base.begin();
    string::const_iterator end = base.end();

    string head; // The part of the statement already scanned.

    while (i != end)
    {
        if (*i == ',')
        {
            string tail(i + 1, end);

            for (int j = 0; j < N_COMMAS; ++j)
            {
                string s = head + commas[j] + tail;

                if (!test(getter, s.c_str(), type_mask))
                {
                    rc = false;
                }
            }
        }

        head += *i;
        ++i;
    }

    return rc;
}
// Run every test case plus all of its prefix/whitespace/comma/suffix
// variations through @c getter. Unless @c dont_bail_out is set, stop at
// the first failing case.
bool test(uint32_t (*getter)(GWBUF*), bool dont_bail_out)
{
    bool rc = true;

    test_case* pTest = test_cases;
    test_case* pEnd = pTest + N_TEST_CASES;

    while ((pTest < pEnd) && (dont_bail_out || rc))
    {
        string base(pTest->zStmt);
        cout << base << endl;

        string s;

        // First the statement exactly as written.
        s = base;
        if (!test(getter, s.c_str(), pTest->type_mask))
        {
            rc = false;
        }

        if (dont_bail_out || rc)
        {
            if (!test_with_prefixes(getter, base, pTest->type_mask))
            {
                rc = false;
            }
        }

        if (dont_bail_out || rc)
        {
            if (!test_with_whitespace(getter, base, pTest->type_mask))
            {
                rc = false;
            }
        }

        if (dont_bail_out || rc)
        {
            if (!test_with_commas(getter, base, pTest->type_mask))
            {
                rc = false;
            }
        }

        if (dont_bail_out || rc)
        {
            if (!test_with_suffixes(getter, base, pTest->type_mask))
            {
                rc = false;
            }
        }

        ++pTest;
    }

    return rc;
}
}
namespace
{
char USAGE[] =
"usage: test_trxtracking [-p] [-q] [-r] [-d]\n"
"\n"
"-p : Test using custom parser\n"
"-q : Test using query classifier\n"
"-r : Test using regex matching\n"
"-d : Don't bail out at first error\n"
"\n"
"If neither -p, -q or -r has been specified, then all will be tested.\n";
}
int main(int argc, char* argv[])
{
int rc = EXIT_SUCCESS;
bool test_all = true;
uint32_t test_target = 0;
bool dont_bail_out = false;
int c;
while ((c = getopt(argc, argv, "dpq")) != -1)
{
switch (c)
{
case 'p':
test_all = false;
test_target |= TEST_PARSER;
break;
case 'q':
test_all = false;
test_target = TEST_QC;
break;
case 'd':
dont_bail_out = true;
break;
default:
cout << USAGE << endl;
rc = EXIT_FAILURE;
}
}
if (rc == EXIT_SUCCESS)
{
rc = EXIT_FAILURE;
if (test_all)
{
test_target = TEST_ALL;
}
set_datadir(strdup("/tmp"));
set_langdir(strdup("."));
set_process_datadir(strdup("/tmp"));
if (mxs_log_init(NULL, ".", MXS_LOG_TARGET_DEFAULT))
{
// We have to setup something in order for the regexes to be compiled.
if (qc_setup("qc_sqlite", NULL) && qc_process_init(QC_INIT_BOTH))
{
rc = EXIT_SUCCESS;
if (test_target & TEST_QC)
{
cout << "QC" << endl;
cout << "==" << endl;
if (!test(get_qc_trx_type_mask, dont_bail_out))
{
rc = EXIT_FAILURE;
}
cout << endl;
}
if (test_target & TEST_PARSER)
{
cout << "Parser" << endl;
cout << "======" << endl;
if (!test(get_parser_trx_type_mask, dont_bail_out))
{
rc = EXIT_FAILURE;
}
cout << endl;
}
qc_process_end(QC_INIT_BOTH);
}
else
{
cerr << "error: Could not initialize qc_sqlite." << endl;
}
mxs_log_finish();
}
else
{
cerr << "error: Could not initialize log." << endl;
}
}
return rc;
}

View File

@ -0,0 +1,116 @@
/*
* Copyright (c) 2016 MariaDB Corporation Ab
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file and at www.mariadb.com/bsl11.
*
* Change Date: 2019-07-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2 or later of the General
* Public License.
*/
#include <maxscale/cppdefs.hh>
#include <iomanip>
#include <iostream>
#include <maxscale/paths.h>
#include "../maxscale/trxboundaryparser.hh"
using namespace std;
namespace
{
char USAGE[] = "usage: trxboundaryparser -n count -s statement\n";
/**
 * Compute the difference @c later - @c earlier of two timestamps.
 *
 * @param later    The later timestamp; must not precede @c earlier.
 * @param earlier  The earlier timestamp.
 *
 * @return The elapsed time between the two timestamps.
 */
timespec timespec_subtract(const timespec& later, const timespec& earlier)
{
    timespec result = { 0, 0 };

    // Allow equal timestamps as well: the assertion used to require a
    // strictly greater tv_nsec, although {0, 0} is the correct result
    // when later == earlier.
    ss_dassert((later.tv_sec > earlier.tv_sec) ||
               ((later.tv_sec == earlier.tv_sec) && (later.tv_nsec >= earlier.tv_nsec)));

    if (later.tv_nsec >= earlier.tv_nsec)
    {
        result.tv_sec = later.tv_sec - earlier.tv_sec;
        result.tv_nsec = later.tv_nsec - earlier.tv_nsec;
    }
    else
    {
        // Borrow one second for the nanosecond subtraction.
        result.tv_sec = later.tv_sec - earlier.tv_sec - 1;
        result.tv_nsec = 1000000000 + later.tv_nsec - earlier.tv_nsec;
    }

    return result;
}
}
/**
 * Entry point of the TrxBoundaryParser micro-benchmark.
 *
 * Usage: trxboundaryparser -n count -s statement
 *
 * Parses the given statement @c count times with maxscale::TrxBoundaryParser
 * and prints the elapsed wall-clock time.
 *
 * @return EXIT_SUCCESS if the measurement was performed, EXIT_FAILURE otherwise.
 */
int main(int argc, char* argv[])
{
    int rc = EXIT_SUCCESS;

    int nCount = 0;
    const char* zStatement = NULL;

    int c;
    while ((c = getopt(argc, argv, "n:s:")) != -1)
    {
        switch (c)
        {
        case 'n':
            nCount = atoi(optarg);
            break;

        case 's':
            zStatement = optarg;
            break;

        default:
            rc = EXIT_FAILURE;
        }
    }

    if ((rc == EXIT_SUCCESS) && zStatement && (nCount > 0))
    {
        // Assume failure until the measurement has actually been made.
        rc = EXIT_FAILURE;

        // Minimal environment setup required by the MaxScale log machinery.
        set_datadir(strdup("/tmp"));
        set_langdir(strdup("."));
        set_process_datadir(strdup("/tmp"));

        if (mxs_log_init(NULL, ".", MXS_LOG_TARGET_DEFAULT))
        {
            size_t len = strlen(zStatement);

            maxscale::TrxBoundaryParser parser;

            struct timespec start;
            clock_gettime(CLOCK_MONOTONIC_RAW, &start);

            for (int i = 0; i < nCount; ++i)
            {
                parser.type_mask_of(zStatement, len);
            }

            struct timespec finish;
            clock_gettime(CLOCK_MONOTONIC_RAW, &finish);

            struct timespec diff = timespec_subtract(finish, start);

            cout << "Time:" << diff.tv_sec << "." << setfill('0') << setw(9) << diff.tv_nsec << endl;

            // The measurement succeeded; without this the tool would always
            // exit with the failure value assigned above.
            rc = EXIT_SUCCESS;

            mxs_log_finish();
        }
        else
        {
            cerr << "error: Could not initialize log." << endl;
        }
    }
    else
    {
        cout << USAGE << endl;
    }

    return rc;
}

View File

@ -31,6 +31,7 @@
#include <mysqld_error.h>
#include <maxscale/mysql_utils.h>
#include <maxscale/alloc.h>
#include <maxscale/paths.h>
/** Don't include the root user */
#define USERS_QUERY_NO_ROOT " AND user.user NOT IN ('root')"
@ -49,7 +50,7 @@
FROM mysql.user AS u LEFT JOIN mysql.tables_priv AS t \
ON (u.user = t.user AND u.host = t.host) %s"
static int get_users(SERV_LISTENER *listener);
static int get_users(SERV_LISTENER *listener, bool skip_local);
static MYSQL *gw_mysql_init(void);
static int gw_mysql_set_timeouts(MYSQL* handle);
static char *mysql_format_user_entry(void *data);
@ -71,10 +72,10 @@ static char* get_new_users_query(const char *server_version, bool include_root)
return rval;
}
int replace_mysql_users(SERV_LISTENER *listener)
int replace_mysql_users(SERV_LISTENER *listener, bool skip_local)
{
spinlock_acquire(&listener->lock);
int i = get_users(listener);
int i = get_users(listener, skip_local);
spinlock_release(&listener->lock);
return i;
}
@ -408,22 +409,7 @@ MYSQL *gw_mysql_init()
if (con)
{
if (gw_mysql_set_timeouts(con) == 0)
{
// MYSQL_OPT_USE_REMOTE_CONNECTION must be set if the embedded
// libary is used. With Connector-C (at least 2.2.1) the call
// fails.
#if !defined(LIBMARIADB)
if (mysql_options(con, MYSQL_OPT_USE_REMOTE_CONNECTION, NULL) != 0)
{
MXS_ERROR("Failed to set external connection. "
"It is needed for backend server connections.");
mysql_close(con);
con = NULL;
}
#endif
}
else
if (gw_mysql_set_timeouts(con) != 0)
{
MXS_ERROR("Failed to set timeout values for backend connection.");
mysql_close(con);
@ -454,21 +440,21 @@ static int gw_mysql_set_timeouts(MYSQL* handle)
MXS_CONFIG* cnf = config_get_global_options();
if ((rc = mysql_options(handle, MYSQL_OPT_READ_TIMEOUT,
if ((rc = mysql_optionsv(handle, MYSQL_OPT_READ_TIMEOUT,
(void *) &cnf->auth_read_timeout)))
{
MXS_ERROR("Failed to set read timeout for backend connection.");
goto retblock;
}
if ((rc = mysql_options(handle, MYSQL_OPT_CONNECT_TIMEOUT,
if ((rc = mysql_optionsv(handle, MYSQL_OPT_CONNECT_TIMEOUT,
(void *) &cnf->auth_conn_timeout)))
{
MXS_ERROR("Failed to set connect timeout for backend connection.");
goto retblock;
}
if ((rc = mysql_options(handle, MYSQL_OPT_WRITE_TIMEOUT,
if ((rc = mysql_optionsv(handle, MYSQL_OPT_WRITE_TIMEOUT,
(void *) &cnf->auth_write_timeout)))
{
MXS_ERROR("Failed to set write timeout for backend connection.");
@ -499,9 +485,10 @@ static bool check_server_permissions(SERVICE *service, SERVER* server,
}
MXS_CONFIG* cnf = config_get_global_options();
mysql_options(mysql, MYSQL_OPT_READ_TIMEOUT, &cnf->auth_read_timeout);
mysql_options(mysql, MYSQL_OPT_CONNECT_TIMEOUT, &cnf->auth_conn_timeout);
mysql_options(mysql, MYSQL_OPT_WRITE_TIMEOUT, &cnf->auth_write_timeout);
mysql_optionsv(mysql, MYSQL_OPT_READ_TIMEOUT, &cnf->auth_read_timeout);
mysql_optionsv(mysql, MYSQL_OPT_CONNECT_TIMEOUT, &cnf->auth_conn_timeout);
mysql_optionsv(mysql, MYSQL_OPT_WRITE_TIMEOUT, &cnf->auth_write_timeout);
mysql_optionsv(mysql, MYSQL_PLUGIN_DIR, get_connector_plugindir());
if (mxs_mysql_real_connect(mysql, server, user, password) == NULL)
{
@ -648,7 +635,8 @@ bool check_service_permissions(SERVICE* service)
for (SERVER_REF *server = service->dbref; server; server = server->next)
{
if (check_server_permissions(service, server->server, user, dpasswd))
if (server_is_mxs_service(server->server) ||
check_server_permissions(service, server->server, user, dpasswd))
{
rval = true;
}
@ -826,7 +814,7 @@ int get_users_from_server(MYSQL *con, SERVER_REF *server, SERVICE *service, SERV
* @param users The users table into which to load the users
* @return -1 on any error or the number of users inserted
*/
static int get_users(SERV_LISTENER *listener)
static int get_users(SERV_LISTENER *listener, bool skip_local)
{
char *service_user = NULL;
char *service_passwd = NULL;
@ -853,6 +841,12 @@ static int get_users(SERV_LISTENER *listener)
for (server = service->dbref; !service->svc_do_shutdown && server; server = server->next)
{
if (skip_local && server_is_mxs_service(server->server))
{
total_users = 0;
continue;
}
MYSQL *con = gw_mysql_init();
if (con)
{
@ -886,7 +880,7 @@ static int get_users(SERV_LISTENER *listener)
MXS_FREE(dpwd);
if (server == NULL)
if (server == NULL && total_users == -1)
{
MXS_ERROR("Unable to get user data from backend database for service [%s]."
" Failed to connect to any of the backend databases.", service->name);

View File

@ -614,8 +614,11 @@ static int mysql_auth_load_users(SERV_LISTENER *port)
return MXS_AUTH_LOADUSERS_FATAL;
}
bool skip_local = false;
if (instance->handle == NULL)
{
skip_local = true;
char path[PATH_MAX];
get_database_path(port, path, sizeof(path));
if (!open_instance_database(path, &instance->handle))
@ -624,7 +627,7 @@ static int mysql_auth_load_users(SERV_LISTENER *port)
}
}
int loaded = replace_mysql_users(port);
int loaded = replace_mysql_users(port, skip_local);
if (loaded < 0)
{
@ -642,7 +645,7 @@ static int mysql_auth_load_users(SERV_LISTENER *port)
}
}
if (loaded == 0)
if (loaded == 0 && !skip_local)
{
MXS_WARNING("[%s]: failed to load any user information. Authentication"
" will probably fail as a result.", service->name);

View File

@ -171,11 +171,12 @@ bool dbusers_save(sqlite3 *src, const char *filename);
/**
* Reload and replace the currently loaded database users
*
* @param service The current service
* @param service The current service
* @param skip_local Skip loading of users on local MaxScale services
*
* @return -1 on any error or the number of users inserted (0 means no users at all)
*/
int replace_mysql_users(SERV_LISTENER *listener);
int replace_mysql_users(SERV_LISTENER *listener, bool skip_local);
/**
* @brief Verify the user has access to the database

View File

@ -14,11 +14,19 @@
#define MXS_MODULE_NAME "cache"
#include "cache.hh"
#include <new>
#include <set>
#include <string>
#include <zlib.h>
#include <maxscale/alloc.h>
#include <maxscale/buffer.h>
#include <maxscale/modutil.h>
#include <maxscale/query_classifier.h>
#include <maxscale/paths.h>
#include "storagefactory.hh"
#include "storage.hh"
using namespace std;
Cache::Cache(const std::string& name,
const CACHE_CONFIG* pConfig,
SCacheRules sRules,
@ -102,6 +110,46 @@ void Cache::show(DCB* pDcb) const
}
}
// Returns a key for the statement. Currently delegates unconditionally to
// get_default_key(); the TODO below records the intent that the cache
// configuration should eventually influence how the key is generated.
cache_result_t Cache::get_key(const char* zDefault_db,
                              const GWBUF* pQuery,
                              CACHE_KEY* pKey) const
{
    // TODO: Take config into account.
    return get_default_key(zDefault_db, pQuery, pKey);
}
//static
// Builds a 64-bit cache key from the default database and the SQL text of
// the query, independent of any cache configuration.
cache_result_t Cache::get_default_key(const char* zDefault_db,
                                      const GWBUF* pQuery,
                                      CACHE_KEY* pKey)
{
    // The buffer must be one contiguous chunk so that modutil_extract_SQL()
    // exposes the complete statement.
    ss_dassert(GWBUF_IS_CONTIGUOUS(pQuery));

    char *pSql;
    int length;

    modutil_extract_SQL(const_cast<GWBUF*>(pQuery), &pSql, &length);

    // crc32(0, Z_NULL, 0) is the zlib idiom for obtaining the initial CRC.
    uint64_t crc1 = crc32(0, Z_NULL, 0);

    const Bytef* pData;

    // Fold the default database into the key so the same statement issued
    // against different databases yields different keys.
    if (zDefault_db)
    {
        pData = reinterpret_cast<const Bytef*>(zDefault_db);
        crc1 = crc32(crc1, pData, strlen(zDefault_db));
    }

    pData = reinterpret_cast<const Bytef*>(pSql);

    crc1 = crc32(crc1, pData, length);
    // Second pass over the same SQL, seeded with the first CRC, yields a
    // further 32 bits; the two CRCs combined form the 64-bit key. NOTE(review):
    // this is cheaper but weaker than a true 64-bit hash — collisions are
    // more likely than with a dedicated 64-bit function.
    uint64_t crc2 = crc32(crc1, pData, length);

    pKey->data = (crc1 << 32 | crc2);

    return CACHE_RESULT_OK;
}
bool Cache::should_store(const char* zDefaultDb, const GWBUF* pQuery)
{
return m_sRules->should_store(zDefaultDb, pQuery);

View File

@ -87,11 +87,31 @@ public:
virtual void refreshed(const CACHE_KEY& key, const CacheFilterSession* pSession) = 0;
/**
* See @Storage::get_key
* Returns a key for the statement. Takes the current config into account.
*
* @param zDefault_db The default database, can be NULL.
* @param pQuery A statement.
* @param pKey On output a key.
*
* @return CACHE_RESULT_OK if a key could be created.
*/
virtual cache_result_t get_key(const char* zDefaultDb,
const GWBUF* pQuery,
CACHE_KEY* pKey) const = 0;
cache_result_t get_key(const char* zDefault_db,
const GWBUF* pQuery,
CACHE_KEY* pKey) const;
/**
* Returns a key for the statement. Does not take the current config
* into account.
*
* @param zDefault_db The default database, can be NULL.
* @param pQuery A statement.
* @param pKey On output a key.
*
* @return CACHE_RESULT_OK if a key could be created.
*/
static cache_result_t get_default_key(const char* zDefault_db,
const GWBUF* pQuery,
CACHE_KEY* pKey);
/**
* See @Storage::get_value

View File

@ -17,20 +17,9 @@
size_t cache_key_hash(const CACHE_KEY* key)
{
ss_dassert(key);
ss_dassert(sizeof(key->data) == sizeof(size_t));
size_t hash = 0;
const char* i = key->data;
const char* end = i + CACHE_KEY_MAXLEN;
while (i < end)
{
int c = *i;
hash = c + (hash << 6) + (hash << 16) - hash;
++i;
}
return hash;
return key->data;
}
bool cache_key_equal_to(const CACHE_KEY* lhs, const CACHE_KEY* rhs)
@ -38,7 +27,7 @@ bool cache_key_equal_to(const CACHE_KEY* lhs, const CACHE_KEY* rhs)
ss_dassert(lhs);
ss_dassert(rhs);
return memcmp(lhs->data, rhs->data, CACHE_KEY_MAXLEN) == 0;
return lhs->data == rhs->data;
}

View File

@ -14,24 +14,15 @@
#define MXS_MODULE_NAME "cache"
#include "cache_storage_api.hh"
#include <ctype.h>
#include <sstream>
using std::string;
using std::stringstream;
std::string cache_key_to_string(const CACHE_KEY& key)
{
string s;
stringstream ss;
ss << key.data;
for (int i = 0; i < CACHE_KEY_MAXLEN; ++i)
{
char c = key.data[i];
if (!isprint(c))
{
c = '.';
}
s += c;
}
return s;
return ss.str();
}

View File

@ -60,14 +60,9 @@ typedef enum cache_thread_model
typedef void* CACHE_STORAGE;
enum
{
CACHE_KEY_MAXLEN = 128
};
typedef struct cache_key
{
char data[CACHE_KEY_MAXLEN];
uint64_t data;
} CACHE_KEY;
/**
@ -176,18 +171,6 @@ typedef struct cache_storage_api
const CACHE_STORAGE_CONFIG* config,
int argc, char* argv[]);
/**
* Create a key for a GWBUF.
*
* @param query An SQL query. Must be one contiguous buffer.
* @param key Pointer to key.
*
* @return CACHE_RESULT_OK if a key was created, otherwise some error code.
*/
cache_result_t (*getKey)(const char* default_db,
const GWBUF* query,
CACHE_KEY* key);
/**
* Frees an CACHE_STORAGE instance earlier created with createInstance.
*

View File

@ -51,7 +51,7 @@ std::string cache_key_to_string(const CACHE_KEY& key);
inline bool operator == (const CACHE_KEY& lhs, const CACHE_KEY& rhs)
{
return memcmp(lhs.data, rhs.data, sizeof(lhs.data)) == 0;
return lhs.data == rhs.data;
}
inline bool operator != (const CACHE_KEY& lhs, const CACHE_KEY& rhs)
@ -64,7 +64,7 @@ class CacheKey : public CACHE_KEY
public:
CacheKey()
{
memset(data, 0, sizeof(data));
data = 0;
}
};

View File

@ -49,6 +49,8 @@ void cache_config_finish(CACHE_CONFIG& config)
config.hard_ttl = 0;
config.soft_ttl = 0;
config.debug = 0;
config.thread_model = CACHE_THREAD_MODEL_MT;
config.selects = CACHE_SELECTS_VERIFY_CACHEABLE;
}
/**
@ -100,6 +102,20 @@ bool cache_command_show(const MODULECMD_ARG* pArgs)
return true;
}
int cache_process_init()
{
uint32_t jit_available;
pcre2_config(PCRE2_CONFIG_JIT, &jit_available);
if (!jit_available)
{
MXS_WARNING("pcre2 JIT is not available; regex matching will not be "
"as efficient as it could be.");
}
return 0;
}
}
//
@ -107,13 +123,21 @@ bool cache_command_show(const MODULECMD_ARG* pArgs)
//
// Enumeration values for `cached_data`
static const MXS_ENUM_VALUE cached_data_values[] =
static const MXS_ENUM_VALUE parameter_cached_data_values[] =
{
{"shared", CACHE_THREAD_MODEL_MT},
{"thread_specific", CACHE_THREAD_MODEL_ST},
{NULL}
};
// Enumeration values for `selects`
static const MXS_ENUM_VALUE parameter_selects_values[] =
{
{"assume_cacheable", CACHE_SELECTS_ASSUME_CACHEABLE},
{"verify_cacheable", CACHE_SELECTS_VERIFY_CACHEABLE},
{NULL}
};
extern "C" MXS_MODULE* MXS_CREATE_MODULE()
{
static modulecmd_arg_type_t show_argv[] =
@ -136,7 +160,7 @@ extern "C" MXS_MODULE* MXS_CREATE_MODULE()
VERSION_STRING,
RCAP_TYPE_TRANSACTION_TRACKING,
&CacheFilter::s_object,
NULL, /* Process init. */
cache_process_init, /* Process init. */
NULL, /* Process finish. */
NULL, /* Thread init. */
NULL, /* Thread finish. */
@ -144,8 +168,7 @@ extern "C" MXS_MODULE* MXS_CREATE_MODULE()
{
"storage",
MXS_MODULE_PARAM_STRING,
NULL,
MXS_MODULE_OPT_REQUIRED
CACHE_DEFAULT_STORAGE
},
{
"storage_options",
@ -195,7 +218,14 @@ extern "C" MXS_MODULE* MXS_CREATE_MODULE()
MXS_MODULE_PARAM_ENUM,
CACHE_DEFAULT_THREAD_MODEL,
MXS_MODULE_OPT_NONE,
cached_data_values
parameter_cached_data_values
},
{
"selects",
MXS_MODULE_PARAM_ENUM,
CACHE_DEFAULT_SELECTS,
MXS_MODULE_OPT_NONE,
parameter_selects_values
},
{MXS_END_MODULE_PARAMS}
}
@ -292,7 +322,10 @@ bool CacheFilter::process_params(char **pzOptions, MXS_CONFIG_PARAMETER *ppParam
config.max_resultset_size = config_get_size(ppParams, "max_resultset_size");
config.thread_model = static_cast<cache_thread_model_t>(config_get_enum(ppParams,
"cached_data",
cached_data_values));
parameter_cached_data_values));
config.selects = static_cast<cache_selects_t>(config_get_enum(ppParams,
"selects",
parameter_selects_values));
if (!config.storage)
{

View File

@ -54,6 +54,16 @@
#define CACHE_DEFAULT_MAX_SIZE "0"
// Thread model
#define CACHE_DEFAULT_THREAD_MODEL "shared"
// Cacheable selects
#define CACHE_DEFAULT_SELECTS "verify_cacheable"
// Storage
#define CACHE_DEFAULT_STORAGE "storage_inmemory"
typedef enum cache_selects
{
CACHE_SELECTS_ASSUME_CACHEABLE,
CACHE_SELECTS_VERIFY_CACHEABLE,
} cache_selects_t;
typedef struct cache_config
{
@ -70,4 +80,5 @@ typedef struct cache_config
uint64_t max_size; /**< Maximum size of the cache.*/
uint32_t debug; /**< Debug settings. */
cache_thread_model_t thread_model; /**< Thread model. */
cache_selects_t selects; /**< Assume/verify that selects are cacheable. */
} CACHE_CONFIG;

View File

@ -136,6 +136,46 @@ bool uses_non_cacheable_variable(GWBUF* pPacket)
}
namespace
{
/**
 * Check whether a COM_QUERY packet contains a statement starting with SELECT.
 *
 * Only the first keyword is examined: a case-insensitive match of "SELECT"
 * that is not merely a prefix of a longer identifier (e.g. "SELECTION").
 *
 * @param pStmt A contiguous COM_QUERY buffer.
 *
 * @return True if the statement begins with the keyword SELECT.
 */
bool is_select_statement(GWBUF* pStmt)
{
    bool is_select = false;
    char* pSql;
    int len;

    // Will succeed, as the packet is a contiguous COM_QUERY.
    ss_debug(int rc =) modutil_extract_SQL(pStmt, &pSql, &len);
    ss_dassert(rc == 1);

    char* pSql_end = pSql + len;

    pSql = modutil_MySQL_bypass_whitespace(pSql, len);

    const char SELECT[] = "SELECT";
    const char* pSelect = SELECT;
    const char* pSelect_end = pSelect + sizeof(SELECT) - 1;

    // Cast to unsigned char before the <ctype.h> calls: passing a negative
    // char (e.g. a UTF-8 byte >= 0x80 where char is signed) to toupper() or
    // isalpha() is undefined behavior per the C standard.
    while ((pSql < pSql_end) && (pSelect < pSelect_end) &&
           (toupper(static_cast<unsigned char>(*pSql)) == *pSelect))
    {
        ++pSql;
        ++pSelect;
    }

    if (pSelect == pSelect_end)
    {
        // The whole keyword matched; accept only if it is followed by a
        // non-alphabetic character or the end of the statement.
        if ((pSql == pSql_end) || !isalpha(static_cast<unsigned char>(*pSql)))
        {
            is_select = true;
        }
    }

    return is_select;
}
}
CacheFilterSession::CacheFilterSession(MXS_SESSION* pSession, Cache* pCache, char* zDefaultDb)
: maxscale::FilterSession(pSession)
@ -146,7 +186,7 @@ CacheFilterSession::CacheFilterSession(MXS_SESSION* pSession, Cache* pCache, cha
, m_refreshing(false)
, m_is_read_only(true)
{
memset(m_key.data, 0, CACHE_KEY_MAXLEN);
m_key.data = 0;
reset_response_state();
}
@ -772,7 +812,7 @@ bool CacheFilterSession::should_consult_cache(GWBUF* pPacket)
{
bool consult_cache = false;
uint32_t type_mask = qc_get_type_mask(pPacket);
uint32_t type_mask = qc_get_trx_type_mask(pPacket); // Note, only trx-related type mask
const char* zReason = NULL;
@ -781,13 +821,6 @@ bool CacheFilterSession::should_consult_cache(GWBUF* pPacket)
// When a transaction is started, we initially assume it is read-only.
m_is_read_only = true;
}
else if (!qc_query_is_type(type_mask, QUERY_TYPE_READ))
{
// Thereafter, if there's any non-read statement we mark it as non-readonly.
// Note that the state of m_is_read_only is not consulted if there is no
// on-going transaction of if there is an explicitly read-only transaction.
m_is_read_only = false;
}
if (!session_trx_is_active(m_pSession))
{
@ -823,31 +856,42 @@ bool CacheFilterSession::should_consult_cache(GWBUF* pPacket)
if (consult_cache)
{
if (qc_get_operation(pPacket) == QUERY_OP_SELECT)
if (is_select_statement(pPacket))
{
if (qc_query_is_type(type_mask, QUERY_TYPE_USERVAR_READ))
if (m_pCache->config().selects == CACHE_SELECTS_VERIFY_CACHEABLE)
{
consult_cache = false;
zReason = "user variables are read";
}
else if (qc_query_is_type(type_mask, QUERY_TYPE_SYSVAR_READ))
{
consult_cache = false;
zReason = "system variables are read";
}
else if (uses_non_cacheable_function(pPacket))
{
consult_cache = false;
zReason = "uses non-cacheable function";
}
else if (uses_non_cacheable_variable(pPacket))
{
consult_cache = false;
zReason = "uses non-cacheable variable";
// Note that the type mask must be obtained a new. A few lines
// above we only got the transaction state related type mask.
type_mask = qc_get_type_mask(pPacket);
if (qc_query_is_type(type_mask, QUERY_TYPE_USERVAR_READ))
{
consult_cache = false;
zReason = "user variables are read";
}
else if (qc_query_is_type(type_mask, QUERY_TYPE_SYSVAR_READ))
{
consult_cache = false;
zReason = "system variables are read";
}
else if (uses_non_cacheable_function(pPacket))
{
consult_cache = false;
zReason = "uses non-cacheable function";
}
else if (uses_non_cacheable_variable(pPacket))
{
consult_cache = false;
zReason = "uses non-cacheable variable";
}
}
}
else
{
// A bit broad, as e.g. SHOW will cause the read only state to be turned
// off. However, during normal use this will always be an UPDATE, INSERT
// or DELETE.
m_is_read_only = false;
consult_cache = false;
zReason = "statement is not SELECT";
}

View File

@ -50,13 +50,6 @@ bool CacheSimple::Create(const CACHE_CONFIG& config,
return pRules != NULL;
}
cache_result_t CacheSimple::get_key(const char* zDefaultDb,
const GWBUF* pQuery,
CACHE_KEY* pKey) const
{
return m_pStorage->get_key(zDefaultDb, pQuery, pKey);
}
cache_result_t CacheSimple::get_value(const CACHE_KEY& key,
uint32_t flags,
GWBUF** ppValue) const

View File

@ -25,8 +25,6 @@ class CacheSimple : public Cache
public:
~CacheSimple();
cache_result_t get_key(const char* zDefaultDb, const GWBUF* pQuery, CACHE_KEY* pKey) const;
cache_result_t get_value(const CACHE_KEY& key, uint32_t flags, GWBUF** ppValue) const;
cache_result_t put_value(const CACHE_KEY& key, const GWBUF* pValue);

View File

@ -41,13 +41,6 @@ void LRUStorage::get_config(CACHE_STORAGE_CONFIG* pConfig)
*pConfig = m_config;
}
cache_result_t LRUStorage::get_key(const char* zDefault_db,
const GWBUF* pQuery,
CACHE_KEY* pKey) const
{
return m_pStorage->get_key(zDefault_db, pQuery, pKey);
}
cache_result_t LRUStorage::do_get_info(uint32_t what,
json_t** ppInfo) const
{

View File

@ -28,13 +28,6 @@ public:
*/
void get_config(CACHE_STORAGE_CONFIG* pConfig);
/**
* @see Storage::get_key
*/
cache_result_t get_key(const char* zDefault_db,
const GWBUF* pQuery,
CACHE_KEY* pKey) const;
protected:
LRUStorage(const CACHE_STORAGE_CONFIG& config, Storage* pStorage);

View File

@ -19,11 +19,25 @@
#include <maxscale/alloc.h>
#include <maxscale/modutil.h>
#include <maxscale/mysql_utils.h>
#include <maxscale/platform.h>
#include <maxscale/protocol/mysql.h>
#include <maxscale/query_classifier.h>
#include <maxscale/session.h>
#include "cachefilter.h"
// Next id to hand out; bumped atomically when a thread first asks for an id.
static int next_thread_id = 0;
// This thread's id; stays -1 until the first call to get_current_thread_id().
static thread_local int current_thread_id = -1;

/**
 * Return a small integer id unique to the calling thread.
 *
 * The id is assigned lazily on the first call made by each thread and is
 * stable for the lifetime of that thread.
 */
inline int get_current_thread_id()
{
    if (current_thread_id != -1)
    {
        return current_thread_id;
    }

    current_thread_id = atomic_add(&next_thread_id, 1);

    return current_thread_id;
}
static const char KEY_ATTRIBUTE[] = "attribute";
static const char KEY_OP[] = "op";
static const char KEY_STORE[] = "store";
@ -68,8 +82,8 @@ static bool cache_rule_attribute_get(struct cache_attribute_mapping *mapping,
static bool cache_rule_op_get(const char *s, cache_rule_op_t *op);
static bool cache_rule_compare(CACHE_RULE *rule, const char *value);
static bool cache_rule_compare_n(CACHE_RULE *rule, const char *value, size_t length);
static bool cache_rule_compare(CACHE_RULE *rule, int thread_id, const char *value);
static bool cache_rule_compare_n(CACHE_RULE *rule, int thread_id, const char *value, size_t length);
static CACHE_RULE *cache_rule_create_regexp(cache_rule_attribute_t attribute,
cache_rule_op_t op,
const char *value,
@ -95,36 +109,42 @@ static CACHE_RULE *cache_rule_create(cache_rule_attribute_t attribute,
const char *value,
uint32_t debug);
static bool cache_rule_matches_column_regexp(CACHE_RULE *rule,
int thread_id,
const char *default_db,
const GWBUF *query);
static bool cache_rule_matches_column_simple(CACHE_RULE *rule,
const char *default_db,
const GWBUF *query);
static bool cache_rule_matches_column(CACHE_RULE *rule,
int thread_id,
const char *default_db,
const GWBUF *query);
static bool cache_rule_matches_database(CACHE_RULE *rule,
int thread_id,
const char *default_db,
const GWBUF *query);
static bool cache_rule_matches_query(CACHE_RULE *rule,
int thread_id,
const char *default_db,
const GWBUF *query);
static bool cache_rule_matches_table(CACHE_RULE *rule,
int thread_id,
const char *default_db,
const GWBUF *query);
static bool cache_rule_matches_table_regexp(CACHE_RULE *rule,
int thread_id,
const char *default_db,
const GWBUF *query);
static bool cache_rule_matches_table_simple(CACHE_RULE *rule,
const char *default_db,
const GWBUF *query);
static bool cache_rule_matches_user(CACHE_RULE *rule, const char *user);
static bool cache_rule_matches_user(CACHE_RULE *rule, int thread_id, const char *user);
static bool cache_rule_matches(CACHE_RULE *rule,
int thread_id,
const char *default_db,
const GWBUF *query);
static void cache_rule_free(CACHE_RULE *rule);
static bool cache_rule_matches(CACHE_RULE *rule, const char *default_db, const GWBUF *query);
static void cache_rules_add_store_rule(CACHE_RULES* self, CACHE_RULE* rule);
static void cache_rules_add_use_rule(CACHE_RULES* self, CACHE_RULE* rule);
@ -138,6 +158,9 @@ static bool cache_rules_parse_array(CACHE_RULES *self, json_t *store, const char
static bool cache_rules_parse_store_element(CACHE_RULES *self, json_t *object, size_t index);
static bool cache_rules_parse_use_element(CACHE_RULES *self, json_t *object, size_t index);
static pcre2_match_data** alloc_match_datas(int count, pcre2_code* code);
static void free_match_datas(int count, pcre2_match_data** datas);
/*
* API begin
*/
@ -297,7 +320,7 @@ void cache_rules_print(const CACHE_RULES *self, DCB *dcb, size_t indent)
}
}
bool cache_rules_should_store(CACHE_RULES *self, const char *default_db, const GWBUF* query)
bool cache_rules_should_store(CACHE_RULES *self, int thread_id, const char *default_db, const GWBUF* query)
{
bool should_store = false;
@ -307,7 +330,7 @@ bool cache_rules_should_store(CACHE_RULES *self, const char *default_db, const G
{
while (rule && !should_store)
{
should_store = cache_rule_matches(rule, default_db, query);
should_store = cache_rule_matches(rule, thread_id, default_db, query);
rule = rule->next;
}
}
@ -319,7 +342,7 @@ bool cache_rules_should_store(CACHE_RULES *self, const char *default_db, const G
return should_store;
}
bool cache_rules_should_use(CACHE_RULES *self, const MXS_SESSION *session)
bool cache_rules_should_use(CACHE_RULES *self, int thread_id, const MXS_SESSION *session)
{
bool should_use = false;
@ -344,7 +367,7 @@ bool cache_rules_should_use(CACHE_RULES *self, const MXS_SESSION *session)
while (rule && !should_use)
{
should_use = cache_rule_matches_user(rule, account);
should_use = cache_rule_matches_user(rule, thread_id, account);
rule = rule->next;
}
}
@ -404,12 +427,12 @@ const json_t* CacheRules::json() const
bool CacheRules::should_store(const char* zDefault_db, const GWBUF* pQuery) const
{
return cache_rules_should_store(m_pRules, zDefault_db, pQuery);
return cache_rules_should_store(m_pRules, get_current_thread_id(), zDefault_db, pQuery);
}
bool CacheRules::should_use(const MXS_SESSION* pSession) const
{
return cache_rules_should_use(m_pRules, pSession);
return cache_rules_should_use(m_pRules, get_current_thread_id(), pSession);
}
/*
@ -507,9 +530,15 @@ static CACHE_RULE *cache_rule_create_regexp(cache_rule_attribute_t attribute,
if (code)
{
pcre2_match_data *data = pcre2_match_data_create_from_pattern(code, NULL);
// We do not care about the result. If JIT is not present, we have
// complained about it already.
pcre2_jit_compile(code, PCRE2_JIT_COMPLETE);
if (data)
int n_threads = config_threadcount();
pcre2_match_data **datas = alloc_match_datas(n_threads, code);
if (datas)
{
rule = (CACHE_RULE*)MXS_CALLOC(1, sizeof(CACHE_RULE));
char* value = MXS_STRDUP(cvalue);
@ -520,14 +549,14 @@ static CACHE_RULE *cache_rule_create_regexp(cache_rule_attribute_t attribute,
rule->op = op;
rule->value = value;
rule->regexp.code = code;
rule->regexp.data = data;
rule->regexp.datas = datas;
rule->debug = debug;
}
else
{
MXS_FREE(value);
MXS_FREE(rule);
pcre2_match_data_free(data);
free_match_datas(n_threads, datas);
pcre2_code_free(code);
}
}
@ -967,7 +996,7 @@ static void cache_rule_free(CACHE_RULE* rule)
}
else if ((rule->op == CACHE_OP_LIKE) || (rule->op == CACHE_OP_UNLIKE))
{
pcre2_match_data_free(rule->regexp.data);
free_match_datas(config_threadcount(), rule->regexp.datas);
pcre2_code_free(rule->regexp.code);
}
@ -978,18 +1007,19 @@ static void cache_rule_free(CACHE_RULE* rule)
/**
* Check whether a value matches a rule.
*
* @param self The rule object.
* @param value The value to check.
* @param self The rule object.
* @param thread_id The thread id of the calling thread.
* @param value The value to check.
*
* @return True if the value matches, false otherwise.
*/
static bool cache_rule_compare(CACHE_RULE *self, const char *value)
static bool cache_rule_compare(CACHE_RULE *self, int thread_id, const char *value)
{
bool rv;
if (value)
{
rv = cache_rule_compare_n(self, value, strlen(value));
rv = cache_rule_compare_n(self, thread_id, value, strlen(value));
}
else
{
@ -1009,13 +1039,14 @@ static bool cache_rule_compare(CACHE_RULE *self, const char *value)
/**
* Check whether a value matches a rule.
*
* @param self The rule object.
* @param value The value to check.
* @param len The length of value.
* @param self The rule object.
* @param thread_id The thread id of the calling thread.
* @param value The value to check.
* @param len The length of value.
*
* @return True if the value matches, false otherwise.
*/
static bool cache_rule_compare_n(CACHE_RULE *self, const char *value, size_t length)
static bool cache_rule_compare_n(CACHE_RULE *self, int thread_id, const char *value, size_t length)
{
bool compares = false;
@ -1028,9 +1059,10 @@ static bool cache_rule_compare_n(CACHE_RULE *self, const char *value, size_t len
case CACHE_OP_LIKE:
case CACHE_OP_UNLIKE:
ss_dassert((thread_id >= 0) && (thread_id < config_threadcount()));
compares = (pcre2_match(self->regexp.code,
(PCRE2_SPTR)value, length,
0, 0, self->regexp.data, NULL) >= 0);
0, 0, self->regexp.datas[thread_id], NULL) >= 0);
break;
default:
@ -1049,12 +1081,16 @@ static bool cache_rule_compare_n(CACHE_RULE *self, const char *value, size_t len
* Returns boolean indicating whether the column rule matches the query or not.
*
* @param self The CACHE_RULE object.
* @param thread_id The thread id of current thread.
* @param default_db The current default db.
* @param query The query.
*
* @return True, if the rule matches, false otherwise.
*/
static bool cache_rule_matches_column_regexp(CACHE_RULE *self, const char *default_db, const GWBUF *query)
static bool cache_rule_matches_column_regexp(CACHE_RULE *self,
int thread_id,
const char *default_db,
const GWBUF *query)
{
ss_dassert(self->attribute == CACHE_ATTRIBUTE_COLUMN);
ss_dassert((self->op == CACHE_OP_LIKE) || (self->op == CACHE_OP_UNLIKE));
@ -1154,7 +1190,7 @@ static bool cache_rule_matches_column_regexp(CACHE_RULE *self, const char *defau
strcat(buffer, info->column);
matches = cache_rule_compare(self, buffer);
matches = cache_rule_compare(self, thread_id, buffer);
}
++i;
@ -1346,12 +1382,16 @@ static bool cache_rule_matches_column_simple(CACHE_RULE *self, const char *defau
* Returns boolean indicating whether the column rule matches the query or not.
*
* @param self The CACHE_RULE object.
* @param thread_id The thread id of current thread.
* @param default_db The current default db.
* @param query The query.
*
* @return True, if the rule matches, false otherwise.
*/
static bool cache_rule_matches_column(CACHE_RULE *self, const char *default_db, const GWBUF *query)
static bool cache_rule_matches_column(CACHE_RULE *self,
int thread_id,
const char *default_db,
const GWBUF *query)
{
ss_dassert(self->attribute == CACHE_ATTRIBUTE_COLUMN);
@ -1366,7 +1406,7 @@ static bool cache_rule_matches_column(CACHE_RULE *self, const char *default_db,
case CACHE_OP_LIKE:
case CACHE_OP_UNLIKE:
matches = cache_rule_matches_column_regexp(self, default_db, query);
matches = cache_rule_matches_column_regexp(self, thread_id, default_db, query);
break;
default:
@ -1380,12 +1420,16 @@ static bool cache_rule_matches_column(CACHE_RULE *self, const char *default_db,
* Returns boolean indicating whether the database rule matches the query or not.
*
* @param self The CACHE_RULE object.
* @param thread_id The thread id of current thread.
* @param default_db The current default db.
* @param query The query.
*
* @return True, if the rule matches, false otherwise.
*/
static bool cache_rule_matches_database(CACHE_RULE *self, const char *default_db, const GWBUF *query)
static bool cache_rule_matches_database(CACHE_RULE *self,
int thread_id,
const char *default_db,
const GWBUF *query)
{
ss_dassert(self->attribute == CACHE_ATTRIBUTE_DATABASE);
@ -1415,7 +1459,7 @@ static bool cache_rule_matches_database(CACHE_RULE *self, const char *default_db
database = default_db;
}
matches = cache_rule_compare(self, database);
matches = cache_rule_compare(self, thread_id, database);
MXS_FREE(name);
++i;
@ -1435,13 +1479,17 @@ static bool cache_rule_matches_database(CACHE_RULE *self, const char *default_db
/**
* Returns boolean indicating whether the query rule matches the query or not.
*
* @param self The CACHE_RULE object.
* @param default_db The current default db.
* @param query The query.
* @param self The CACHE_RULE object.
* @param thread_id The thread id of the calling thread.
* @param default_db The current default db.
* @param query The query.
*
* @return True, if the rule matches, false otherwise.
*/
static bool cache_rule_matches_query(CACHE_RULE *self, const char *default_db, const GWBUF *query)
static bool cache_rule_matches_query(CACHE_RULE *self,
int thread_id,
const char *default_db,
const GWBUF *query)
{
ss_dassert(self->attribute == CACHE_ATTRIBUTE_QUERY);
@ -1451,19 +1499,23 @@ static bool cache_rule_matches_query(CACHE_RULE *self, const char *default_db, c
// Will succeed, query contains a contiguous COM_QUERY.
modutil_extract_SQL((GWBUF*)query, &sql, &len);
return cache_rule_compare_n(self, sql, len);
return cache_rule_compare_n(self, thread_id, sql, len);
}
/**
* Returns boolean indicating whether the table regexp rule matches the query or not.
*
* @param self The CACHE_RULE object.
* @param thread_id The thread id of current thread.
* @param default_db The current default db.
* @param query The query.
*
* @return True, if the rule matches, false otherwise.
*/
static bool cache_rule_matches_table_regexp(CACHE_RULE *self, const char *default_db, const GWBUF *query)
static bool cache_rule_matches_table_regexp(CACHE_RULE *self,
int thread_id,
const char *default_db,
const GWBUF *query)
{
ss_dassert(self->attribute == CACHE_ATTRIBUTE_TABLE);
ss_dassert((self->op == CACHE_OP_LIKE) || (self->op == CACHE_OP_UNLIKE));
@ -1499,11 +1551,11 @@ static bool cache_rule_matches_table_regexp(CACHE_RULE *self, const char *defaul
strcpy(name + default_db_len, ".");
strcpy(name + default_db_len + 1, name);
matches = cache_rule_compare(self, name);
matches = cache_rule_compare(self, thread_id, name);
}
else
{
matches = cache_rule_compare(self, name);
matches = cache_rule_compare(self, thread_id, name);
}
MXS_FREE(names[i]);
@ -1511,7 +1563,7 @@ static bool cache_rule_matches_table_regexp(CACHE_RULE *self, const char *defaul
else
{
// A qualified name "db.tbl".
matches = cache_rule_compare(self, name);
matches = cache_rule_compare(self, thread_id, name);
}
++i;
@ -1626,12 +1678,16 @@ static bool cache_rule_matches_table_simple(CACHE_RULE *self, const char *defaul
* Returns boolean indicating whether the table rule matches the query or not.
*
* @param self The CACHE_RULE object.
* @param thread_id The thread id of current thread.
* @param default_db The current default db.
* @param query The query.
*
* @return True, if the rule matches, false otherwise.
*/
static bool cache_rule_matches_table(CACHE_RULE *self, const char *default_db, const GWBUF *query)
static bool cache_rule_matches_table(CACHE_RULE *self,
int thread_id,
const char *default_db,
const GWBUF *query)
{
ss_dassert(self->attribute == CACHE_ATTRIBUTE_TABLE);
@ -1646,7 +1702,7 @@ static bool cache_rule_matches_table(CACHE_RULE *self, const char *default_db, c
case CACHE_OP_LIKE:
case CACHE_OP_UNLIKE:
matches = cache_rule_matches_table_regexp(self, default_db, query);
matches = cache_rule_matches_table_regexp(self, thread_id, default_db, query);
break;
default:
@ -1659,16 +1715,17 @@ static bool cache_rule_matches_table(CACHE_RULE *self, const char *default_db, c
/**
* Returns boolean indicating whether the user rule matches the account or not.
*
* @param self The CACHE_RULE object.
* @param account The account.
* @param self The CACHE_RULE object.
* @param thread_id The thread id of current thread.
* @param account The account.
*
* @return True, if the rule matches, false otherwise.
*/
static bool cache_rule_matches_user(CACHE_RULE *self, const char *account)
static bool cache_rule_matches_user(CACHE_RULE *self, int thread_id, const char *account)
{
ss_dassert(self->attribute == CACHE_ATTRIBUTE_USER);
bool matches = cache_rule_compare(self, account);
bool matches = cache_rule_compare(self, thread_id, account);
if ((matches && (self->debug & CACHE_DEBUG_MATCHING)) ||
(!matches && (self->debug & CACHE_DEBUG_NON_MATCHING)))
@ -1698,31 +1755,32 @@ static bool cache_rule_matches_user(CACHE_RULE *self, const char *account)
* Returns boolean indicating whether the rule matches the query or not.
*
* @param self The CACHE_RULE object.
* @param thread_id The thread id of the calling thread.
* @param default_db The current default db.
* @param query The query.
*
* @return True, if the rule matches, false otherwise.
*/
static bool cache_rule_matches(CACHE_RULE *self, const char *default_db, const GWBUF *query)
static bool cache_rule_matches(CACHE_RULE *self, int thread_id, const char *default_db, const GWBUF *query)
{
bool matches = false;
switch (self->attribute)
{
case CACHE_ATTRIBUTE_COLUMN:
matches = cache_rule_matches_column(self, default_db, query);
matches = cache_rule_matches_column(self, thread_id, default_db, query);
break;
case CACHE_ATTRIBUTE_DATABASE:
matches = cache_rule_matches_database(self, default_db, query);
matches = cache_rule_matches_database(self, thread_id, default_db, query);
break;
case CACHE_ATTRIBUTE_TABLE:
matches = cache_rule_matches_table(self, default_db, query);
matches = cache_rule_matches_table(self, thread_id, default_db, query);
break;
case CACHE_ATTRIBUTE_QUERY:
matches = cache_rule_matches_query(self, default_db, query);
matches = cache_rule_matches_query(self, thread_id, default_db, query);
break;
case CACHE_ATTRIBUTE_USER:
@ -1884,6 +1942,10 @@ static bool cache_rules_parse_json(CACHE_RULES *self, json_t *root)
MXS_ERROR("The cache rules object contains a `%s` key, but it is not an array.", KEY_USE);
}
}
else
{
parsed = true;
}
}
return parsed;
@ -2029,3 +2091,59 @@ static bool cache_rules_parse_use_element(CACHE_RULES *self, json_t *object, siz
return rule != NULL;
}
/**
 * Allocates an array of pcre2 match datas, one per thread.
 *
 * @param count How many match datas should be allocated.
 * @param code  The compiled pattern the match datas are sized for.
 *
 * @return Array of the specified length, or NULL if any allocation failed.
 */
static pcre2_match_data** alloc_match_datas(int count, pcre2_code* code)
{
    // MXS_CALLOC zero-fills, so every slot starts out as NULL.
    pcre2_match_data** datas = (pcre2_match_data**)MXS_CALLOC(count, sizeof(pcre2_match_data*));

    if (datas)
    {
        int n_created = 0;

        while (n_created < count)
        {
            pcre2_match_data* data = pcre2_match_data_create_from_pattern(code, NULL);

            if (!data)
            {
                break;
            }

            datas[n_created++] = data;
        }

        if (n_created != count)
        {
            // A creation failed midway; release everything created so far
            // and report total failure to the caller.
            while (n_created > 0)
            {
                pcre2_match_data_free(datas[--n_created]);
            }

            MXS_FREE(datas);
            datas = NULL;
        }
    }

    return datas;
}
/**
 * Frees an array of pcre2 match datas.
 *
 * @param count The length of the array.
 * @param datas The array of pcre2 match datas; may contain NULL entries,
 *              which pcre2_match_data_free() ignores.
 */
static void free_match_datas(int count, pcre2_match_data** datas)
{
    int i = 0;

    while (i < count)
    {
        pcre2_match_data_free(datas[i]);
        ++i;
    }

    MXS_FREE(datas);
}

View File

@ -54,8 +54,8 @@ typedef struct cache_rule
} simple; // Details, only for CACHE_OP_[EQ|NEQ]
struct
{
pcre2_code *code;
pcre2_match_data *data;
pcre2_code *code;
pcre2_match_data **datas;
} regexp; // Regexp data, only for CACHE_OP_[LIKE|UNLIKE].
uint32_t debug; // The debug level.
struct cache_rule *next;
@ -137,22 +137,24 @@ void cache_rules_print(const CACHE_RULES *rules, DCB* dcb, size_t indent);
* Returns boolean indicating whether the result of the query should be stored.
*
* @param rules The CACHE_RULES object.
* @param thread_id The thread id of current thread.
* @param default_db The current default database, NULL if there is none.
* @param query The query, expected to contain a COM_QUERY.
*
* @return True, if the results should be stored.
*/
bool cache_rules_should_store(CACHE_RULES *rules, const char *default_db, const GWBUF* query);
bool cache_rules_should_store(CACHE_RULES *rules, int thread_id, const char *default_db, const GWBUF* query);
/**
* Returns boolean indicating whether the cache should be used, that is consulted.
*
* @param rules The CACHE_RULES object.
* @param session The current session.
* @param rules The CACHE_RULES object.
* @param thread_id The thread id of current thread.
* @param session The current session.
*
* @return True, if the cache should be used.
*/
bool cache_rules_should_use(CACHE_RULES *rules, const MXS_SESSION *session);
bool cache_rules_should_use(CACHE_RULES *rules, int thread_id, const MXS_SESSION *session);
MXS_END_DECLS

View File

@ -44,19 +44,6 @@ public:
*/
virtual cache_result_t get_info(uint32_t what, json_t** ppInfo) const = 0;
/**
* Create a key for a GWBUF.
*
* @param zDefaultDb The default DB or NULL.
* @param query An SQL query. Must be one contiguous buffer.
* @param pKey Pointer to object where key will be stored.
*
* @return CACHE_RESULT_OK if a key was created, otherwise some error code.
*/
virtual cache_result_t get_key(const char* zDefaultDb,
const GWBUF* pQuery,
CACHE_KEY* pKey) const = 0;
/**
* Get a value from the cache.
*

View File

@ -13,10 +13,6 @@
#define MXS_MODULE_NAME "storage_inmemory"
#include "inmemorystorage.hh"
#include <openssl/sha.h>
#include <algorithm>
#include <memory>
#include <set>
#include <maxscale/alloc.h>
#include <maxscale/modutil.h>
#include <maxscale/query_classifier.h>
@ -24,7 +20,6 @@
#include "inmemorystoragemt.hh"
using std::auto_ptr;
using std::set;
using std::string;
@ -96,67 +91,6 @@ InMemoryStorage* InMemoryStorage::Create_instance(const char* zName,
return sStorage.release();
}
cache_result_t InMemoryStorage::Get_key(const char* zDefault_db, const GWBUF& query, CACHE_KEY* pKey)
{
ss_dassert(GWBUF_IS_CONTIGUOUS(&query));
int n;
bool fullnames = true;
char** pzTables = qc_get_table_names(const_cast<GWBUF*>(&query), &n, fullnames);
set<string> dbs; // Elements in set are sorted.
for (int i = 0; i < n; ++i)
{
char *zTable = pzTables[i];
char *zDot = strchr(zTable, '.');
if (zDot)
{
*zDot = 0;
dbs.insert(zTable);
}
else if (zDefault_db)
{
// If zdefault_db is NULL, then there will be a table for which we
// do not know the database. However, that will fail in the server,
// so nothing will be stored.
dbs.insert(zDefault_db);
}
MXS_FREE(zTable);
}
MXS_FREE(pzTables);
// dbs now contain each accessed database in sorted order. Now copy them to a single string.
string tag;
for (set<string>::const_iterator i = dbs.begin(); i != dbs.end(); ++i)
{
tag.append(*i);
}
memset(pKey->data, 0, CACHE_KEY_MAXLEN);
const unsigned char* pData;
// We store the databases in the first half of the key. That will ensure that
// identical queries targeting different default databases will not clash.
// This will also mean that entries related to the same databases will
// be placed near each other.
pData = reinterpret_cast<const unsigned char*>(tag.data());
SHA512(pData, tag.length(), reinterpret_cast<unsigned char*>(pKey->data));
char *pSql;
int length;
modutil_extract_SQL(const_cast<GWBUF*>(&query), &pSql, &length);
// Then we store the query itself in the second half of the key.
pData = reinterpret_cast<const unsigned char*>(pSql);
SHA512(pData, length, reinterpret_cast<unsigned char*>(pKey->data) + SHA512_DIGEST_LENGTH);
return CACHE_RESULT_OK;
}
void InMemoryStorage::get_config(CACHE_STORAGE_CONFIG* pConfig)
{
*pConfig = m_config;

View File

@ -30,8 +30,6 @@ public:
const CACHE_STORAGE_CONFIG& config,
int argc, char* argv[]);
static cache_result_t Get_key(const char* zDefault_db, const GWBUF& query, CACHE_KEY* pKey);
void get_config(CACHE_STORAGE_CONFIG* pConfig);
virtual cache_result_t get_info(uint32_t what, json_t** ppInfo) const = 0;
virtual cache_result_t get_value(const CACHE_KEY& key, uint32_t flags, GWBUF** ppResult) = 0;

View File

@ -13,12 +13,10 @@
#define MXS_MODULE_NAME "storage_rocksdb"
#include "rocksdbstorage.hh"
#include <openssl/sha.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <fts.h>
#include <algorithm>
#include <set>
#include <rocksdb/env.h>
#include <rocksdb/statistics.h>
#include <maxscale/alloc.h>
@ -28,7 +26,6 @@
#include "rocksdbinternals.hh"
using std::for_each;
using std::set;
using std::string;
using std::unique_ptr;
@ -36,12 +33,6 @@ using std::unique_ptr;
namespace
{
const size_t ROCKSDB_KEY_LENGTH = 2 * SHA512_DIGEST_LENGTH;
#if ROCKSDB_KEY_LENGTH > CACHE_KEY_MAXLEN
#error storage_rocksdb key is too long.
#endif
// See https://github.com/facebook/rocksdb/wiki/Basic-Operations#thread-pools
// These figures should perhaps depend upon the number of cache instances.
const size_t ROCKSDB_N_LOW_THREADS = 2;
@ -314,67 +305,6 @@ RocksDBStorage* RocksDBStorage::Create(const char* zName,
return sStorage.release();
}
cache_result_t RocksDBStorage::Get_key(const char* zDefault_db, const GWBUF& query, CACHE_KEY* pKey)
{
ss_dassert(GWBUF_IS_CONTIGUOUS(&query));
int n;
bool fullnames = true;
char** pzTables = qc_get_table_names(const_cast<GWBUF*>(&query), &n, fullnames);
set<string> dbs; // Elements in set are sorted.
for (int i = 0; i < n; ++i)
{
char *zTable = pzTables[i];
char *zDot = strchr(zTable, '.');
if (zDot)
{
*zDot = 0;
dbs.insert(zTable);
}
else if (zDefault_db)
{
// If zDefaultDB is NULL, then there will be a table for which we
// do not know the database. However, that will fail in the server,
// so nothing will be stored.
dbs.insert(zDefault_db);
}
MXS_FREE(zTable);
}
MXS_FREE(pzTables);
// dbs now contain each accessed database in sorted order. Now copy them to a single string.
string tag;
for_each(dbs.begin(), dbs.end(), [&tag](const string & db)
{
tag.append(db);
});
memset(pKey->data, 0, CACHE_KEY_MAXLEN);
const unsigned char* pData;
// We store the databases in the first half of the key. That will ensure that
// identical queries targeting different default databases will not clash.
// This will also mean that entries related to the same databases will
// be placed near each other.
pData = reinterpret_cast<const unsigned char*>(tag.data());
SHA512(pData, tag.length(), reinterpret_cast<unsigned char*>(pKey->data));
char *pSql;
int length;
modutil_extract_SQL(const_cast<GWBUF*>(&query), &pSql, &length);
// Then we store the query itself in the second half of the key.
pData = reinterpret_cast<const unsigned char*>(pSql);
SHA512(pData, length, reinterpret_cast<unsigned char*>(pKey->data) + SHA512_DIGEST_LENGTH);
return CACHE_RESULT_OK;
}
void RocksDBStorage::get_config(CACHE_STORAGE_CONFIG* pConfig)
{
*pConfig = m_config;
@ -410,7 +340,7 @@ cache_result_t RocksDBStorage::get_value(const CACHE_KEY& key, uint32_t flags, G
{
// Use the root DB so that we get the value *with* the timestamp at the end.
rocksdb::DB* pDb = m_sDb->GetRootDB();
rocksdb::Slice rocksdb_key(key.data, ROCKSDB_KEY_LENGTH);
rocksdb::Slice rocksdb_key(reinterpret_cast<const char*>(&key.data), sizeof(key.data));
string value;
rocksdb::Status status = pDb->Get(rocksdb::ReadOptions(), rocksdb_key, &value);
@ -497,7 +427,7 @@ cache_result_t RocksDBStorage::put_value(const CACHE_KEY& key, const GWBUF& valu
{
ss_dassert(GWBUF_IS_CONTIGUOUS(&value));
rocksdb::Slice rocksdb_key(key.data, ROCKSDB_KEY_LENGTH);
rocksdb::Slice rocksdb_key(reinterpret_cast<const char*>(&key.data), sizeof(key.data));
rocksdb::Slice rocksdb_value((char*)GWBUF_DATA(&value), GWBUF_LENGTH(&value));
rocksdb::Status status = m_sDb->Put(Write_options(), rocksdb_key, rocksdb_value);
@ -507,7 +437,7 @@ cache_result_t RocksDBStorage::put_value(const CACHE_KEY& key, const GWBUF& valu
cache_result_t RocksDBStorage::del_value(const CACHE_KEY& key)
{
rocksdb::Slice rocksdb_key(key.data, ROCKSDB_KEY_LENGTH);
rocksdb::Slice rocksdb_key(reinterpret_cast<const char*>(&key.data), sizeof(key.data));
rocksdb::Status status = m_sDb->Delete(Write_options(), rocksdb_key);

View File

@ -30,8 +30,6 @@ public:
int argc, char* argv[]);
~RocksDBStorage();
static cache_result_t Get_key(const char* zDefault_db, const GWBUF& query, CACHE_KEY* pKey);
void get_config(CACHE_STORAGE_CONFIG* pConfig);
cache_result_t get_info(uint32_t flags, json_t** ppInfo) const;
cache_result_t get_value(const CACHE_KEY& key, uint32_t flags, GWBUF** ppResult);

View File

@ -37,21 +37,6 @@ public:
return reinterpret_cast<CACHE_STORAGE*>(pStorage);
}
static cache_result_t getKey(const char* zDefault_db,
const GWBUF* pQuery,
CACHE_KEY* pKey)
{
// zdefault_db may be NULL.
ss_dassert(pQuery);
ss_dassert(pKey);
cache_result_t result = CACHE_RESULT_ERROR;
MXS_EXCEPTION_GUARD(result = StorageType::Get_key(zDefault_db, *pQuery, pKey));
return result;
}
static void freeInstance(CACHE_STORAGE* pInstance)
{
MXS_EXCEPTION_GUARD(delete reinterpret_cast<StorageType*>(pInstance));
@ -196,7 +181,6 @@ CACHE_STORAGE_API StorageModule<StorageType>::s_api =
{
&StorageModule<StorageType>::initialize,
&StorageModule<StorageType>::createInstance,
&StorageModule<StorageType>::getKey,
&StorageModule<StorageType>::freeInstance,
&StorageModule<StorageType>::getConfig,
&StorageModule<StorageType>::getInfo,

View File

@ -223,11 +223,3 @@ Storage* StorageFactory::createRawStorage(const char* zName,
return pStorage;
}
cache_result_t StorageFactory::get_key(const char* zDefaultDb,
const GWBUF* pQuery,
CACHE_KEY* pKey) const
{
return m_pApi->getKey(zDefaultDb, pQuery, pKey);
}

View File

@ -85,19 +85,6 @@ public:
const CACHE_STORAGE_CONFIG& config,
int argc = 0, char* argv[] = NULL);
/**
* Create a key for a GWBUF.
*
* @param zDefaultDb The default DB or NULL.
* @param query An SQL query. Must be one contiguous buffer.
* @param pKey Pointer to object where key will be stored.
*
* @return CACHE_RESULT_OK if a key was created, otherwise some error code.
*/
cache_result_t get_key(const char* zDefaultDb,
const GWBUF* pQuery,
CACHE_KEY* pKey) const;
private:
StorageFactory(void* handle, CACHE_STORAGE_API* pApi, uint32_t capabilities);

View File

@ -38,13 +38,6 @@ cache_result_t StorageReal::get_info(uint32_t flags, json_t** ppInfo) const
return m_pApi->getInfo(m_pStorage, flags, ppInfo);
}
cache_result_t StorageReal::get_key(const char* zDefaultDb,
const GWBUF* pQuery,
CACHE_KEY* pKey) const
{
return m_pApi->getKey(zDefaultDb, pQuery, pKey);
}
cache_result_t StorageReal::get_value(const CACHE_KEY& key,
uint32_t flags,
GWBUF** ppValue) const

View File

@ -25,10 +25,6 @@ public:
cache_result_t get_info(uint32_t flags,
json_t** ppInfo) const;
cache_result_t get_key(const char* zDefaultDb,
const GWBUF* pQuery,
CACHE_KEY* pKey) const;
cache_result_t get_value(const CACHE_KEY& key,
uint32_t flags,
GWBUF** ppValue) const;

View File

@ -15,6 +15,7 @@
#include <algorithm>
#include <iostream>
#include <set>
#include "cache.hh"
#include "storagefactory.hh"
// TODO: Move this to a common place.
#include "../../../../../query_classifier/test/testreader.hh"
@ -245,7 +246,7 @@ bool Tester::get_cache_items(const Statements& statements,
if (pQuery)
{
CACHE_KEY key;
cache_result_t result = factory.get_key(NULL, pQuery, &key);
cache_result_t result = Cache::get_default_key(NULL, pQuery, &key);
if (result == CACHE_RESULT_OK)
{

View File

@ -178,7 +178,7 @@ int TesterStorage::run(size_t n_threads,
CacheKey key;
sprintf(key.data, "%lu", i);
key.data = i;
vector<uint8_t> value(size, static_cast<uint8_t>(i));

View File

@ -18,6 +18,7 @@
#include <maxscale/query_classifier.h>
#include <maxscale/log_manager.h>
#include "storagefactory.hh"
#include "cache.hh"
#include "cache_storage_api.hh"
#include "tester.hh"
@ -60,7 +61,7 @@ int test(StorageFactory& factory, istream& in)
if (pQuery)
{
CACHE_KEY key;
cache_result_t result = factory.get_key(NULL, pQuery, &key);
cache_result_t result = Cache::get_default_key(NULL, pQuery, &key);
if (result == CACHE_RESULT_OK)
{
@ -127,7 +128,7 @@ int main(int argc, char* argv[])
{
if (mxs_log_init(NULL, ".", MXS_LOG_TARGET_DEFAULT))
{
if (qc_setup(NULL, NULL) && qc_process_init())
if (qc_setup(NULL, NULL) && qc_process_init(QC_INIT_BOTH))
{
const char* zModule = argv[1];
@ -158,7 +159,7 @@ int main(int argc, char* argv[])
cerr << "error: Could not initialize factory." << endl;
}
qc_process_end();
qc_process_end(QC_INIT_BOTH);
}
else
{

View File

@ -195,7 +195,7 @@ int test_store()
GWBUF *packet = create_gwbuf(test_case->query);
bool matches = cache_rules_should_store(rules, test_case->default_db, packet);
bool matches = cache_rules_should_store(rules, 0, test_case->default_db, packet);
if (matches != test_case->matches)
{
@ -237,12 +237,12 @@ int main()
if (mxs_log_init(NULL, ".", MXS_LOG_TARGET_DEFAULT))
{
set_libdir(MXS_STRDUP_A("../../../../../query_classifier/qc_sqlite/"));
if (qc_setup("qc_sqlite", "") && qc_process_init())
if (qc_setup("qc_sqlite", "") && qc_process_init(QC_INIT_BOTH))
{
set_libdir(MXS_STRDUP_A("../"));
rc = test();
qc_process_end();
qc_process_end(QC_INIT_BOTH);
}
else
{

View File

@ -50,7 +50,7 @@ int TestStorage::run(int argc, char** argv)
{
if (mxs_log_init(NULL, ".", MXS_LOG_TARGET_DEFAULT))
{
if (qc_setup(NULL, NULL) && qc_process_init())
if (qc_setup(NULL, NULL) && qc_process_init(QC_INIT_BOTH))
{
const char* zModule = NULL;
size_t threads = m_threads;
@ -113,6 +113,8 @@ int TestStorage::run(int argc, char** argv)
{
cerr << "error: Could not initialize factory " << zModule << "." << endl;
}
qc_process_end(QC_INIT_BOTH);
}
else
{

View File

@ -2006,7 +2006,7 @@ bool rule_matches(FW_INSTANCE* my_instance,
if (is_sql)
{
qc_parse_result_t parse_result = qc_parse(queue);
qc_parse_result_t parse_result = qc_parse(queue, QC_COLLECT_ALL);
if (parse_result == QC_QUERY_INVALID)
{

View File

@ -109,7 +109,6 @@ class AccountRegexp : public MaskingRules::Rule::Account
public:
~AccountRegexp()
{
pcre2_match_data_free(m_pData);
pcre2_code_free(m_pCode);
}
@ -126,24 +125,10 @@ public:
{
Closer<pcre2_code*> code(pCode);
pcre2_match_data* pData = pcre2_match_data_create_from_pattern(pCode, NULL);
sAccount = shared_ptr<AccountRegexp>(new AccountRegexp(user, host, pCode));
if (pData)
{
Closer<pcre2_match_data*> data(pData);
sAccount = shared_ptr<AccountRegexp>(new AccountRegexp(user, host, pCode, pData));
// Ownership of pCode and pData has been moved to the
// AccountRegexp instance.
data.release();
code.release();
}
else
{
MXS_ERROR("PCRE2 match data creation failed. Most likely due to a "
"lack of available memory.");
}
// Ownership of pCode has been moved to the AccountRegexp object.
code.release();
}
else
{
@ -171,20 +156,31 @@ public:
ss_dassert(zUser);
ss_dassert(zHost);
return
(m_user.empty() || (m_user == zUser)) &&
pcre2_match(m_pCode, (PCRE2_SPTR)zHost, 0, 0, 0, m_pData, NULL) >= 0;
bool rv = (m_user.empty() || (m_user == zUser));
if (rv)
{
ss_dassert(m_pCode);
pcre2_match_data* pData = pcre2_match_data_create_from_pattern(m_pCode, NULL);
if (pData)
{
Closer<pcre2_match_data*> data(pData);
rv = (pcre2_match(m_pCode, (PCRE2_SPTR)zHost, 0, 0, 0, pData, NULL) >= 0);
}
}
return rv;
}
private:
AccountRegexp(const string& user,
const string& host,
pcre2_code* pCode,
pcre2_match_data* pData)
pcre2_code* pCode)
: m_user(user)
, m_host(host)
, m_pCode(pCode)
, m_pData(pData)
{
}
@ -192,10 +188,9 @@ private:
AccountRegexp& operator = (const AccountRegexp&);
private:
string m_user;
string m_host;
pcre2_code* m_pCode;
pcre2_match_data* m_pData;
string m_user;
string m_host;
pcre2_code* m_pCode;
};
/**

Some files were not shown because too many files have changed in this diff Show More