Merge branch '2.2' into develop
@@ -991,30 +991,12 @@ GRANT SELECT ON mysql.roles_mapping TO 'maxscale'@'maxscalehost';
 GRANT SHOW DATABASES ON *.* TO 'maxscale'@'maxscalehost';
 ```
 
-MariaDB MaxScale will execute the following query to retrieve the users. If you
-suspect that you might have problems with grants, it is recommended to run this
-query and see the results it returns.
-
-```
-SELECT DISTINCT
-    user.user AS user,
-    user.host AS host,
-    user.password AS password,
-    concat(user.user, user.host, user.password,
-        IF((user.Select_priv+0)||find_in_set('Select',Coalesce(tp.Table_priv,0)),'Y','N'),
-        COALESCE(db.db, tp.db, '')) AS userdata,
-    user.Select_priv AS anydb,
-    COALESCE(db.db, tp.db, NULL) AS db
-FROM
-    mysql.user LEFT JOIN
-    mysql.db ON user.user=db.user AND user.host=db.host LEFT JOIN
-    mysql.tables_priv tp ON user.user=tp.user AND user.host=tp.host
-WHERE user.user IS NOT NULL AND user.user <> ''
-```
-
-In versions of MySQL 5.7.6 and later, the `Password` column was replaced by
-`authentication_string`. Replace `user.password` above with
-`user.authentication_string`.
+See [MaxScale Troubleshooting](https://mariadb.com/kb/en/mariadb-enterprise/maxscale-troubleshooting/)
+for more information on how to troubleshoot authentication related problems.
+
+**Note:** Due to a bug in MariaDB 10.2.9, if you see a
+`SELECT command denied to user ... for table 'users'`
+error, grant `SELECT ON mysql.*` to this user.
 
 <a id="passwd"></a>
 #### `password`
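
The note above gives the fix in prose. As a concrete sketch, assuming the
`'maxscale'@'maxscalehost'` service account used in the surrounding examples,
the workaround grant would look like this:

```
-- Workaround for the MariaDB 10.2.9 bug (MDEV-13453): the MaxScale service
-- user needs read access to the mysql database to load user accounts.
GRANT SELECT ON mysql.* TO 'maxscale'@'maxscalehost';
```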
@@ -4,12 +4,6 @@ This tutorial is a short introduction to the
 [Avrorouter](../Routers/Avrorouter.md), how to set it up and how it interacts
 with the binlogrouter.
 
-The avrorouter can also be deployed directly on the master server which removes
-the need to use the binlogrouter. This does require a lot more disk space on the
-master server as both the binlogs and the Avro format files are stored there. It
-is recommended to deploy the avrorouter and the binlogrouter on a remote server
-so that the data streaming process has a minimal effect on performance.
-
 The first part configures the services and sets them up for the binary log to Avro
 file conversion. The second part of this tutorial uses the client listener
 interface for the avrorouter and shows how to communicate with the service
@@ -22,9 +16,9 @@ over the network.
 ## Preparing the master server
 
 The master server where we will be replicating from needs to have binary logging
-enabled, the binary log format set to row based replication and the binary log
-row image needs to contain all the changes. These can be enabled by adding the
-two following lines to the _my.cnf_ file of the master.
+enabled, `binlog_format` set to `row` and `binlog_row_image` set to
+`full`. These can be enabled by adding the two following lines to the _my.cnf_
+file of the master.
 
 ```
 binlog_format=row
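
The hunk above cuts off inside the _my.cnf_ listing, so the second required
line is not visible here. A quick way to confirm that both settings are in
effect after a restart is to query them directly; a minimal check, assuming a
client session on the master:

```
-- Should return ROW and FULL, matching the tutorial's requirements.
SELECT @@global.binlog_format, @@global.binlog_row_image;
```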
@@ -57,6 +51,8 @@ passwd=maxpwd
 type=service
 router=avrorouter
 source=replication-service
+filestem=binlog
+start_index=15
 
 # The listener for the replication-service
 [replication-listener]
@@ -84,16 +80,19 @@ protocol=maxscaled
 socket=default
 ```
 
-You can see that the `source` parameter in the _avro-service_ points to the
-_replication-service_ we defined before. This service will be the data source
-for the avrorouter. The _filestem_ is the prefix in the binlog files. For more
-information on the avrorouter options, read the
-[Avrorouter Documentation](../Routers/Avrorouter.md).
+The `source` parameter in the _avro-service_ points to the _replication-service_
+we defined before. This service will be the data source for the avrorouter. The
+_filestem_ is the prefix in the binlog files and _start_index_ is the binlog
+number to start from. With these parameters, the avrorouter will start reading
+events from binlog `binlog.000015`.
 
-After the services were defined, we added the listeners for the
-_replication-service_ and the _avro-service_. The _CDC_ protocol is a new
-protocol added with the avrorouter and it is the only supported protocol for the
-avrorouter.
+Note that the _filestem_ and _start_index_ must point to the file that is the
+first binlog that the binlogrouter will replicate. For example, if the first
+file you are replicating is `my-binlog-file.001234`, set the parameters to
+`filestem=my-binlog-file` and `start_index=1234`.
+
+For more information on the avrorouter options, read the
+[Avrorouter Documentation](../Routers/Avrorouter.md).
 
 # Preparing the data in the master server
 
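
To pick correct `filestem` and `start_index` values, it helps to list the
binlog files that actually exist on the master; a small sketch, assuming a
client session on the master used in this tutorial:

```
-- The oldest file listed is the earliest point the binlogrouter can start
-- from; its name and number map to filestem and start_index.
SHOW BINARY LOGS;
```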
@@ -104,53 +103,23 @@ binary logs before the conversion process is started.
 
 If the binary logs contain data modification events for tables that aren't
 created in the binary logs, the Avro schema of the table needs to be manually
-created. There are two ways to do this:
+created. There are multiple ways to do this:
 
-- Manually create the schema
-- Use the [_cdc_schema_ Go utility](../Routers/Avrorouter.md#avro-schema-generator)
+- Dump the database to a slave, configure it to replicate from the master and
+  point MaxScale to this slave (this is the recommended method as it requires no
+  extra steps)
+
+- Use the [_cdc_schema_ Go utility](../Routers/Avrorouter.md#avro-schema-generator)
+  and copy the generated .avsc files to the _avrodir_
+
 - Use the [Python version of the schema generator](../../server/modules/protocol/examples/cdc_schema.py)
+  and copy the generated .avsc files to the _avrodir_
 
-All Avro file schemas follow the same general idea. They are in JSON and follow
-the following format:
-
-```
-{
-    "namespace": "MaxScaleChangeDataSchema.avro",
-    "type": "record",
-    "name": "ChangeRecord",
-    "fields":
-    [
-        {
-            "name": "name",
-            "type": "string",
-            "real_type": "varchar",
-            "length": 200
-        },
-        {
-            "name": "address",
-            "type": "string",
-            "real_type": "varchar",
-            "length": 200
-        },
-        {
-            "name": "age",
-            "type": "int",
-            "real_type": "int",
-            "length": -1
-        }
-    ]
-}
-```
-
-The avrorouter uses the schema file to identify the columns, their names and
-what type they are. The _name_ field contains the name of the column and the
-_type_ contains the Avro type. Read the [Avro specification](https://avro.apache.org/docs/1.8.1/spec.html)
-for details on the layout of the schema files.
-
-All Avro schema files for tables that are not created in the binary logs need to
-be in the location pointed to by the _avrodir_ router_option and must use the
-following naming: `<database>.<table>.<schema_version>.avsc`. For example, the
-schema file name of the _test.t1_ table would be `test.t1.0000001.avsc`.
+If you used the schema generator scripts, all Avro schema files for tables that
+are not created in the binary logs need to be in the location pointed to by the
+_avrodir_ parameter. The files use the following naming:
+`<database>.<table>.<schema_version>.avsc`. For example, the schema file name of
+the _test.t1_ table would be `test.t1.0000001.avsc`.
 
 # Starting MariaDB MaxScale
 
@@ -161,7 +130,7 @@ executing a few commands.
 ```
 CHANGE MASTER TO MASTER_HOST='172.18.0.1',
     MASTER_PORT=3000,
-    MASTER_LOG_FILE='binlog.000001',
+    MASTER_LOG_FILE='binlog.000015',
     MASTER_LOG_POS=4,
     MASTER_USER='maxuser',
     MASTER_PASSWORD='maxpwd';
@@ -169,30 +138,30 @@ CHANGE MASTER TO MASTER_HOST='172.18.0.1',
 START SLAVE;
 ```
 
+**NOTE:** GTID replication is not currently supported and file-and-position
+replication must be used.
+
 This will start the replication of binary logs from the master server at
-172.18.0.1:3000. For more details about the commands, refer
-to the [Binlogrouter](../Routers/Binlogrouter.md) documentation.
+172.18.0.1 listening on port 3000. The first file that the binlogrouter
+replicates is `binlog.000015`. This is the same file that was configured as the
+starting file in the avrorouter.
+
+For more details about the SQL commands, refer to the
+[Binlogrouter](../Routers/Binlogrouter.md) documentation.
 
 After the binary log streaming has started, the avrorouter will automatically
-start converting the binlogs into Avro files.
+start processing the binlogs.
 
-For the purpose of this tutorial, create a simple test table using the following
-statement and populate it with some data.
+# Creating and Processing Data
+
+Next, create a simple test table and populate it with some data by executing
+the following statements.
 
 ```
 CREATE TABLE test.t1 (id INT);
 INSERT INTO test.t1 VALUES (1), (2), (3), (4), (5), (6), (7), (8), (9), (10);
 ```
 
-This table will be replicated through MaxScale and it will be converted into an
-Avro file, which you can inspect by using the _maxavrocheck_ utility program.
-
-```
-[markusjm@localhost avrodata]$ ../bin/maxavrocheck test.t1.000001.avro
-File sync marker: caaed7778bbe58e701eec1f96d7719a
-/home/markusjm/build/avrodata/test.t1.000001.avro: 1 blocks, 1 records and 12 bytes
-```
-
 To use the _cdc.py_ command line client to connect to the CDC service, we must first
 create a user. This can be done via maxadmin by executing the following command.
 
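
One way to sanity-check the `binlog.000015` starting point configured above is
to ask the master which binlog it is currently writing; a quick check, assuming
a session directly on the master:

```
-- The File column shows the binlog the master is currently writing; it
-- should be at or after the starting file configured earlier
-- (binlog.000015 in this tutorial).
SHOW MASTER STATUS;
```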
@@ -201,8 +170,29 @@ maxadmin call command cdc add_user avro-service maxuser maxpwd
 ```
 
 This will create the _maxuser:maxpwd_ credentials which can then be used to
-request a data stream of the `test.t1` table that was created earlier.
+request a JSON data stream of the `test.t1` table that was created earlier.
 
 ```
 cdc.py -u maxuser -p maxpwd -h 127.0.0.1 -P 4001 test.t1
 ```
+
+The output is a stream of JSON events describing the changes done to the
+database.
+
+```
+{"namespace": "MaxScaleChangeDataSchema.avro", "type": "record", "name": "ChangeRecord", "fields": [{"name": "domain", "type": "int"}, {"name": "server_id", "type": "int"}, {"name": "sequence", "type": "int"}, {"name": "event_number", "type": "int"}, {"name": "timestamp", "type": "int"}, {"name": "event_type", "type": {"type": "enum", "name": "EVENT_TYPES", "symbols": ["insert", "update_before", "update_after", "delete"]}}, {"name": "id", "type": "int", "real_type": "int", "length": -1}]}
+{"domain": 0, "server_id": 3000, "sequence": 11, "event_number": 1, "timestamp": 1537429419, "event_type": "insert", "id": 1}
+{"domain": 0, "server_id": 3000, "sequence": 11, "event_number": 2, "timestamp": 1537429419, "event_type": "insert", "id": 2}
+{"domain": 0, "server_id": 3000, "sequence": 11, "event_number": 3, "timestamp": 1537429419, "event_type": "insert", "id": 3}
+{"domain": 0, "server_id": 3000, "sequence": 11, "event_number": 4, "timestamp": 1537429419, "event_type": "insert", "id": 4}
+{"domain": 0, "server_id": 3000, "sequence": 11, "event_number": 5, "timestamp": 1537429419, "event_type": "insert", "id": 5}
+{"domain": 0, "server_id": 3000, "sequence": 11, "event_number": 6, "timestamp": 1537429419, "event_type": "insert", "id": 6}
+{"domain": 0, "server_id": 3000, "sequence": 11, "event_number": 7, "timestamp": 1537429419, "event_type": "insert", "id": 7}
+{"domain": 0, "server_id": 3000, "sequence": 11, "event_number": 8, "timestamp": 1537429419, "event_type": "insert", "id": 8}
+{"domain": 0, "server_id": 3000, "sequence": 11, "event_number": 9, "timestamp": 1537429419, "event_type": "insert", "id": 9}
+{"domain": 0, "server_id": 3000, "sequence": 11, "event_number": 10, "timestamp": 1537429419, "event_type": "insert", "id": 10}
+```
+
+The first record is always the JSON format schema for the table, describing the
+types and names of the fields. All records that follow it represent the changes
+that have happened on the database.
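
The schema record above declares the event types `insert`, `update_before`,
`update_after` and `delete`. To see the update pair in the stream, one
illustrative statement against the test table (hypothetical, not part of the
original tutorial) is:

```
-- A single-row UPDATE should appear in the cdc.py output as an
-- "update_before" event followed by an "update_after" event.
UPDATE test.t1 SET id = 11 WHERE id = 1;
```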
@@ -28,6 +28,7 @@
 #include <maxscale/mysql_utils.h>
 #include <maxscale/paths.h>
 #include <maxscale/protocol/mysql.h>
+#include <maxscale/pcre2.h>
 #include <maxscale/router.h>
 #include <maxscale/secrets.h>
 #include <maxscale/service.h>
@@ -929,6 +930,35 @@ static bool roles_are_available(MYSQL* conn, SERVICE* service, SERVER* server)
     return rval;
 }
 
+static void report_mdev13453_problem(MYSQL* con, SERVER* server)
+{
+    if (server->version >= 100200 && server->version < 100211
+        && mxs_pcre2_simple_match("SELECT command denied to user .* for table 'users'",
+                                  mysql_error(con), 0, NULL) == MXS_PCRE2_MATCH)
+    {
+        char user[256] = "<failed to query user>";     // Enough for all user-hostname combinations
+        const char* quoted_user = "select concat(\"'\", user, \"'@'\", host, \"'\") as user "
+                                  "from mysql.user "
+                                  "where concat(user, \"@\", host) = current_user()";
+        MYSQL_RES* res;
+
+        if (mxs_mysql_query(con, quoted_user) == 0 && (res = mysql_store_result(con)))
+        {
+            MYSQL_ROW row = mysql_fetch_row(res);
+
+            if (row && row[0])
+            {
+                snprintf(user, sizeof(user), "%s", row[0]);
+            }
+
+            mysql_free_result(res);
+        }
+
+        MXS_ERROR("Due to MDEV-13453, the service user requires extra grants on the `mysql` database. "
+                  "To fix the problem, add the following grant: GRANT SELECT ON `mysql`.* TO %s", user);
+    }
+}
+
 int get_users_from_server(MYSQL* con, SERVER_REF* server_ref, SERVICE* service, SERV_LISTENER* listener)
 {
     if (server_ref->server->version_string[0] == 0)
|
|||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
MXS_ERROR("Failed to load users: %s", mysql_error(con));
|
MXS_ERROR("Failed to load users from server '%s': %s", server_ref->server->name,
|
||||||
|
mysql_error(con));
|
||||||
|
report_mdev13453_problem(con, server_ref->server);
|
||||||
}
|
}
|
||||||
|
|
||||||
MXS_FREE(query);
|
MXS_FREE(query);
|
||||||
|
@@ -878,22 +878,16 @@ static int gw_read_and_write(DCB* dcb)
         proto->collect_result = false;
         result_collected = true;
     }
-    else if (expecting_ps_response(proto))
-    {
-        if (mxs_mysql_is_prep_stmt_ok(read_buffer)
-            && !complete_ps_response(read_buffer))
-        {
-            dcb_readq_prepend(dcb, read_buffer);
-            return 0;
-        }
-
-        // Collected the complete result
-        proto->collect_result = false;
-        result_collected = true;
-    }
+    else if (expecting_ps_response(proto)
+             && mxs_mysql_is_prep_stmt_ok(read_buffer)
+             && !complete_ps_response(read_buffer))
+    {
+        dcb_readq_prepend(dcb, read_buffer);
+        return 0;
+    }
     else
     {
-        // Assume that everything else responds with a single packet
+        // Collected the complete result
         proto->collect_result = false;
         result_collected = true;
     }
@@ -406,7 +406,11 @@ bool RWSplitSession::route_session_write(GWBUF* querybuf, uint8_t command, uint32_t type)
     bool expecting_response = mxs_mysql_command_will_respond(command);
     int nsucc = 0;
     uint64_t lowest_pos = id;
 
-    gwbuf_set_type(querybuf, GWBUF_TYPE_COLLECT_RESULT);
+    if (expecting_response)
+    {
+        gwbuf_set_type(querybuf, GWBUF_TYPE_COLLECT_RESULT);
+    }
 
     if (qc_query_is_type(type, QUERY_TYPE_PREPARE_NAMED_STMT)
         || qc_query_is_type(type, QUERY_TYPE_PREPARE_STMT))