From 8fc74e7b3737936d1a03591cb31aa34c0ab9fb3a Mon Sep 17 00:00:00 2001 From: Johan Wikman Date: Wed, 18 Jan 2017 12:42:46 +0200 Subject: [PATCH 01/18] Update version for 2.0.4 --- VERSION.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION.cmake b/VERSION.cmake index 1679f4800..0062fcaca 100644 --- a/VERSION.cmake +++ b/VERSION.cmake @@ -5,7 +5,7 @@ set(MAXSCALE_VERSION_MAJOR "2" CACHE STRING "Major version") set(MAXSCALE_VERSION_MINOR "0" CACHE STRING "Minor version") -set(MAXSCALE_VERSION_PATCH "3" CACHE STRING "Patch version") +set(MAXSCALE_VERSION_PATCH "4" CACHE STRING "Patch version") # This should only be incremented if a package is rebuilt set(MAXSCALE_BUILD_NUMBER 1 CACHE STRING "Release number") From 3793f685ac327082f15093322b66c492367e6b15 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Tue, 17 Jan 2017 21:42:22 +0200 Subject: [PATCH 02/18] Store DATETIME correctly in avrorouter DATETIME was stored as a raw year value when it should've been stored as the raw year value minus 1900. 
--- server/core/mysql_binlog.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/server/core/mysql_binlog.c b/server/core/mysql_binlog.c index 22da763a4..c2c67cd0d 100644 --- a/server/core/mysql_binlog.c +++ b/server/core/mysql_binlog.c @@ -291,7 +291,9 @@ static void unpack_datetime2(uint8_t *ptr, uint8_t decimals, struct tm *dest) dest->tm_hour = time >> 12; dest->tm_mday = date % (1 << 5); dest->tm_mon = yearmonth % 13; - dest->tm_year = yearmonth / 13; + + /** struct tm stores the year as: Year - 1900 */ + dest->tm_year = (yearmonth / 13) - 1900; } /** Unpack a "reverse" byte order value */ From fed61fde98ad3f05ad0238e46453e88180f3671b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Thu, 19 Jan 2017 08:56:17 +0200 Subject: [PATCH 03/18] Disable prepared statement execution in dbfwfilter The dbfwfilter doesn't parse prepared statements for all rules which requires that they are disabled. --- server/modules/filter/dbfwfilter/dbfwfilter.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/server/modules/filter/dbfwfilter/dbfwfilter.c b/server/modules/filter/dbfwfilter/dbfwfilter.c index 9e4ce22b8..13da00544 100644 --- a/server/modules/filter/dbfwfilter/dbfwfilter.c +++ b/server/modules/filter/dbfwfilter/dbfwfilter.c @@ -2193,6 +2193,13 @@ routeQuery(FILTER *instance, void *session, GWBUF *queue) int rval = 0; ss_dassert(dcb && dcb->session); + uint32_t type = 0; + + if (modutil_is_SQL(queue) || modutil_is_SQL_prepare(queue)) + { + type = qc_get_type(queue); + } + if (modutil_is_SQL(queue) && modutil_count_statements(queue) > 1) { GWBUF* err = gen_dummy_error(my_session, "This filter does not support " @@ -2202,6 +2209,17 @@ routeQuery(FILTER *instance, void *session, GWBUF *queue) my_session->errmsg = NULL; rval = dcb->func.write(dcb, err); } + else if (QUERY_IS_TYPE(type, QUERY_TYPE_PREPARE_STMT) || + QUERY_IS_TYPE(type, QUERY_TYPE_PREPARE_NAMED_STMT) || + modutil_is_SQL_prepare(queue)) + { + GWBUF* 
err = gen_dummy_error(my_session, "This filter does not support " + "prepared statements."); + gwbuf_free(queue); + free(my_session->errmsg); + my_session->errmsg = NULL; + rval = dcb->func.write(dcb, err); + } else { USER *user = find_user_data(my_instance->htable, dcb->user, dcb->remote); From 98c2d3715680a9c7f7ca00ab85d1503e2a0770f5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Thu, 19 Jan 2017 13:10:46 +0200 Subject: [PATCH 04/18] MXS-1080: Document max_slave_replication_lag behavior The max_slave_replication_lag parameter for readwritesplit only works for monitors that detect replication lag. As the MySQL monitor is the only one that implements this functionality, the parameter only has meaning when used with master-slave clusters. --- Documentation/Routers/ReadWriteSplit.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Documentation/Routers/ReadWriteSplit.md b/Documentation/Routers/ReadWriteSplit.md index b45f0b4b0..1f36d9296 100644 --- a/Documentation/Routers/ReadWriteSplit.md +++ b/Documentation/Routers/ReadWriteSplit.md @@ -32,6 +32,9 @@ This feature is disabled by default. This applies to Master/Slave replication with MySQL monitor and `detect_replication_lag=1` options set. Please note max_slave_replication_lag must be greater than monitor interval. +This option only affects Master-Slave cluster. Galera clusters do not have a +concept of slave lag even if the application of write sets might have lag. 
+ ### `use_sql_variables_in` From c2b03100f06896821de6bd7958e8091972ae1c61 Mon Sep 17 00:00:00 2001 From: Johan Wikman Date: Wed, 25 Jan 2017 15:23:33 +0200 Subject: [PATCH 05/18] Update changelog and release notes of 2.0.4 --- Documentation/Changelog.md | 1 + .../MaxScale-2.0.4-Release-Notes.md | 49 +++++++++++++++++++ 2 files changed, 50 insertions(+) create mode 100644 Documentation/Release-Notes/MaxScale-2.0.4-Release-Notes.md diff --git a/Documentation/Changelog.md b/Documentation/Changelog.md index 5cd7b8fa2..a1452dc96 100644 --- a/Documentation/Changelog.md +++ b/Documentation/Changelog.md @@ -11,6 +11,7 @@ as JSON objects (beta level functionality). For more details, please refer to: +* [MariaDB MaxScale 2.0.4 Release Notes](Release-Notes/MaxScale-2.0.4-Release-Notes.md) * [MariaDB MaxScale 2.0.3 Release Notes](Release-Notes/MaxScale-2.0.3-Release-Notes.md) * [MariaDB MaxScale 2.0.2 Release Notes](Release-Notes/MaxScale-2.0.2-Release-Notes.md) * [MariaDB MaxScale 2.0.1 Release Notes](Release-Notes/MaxScale-2.0.1-Release-Notes.md) diff --git a/Documentation/Release-Notes/MaxScale-2.0.4-Release-Notes.md b/Documentation/Release-Notes/MaxScale-2.0.4-Release-Notes.md new file mode 100644 index 000000000..9246e421a --- /dev/null +++ b/Documentation/Release-Notes/MaxScale-2.0.4-Release-Notes.md @@ -0,0 +1,49 @@ +# MariaDB MaxScale 2.0.4 Release Notes + +Release 2.0.4 is a GA release. + +This document describes the changes in release 2.0.4, when compared to +release [2.0.3](MaxScale-2.0.3-Release-Notes.md). + +If you are upgrading from release 1.4, please also read the release +notes of release [2.0.3](./MaxScale-2.0.3-Release-Notes.md), +release [2.0.2](./MaxScale-2.0.2-Release-Notes.md), +release [2.0.1](./MaxScale-2.0.1-Release-Notes.md) and +[2.0.0](./MaxScale-2.0.0-Release-Notes.md). + +For any problems you encounter, please submit a bug report at +[Jira](https://jira.mariadb.org). 
+ +## Changed Features + +- The dbfwfilter now rejects all prepared statements instead of ignoring + them. This affects _wildcard_, _columns_, _on_queries_ and _no_where_clause_ + type rules which previously ignored prepared statements. + +## Bug fixes + +[Here](https://jira.mariadb.org/issues/?jql=project%20%3D%20MXS%20AND%20issuetype%20%3D%20Bug%20AND%20status%20%3D%20Closed%20AND%20fixVersion%20%3D%202.0.4) +is a list of bugs fixed since the release of MaxScale 2.0.3. + +* [MXS-1082](https://jira.mariadb.org/browse/MXS-1082): Block prepared statements +* [MXS-1080](https://jira.mariadb.org/browse/MXS-1080): Readwritesplit (documentation of max_slave_replication_lag) + +## Known Issues and Limitations + +There are some limitations and known issues within this version of MaxScale. +For more information, please refer to the [Limitations](../About/Limitations.md) document. + +## Packaging + +RPM and Debian packages are provided for the Linux distributions supported +by MariaDB Enterprise. + +Packages can be downloaded [here](https://mariadb.com/resources/downloads). + +## Source Code + +The source code of MaxScale is tagged at GitHub with a tag, which is derived +from the version of MaxScale. For instance, the tag of version `X.Y.Z` of MaxScale +is `maxscale-X.Y.Z`. + +The source code is available [here](https://github.com/mariadb-corporation/MaxScale). From 84040be182a8f1f6f1c54b27e10de61230f45423 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Thu, 26 Jan 2017 10:09:48 +0200 Subject: [PATCH 06/18] Add missing error handling to Avro file handling Some of the JSON errors weren't handled which could cause problems when a malformed schema definition is read. Also added more error messages for situations when opening of the files fails. 
--- avro/maxavro_schema.c | 36 ++++++++---- server/modules/routing/avro/avro_client.c | 70 ++++++++++++----------- server/modules/routing/avro/avro_index.c | 5 ++ 3 files changed, 68 insertions(+), 43 deletions(-) diff --git a/avro/maxavro_schema.c b/avro/maxavro_schema.c index d26ea75e6..257274696 100644 --- a/avro/maxavro_schema.c +++ b/avro/maxavro_schema.c @@ -126,21 +126,35 @@ MAXAVRO_SCHEMA* maxavro_schema_alloc(const char* json) if (schema) { json_t *field_arr = NULL; - json_unpack(schema, "{s:o}", "fields", &field_arr); - size_t arr_size = json_array_size(field_arr); - rval->fields = malloc(sizeof(MAXAVRO_SCHEMA_FIELD) * arr_size); - rval->num_fields = arr_size; - for (int i = 0; i < arr_size; i++) + if (json_unpack(schema, "{s:o}", "fields", &field_arr) == 0) { - json_t *object = json_array_get(field_arr, i); - char *key; - json_t *value_obj; + size_t arr_size = json_array_size(field_arr); + rval->fields = malloc(sizeof(MAXAVRO_SCHEMA_FIELD) * arr_size); + rval->num_fields = arr_size; - json_unpack(object, "{s:s s:o}", "name", &key, "type", &value_obj); - rval->fields[i].name = strdup(key); - rval->fields[i].type = unpack_to_type(value_obj, &rval->fields[i]); + for (int i = 0; i < arr_size; i++) + { + json_t *object = json_array_get(field_arr, i); + char *key; + json_t *value_obj; + + if (json_unpack(object, "{s:s s:o}", "name", &key, "type", &value_obj) == 0) + { + rval->fields[i].name = strdup(key); + rval->fields[i].type = unpack_to_type(value_obj, &rval->fields[i]); + } + else + { + MXS_ERROR("Failed to unpack JSON Object \"name\": %s", json); + } + } } + else + { + MXS_ERROR("Failed to unpack JSON Object \"fields\": %s", json); + } + json_decref(schema); } diff --git a/server/modules/routing/avro/avro_client.c b/server/modules/routing/avro/avro_client.c index e7873f953..90cd9e268 100644 --- a/server/modules/routing/avro/avro_client.c +++ b/server/modules/routing/avro/avro_client.c @@ -775,48 +775,54 @@ static bool avro_client_stream_data(AVRO_CLIENT 
*client) char filename[PATH_MAX + 1]; snprintf(filename, PATH_MAX, "%s/%s", router->avrodir, client->avro_binfile); + bool ok = true; + spinlock_acquire(&client->file_lock); - if (client->file_handle == NULL) + if (client->file_handle == NULL && + (client->file_handle = maxavro_file_open(filename)) == NULL) { - client->file_handle = maxavro_file_open(filename); + ok = false; } spinlock_release(&client->file_lock); - switch (client->format) + if (ok) { - case AVRO_FORMAT_JSON: - /** Currently only JSON format supports seeking to a GTID */ - if (client->requested_gtid && - seek_to_index_pos(client, client->file_handle) && - seek_to_gtid(client, client->file_handle)) - { - client->requested_gtid = false; - } + switch (client->format) + { + case AVRO_FORMAT_JSON: + /** Currently only JSON format supports seeking to a GTID */ + if (client->requested_gtid && + seek_to_index_pos(client, client->file_handle) && + seek_to_gtid(client, client->file_handle)) + { + client->requested_gtid = false; + } - read_more = stream_json(client); - break; + read_more = stream_json(client); + break; - case AVRO_FORMAT_AVRO: - read_more = stream_binary(client); - break; + case AVRO_FORMAT_AVRO: + read_more = stream_binary(client); + break; - default: - MXS_ERROR("Unexpected format: %d", client->format); - break; + default: + MXS_ERROR("Unexpected format: %d", client->format); + break; + } + + + if (maxavro_get_error(client->file_handle) != MAXAVRO_ERR_NONE) + { + MXS_ERROR("Reading Avro file failed with error '%s'.", + maxavro_get_error_string(client->file_handle)); + } + + /* update client struct */ + memcpy(&client->avro_file, client->file_handle, sizeof(client->avro_file)); + + /* may be just use client->avro_file->records_read and remove this var */ + client->last_sent_pos = client->avro_file.records_read; } - - - if (maxavro_get_error(client->file_handle) != MAXAVRO_ERR_NONE) - { - MXS_ERROR("Reading Avro file failed with error '%s'.", - maxavro_get_error_string(client->file_handle)); 
- } - - /* update client struct */ - memcpy(&client->avro_file, client->file_handle, sizeof(client->avro_file)); - - /* may be just use client->avro_file->records_read and remove this var */ - client->last_sent_pos = client->avro_file.records_read; } else { diff --git a/server/modules/routing/avro/avro_index.c b/server/modules/routing/avro/avro_index.c index 3696adf14..905c712c4 100644 --- a/server/modules/routing/avro/avro_index.c +++ b/server/modules/routing/avro/avro_index.c @@ -72,6 +72,7 @@ int index_query_cb(void *data, int rows, char** values, char** names) void avro_index_file(AVRO_INSTANCE *router, const char* filename) { MAXAVRO_FILE *file = maxavro_file_open(filename); + if (file) { char *name = strrchr(filename, '/'); @@ -165,6 +166,10 @@ void avro_index_file(AVRO_INSTANCE *router, const char* filename) maxavro_file_close(file); } + else + { + MXS_ERROR("Failed to open file '%s' when generating file index.", filename); + } } /** From 8da655b7cb4e81b7dd8c5cfc475d2d486dfbb612 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Thu, 26 Jan 2017 15:45:41 +0200 Subject: [PATCH 07/18] Improve maxavro failure handling and error messages When the creation of the Avro schema would fail for a file that is being opened, the errors wouldn't be handled properly. Also free all allocated memory on failure. All errors that set errno are now properly logged with the error number and message. 
--- avro/maxavro_file.c | 125 ++++++++++++++++++++++++++++++++---------- avro/maxavro_schema.c | 20 ++++++- 2 files changed, 115 insertions(+), 30 deletions(-) diff --git a/avro/maxavro_file.c b/avro/maxavro_file.c index 6b8ac80b1..0f77794df 100644 --- a/avro/maxavro_file.c +++ b/avro/maxavro_file.c @@ -16,10 +16,31 @@ #include #include - static bool maxavro_read_sync(FILE *file, uint8_t* sync) { - return fread(sync, 1, SYNC_MARKER_SIZE, file) == SYNC_MARKER_SIZE; + bool rval = true; + + if (fread(sync, 1, SYNC_MARKER_SIZE, file) != SYNC_MARKER_SIZE) + { + rval = false; + + if (ferror(file)) + { + char err[STRERROR_BUFLEN]; + MXS_ERROR("Failed to read file sync marker: %d, %s", errno, + strerror_r(errno, err, sizeof(err))); + } + else if (feof(file)) + { + MXS_ERROR("Short read when reading file sync marker."); + } + else + { + MXS_ERROR("Unspecified error when reading file sync marker."); + } + } + + return rval; } bool maxavro_verify_block(MAXAVRO_FILE *file) @@ -72,12 +93,24 @@ bool maxavro_read_datablock_start(MAXAVRO_FILE* file) if (rval) { - file->block_size = bytes; - file->records_in_block = records; - file->records_read_from_block = 0; - file->data_start_pos = ftell(file->file); - ss_dassert(file->data_start_pos > file->block_start_pos); - file->metadata_read = true; + long pos = ftell(file->file); + + if (pos == -1) + { + rval = false; + char err[STRERROR_BUFLEN]; + MXS_ERROR("Failed to read datablock start: %d, %s", errno, + strerror_r(errno, err, sizeof(err))); + } + else + { + file->block_size = bytes; + file->records_in_block = records; + file->records_read_from_block = 0; + file->data_start_pos = pos; + ss_dassert(file->data_start_pos > file->block_start_pos); + file->metadata_read = true; + } } else if (maxavro_get_error(file) != MAXAVRO_ERR_NONE) { @@ -153,35 +186,47 @@ MAXAVRO_FILE* maxavro_file_open(const char* filename) return NULL; } - MAXAVRO_FILE* avrofile = calloc(1, sizeof(MAXAVRO_FILE)); + bool error = false; - if (avrofile) + 
MAXAVRO_FILE* avrofile = calloc(1, sizeof(MAXAVRO_FILE)); + char *my_filename = strdup(filename); + char *schema = read_schema(avrofile); + + if (avrofile && my_filename && schema) { avrofile->file = file; - avrofile->filename = strdup(filename); - char *schema = read_schema(avrofile); - avrofile->schema = schema ? maxavro_schema_alloc(schema) : NULL; + avrofile->filename = my_filename; + avrofile->schema = maxavro_schema_alloc(schema); avrofile->last_error = MAXAVRO_ERR_NONE; - if (!schema || !avrofile->schema || - !maxavro_read_sync(file, avrofile->sync) || - !maxavro_read_datablock_start(avrofile)) + if (avrofile->schema && + maxavro_read_sync(file, avrofile->sync) && + maxavro_read_datablock_start(avrofile)) + { + avrofile->header_end_pos = avrofile->block_start_pos; + } + else { MXS_ERROR("Failed to initialize avrofile."); - free(avrofile->schema); - free(avrofile); - avrofile = NULL; + maxavro_schema_free(avrofile->schema); + error = true; } - avrofile->header_end_pos = avrofile->block_start_pos; - free(schema); } else + { + error = true; + } + + if (error) { fclose(file); free(avrofile); + free(my_filename); avrofile = NULL; } + free(schema); + return avrofile; } @@ -248,19 +293,43 @@ void maxavro_file_close(MAXAVRO_FILE *file) GWBUF* maxavro_file_binary_header(MAXAVRO_FILE *file) { long pos = file->header_end_pos; - fseek(file->file, 0, SEEK_SET); - GWBUF *rval = gwbuf_alloc(pos); - if (rval) + GWBUF *rval = NULL; + + if (fseek(file->file, 0, SEEK_SET) == 0) { - if (fread(GWBUF_DATA(rval), 1, pos, file->file) != pos) + if ((rval = gwbuf_alloc(pos))) { - gwbuf_free(rval); - rval = NULL; + if (fread(GWBUF_DATA(rval), 1, pos, file->file) != pos) + { + if (ferror(file->file)) + { + char err[STRERROR_BUFLEN]; + MXS_ERROR("Failed to read binary header: %d, %s", errno, + strerror_r(errno, err, sizeof(err))); + } + else if (feof(file->file)) + { + MXS_ERROR("Short read when reading binary header."); + } + else + { + MXS_ERROR("Unspecified error when reading binary 
header."); + } + gwbuf_free(rval); + rval = NULL; + } + } + else + { + MXS_ERROR("Memory allocation failed when allocating %ld bytes.", pos); } } else { - MXS_ERROR("Memory allocation failed when allocating %ld bytes.", pos); + char err[STRERROR_BUFLEN]; + MXS_ERROR("Failed to read binary header: %d, %s", errno, + strerror_r(errno, err, sizeof(err))); } + return rval; } diff --git a/avro/maxavro_schema.c b/avro/maxavro_schema.c index 257274696..10cf1d096 100644 --- a/avro/maxavro_schema.c +++ b/avro/maxavro_schema.c @@ -120,6 +120,7 @@ MAXAVRO_SCHEMA* maxavro_schema_alloc(const char* json) if (rval) { + bool error = false; json_error_t err; json_t *schema = json_loads(json, 0, &err); @@ -139,7 +140,7 @@ MAXAVRO_SCHEMA* maxavro_schema_alloc(const char* json) char *key; json_t *value_obj; - if (json_unpack(object, "{s:s s:o}", "name", &key, "type", &value_obj) == 0) + if (object && json_unpack(object, "{s:s s:o}", "name", &key, "type", &value_obj) == 0) { rval->fields[i].name = strdup(key); rval->fields[i].type = unpack_to_type(value_obj, &rval->fields[i]); @@ -147,26 +148,41 @@ MAXAVRO_SCHEMA* maxavro_schema_alloc(const char* json) else { MXS_ERROR("Failed to unpack JSON Object \"name\": %s", json); + error = true; + + for (int j = 0; j < i; j++) + { + free(rval->fields[j].name); + } + break; } } } else { MXS_ERROR("Failed to unpack JSON Object \"fields\": %s", json); + error = true; } - json_decref(schema); } else { MXS_ERROR("Failed to read JSON schema: %s", json); + error = true; + } + + if (error) + { + free(rval); + rval = NULL; } } else { MXS_ERROR("Memory allocation failed."); } + return rval; } From 81b9d51aab460300f87f119337e2dad4d71ac6c6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Fri, 27 Jan 2017 12:55:07 +0200 Subject: [PATCH 08/18] Fix crash on startup The Avro file was initialized in the wrong order and uninitialized values were used. 
--- avro/maxavro_file.c | 30 +++++++++++++++++++----------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/avro/maxavro_file.c b/avro/maxavro_file.c index 0f77794df..7c5083489 100644 --- a/avro/maxavro_file.c +++ b/avro/maxavro_file.c @@ -190,25 +190,35 @@ MAXAVRO_FILE* maxavro_file_open(const char* filename) MAXAVRO_FILE* avrofile = calloc(1, sizeof(MAXAVRO_FILE)); char *my_filename = strdup(filename); - char *schema = read_schema(avrofile); - if (avrofile && my_filename && schema) + if (avrofile && my_filename) { avrofile->file = file; avrofile->filename = my_filename; - avrofile->schema = maxavro_schema_alloc(schema); avrofile->last_error = MAXAVRO_ERR_NONE; - if (avrofile->schema && - maxavro_read_sync(file, avrofile->sync) && - maxavro_read_datablock_start(avrofile)) + char *schema = read_schema(avrofile); + + if (schema) { - avrofile->header_end_pos = avrofile->block_start_pos; + avrofile->schema = maxavro_schema_alloc(schema); + + if (avrofile->schema && + maxavro_read_sync(file, avrofile->sync) && + maxavro_read_datablock_start(avrofile)) + { + avrofile->header_end_pos = avrofile->block_start_pos; + } + else + { + MXS_ERROR("Failed to initialize avrofile."); + maxavro_schema_free(avrofile->schema); + error = true; + } + free(schema); } else { - MXS_ERROR("Failed to initialize avrofile."); - maxavro_schema_free(avrofile->schema); error = true; } } @@ -225,8 +235,6 @@ MAXAVRO_FILE* maxavro_file_open(const char* filename) avrofile = NULL; } - free(schema); - return avrofile; } From 78545b2c5c4bb0b82a0b70216b0a1532e6132383 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Fri, 27 Jan 2017 13:06:25 +0200 Subject: [PATCH 09/18] Read complete lines with Kafka producer There's no need to process the JSON twice as the Kafka producer is expected to be used with the Python CDC client which already splits the JSON with newlines. 
--- server/modules/protocol/examples/cdc_kafka_producer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/modules/protocol/examples/cdc_kafka_producer.py b/server/modules/protocol/examples/cdc_kafka_producer.py index 1d9a664db..5e09f0734 100755 --- a/server/modules/protocol/examples/cdc_kafka_producer.py +++ b/server/modules/protocol/examples/cdc_kafka_producer.py @@ -36,7 +36,7 @@ producer = KafkaProducer(bootstrap_servers=[opts.kafka_broker]) while True: try: - buf = sys.stdin.read(128) + buf = sys.stdin.readline() if len(buf) == 0: break From 22698fdf2151b5150a04c479329d62fc680c34f1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Fri, 27 Jan 2017 14:19:51 +0200 Subject: [PATCH 10/18] MXS-951: Backport fix to 2.0 Backported the fix for 2.0.4. --- Documentation/Release-Notes/MaxScale-2.0.4-Release-Notes.md | 3 +++ cmake/BuildMariaDBConnector.cmake | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/Documentation/Release-Notes/MaxScale-2.0.4-Release-Notes.md b/Documentation/Release-Notes/MaxScale-2.0.4-Release-Notes.md index 9246e421a..1befb7346 100644 --- a/Documentation/Release-Notes/MaxScale-2.0.4-Release-Notes.md +++ b/Documentation/Release-Notes/MaxScale-2.0.4-Release-Notes.md @@ -20,6 +20,8 @@ For any problems you encounter, please submit a bug report at them. This affects _wildcard_, _columns_, _on_queries_ and _no_where_clause_ type rules which previously ignored prepared statements. +- The MariaDB Connector-C was upgraded to version 2.3.2. + ## Bug fixes [Here](https://jira.mariadb.org/issues/?jql=project%20%3D%20MXS%20AND%20issuetype%20%3D%20Bug%20AND%20status%20%3D%20Closed%20AND%20fixVersion%20%3D%202.0.4) @@ -27,6 +29,7 @@ is a list of bugs fixed since the release of MaxScale 2.0.3. 
* [MXS-1082](https://jira.mariadb.org/browse/MXS-1082): Block prepared statements * [MXS-1080](https://jira.mariadb.org/browse/MXS-1080): Readwritesplit (documentation of max_slave_replication_lag) +* [MXS-951](https://jira.mariadb.org/browse/MXS-951): Using utf8mb4 on galera hosts stops maxscale connections ## Known Issues and Limitations diff --git a/cmake/BuildMariaDBConnector.cmake b/cmake/BuildMariaDBConnector.cmake index 4b09b499e..b3982f809 100644 --- a/cmake/BuildMariaDBConnector.cmake +++ b/cmake/BuildMariaDBConnector.cmake @@ -10,8 +10,8 @@ include(ExternalProject) set(MARIADB_CONNECTOR_C_REPO "https://github.com/MariaDB/mariadb-connector-c.git" CACHE STRING "MariaDB Connector-C Git repository") -# Release 2.2.3 of the Connector-C -set(MARIADB_CONNECTOR_C_TAG "v2.3.0" +# Release 2.3.2 of the Connector-C +set(MARIADB_CONNECTOR_C_TAG "v2.3.2" CACHE STRING "MariaDB Connector-C Git tag") ExternalProject_Add(connector-c From 122027e4a3ceb714880738b62fe57a7952f3dbad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Mon, 30 Jan 2017 16:46:24 +0200 Subject: [PATCH 11/18] Fix binlogrouter test The test used the wrong working directory. blr_slave.c didn't check the input for NULL values. 
--- server/modules/routing/binlog/blr_slave.c | 2 +- server/modules/routing/binlog/test/CMakeLists.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/server/modules/routing/binlog/blr_slave.c b/server/modules/routing/binlog/blr_slave.c index 64616c674..3b78e3d01 100644 --- a/server/modules/routing/binlog/blr_slave.c +++ b/server/modules/routing/binlog/blr_slave.c @@ -4540,7 +4540,7 @@ blr_get_parsed_command_value(char *input) char *word; char *value = NULL; - if (strlen(input)) + if (input && strlen(input)) { value = strdup(input); } diff --git a/server/modules/routing/binlog/test/CMakeLists.txt b/server/modules/routing/binlog/test/CMakeLists.txt index bc8514791..d600418eb 100644 --- a/server/modules/routing/binlog/test/CMakeLists.txt +++ b/server/modules/routing/binlog/test/CMakeLists.txt @@ -1,5 +1,5 @@ if(BUILD_TESTS) add_executable(testbinlogrouter testbinlog.c ../blr.c ../blr_slave.c ../blr_master.c ../blr_file.c ../blr_cache.c) target_link_libraries(testbinlogrouter maxscale-common ${PCRE_LINK_FLAGS} uuid) - add_test(TestBinlogRouter ${CMAKE_CURRENT_BINARY_DIR}/testbinlogrouter) + add_test(NAME TestBinlogRouter COMMAND ./testbinlogrouter WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) endif() From fdee32919802a4e4aa0672059a87ca9e8b5b56e7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Mon, 30 Jan 2017 18:53:12 +0200 Subject: [PATCH 12/18] MXS-1111: Allow COM_PING and other commands to pass the firewall The firewall filter should allow COM_PING and other similar commands to pass through as they are mainly used to check the status of the backend server or to display statistics. The COM_PROCESS_KILL is the exception as it affects the state of the backend server. This is better controlled with permissions in the server than in the firewall filter. Commands that require special grants aren't allowed to pass as they are mainly for maintenance purposes and these should not be done through the firewall. 
--- .../Filters/Database-Firewall-Filter.md | 16 +++++++++++++- server/modules/filter/dbfwfilter/dbfwfilter.c | 22 ++++++++++++++++++- 2 files changed, 36 insertions(+), 2 deletions(-) diff --git a/Documentation/Filters/Database-Firewall-Filter.md b/Documentation/Filters/Database-Firewall-Filter.md index b715e1119..7f6e23681 100644 --- a/Documentation/Filters/Database-Firewall-Filter.md +++ b/Documentation/Filters/Database-Firewall-Filter.md @@ -32,7 +32,21 @@ Absolute path to a file with the rule definitions in it. The file should be read #### `action` -This parameter is optional and determines what action is taken when a query matches a rule. The value can be either `allow`, which allows all matching queries to proceed but blocks those that don't match, or `block`, which blocks all matching queries, or `ignore` which allows all queries to proceed. +This parameter is optional and determines what action is taken when a query +matches a rule. The value can be either `allow`, which allows all matching +queries to proceed but blocks those that don't match, or `block`, which blocks +all matching queries, or `ignore` which allows all queries to proceed. + +The following statement types will always be allowed through when `action` is +set to `allow`: + + - COM_QUIT: Client closes connection + - COM_PING: Server is pinged + - COM_CHANGE_USER: The user is changed for an active connection + - COM_SET_OPTION: Client multi-statements are being configured + - COM_FIELD_LIST: Alias for the `SHOW TABLES;` query + - COM_PROCESS_KILL: Alias for `KILL <thread_id>;` query + - COM_PROCESS_INFO: Alias for `SHOW PROCESSLIST;` You can have both blacklist and whitelist functionality by configuring one filter with `action=allow` and another one with `action=block`. 
You can then use diff --git a/server/modules/filter/dbfwfilter/dbfwfilter.c b/server/modules/filter/dbfwfilter/dbfwfilter.c index 13da00544..b504ea0af 100644 --- a/server/modules/filter/dbfwfilter/dbfwfilter.c +++ b/server/modules/filter/dbfwfilter/dbfwfilter.c @@ -2174,6 +2174,26 @@ USER* find_user_data(HASHTABLE *hash, const char *name, const char *remote) return user; } +static bool command_is_mandatory(GWBUF *buffer) +{ + uint8_t cmd = *(((uint8_t*)GWBUF_DATA(buffer)) + 4); + + switch (cmd) + { + case MYSQL_COM_QUIT: + case MYSQL_COM_PING: + case MYSQL_COM_CHANGE_USER: + case MYSQL_COM_SET_OPTION: + case MYSQL_COM_FIELD_LIST: + case MYSQL_COM_PROCESS_KILL: + case MYSQL_COM_PROCESS_INFO: + return true; + + default: + return false; + } +} + /** * The routeQuery entry point. This is passed the query buffer * to which the filter should be applied. Once processed the @@ -2223,7 +2243,7 @@ routeQuery(FILTER *instance, void *session, GWBUF *queue) else { USER *user = find_user_data(my_instance->htable, dcb->user, dcb->remote); - bool query_ok = false; + bool query_ok = command_is_mandatory(queue); if (user) { From 443af1aee3a58e31dd907a1f8309ccd067957f90 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Mon, 30 Jan 2017 19:20:46 +0200 Subject: [PATCH 13/18] Use preliminary 2.3.3 release of Connector/C The new version has fixed a bug which MaxScale depends on. 
--- cmake/BuildMariaDBConnector.cmake | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmake/BuildMariaDBConnector.cmake b/cmake/BuildMariaDBConnector.cmake index b3982f809..d75bf5d10 100644 --- a/cmake/BuildMariaDBConnector.cmake +++ b/cmake/BuildMariaDBConnector.cmake @@ -10,8 +10,8 @@ include(ExternalProject) set(MARIADB_CONNECTOR_C_REPO "https://github.com/MariaDB/mariadb-connector-c.git" CACHE STRING "MariaDB Connector-C Git repository") -# Release 2.3.2 of the Connector-C -set(MARIADB_CONNECTOR_C_TAG "v2.3.2" +# Release 2.3.3 (preliminary) of the Connector-C +set(MARIADB_CONNECTOR_C_TAG "v2.3.3_pre" CACHE STRING "MariaDB Connector-C Git tag") ExternalProject_Add(connector-c From 4bbd513b1e021b2172ff1a8392b425109b4a688b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Mon, 30 Jan 2017 19:26:18 +0200 Subject: [PATCH 14/18] Update release notes Added dbfwfilter changes and fixed bugs to release notes. --- .../Release-Notes/MaxScale-2.0.4-Release-Notes.md | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/Documentation/Release-Notes/MaxScale-2.0.4-Release-Notes.md b/Documentation/Release-Notes/MaxScale-2.0.4-Release-Notes.md index 1befb7346..684836f64 100644 --- a/Documentation/Release-Notes/MaxScale-2.0.4-Release-Notes.md +++ b/Documentation/Release-Notes/MaxScale-2.0.4-Release-Notes.md @@ -20,13 +20,18 @@ For any problems you encounter, please submit a bug report at them. This affects _wildcard_, _columns_, _on_queries_ and _no_where_clause_ type rules which previously ignored prepared statements. -- The MariaDB Connector-C was upgraded to version 2.3.2. +- The dbfwfilter now allows COM_PING and other commands through when + `action=allow`. See the [documentation](../Filters/Database-Firewall-Filter.md) + for more details. + +- The MariaDB Connector-C was upgraded to a preliminary release of version 2.3.3 (fixes MXS-951). 
## Bug fixes [Here](https://jira.mariadb.org/issues/?jql=project%20%3D%20MXS%20AND%20issuetype%20%3D%20Bug%20AND%20status%20%3D%20Closed%20AND%20fixVersion%20%3D%202.0.4) is a list of bugs fixed since the release of MaxScale 2.0.3. +* [MXS-1111](https://jira.mariadb.org/browse/MXS-1111): Request Ping not allowed * [MXS-1082](https://jira.mariadb.org/browse/MXS-1082): Block prepared statements * [MXS-1080](https://jira.mariadb.org/browse/MXS-1080): Readwritesplit (documentation of max_slave_replication_lag) * [MXS-951](https://jira.mariadb.org/browse/MXS-951): Using utf8mb4 on galera hosts stops maxscale connections From 6cd16d26b8f44265c03b6387b7312a54fcc035a6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Mon, 30 Jan 2017 20:14:27 +0200 Subject: [PATCH 15/18] Flush the producer after every new record As the cdc_kafka_producer script is an example, it should flush the producer after every new record. This should make it easier to see that events from MaxScale are sent to Kafka. 
--- server/modules/protocol/examples/cdc_kafka_producer.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/server/modules/protocol/examples/cdc_kafka_producer.py b/server/modules/protocol/examples/cdc_kafka_producer.py index 5e09f0734..cddcf1bff 100755 --- a/server/modules/protocol/examples/cdc_kafka_producer.py +++ b/server/modules/protocol/examples/cdc_kafka_producer.py @@ -48,6 +48,7 @@ while True: data = decoder.raw_decode(rbuf.decode('ascii')) rbuf = rbuf[data[1]:] producer.send(topic=opts.kafka_topic, value=json.dumps(data[0]).encode()) + producer.flush() # JSONDecoder will return a ValueError if a partial JSON object is read except ValueError as err: @@ -57,5 +58,3 @@ while True: except Exception as ex: print(ex) break - -producer.flush() From 82105d20e1c9650ee69fb2099e92e3df374f6b7f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Tue, 31 Jan 2017 10:10:25 +0200 Subject: [PATCH 16/18] Fix a typo in readwritesplit documentation Fixed a typo in readwritesplit documentation. The cluster should be in plural form. --- Documentation/Routers/ReadWriteSplit.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/Routers/ReadWriteSplit.md b/Documentation/Routers/ReadWriteSplit.md index 1f36d9296..f76dbf30e 100644 --- a/Documentation/Routers/ReadWriteSplit.md +++ b/Documentation/Routers/ReadWriteSplit.md @@ -32,7 +32,7 @@ This feature is disabled by default. This applies to Master/Slave replication with MySQL monitor and `detect_replication_lag=1` options set. Please note max_slave_replication_lag must be greater than monitor interval. -This option only affects Master-Slave cluster. Galera clusters do not have a +This option only affects Master-Slave clusters. Galera clusters do not have a concept of slave lag even if the application of write sets might have lag. 
From 00f16e1fa56765678131faea59b2a819e59d49cc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Tue, 31 Jan 2017 10:16:33 +0200 Subject: [PATCH 17/18] Make parameters for command_is_mandatory const The parameters can be const as they aren't modified. --- server/modules/filter/dbfwfilter/dbfwfilter.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/server/modules/filter/dbfwfilter/dbfwfilter.c b/server/modules/filter/dbfwfilter/dbfwfilter.c index b504ea0af..a5ef051a5 100644 --- a/server/modules/filter/dbfwfilter/dbfwfilter.c +++ b/server/modules/filter/dbfwfilter/dbfwfilter.c @@ -2174,11 +2174,9 @@ USER* find_user_data(HASHTABLE *hash, const char *name, const char *remote) return user; } -static bool command_is_mandatory(GWBUF *buffer) +static bool command_is_mandatory(const GWBUF *buffer) { - uint8_t cmd = *(((uint8_t*)GWBUF_DATA(buffer)) + 4); - - switch (cmd) + switch (MYSQL_GET_COMMAND((uint8_t*)GWBUF_DATA(buffer))) { case MYSQL_COM_QUIT: case MYSQL_COM_PING: From a8780b892498f0a4df03ab8c09625603874582d5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20M=C3=A4kel=C3=A4?= Date: Tue, 31 Jan 2017 13:48:51 +0200 Subject: [PATCH 18/18] Add PS limitation of schemarouter to limitations As prepared statements aren't parsed by the schemarouter, it can't support prepared statements. --- Documentation/About/Limitations.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/Documentation/About/Limitations.md b/Documentation/About/Limitations.md index 87f75101f..3931b391f 100644 --- a/Documentation/About/Limitations.md +++ b/Documentation/About/Limitations.md @@ -229,6 +229,12 @@ and routed. Here is a list of the current limitations. the query will be routed to the first available server. This possibly returns an error about database rights instead of a missing database. +* The preparation of a prepared statement is routed to all servers. 
The + execution of a prepared statement is routed to the first available server or + to the server pointed to by a routing hint attached to the query. As text + protocol prepared statements are relatively rare, prepared statements can't be + considered as supported in schemarouter. + ## Avrorouter limitations (avrorouter) The avrorouter does not support the following data types and conversions.