Merge branch '2.1' into develop

Markus Mäkelä
2017-06-14 13:59:18 +03:00
15 changed files with 210 additions and 157 deletions

View File

@@ -154,7 +154,7 @@ then
    echo "Error getting avro-c"
    exit 1
fi
-avro_filename=`ls -1 *.tar.gz`
+avro_filename=`ls -1 avro*.tar.gz`
avro_dir=`echo "$avro_filename" | sed "s/.tar.gz//"`
tar -axf $avro_filename
mkdir $avro_dir/build

View File

@ -37,6 +37,7 @@ For more details, please refer to:
as JSON objects (beta level functionality). as JSON objects (beta level functionality).
For more details, please refer to: For more details, please refer to:
* [MariaDB MaxScale 2.0.6 Release Notes](Release-Notes/MaxScale-2.0.6-Release-Notes.md)
* [MariaDB MaxScale 2.0.5 Release Notes](Release-Notes/MaxScale-2.0.5-Release-Notes.md) * [MariaDB MaxScale 2.0.5 Release Notes](Release-Notes/MaxScale-2.0.5-Release-Notes.md)
* [MariaDB MaxScale 2.0.4 Release Notes](Release-Notes/MaxScale-2.0.4-Release-Notes.md) * [MariaDB MaxScale 2.0.4 Release Notes](Release-Notes/MaxScale-2.0.4-Release-Notes.md)
* [MariaDB MaxScale 2.0.3 Release Notes](Release-Notes/MaxScale-2.0.3-Release-Notes.md) * [MariaDB MaxScale 2.0.3 Release Notes](Release-Notes/MaxScale-2.0.3-Release-Notes.md)

View File

@@ -101,11 +101,14 @@ two steps from above.
## Creating additional grants for users

-Because MariaDB MaxScale sits between the clients and the backend databases,
-the backend databases will see all clients as if they were connecting from
-MariaDB MaxScale's address.
-This usually requires users to create additional grants for MariaDB MaxScale's hostname.
-The best way to describe this process is with an example.
+**Note:** The same username and password must exist for both the client host
+and the MaxScale host.
+
+Because MariaDB MaxScale sits between the clients and the backend databases, the
+backend databases will see all clients as if they were connecting from MariaDB
+MaxScale's address. This usually requires users to create additional grants for
+MariaDB MaxScale's hostname. The best way to describe this process is with an
+example.

User `'jdoe'@'192.168.0.200'` has the following grant on the cluster:
`GRANT SELECT, INSERT, UPDATE, DELETE ON *.* TO 'jdoe'@'192.168.0.200'`.

@@ -134,18 +137,22 @@ MariaDB [(none)]> SHOW GRANTS FOR 'jdoe'@'192.168.0.200';
```

Then creating the user `'jdoe'@'192.168.0.101'` and giving it the same grants:

```
-MariaDB [(none)]> CREATE USER 'jdoe'@'192.168.0.101';
+MariaDB [(none)]> CREATE USER 'jdoe'@'192.168.0.101' IDENTIFIED BY 'secret_password';
Query OK, 0 rows affected (0.00 sec)

MariaDB [(none)]> GRANT SELECT, INSERT, UPDATE, DELETE ON *.* TO 'jdoe'@'192.168.0.101';
Query OK, 0 rows affected (0.00 sec)
```

-The other option is to use a wildcard grant like
-`GRANT SELECT, INSERT, UPDATE, DELETE ON *.* TO 'jdoe'@'%'`.
-This is more convenient but also less secure than having specific grants
-for both the client's address and MariaDB MaxScale's address.
+The other option is to use a wildcard grant like the following:
+
+```
+GRANT SELECT, INSERT, UPDATE, DELETE ON *.* TO 'jdoe'@'%' IDENTIFIED BY 'secret_password'
+```
+
+This is more convenient but less secure than having specific grants for both the
+client's address and MariaDB MaxScale's address as it allows access from all
+hosts.

## Creating the configuration file
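
An aside on the wildcard grant shown in the documentation hunk above: between a per-host grant and a global `'%'` wildcard there is a middle ground, a wildcard limited to the local subnet. The sketch below is illustrative only; the subnet pattern and password are assumptions, not part of the commit.

```
-- Sketch: allow 'jdoe' to connect from any host in 192.168.0.0/24, which
-- covers both the client's address and MaxScale's address without opening
-- the account to every host.
CREATE USER 'jdoe'@'192.168.0.%' IDENTIFIED BY 'secret_password';
GRANT SELECT, INSERT, UPDATE, DELETE ON *.* TO 'jdoe'@'192.168.0.%';
```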

View File

@@ -607,6 +607,7 @@ add_test_executable(test_hints.cpp test_hints hints2 LABELS hintfilter LIGHT REP
# Binlogrouter tests, these heavily alter the replication so they are run last
add_test_executable(avro.cpp avro avro LABELS avrorouter binlogrouter LIGHT BREAKS_REPL)
+add_test_executable(avro_alter.cpp avro_alter avro LABELS avrorouter binlogrouter LIGHT BREAKS_REPL)
# Test avrorouter file compression
#add_test_script(avro_compression avro avro_compression LABELS avrorouter binlogrouter LIGHT BREAKS_REPL)

View File

@@ -87,6 +87,7 @@ int main(int argc, char *argv[])
    }

    execute_query(test.repl->nodes[0], "DROP TABLE test.t1;RESET MASTER");
+   test.stop_timeout();
    test.repl->fix_replication();
    return test.global_result;

View File

@@ -0,0 +1,57 @@
/**
* @file avro_alter.cpp Test ALTER TABLE handling of avrorouter
*/
#include "testconnections.h"
#include <sstream>
int main(int argc, char *argv[])
{
TestConnections test(argc, argv);
test.set_timeout(600);
test.ssh_maxscale(true, (char *) "rm -rf /var/lib/maxscale/avro");
/** Start master to binlogrouter replication */
if (!test.replicate_from_master())
{
return 1;
}
test.set_timeout(120);
test.repl->connect();
execute_query_silent(test.repl->nodes[0], "DROP TABLE test.t1");
execute_query(test.repl->nodes[0], "CREATE TABLE test.t1(id INT)");
execute_query(test.repl->nodes[0], "INSERT INTO test.t1 VALUES (1)");
execute_query(test.repl->nodes[0], "ALTER TABLE test.t1 ADD COLUMN a VARCHAR(100)");
execute_query(test.repl->nodes[0], "INSERT INTO test.t1 VALUES (2, \"a\")");
execute_query(test.repl->nodes[0], "ALTER TABLE test.t1 ADD COLUMN b FLOAT");
execute_query(test.repl->nodes[0], "INSERT INTO test.t1 VALUES (3, \"b\", 3.0)");
execute_query(test.repl->nodes[0], "ALTER TABLE test.t1 CHANGE COLUMN b c DATETIME(3)");
execute_query(test.repl->nodes[0], "INSERT INTO test.t1 VALUES (4, \"c\", NOW())");
execute_query(test.repl->nodes[0], "ALTER TABLE test.t1 DROP COLUMN c");
execute_query(test.repl->nodes[0], "INSERT INTO test.t1 VALUES (5, \"d\")");
test.repl->close_connections();
/** Give avrorouter some time to process the events */
test.stop_timeout();
sleep(10);
test.set_timeout(120);
for (int i = 1; i <=5; i++)
{
std::stringstream cmd;
cmd << "maxavrocheck -d /var/lib/maxscale/avro/test.t1.00000" << i << ".avro|wc -l";
char* rows = test.ssh_maxscale_output(true, cmd.str().c_str());
int nrows = atoi(rows);
free(rows);
test.add_result(nrows != 1, "Expected 1 line in file number %d, got %d", i, nrows);
}
execute_query(test.repl->nodes[0], "DROP TABLE test.t1;RESET MASTER");
test.repl->fix_replication();
return test.global_result;
}
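
For reference, the schema changes that the new test drives through the binlogrouter, restated as plain SQL from the queries above; the expectation that each Avro schema version file (test.t1.000001.avro through test.t1.000005.avro) holds exactly one row is the one checked by the loop at the end of the test.

```
CREATE TABLE test.t1 (id INT);
INSERT INTO test.t1 VALUES (1);                      -- goes to schema version 1
ALTER TABLE test.t1 ADD COLUMN a VARCHAR(100);
INSERT INTO test.t1 VALUES (2, "a");                 -- version 2
ALTER TABLE test.t1 ADD COLUMN b FLOAT;
INSERT INTO test.t1 VALUES (3, "b", 3.0);            -- version 3
ALTER TABLE test.t1 CHANGE COLUMN b c DATETIME(3);
INSERT INTO test.t1 VALUES (4, "c", NOW());          -- version 4
ALTER TABLE test.t1 DROP COLUMN c;
INSERT INTO test.t1 VALUES (5, "d");                 -- version 5
```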

View File

@@ -11,7 +11,7 @@ user=maxskysql
passwd= skysql

[RW Split Router]
-connection_timeout=30
+connection_timeout=10
type=service
router= readwritesplit
servers=server1, server2, server3,server4

View File

@@ -47,7 +47,8 @@ int main(int argc, char** argv)
    test.add_result(!mysql_stmt_prepare(stmt, query, strlen(query)), "Binary protocol preparation should fail");

    mysql_stmt_close(stmt);

-   test.try_query(test.conn_rwsplit, "DROP TABLE test.t1");
+   test.repl->connect();
+   test.try_query(test.repl->nodes[0], "DROP TABLE test.t1");

    return test.global_result;
}

View File

@@ -432,6 +432,7 @@ int Mariadb_nodes::start_replication()
                "mysql -u root %s -e \"STOP SLAVE; RESET SLAVE; RESET SLAVE ALL; RESET MASTER; SET GLOBAL read_only=OFF;\"",
                socket_cmd[i]);
        ssh_node(i, str, true);
+       ssh_node(i, "sudo rm -f /etc/my.cnf.d/kerb.cnf", true);
    }

    sprintf(str, "%s/create_user.sh", test_dir);

View File

@@ -14,49 +14,40 @@ router_options=max_sescmd_history=10
 * - execute one more session command, expect failure
 */

-#include <iostream>
#include "testconnections.h"

int main(int argc, char *argv[])
{
-    TestConnections * Test = new TestConnections(argc, argv);
-    Test->set_timeout(200);
-    int i;
-    char sql[256];
-    Test->tprintf("Open session and wait 20 seconds\n");
-    Test->connect_maxscale();
-    sleep(20);
-    Test->tprintf("Execute query to check session\n");
-    Test->try_query(Test->conn_rwsplit, "SELECT 1");
-    Test->tprintf("Wait 35 seconds more and try quiry again expecting failure\n");
-    sleep(35);
-    if (execute_query(Test->conn_rwsplit, "SELECT 1") == 0)
-    {
-        Test->add_result(1, "Session was not closed after 40 seconds\n");
-    }
-    Test->close_maxscale_connections();
-    Test->tprintf("Open session and execute 10 session commands\n");
-    fflush(stdout);
-    Test->connect_maxscale();
-    for (i = 0; i < 10; i++)
-    {
-        sprintf(sql, "set @test=%d", i);
-        Test->try_query(Test->conn_rwsplit, sql);
-    }
-    Test->tprintf("done!\n");
-    Test->tprintf("Execute one more session command and expect message in error log\n");
-    execute_query(Test->conn_rwsplit, "set @test=11");
-    sleep(5);
-    Test->check_log_err((char *) "Router session exceeded session command history limit", true);
-    Test->close_maxscale_connections();
-    int rval = Test->global_result;
-    delete Test;
-    return rval;
+    TestConnections test(argc, argv);
+    int first_sleep = 5;
+    int second_sleep = 12;
+
+    test.set_timeout(200);
+
+    test.tprintf("Open session, wait %d seconds and execute a query", first_sleep);
+    test.connect_maxscale();
+    sleep(first_sleep);
+    test.try_query(test.conn_rwsplit, "SELECT 1");
+
+    test.tprintf("Wait %d seconds and execute query, expecting failure", second_sleep);
+    sleep(second_sleep);
+    test.add_result(execute_query(test.conn_rwsplit, "SELECT 1") == 0,
+                    "Session was not closed after %d seconds",
+                    second_sleep);
+    test.close_maxscale_connections();
+
+    test.tprintf("Open session and execute 10 session commands");
+    test.connect_maxscale();
+    for (int i = 0; i < 10; i++)
+    {
+        test.try_query(test.conn_rwsplit, "set @test=1");
+    }
+
+    test.tprintf("Execute one more session command and expect message in error log");
+    execute_query(test.conn_rwsplit, "set @test=1");
+    sleep(1);
+    test.check_log_err("Router session exceeded session command history limit", true);
+    test.close_maxscale_connections();
+
+    return test.global_result;
}

View File

@@ -6,7 +6,7 @@ pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
static char** sql = NULL;
static size_t sql_size = 0;

-int execute_select_query_and_check(MYSQL *conn, char *sql, unsigned long long int rows)
+int execute_select_query_and_check(MYSQL *conn, const char *sql, unsigned long long int rows)
{
    MYSQL_RES *res;
    MYSQL_ROW row;

View File

@@ -11,7 +11,7 @@
 * @param rows Expected number of rows
 * @return 0 in case of success
 */
-int execute_select_query_and_check(MYSQL *conn, char *sql, unsigned long long int rows);
+int execute_select_query_and_check(MYSQL *conn, const char *sql, unsigned long long int rows);

/**
 * @brief create_t1 Create t1 table, fields: (x1 int, fl int)

View File

@@ -1,16 +1,16 @@
/**
- * @file temporal_tables.cpp Check temporal tables commands functionality (relates to bug 430)
+ * Check temporary tables commands functionality (relates to bug 430)
+ *
 * - create t1 table and put some data into it
- * - create tempral table t1
+ * - create temporary table t1
 * - insert different data into t1
- * - check that SELECT FROM t1 gives data from tempral table
- * - create other connections using all Maxscale services and check that SELECT via these connections gives data from main t1, not temporal
- * - dropping tempral t1
+ * - check that SELECT FROM t1 gives data from temporary table
+ * - create other connections using all MaxScale services and check that SELECT
+ *   via these connections gives data from main t1, not temporary
+ * - dropping temporary t1
 * - check that data from main t1 is not affected
 */

-#include <iostream>
#include "testconnections.h"
#include "sql_t1.h"

@@ -18,82 +18,45 @@ using namespace std;
int main(int argc, char *argv[])
{
-    TestConnections * Test = new TestConnections(argc, argv);
-    Test->repl->connect();
-
-    MYSQL * conn;
-    char sql[100];
-
-    Test->set_timeout(40);
-    conn = Test->open_rwsplit_connection();
-
-    Test->tprintf("Cleaning up DB\n");
-    execute_query(conn, (char *) "DROP DATABASE IF EXISTS test");
-    execute_query(conn, (char *) "CREATE DATABASE test");
-    execute_query(conn, (char *) "USE test");
-
-    Test->tprintf("creating table t1\n");
-    Test->set_timeout(40);
-    create_t1(conn);
-
-    Test->tprintf("Inserting two rows into t1\n");
-    Test->set_timeout(40);
-    execute_query(conn, "INSERT INTO t1 (x1, fl) VALUES(0, 1);");
-    execute_query(conn, "INSERT INTO t1 (x1, fl) VALUES(1, 1);");
-
-    Test->tprintf("Creating temporal table t1\n");
-    execute_query(conn, "create temporary table t1 as (SELECT * FROM t1 WHERE fl=3);");
-
-    Test->tprintf("Inserting one row into temporal table\n");
-    execute_query(conn, "INSERT INTO t1 (x1, fl) VALUES(0, 1);");
-
-    Test->tprintf("Checking t1 temporal table\n");
-    Test->set_timeout(240);
-    Test->add_result(execute_select_query_and_check(conn, (char *) "SELECT * FROM t1;", 1), "check failed\n");
-
-    Test->tprintf("Connecting to all MaxScale routers and checking main t1 table (not temporal)\n");
-    Test->set_timeout(240);
-    Test->add_result(Test->connect_maxscale(), "Connectiong to Maxscale failed\n");
-
-    Test->tprintf("Checking t1 table using RWSplit router\n");
-    Test->set_timeout(240);
-    Test->add_result(execute_select_query_and_check(Test->conn_rwsplit, (char *) "SELECT * FROM t1;", 2),
-                     "check failed\n");
-    Test->tprintf("Checking t1 table using ReadConn router in master mode\n");
-    Test->set_timeout(240);
-    Test->add_result(execute_select_query_and_check(Test->conn_master, (char *) "SELECT * FROM t1;", 2),
-                     "check failed\n");
-    Test->tprintf("Checking t1 table using ReadConn router in slave mode\n");
-    Test->set_timeout(240);
-    Test->add_result(execute_select_query_and_check(Test->conn_slave, (char *) "SELECT * FROM t1;", 2),
-                     "check failed\n");
-    Test->close_maxscale_connections();
-
-    printf("Dropping temparal table and check main table again\n");
-    execute_query(conn, "DROP TABLE t1;");
-
-    printf("Connecting to all MaxScale routers and checking main t1 table (not temporal)\n");
-    Test->add_result(Test->connect_maxscale(), "Connectiong to Maxscale failed\n");
-
-    Test->tprintf("Checking t1 table using RWSplit router\n");
-    Test->set_timeout(240);
-    Test->add_result(execute_select_query_and_check(Test->conn_rwsplit, (char *) "SELECT * FROM t1;", 2),
-                     "check failed\n");
-    Test->tprintf("Checking t1 table using ReadConn router in master mode\n");
-    Test->set_timeout(240);
-    Test->add_result(execute_select_query_and_check(Test->conn_master, (char *) "SELECT * FROM t1;", 2),
-                     "check failed\n");
-    Test->tprintf("Checking t1 table using ReadConn router in slave mode\n");
-    Test->set_timeout(240);
-    Test->add_result(execute_select_query_and_check(Test->conn_slave, (char *) "SELECT * FROM t1;", 2),
-                     "check failed\n");
-    Test->close_maxscale_connections();
-
-    mysql_close(conn);
-
-    int rval = Test->global_result;
-    delete Test;
-    return rval;
+    TestConnections test(argc, argv);
+    test.connect_maxscale();
+
+    test.tprintf("Create a table and insert two rows into it");
+    test.set_timeout(30);
+    execute_query(test.conn_rwsplit, "USE test");
+    create_t1(test.conn_rwsplit);
+    execute_query(test.conn_rwsplit, "INSERT INTO t1 (x1, fl) VALUES(0, 1)");
+    execute_query(test.conn_rwsplit, "INSERT INTO t1 (x1, fl) VALUES(1, 1)");
+
+    test.tprintf("Create temporary table and insert one row");
+    test.set_timeout(30);
+    execute_query(test.conn_rwsplit, "create temporary table t1 as (SELECT * FROM t1 WHERE fl=3)");
+    execute_query(test.conn_rwsplit, "INSERT INTO t1 (x1, fl) VALUES(0, 1)");
+
+    test.tprintf("Check that the temporary table has one row");
+    test.set_timeout(90);
+    test.add_result(execute_select_query_and_check(test.conn_rwsplit, "SELECT * FROM t1", 1),
+                    "Current connection should show one row");
+    test.add_result(execute_select_query_and_check(test.conn_master, "SELECT * FROM t1", 2),
+                    "New connection should show two rows");
+    test.add_result(execute_select_query_and_check(test.conn_slave, "SELECT * FROM t1", 2),
+                    "New connection should show two rows");
+
+    printf("Drop temporary table and check that the real table has two rows");
+    test.set_timeout(90);
+    execute_query(test.conn_rwsplit, "DROP TABLE t1");
+    test.add_result(execute_select_query_and_check(test.conn_rwsplit, "SELECT * FROM t1", 2),
+                    "check failed");
+    test.add_result(execute_select_query_and_check(test.conn_master, "SELECT * FROM t1", 2),
+                    "check failed");
+    test.add_result(execute_select_query_and_check(test.conn_slave, "SELECT * FROM t1", 2),
+                    "check failed");
+
+    test.close_maxscale_connections();
+
+    return test.global_result;
}
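
The behaviour the rewritten test checks can be summarised in plain SQL. The column list comes from the `create_t1` helper documented in sql_t1.h, and the expected row counts mirror the assertions above; this is a restatement, not new commit content.

```
CREATE TABLE t1 (x1 INT, fl INT);
INSERT INTO t1 (x1, fl) VALUES (0, 1);
INSERT INTO t1 (x1, fl) VALUES (1, 1);

CREATE TEMPORARY TABLE t1 AS (SELECT * FROM t1 WHERE fl = 3);  -- shadows the real t1 in this session
INSERT INTO t1 (x1, fl) VALUES (0, 1);

SELECT * FROM t1;  -- this session sees 1 row (the temporary table);
                   -- other connections see 2 rows (the real table)

DROP TABLE t1;     -- drops only the temporary table
SELECT * FROM t1;  -- 2 rows again for every connection
```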

View File

@@ -20,7 +20,10 @@ import selectors
import binascii
import os

+schema_read = False
+
def read_data():
+    global schema_read
    sel = selectors.DefaultSelector()
    sel.register(sock, selectors.EVENT_READ)

@@ -29,8 +32,17 @@ def read_data():
            events = sel.select(timeout=int(opts.read_timeout) if int(opts.read_timeout) > 0 else None)
            buf = sock.recv(4096, socket.MSG_DONTWAIT)
            if len(buf) > 0:
+                # If the request for data is rejected, an error will be sent instead of the table schema
+                if not schema_read:
+                    if "err" in buf.decode().lower():
+                        print(buf.decode(), file=sys.stderr)
+                        exit(1)
+                    else:
+                        schema_read = True
+
                os.write(sys.stdout.fileno(), buf)
                sys.stdout.flush()
            else:
                raise Exception('Socket was closed')

@@ -40,6 +52,13 @@ def read_data():
            print(ex, file=sys.stderr)
            break

+def check_for_err(err):
+    if "err" in err.lower().strip():
+        print(err.strip(), file=sys.stderr)
+        exit(1)
+
parser = argparse.ArgumentParser(description = "CDC Binary consumer", conflict_handler="resolve")
parser.add_argument("-h", "--host", dest="host", help="Network address where the connection is made", default="localhost")
parser.add_argument("-P", "--port", dest="port", help="Port where the connection is made", default="4001")

@@ -60,13 +79,17 @@ auth_string += bytes(hashlib.sha1(opts.password.encode("utf_8")).hexdigest().enc
sock.send(auth_string)

# Discard the response
-response = str(sock.recv(1024)).encode('utf_8')
+response = sock.recv(1024).decode()
+check_for_err(response)

# Register as a client and request Avro format data
sock.send(bytes(("REGISTER UUID=XXX-YYY_YYY, TYPE=" + opts.format).encode()))

# Discard the response again
-response = str(sock.recv(1024)).encode('utf_8')
+response = sock.recv(1024).decode()
+check_for_err(response)

# Request a data stream
sock.send(bytes(("REQUEST-DATA " + opts.FILE + (" " + opts.GTID if opts.GTID else "")).encode()))

View File

@@ -575,9 +575,12 @@ int extract_type_length(const char* ptr, char *dest)
    }

    /** Store type */
-    int typelen = ptr - start;
-    memcpy(dest, start, typelen);
-    dest[typelen] = '\0';
+    for (const char* c = start; c < ptr; c++)
+    {
+        *dest++ = tolower(*c);
+    }
+    *dest++ = '\0';

    /** Skip whitespace */
    while (*ptr && isspace(*ptr))

@@ -880,7 +883,7 @@ void read_alter_identifier(const char *sql, const char *end, char *dest, int siz
void make_avro_token(char* dest, const char* src, int length)
{
-    while (*src == '(' || *src == ')' || *src == '`' || isspace(*src))
+    while (length > 0 && (*src == '(' || *src == ')' || *src == '`' || isspace(*src)))
    {
        src++;
        length--;

@@ -902,16 +905,17 @@ void make_avro_token(char* dest, const char* src, int length)
    fix_reserved_word(dest);
}

-int get_column_index(TABLE_CREATE *create, const char *tok)
+int get_column_index(TABLE_CREATE *create, const char *tok, int len)
{
    int idx = -1;
-    char safe_tok[strlen(tok) + 2];
-    strcpy(safe_tok, tok);
+    char safe_tok[len + 2];
+    memcpy(safe_tok, tok, len);
+    safe_tok[len] = '\0';
    fix_reserved_word(safe_tok);

    for (int x = 0; x < create->columns; x++)
    {
-        if (strcasecmp(create->column_names[x], tok) == 0)
+        if (strcasecmp(create->column_names[x], safe_tok) == 0)
        {
            idx = x;
            break;

@@ -950,18 +954,17 @@ bool table_create_alter(TABLE_CREATE *create, const char *sql, const char *end)
        {
            tok = get_tok(tok + len, &len, end);

-            char ** tmp = MXS_REALLOC(create->column_names, sizeof(char*) * create->columns + 1);
-            ss_dassert(tmp);
-
-            if (tmp == NULL)
-            {
-                return false;
-            }
-
-            create->column_names = tmp;
+            create->column_names = MXS_REALLOC(create->column_names, sizeof(char*) * create->columns + 1);
+            create->column_types = MXS_REALLOC(create->column_types, sizeof(char*) * create->columns + 1);
+            create->column_lengths = MXS_REALLOC(create->column_lengths, sizeof(int) * create->columns + 1);

            char avro_token[len + 1];
            make_avro_token(avro_token, tok, len);
+            char field_type[200] = ""; // Enough to hold all types
+            int field_length = extract_type_length(tok + len, field_type);
            create->column_names[create->columns] = MXS_STRDUP_A(avro_token);
+            create->column_types[create->columns] = MXS_STRDUP_A(field_type);
+            create->column_lengths[create->columns] = field_length;
            create->columns++;
            updates++;

            tok = get_next_def(tok, end);

@@ -971,25 +974,22 @@ bool table_create_alter(TABLE_CREATE *create, const char *sql, const char *end)
        {
            tok = get_tok(tok + len, &len, end);

-            int idx = get_column_index(create, tok);
+            int idx = get_column_index(create, tok, len);

            if (idx != -1)
            {
                MXS_FREE(create->column_names[idx]);
+                MXS_FREE(create->column_types[idx]);

                for (int i = idx; i < (int)create->columns - 1; i++)
                {
                    create->column_names[i] = create->column_names[i + 1];
+                    create->column_types[i] = create->column_types[i + 1];
+                    create->column_lengths[i] = create->column_lengths[i + 1];
                }

-                char ** tmp = realloc(create->column_names, sizeof(char*) * create->columns - 1);
-                ss_dassert(tmp);
-
-                if (tmp == NULL)
-                {
-                    return false;
-                }
-
-                create->column_names = tmp;
+                create->column_names = MXS_REALLOC(create->column_names, sizeof(char*) * create->columns - 1);
+                create->column_types = MXS_REALLOC(create->column_types, sizeof(char*) * create->columns - 1);
+                create->column_lengths = MXS_REALLOC(create->column_lengths, sizeof(int) * create->columns - 1);

                create->columns--;
                updates++;
            }

@@ -1001,12 +1001,19 @@ bool table_create_alter(TABLE_CREATE *create, const char *sql, const char *end)
        {
            tok = get_tok(tok + len, &len, end);

-            int idx = get_column_index(create, tok);
+            int idx = get_column_index(create, tok, len);

-            if (idx != -1)
+            if (idx != -1 && (tok = get_tok(tok + len, &len, end)))
            {
                MXS_FREE(create->column_names[idx]);
-                create->column_names[idx] = strndup(tok, len);
+                MXS_FREE(create->column_types[idx]);
+                char avro_token[len + 1];
+                make_avro_token(avro_token, tok, len);
+                char field_type[200] = ""; // Enough to hold all types
+                int field_length = extract_type_length(tok + len, field_type);
+                create->column_names[idx] = MXS_STRDUP_A(avro_token);
+                create->column_types[idx] = MXS_STRDUP_A(field_type);
+                create->column_lengths[idx] = field_length;
                updates++;
            }

@@ -1021,7 +1028,7 @@ bool table_create_alter(TABLE_CREATE *create, const char *sql, const char *end)
    }

    /** Only increment the create version if it has an associated .avro
-     * file. The .avro file is only created if it is acutally used. */
+     * file. The .avro file is only created if it is actually used. */
    if (updates > 0 && create->was_used)
    {
        create->version++;