Merge branch '2.0' into 2.1
commit 4c9fc1a39e
@@ -15,17 +15,7 @@ then
sudo apt-get install -y --force-yes dpkg-dev git gcc g++ ncurses-dev bison \
    build-essential libssl-dev libaio-dev perl make libtool libcurl4-openssl-dev \
    libpcre3-dev flex tcl libeditline-dev uuid-dev liblzma-dev libsqlite3-dev \
    sqlite3 liblua5.1 liblua5.1-dev wget
sudo apt-get install -y --force-yes libgnutls-dev
if [ $? != 0 ]
then
    sudo apt-get install -y --force-yes libgnutls28-dev
fi
sudo apt-get install -y --force-yes libgcrypt20
if [ $? != 0 ]
then
    sudo apt-get install -y --force-yes libgcrypt11
fi
    sqlite3 liblua5.1 liblua5.1-dev wget
else
## RPM-based distro
command -v yum
@@ -36,10 +26,7 @@ else
sudo zypper -n install gcc gcc-c++ ncurses-devel bison glibc-devel libgcc_s1 perl \
    make libtool libopenssl-devel libaio libaio-devel flex libcurl-devel \
    pcre-devel git wget tcl libuuid-devel \
    xz-devel sqlite3 sqlite3-devel pkg-config lua lua-devel \
    gnutls
sudo zypper -n install gcrypt
sudo zypper -n install libgcrypt
    xz-devel sqlite3 sqlite3-devel pkg-config lua lua-devel
sudo zypper -n install rpm-build
cat /etc/*-release | grep "SUSE Linux Enterprise Server 11"
@@ -54,8 +41,7 @@ else
    libgcc perl make libtool openssl-devel libaio libaio-devel libedit-devel \
    libedit-devel libcurl-devel curl-devel systemtap-sdt-devel rpm-sign wget \
    gnupg pcre-devel flex rpmdevtools git wget tcl openssl libuuid-devel xz-devel \
    sqlite sqlite-devel pkgconfig lua lua-devel rpm-build createrepo yum-utils \
    gnutls gcrypt
    sqlite sqlite-devel pkgconfig lua lua-devel rpm-build createrepo yum-utils

cat /etc/redhat-release | grep "release 5"
if [ $? == 0 ]
@@ -149,7 +135,7 @@ then
echo "Error getting avro-c"
exit 1
fi
avro_filename=`ls -1 *.tar.gz`
avro_filename=`ls -1 avro*.tar.gz`
avro_dir=`echo "$avro_filename" | sed "s/.tar.gz//"`
tar -axf $avro_filename
mkdir $avro_dir/build
@@ -37,6 +37,7 @@ For more details, please refer to:
as JSON objects (beta level functionality).

For more details, please refer to:
* [MariaDB MaxScale 2.0.6 Release Notes](Release-Notes/MaxScale-2.0.6-Release-Notes.md)
* [MariaDB MaxScale 2.0.5 Release Notes](Release-Notes/MaxScale-2.0.5-Release-Notes.md)
* [MariaDB MaxScale 2.0.4 Release Notes](Release-Notes/MaxScale-2.0.4-Release-Notes.md)
* [MariaDB MaxScale 2.0.3 Release Notes](Release-Notes/MaxScale-2.0.3-Release-Notes.md)
@@ -101,11 +101,14 @@ two steps from above.

## Creating additional grants for users

Because MariaDB MaxScale sits between the clients and the backend databases,
the backend databases will see all clients as if they were connecting from
MariaDB MaxScale's address.
This usually requires users to create additional grants for MariaDB MaxScale's hostname.
The best way to describe this process is with an example.
**Note:** The client host and MaxScale host must have the same username and
password for both client and MaxScale hosts.

Because MariaDB MaxScale sits between the clients and the backend databases, the
backend databases will see all clients as if they were connecting from MariaDB
MaxScale's address. This usually requires users to create additional grants for
MariaDB MaxScale's hostname. The best way to describe this process is with an
example.

User `'jdoe'@'192.168.0.200` has the following grant on the cluster:
`GRANT SELECT, INSERT, UPDATE, DELETE ON *.* TO 'jdoe'@'192.168.0.200'`.
@@ -134,18 +137,22 @@ MariaDB [(none)]> SHOW GRANTS FOR 'jdoe'@'192.168.0.200';
```
Then creating the user `'jdoe'@'192.168.0.101'` and giving it the same grants:
```
MariaDB [(none)]> CREATE USER 'jdoe'@'192.168.0.101';
MariaDB [(none)]> CREATE USER 'jdoe'@'192.168.0.101' IDENTIFIED BY 'secret_password';
Query OK, 0 rows affected (0.00 sec)

MariaDB [(none)]> GRANT SELECT, INSERT, UPDATE, DELETE ON *.* TO 'jdoe'@'192.168.0.101';
Query OK, 0 rows affected (0.00 sec)
```

The other option is to use a wildcard grant like
`GRANT SELECT, INSERT, UPDATE, DELETE ON *.* TO 'jdoe'@'%'`.
This is more convenient but also less secure than having specific grants
for both the client's address and MariaDB MaxScale's address.
The other option is to use a wildcard grant like the following:

```
GRANT SELECT, INSERT, UPDATE, DELETE ON *.* TO 'jdoe'@'%' IDENTIFIED BY 'secret_password'
```

This is more convenient but less secure than having specific grants for both the
client's address and MariaDB MaxScale's address as it allows access from all
hosts.
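One way to verify the grants (a minimal sketch, not part of the tutorial's own examples: the listener port 4006 and the mysql command line are assumptions) is to connect through MariaDB MaxScale and ask the backend which account it actually matched:

```
$ mysql -u jdoe -psecret_password -h <maxscale-address> -P 4006
MariaDB [(none)]> SELECT USER(), CURRENT_USER();
```

`USER()` reports the connection as the backend sees it, coming from MariaDB MaxScale's address (192.168.0.101 in this example), while `CURRENT_USER()` reports the account the backend matched, which should be `'jdoe'@'192.168.0.101'` or `'jdoe'@'%'` depending on which of the two grants was created.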
## Creating the configuration file

@@ -608,6 +608,7 @@ add_test_executable(test_hints.cpp test_hints hints2 LABELS hintfilter LIGHT REP

# Binlogrouter tests, these heavily alter the replication so they are run last
add_test_executable(avro.cpp avro avro LABELS avrorouter binlogrouter LIGHT BREAKS_REPL)
add_test_executable(avro_alter.cpp avro_alter avro LABELS avrorouter binlogrouter LIGHT BREAKS_REPL)

# Test avrorouter file compression
#add_test_script(avro_compression avro avro_compression LABELS avrorouter binlogrouter LIGHT BREAKS_REPL)
@@ -87,6 +87,7 @@ int main(int argc, char *argv[])
    }

    execute_query(test.repl->nodes[0], "DROP TABLE test.t1;RESET MASTER");
    test.stop_timeout();
    test.repl->fix_replication();

    return test.global_result;
maxscale-system-test/avro_alter.cpp (new file, 57 lines)
@@ -0,0 +1,57 @@
/**
 * @file avro_alter.cpp Test ALTER TABLE handling of avrorouter
 */

#include "testconnections.h"
#include <sstream>

int main(int argc, char *argv[])
{

    TestConnections test(argc, argv);
    test.set_timeout(600);
    test.ssh_maxscale(true, (char *) "rm -rf /var/lib/maxscale/avro");

    /** Start master to binlogrouter replication */
    if (!test.replicate_from_master())
    {
        return 1;
    }

    test.set_timeout(120);
    test.repl->connect();

    execute_query_silent(test.repl->nodes[0], "DROP TABLE test.t1");
    execute_query(test.repl->nodes[0], "CREATE TABLE test.t1(id INT)");
    execute_query(test.repl->nodes[0], "INSERT INTO test.t1 VALUES (1)");
    execute_query(test.repl->nodes[0], "ALTER TABLE test.t1 ADD COLUMN a VARCHAR(100)");
    execute_query(test.repl->nodes[0], "INSERT INTO test.t1 VALUES (2, \"a\")");
    execute_query(test.repl->nodes[0], "ALTER TABLE test.t1 ADD COLUMN b FLOAT");
    execute_query(test.repl->nodes[0], "INSERT INTO test.t1 VALUES (3, \"b\", 3.0)");
    execute_query(test.repl->nodes[0], "ALTER TABLE test.t1 CHANGE COLUMN b c DATETIME(3)");
    execute_query(test.repl->nodes[0], "INSERT INTO test.t1 VALUES (4, \"c\", NOW())");
    execute_query(test.repl->nodes[0], "ALTER TABLE test.t1 DROP COLUMN c");
    execute_query(test.repl->nodes[0], "INSERT INTO test.t1 VALUES (5, \"d\")");

    test.repl->close_connections();

    /** Give avrorouter some time to process the events */
    test.stop_timeout();
    sleep(10);
    test.set_timeout(120);

    for (int i = 1; i <= 5; i++)
    {
        std::stringstream cmd;
        cmd << "maxavrocheck -d /var/lib/maxscale/avro/test.t1.00000" << i << ".avro|wc -l";
        char* rows = test.ssh_maxscale_output(true, cmd.str().c_str());
        int nrows = atoi(rows);
        free(rows);
        test.add_result(nrows != 1, "Expected 1 line in file number %d, got %d", i, nrows);
    }

    execute_query(test.repl->nodes[0], "DROP TABLE test.t1;RESET MASTER");
    test.repl->fix_replication();

    return test.global_result;
}
@@ -11,7 +11,7 @@ user=maxskysql
passwd= skysql

[RW Split Router]
connection_timeout=30
connection_timeout=10
type=service
router= readwritesplit
servers=server1, server2, server3,server4
@@ -47,7 +47,8 @@ int main(int argc, char** argv)
    test.add_result(!mysql_stmt_prepare(stmt, query, strlen(query)), "Binary protocol preparation should fail");
    mysql_stmt_close(stmt);

    test.try_query(test.conn_rwsplit, "DROP TABLE test.t1");
    test.repl->connect();
    test.try_query(test.repl->nodes[0], "DROP TABLE test.t1");

    return test.global_result;
}
@@ -432,6 +432,7 @@ int Mariadb_nodes::start_replication()
                 "mysql -u root %s -e \"STOP SLAVE; RESET SLAVE; RESET SLAVE ALL; RESET MASTER; SET GLOBAL read_only=OFF;\"",
                 socket_cmd[i]);
        ssh_node(i, str, true);
        ssh_node(i, "sudo rm -f /etc/my.cnf.d/kerb.cnf", true);
    }

    sprintf(str, "%s/create_user.sh", test_dir);
@@ -14,49 +14,40 @@ router_options=max_sescmd_history=10
 * - execute one more session commad, excpect failure
 */

#include <iostream>
#include "testconnections.h"

int main(int argc, char *argv[])
{
    TestConnections * Test = new TestConnections(argc, argv);
    Test->set_timeout(200);
    int i;
    char sql[256];
    TestConnections test(argc, argv);
    int first_sleep = 5;
    int second_sleep = 12;

    Test->tprintf("Open session and wait 20 seconds\n");
    Test->connect_maxscale();
    sleep(20);
    Test->tprintf("Execute query to check session\n");
    Test->try_query(Test->conn_rwsplit, "SELECT 1");
    test.set_timeout(200);

    Test->tprintf("Wait 35 seconds more and try quiry again expecting failure\n");
    sleep(35);
    if (execute_query(Test->conn_rwsplit, "SELECT 1") == 0)
    test.tprintf("Open session, wait %d seconds and execute a query", first_sleep);
    test.connect_maxscale();
    sleep(first_sleep);
    test.try_query(test.conn_rwsplit, "SELECT 1");

    test.tprintf("Wait %d seconds and execute query, expecting failure", second_sleep);
    sleep(second_sleep);
    test.add_result(execute_query(test.conn_rwsplit, "SELECT 1") == 0,
                    "Session was not closed after %d seconds",
                    second_sleep);
    test.close_maxscale_connections();

    test.tprintf("Open session and execute 10 session commands");
    test.connect_maxscale();
    for (int i = 0; i < 10; i++)
    {
        Test->add_result(1, "Session was not closed after 40 seconds\n");
        test.try_query(test.conn_rwsplit, "set @test=1");
    }
    Test->close_maxscale_connections();

    Test->tprintf("Open session and execute 10 session commands\n");
    fflush(stdout);
    Test->connect_maxscale();
    for (i = 0; i < 10; i++)
    {
        sprintf(sql, "set @test=%d", i);
        Test->try_query(Test->conn_rwsplit, sql);
    }
    Test->tprintf("done!\n");
    test.tprintf("Execute one more session command and expect message in error log");
    execute_query(test.conn_rwsplit, "set @test=1");
    sleep(1);
    test.check_log_err("Router session exceeded session command history limit", true);
    test.close_maxscale_connections();

    Test->tprintf("Execute one more session command and expect message in error log\n");
    execute_query(Test->conn_rwsplit, "set @test=11");
    sleep(5);
    Test->check_log_err((char *) "Router session exceeded session command history limit", true);
    Test->close_maxscale_connections();

    int rval = Test->global_result;
    delete Test;
    return rval;
    return test.global_result;
}
@@ -6,7 +6,7 @@ pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
static char** sql = NULL;
static size_t sql_size = 0;

int execute_select_query_and_check(MYSQL *conn, char *sql, unsigned long long int rows)
int execute_select_query_and_check(MYSQL *conn, const char *sql, unsigned long long int rows)
{
    MYSQL_RES *res;
    MYSQL_ROW row;
@@ -11,7 +11,7 @@
 * @param rows Expected number of rows
 * @return 0 in case of success
 */
int execute_select_query_and_check(MYSQL *conn, char *sql, unsigned long long int rows);
int execute_select_query_and_check(MYSQL *conn, const char *sql, unsigned long long int rows);

/**
 * @brief create_t1 Create t1 table, fileds: (x1 int, fl int)
@@ -1,16 +1,16 @@
/**
 * @file temporal_tables.cpp Check temporal tables commands functionality (relates to bug 430)
 * Check temporary tables commands functionality (relates to bug 430)
 *
 * - create t1 table and put some data into it
 * - create tempral table t1
 * - create temporary table t1
 * - insert different data into t1
 * - check that SELECT FROM t1 gives data from tempral table
 * - create other connections using all Maxscale services and check that SELECT via these connections gives data from main t1, not temporal
 * - dropping tempral t1
 * - check that SELECT FROM t1 gives data from temporary table
 * - create other connections using all MaxScale services and check that SELECT
 *   via these connections gives data from main t1, not temporary
 * - dropping temporary t1
 * - check that data from main t1 is not affected
 */


#include <iostream>
#include "testconnections.h"
#include "sql_t1.h"

@@ -18,82 +18,45 @@ using namespace std;

int main(int argc, char *argv[])
{
    TestConnections test(argc, argv);
    test.connect_maxscale();

    TestConnections * Test = new TestConnections(argc, argv);
    test.tprintf("Create a table and insert two rows into it");
    test.set_timeout(30);

    Test->repl->connect();
    execute_query(test.conn_rwsplit, "USE test");
    create_t1(test.conn_rwsplit);
    execute_query(test.conn_rwsplit, "INSERT INTO t1 (x1, fl) VALUES(0, 1)");
    execute_query(test.conn_rwsplit, "INSERT INTO t1 (x1, fl) VALUES(1, 1)");

    MYSQL * conn;
    char sql[100];
    test.tprintf("Create temporary table and insert one row");
    test.set_timeout(30);

    Test->set_timeout(40);
    conn = Test->open_rwsplit_connection();
    execute_query(test.conn_rwsplit, "create temporary table t1 as (SELECT * FROM t1 WHERE fl=3)");
    execute_query(test.conn_rwsplit, "INSERT INTO t1 (x1, fl) VALUES(0, 1)");

    Test->tprintf("Cleaning up DB\n");
    execute_query(conn, (char *) "DROP DATABASE IF EXISTS test");
    execute_query(conn, (char *) "CREATE DATABASE test");
    execute_query(conn, (char *) "USE test");
    test.tprintf("Check that the temporary table has one row");
    test.set_timeout(90);

    Test->tprintf("creating table t1\n");
    Test->set_timeout(40);
    create_t1(conn);
    test.add_result(execute_select_query_and_check(test.conn_rwsplit, "SELECT * FROM t1", 1),
                    "Current connection should show one row");
    test.add_result(execute_select_query_and_check(test.conn_master, "SELECT * FROM t1", 2),
                    "New connection should show two rows");
    test.add_result(execute_select_query_and_check(test.conn_slave, "SELECT * FROM t1", 2),
                    "New connection should show two rows");

    Test->tprintf("Inserting two rows into t1\n");
    Test->set_timeout(40);
    execute_query(conn, "INSERT INTO t1 (x1, fl) VALUES(0, 1);");
    execute_query(conn, "INSERT INTO t1 (x1, fl) VALUES(1, 1);");
    printf("Drop temporary table and check that the real table has two rows");
    test.set_timeout(90);

    Test->tprintf("Creating temporal table t1\n");
    execute_query(conn, "create temporary table t1 as (SELECT * FROM t1 WHERE fl=3);");
    execute_query(test.conn_rwsplit, "DROP TABLE t1");
    test.add_result(execute_select_query_and_check(test.conn_rwsplit, "SELECT * FROM t1", 2),
                    "check failed");
    test.add_result(execute_select_query_and_check(test.conn_master, "SELECT * FROM t1", 2),
                    "check failed");
    test.add_result(execute_select_query_and_check(test.conn_slave, "SELECT * FROM t1", 2),
                    "check failed");

    Test->tprintf("Inserting one row into temporal table\n");
    execute_query(conn, "INSERT INTO t1 (x1, fl) VALUES(0, 1);");
    test.close_maxscale_connections();

    Test->tprintf("Checking t1 temporal table\n");
    Test->set_timeout(240);
    Test->add_result(execute_select_query_and_check(conn, (char *) "SELECT * FROM t1;", 1), "check failed\n");


    Test->tprintf("Connecting to all MaxScale routers and checking main t1 table (not temporal)\n");
    Test->set_timeout(240);
    Test->add_result(Test->connect_maxscale(), "Connectiong to Maxscale failed\n");
    Test->tprintf("Checking t1 table using RWSplit router\n");
    Test->set_timeout(240);
    Test->add_result(execute_select_query_and_check(Test->conn_rwsplit, (char *) "SELECT * FROM t1;", 2),
                     "check failed\n");
    Test->tprintf("Checking t1 table using ReadConn router in master mode\n");
    Test->set_timeout(240);
    Test->add_result(execute_select_query_and_check(Test->conn_master, (char *) "SELECT * FROM t1;", 2),
                     "check failed\n");
    Test->tprintf("Checking t1 table using ReadConn router in slave mode\n");
    Test->set_timeout(240);
    Test->add_result(execute_select_query_and_check(Test->conn_slave, (char *) "SELECT * FROM t1;", 2),
                     "check failed\n");
    Test->close_maxscale_connections();


    printf("Dropping temparal table and check main table again\n");
    execute_query(conn, "DROP TABLE t1;");

    printf("Connecting to all MaxScale routers and checking main t1 table (not temporal)\n");
    Test->add_result(Test->connect_maxscale(), "Connectiong to Maxscale failed\n");
    Test->tprintf("Checking t1 table using RWSplit router\n");
    Test->set_timeout(240);
    Test->add_result(execute_select_query_and_check(Test->conn_rwsplit, (char *) "SELECT * FROM t1;", 2),
                     "check failed\n");
    Test->tprintf("Checking t1 table using ReadConn router in master mode\n");
    Test->set_timeout(240);
    Test->add_result(execute_select_query_and_check(Test->conn_master, (char *) "SELECT * FROM t1;", 2),
                     "check failed\n");
    Test->tprintf("Checking t1 table using ReadConn router in slave mode\n");
    Test->set_timeout(240);
    Test->add_result(execute_select_query_and_check(Test->conn_slave, (char *) "SELECT * FROM t1;", 2),
                     "check failed\n");
    Test->close_maxscale_connections();

    mysql_close(conn);

    int rval = Test->global_result;
    delete Test;
    return rval;
    return test.global_result;
}
@@ -20,7 +20,10 @@ import selectors
import binascii
import os

schema_read = False

def read_data():
    global schema_read
    sel = selectors.DefaultSelector()
    sel.register(sock, selectors.EVENT_READ)

@@ -29,8 +32,17 @@ def read_data():
events = sel.select(timeout=int(opts.read_timeout) if int(opts.read_timeout) > 0 else None)
buf = sock.recv(4096, socket.MSG_DONTWAIT)
if len(buf) > 0:
    # If the request for data is rejected, an error will be sent instead of the table schema
    if not schema_read:
        if "err" in buf.decode().lower():
            print(buf.decode(), file=sys.stderr)
            exit(1)
        else:
            schema_read = True

    os.write(sys.stdout.fileno(), buf)
    sys.stdout.flush()

else:
    raise Exception('Socket was closed')

@@ -40,6 +52,13 @@ def read_data():
print(ex, file=sys.stderr)
break


def check_for_err(err):
    if "err" in err.lower().strip():
        print(err.strip(), file=sys.stderr)
        exit(1)


parser = argparse.ArgumentParser(description = "CDC Binary consumer", conflict_handler="resolve")
parser.add_argument("-h", "--host", dest="host", help="Network address where the connection is made", default="localhost")
parser.add_argument("-P", "--port", dest="port", help="Port where the connection is made", default="4001")
@@ -60,13 +79,17 @@ auth_string += bytes(hashlib.sha1(opts.password.encode("utf_8")).hexdigest().enc
sock.send(auth_string)

# Discard the response
response = str(sock.recv(1024)).encode('utf_8')
response = sock.recv(1024).decode()

check_for_err(response)

# Register as a client as request Avro format data
sock.send(bytes(("REGISTER UUID=XXX-YYY_YYY, TYPE=" + opts.format).encode()))

# Discard the response again
response = str(sock.recv(1024)).encode('utf_8')
response = sock.recv(1024).decode()

check_for_err(response)

# Request a data stream
sock.send(bytes(("REQUEST-DATA " + opts.FILE + (" " + opts.GTID if opts.GTID else "")).encode()))
@@ -575,9 +575,12 @@ int extract_type_length(const char* ptr, char *dest)
    }

    /** Store type */
    int typelen = ptr - start;
    memcpy(dest, start, typelen);
    dest[typelen] = '\0';
    for (const char* c = start; c < ptr; c++)
    {
        *dest++ = tolower(*c);
    }

    *dest++ = '\0';

    /** Skip whitespace */
    while (*ptr && isspace(*ptr))
@@ -880,7 +883,7 @@ void read_alter_identifier(const char *sql, const char *end, char *dest, int siz

void make_avro_token(char* dest, const char* src, int length)
{
    while (*src == '(' || *src == ')' || *src == '`' || isspace(*src))
    while (length > 0 && (*src == '(' || *src == ')' || *src == '`' || isspace(*src)))
    {
        src++;
        length--;
@@ -902,16 +905,17 @@ void make_avro_token(char* dest, const char* src, int length)
    fix_reserved_word(dest);
}

int get_column_index(TABLE_CREATE *create, const char *tok)
int get_column_index(TABLE_CREATE *create, const char *tok, int len)
{
    int idx = -1;
    char safe_tok[strlen(tok) + 2];
    strcpy(safe_tok, tok);
    char safe_tok[len + 2];
    memcpy(safe_tok, tok, len);
    safe_tok[len] = '\0';
    fix_reserved_word(safe_tok);

    for (int x = 0; x < create->columns; x++)
    {
        if (strcasecmp(create->column_names[x], tok) == 0)
        if (strcasecmp(create->column_names[x], safe_tok) == 0)
        {
            idx = x;
            break;
@@ -950,18 +954,17 @@ bool table_create_alter(TABLE_CREATE *create, const char *sql, const char *end)
        {
            tok = get_tok(tok + len, &len, end);

            char ** tmp = MXS_REALLOC(create->column_names, sizeof(char*) * create->columns + 1);
            ss_dassert(tmp);
            create->column_names = MXS_REALLOC(create->column_names, sizeof(char*) * create->columns + 1);
            create->column_types = MXS_REALLOC(create->column_types, sizeof(char*) * create->columns + 1);
            create->column_lengths = MXS_REALLOC(create->column_lengths, sizeof(int) * create->columns + 1);

            if (tmp == NULL)
            {
                return false;
            }

            create->column_names = tmp;
            char avro_token[len + 1];
            make_avro_token(avro_token, tok, len);
            char field_type[200] = ""; // Enough to hold all types
            int field_length = extract_type_length(tok + len, field_type);
            create->column_names[create->columns] = MXS_STRDUP_A(avro_token);
            create->column_types[create->columns] = MXS_STRDUP_A(field_type);
            create->column_lengths[create->columns] = field_length;
            create->columns++;
            updates++;
            tok = get_next_def(tok, end);
@@ -971,25 +974,22 @@ bool table_create_alter(TABLE_CREATE *create, const char *sql, const char *end)
        {
            tok = get_tok(tok + len, &len, end);

            int idx = get_column_index(create, tok);
            int idx = get_column_index(create, tok, len);

            if (idx != -1)
            {
                MXS_FREE(create->column_names[idx]);
                MXS_FREE(create->column_types[idx]);
                for (int i = idx; i < (int)create->columns - 1; i++)
                {
                    create->column_names[i] = create->column_names[i + 1];
                    create->column_types[i] = create->column_types[i + 1];
                    create->column_lengths[i] = create->column_lengths[i + 1];
                }

                char ** tmp = realloc(create->column_names, sizeof(char*) * create->columns - 1);
                ss_dassert(tmp);

                if (tmp == NULL)
                {
                    return false;
                }

                create->column_names = tmp;
                create->column_names = MXS_REALLOC(create->column_names, sizeof(char*) * create->columns - 1);
                create->column_types = MXS_REALLOC(create->column_types, sizeof(char*) * create->columns - 1);
                create->column_lengths = MXS_REALLOC(create->column_lengths, sizeof(int) * create->columns - 1);
                create->columns--;
                updates++;
            }
@@ -1001,12 +1001,19 @@ bool table_create_alter(TABLE_CREATE *create, const char *sql, const char *end)
        {
            tok = get_tok(tok + len, &len, end);

            int idx = get_column_index(create, tok);
            int idx = get_column_index(create, tok, len);

            if (idx != -1)
            if (idx != -1 && (tok = get_tok(tok + len, &len, end)))
            {
                MXS_FREE(create->column_names[idx]);
                create->column_names[idx] = strndup(tok, len);
                MXS_FREE(create->column_types[idx]);
                char avro_token[len + 1];
                make_avro_token(avro_token, tok, len);
                char field_type[200] = ""; // Enough to hold all types
                int field_length = extract_type_length(tok + len, field_type);
                create->column_names[idx] = MXS_STRDUP_A(avro_token);
                create->column_types[idx] = MXS_STRDUP_A(field_type);
                create->column_lengths[idx] = field_length;
                updates++;
            }

@@ -1021,7 +1028,7 @@ bool table_create_alter(TABLE_CREATE *create, const char *sql, const char *end)
    }

    /** Only increment the create version if it has an associated .avro
     * file. The .avro file is only created if it is acutally used. */
     * file. The .avro file is only created if it is actually used. */
    if (updates > 0 && create->was_used)
    {
        create->version++;