Merge commit '09cb4a885f88d30b5108d215dcdaa5163229a230' into develop
Commit: 2aa3515fc8
@@ -66,7 +66,7 @@ rules=/home/user/rules.txt

[Firewalled-Routing-Service]
type=service
router=readconnrouter
router=readconnroute
servers=server1
user=myuser
password=mypasswd

@@ -186,7 +186,7 @@ filebase=/var/logs/qla/SelectProducts

[Product-Service]
type=service
router=readconnrouter
router=readconnroute
servers=server1
user=myuser
password=mypasswd

@@ -41,7 +41,7 @@ logging_source_user=maxtest

[RabbitMQ-Service]
type=service
router=readconnrouter
router=readconnroute
servers=server1
user=myuser
password=mypasswd

@@ -21,7 +21,7 @@ replace=replacement string

[MyService]
type=service
router=readconnrouter
router=readconnroute
servers=server1
user=myuser
password=mypasswd

@@ -107,7 +107,7 @@ replace=ENGINE=

[MyService]
type=service
router=readconnrouter
router=readconnroute
servers=server1
user=myuser
password=mypasswd

@@ -24,7 +24,7 @@ service=DataMart

[Data-Service]
type=service
router=readconnrouter
router=readconnroute
servers=server1
user=myuser
password=mypasswd

@@ -37,7 +37,7 @@ module=topfilter

[Service]
type=service
router=readconnrouter
router=readconnroute
servers=server1
user=myuser
password=mypasswd

@@ -196,7 +196,7 @@ In the service definition add both filters
```
[App-Service]
type=service
router=readconnrouter
router=readconnroute
servers=server1
user=myuser
password=mypasswd

@@ -21,7 +21,7 @@ module=tpmfilter

[MyService]
type=service
router=readconnrouter
router=readconnroute
servers=server1
user=myuser
password=mypasswd

@@ -130,7 +130,7 @@ named_pipe=/tmp/tpmfilter

[Product-Service]
type=service
router=readconnrouter
router=readconnroute
servers=server1
user=myuser
password=mypasswd
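For convenience (not part of the diff itself): after these documentation fixes, each service section shown above takes the same corrected form. The snippet below is assembled only from lines that appear in the hunks; the rest of each example configuration file is not shown here.

```
[MyService]
type=service
router=readconnroute
servers=server1
user=myuser
password=mypasswd
```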
@@ -167,6 +167,7 @@ runtime and can only be defined in a configuration file:
* `sql_mode`
* `local_address`
* `users_refresh_time`
* `load_persisted_configs`
* `admin_auth`
* `admin_ssl_key`
* `admin_ssl_cert`
@@ -901,6 +902,19 @@ it will only be disabled when the write queue is below `writeq_low_water`. The
parameter accepts size type values. The minimum allowed size is 512
bytes. `writeq_high_water` must always be greater than `writeq_low_water`.

#### `load_persisted_configs`

Load persisted runtime changes on startup. This parameter accepts boolean values
and is enabled by default. This parameter was added in MaxScale 2.3.6.

All runtime configuration changes are persisted in generated configuration files
located by default in `/var/lib/maxscale/maxscale.cnf.d/` and are loaded on
startup after main configuration files have been read. To make runtime
configurations volatile (i.e. they are lost when maxscale is restarted), use
`load_persisted_configs=false`. All changes are still persisted since it stores
the current runtime state of MaxScale. This makes problem analysis easier if an
unexpected outage happens.

### REST API Configuration

The MaxScale REST API is an HTTP interface that provides JSON format data
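For illustration only (not part of the diff): a minimal configuration fragment that makes runtime changes volatile, assuming the parameter is placed in the global `[maxscale]` section like other global options.

```
# Hypothetical example: discard persisted runtime changes on restart
[maxscale]
load_persisted_configs=false
```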
@@ -142,7 +142,7 @@ protocol=MariaDBBackend

[Cassandra]
type=service
router=readconnrouter
router=readconnroute
router_options=running
servers=CassandraDB
user=maxscale
@@ -129,6 +129,7 @@ extern const char CN_HITS[];
extern const char CN_ID[];
extern const char CN_INET[];
extern const char CN_LINKS[];
extern const char CN_LOAD_PERSISTED_CONFIGS[];
extern const char CN_LISTENER[];
extern const char CN_LISTENERS[];
extern const char CN_LOCALHOST_MATCH_WILDCARD_HOST[];

@@ -510,6 +511,7 @@ struct MXS_CONFIG
char peer_user[MAX_ADMIN_HOST_LEN]; /**< Username for maxscale-to-maxscale traffic */
char peer_password[MAX_ADMIN_HOST_LEN]; /**< Password for maxscale-to-maxscale traffic */
mxb_log_target_t log_target; /**< Log type */
bool load_persisted_configs; /**< Load persisted configuration files on startup */
};

/**
@@ -28,6 +28,7 @@ add_library(testcore SHARED testconnections.cpp nodes.cpp mariadb_nodes.cpp maxs
sql_t1.cpp test_binlog_fnc.cpp get_my_ip.cpp big_load.cpp get_com_select_insert.cpp
different_size.cpp fw_copy_rules maxinfo_func.cpp config_operations.cpp rds_vpc.cpp execute_cmd.cpp
blob_test.cpp keepalived_func.cpp tcp_connection.cpp base/stopwatch.cpp fw_copy_rules.cpp
labels_table.cpp envv.cpp
# Include the CDC connector in the core library
${CMAKE_SOURCE_DIR}/connectors/cdc-connector/cdc_connector.cpp)
target_link_libraries(testcore ${MARIADB_CONNECTOR_LIBRARIES} ${JANSSON_LIBRARIES} z m pthread ssl dl rt crypto crypt maxbase)

@@ -38,21 +39,21 @@ add_dependencies(testcore connector-c jansson maxbase)
include_directories(${CMAKE_SOURCE_DIR}/../connectors/cdc-connector/)

# Tool used to check backend state
add_test_executable_notest(check_backend.cpp check_backend check_backend LABELS CONFIG)
add_test_executable_notest(check_backend.cpp check_backend check_backend LABELS CONFIG REPL_BACKEND GALERA_BACKEND TWO_MAXSCALES)

# Configuration tests
add_template(bug359 bug359)
add_template(bug495 bug495)
add_template(bug526 bug526)
add_template(bug479 bug479)
add_template(bug493 bug493)
add_template(bug643_1 bug643_1)
add_template(mxs652_bad_ssl bad_ssl)
add_template(mxs710_bad_socket mxs710_bad_socket)
add_template(mxs710_bad_socket mxs711_two_ports)
add_template(mxs720_line_with_no_equal mxs720_line_with_no_equal)
add_template(mxs720_wierd_line mxs720_wierd_line)
add_template(mxs710_bad_socket mxs799)
add_template_manual(bug359 bug359)
add_template_manual(bug495 bug495)
add_template_manual(bug526 bug526)
add_template_manual(bug479 bug479)
add_template_manual(bug493 bug493)
add_template_manual(bug643_1 bug643_1)
add_template_manual(mxs652_bad_ssl bad_ssl)
add_template_manual(mxs710_bad_socket mxs710_bad_socket)
add_template_manual(mxs710_bad_socket mxs711_two_ports)
add_template_manual(mxs720_line_with_no_equal mxs720_line_with_no_equal)
add_template_manual(mxs720_wierd_line mxs720_wierd_line)
add_template_manual(mxs710_bad_socket mxs799)
add_test_executable(config_test.cpp config_test replication LABELS CONFIG)

add_subdirectory(cdc_datatypes)

@@ -151,7 +152,7 @@ add_test_executable(bug658.cpp bug658 replication LABELS readwritesplit readconn
add_test_executable(bug662.cpp bug662 bug662 LABELS readwritesplit readconnroute maxscale REPL_BACKEND)

# Bad TEE filter configuration
add_test_executable(bug664.cpp bug664 bug664 LABELS MySQLAuth MySQLProtocol)
add_test_executable(bug664.cpp bug664 bug664 LABELS MySQLAuth MySQLProtocol REPL_BACKEND)

# Regression case for the bug "MaxScale crashes if "Users table data" is empty and "show dbusers" is executed in maxadmin"
add_test_executable(bug673.cpp bug673 bug673 LABELS MySQLAuth REPL_BACKEND)

@@ -340,9 +341,6 @@ add_test_executable(mxs244_prepared_stmt_loop.cpp mxs244_prepared_stmt_loop gale
# Tries prepared stmt 'SELECT 1,1,1,1...." with different nu,ber of '1'
add_test_executable(mxs314.cpp mxs314 galera LABELS MySQLProtocol readwritesplit LIGHT GALERA_BACKEND)

# Crash with Galera and backend restart when persistant cfonnections are in use
add_test_derived(mxs361 pers_02 mxs361 mxs361 LABELS maxscale GALERA_BACKEND)

# Playing with blocking and unblocking nodes under INSERT load
add_test_executable(mxs564_big_dump.cpp mxs564_big_dump galera_mxs564 LABELS readwritesplit readconnroute GALERA_BACKEND)

@@ -396,7 +394,7 @@ add_test_executable(mxs1583_fwf.cpp mxs1583_fwf mxs1583_fwf LABELS dbfwfilter RE
add_test_executable(kill_master.cpp kill_master replication LABELS readwritesplit LIGHT REPL_BACKEND)

# Test insertstream filter
add_test_script(insertstream insertstream.sh insertstream LABELS insertstream REPL_BACKEND)
add_test_script(insertstream.sh insertstream.sh insertstream LABELS insertstream REPL_BACKEND)

# Check load balancing
add_test_executable(load_balancing.cpp load_balancing load LABELS readwritesplit LIGHT REPL_BACKEND)

@@ -408,7 +406,7 @@ add_test_derived(load_balancing_pers10 load_balancing load_pers10 LABELS readwri
add_test_executable(longblob.cpp longblob longblob LABELS readwritesplit readconnroute UNSTABLE HEAVY REPL_BACKEND)

# Test with extremely big blob inserting/selecting with > 16 mb data blocks
add_test_executable(mxs1110_16mb.cpp mxs1110_16mb longblob_filters LABELS readwritesplit readconnroute HEAVY REPL_BACKEND)
add_test_executable(mxs1110_16mb.cpp mxs1110_16mb longblob_filters LABELS readwritesplit readconnroute HEAVY REPL_BACKEND GALERA_BACKEND)

# Schemarouter implicit database detection
add_test_executable(mxs1310_implicit_db.cpp mxs1310_implicit_db mxs1310_implicit_db LABELS schemarouter REPL_BACKEND)

@@ -418,7 +416,10 @@ add_test_executable(mxs1323_retry_read.cpp mxs1323_retry_read mxs1323 LABELS rea
add_test_executable(mxs1323_stress.cpp mxs1323_stress mxs1323 LABELS readwritesplit REPL_BACKEND)

# A set of MariaDB server tests executed against Maxscale RWSplit
add_test_script(mariadb_tests_hartmut mariadb_tests_hartmut.sh replication LABELS readwritesplit REPL_BACKEND)
add_test_script(mariadb_tests_hartmut.sh mariadb_tests_hartmut.sh replication LABELS readwritesplit REPL_BACKEND)

# A set of MariaDB server tests executed against Maxscale RWSplit (Galera backend)
add_test_script(mariadb_tests_hartmut_galera.sh mariadb_tests_hartmut.sh galera_hartmut LABELS readwritesplit GALERA_BACKEND)

# Creates a number of connections > max_connections setting
add_test_executable(max_connections.cpp max_connections replication LABELS MySQLAuth MySQLProtocol UNSTABLE HEAVY REPL_BACKEND)

@@ -433,7 +434,7 @@ add_test_script(maxinfo.py maxinfo.py maxinfo LABELS maxinfo LIGHT REPL_BACKEND)
add_test_executable(maxscale_process_user.cpp maxscale_process_user replication LABELS maxscale LIGHT REPL_BACKEND)

# Test of multi master monitor
add_test_executable(mm.cpp mm mm LABELS mysqlmon BREAKS_REPL)
add_test_executable(mm.cpp mm mm LABELS mysqlmon BREAKS_REPL REPL_BACKEND)

# Regression case for the bug "Two monitors loaded at the same time result into not working installation"
add_test_executable(mxs118.cpp mxs118 mxs118 LABELS maxscale LIGHT REPL_BACKEND)

@@ -457,6 +458,9 @@ add_test_executable(binary_ps_cursor.cpp binary_ps_cursor replication LABELS rea
# Creates and closes a lot of connections, checks that 'maxadmin list servers' shows 0 connections at the end
add_test_executable(mxs321.cpp mxs321 replication LABELS maxscale readwritesplit REPL_BACKEND)

# Crash with Galera and backend restart when persistant cfonnections are in use
add_test_derived(mxs361 pers_02 mxs361 mxs361 LABELS maxscale REPL_BACKEND GALERA_BACKEND)

# Load huge file with 'LOAD DATA LOCAL INFILE'
add_test_executable(mxs365.cpp mxs365 replication LABELS readwritesplit REPL_BACKEND)

@@ -497,6 +501,9 @@ add_test_executable(mxs729_maxadmin.cpp mxs729_maxadmin replication LABELS MaxAd
# Simple connect test in bash, checks that defined in cmd line DB is selected
add_test_script(mxs791.sh mxs791.sh replication LABELS UNSTABLE HEAVY REPL_BACKEND)

# Simple connect test in bash, checks that defined in cmd line DB is selected (Galera backend)
add_test_script(mxs791_galera.sh mxs791.sh galera LABELS UNSTABLE HEAVY GALERA_BACKEND)

# Checks "Current no. of conns" maxadmin output after long blob inserting
add_test_executable(mxs812_1.cpp mxs812_1 longblob LABELS readwritesplit REPL_BACKEND)

@@ -568,7 +575,7 @@ add_test_executable(cache_runtime_ttl.cpp cache_runtime_ttl cache_runtime_ttl LA
add_test_executable(mxs951_utfmb4.cpp mxs951_utfmb4 replication LABELS REPL_BACKEND)

# Proxy protocol test
add_test_executable(proxy_protocol.cpp proxy_protocol proxy_protocol LABELS MySQLAuth MySQLProtocol)
add_test_executable(proxy_protocol.cpp proxy_protocol proxy_protocol LABELS MySQLAuth MySQLProtocol REPL_BACKEND)

# Regression case for the bug "Defunct processes after maxscale have executed script during failover"
add_test_executable(mxs1045.cpp mxs1045 mxs1045 LABELS maxscale REPL_BACKEND)

@@ -625,9 +632,9 @@ add_test_executable(mxs1516.cpp mxs1516 replication LABELS readconnroute REPL_BA
# https://jira.mariadb.org/browse/MXS-1549
add_test_executable(mxs1549_optimistic_trx.cpp mxs1549_optimistic_trx mxs1549_optimistic_trx LABELS readwritesplit REPL_BACKEND)

# MXS-1585: Crash in MaxScale 2.1.12
# https://jira.mariadb.org/browse/MXS-1585
add_test_executable(mxs1585.cpp mxs1585 mxs1585 LABELS readwritesplit REPL_BACKEND)
# MXS-1585: Crash in MaxScale 2.1.12
# https://jira.mariadb.org/browse/MXS-1585
add_test_executable(mxs1585.cpp mxs1585 mxs1585 LABELS GALERA_BACKEND)

# MXS-1643: Too many monitor events are triggered
# https://jira.mariadb.org/browse/MXS-1643

@@ -718,7 +725,7 @@ add_test_executable(no_password.cpp no_password replication LABELS MySQLAuth LIG
add_test_executable(open_close_connections.cpp open_close_connections replication LABELS maxscale readwritesplit REPL_BACKEND)

# Test with persistant connections configured and big number iof opened connections ,expect no crash
add_test_executable(pers_02.cpp pers_02 pers_01 LABELS maxscale REPL_BACKEND readwritesplit)
add_test_executable(pers_02.cpp pers_02 pers_01 LABELS maxscale GALERA_BACKEND REPL_BACKEND readwritesplit)

# Check if prepared statement works via Maxscale (via RWSplit)
add_test_executable(prepared_statement.cpp prepared_statement replication LABELS readwritesplit LIGHT REPL_BACKEND)

@@ -760,13 +767,13 @@ add_test_executable(rwsplit_multi_stmt.cpp rwsplit_multi_stmt rwsplit_multi_stmt
add_test_executable(schemarouter_duplicate.cpp schemarouter_duplicate schemarouter_duplicate LABELS schemarouter REPL_BACKEND)

# Test of external script execution
add_test_executable(script.cpp script script LABELS maxscale REPL_BACKEND)
add_test_executable(script.cpp script script LABELS maxscale REPL_BACKEND GALERA_BACKEND)

# Test 10.3 SEQUENCE objects
add_test_executable(sequence.cpp sequence replication LABELS LIGHT)
add_test_executable(sequence.cpp sequence replication LABELS LIGHT REPL_BACKEND)

# Test 10.1 compound statements with readwritesplit
add_test_executable(compound_statement.cpp compound_statement replication LABELS readwritesplit LIGHT)
# Test 10.1 compound statements
add_test_executable(compound_statement.cpp compound_statement replication LABELS LIGHT REPL_BACKEND)

# Check if 'weightby' parameter works
add_test_executable(server_weight.cpp server_weight server_weight LABELS readwritesplit readconnroute LIGHT REPL_BACKEND)

@@ -775,10 +782,10 @@ add_test_executable(server_weight.cpp server_weight server_weight LABELS readwri
add_test_executable(session_limits.cpp session_limits session_limits LABELS readwritesplit REPL_BACKEND)

# Test of schema router
add_test_executable(sharding.cpp sharding sharding LABELS schemarouter BREAKS_REPL)
add_test_executable(sharding.cpp sharding sharding LABELS schemarouter BREAKS_REPL REPL_BACKEND)

# MXS-1160: LOAD DATA LOCAL INFILE with schemarouter
add_test_executable(sharding_load_data.cpp sharding_load_data sharding LABELS schemarouter BREAKS_REPL)
add_test_executable(sharding_load_data.cpp sharding_load_data sharding LABELS schemarouter BREAKS_REPL REPL_BACKEND)

# Do short sessions (open conn, short query, close conn) in the loop
add_test_executable(short_sessions.cpp short_sessions replication LABELS readwritesplit readconnroute REPL_BACKEND)

@@ -787,7 +794,7 @@ add_test_executable(short_sessions.cpp short_sessions replication LABELS readwri
add_test_derived(short_sessions_ssl short_sessions ssl LABELS readwritesplit readconnroute REPL_BACKEND)

# Regression case for crash if maxadmin 'show monitors' command is issued, but no monitor is not running
add_test_executable(show_monitor_crash.cpp show_monitor_crash show_monitor_crash LABELS maxscale)
add_test_executable(show_monitor_crash.cpp show_monitor_crash show_monitor_crash LABELS maxscale REPL_BACKEND)

# Check how Maxscale works in case of one slave failure, only one slave is configured
add_test_executable(slave_failover.cpp slave_failover replication.one_slave LABELS readwritesplit REPL_BACKEND)

@@ -816,6 +823,10 @@ add_test_executable(temporal_tables.cpp temporal_tables replication LABELS readw
# Test routing hints (mainly about how readwritesplit deals with them)
add_test_executable(test_hints.cpp test_hints hints2 LABELS hintfilter readwritesplit LIGHT REPL_BACKEND)

# Run MaxCtrl test suite
# add_test_executable(test_maxctrl.cpp test_maxctrl maxctrl LABELS REPL_BACKEND)

# Creates KDC and tries authrization via GSSAPI (both client and backend)
# works only with yum-based distributions
# TODO: make it working with zypper and apt, move part of KDC setup to MDBCI

@@ -886,7 +897,7 @@ set_tests_properties(mxs1958_insert_priv PROPERTIES WILL_FAIL TRUE)

# MXS-1849: Table family sharding router
# https://jira.mariadb.org/browse/MXS-1849
add_test_executable(mxs1849_table_sharding.cpp mxs1849_table_sharding mxs1849_table_sharding LABELS schemarouter BREAKS_REPL)
add_test_executable(mxs1849_table_sharding.cpp mxs1849_table_sharding mxs1849_table_sharding LABELS schemarouter BREAKS_REPL REPL_BACKEND)

# MXS-1985: MaxScale hangs on concurrent KILL processing
# https://jira.mariadb.org/browse/MXS-1985

@@ -894,7 +905,7 @@ add_test_executable(mxs1985_kill_hang.cpp mxs1985_kill_hang replication LABELS R

# MXS-1113: Support of prepared statement for schemarouter
# https://jira.mariadb.org/browse/MXS-1113
add_test_executable(mxs1113_schemarouter_ps.cpp mxs1113_schemarouter_ps mxs1113_schemarouter_ps LABELS schemarouter BREAKS_REPL)
add_test_executable(mxs1113_schemarouter_ps.cpp mxs1113_schemarouter_ps mxs1113_schemarouter_ps LABELS schemarouter BREAKS_REPL REPL_BACKEND)

# MXS-2037: Wildcards not working with source in Named Server Filter
# https://jira.mariadb.org/browse/MXS-2037
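For orientation (not part of the diff): the pattern used throughout this CMakeLists.txt is `add_test_executable(<source> <binary> <config template> LABELS <labels...>)`, and the backend labels such as REPL_BACKEND and GALERA_BACKEND tell the MDBCI tooling which virtual machines a test needs (see the `labels_table.h` added later in this commit). A hypothetical registration following that pattern, with an invented test name:

```
# Hypothetical example only: a test that needs both backends
add_test_executable(my_new_test.cpp my_new_test replication LABELS readwritesplit REPL_BACKEND GALERA_BACKEND)
```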
@@ -933,6 +944,73 @@ add_test_executable(mxs2313_rank.cpp mxs2313_rank mxs2313_rank LABELS readwrites
# BEGIN: binlogrouter and avrorouter tests #
############################################

# Binlogrouter tests, these heavily alter the replication so they are run last
add_test_executable(avro.cpp avro avro LABELS avrorouter binlogrouter LIGHT BREAKS_REPL REPL_BACKEND)
add_test_executable(avro_alter.cpp avro_alter avro LABELS avrorouter binlogrouter LIGHT BREAKS_REPL REPL_BACKEND)

# Test avrorouter file compression
#add_test_script(avro_compression avro avro_compression LABELS avrorouter binlogrouter LIGHT BREAKS_REPL REPL_BACKEND)

# In the binlog router setup stop Master and promote one of the Slaves to be new Master
add_test_executable(binlog_change_master.cpp binlog_change_master setup_binlog_tx_safe LABELS binlogrouter BREAKS_REPL REPL_BACKEND)

# trying to start binlog setup with incomplete Maxscale.cnf
add_test_executable(binlog_incompl.cpp binlog_incompl binlog_incompl LABELS binlogrouter BREAKS_REPL REPL_BACKEND)

# configure binlog router setup, execute queries and transactions, check data; install semysync plugin, router options semisync=1,transaction_safety=1
add_test_executable(binlog_semisync.cpp binlog_semisync setup_binlog_semisync LABELS binlogrouter HEAVY BREAKS_REPL REPL_BACKEND)

# configure binlog router setup, execute queries and transactions, check data; install semysync plugin, router options semisync=0,transaction_safety=0
add_test_derived(binlog_semisync_txs0_ss0 binlog_semisync setup_binlog_semisync_txs0_ss0 LABELS binlogrouter HEAVY BREAKS_REPL REPL_BACKEND)

# configure binlog router setup, execute queries and transactions, check data; install semysync plugin, router options semisync=0,transaction_safety=1
add_test_derived(binlog_semisync_txs0_ss1 binlog_semisync setup_binlog_semisync_txs0_ss1 LABELS binlogrouter HEAVY BREAKS_REPL REPL_BACKEND)

# configure binlog router setup, execute queries and transactions, check data; install semysync plugin, router options semisync=1,transaction_safety=0
add_test_derived(binlog_semisync_txs1_ss0 binlog_semisync setup_binlog_semisync_txs1_ss0 LABELS binlogrouter HEAVY BREAKS_REPL REPL_BACKEND)

set_tests_properties(binlog_semisync PROPERTIES TIMEOUT 3600)
set_tests_properties(binlog_semisync_txs0_ss0 PROPERTIES TIMEOUT 3600)
set_tests_properties(binlog_semisync_txs0_ss1 PROPERTIES TIMEOUT 3600)
set_tests_properties(binlog_semisync_txs1_ss0 PROPERTIES TIMEOUT 3600)

#
# The encryption tests don't work as they require the file key management plugin
#
# Binlog encription test (aes_cbr encryption)
#add_test_executable(mxs1073_binlog_enc.cpp mxs1073_binlog_enc binlog_enc_aes_cbc LABELS binlogrouter 10.1 BREAKS_REPL REPL_BACKEND)
# Binlog encription test (aes_ctr encryption)
#add_test_script(mxs1073_binlog_enc_aes_ctr mxs1073_binlog_enc binlog_enc_aes_ctr LABELS binlogrouter 10.1 BREAKS_REPL REPL_BACKEND)

# Test of CDC protocol (avro listener)
add_test_executable(cdc_client.cpp cdc_client avro LABELS avrorouter binlogrouter BREAKS_REPL REPL_BACKEND)

# Tries INSERTs with size close to 0x0ffffff * N (with binlog backend)
add_test_executable(different_size_binlog.cpp different_size_binlog setup_binlog LABELS binlogrouter HEAVY BREAKS_REPL REPL_BACKEND)

# Try to configure binlog router to use wrong password for Master and check 'slave status' on binlog
add_test_executable(mxs781_binlog_wrong_passwrd.cpp mxs781_binlog_wrong_passwrd setup_binlog LABELS binlogrouter BREAKS_REPL REPL_BACKEND)

# Regression case for crash if long host name is used for binlog router (in 'change master to ...')
add_test_executable(mxs813_long_hostname.cpp mxs813_long_hostname setup_binlog LABELS binlogrouter BREAKS_REPL REPL_BACKEND)

# Test that masking filter can handle multi-statements.
add_test_executable(mxs1719.cpp mxs1719 mxs1719 LABELS masking REPL_BACKEND)

# configure binlog router setup, execute queries and transactions, check data;
add_test_executable(setup_binlog.cpp setup_binlog setup_binlog LABELS binlogrouter BREAKS_REPL REPL_BACKEND)

# configure binlog router setup, execute queries and transactions, check data;
add_test_executable(setup_binlog_gtid.cpp setup_binlog_gtid setup_binlog_gtid LABELS binlogrouter BREAKS_REPL REPL_BACKEND)

# configure binlog router setup, execute queries and transactions, check data; install semysync plugin, backends started with --binlog-checksum=CRC32 option
# disabled because it is included into setup_binlog test, separate test was created for debugging
# add_test_executable(setup_binlog_crc_32.cpp setup_binlog_crc_32 setup_binlog LABELS binlogrouter BREAKS_REPL REPL_BACKEND)

# configure binlog router setup, execute queries and transactions, check data; install semysync plugin, backends started with --binlog-checksum=NONE option
# disabled because it is included into setup_binlog test, separate test was created for debugging
# add_test_executable(setup_binlog_crc_none.cpp setup_binlog_crc_none setup_binlog LABELS binlogrouter LIGHT BREAKS_REPL REPL_BACKEND)

# MXS-1542: Check that UTF16 strings work
# https://jira.mariadb.org/browse/MXS-1542
add_test_executable(mxs1542.cpp mxs1542 avro LABELS avrorouter REPL_BACKEND)
@@ -941,56 +1019,8 @@ add_test_executable(mxs1542.cpp mxs1542 avro LABELS avrorouter REPL_BACKEND)
# https://jira.mariadb.org/browse/MXS-1543
add_test_executable(mxs1543.cpp mxs1543 avro LABELS avrorouter REPL_BACKEND)

# Binlogrouter tests, these heavily alter the replication so they are run last
add_test_executable(avro.cpp avro avro LABELS avrorouter binlogrouter LIGHT BREAKS_REPL)
add_test_executable(avro_alter.cpp avro_alter avro LABELS avrorouter binlogrouter LIGHT BREAKS_REPL)

# In the binlog router setup stop Master and promote one of the Slaves to be new Master
add_test_executable(binlog_change_master.cpp binlog_change_master setup_binlog_tx_safe LABELS binlogrouter BREAKS_REPL)

# In the binlog router setup stop Master and promote one of the Slaves to be new Master (use GTID)
add_test_executable(binlog_change_master_gtid.cpp binlog_change_master_gtid setup_binlog_tx_safe_gtid LABELS binlogrouter BREAKS_REPL)

# trying to start binlog setup with incomplete Maxscale.cnf
add_test_executable(binlog_incompl.cpp binlog_incompl binlog_incompl LABELS binlogrouter BREAKS_REPL)

# configure binlog router setup, execute queries and transactions, check data; install semysync plugin, router options semisync=1,transaction_safety=1
add_test_executable(binlog_semisync.cpp binlog_semisync setup_binlog_semisync LABELS binlogrouter HEAVY BREAKS_REPL)

# configure binlog router setup, execute queries and transactions, check data; install semysync plugin, router options semisync=0,transaction_safety=0
add_test_derived(binlog_semisync_txs0_ss0 binlog_semisync setup_binlog_semisync_txs0_ss0 LABELS binlogrouter HEAVY BREAKS_REPL)

# configure binlog router setup, execute queries and transactions, check data; install semysync plugin, router options semisync=0,transaction_safety=1
add_test_derived(binlog_semisync_txs0_ss1 binlog_semisync setup_binlog_semisync_txs0_ss1 LABELS binlogrouter HEAVY BREAKS_REPL)

# configure binlog router setup, execute queries and transactions, check data; install semysync plugin, router options semisync=1,transaction_safety=0
add_test_derived(binlog_semisync_txs1_ss0 binlog_semisync setup_binlog_semisync_txs1_ss0 LABELS binlogrouter HEAVY BREAKS_REPL)

set_tests_properties(binlog_semisync PROPERTIES TIMEOUT 3600)
set_tests_properties(binlog_semisync_txs0_ss0 PROPERTIES TIMEOUT 3600)
set_tests_properties(binlog_semisync_txs0_ss1 PROPERTIES TIMEOUT 3600)
set_tests_properties(binlog_semisync_txs1_ss0 PROPERTIES TIMEOUT 3600)

# Test of CDC protocol (avro listener)
add_test_executable(cdc_client.cpp cdc_client avro LABELS avrorouter binlogrouter BREAKS_REPL)

# Tries INSERTs with size close to 0x0ffffff * N (with binlog backend)
add_test_executable(different_size_binlog.cpp different_size_binlog setup_binlog LABELS binlogrouter HEAVY BREAKS_REPL)

# Try to configure binlog router to use wrong password for Master and check 'slave status' on binlog
add_test_executable(mxs781_binlog_wrong_passwrd.cpp mxs781_binlog_wrong_passwrd setup_binlog LABELS binlogrouter BREAKS_REPL)

# Regression case for crash if long host name is used for binlog router (in 'change master to ...')
add_test_executable(mxs813_long_hostname.cpp mxs813_long_hostname setup_binlog LABELS binlogrouter BREAKS_REPL)

# Test that masking filter can handle multi-statements.
add_test_executable(mxs1719.cpp mxs1719 mxs1719 LABELS masking REPL_BACKEND)

# configure binlog router setup, execute queries and transactions, check data;
add_test_executable(setup_binlog.cpp setup_binlog setup_binlog LABELS binlogrouter BREAKS_REPL)

# configure binlog router setup, execute queries and transactions, check data;
add_test_executable(setup_binlog_gtid.cpp setup_binlog_gtid setup_binlog_gtid LABELS binlogrouter BREAKS_REPL)
add_test_executable(binlog_change_master_gtid.cpp binlog_change_master_gtid setup_binlog_tx_safe_gtid LABELS binlogrouter BREAKS_REPL REPL_BACKEND)

# MXS-701: Binlog filtering
# https://jira.mariadb.org/browse/MXS-701
@@ -65,7 +65,7 @@ void load(long int* new_inserts,
Test->maxscales->close_rwsplit(0);

Test->tprintf("Waiting for the table to replicate\n");
Test->repl->sync_slaves();
nodes->sync_slaves();

pthread_t thread1[threads_num];
pthread_t thread2[threads_num];

@@ -1,6 +1,4 @@
#ifndef BIG_LOAD_H
#define BIG_LOAD_H

#pragma once

#include "testconnections.h"
#include "sql_t1.h"

@@ -33,6 +31,7 @@ void* query_thread2(void* ptr);
* @param galera if true use Galera backend (Test->galera instead of Test->repl)
* @param report_errors if true call add_result() in case of query failure
*/

void load(long* new_inserts,
long* new_selects,
long* selects,

@@ -44,5 +43,3 @@ void load(long* new_inserts,
int rwsplit_only,
bool galera,
bool report_errors);

#endif // BIG_LOAD_H

@@ -1,5 +1,4 @@
#ifndef BIG_TRANSACTION_H
#define BIG_TRANSACTION_H
#pragma once

#include <mysql.h>
#include <stdio.h>

@@ -13,5 +12,3 @@
* @return 0 if success
*/
int big_transaction(MYSQL* conn, int N);

#endif // BIG_TRANSACTION_H

@@ -1,5 +1,4 @@
#ifndef BLOB_TEST_H
#define BLOB_TEST_H
#pragma once

#include "testconnections.h"

@@ -36,5 +35,3 @@ int check_longblob_data(TestConnections* Test,
unsigned long chunk_size,
int chunks,
int rows);

#endif // BLOB_TEST_H
@@ -5,21 +5,10 @@
## - try to connect with bad credestials directly to MariaDB server and via Maxscale
## - compare error messages

rp=`realpath $0`
export src_dir=`dirname $rp`
export test_dir=`pwd`
export test_name=`basename $rp`

$test_dir/non_native_setup $test_name

if [ $? -ne 0 ] ; then
echo "configuring maxscale failed"
exit 1
fi
export ssl_options="--ssl-cert=$src_dir/ssl-cert/client-cert.pem --ssl-key=$src_dir/ssl-cert/client-key.pem"

mariadb_err=`mysql -u no_such_user -psome_pwd -h $node_001_network $ssl_option --socket=$node_000_socket test 2>&1`
maxscale_err=`mysql -u no_such_user -psome_pwd -h $maxscale_IP -P 4006 $ssl_options test 2>&1`
mariadb_err=`mysql -u no_such_user -psome_pwd -h $node_001_network $ssl_option $node_001_socket_cmd test 2>&1`
maxscale_err=`mysql -u no_such_user -psome_pwd -h ${maxscale_000_network} -P 4006 $ssl_options test 2>&1`

echo "MariaDB message"
echo "$mariadb_err"

@@ -39,5 +28,4 @@ else
res=0
fi

$src_dir/copy_logs.sh bug562
exit $res
@@ -5,24 +5,14 @@
## - call MariaDB client with different --default-character-set= settings
## - check output of SHOW VARIABLES LIKE 'char%'

rp=`realpath $0`
export src_dir=`dirname $rp`
export test_dir=`pwd`
export test_name=`basename $rp`
$test_dir/non_native_setup $test_name

if [ $? -ne 0 ] ; then
echo "configuring maxscale failed"
exit 1
fi
export ssl_options="--ssl-cert=$src_dir/ssl-cert/client-cert.pem --ssl-key=$src_dir/ssl-cert/client-key.pem"

for char_set in "latin1" "latin2"
do

line1=`mysql -u$node_user -p$node_password -h $maxscale_IP -P 4006 $ssl_options --default-character-set="$char_set" -e "SHOW VARIABLES LIKE 'char%'" | grep "character_set_client"`
line2=`mysql -u$node_user -p$node_password -h $maxscale_IP -P 4006 $ssl_options --default-character-set="$char_set" -e "SHOW VARIABLES LIKE 'char%'" | grep "character_set_connection"`
line3=`mysql -u$node_user -p$node_password -h $maxscale_IP -P 4006 $ssl_options --default-character-set="$char_set" -e "SHOW VARIABLES LIKE 'char%'" | grep "character_set_results"`
line1=`mysql -u$node_user -p$node_password -h ${maxscale_000_network} -P 4006 $ssl_options --default-character-set="$char_set" -e "SHOW VARIABLES LIKE 'char%'" | grep "character_set_client"`
line2=`mysql -u$node_user -p$node_password -h ${maxscale_000_network} -P 4006 $ssl_options --default-character-set="$char_set" -e "SHOW VARIABLES LIKE 'char%'" | grep "character_set_connection"`
line3=`mysql -u$node_user -p$node_password -h ${maxscale_000_network} -P 4006 $ssl_options --default-character-set="$char_set" -e "SHOW VARIABLES LIKE 'char%'" | grep "character_set_results"`

echo $line1 | grep "$char_set"
res1=$?

@@ -34,10 +24,9 @@ do

if [[ $res1 != 0 ]] || [[ $res2 != 0 ]] || [[ $res3 != 0 ]] ; then
echo "charset is ignored"
mysql -u$node_user -p$node_password -h $maxscale_IP -P 4006 $ssl_options --default-character-set="latin2" -e "SHOW VARIABLES LIKE 'char%'"
$src_dir/copy_logs.sh bug564
mysql -u$node_user -p$node_password -h ${maxscale_000_network} -P 4006 $ssl_options --default-character-set="latin2" -e "SHOW VARIABLES LIKE 'char%'"
exit 1
fi
done
$src_dir/copy_logs.sh bug564

exit 0
@@ -5,31 +5,20 @@
## - try to remove everythign from /dev/shm/$maxscale_pid
## check if Maxscale is alive

rp=`realpath $0`
export src_dir=`dirname $rp`
export test_dir=`pwd`
export test_name=`basename $rp`
$test_dir/non_native_setup $test_name

if [ $? -ne 0 ] ; then
echo "configuring maxscale failed"
exit 1
fi
export ssl_options="--ssl-cert=$src_dir/ssl-cert/client-cert.pem --ssl-key=$src_dir/ssl-cert/client-key.pem"

#pid=`ssh -i $maxscale_sshkey -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null $maxscale_access_user@$maxscale_IP "pgrep maxscale"`
#pid=`ssh -i $maxscale_sshkey -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ${maxscale_000_whoami}@${maxscale_000_network} "pgrep maxscale"`
#echo "Maxscale pid is $pid"
echo "removing log directory from /dev/shm/"
if [ $maxscale_IP != "127.0.0.1" ] ; then
ssh -i $maxscale_sshkey -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null $maxscale_access_user@$maxscale_IP "sudo rm -rf /dev/shm/maxscale/*"
if [ ${maxscale_000_network} != "127.0.0.1" ] ; then
ssh -i ${maxscale_000_keyfile} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ${maxscale_000_whoami}@${maxscale_000_network} "sudo rm -rf /dev/shm/maxscale/*"
else
sudo rm -rf /dev/shm/maxscale/*
fi
sleep 1
echo "checking if Maxscale is alive"
echo "show databases;" | mysql -u$node_user -p$node_password -h $maxscale_IP -P 4006 $ssl_options
echo "show databases;" | mysql -u$node_user -p$node_password -h ${maxscale_000_network} -P 4006 $ssl_options
res=$?

$src_dir/copy_logs.sh bug567
exit $res
@@ -1,5 +1,4 @@
#ifndef BUG670_SQL_H
#define BUG670_SQL_H
#pragma once

const char* bug670_sql
=

@@ -40,5 +39,3 @@ const char* bug670_sql
set autocommit=1;\
delete from mysql.t1 where id = 7; \
select 1 as \"endof cycle\" from dual;\n";

#endif // BUG670_SQL_H
@@ -1,10 +1,8 @@
#!/bin/bash

rp=`realpath $0`
export src_dir=`dirname $rp`

user=skysql
password=skysql
user=$maxscale_user
password=$maxscale_password

# See cnf/maxscale.cnf.template.cache_basic
port=4008

@@ -20,8 +18,7 @@ function run_test
echo $test_name
logdir=log_$test_name
mkdir -p $logdir

mysqltest --host=$maxscale_IP --port=$port \
mysqltest --host=${maxscale_000_network} --port=$port \
--user=$user --password=$password \
--logdir=$logdir \
--test-file=$dir/t/$test_name.test \

@@ -40,37 +37,15 @@ function run_test
return $rc
}

if [ $# -lt 1 ]
then
echo "usage: $script name"
echo ""
echo "name : The name of the test (from CMakeLists.txt) That selects the"
echo "      configuration template to be used."
exit 1
fi

if [ "$maxscale_IP" == "" ]
then
echo "Error: The environment variable maxscale_IP must be set."
exit 1
fi

expected_name="cache_basic"

if [ "$1" != "$expected_name" ]
then
echo "warning: Expected test name to be $expected_name_basic, was $1."
fi

export dir="$src_dir/cache/$expected_name"
export dir="$src_dir/cache/$1"

source=$src_dir/cache/$1/cache_rules.json
target=vagrant@$maxscale_IP:/home/$maxscale_access_user/cache_rules.json
target=${maxscale_000_whoami}@${maxscale_000_network}:/home/${maxscale_000_whoami}/cache_rules.json

if [ $maxscale_IP != "127.0.0.1" ] ; then
scp -i $maxscale_keyfile -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null $source $target
if [ ${maxscale_000_network} != "127.0.0.1" ] ; then
scp -i ${maxscale_000_keyfile} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null $source $target
else
cp $source /home/$maxscale_access_user/cache_rules.json
cp $source /home/${maxscale_000_whoami}/cache_rules.json
fi

if [ $? -ne 0 ]

@@ -79,13 +54,9 @@ then
exit 1
fi

echo $source copied to $target
echo $source copied to $target, restarting Maxscale

test_dir=`pwd`

$test_dir/non_native_setup $1

echo
ssh -i $maxscale_000_keyfile -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ${maxscale_000_whoami}@${maxscale_000_network} 'sudo service maxscale restart'

# We sleep slightly longer than the TTL to ensure that the TTL mechanism
# kicks in.

@@ -1,3 +1,3 @@
add_test_executable(cdc_datatypes.cpp cdc_datatypes avro LABELS avrorouter binlogrouter BREAKS_REPL)
add_test_executable(cdc_datatypes.cpp cdc_datatypes avro LABELS avrorouter binlogrouter BREAKS_REPL REPL_BACKEND)
add_library(cdc_result cdc_result.cpp)
target_link_libraries(cdc_datatypes cdc_result)

@@ -17,13 +17,13 @@
void run_test(TestConnections& test, MYSQL* conn)
{
test.expect(mysql_change_user(conn, "user", "pass2", "test") == 0,
"changing user failed: %s", mysql_error(conn));
"changing user failed: %s", mysql_error(conn));

test.expect(execute_query_silent(conn, "INSERT INTO t1 VALUES (77, 11);") != 0,
"INSERT query succeeded without INSERT privilege");

test.expect(mysql_change_user(conn, test.repl->user_name, test.repl->password, "test") == 0,
"changing user failed: %s", mysql_error(conn));
"changing user failed: %s", mysql_error(conn));

test.expect(execute_query_silent(conn, "INSERT INTO t1 VALUES (77, 11);") == 0,
"INSERT query succeeded without INSERT privilege");

@@ -33,7 +33,7 @@ void run_test(TestConnections& test, MYSQL* conn)
"changing user with wrong password successed!");

test.expect(strstr(mysql_error(conn), "Access denied for user"),
"Wrong error message returned on failed authentication");
"Wrong error message returned on failed authentication");

test.expect(execute_query_silent(conn, "INSERT INTO t1 VALUES (77, 11);") != 0,
"Query should fail, MaxScale should disconnect on auth failure");

@@ -6,22 +6,14 @@
#include <iostream>
#include "testconnections.h"

int main(int argc, char* argv[])
int main(int argc, char *argv[])
{

TestConnections* Test = new TestConnections(argc, argv);
TestConnections * Test = new TestConnections(argc, argv);

// Reset server settings by replacing the config files
Test->repl->reset_server_settings();

std::string src = std::string(test_dir) + "/mdbci/add_core_cnf.sh";
Test->maxscales->copy_to_node(0, src.c_str(), Test->maxscales->access_homedir[0]);
Test->maxscales->ssh_node_f(0,
true,
"%s/add_core_cnf.sh %s",
Test->maxscales->access_homedir[0],
Test->verbose ? "verbose" : "");

Test->set_timeout(10);

Test->tprintf("Connecting to Maxscale maxscales->routers[0] with Master/Slave backend\n");

@@ -30,43 +22,23 @@ int main(int argc, char* argv[])

Test->add_result(Test->test_maxscale_connections(0, true, true, true), "Can't connect to backend\n");

if ((Test->galera != NULL) && (Test->galera->N != 0))
Test->tprintf("Connecting to Maxscale router with Galera backend\n");
MYSQL * g_conn = open_conn(4016, Test->maxscales->IP[0], Test->maxscales->user_name, Test->maxscales->password, Test->ssl);
if (g_conn != NULL )
{
Test->tprintf("Connecting to Maxscale router with Galera backend\n");
MYSQL* g_conn = open_conn(4016,
Test->maxscales->IP[0],
Test->maxscales->user_name,
Test->maxscales->password,
Test->ssl);
if (g_conn != NULL)
{
Test->tprintf("Testing connection\n");
Test->add_result(Test->try_query(g_conn, (char*) "SELECT 1"),
(char*) "Error executing query against RWSplit Galera\n");
}
}
else
{
Test->tprintf("Galera is not in use\n");
Test->tprintf("Testing connection\n");
Test->add_result(Test->try_query(g_conn, (char *) "SELECT 1"),
(char *) "Error executing query against RWSplit Galera\n");
}

Test->tprintf("Closing connections\n");
Test->maxscales->close_maxscale_connections(0);
Test->check_maxscale_alive(0);

int exit_code = 0;
char* ver = Test->maxscales->ssh_node_output(0, "maxscale --version-full", false, &exit_code);
char * ver = Test->maxscales->ssh_node_output(0, "maxscale --version-full", false, &exit_code);
Test->tprintf("Maxscale_full_version_start:\n%s\nMaxscale_full_version_end\n", ver);

if ((Test->global_result == 0) && (Test->use_snapshots))
{
Test->tprintf("Taking snapshot\n");
Test->take_snapshot((char*) "clean");
}
else
{
Test->tprintf("Snapshots are not in use\n");
}

int rval = Test->global_result;
delete Test;
return rval;

@@ -22,20 +22,17 @@ if [ $? -ne 0 ]; then
echo "Error creating log dir"
fi

export maxscale_sshkey=$maxscale_keyfile
echo "log_dir: $logs_dir"
echo "maxscale_sshkey: $maxscale_sshkey"
echo "maxscale_IP: $maxscale_IP"
echo "maxscale_sshkey: $maxscale_000_keyfile"
echo "maxscale_IP: $maxscale_000_network"

if [ $maxscale_IP != "127.0.0.1" ] ; then
ssh -i $maxscale_sshkey -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o LogLevel=quiet $maxscale_access_user@$maxscale_IP "rm -rf logs; mkdir logs; $maxscale_access_sudo cp $maxscale_log_dir/*.log logs/; $maxscale_access_sudo cp /tmp/core* logs; $maxscale_access_sudo chmod 777 -R logs"
scp -i $maxscale_sshkey -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o LogLevel=quiet $maxscale_access_user@$maxscale_IP:logs/* $logs_dir
ssh -i ${maxscale_000_keyfile} -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o LogLevel=quiet ${maxscale_000_whoami}@${maxscale_000_network} "rm -rf logs; mkdir logs; ${maxscale_000_access_sudo} cp ${maxscale_log_dir}/*.log logs/; ${maxscale_000_access_sudo} cp /tmp/core* logs; ${maxscale_000_access_sudo} chmod 777 -R logs"
scp -i ${maxscale_000_keyfile} -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o LogLevel=quiet ${maxscale_000_whoami}@${maxscale_000_network}:logs/* $logs_dir
if [ $? -ne 0 ]; then
echo "Error copying Maxscale logs"
fi
#scp -i $maxscale_sshkey -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o LogLevel=quiet $maxscale_access_user@$maxscale_IP:/tmp/core* $logs_dir
#scp -i $maxscale_sshkey -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o LogLevel=quiet $maxscale_access_user@$maxscale_IP:/root/core* $logs_dir
scp -i $maxscale_sshkey -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o LogLevel=quiet $maxscale_access_user@$maxscale_IP:$maxscale_cnf $logs_dir
scp -i ${maxscale_000_keyfile} -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o LogLevel=quiet ${maxscale_000_whoami}@${maxscale_000_network}:$maxscale_cnf $logs_dir
chmod a+r $logs_dir/*
else
sudo cp $maxscale_log_dir/*.log $logs_dir

@@ -1,5 +1,4 @@
#ifndef DIFFERENT_SIZE_H
#define DIFFERENT_SIZE_H
#pragma once

#include <iostream>
#include <unistd.h>

@@ -35,5 +34,3 @@ void set_max_packet(TestConnections* Test, bool binlog, char* cmd);
* @param binlog if true - connects to Master, otherwise - to RWSplit router
*/
void different_packet_size(TestConnections* Test, bool binlog);

#endif // DIFFERENT_SIZE_H
maxscale-system-test/envv.cpp (new file, 61 lines)
@@ -0,0 +1,61 @@
#include <string.h>
#include <string>
#include "envv.h"

char * readenv(const char * name, const char *format, ...)
{
char * env = getenv(name);
if (!env)
{
va_list valist;

va_start(valist, format);
int message_len = vsnprintf(NULL, 0, format, valist);
va_end(valist);

if (message_len < 0)
{
return NULL;
}

env = (char*)malloc(message_len + 1);

va_start(valist, format);
vsnprintf(env, message_len + 1, format, valist);
va_end(valist);
setenv(name, env, 1);
}
return env;
}

int readenv_int(const char * name, int def)
{
int x;
char * env = getenv(name);
if (env)
{
sscanf(env, "%d", &x);
}
else
{
x = def;
setenv(name, (std::to_string(x).c_str()), 1);
}
return x;
}

bool readenv_bool(const char * name, bool def)
{
char * env = getenv(name);
if (env)
{
return ((strcasecmp(env, "yes") == 0) ||
(strcasecmp(env, "y") == 0) ||
(strcasecmp(env, "true") == 0));
}
else
{
setenv(name, def ? "true" : "false", 1);
return def;
}
}
maxscale-system-test/envv.h (new file, 30 lines)
@@ -0,0 +1,30 @@
#pragma once

#include <stdio.h>
#include <stdarg.h>
#include <stdlib.h>

/**
* @brief readenv Read enviromental variable, if emtpy - set dafault
* @param name Name of the variable
* @param format Default value
* @return Enviromental variable value
*/
char * readenv(const char * name, const char *format, ...);

/**
* @brief readenv_int Read integer value of enviromental variable, if empty - set dafault
* @param name Name of the variable
* @param def Default value
* @return Enviromental variable value converted to int
*/
int readenv_int(const char * name, int def);

/**
* @brief readenv_int Read boolean value of enviromental variable, if empty - set dafault
* Values 'yes', 'y', 'true' (case independedant) are interpreted as TRUE, everything else - as FALSE
* @param name Name of the variable
* @param def Default value
* @return Enviromental variable value converted to bool
*/
bool readenv_bool(const char * name, bool def);
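A brief usage sketch of the helpers declared above (illustrative only; the specific variable names below are examples, but the three functions and their behaviour, falling back to the given default and exporting it when the variable is unset, are the ones added by this commit):

```cpp
#include "envv.h"

void example()
{
    // Uses the printf-style default "127.0.0.1" and exports it if the variable is unset
    char* ip = readenv("maxscale_000_network", "%s", "127.0.0.1");
    int port = readenv_int("maxscale_port", 4006);   // hypothetical variable name
    bool verbose = readenv_bool("verbose", false);
    (void)ip; (void)port; (void)verbose;
}
```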
@@ -1,5 +1,4 @@
#ifndef EXECUTE_CMD_H
#define EXECUTE_CMD_H
#pragma once

#include <iostream>
#include <unistd.h>

@@ -12,6 +11,4 @@ using namespace std;
* @param res Pointer to variable that will contain command console output (stdout)
* @return Process exit code
*/
int execute_cmd(char* cmd, char** res);

#endif // EXECUTE_CMD_H
int execute_cmd(char * cmd, char ** res);

@@ -1,5 +1,4 @@
#ifndef FW_COPY_RULES_H
#define FW_COPY_RULES_H
#pragma once

#include "testconnections.h"

@@ -10,5 +9,3 @@
* @param rules_dir Directory where file is located
*/
void copy_rules(TestConnections* Test, const char* rules_name, const char* rules_dir);

#endif // FW_COPY_RULES_H

@@ -1,5 +1,4 @@
#ifndef GET_COM_SELECT_INSERT_H
#define GET_COM_SELECT_INSERT_H
#pragma once

#include "testconnections.h"

@@ -28,6 +27,3 @@ int print_delta(long int* new_selects,
long int* selects,
long int* inserts,
int nodes_num);

#endif // GET_COM_SELECT_INSERT_H

@@ -33,6 +33,7 @@ int get_my_ip(char* remote_ip, char* my_ip)
serv.sin_addr.s_addr = inet_addr(remote_ip);
serv.sin_port = htons(dns_port);

connect(sock, (const struct sockaddr*) &serv, sizeof(serv));

struct sockaddr_in name;

@@ -1,5 +1,4 @@
#ifndef GET_MY_IP_H
#define GET_MY_IP_H
#pragma once

/**
* @brief get_my_ip Get IP address of machine where this code is executed as it is visible from remote machine

@@ -8,6 +7,4 @@
* @param my_ip Pointer to result (own IP string)
* @return 0 in case of success
*/
int get_my_ip(char* remote_ip, char* my_ip);

#endif // GET_MY_IP_H
int get_my_ip(char * remote_ip, char *my_ip );

@@ -1,8 +1,4 @@
#!/bin/bash

rp=`realpath $0`
export src_dir=`dirname $rp`

./non_native_setup insertstream

$src_dir/mysqltest_driver.sh insertstream $src_dir/insertstream 4006

@@ -1,5 +1,4 @@
#ifndef KEEPALIVED_FUNC_H
#define KEEPALIVED_FUNC_H
#pragma once

#include "testconnections.h"

@@ -9,5 +8,3 @@ char virtual_ip[27];
char* print_version_string(TestConnections* Test);
void configure_keepalived(TestConnections* Test, char* keepalived_file);
void stop_keepalived(TestConnections* Test);

#endif // KEEPALIVED_FUNC_H
maxscale-system-test/labels_table.cpp (new file, 20 lines)
@@ -0,0 +1,20 @@
#include <cstring>
#include <string>
#include <stdio.h>
#include "labels_table.h"

std::string get_mdbci_lables(const char *labels_string)
{
std::string mdbci_labels("MAXSCALE");

for (size_t i = 0; i < sizeof(labels_table) / sizeof(labels_table_t); i++)
{
printf("%lu\t %s\n", i, labels_table[i].test_label);
if (strstr(labels_string, labels_table[i].test_label))
{
mdbci_labels += "," + std::string(labels_table[i].mdbci_label);
}
}
printf("mdbci labels %s\n", mdbci_labels.c_str());
return mdbci_labels;
}
maxscale-system-test/labels_table.h (new file, 29 lines)
@@ -0,0 +1,29 @@
#pragma once

#include <string>

struct labels_table_t
{
const char* test_label;
const char* mdbci_label;

};

const labels_table_t labels_table [] __attribute__((unused)) =
{
{"REPL_BACKEND", "REPL_BACKEND"},
{"GALERA_BACKEND", "GALERA_BACKEND"},
{"TWO_MAXSCALES", "SECOND_MAXSCALE"},
{"COLUMNSTORE_BACKEND", "COLUMNSTORE_BACKEND"},
};

/**
* @brief get_mdbci_lables Finds all MDBCI labels which are needed by test
* Every test has a number of labels defined in the CMakeLists.txt,
* some of these lables defines which nodes (virtual machines) are needed
* for this particular test. Function finds such labels and forms labels string
* in the 'mdbci up' command format
* @param labels_string All lables from CMakeLists.txt
* @return Labels string in the 'mdbci up' --labels parameter format
*/
std::string get_mdbci_lables(const char * labels_string);
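An illustrative call of the helper declared above (the label string here is a hypothetical example; the function and the label table come from this commit):

```cpp
#include "labels_table.h"

// For CMake labels "readwritesplit REPL_BACKEND GALERA_BACKEND" this returns
// "MAXSCALE,REPL_BACKEND,GALERA_BACKEND", suitable for the 'mdbci up' --labels parameter.
std::string mdbci = get_mdbci_lables("readwritesplit REPL_BACKEND GALERA_BACKEND");
```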
@@ -121,7 +121,7 @@ int main(int argc, char *argv[])
for (i = 0; i < threads_num[j]; i++)
{
data[j][i].sql = (char*) malloc((i +1) * 32 * 14 + 32);
create_insert_string(data[j][i].sql, (i + 1) * 32 , i);
create_insert_string(data[j][i].sql, (i + 1) * 32, i);
Test->tprintf("sqL %d: %d\n", i, strlen(data[j][i].sql));
data[j][i].exit_flag = false;
data[j][i].id = i;

@@ -178,12 +178,12 @@ void try_and_reconnect(MYSQL * conn, char * db, char * sql)
Test->tprintf("reconnect");
mysql_close(conn);
conn = open_conn_db_timeout(port,
IP,
db,
Test->repl->user_name,
Test->repl->password,
20,
Test->ssl);
IP,
db,
Test->repl->user_name,
Test->repl->password,
20,
Test->ssl);
}
}

@@ -194,12 +194,12 @@ void *query_thread(void *ptr )
int inserts_until_optimize = 100000;
int tn = 0;
conn = open_conn_db_timeout(port,
IP,
(char *) "test",
Test->repl->user_name,
Test->repl->password,
20,
Test->ssl);
IP,
(char *) "test",
Test->repl->user_name,
Test->repl->password,
20,
Test->ssl);
while (!data->exit_flag)
{

@@ -227,12 +227,12 @@ void *read_thread(void *ptr )
int i = 0;
char sql[256];
conn = open_conn_db_timeout(port,
IP,
(char *) "test",
Test->repl->user_name,
Test->repl->password,
20,
Test->ssl);
IP,
(char *) "test",
Test->repl->user_name,
Test->repl->password,
20,
Test->ssl);
while (!data->exit_flag)
{
sprintf(sql, "SELECT * FROM t1 WHERE fl=%d", data->id);

@@ -250,12 +250,12 @@ void *transaction_thread(void *ptr )
int tn = 0;
t_data * data = (t_data *) ptr;
conn = open_conn_db_timeout(port,
IP,
(char *) "test1",
Test->repl->user_name,
Test->repl->password,
20,
Test->ssl);
IP,
(char *) "test1",
Test->repl->user_name,
Test->repl->password,
20,
Test->ssl);
while (!data->exit_flag)
{

@@ -281,12 +281,12 @@ void *transaction_thread(void *ptr )
mysql_close(conn);

conn = open_conn_db_timeout(port,
IP,
(char *) "",
Test->maxscales->user_name,
Test->maxscales->password,
20,
Test->ssl);
IP,
(char *) "",
Test->maxscales->user_name,
Test->maxscales->password,
20,
Test->ssl);
Test->try_query(conn, "DROP DATABASE test1");
mysql_close(conn);
return NULL;

@@ -299,12 +299,12 @@ void *short_session_thread(void *ptr )
while (!data->exit_flag)
{
conn = open_conn_db_timeout(port,
IP,
(char *) "test",
Test->repl->user_name,
Test->repl->password,
20,
Test->ssl);
IP,
(char *) "test",
Test->repl->user_name,
Test->repl->password,
20,
Test->ssl);
mysql_close(conn);
}
return NULL;

@@ -317,12 +317,12 @@ void *prepared_stmt_thread(void *ptr )
t_data * data = (t_data *) ptr;
char sql[256];
conn = open_conn_db_timeout(port,
IP,
(char *) "test2",
Test->repl->user_name,
Test->repl->password,
20,
Test->ssl);
IP,
(char *) "test2",
Test->repl->user_name,
Test->repl->password,
20,
Test->ssl);
while (!data->exit_flag)
{
sprintf(sql, "PREPARE stmt%d FROM 'SELECT * FROM t1 WHERE fl=@x;';", data->id);

@@ -340,12 +340,12 @@ void *prepared_stmt_thread(void *ptr )
mysql_close(conn);

conn = open_conn_db_timeout(port,
IP,
(char *) "",
Test->maxscales->user_name,
Test->maxscales->password,
20,
Test->ssl);
IP,
(char *) "",
Test->maxscales->user_name,
Test->maxscales->password,
20,
Test->ssl);
Test->try_query(conn, "DROP DATABASE test2");
mysql_close(conn);
return NULL;

@@ -31,7 +31,7 @@ int main(int argc, char* argv[])

Test->tprintf("done, syncing slaves");
Test->stop_timeout();
Test->repl->sync_slaves();
Test->galera->sync_slaves();
Test->tprintf("Trying SELECT");
Test->set_timeout(60);
Test->try_query(Test->maxscales->conn_rwsplit[0], (char*) "SELECT * FROM t1");

@@ -1,6 +1,4 @@
#ifndef MARIADB_FUNC_H
#define MARIADB_FUNC_H

#pragma once

/**
* @file mariadb_func.h - basic DB interaction routines

@@ -353,5 +351,3 @@ private:
bool m_ssl;
MYSQL* m_conn = nullptr;
};

#endif // MARIADB_FUNC_H

@@ -18,6 +18,7 @@
#include <iostream>
#include <vector>
#include <future>
#include "envv.h"

using std::cout;
using std::endl;

@@ -32,8 +33,9 @@ void Mariadb_nodes::require_gtid(bool value)
g_require_gtid = value;
}

Mariadb_nodes::Mariadb_nodes(const char* pref, const char* test_cwd, bool verbose)
: v51(false)
Mariadb_nodes::Mariadb_nodes(const char *pref, const char *test_cwd, bool verbose,
std::string network_config):
v51(false)
{
use_ipv6 = false;
strcpy(prefix, pref);

@@ -41,6 +43,7 @@ Mariadb_nodes::Mariadb_nodes(const char* pref, const char* test_cwd, bool verbos
|
||||
memset(blocked, 0, sizeof(blocked));
|
||||
no_set_pos = false;
|
||||
this->verbose = verbose;
|
||||
this->network_config = network_config;
|
||||
strcpy(test_dir, test_cwd);
|
||||
read_env();
|
||||
truncate_mariadb_logs();
|
||||
@ -125,43 +128,20 @@ void Mariadb_nodes::close_connections()
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
void Mariadb_nodes::read_env()
|
||||
{
|
||||
char* env;
|
||||
char env_name[64];
|
||||
|
||||
read_basic_env();
|
||||
|
||||
sprintf(env_name, "%s_user", prefix);
|
||||
env = getenv(env_name);
|
||||
if (env != NULL)
|
||||
{
|
||||
sscanf(env, "%s", user_name);
|
||||
}
|
||||
else
|
||||
{
|
||||
sprintf(user_name, "skysql");
|
||||
}
|
||||
sprintf(env_name, "%s_password", prefix);
|
||||
env = getenv(env_name);
|
||||
if (env != NULL)
|
||||
{
|
||||
sscanf(env, "%s", password);
|
||||
}
|
||||
else
|
||||
{
|
||||
sprintf(password, "skysql");
|
||||
}
|
||||
user_name = readenv(env_name, "skysql");
|
||||
|
||||
sprintf(env_name, "%s_password", prefix);
|
||||
password = readenv(env_name, "skysql");
|
||||
|
||||
ssl = false;
|
||||
sprintf(env_name, "%s_ssl", prefix);
|
||||
env = getenv(env_name);
|
||||
if ((env != NULL) && ((strcasecmp(env, "yes") == 0) || (strcasecmp(env, "true") == 0)))
|
||||
{
|
||||
ssl = true;
|
||||
}
|
||||
ssl = readenv_bool(env_name, false);
|
||||
|
||||
if ((N > 0) && (N < 255))
|
||||
{
|
||||
@ -169,66 +149,34 @@ void Mariadb_nodes::read_env()
|
||||
{
|
||||
// reading ports
|
||||
sprintf(env_name, "%s_%03d_port", prefix, i);
|
||||
env = getenv(env_name);
|
||||
if (env != NULL)
|
||||
{
|
||||
sscanf(env, "%d", &port[i]);
|
||||
}
|
||||
else
|
||||
{
|
||||
port[i] = 3306;
|
||||
}
|
||||
// reading sockets
|
||||
sprintf(env_name, "%s_%03d_socket", prefix, i);
|
||||
env = getenv(env_name);
|
||||
if (env != NULL)
|
||||
{
|
||||
sprintf(socket[i], "%s", env);
|
||||
sprintf(socket_cmd[i], "--socket=%s", env);
|
||||
}
|
||||
else
|
||||
{
|
||||
sprintf(socket[i], " ");
|
||||
sprintf(socket_cmd[i], " ");
|
||||
}
|
||||
port[i] = readenv_int(env_name, 3306);
|
||||
|
||||
//reading sockets
|
||||
sprintf(env_name, "%s_%03d_socket", prefix, i);
|
||||
socket[i] = readenv(env_name, " ");
|
||||
if (strcmp(socket[i], " "))
|
||||
{
|
||||
socket_cmd[i] = (char *) malloc(strlen(socket[i]) + 10);
|
||||
sprintf(socket_cmd[i], "--socket=%s", socket[i]);
|
||||
}
|
||||
else
|
||||
{
|
||||
socket_cmd[i] = (char *) " ";
|
||||
}
|
||||
sprintf(env_name, "%s_%03d_socket_cmd", prefix, i);
|
||||
setenv(env_name, socket_cmd[i], 1);
|
||||
|
||||
// reading start_db_command
|
||||
sprintf(env_name, "%s_%03d_start_db_command", prefix, i);
|
||||
env = getenv(env_name);
|
||||
if (env != NULL)
|
||||
{
|
||||
sprintf(start_db_command[i], "%s", env);
|
||||
}
|
||||
else
|
||||
{
|
||||
sprintf(start_db_command[i], "%s", "service mysql start");
|
||||
}
|
||||
start_db_command[i] = readenv(env_name, (char *) "service mysql start");
|
||||
|
||||
// reading stop_db_command
|
||||
sprintf(env_name, "%s_%03d_stop_db_command", prefix, i);
|
||||
env = getenv(env_name);
|
||||
if (env != NULL)
|
||||
{
|
||||
sprintf(stop_db_command[i], "%s", env);
|
||||
}
|
||||
else
|
||||
{
|
||||
sprintf(stop_db_command[i], "%s", "service mysql stop");
|
||||
}
|
||||
stop_db_command[i] = readenv(env_name, (char *) "service mysql stop");
|
||||
|
||||
// reading cleanup_db_command
|
||||
sprintf(env_name, "%s_%03d_cleanup_db_command", prefix, i);
|
||||
env = getenv(env_name);
|
||||
if (env != NULL)
|
||||
{
|
||||
sprintf(cleanup_db_command[i], "%s", env);
|
||||
}
|
||||
else
|
||||
{
|
||||
sprintf(cleanup_db_command[i],
|
||||
"service mysql stop; killall -9 mysqld; rm -rf /var/lib/mysql/*");
|
||||
}
|
||||
cleanup_db_command[i] = readenv(env_name, (char *) "rm -rf /var/lib/mysql/*; killall -9 mysqld");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
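The hunk above replaces hand-rolled getenv() boilerplate with readenv()-style helpers from envv.h, a header that is not part of this diff. Below is a minimal sketch of what such helpers could look like, inferred only from the call sites above (readenv(env_name, "skysql"), readenv_int(env_name, 3306), readenv_bool(env_name, false)); the real signatures and behaviour in envv.h may differ.

```cpp
// Hypothetical stand-ins for the envv.h helpers, inferred from the call sites.
#include <cstdlib>
#include <cstring>
#include <strings.h>

// Returns the value of 'name', or a heap copy of 'def' when the variable is not set.
static char* readenv(const char* name, const char* def)
{
    const char* env = getenv(name);
    return strdup(env ? env : def);
}

// Integer variant with a default value.
static int readenv_int(const char* name, int def)
{
    const char* env = getenv(name);
    return env ? atoi(env) : def;
}

// Boolean variant: "yes"/"true" (case-insensitive) count as true,
// matching the ssl handling that the old code implemented inline.
static bool readenv_bool(const char* name, bool def)
{
    const char* env = getenv(name);
    return env ? (strcasecmp(env, "yes") == 0 || strcasecmp(env, "true") == 0) : def;
}
```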
@ -1,5 +1,4 @@
|
||||
#ifndef MARIADB_NODES_H
|
||||
#define MARIADB_NODES_H
|
||||
#pragma once
|
||||
|
||||
/**
|
||||
* @file mariadb_nodes.h - backend nodes routines
|
||||
@ -36,7 +35,7 @@ public:
|
||||
* @brief Constructor
|
||||
* @param pref name of backend setup (like 'repl' or 'galera')
|
||||
*/
|
||||
Mariadb_nodes(const char* pref, const char* test_cwd, bool verbose);
|
||||
Mariadb_nodes(const char *pref, const char *test_cwd, bool verbose, std::string network_config);
|
||||
|
||||
virtual ~Mariadb_nodes();
|
||||
|
||||
@ -55,20 +54,20 @@ public:
|
||||
/**
|
||||
* @brief Unix socket to connect to MariaDB
|
||||
*/
|
||||
char socket[256][1024];
|
||||
char * socket[256];
|
||||
/**
|
||||
* @brief 'socket=$socket' line
|
||||
*/
|
||||
char socket_cmd[256][1024];
|
||||
char * socket_cmd[256];
|
||||
|
||||
/**
|
||||
* @brief User name to access backend nodes
|
||||
*/
|
||||
char user_name[256];
|
||||
char * user_name;
|
||||
/**
|
||||
* @brief Password to access backend nodes
|
||||
*/
|
||||
char password[256];
|
||||
char * password;
|
||||
/**
|
||||
* @brief master index of node which was last configured to be Master
|
||||
*/
|
||||
@ -77,18 +76,18 @@ public:
|
||||
/**
|
||||
* @brief start_db_command Command to start DB server
|
||||
*/
|
||||
char start_db_command[256][4096];
|
||||
char * start_db_command[256];
|
||||
|
||||
/**
|
||||
* @brief stop_db_command Command to stop DB server
|
||||
*/
|
||||
char stop_db_command[256][4096];
|
||||
char * stop_db_command[256];
|
||||
|
||||
/**
|
||||
* @brief cleanup_db_command Command to remove all
|
||||
* data files and re-install DB with mysql_install_db
|
||||
*/
|
||||
char cleanup_db_command[256][4096];
|
||||
char * cleanup_db_command[256];
|
||||
|
||||
/**
|
||||
* @brief ssl if true ssl will be used
|
||||
@ -381,7 +380,7 @@ public:
|
||||
* Only works with master-slave replication and should not be used with Galera clusters.
|
||||
* The function expects that the first node, @c nodes[0], is the master.
|
||||
*/
|
||||
void sync_slaves(int node = 0);
|
||||
virtual void sync_slaves(int node = 0);
|
||||
|
||||
/**
|
||||
* @brief Close all connections to this node
|
||||
@ -488,10 +487,8 @@ class Galera_nodes : public Mariadb_nodes
|
||||
{
|
||||
public:
|
||||
|
||||
Galera_nodes(const char* pref, const char* test_cwd, bool verbose)
|
||||
: Mariadb_nodes(pref, test_cwd, verbose)
|
||||
{
|
||||
}
|
||||
Galera_nodes(const char *pref, const char *test_cwd, bool verbose, std::string network_config) :
|
||||
Mariadb_nodes(pref, test_cwd, verbose, network_config) { }
|
||||
|
||||
int start_galera();
|
||||
|
||||
@ -508,6 +505,9 @@ public:
|
||||
}
|
||||
|
||||
std::string get_config_name(int node) override;
|
||||
};
|
||||
|
||||
#endif // MARIADB_NODES_H
|
||||
virtual void sync_slaves(int node = 0)
|
||||
{
|
||||
sleep(10);
|
||||
}
|
||||
};
|
||||
|
@ -4,8 +4,6 @@
|
||||
#
|
||||
# TODO: Don't test correctness of routing with mysqltest
|
||||
#
|
||||
rp=`realpath $0`
|
||||
export src_dir=`dirname $rp`
|
||||
|
||||
# TODO: Don't copy this and "unmangle" the test instead
|
||||
cp -r $src_dir/Hartmut_tests/maxscale-mysqltest ./Hartmut_tests/maxscale-mysqltest/
|
||||
@ -15,9 +13,9 @@ echo "--disable_query_log" > Hartmut_tests/maxscale-mysqltest/testconf.inc
|
||||
echo "SET @TMASTER_ID=$master_id;" >> Hartmut_tests/maxscale-mysqltest/testconf.inc
|
||||
echo "--enable_query_log" >> Hartmut_tests/maxscale-mysqltest/testconf.inc
|
||||
|
||||
$src_dir/mysqltest_driver.sh $1 $PWD/Hartmut_tests/maxscale-mysqltest 4006
|
||||
echo "--disable_query_log" > testconf.inc
|
||||
echo "SET @TMASTER_ID=$master_id;" >> testconf.inc
|
||||
echo "--enable_query_log" >> testconf.inc
|
||||
|
||||
ret=$?
|
||||
$src_dir/copy_logs.sh $1
|
||||
$src_dir/mysqltest_driver.sh "$1" "$PWD/Hartmut_tests/maxscale-mysqltest" 4006
|
||||
|
||||
exit $ret
|
||||
|
@ -2,31 +2,13 @@
|
||||
|
||||
script=`basename "$0"`
|
||||
|
||||
if [ $# -lt 1 ]
|
||||
then
|
||||
echo "usage: $script name [user] [password]"
|
||||
echo ""
|
||||
echo "name : The name of the test (from CMakeLists.txt) That selects the"
|
||||
echo " configuration template to be used."
|
||||
echo "user : The user using which the test should be run."
|
||||
echo "password: The password of the user."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "$maxscale_IP" == "" ]
|
||||
then
|
||||
echo "Error: The environment variable maxscale_IP must be set."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
src_dir=$(dirname $(realpath $0))
|
||||
source=$src_dir/masking/$1/masking_rules.json
|
||||
target=$maxscale_access_user@$maxscale_IP:/home/$maxscale_access_user/masking_rules.json
|
||||
target=${maxscale_000_whoami}@${maxscale_000_network}:/home/${maxscale_000_whoami}/masking_rules.json
|
||||
|
||||
if [ $maxscale_IP != "127.0.0.1" ] ; then
|
||||
scp -i $maxscale_keyfile -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null $source $target
|
||||
if [ ${maxscale_000_network} != "127.0.0.1" ] ; then
|
||||
scp -i $maxscale_000_keyfile -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null $source $target
|
||||
else
|
||||
cp $source /home/$maxscale_access_user/masking_rules.json
|
||||
cp $source /home/${maxscale_000_whoami}/masking_rules.json
|
||||
fi
|
||||
|
||||
if [ $? -ne 0 ]
|
||||
@ -35,25 +17,11 @@ then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo $source copied to $target
|
||||
echo $source copied to $target, restarting Maxscale
|
||||
|
||||
test_dir=`pwd`
|
||||
|
||||
$test_dir/non_native_setup $1
|
||||
|
||||
password=
|
||||
if [ $# -ge 3 ]
|
||||
then
|
||||
password=$3
|
||||
fi
|
||||
|
||||
user=
|
||||
if [ $# -ge 2 ]
|
||||
then
|
||||
user=$2
|
||||
fi
|
||||
ssh -i $maxscale_000_keyfile -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ${maxscale_000_whoami}@${maxscale_000_network} 'sudo service maxscale restart'
|
||||
|
||||
# [Read Connection Listener Master] in cnf/maxscale.maxscale.cnf.template.$1
|
||||
port=4008
|
||||
|
||||
$src_dir/mysqltest_driver.sh $1 $src_dir/masking/$1 $port $user $password
|
||||
$src_dir/mysqltest_driver.sh $1 $src_dir/masking/$1 $port $maxscale_user $maxscale_password
|
||||
|
@ -2,30 +2,13 @@
|
||||
|
||||
script=`basename "$0"`
|
||||
|
||||
if [ $# -lt 1 ]
|
||||
then
|
||||
echo "usage: $script name"
|
||||
echo ""
|
||||
echo "name : The name of the test (from CMakeLists.txt) That selects the"
|
||||
echo " configuration template to be used."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "$maxscale_IP" == "" ]
|
||||
then
|
||||
echo "Error: The environment variable maxscale_IP must be set."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
src_dir=$(dirname $(realpath $0))
|
||||
echo "src_dir: $src_dir"
|
||||
source=$src_dir/masking/$1/masking_rules.json
|
||||
target=vagrant@$maxscale_IP:/home/$maxscale_access_user/masking_rules.json
|
||||
target=${maxscale_000_whoami}@${maxscale_000_network}:/home/${maxscale_000_whoami}/masking_rules.json
|
||||
|
||||
if [ $maxscale_IP != "127.0.0.1" ] ; then
|
||||
scp -i $maxscale_keyfile -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null $source $target
|
||||
if [ ${maxscale_000_network} != "127.0.0.1" ] ; then
|
||||
scp -i $maxscale_000_keyfile -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null $source $target
|
||||
else
|
||||
cp $source /home/$maxscale_access_user/masking_rules.json
|
||||
cp $source /home/${maxscale_000_whoami}/masking_rules.json
|
||||
fi
|
||||
|
||||
if [ $? -ne 0 ]
|
||||
@ -34,25 +17,25 @@ then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo $source copied to $target
|
||||
echo $source copied to $target, restarting maxscale
|
||||
|
||||
ssh -i $maxscale_000_keyfile -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ${maxscale_000_whoami}@${maxscale_000_network} 'sudo service maxscale restart'
|
||||
|
||||
test_dir=`pwd`
|
||||
|
||||
$test_dir/non_native_setup $1
|
||||
logdir=log_$1
|
||||
[ -d $logdir ] && rm -r $logdir
|
||||
mkdir $logdir || exit 1
|
||||
|
||||
# [Read Connection Listener Master] in cnf/maxscale.maxscale.cnf.template.$1
|
||||
port=4008
|
||||
password=skysql
|
||||
|
||||
dir="$src_dir/masking/$1"
|
||||
|
||||
user=skysql
|
||||
test_name=masking_user
|
||||
mysqltest --host=$maxscale_IP --port=$port \
|
||||
--user=$user --password=$password \
|
||||
mysqltest --host=${maxscale_000_network} --port=$port \
|
||||
--user=$maxscale_user --password=$maxscale_password \
|
||||
--logdir=$logdir \
|
||||
--test-file=$dir/t/$test_name.test \
|
||||
--result-file=$dir/r/"$test_name"_"$user".result \
|
||||
@ -67,8 +50,8 @@ fi
|
||||
|
||||
user=maxskysql
|
||||
test_name=masking_user
|
||||
mysqltest --host=$maxscale_IP --port=$port \
|
||||
--user=$user --password=$password \
|
||||
mysqltest --host=${maxscale_000_network} --port=$port \
|
||||
--user=$maxscale_user --password=$maxscale_password \
|
||||
--logdir=$logdir \
|
||||
--test-file=$dir/t/$test_name.test \
|
||||
--result-file=$dir/r/"$test_name"_"$user".result \
|
||||
@ -81,9 +64,4 @@ else
|
||||
res=1
|
||||
fi
|
||||
|
||||
echo
|
||||
|
||||
# Copy logs from the VM
|
||||
$src_dir/copy_logs.sh $1
|
||||
|
||||
exit $res
|
||||
echo $res
|
||||
|
@ -1,6 +1,4 @@
|
||||
#ifndef MAXADMIN_OPERATIONS_H
|
||||
#define MAXADMIN_OPERATIONS_H
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
@ -107,5 +105,3 @@ int execute_maxadmin_command_tcp(char* hostname, char* user, char* password, cha
|
||||
* @return 0 if parameter is found
|
||||
*/
|
||||
int execute_maxadmin_command_print_pcp(char* hostname, char* user, char* password, char* cmd);
|
||||
|
||||
#endif // MAXADMIN_OPERATIONS_H
|
||||
|
@ -1,5 +1,4 @@
|
||||
#ifndef MAXINFO_FUNC_H
|
||||
#define MAXINFO_FUNC_H
|
||||
#pragma once
|
||||
|
||||
int create_tcp_socket();
|
||||
char* get_ip(char* host);
|
||||
@ -20,6 +19,3 @@ static char* bin2hex(const unsigned char* old, const size_t oldlen);
|
||||
char* cdc_auth_srt(char* user, char* password);
|
||||
int setnonblocking(int sock);
|
||||
int get_x_fl_from_json(char* line, long long int* x1, long long int* fl);
|
||||
|
||||
|
||||
#endif // MAXINFO_FUNC_H
|
||||
|
@ -59,31 +59,31 @@ class MaxScaleTest:
|
||||
def __init__(self, testname = "python_test"):
|
||||
|
||||
self.testname = testname
|
||||
prepare_test(testname)
|
||||
# prepare_test(testname)
|
||||
|
||||
# MaxScale connections
|
||||
self.maxscale = dict()
|
||||
self.maxscale['rwsplit'] = SQLConnection(host = os.getenv("maxscale_IP"), port = "4006", user = os.getenv("maxscale_user"), password = os.getenv("maxscale_password"))
|
||||
self.maxscale['rcmaster'] = SQLConnection(host = os.getenv("maxscale_IP"), port = "4008", user = os.getenv("maxscale_user"), password = os.getenv("maxscale_password"))
|
||||
self.maxscale['rcslave'] = SQLConnection(host = os.getenv("maxscale_IP"), port = "4009", user = os.getenv("maxscale_user"), password = os.getenv("maxscale_password"))
|
||||
self.maxscale['rwsplit'] = SQLConnection(host = os.getenv("maxscale_000_network"), port = "4006", user = os.getenv("maxscale_user"), password = os.getenv("maxscale_password"))
|
||||
self.maxscale['rcmaster'] = SQLConnection(host = os.getenv("maxscale_000_network"), port = "4008", user = os.getenv("maxscale_user"), password = os.getenv("maxscale_password"))
|
||||
self.maxscale['rcslave'] = SQLConnection(host = os.getenv("maxscale_000_network"), port = "4009", user = os.getenv("maxscale_user"), password = os.getenv("maxscale_password"))
|
||||
|
||||
# Master-Slave nodes
|
||||
self.repl = dict()
|
||||
self.repl['node0'] = SQLConnection(host = os.getenv("node_000_network"), port = os.getenv("node_000_port"), user = os.getenv("maxscale_user"), password = os.getenv("maxscale_password"))
|
||||
self.repl['node1'] = SQLConnection(host = os.getenv("node_001_network"), port = os.getenv("node_001_port"), user = os.getenv("maxscale_user"), password = os.getenv("maxscale_password"))
|
||||
self.repl['node2'] = SQLConnection(host = os.getenv("node_002_network"), port = os.getenv("node_002_port"), user = os.getenv("maxscale_user"), password = os.getenv("maxscale_password"))
|
||||
self.repl['node3'] = SQLConnection(host = os.getenv("node_003_network"), port = os.getenv("node_003_port"), user = os.getenv("maxscale_user"), password = os.getenv("maxscale_password"))
|
||||
self.repl['node0'] = SQLConnection(host = os.getenv("node_000_network"), port = os.getenv("node_000_port"), user = os.getenv("node_user"), password = os.getenv("node_password"))
|
||||
self.repl['node1'] = SQLConnection(host = os.getenv("node_001_network"), port = os.getenv("node_001_port"), user = os.getenv("node_user"), password = os.getenv("node_password"))
|
||||
self.repl['node2'] = SQLConnection(host = os.getenv("node_002_network"), port = os.getenv("node_002_port"), user = os.getenv("node_user"), password = os.getenv("node_password"))
|
||||
self.repl['node3'] = SQLConnection(host = os.getenv("node_003_network"), port = os.getenv("node_003_port"), user = os.getenv("node_user"), password = os.getenv("node_password"))
|
||||
|
||||
# Galera nodes
|
||||
self.galera = dict()
|
||||
self.galera['node0'] = SQLConnection(host = os.getenv("galera_000_network"), port = os.getenv("galera_000_port"), user = os.getenv("maxscale_user"), password = os.getenv("maxscale_password"))
|
||||
self.galera['node1'] = SQLConnection(host = os.getenv("galera_001_network"), port = os.getenv("galera_001_port"), user = os.getenv("maxscale_user"), password = os.getenv("maxscale_password"))
|
||||
self.galera['node2'] = SQLConnection(host = os.getenv("galera_002_network"), port = os.getenv("galera_002_port"), user = os.getenv("maxscale_user"), password = os.getenv("maxscale_password"))
|
||||
self.galera['node3'] = SQLConnection(host = os.getenv("galera_003_network"), port = os.getenv("galera_003_port"), user = os.getenv("maxscale_user"), password = os.getenv("maxscale_password"))
|
||||
self.galera['node0'] = SQLConnection(host = os.getenv("galera_000_network"), port = os.getenv("galera_000_port"), user = os.getenv("galera_user"), password = os.getenv("galera_password"))
|
||||
self.galera['node1'] = SQLConnection(host = os.getenv("galera_001_network"), port = os.getenv("galera_001_port"), user = os.getenv("galera_user"), password = os.getenv("galera_password"))
|
||||
self.galera['node2'] = SQLConnection(host = os.getenv("galera_002_network"), port = os.getenv("galera_002_port"), user = os.getenv("galera_user"), password = os.getenv("galera_password"))
|
||||
self.galera['node3'] = SQLConnection(host = os.getenv("galera_003_network"), port = os.getenv("galera_003_port"), user = os.getenv("galera_user"), password = os.getenv("galera_password"))
|
||||
|
||||
def __del__(self):
|
||||
subprocess.call(os.getcwd() + "/copy_logs.sh " + str(self.testname), shell=True)
|
||||
# def __del__(self):
|
||||
# subprocess.call(os.getcwd() + "/copy_logs.sh " + str(self.testname), shell=True)
|
||||
|
||||
# Read test environment variables
|
||||
def prepare_test(testname = "replication"):
|
||||
subprocess.call(os.getcwd() + "/non_native_setup " + str(testname), shell=True)
|
||||
#def prepare_test(testname = "replication"):
|
||||
# subprocess.call(os.getcwd() + "/non_native_setup " + str(testname), shell=True)
|
||||
|
@ -1,14 +1,18 @@
|
||||
#include "maxscales.h"
|
||||
#include <sstream>
|
||||
#include <unordered_map>
|
||||
#include <string>
|
||||
#include "envv.h"
|
||||
|
||||
Maxscales::Maxscales(const char* pref, const char* test_cwd, bool verbose, bool use_valgrind)
|
||||
Maxscales::Maxscales(const char *pref, const char *test_cwd, bool verbose, bool use_valgrind,
|
||||
std::string network_config)
|
||||
{
|
||||
strcpy(prefix, pref);
|
||||
this->verbose = verbose;
|
||||
this->use_valgrind = use_valgrind;
|
||||
valgring_log_num = 0;
|
||||
strcpy(test_dir, test_cwd);
|
||||
this->network_config = network_config;
|
||||
read_env();
|
||||
if (use_valgrind)
|
||||
{
|
||||
@ -24,7 +28,6 @@ Maxscales::Maxscales(const char* pref, const char* test_cwd, bool verbose, bool
|
||||
|
||||
int Maxscales::read_env()
|
||||
{
|
||||
char* env;
|
||||
char env_name[64];
|
||||
|
||||
read_basic_env();
|
||||
@ -34,69 +37,16 @@ int Maxscales::read_env()
|
||||
for (int i = 0; i < N; i++)
|
||||
{
|
||||
sprintf(env_name, "%s_%03d_cnf", prefix, i);
|
||||
env = getenv(env_name);
|
||||
if (env == NULL)
|
||||
{
|
||||
sprintf(env_name, "%s_cnf", prefix);
|
||||
env = getenv(env_name);
|
||||
}
|
||||
if (env != NULL)
|
||||
{
|
||||
sprintf(maxscale_cnf[i], "%s", env);
|
||||
}
|
||||
else
|
||||
{
|
||||
sprintf(maxscale_cnf[i], "/etc/maxscale.cnf");
|
||||
}
|
||||
maxscale_cnf[i] = readenv(env_name, DEFAULT_MAXSCALE_CNF);
|
||||
|
||||
sprintf(env_name, "%s_%03d_log_dir", prefix, i);
|
||||
env = getenv(env_name);
|
||||
if (env == NULL)
|
||||
{
|
||||
sprintf(env_name, "%s_log_dir", prefix);
|
||||
env = getenv(env_name);
|
||||
}
|
||||
|
||||
if (env != NULL)
|
||||
{
|
||||
sprintf(maxscale_log_dir[i], "%s", env);
|
||||
}
|
||||
else
|
||||
{
|
||||
sprintf(maxscale_log_dir[i], "/var/log/maxscale/");
|
||||
}
|
||||
maxscale_log_dir[i] = readenv(env_name, DEFAULT_MAXSCALE_LOG_DIR);
|
||||
|
||||
sprintf(env_name, "%s_%03d_binlog_dir", prefix, i);
|
||||
env = getenv(env_name);
|
||||
if (env == NULL)
|
||||
{
|
||||
sprintf(env_name, "%s_binlog_dir", prefix);
|
||||
env = getenv(env_name);
|
||||
}
|
||||
if (env != NULL)
|
||||
{
|
||||
sprintf(maxscale_binlog_dir[i], "%s", env);
|
||||
}
|
||||
else
|
||||
{
|
||||
sprintf(maxscale_binlog_dir[i], "/var/lib/maxscale/Binlog_Service/");
|
||||
}
|
||||
maxscale_binlog_dir[i] = readenv(env_name, DEFAULT_MAXSCALE_BINLOG_DIR);
|
||||
|
||||
sprintf(env_name, "%s_%03d_maxadmin_password", prefix, i);
|
||||
env = getenv(env_name);
|
||||
if (env == NULL)
|
||||
{
|
||||
sprintf(env_name, "%s_maxadmin_password", prefix);
|
||||
env = getenv(env_name);
|
||||
}
|
||||
if (env != NULL)
|
||||
{
|
||||
sprintf(maxadmin_password[i], "%s", env);
|
||||
}
|
||||
else
|
||||
{
|
||||
sprintf(maxadmin_password[i], "mariadb");
|
||||
}
|
||||
maxadmin_password[i] = readenv(env_name, DEFAULT_MAXADMIN_PASSWORD);
|
||||
|
||||
rwsplit_port[i] = 4006;
|
||||
readconn_master_port[i] = 4008;
|
||||
@ -114,7 +64,6 @@ int Maxscales::read_env()
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
int Maxscales::connect_rwsplit(int m, const std::string& db)
|
||||
{
|
||||
if (use_ipv6)
|
||||
@ -247,6 +196,21 @@ int Maxscales::restart_maxscale(int m)
|
||||
if (use_valgrind)
|
||||
{
|
||||
res = stop_maxscale(m);
|
||||
res += start_maxscale(m);
|
||||
}
|
||||
else
|
||||
{
|
||||
res = ssh_node(m, "service maxscale restart", true);
|
||||
}
|
||||
fflush(stdout);
|
||||
return res;
|
||||
}
|
||||
|
||||
int Maxscales::start_maxscale(int m)
|
||||
{
|
||||
int res;
|
||||
if (use_valgrind)
|
||||
{
|
||||
res = ssh_node_f(m, false,
|
||||
"sudo --user=maxscale valgrind --leak-check=full --show-leak-kinds=all "
|
||||
"--log-file=/%s/valgrind%02d.log --trace-children=yes "
|
||||
|
@ -1,11 +1,16 @@
|
||||
#ifndef MAXSCALES_H
|
||||
#define MAXSCALES_H
|
||||
#pragma once
|
||||
|
||||
#include <string>
|
||||
#include "nodes.h"
|
||||
#include "mariadb_func.h"
|
||||
#include "mariadb_nodes.h"
|
||||
|
||||
class Maxscales : public Nodes
|
||||
#define DEFAULT_MAXSCALE_CNF "/etc/maxscale.cnf"
|
||||
#define DEFAULT_MAXSCALE_LOG_DIR "/var/log/maxscale/"
|
||||
#define DEFAULT_MAXSCALE_BINLOG_DIR "/var/lib/maxscale/Binlog_Service/"
|
||||
#define DEFAULT_MAXADMIN_PASSWORD "mariadb"
|
||||
|
||||
class Maxscales: public Nodes
|
||||
{
|
||||
public:
|
||||
enum service
|
||||
@ -15,7 +20,9 @@ public:
|
||||
READCONN_SLAVE
|
||||
};
|
||||
|
||||
Maxscales(const char* pref, const char* test_cwd, bool verbose, bool use_valgrind);
|
||||
Maxscales(const char *pref, const char *test_cwd, bool verbose, bool use_valgrind,
          std::string network_config);
|
||||
|
||||
int read_env();
|
||||
|
||||
/**
|
||||
@ -76,29 +83,28 @@ public:
|
||||
/**
|
||||
* @brief maxadmin_Password Password to access Maxadmin tool
|
||||
*/
|
||||
char maxadmin_password[256][256];
|
||||
char * maxadmin_password[256];
|
||||
|
||||
/**
|
||||
* @brief maxscale_cnf full name of Maxscale configuration file
|
||||
*/
|
||||
char maxscale_cnf[256][4096];
|
||||
* @brief maxscale_cnf full name of Maxscale configuration file
|
||||
*/
|
||||
char * maxscale_cnf[256];
|
||||
|
||||
/**
|
||||
* @brief maxscale_log_dir name of log files directory
|
||||
*/
|
||||
char maxscale_log_dir[256][4096];
|
||||
* @brief maxscale_log_dir name of log files directory
|
||||
*/
|
||||
char * maxscale_log_dir[256];
|
||||
|
||||
/**
|
||||
* @brief maxscale_binlog_dir name of binlog files (for binlog router) directory
|
||||
*/
|
||||
char maxscale_binlog_dir[256][4096];
|
||||
* @brief maxscale_binlog_dir name of binlog files (for binlog router) directory
|
||||
*/
|
||||
char * maxscale_binlog_dir[256];
|
||||
|
||||
/**
|
||||
* @brief N_ports Default number of routers
|
||||
*/
|
||||
int N_ports[256];
|
||||
|
||||
|
||||
/**
|
||||
* @brief test_dir path to test application
|
||||
*/
|
||||
@ -252,10 +258,8 @@ public:
|
||||
/**
|
||||
* @brief alias for restart_maxscale
|
||||
*/
|
||||
int start_maxscale(int m = 0)
|
||||
{
|
||||
return restart_maxscale(m);
|
||||
}
|
||||
int start_maxscale(int m = 0);
|
||||
|
||||
int start(int m = 0)
|
||||
{
|
||||
return start_maxscale(m);
|
||||
@ -330,7 +334,4 @@ public:
|
||||
*/
|
||||
int valgring_log_num;
|
||||
|
||||
|
||||
};
|
||||
|
||||
#endif // MAXSCALES_H
|
||||
|
@ -31,11 +31,11 @@ Template for this configuration is

Other templates:

NOTE: templates 'nogalera' and 'onemaxscale' are removed. Please use 'default' and define MDBCI labels to limit the
number of started VMs

Template name|Description
---|---
```nogalera``` |only 1 VM for Maxscale and 4 for Master/Slaves|
```twomaxscales``` |2 VMs for Maxscale and 4 for Master/Slaves|
```onemaxscale``` |1 VM for Maxscale, 4 for Master/Slaves and 4 for Galera|
```big``` |1 VM for Maxscale, 8 for Master/Slaves and 4 for Galera|
```big15``` |1 VM for Maxscale, 15 for Master/Slaves and 4 for Galera|
@ -45,8 +45,7 @@ Template can contain references to any environmental variables - they all
will be replaced with values before VMs starting

The [maxscale-system-test/mdbci/run_test.sh](run_test.sh) script
brings test VMs configuration up and tries to execute
```maxscale-system-test``` using 'ctest'.
executes ```maxscale-system-test``` using 'ctest'.

The script can be executed without any parameters and without defining any
environmental variables.
@ -58,12 +57,22 @@ VMs will not be destroyed after the tests.
The name of the test run (and the name of the VMs set) is generated based on the ```box``` parameter
and the current date/time

Only the needed VMs will be started. Every test has labels like ```REPL_BACKEND```,
```GALERA_BACKEND```

A test can be executed directly by calling its executable from the command line or via ```ctest```
Every test checks the running VMs, brings up VMs if they are not running, and checks the backend.
If the backend is broken, the test tries to fix it. If the attempt to fix the backend fails, the test tries
to execute ```mdbci``` with the ```--recreate``` option. In this case ```mdbci``` kills all VMs and
brings up new ones
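For illustration, a hedged sketch of the ```mdbci``` invocations this workflow implies. The configuration name and label set are placeholders, and whether ```--recreate``` is passed to ```mdbci up``` or issued as a separate step is an assumption based on the text above.

```bash
# Bring up only the VMs whose labels the test asked for
mdbci up my_test_run --attempts 3 --labels REPL_BACKEND,MAXSCALE

# Last resort when fixing the backend failed: recreate the labelled VMs
mdbci up my_test_run --attempts 3 --labels REPL_BACKEND,MAXSCALE --recreate
```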
### Basic run_test.sh parameters

Variable name|Meaning
---|---
```target``` |name of binary repository to install Maxscale from|
```box``` |Vagrant box to be used to create VMs |
```box``` |Vagrant box to be used to create Maxscale VMs |
```backend_box``` |Vagrant box to be used to create backend VMs |
```test_set``` |Set of tests to be executed, in the 'ctest' format|
```version```|Version of DB server in Master/Slave backend|
```galera_version```|Version of DB server in Galera backend|
@ -71,7 +80,7 @@ Variable name|Meaning
```template```|Name of *.json.template file with VMs descriptions in MDBCI format|
```team_keys```|Path to the file with public ssh keys - this file is loaded to VMs|
```do_not_destroy_vm```|if 'yes' VMs stay alive after the test|
```name```|The name of the test run - any string to identify the VMs set|
```mdbci_config_name```|The name of the test run - any string to identify the VMs set|


For the complete list of environmental variables see comments in
@ -97,38 +106,22 @@ If ```galera_version``` is not defined the value of ```version``` is used also f

After execution of 'run_test.sh', by default the VMs stay alive and other tests can be executed.

Test uses environmental variables to get all info about the test setup (about VMs).
Test uses the ```${MDBCI_VM_PATH}/${mdbci_config_name}_network_config``` file to get all info about the test setup (about VMs).

The script [maxscale-system-test/mdbci/set_env.sh](set_env.sh)
loads all needed values (IPs, paths to ssh keyfiles,
user names, etc.) into environmental variables. The script uses
data from the ```${MDBCI_VM_PATH}/${name}_network_config``` file
and also calls MDBCI commands.
NOTE: environmental variables are no longer used to describe the backend. However, the test sets all these variables itself,
and any process called by the test code can use them. This can be used to create non-C++ tests (bash, python, etc.).
The script has to be sourced:
TODO: describe 'non_native_setup'

```bash
source ./mdbci/set_env.sh $name
```
The script [maxscale-system-test/mdbci/set_env.sh](set_env.sh) is no longer in use.

or

```bash
. ./mdbci/set_env.sh $name
```

After that, any 'maxscale-system-test' can be executed, e.g.:

```bash
./sql_queries
```

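As a sketch of the non-C++ case mentioned in the NOTE above: a helper script invoked by a test can simply rely on the variables the test exports for its children. The script name and the query below are placeholders; the variable names are the ones used elsewhere in this commit (```maxscale_000_network```, ```node_000_port```, ```node_user``` and so on).

```bash
#!/bin/bash
# Hypothetical helper called by a C++ test; all variables are exported by the test itself.
set -e

echo "MaxScale host: ${maxscale_000_network} (ssh as ${maxscale_000_whoami}, key ${maxscale_000_keyfile})"

# Talk to the first backend node using the exported credentials
mysql --host="${node_000_network}" --port="${node_000_port}" \
      --user="${node_user}" --password="${node_password}" \
      -e "SELECT @@hostname"
```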
### Basic MDBCI and Vagrant operations

#### Restore ${name}.config_file

```bash
${mdbci_dir}/mdbci show network_config $name
${mdbci_dir}/mdbci show network_config ${mdbci_config_name}
```

#### Suspend VMs
@ -137,45 +130,29 @@ Before rebooting computer it is recommended to suspend
Vagrant-controlled VMs

```bash
cd ${MDBCI_VM_PATH}/$name
cd ${MDBCI_VM_PATH}/${mdbci_config_name}
vagrant suspend
```

#### Resume suspended VMs

```bash
cd ${MDBCI_VM_PATH}/$name
cd ${MDBCI_VM_PATH}/${mdbci_config_name}
vagrant resume
```

#### Destroying VMs

```bash
${mdbci_dir}/mdbci destroy $name
${mdbci_dir}/mdbci destroy ${mdbci_config_name}
```

#### Restoring backend
#### Start all backend VMs

Every test, before any actions, checks the backend and tries to restore it if it is broken.
To restore the backend separately and for the initial backend setup, 'check_backend' can be used:
Every test, before any actions, checks the backend and brings up the needed VMs.
To bring up all backend VMs without running any test, 'check_backend' can be used:

```bash
. ./mdbci/set_env.sh $name
./check_backend
```

#### Restoring VMs from snapshot

'run_test.sh' makes a snapshot of all VMs before the tests. The name of the snapshot is 'clean'.

In case of problems, after 'snapshot revert' it is recommended to re-create the
${name}_network_config file, re-load the environmental variables and run
'check_backend'

```bash
${mdbci_dir}/mdbci snapshot revert --path-to-nodes $name --snapshot-name clean
${mdbci_dir}/mdbci show network_config $name
. ./mdbci/set_env.sh $name
./check_backend
```

@ -1,5 +1,5 @@
|
||||
set -x
|
||||
rsync -a --no-o --no-g LOGS ${logs_publish_dir}
|
||||
chmod a+r ${logs_publish_dir}/*
|
||||
cp -r ${MDBCI_VM_PATH}/$name ${logs_publish_dir}
|
||||
cp ${MDBCI_VM_PATH}/${name}.json ${logs_publish_dir}
|
||||
cp -r ${MDBCI_VM_PATH}/${mdbci_config_name} ${logs_publish_dir}
|
||||
cp ${MDBCI_VM_PATH}/${mdbci_config_name}.json ${logs_publish_dir}
|
||||
|
@ -7,14 +7,11 @@ export script_dir="$(dirname $(readlink -f $0))"
|
||||
|
||||
. ${script_dir}/set_run_test_variables.sh
|
||||
|
||||
export provider=`${mdbci_dir}/mdbci show provider $box --silent 2> /dev/null`
|
||||
export backend_box=${backend_box:-"centos_7_"$provider}
|
||||
|
||||
if [ "$product" == "mysql" ] ; then
|
||||
export cnf_path=${script_dir}/cnf/mysql56
|
||||
fi
|
||||
|
||||
${mdbci_dir}/mdbci destroy $name
|
||||
mdbci destroy $name
|
||||
mkdir -p ${MDBCI_VM_PATH}/$name
|
||||
|
||||
export cnf_path="${MDBCI_VM_PATH}/$name/cnf/"
|
||||
@ -27,22 +24,20 @@ fi
|
||||
$(<${script_dir}/templates/${template}.json.template)
|
||||
" 2> /dev/null > ${MDBCI_VM_PATH}/${name}.json
|
||||
|
||||
${mdbci_dir}/mdbci --override --template ${MDBCI_VM_PATH}/${name}.json generate $name
|
||||
mdbci --override --template ${MDBCI_VM_PATH}/${name}.json generate $name
|
||||
|
||||
mkdir ${MDBCI_VM_PATH}/$name/cnf
|
||||
cp -r ${script_dir}/cnf/* ${MDBCI_VM_PATH}/$name/cnf/
|
||||
|
||||
echo "running vagrant up $provider"
|
||||
|
||||
${mdbci_dir}/mdbci up $name --attempts 3
|
||||
mdbci up $name --attempts 3 --labels MAXSCALE
|
||||
if [ $? != 0 ]; then
|
||||
echo "Error creating configuration"
|
||||
rm -f ~/vagrant_lock
|
||||
exit 1
|
||||
fi
|
||||
|
||||
#cp ~/build-scripts/team_keys .
|
||||
${mdbci_dir}/mdbci public_keys --key ${team_keys} $name
|
||||
mdbci public_keys --key ${team_keys} $name
|
||||
|
||||
rm -f ~/vagrant_lock
|
||||
exit 0
|
||||
|
@ -16,13 +16,6 @@
# If it is not defined, name will be automatically generated
# using $box and current date and time

# $ci_url - URL to Maxscale CI repository
# (default "http://max-tst-01.mariadb.com/ci-repository/")
# if build is done also locally and binaries are not uploaded to
# max-tst-01.mariadb.com $ci_url should point to local web server
# e.g. http://192.168.122.1/repository (IP should be a host IP in the
# virtual network (not 127.0.0.1))

# $product - 'mariadb' or 'mysql'

# $version - version of backend DB (e.g. '10.1', '10.2')
@ -36,7 +29,7 @@
# $team_keys - path to the file with open ssh keys to be
# installed on all VMs (default ${HOME}/team_keys)

# $don_not_destroy_vm - if 'yes' VM won't be destored afther the test
# $do_not_destroy_vm - if 'yes' VM won't be destroyed after the test

# $test_set - parameters to be sent to 'ctest' (e.g. '-I 1,100',
# '-LE UNSTABLE'
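A hedged example of a local invocation using the variables documented in the comments above; the values simply restate the defaults from set_run_test_variables.sh elsewhere in this commit, so they should be adjusted to the actual environment.

```bash
# Placeholder values - these mirror the defaults in set_run_test_variables.sh
export target=develop          # binary repository to install Maxscale from
export box=centos_7_libvirt    # Vagrant box for the Maxscale VM
export product=mariadb
export version=10.2
export test_set="-I 1,5"       # passed to ctest
export do_not_destroy_vm=yes   # keep the VMs alive after the run

./maxscale-system-test/mdbci/run_test.sh
```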
@ -58,60 +51,42 @@ export script_dir="$(dirname $(readlink -f $0))"
|
||||
rm -rf LOGS
|
||||
|
||||
export target=`echo $target | sed "s/?//g"`
|
||||
export name=`echo $name | sed "s/?//g"`
|
||||
export mdbci_config_name=`echo ${mdbci_config_name} | sed "s/?//g"`
|
||||
|
||||
export provider=`mdbci show provider $box --silent 2> /dev/null`
|
||||
export backend_box=${backend_box:-"centos_7_"$provider}
|
||||
|
||||
mdbci destroy ${mdbci_config_name}
|
||||
|
||||
. ${script_dir}/configure_log_dir.sh
|
||||
|
||||
${script_dir}/create_config.sh
|
||||
res=$?
|
||||
|
||||
ulimit -c unlimited
|
||||
if [ $res == 0 ] ; then
|
||||
. ${script_dir}/set_env.sh $name
|
||||
cd ${script_dir}/../../
|
||||
|
||||
mkdir build && cd build
|
||||
cmake .. -DBUILD_SYSTEM_TESTS=Y -DBUILDNAME=$name -DCMAKE_BUILD_TYPE=Debug
|
||||
cd maxscale-system-test
|
||||
make
|
||||
set -x
|
||||
echo ${test_set} | grep "NAME#"
|
||||
if [ $? == 0 ] ; then
|
||||
named_test=`echo ${test_set} | sed "s/NAME#//"`
|
||||
echo ${named_test} | grep "\./"
|
||||
if [ $? != 0 ] ; then
|
||||
named_test="./"${named_test}
|
||||
fi
|
||||
fi
|
||||
cd ${script_dir}/../../
|
||||
mkdir build && cd build
|
||||
cmake .. -DBUILD_SYSTEM_TESTS=Y -DBUILDNAME=${mdbci_config_name} -DCMAKE_BUILD_TYPE=Debug
|
||||
cd maxscale-system-test
|
||||
make
|
||||
|
||||
if [ ! -z "${named_test}" ] ; then
|
||||
eval ${named_test}
|
||||
else
|
||||
./check_backend
|
||||
if [ $? != 0 ]; then
|
||||
echo "Backend broken!"
|
||||
if [ "${do_not_destroy_vm}" != "yes" ] ; then
|
||||
${mdbci_dir}/mdbci destroy $name
|
||||
fi
|
||||
rm -f ~/vagrant_lock
|
||||
exit 1
|
||||
fi
|
||||
${mdbci_dir}/mdbci snapshot take --path-to-nodes $name --snapshot-name clean
|
||||
ctest -VV ${test_set}
|
||||
echo ${test_set} | grep "NAME#"
|
||||
if [ $? == 0 ] ; then
|
||||
named_test=`echo ${test_set} | sed "s/NAME#//"`
|
||||
echo ${named_test} | grep "\./"
|
||||
if [ $? != 0 ] ; then
|
||||
named_test="./"${named_test}
|
||||
fi
|
||||
cp core.* ${logs_publish_dir}
|
||||
${script_dir}/copy_logs.sh
|
||||
cd $dir
|
||||
else
|
||||
echo "Failed to create VMs, exiting"
|
||||
if [ "${do_not_destroy_vm}" != "yes" ] ; then
|
||||
${mdbci_dir}/mdbci destroy $name
|
||||
fi
|
||||
rm -f ~/vagrant_lock
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ ! -z "${named_test}" ] ; then
|
||||
eval ${named_test}
|
||||
else
|
||||
ctest -VV ${test_set}
|
||||
fi
|
||||
cp core.* ${logs_publish_dir}
|
||||
${script_dir}/copy_logs.sh
|
||||
cd $dir
|
||||
|
||||
if [ "${do_not_destroy_vm}" != "yes" ] ; then
|
||||
${mdbci_dir}/mdbci destroy $name
|
||||
mdbci destroy ${mdbci_config_name}
|
||||
echo "clean up done!"
|
||||
fi
|
||||
|
@ -19,19 +19,19 @@ export dir=`pwd`
|
||||
export script_dir="$(dirname $(readlink -f $0))"
|
||||
|
||||
. ${script_dir}/set_run_test_variables.sh
|
||||
export name="$box-$product-$version-permanent"
|
||||
export mdbci_config_name="$box-$product-$version-permanent"
|
||||
|
||||
export snapshot_name=${snapshot_name:-"clean"}
|
||||
|
||||
rm -rf LOGS
|
||||
|
||||
export target=`echo $target | sed "s/?//g"`
|
||||
export name=`echo $name | sed "s/?//g"`
|
||||
export mdbci_config_name=`echo ${mdbci_config_name} | sed "s/?//g"`
|
||||
|
||||
. ${script_dir}/configure_log_dir.sh
|
||||
|
||||
# Setting snapshot_lock
|
||||
export snapshot_lock_file=${MDBCI_VM_PATH}/${name}_snapshot_lock
|
||||
export snapshot_lock_file=${MDBCI_VM_PATH}/${mdbci_config_name}_snapshot_lock
|
||||
if [ -f ${snapshot_lock_file} ]; then
|
||||
echo "Snapshot is locked, waiting ..."
|
||||
fi
|
||||
@ -43,34 +43,18 @@ done
|
||||
touch ${snapshot_lock_file}
|
||||
echo $JOB_NAME-$BUILD_NUMBER >> ${snapshot_lock_file}
|
||||
|
||||
${mdbci_dir}/mdbci snapshot revert --path-to-nodes $name --snapshot-name $snapshot_name
|
||||
mdbci snapshot revert --path-to-nodes ${mdbci_config_name} --snapshot-name ${snapshot_name}
|
||||
|
||||
if [ $? != 0 ]; then
|
||||
${mdbci_dir}/mdbci destroy $name
|
||||
${MDBCI_VM_PATH}/scripts/clean_vms.sh $name
|
||||
mdbci destroy ${mdbci_config_name}
|
||||
${MDBCI_VM_PATH}/scripts/clean_vms.sh ${mdbci_config_name}
|
||||
|
||||
${script_dir}/create_config.sh
|
||||
checkExitStatus $? "Error creating configuration" $snapshot_lock_file
|
||||
new_config=true
|
||||
|
||||
echo "Creating snapshot from new config"
|
||||
${mdbci_dir}/mdbci snapshot take --path-to-nodes $name --snapshot-name $snapshot_name
|
||||
fi
|
||||
|
||||
. ${script_dir}/set_env.sh "$name"
|
||||
|
||||
if [ ${maxscale_N} -gt 1 ] ; then
|
||||
maxscales_vm=`env | grep maxscale | grep network | sed 's/_network.*//' | grep "_"`
|
||||
else
|
||||
maxscales_vm="maxscale"
|
||||
fi
|
||||
|
||||
for maxscale_vm_name in ${maxscales_vm}
|
||||
do
|
||||
${mdbci_dir}/mdbci sudo --command 'yum remove maxscale -y' $name/${maxscale_vm_name}
|
||||
${mdbci_dir}/mdbci sudo --command 'yum clean all' $name/${maxscale_vm_name}
|
||||
|
||||
${mdbci_dir}/mdbci setup_repo --product maxscale_ci --product-version ${target} $name/${maxscale_vm_name}
|
||||
${mdbci_dir}/mdbci install_product --product maxscale_ci $name/${maxscale_vm_name}
|
||||
|
||||
checkExitStatus $? "Error installing Maxscale" $snapshot_lock_file
|
||||
done
|
||||
@ -85,9 +69,15 @@ cmake .. -DBUILDNAME=$JOB_NAME-$BUILD_NUMBER-$target -DBUILD_SYSTEM_TESTS=Y -DCM
|
||||
cd maxscale-system-test
|
||||
make
|
||||
|
||||
./check_backend --restart-galera
|
||||
|
||||
./check_backend --restart-galera --reinstall-maxscale
|
||||
checkExitStatus $? "Failed to check backends" $snapshot_lock_file
|
||||
|
||||
if [ "${new_config}" == "true" ] ; then
|
||||
echo "Creating snapshot from new config"
|
||||
mdbci snapshot take --path-to-nodes ${mdbci_config_name} --snapshot-name $snapshot_name
|
||||
fi
|
||||
|
||||
|
||||
ulimit -c unlimited
|
||||
ctest $test_set -VV
|
||||
cp core.* ${logs_publish_dir}
|
||||
|
@ -1,87 +1,2 @@
|
||||
#!/bin/bash
|
||||
set -x
|
||||
echo $*
|
||||
export MDBCI_VM_PATH=${MDBCI_VM_PATH:-$HOME/vms}
|
||||
export mdbci_dir=${mdbci_dir:-"$HOME/mdbci/"}
|
||||
|
||||
export config_name="$1"
|
||||
if [ -z $1 ] ; then
|
||||
config_name="test1"
|
||||
fi
|
||||
|
||||
export curr_dir=`pwd`
|
||||
|
||||
export maxscale_binlog_dir="/var/lib/maxscale/Binlog_Service"
|
||||
export maxdir="/usr/bin/"
|
||||
export maxdir_bin="/usr/bin/"
|
||||
export maxscale_cnf="/etc/maxscale.cnf"
|
||||
export maxscale_log_dir="/var/log/maxscale/"
|
||||
|
||||
# Number of nodes
|
||||
export galera_N=`cat "$MDBCI_VM_PATH/$config_name"_network_config | grep galera | grep network | wc -l`
|
||||
export node_N=`cat "$MDBCI_VM_PATH/$config_name"_network_config | grep node | grep network | wc -l`
|
||||
export maxscale_N=`cat "$MDBCI_VM_PATH/$config_name"_network_config | grep maxscale | grep network | wc -l`
|
||||
sed "s/^/export /g" "$MDBCI_VM_PATH/$config_name"_network_config > "$curr_dir"/"$config_name"_network_config_export
|
||||
source "$curr_dir"/"$config_name"_network_config_export
|
||||
rm "$curr_dir"/"$config_name"_network_config_export
|
||||
|
||||
|
||||
# User name and Password for Master/Slave replication setup (should have all PRIVILEGES)
|
||||
export node_user="skysql"
|
||||
export node_password="skysql"
|
||||
|
||||
# User name and Password for Galera setup (should have all PRIVILEGES)
|
||||
export galera_user="skysql"
|
||||
export galera_password="skysql"
|
||||
|
||||
export maxscale_user="skysql"
|
||||
export maxscale_password="skysql"
|
||||
|
||||
export maxadmin_password="mariadb"
|
||||
|
||||
for prefix in "node" "galera" "maxscale"
|
||||
do
|
||||
N_var="$prefix"_N
|
||||
Nx=${!N_var}
|
||||
N=`expr $Nx - 1`
|
||||
for i in $(seq 0 $N)
|
||||
do
|
||||
num=`printf "%03d" $i`
|
||||
eval 'export "$prefix"_"$num"_port=3306'
|
||||
eval 'export "$prefix"_"$num"_access_sudo=sudo'
|
||||
|
||||
start_cmd_var="$prefix"_"$num"_start_db_command
|
||||
stop_cmd_var="$prefix"_"$num"_stop_db_command
|
||||
eval 'export $start_cmd_var="service mysql start "'
|
||||
eval 'export $stop_cmd_var="service mysql stop "'
|
||||
eval 'export "$prefix"_"$num"_start_vm_command="cd ${MDBCI_VM_PATH}/$config_name;vagrant resume ${prefix}_$num ; cd $curr_dir"'
|
||||
eval 'export "$prefix"_"$num"_stop_vm_command="cd ${MDBCI_VM_PATH}/$config_name;vagrant suspend ${prefix}_$num ; cd $curr_dir"'
|
||||
done
|
||||
done
|
||||
|
||||
|
||||
export maxscale_access_sudo="sudo "
|
||||
|
||||
# IP Of MaxScale machine
|
||||
if [ ${maxscale_N} -gt 1 ] ; then
|
||||
export maxscale_whoami=$maxscale_000_whoami
|
||||
export maxscale_network=$maxscale_000_network
|
||||
export maxscale_keyfile=$maxscale_000_keyfile
|
||||
export maxscale_sshkey=$maxscale_000_keyfile
|
||||
fi
|
||||
|
||||
export maxscale_IP=$maxscale_network
|
||||
export maxscale_access_user=$maxscale_whoami
|
||||
|
||||
# Sysbench directory (should be sysbench >= 0.5)
|
||||
sb=`which sysbench`
|
||||
export sysbench_dir=$(dirname ${sb})
|
||||
#export sysbench_dir=${sysbench_dir:-""}
|
||||
|
||||
export ssl=true
|
||||
|
||||
export take_snapshot_command="${mdbci_dir}/mdbci snapshot take --path-to-nodes ${config_name} --snapshot-name "
|
||||
export revert_snapshot_command="${mdbci_dir}/mdbci snapshot revert --path-to-nodes ${config_name} --snapshot-name "
|
||||
#export use_snapshots=yes
|
||||
|
||||
set +x
|
||||
export PATH=$PATH:$HOME/mdbci/
|
||||
|
@ -1,31 +1,15 @@
|
||||
#!/bin/bash
|
||||
|
||||
|
||||
export MDBCI_VM_PATH=${MDBCI_VM_PATH:-$HOME/vms}
|
||||
mkdir -p $MDBCI_VM_PATH
|
||||
echo "MDBCI_VM_PATH=$MDBCI_VM_PATH"
|
||||
|
||||
export box=${box:-"centos_7_libvirt"}
|
||||
echo "box=$box"
|
||||
|
||||
export template=${template:-"default"}
|
||||
|
||||
export MDBCI_VM_PATH=${MDBCI_VM_PATH:-"$HOME/vms/"}
|
||||
export curr_date=`date '+%Y-%m-%d_%H-%M'`
|
||||
export mdbci_config_name=${name:-$box-${curr_date}}
|
||||
|
||||
export name=${name:-$box-${curr_date}}
|
||||
export PATH=$PATH:$HOME/mdbci/
|
||||
|
||||
export mdbci_dir=${mdbci_dir:-"$HOME/mdbci/"}
|
||||
export ci_url=${ci_url:-"http://max-tst-01.mariadb.com/ci-repository/"}
|
||||
|
||||
export product=${product:-"mariadb"}
|
||||
export version=${version:-"10.2"}
|
||||
export target=${target:-"develop"}
|
||||
export vm_memory=${vm_memory:-"2048"}
|
||||
export JOB_NAME=${JOB_NAME:-"local_test"}
|
||||
export BUILD_NUMBER=${BUILD_NUMBER:-`date '+%Y%m%d%H%M'`}
|
||||
export BUILD_TAG=${BUILD_TAG:-jenkins-${JOB_NAME}-${BUILD_NUMBER}}
|
||||
export team_keys=${team_keys:-${HOME}/team_keys}
|
||||
export galera_version=${galera_version:-$version}
|
||||
export do_not_destroy_vm=${do_not_destroy_vm:-"yes"}
|
||||
#export test_set=${test_set:-"-LE UNSTABLE"}
|
||||
export test_set=${test_set:-"-I 1,5"}
|
||||
|
@ -3,6 +3,9 @@
|
||||
{
|
||||
"hostname" : "node000",
|
||||
"box" : "centos_7_aws_large",
|
||||
"labels" : [
|
||||
"REPL_BACKEND"
|
||||
],
|
||||
"product" : {
|
||||
"name": "${product}",
|
||||
"version": "${version}",
|
||||
@ -16,6 +19,9 @@
|
||||
{
|
||||
"hostname" : "node001",
|
||||
"box" : "centos_7_aws_large",
|
||||
"labels" : [
|
||||
"REPL_BACKEND"
|
||||
],
|
||||
"product" : {
|
||||
"name": "${product}",
|
||||
"version": "${version}",
|
||||
@ -28,6 +34,9 @@
|
||||
{
|
||||
"hostname" : "node002",
|
||||
"box" : "centos_7_aws_large",
|
||||
"labels" : [
|
||||
"REPL_BACKEND"
|
||||
],
|
||||
"product" : {
|
||||
"name": "${product}",
|
||||
"version": "${version}",
|
||||
@ -40,6 +49,9 @@
|
||||
{
|
||||
"hostname" : "node003",
|
||||
"box" : "centos_7_aws_large",
|
||||
"labels" : [
|
||||
"REPL_BACKEND"
|
||||
],
|
||||
"product" : {
|
||||
"name": "${product}",
|
||||
"version": "${version}",
|
||||
@ -51,6 +63,9 @@
|
||||
{
|
||||
"hostname" : "node004",
|
||||
"box" : "centos_7_aws_large",
|
||||
"labels" : [
|
||||
"REPL_BACKEND"
|
||||
],
|
||||
"product" : {
|
||||
"name": "${product}",
|
||||
"version": "${version}",
|
||||
@ -62,6 +77,9 @@
|
||||
{
|
||||
"hostname" : "node005",
|
||||
"box" : "centos_7_aws_large",
|
||||
"labels" : [
|
||||
"REPL_BACKEND"
|
||||
],
|
||||
"product" : {
|
||||
"name": "${product}",
|
||||
"version": "${version}",
|
||||
@ -73,6 +91,9 @@
|
||||
{
|
||||
"hostname" : "node006",
|
||||
"box" : "centos_7_aws_large",
|
||||
"labels" : [
|
||||
"REPL_BACKEND"
|
||||
],
|
||||
"product" : {
|
||||
"name": "${product}",
|
||||
"version": "${version}",
|
||||
@ -84,6 +105,9 @@
|
||||
{
|
||||
"hostname" : "node007",
|
||||
"box" : "centos_7_aws_large",
|
||||
"labels" : [
|
||||
"REPL_BACKEND"
|
||||
],
|
||||
"product" : {
|
||||
"name": "${product}",
|
||||
"version": "${version}",
|
||||
@ -96,6 +120,9 @@
|
||||
{
|
||||
"hostname" : "galera000",
|
||||
"box" : "centos_7_aws",
|
||||
"labels" : [
|
||||
"GALERA_BACKEND"
|
||||
],
|
||||
"product" : {
|
||||
"name": "galera",
|
||||
"version": "${galera_version}",
|
||||
@ -108,6 +135,9 @@
|
||||
{
|
||||
"hostname" : "galera001",
|
||||
"box" : "centos_7_aws",
|
||||
"labels" : [
|
||||
"GALERA_BACKEND"
|
||||
],
|
||||
"product" : {
|
||||
"name": "galera",
|
||||
"version": "${galera_version}",
|
||||
@ -120,6 +150,9 @@
|
||||
{
|
||||
"hostname" : "galera002",
|
||||
"box" : "centos_7_aws",
|
||||
"labels" : [
|
||||
"GALERA_BACKEND"
|
||||
],
|
||||
"product" : {
|
||||
"name": "galera",
|
||||
"version": "${galera_version}",
|
||||
@ -132,6 +165,9 @@
|
||||
{
|
||||
"hostname" : "galera003",
|
||||
"box" : "centos_7_aws",
|
||||
"labels" : [
|
||||
"GALERA_BACKEND"
|
||||
],
|
||||
"product" : {
|
||||
"name": "galera",
|
||||
"version": "${galera_version}",
|
||||
@ -140,10 +176,13 @@
|
||||
}
|
||||
},
|
||||
|
||||
"maxscale" :
|
||||
"maxscale_000" :
|
||||
{
|
||||
"hostname" : "maxscale",
|
||||
"box" : "centos_7_aws_large",
|
||||
"labels" : [
|
||||
"MAXSCALE"
|
||||
],
|
||||
"product" : {
|
||||
"name" : "maxscale_ci",
|
||||
"version" : "${target}"
|
||||
|
@ -3,6 +3,9 @@
|
||||
{
|
||||
"hostname" : "node_000",
|
||||
"box" : "centos_7_aws_large",
|
||||
"labels" : [
|
||||
"REPL_BACKEND"
|
||||
],
|
||||
"product" : {
|
||||
"name": "${product}",
|
||||
"version": "${version}",
|
||||
@ -16,6 +19,9 @@
|
||||
{
|
||||
"hostname" : "node_001",
|
||||
"box" : "centos_7_aws_large",
|
||||
"labels" : [
|
||||
"REPL_BACKEND"
|
||||
],
|
||||
"product" : {
|
||||
"name": "${product}",
|
||||
"version": "${version}",
|
||||
@ -28,6 +34,9 @@
|
||||
{
|
||||
"hostname" : "node_002",
|
||||
"box" : "centos_7_aws_large",
|
||||
"labels" : [
|
||||
"REPL_BACKEND"
|
||||
],
|
||||
"product" : {
|
||||
"name": "${product}",
|
||||
"version": "${version}",
|
||||
@ -40,6 +49,9 @@
|
||||
{
|
||||
"hostname" : "node_003",
|
||||
"box" : "centos_7_aws_large",
|
||||
"labels" : [
|
||||
"REPL_BACKEND"
|
||||
],
|
||||
"product" : {
|
||||
"name": "${product}",
|
||||
"version": "${version}",
|
||||
@ -51,6 +63,9 @@
|
||||
{
|
||||
"hostname" : "node_004",
|
||||
"box" : "centos_7_aws_large",
|
||||
"labels" : [
|
||||
"REPL_BACKEND"
|
||||
],
|
||||
"product" : {
|
||||
"name": "${product}",
|
||||
"version": "${version}",
|
||||
@ -62,6 +77,9 @@
|
||||
{
|
||||
"hostname" : "node_005",
|
||||
"box" : "centos_7_aws_large",
|
||||
"labels" : [
|
||||
"REPL_BACKEND"
|
||||
],
|
||||
"product" : {
|
||||
"name": "${product}",
|
||||
"version": "${version}",
|
||||
@ -73,6 +91,9 @@
|
||||
{
|
||||
"hostname" : "node_006",
|
||||
"box" : "centos_7_aws_large",
|
||||
"labels" : [
|
||||
"REPL_BACKEND"
|
||||
],
|
||||
"product" : {
|
||||
"name": "${product}",
|
||||
"version": "${version}",
|
||||
@ -84,6 +105,9 @@
|
||||
{
|
||||
"hostname" : "node_007",
|
||||
"box" : "centos_7_aws_large",
|
||||
"labels" : [
|
||||
"REPL_BACKEND"
|
||||
],
|
||||
"product" : {
|
||||
"name": "${product}",
|
||||
"version": "${version}",
|
||||
@ -95,6 +119,9 @@
|
||||
{
|
||||
"hostname" : "node_008",
|
||||
"box" : "centos_7_aws_large",
|
||||
"labels" : [
|
||||
"REPL_BACKEND"
|
||||
],
|
||||
"product" : {
|
||||
"name": "${product}",
|
||||
"version": "${version}",
|
||||
@ -106,6 +133,9 @@
|
||||
{
|
||||
"hostname" : "node_009",
|
||||
"box" : "centos_7_aws_large",
|
||||
"labels" : [
|
||||
"REPL_BACKEND"
|
||||
],
|
||||
"product" : {
|
||||
"name": "${product}",
|
||||
"version": "${version}",
|
||||
@ -117,6 +147,9 @@
|
||||
{
|
||||
"hostname" : "node_0010",
|
||||
"box" : "centos_7_aws_large",
|
||||
"labels" : [
|
||||
"REPL_BACKEND"
|
||||
],
|
||||
"product" : {
|
||||
"name": "${product}",
|
||||
"version": "${version}",
|
||||
@ -128,6 +161,9 @@
|
||||
{
|
||||
"hostname" : "node_0011",
|
||||
"box" : "centos_7_aws_large",
|
||||
"labels" : [
|
||||
"REPL_BACKEND"
|
||||
],
|
||||
"product" : {
|
||||
"name": "${product}",
|
||||
"version": "${version}",
|
||||
@ -139,6 +175,9 @@
|
||||
{
|
||||
"hostname" : "node_0012",
|
||||
"box" : "centos_7_aws_large",
|
||||
"labels" : [
|
||||
"REPL_BACKEND"
|
||||
],
|
||||
"product" : {
|
||||
"name": "${product}",
|
||||
"version": "${version}",
|
||||
@ -150,6 +189,9 @@
|
||||
{
|
||||
"hostname" : "node_0013",
|
||||
"box" : "centos_7_aws_large",
|
||||
"labels" : [
|
||||
"REPL_BACKEND"
|
||||
],
|
||||
"product" : {
|
||||
"name": "${product}",
|
||||
"version": "${version}",
|
||||
@ -161,6 +203,9 @@
|
||||
{
|
||||
"hostname" : "node_0014",
|
||||
"box" : "centos_7_aws_large",
|
||||
"labels" : [
|
||||
"REPL_BACKEND"
|
||||
],
|
||||
"product" : {
|
||||
"name": "${product}",
|
||||
"version": "${version}",
|
||||
@ -173,6 +218,9 @@
|
||||
{
|
||||
"hostname" : "galera_000",
|
||||
"box" : "centos_7_aws",
|
||||
"labels" : [
|
||||
"GALERA_BACKEND"
|
||||
],
|
||||
"product" : {
|
||||
"name": "galera",
|
||||
"version": "${galera_version}",
|
||||
@ -185,6 +233,9 @@
|
||||
{
|
||||
"hostname" : "galera_001",
|
||||
"box" : "centos_7_aws",
|
||||
"labels" : [
|
||||
"GALERA_BACKEND"
|
||||
],
|
||||
"product" : {
|
||||
"name": "galera",
|
||||
"version": "${galera_version}",
|
||||
@ -197,6 +248,9 @@
|
||||
{
|
||||
"hostname" : "galera_002",
|
||||
"box" : "centos_7_aws",
|
||||
"labels" : [
|
||||
"GALERA_BACKEND"
|
||||
],
|
||||
"product" : {
|
||||
"name": "galera",
|
||||
"version": "${galera_version}",
|
||||
@ -209,6 +263,9 @@
|
||||
{
|
||||
"hostname" : "galera_003",
|
||||
"box" : "centos_7_aws",
|
||||
"labels" : [
|
||||
"GALERA_BACKEND"
|
||||
],
|
||||
"product" : {
|
||||
"name": "galera",
|
||||
"version": "${galera_version}",
|
||||
@ -217,10 +274,13 @@
|
||||
}
|
||||
},
|
||||
|
||||
"maxscale" :
|
||||
"maxscale_000" :
|
||||
{
|
||||
"hostname" : "maxscale",
|
||||
"box" : "centos_7_aws_large",
|
||||
"labels" : [
|
||||
"MAXSCALE"
|
||||
],
|
||||
"product" : {
|
||||
"name" : "maxscale_ci",
|
||||
"version" : "${target}"
|
||||
|
@ -4,6 +4,9 @@
"hostname" : "node000",
"box" : "${backend_box}",
"memory_size" : "${vm_memory}",
"labels" : [
"REPL_BACKEND"
],
"product" : {
"name": "${product}",
"version": "${version}",
@ -18,6 +21,9 @@
"hostname" : "node001",
"box" : "${backend_box}",
"memory_size" : "${vm_memory}",
"labels" : [
"REPL_BACKEND"
],
"product" : {
"name": "${product}",
"version": "${version}",
@ -31,6 +37,9 @@
"hostname" : "node002",
"box" : "${backend_box}",
"memory_size" : "${vm_memory}",
"labels" : [
"REPL_BACKEND"
],
"product" : {
"name": "${product}",
"version": "${version}",
@ -44,6 +53,9 @@
"hostname" : "node003",
"box" : "${backend_box}",
"memory_size" : "${vm_memory}",
"labels" : [
"REPL_BACKEND"
],
"product" : {
"name": "${product}",
"version": "${version}",
@ -57,6 +69,9 @@
"hostname" : "galera000",
"box" : "${backend_box}",
"memory_size" : "${vm_memory}",
"labels" : [
"GALERA_BACKEND"
],
"product" : {
"name": "galera",
"version": "${galera_version}",
@ -70,6 +85,9 @@
"hostname" : "galera001",
"box" : "${backend_box}",
"memory_size" : "${vm_memory}",
"labels" : [
"GALERA_BACKEND"
],
"product" : {
"name": "galera",
"version": "${galera_version}",
@ -83,6 +101,9 @@
"hostname" : "galera002",
"box" : "${backend_box}",
"memory_size" : "${vm_memory}",
"labels" : [
"GALERA_BACKEND"
],
"product" : {
"name": "galera",
"version": "${galera_version}",
@ -96,6 +117,9 @@
"hostname" : "galera003",
"box" : "${backend_box}",
"memory_size" : "${vm_memory}",
"labels" : [
"GALERA_BACKEND"
],
"product" : {
"name": "galera",
"version": "${galera_version}",
@ -109,6 +133,9 @@
"hostname" : "maxscale",
"box" : "${box}",
"memory_size" : "${vm_memory}",
"labels" : [
"MAXSCALE"
],
"product" : {
"name" : "maxscale_ci",
"version" : "${target}"
@ -121,6 +148,9 @@
"hostname" : "maxscale2",
"box" : "${box}",
"memory_size" : "${vm_memory}",
"labels" : [
"SECOND_MAXSCALE"
],
"product" : {
"name" : "maxscale_ci",
"version" : "${target}"
@ -1,66 +0,0 @@
{
"node_000" :
{
"hostname" : "node000",
"box" : "${backend_box}",
"memory_size" : "${vm_memory}",
"product" : {
"name": "${product}",
"version": "${version}",
"cnf_template" : "server1.cnf",
"cnf_template_path": "${cnf_path}"
}

},

"node_001" :
{
"hostname" : "node001",
"box" : "${backend_box}",
"memory_size" : "${vm_memory}",
"product" : {
"name": "${product}",
"version": "${version}",
"cnf_template" : "server2.cnf",
"cnf_template_path": "${cnf_path}"
}
},

"node_002" :
{
"hostname" : "node002",
"box" : "${backend_box}",
"memory_size" : "${vm_memory}",
"product" : {
"name": "${product}",
"version": "${version}",
"cnf_template" : "server3.cnf",
"cnf_template_path": "${cnf_path}"
}
},

"node_003" :
{
"hostname" : "node003",
"box" : "${backend_box}",
"memory_size" : "${vm_memory}",
"product" : {
"name": "${product}",
"version": "${version}",
"cnf_template" : "server4.cnf",
"cnf_template_path": "${cnf_path}"
}
},

"maxscale_000" :
{
"hostname" : "maxscale1",
"box" : "${box}",
"memory_size" : "${vm_memory}",
"product" : {
"name" : "maxscale_ci",
"version" : "${target}"
}

}
}
@ -1,118 +0,0 @@
{
"node_000" :
{
"hostname" : "node000",
"box" : "${backend_box}",
"memory_size" : "${vm_memory}",
"product" : {
"name": "${product}",
"version": "${version}",
"cnf_template" : "server1.cnf",
"cnf_template_path": "${cnf_path}"
}

},

"node_001" :
{
"hostname" : "node001",
"box" : "${backend_box}",
"memory_size" : "${vm_memory}",
"product" : {
"name": "${product}",
"version": "${version}",
"cnf_template" : "server2.cnf",
"cnf_template_path": "${cnf_path}"
}
},

"node_002" :
{
"hostname" : "node002",
"box" : "${backend_box}",
"memory_size" : "${vm_memory}",
"product" : {
"name": "${product}",
"version": "${version}",
"cnf_template" : "server3.cnf",
"cnf_template_path": "${cnf_path}"
}
},

"node_003" :
{
"hostname" : "node003",
"box" : "${backend_box}",
"memory_size" : "${vm_memory}",
"product" : {
"name": "${product}",
"version": "${version}",
"cnf_template" : "server4.cnf",
"cnf_template_path": "${cnf_path}"
}
},

"galera_000" :
{
"hostname" : "galera000",
"box" : "${backend_box}",
"memory_size" : "${vm_memory}",
"product" : {
"name": "galera",
"version": "${galera_version}",
"cnf_template" : "galera_server1.cnf",
"cnf_template_path": "${cnf_path}"
}
},

"galera_001" :
{
"hostname" : "galera001",
"box" : "${backend_box}",
"memory_size" : "${vm_memory}",
"product" : {
"name": "galera",
"version": "${galera_version}",
"cnf_template" : "galera_server2.cnf",
"cnf_template_path": "${cnf_path}"
}
},

"galera_002" :
{
"hostname" : "galera002",
"box" : "${backend_box}",
"memory_size" : "${vm_memory}",
"product" : {
"name": "galera",
"version": "${galera_version}",
"cnf_template" : "galera_server3.cnf",
"cnf_template_path": "${cnf_path}"
}
},

"galera_003" :
{
"hostname" : "galera003",
"box" : "${backend_box}",
"memory_size" : "${vm_memory}",
"product" : {
"name": "galera",
"version": "${galera_version}",
"cnf_template" : "galera_server4.cnf",
"cnf_template_path": "${cnf_path}"
}
},

"maxscale" :
{
"hostname" : "maxscale",
"box" : "${box}",
"memory_size" : "${vm_memory}",
"product" : {
"name" : "maxscale_ci",
"version" : "${target}"
}

}
}
@ -1,78 +0,0 @@
{
"node_000" :
{
"hostname" : "node000",
"box" : "${backend_box}",
"memory_size" : "${vm_memory}",
"product" : {
"name": "${product}",
"version": "${version}",
"cnf_template" : "server1.cnf",
"cnf_template_path": "${cnf_path}"
}

},

"node_001" :
{
"hostname" : "node001",
"box" : "${backend_box}",
"memory_size" : "${vm_memory}",
"product" : {
"name": "${product}",
"version": "${version}",
"cnf_template" : "server2.cnf",
"cnf_template_path": "${cnf_path}"
}
},

"node_002" :
{
"hostname" : "node002",
"box" : "${backend_box}",
"memory_size" : "${vm_memory}",
"product" : {
"name": "${product}",
"version": "${version}",
"cnf_template" : "server3.cnf",
"cnf_template_path": "${cnf_path}"
}
},

"node_003" :
{
"hostname" : "node003",
"box" : "${backend_box}",
"memory_size" : "${vm_memory}",
"product" : {
"name": "${product}",
"version": "${version}",
"cnf_template" : "server4.cnf",
"cnf_template_path": "${cnf_path}"
}
},

"maxscale_000" :
{
"hostname" : "maxscale",
"box" : "${box}",
"memory_size" : "${vm_memory}",
"product" : {
"name" : "maxscale_ci",
"version" : "${target}"
}

},

"maxscale_001" :
{
"hostname" : "maxscale",
"box" : "${box}",
"memory_size" : "${vm_memory}",
"product" : {
"name" : "maxscale_ci",
"version" : "${target}"
}

}
}
@ -334,10 +334,11 @@ void restore_server_ids(TestConnections& test, const map<int, string>& server_id
{
for_each(server_ids_by_index.begin(),
server_ids_by_index.end(),
[&test](const pair<int, string>& server_id_by_index) {
test.try_query(test.galera->nodes[server_id_by_index.first],
"set GLOBAL server_id=%s", server_id_by_index.second.c_str());
});
[&test](const pair<int, string>& server_id_by_index)
{
test.try_query(test.galera->nodes[server_id_by_index.first],
"set GLOBAL server_id=%s", server_id_by_index.second.c_str());
});
}

// STOP SLAVE; START SLAVE cycle.
@ -345,9 +346,10 @@ void restart_slave(TestConnections& test, MYSQL* pSlave)
{
Row row;

auto replication_failed = [](const std::string& column) {
return column.find("Got fatal error") != string::npos;
};
auto replication_failed = [](const std::string & column)
{
return column.find("Got fatal error") != string::npos;
};

cout << "Stopping slave." << endl;
test.try_query(pSlave, "STOP SLAVE");
@ -441,7 +443,10 @@ int main(int argc, char* argv[])

if (setup_server_ids(test, &server_ids_by_index))
{
for (Approach approach : {Approach::GTID, Approach::FILE_POS})
for (Approach approach :
{
Approach::GTID, Approach::FILE_POS
})
{
inserted_rows = 0;

@ -483,9 +488,9 @@ int main(int argc, char* argv[])
{
if (setup_blr(test, pMaxscale, gtid, approach))
{
int slave_index = test.repl->N - 1; // We use the last slave.
int slave_index = test.galera->N - 1; // We use the last slave.

Mariadb_nodes& ms = *test.repl;
Mariadb_nodes& ms = *test.galera;
ms.connect(slave_index);

MYSQL* pSlave = ms.nodes[slave_index];
@ -39,22 +39,22 @@ int main(int argc, char** argv)
test.expect(conn.connect("test.test1"), "Failed to connect");

auto check = [&](const std::string& name) {
static int i = 1;
CDC::SRow row = conn.read();
static int i = 1;
CDC::SRow row = conn.read();

if (row)
{
test.expect(row->is_null(name),
"%d: `%s` is not null: %s",
i++,
name.c_str(),
row->value(name).c_str());
}
else
{
test.tprintf("Error: %s", conn.error().c_str());
}
};
if (row)
{
test.expect(row->is_null(name),
"%d: `%s` is not null: %s",
i++,
name.c_str(),
row->value(name).c_str());
}
else
{
test.tprintf("Error: %s", conn.error().c_str());
}
};

// The three inserts
check("some_date");
@ -23,7 +23,6 @@ int main(int argc, char* argv[])
int r = (Test->smoke) ? 1 : 3;

Test->set_timeout(5);
Test->repl->connect();
Test->maxscales->connect_maxscale(0);
MYSQL* router[3];
router[0] = Test->maxscales->conn_rwsplit[0];
@ -29,7 +29,14 @@ int main(int argc, char* argv[])
execute_query(Test->maxscales->conn_rwsplit[0], "GRANT SELECT ON test.t1 TO 'table_privilege'@'%%'");

Test->stop_timeout();
Test->repl->sync_slaves();
if (Test->repl)
{
Test->repl->sync_slaves();
}
else
{
Test->galera->sync_slaves();
}

Test->tprintf("Trying to connect using this user\n");
Test->set_timeout(20);
62 maxscale-system-test/mxs682_cyrillic.cpp Normal file
@ -0,0 +1,62 @@
/**
* @file mxs682_cyrillic.cpp put cyrillic letters to the table
* - put string with Cyrillic into table
* - check SELECT from backend
*/

#include "testconnections.h"

void check_val(MYSQL* conn, TestConnections& test)
{
char val[256] = "<failed to read value>";
test.set_timeout(30);
find_field(conn, "SELECT * FROM t2", "x", val);
test.tprintf("result: %s\n", val);
test.add_result(strcmp("Кот", val) != 0, "Wrong SELECT result: %s\n", val);
test.stop_timeout();
}

int main(int argc, char *argv[])
{
TestConnections test(argc, argv);
Mariadb_nodes* nodes = strstr(test.test_name, "galera") ? test.galera : test.repl;

test.set_timeout(60);

test.maxscales->connect();

auto conn = test.maxscales->conn_rwsplit[0];
execute_query_silent(conn, "DROP TABLE t2;");
test.try_query(conn, "CREATE TABLE t2 (x varchar(10));");
test.try_query(conn, "INSERT INTO t2 VALUES (\"Кот\");");

test.maxscales->disconnect();

test.stop_timeout();
if (test.repl)
{
test.repl->connect();
test.repl->sync_slaves();
test.repl->disconnect();
}
else
{
sleep(10);
}

test.set_timeout(60);
test.maxscales->connect();
check_val(test.maxscales->conn_rwsplit[0], test);
check_val(test.maxscales->conn_master[0], test);
check_val(test.maxscales->conn_slave[0], test);
test.maxscales->disconnect();

nodes->connect();
for (int i = 0; i < nodes->N; i++)
{
check_val(nodes->nodes[i], test);
}
nodes->disconnect();

return test.global_result;
}
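The new test above relies on the framework's `find_field()` and `try_query()` helpers to round-trip a Cyrillic string through MaxScale and each backend. As a rough illustration of what that round trip looks like without the framework, here is a minimal sketch using the MariaDB/MySQL C API directly; the host, port and credentials are placeholders, and the explicit `utf8` character-set option is an assumption of this sketch, not something the diff itself configures.

```
// Minimal sketch (not part of the commit): round-trip a Cyrillic value through a
// MySQL-compatible endpoint using the C API. Host/port/credentials are placeholders.
#include <mysql.h>
#include <cstring>
#include <cstdio>

int main()
{
    MYSQL* conn = mysql_init(nullptr);
    // Assumption: client and server talk utf8; the test framework may handle this differently.
    mysql_options(conn, MYSQL_SET_CHARSET_NAME, "utf8");

    if (!mysql_real_connect(conn, "127.0.0.1", "myuser", "mypasswd", "test", 4006, nullptr, 0))
    {
        std::fprintf(stderr, "connect failed: %s\n", mysql_error(conn));
        return 1;
    }

    mysql_query(conn, "DROP TABLE IF EXISTS t2");
    mysql_query(conn, "CREATE TABLE t2 (x varchar(10))");
    mysql_query(conn, "INSERT INTO t2 VALUES (\"Кот\")");

    int rc = 1;
    if (mysql_query(conn, "SELECT x FROM t2") == 0)
    {
        MYSQL_RES* res = mysql_store_result(conn);
        MYSQL_ROW row = mysql_fetch_row(res);
        // The value should come back byte-identical to what was inserted.
        rc = (row && std::strcmp(row[0], "Кот") == 0) ? 0 : 1;
        mysql_free_result(res);
    }

    mysql_close(conn);
    return rc;
}
```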
@ -4,14 +4,6 @@
## @file mxs791.sh Simple connect test in bash
## - connects to Maxscale, checks that defined in cmd line DB is selected

rp=`realpath $0`
export test_dir=`pwd`
export test_name="mxs791.sh"
echo test name is $test_name
$src_dir/mxs791_base.sh

$test_dir/mxs791_base.sh

res=$?

$test_dir/copy_logs.sh $test_name
exit $res
@ -1,32 +1,26 @@
#!/bin/bash

rp=`realpath $0`
export src_dir=`dirname $rp`
$PWD/non_native_setup $test_name

if [ $? -ne 0 ] ; then
echo "configuring maxscale failed"
exit 1
fi
export ssl_options="--ssl-cert=$src_dir/ssl-cert/client-cert.pem --ssl-key=$src_dir/ssl-cert/client-key.pem"

res=0
echo "Trying RWSplit"
echo "show tables" | mysql -u$maxscale_user -p$maxscale_password -h $maxscale_IP -P 4006 $ssl_option test
echo "show tables" | mysql -u$maxscale_user -p$maxscale_password -h ${maxscale_000_network} -P 4006 $ssl_option test
if [ $? != 0 ] ; then
res=1
echo "Can't connect to DB 'test'"
fi

echo "Trying ReadConn master"
echo "show tables" | mysql -u$maxscale_user -p$maxscale_password -h $maxscale_IP -P 4008 $ssl_options test
echo "show tables" | mysql -u$maxscale_user -p$maxscale_password -h ${maxscale_000_network} -P 4008 $ssl_options test
if [ $? != 0 ] ; then
res=1
echo "Can't connect to DB 'test'"
fi

echo "Trying ReadConn slave"
echo "show tables" | mysql -u$maxscale_user -p$maxscale_password -h $maxscale_IP -P 4009 $ssl_options test
echo "show tables" | mysql -u$maxscale_user -p$maxscale_password -h ${maxscale_000_network} -P 4009 $ssl_options test
if [ $? != 0 ] ; then
res=1
echo "Can't connect to DB 'test'"
@ -1,17 +0,0 @@
#!/bin/bash

###
## @file mxs791.sh Simple connect test in bash
## - connects to Maxscale, checks that defined in cmd line DB is selected

srcdir=$(dirname $(realpath $0))
export test_dir=`pwd`
export test_name="mxs791_galera.sh"
echo test name is $test_name

$srcdir/mxs791_base.sh

res=$?

$srcdir/copy_logs.sh $test_name
exit $res
@ -12,7 +12,7 @@ then
exit 1
fi

if [ "$maxscale_IP" == "" ]
if [ "${maxscale_000_network}" == "" ]
then
echo "Error: The environment variable maxscale_IP must be set."
exit 1
@ -42,19 +42,17 @@ res=0
[ -d log_$1 ] && rm -r log_$1
mkdir log_$1

echo

# Run the test
for t in `$2/t/*.test|xargs -L 1 basename`
for t in `ls $2/t/*.test|xargs -L 1 basename`
do
printf "$t:"
test_name=${t%%.test}
mysqltest --host=$maxscale_IP --port=$port \
mysqltest --host=${maxscale_000_network} --port=$port \
--user=$user --password=$password \
--logdir=log_$1 \
--test-file=$2/t/$test_name.test \
--result-file=$2/r/$test_name.result \
--silent
--result-file=$2/r/$test_name.result #\
# --silent

if [ $? -eq 0 ]
then
@ -65,9 +63,4 @@ do
fi
done

echo

# Copy logs from the VM
$src_dir/copy_logs.sh $1

exit $res
@ -3,6 +3,8 @@
|
||||
#include <cstring>
|
||||
#include <iostream>
|
||||
|
||||
#include "envv.h"
|
||||
|
||||
Nodes::Nodes()
|
||||
{
|
||||
}
|
||||
@ -114,8 +116,9 @@ char* Nodes::ssh_node_output(int node, const char* ssh, bool sudo, int* exit_cod
|
||||
char* cmd = (char*)malloc(strlen(ssh) + 1024);
|
||||
|
||||
generate_ssh_cmd(cmd, node, ssh, sudo);
|
||||
// tprintf("############ssh smd %s\n:", cmd);
|
||||
|
||||
FILE* output = popen(cmd, "r");
|
||||
|
||||
if (output == NULL)
|
||||
{
|
||||
printf("Error opening ssh %s\n", strerror(errno));
|
||||
@ -164,6 +167,7 @@ int Nodes::ssh_node(int node, const char* ssh, bool sudo)
|
||||
IP[node],
|
||||
verbose ? "" : " > /dev/null");
|
||||
}
|
||||
|
||||
int rc = 1;
|
||||
FILE* in = popen(cmd, "w");
|
||||
|
||||
@ -296,39 +300,15 @@ int Nodes::copy_from_node_legacy(const char* src, const char* dest, int i)
|
||||
|
||||
int Nodes::read_basic_env()
|
||||
{
|
||||
char* env;
|
||||
char env_name[64];
|
||||
sprintf(env_name, "%s_N", prefix);
|
||||
env = getenv(env_name);
|
||||
if (env != NULL)
|
||||
{
|
||||
sscanf(env, "%d", &N);
|
||||
}
|
||||
else
|
||||
{
|
||||
N = 1;
|
||||
}
|
||||
|
||||
sprintf(env_name, "%s_user", prefix);
|
||||
env = getenv(env_name);
|
||||
if (env != NULL)
|
||||
{
|
||||
sscanf(env, "%s", user_name);
|
||||
}
|
||||
else
|
||||
{
|
||||
sprintf(user_name, "skysql");
|
||||
}
|
||||
user_name = readenv(env_name, "skysql");
|
||||
|
||||
sprintf(env_name, "%s_password", prefix);
|
||||
env = getenv(env_name);
|
||||
if (env != NULL)
|
||||
{
|
||||
sscanf(env, "%s", password);
|
||||
}
|
||||
else
|
||||
{
|
||||
sprintf(password, "skysql");
|
||||
}
|
||||
password = readenv(env_name, "skysql");
|
||||
|
||||
N = get_N();
|
||||
|
||||
if ((N > 0) && (N < 255))
|
||||
{
|
||||
@ -336,155 +316,69 @@ int Nodes::read_basic_env()
|
||||
{
|
||||
// reading IPs
|
||||
sprintf(env_name, "%s_%03d_network", prefix, i);
|
||||
env = getenv(env_name);
|
||||
if (env == NULL)
|
||||
{
|
||||
sprintf(env_name, "%s_network", prefix);
|
||||
env = getenv(env_name);
|
||||
}
|
||||
if (env != NULL)
|
||||
{
|
||||
sprintf(IP[i], "%s", env);
|
||||
}
|
||||
IP[i] = get_nc_item((char*) env_name);
|
||||
|
||||
// reading private IPs
|
||||
sprintf(env_name, "%s_%03d_private_ip", prefix, i);
|
||||
env = getenv(env_name);
|
||||
if (env == NULL)
|
||||
IP_private[i] = get_nc_item((char*) env_name);
|
||||
if (IP_private[i] == NULL)
|
||||
{
|
||||
sprintf(env_name, "%s_private_ip", prefix);
|
||||
env = getenv(env_name);
|
||||
}
|
||||
if (env != NULL)
|
||||
{
|
||||
sprintf(IP_private[i], "%s", env);
|
||||
}
|
||||
else
|
||||
{
|
||||
sprintf(IP_private[i], "%s", IP[i]);
|
||||
IP_private[i] = IP[i];
|
||||
}
|
||||
setenv(env_name, IP_private[i], 1);
|
||||
|
||||
// reading IPv6
|
||||
sprintf(env_name, "%s_%03d_network6", prefix, i);
|
||||
env = getenv(env_name);
|
||||
if (env == NULL)
|
||||
IP6[i] = get_nc_item((char*) env_name);
|
||||
if (IP6[i] == NULL)
|
||||
{
|
||||
sprintf(env_name, "%s_network6", prefix);
|
||||
env = getenv(env_name);
|
||||
IP6[i] = IP[i];
|
||||
}
|
||||
if (env != NULL)
|
||||
{
|
||||
sprintf(IP6[i], "%s", env);
|
||||
}
|
||||
else
|
||||
{
|
||||
sprintf(IP6[i], "%s", IP[i]);
|
||||
}
|
||||
// reading sshkey
|
||||
setenv(env_name, IP6[i], 1);
|
||||
|
||||
//reading sshkey
|
||||
sprintf(env_name, "%s_%03d_keyfile", prefix, i);
|
||||
env = getenv(env_name);
|
||||
if (env == NULL)
|
||||
{
|
||||
sprintf(env_name, "%s_keyfile", prefix);
|
||||
env = getenv(env_name);
|
||||
}
|
||||
if (env != NULL)
|
||||
{
|
||||
sprintf(sshkey[i], "%s", env);
|
||||
}
|
||||
sshkey[i] = get_nc_item((char*) env_name);
|
||||
|
||||
|
||||
sprintf(env_name, "%s_%03d_whoami", prefix, i);
|
||||
env = getenv(env_name);
|
||||
if (env == NULL)
|
||||
access_user[i] = get_nc_item((char*) env_name);
|
||||
if (access_user[i] == NULL)
|
||||
{
|
||||
sprintf(env_name, "%s_whoami", prefix);
|
||||
env = getenv(env_name);
|
||||
}
|
||||
|
||||
if (env != NULL)
|
||||
{
|
||||
sprintf(access_user[i], "%s", env);
|
||||
}
|
||||
else
|
||||
{
|
||||
sprintf(access_user[i], "vagrant");
|
||||
access_user[i] = (char *) "vagrant";
|
||||
}
|
||||
setenv(env_name, access_user[i], 1);
|
||||
|
||||
sprintf(env_name, "%s_%03d_access_sudo", prefix, i);
|
||||
env = getenv(env_name);
|
||||
if (env == NULL)
|
||||
{
|
||||
sprintf(env_name, "%s_access_sudo", prefix);
|
||||
env = getenv(env_name);
|
||||
}
|
||||
if (env != NULL)
|
||||
{
|
||||
sprintf(access_sudo[i], "%s", env);
|
||||
}
|
||||
else
|
||||
{
|
||||
sprintf(access_sudo[i], " ");
|
||||
}
|
||||
access_sudo[i] = readenv(env_name, " sudo ");
|
||||
|
||||
if (strcmp(access_user[i], "root") == 0)
|
||||
{
|
||||
sprintf(access_homedir[i], "/%s/", access_user[i]);
|
||||
access_homedir[i] = (char *) "/root/";
|
||||
}
|
||||
else
|
||||
{
|
||||
access_homedir[i] = (char *) malloc(strlen(access_user[i] + 9));
|
||||
sprintf(access_homedir[i], "/home/%s/", access_user[i]);
|
||||
}
|
||||
|
||||
sprintf(env_name, "%s_%03d_hostname", prefix, i);
|
||||
env = getenv(env_name);
|
||||
if (env == NULL)
|
||||
hostname[i] = get_nc_item((char*) env_name);
|
||||
if (hostname[i] == NULL)
|
||||
{
|
||||
sprintf(env_name, "%s_hostname", prefix);
|
||||
env = getenv(env_name);
|
||||
}
|
||||
|
||||
if (env != NULL)
|
||||
{
|
||||
sprintf(hostname[i], "%s", env);
|
||||
}
|
||||
else
|
||||
{
|
||||
sprintf(hostname[i], "%s", IP[i]);
|
||||
hostname[i] = IP[i];
|
||||
}
|
||||
setenv(env_name, hostname[i], 1);
|
||||
|
||||
sprintf(env_name, "%s_%03d_start_vm_command", prefix, i);
|
||||
env = getenv(env_name);
|
||||
if (env == NULL)
|
||||
{
|
||||
sprintf(env_name, "%s_start_vm_command", prefix);
|
||||
env = getenv(env_name);
|
||||
}
|
||||
|
||||
if (env != NULL)
|
||||
{
|
||||
sprintf(start_vm_command[i], "%s", env);
|
||||
}
|
||||
else
|
||||
{
|
||||
sprintf(start_vm_command[i], "exit 0");
|
||||
}
|
||||
start_vm_command[i] = readenv(env_name, "curr_dir=`pwd`; cd %s/%s;vagrant resume %s_%03d ; cd $curr_dir",
|
||||
getenv("MDBCI_VM_PATH"), getenv("name"), prefix, i);
|
||||
setenv(env_name, start_vm_command[i], 1);
|
||||
|
||||
sprintf(env_name, "%s_%03d_stop_vm_command", prefix, i);
|
||||
env = getenv(env_name);
|
||||
if (env == NULL)
|
||||
{
|
||||
sprintf(env_name, "%s_stop_vm_command", prefix);
|
||||
env = getenv(env_name);
|
||||
}
|
||||
|
||||
if (env != NULL)
|
||||
{
|
||||
sprintf(stop_vm_command[i], "%s", env);
|
||||
}
|
||||
else
|
||||
{
|
||||
sprintf(stop_vm_command[i], "exit 0");
|
||||
}
|
||||
stop_vm_command[i] = readenv(env_name, "curr_dir=`pwd`; cd %s/%s;vagrant suspend %s_%03d ; cd $curr_dir",
|
||||
getenv("MDBCI_VM_PATH"), getenv("name"), prefix, i);
|
||||
setenv(env_name, stop_vm_command[i], 1);
|
||||
}
|
||||
}
|
||||
|
||||
@ -496,12 +390,52 @@ const char* Nodes::ip(int i) const
return use_ipv6 ? IP6[i] : IP[i];
}

char * Nodes::get_nc_item(char * item_name)
{
size_t start = network_config.find(item_name);
if (start == std::string::npos)
{
return NULL;
}
size_t end = network_config.find("\n", start);
size_t equal = network_config.find("=", start);
if (end == std::string::npos)
{
end = network_config.length();
}
if (equal == std::string::npos)
{
return NULL;
}

char * cstr = new char [end - equal + 1];
strcpy(cstr, network_config.substr(equal + 1, end - equal - 1).c_str());
setenv(item_name, cstr, 1);

return (cstr);
}

int Nodes::get_N()
{
int N = 0;
char item[strlen(prefix) + 13];
do
{
sprintf(item, "%s_%03d_network", prefix, N);
N++;
}
while (network_config.find(item) != std::string::npos);
sprintf(item, "%s_N", prefix);
setenv(item, std::to_string(N).c_str(), 1);
return N - 1 ;
}

int Nodes::start_vm(int node)
{
return(system(start_vm_command[node]));
return (system(start_vm_command[node]));
}

int Nodes::stop_vm(int node)
{
return(system(stop_vm_command[node]));
return (system(stop_vm_command[node]));
}
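The new `get_nc_item()` above pulls a single `name=value` pair out of the flat MDBCI `network_config` text and exports it into the environment. For readers unfamiliar with that file format, here is a rough, self-contained sketch of the same lookup done with `std::string` only; the helper name and the sample input are invented for illustration and are not part of the commit.

```
// Sketch only: look up "name=value" entries in an MDBCI-style network_config blob.
#include <iostream>
#include <string>

// Returns the value for 'name', or an empty string when the entry is missing.
std::string nc_lookup(const std::string& config, const std::string& name)
{
    size_t start = config.find(name + "=");
    if (start == std::string::npos)
    {
        return "";
    }
    size_t equal = config.find('=', start);
    size_t end = config.find('\n', start);
    if (end == std::string::npos)
    {
        end = config.length();   // the last line may not end with a newline
    }
    return config.substr(equal + 1, end - equal - 1);
}

int main()
{
    // Hypothetical excerpt of a network_config file.
    std::string config =
        "node_000_network=192.168.0.11\n"
        "node_001_network=192.168.0.12\n"
        "maxscale_000_network=192.168.0.21\n";

    std::cout << nc_lookup(config, "maxscale_000_network") << "\n";   // 192.168.0.21
    return 0;
}
```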
@ -1,5 +1,4 @@
|
||||
#ifndef NODES_H
|
||||
#define NODES_H
|
||||
#pragma once
|
||||
|
||||
#include <errno.h>
|
||||
#include <string>
|
||||
@ -17,16 +16,16 @@ class Nodes
|
||||
public:
|
||||
Nodes();
|
||||
|
||||
char IP[256][1024];
|
||||
char * IP[256];
|
||||
/**
|
||||
* @brief private IP address strings for every backend node (for AWS)
|
||||
*/
|
||||
|
||||
char IP_private[256][1024];
|
||||
char * IP_private[256];
|
||||
/**
|
||||
* @brief IP address strings for every backend node (IPv6)
|
||||
*/
|
||||
char IP6[256][1024];
|
||||
char * IP6[256];
|
||||
|
||||
/**
|
||||
* @brief use_ipv6 If true IPv6 addresses will be used to connect Maxscale and backed
|
||||
@ -37,7 +36,7 @@ public:
|
||||
/**
|
||||
* @brief Path to ssh key for every backend node
|
||||
*/
|
||||
char sshkey[256][4096];
|
||||
char * sshkey[256];
|
||||
|
||||
/**
|
||||
* @brief Number of backend nodes
|
||||
@ -52,38 +51,43 @@ public:
|
||||
/**
|
||||
* @brief access_user Unix users name to access nodes via ssh
|
||||
*/
|
||||
char access_user[256][256];
|
||||
char * access_user[256];
|
||||
|
||||
/**
|
||||
* @brief access_sudo empty if sudo is not needed or "sudo " if sudo is needed.
|
||||
*/
|
||||
char access_sudo[256][64];
|
||||
char * access_sudo[256];
|
||||
|
||||
/**
|
||||
* @brief access_homedir home directory of access_user
|
||||
*/
|
||||
char access_homedir[256][256];
|
||||
char * access_homedir[256];
|
||||
|
||||
char hostname[256][1024];
|
||||
char * hostname[256];
|
||||
|
||||
/**
|
||||
* @brief stop_vm_command Command to suspend VM
|
||||
*/
|
||||
char stop_vm_command[256][1024];
|
||||
char * stop_vm_command[256];
|
||||
/**
|
||||
*
|
||||
* @brief start_vm_command Command to resume VM
|
||||
*/
|
||||
char start_vm_command[256][1024];
|
||||
char * start_vm_command[256];
|
||||
|
||||
/**
|
||||
* @brief User name to access backend nodes
|
||||
*/
|
||||
char user_name[256];
|
||||
char * user_name;
|
||||
/**
|
||||
* @brief Password to access backend nodes
|
||||
*/
|
||||
char password[256];
|
||||
char * password;
|
||||
|
||||
/**
|
||||
* @brief network_config Content of MDBCI network_config file
|
||||
*/
|
||||
std::string network_config;
|
||||
|
||||
/**
|
||||
* @brief Verbose command output
|
||||
@ -174,6 +178,19 @@
*/
int read_basic_env();

/**
* @brief get_nc_item Find variable in the MDBCI network_config file
* @param item_name Name of the variable
* @return value of variable
*/
char *get_nc_item(char * item_name);

/**
* @brief get_N Calculate the number of nodes described in the network_config file
* @return Number of nodes
*/
int get_N();

/**
* @brief start_vm Start virtual machine
* @param node Node number
@ -191,5 +208,3 @@
private:
int check_node_ssh(int node);
};

#endif // NODES_H
@ -14,17 +14,26 @@ using namespace std;

int main(int argc, char* argv[])
{
if (argc < 2)
if (argc < 3)
{
return 1;
}

std::string sys =
std::string(test_dir) +
std::string("/") +
std::string(argv[2]) +
std::string(" ") +
std::string(argv[1]);

int local_argc = argc - 1;
char** local_argv = &argv[1];

TestConnections* Test = new TestConnections(local_argc, local_argv);
(void)Test;
TestConnections test(local_argc, local_argv);
sleep(3);
setenv("src_dir", test_dir, 1);

return 0;
test.add_result(system(sys.c_str()), "Test %s FAILED!", argv[1]);

return test.global_result;
}
@ -94,9 +94,9 @@ int main(int argc, char *argv[])
auto testconn = open_conn(maxscale_port, maxscale_ip, username, userpass);
if (testconn)
{
test.expect(execute_query(testconn, "SELECT 1") != 0,
"Query with user %s succeeded when failure was expected.", username.c_str());
mysql_close(testconn);
test.expect(execute_query(testconn, "SELECT 1") != 0,
"Query with user %s succeeded when failure was expected.", username.c_str());
mysql_close(testconn);
}
}
}
@ -1,5 +1,4 @@
#ifndef RDS_VPC_H
#define RDS_VPC_H
#pragma once

#include <iostream>
#include <unistd.h>
@ -259,5 +258,3 @@ public:
const char* gw_intern;
const char* sg_intern;
};

#endif // RDS_VPC_H
@ -3,30 +3,11 @@
###
## @file run_ctrl_c.sh
## check that Maxscale is reacting correctly on ctrc+c signal and termination does not take ages

rp=`realpath $0`
export src_dir=`dirname $rp`
export test_dir=`pwd`
export test_name=`basename $rp`

if [ $maxscale_IP == "127.0.0.1" ] ; then
echo local test is not supporte
exit 0
fi

$test_dir/non_native_setup $test_name

if [ $? -ne 0 ] ; then
echo "configuring maxscale failed"
exit 1
fi

scp -i $maxscale_sshkey -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -r $src_dir/test_ctrl_c/* $maxscale_access_user@$maxscale_IP:./
ssh -i $maxscale_sshkey -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no $maxscale_access_user@$maxscale_IP "export maxscale_access_sudo=$maxscale_access_sudo; ./test_ctrl_c.sh"

set -x
scp -i ${maxscale_000_keyfile} -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -r $src_dir/test_ctrl_c/* ${maxscale_000_whoami}@${maxscale_000_network}:./
ssh -i ${maxscale_000_keyfile} -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${maxscale_000_whoami}@${maxscale_000_network} "export maxscale_000_access_sudo=${maxscale_000_access_sudo}; ./test_ctrl_c.sh"
res=$?

ssh -i $maxscale_sshkey -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no $maxscale_access_user@$maxscale_IP "sudo rm -f /tmp/maxadmin.sock"
ssh -i ${maxscale_000_keyfile} -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${maxscale_000_whoami}@${maxscale_000_network} "sudo rm -f /tmp/maxadmin.sock"

$src_dir/copy_logs.sh run_ctrl_c
exit $res
@ -4,21 +4,10 @@
## @file run_session_hang.sh
## run a set of queries in the loop (see setmix.sql) using Perl client

rp=`realpath $0`
export src_dir=`dirname $rp`
export test_dir=`pwd`
export test_name=`basename $rp`

$test_dir/non_native_setup $test_name

if [ $? -ne 0 ]
then
echo "configuring maxscale failed"
exit 1
fi
export ssl_options="--ssl-cert=$src_dir/ssl-cert/client-cert.pem --ssl-key=$src_dir/ssl-cert/client-key.pem"

echo "drop table if exists t1; create table t1(id integer primary key); " | mysql -u$node_user -p$node_password -h$maxscale_IP -P 4006 $ssl_options test
echo "drop table if exists t1; create table t1(id integer primary key); " | mysql -u$node_user -p$node_password -h${maxscale_000_network} -P 4006 $ssl_options test

if [ $? -ne 0 ]
then
@ -37,7 +26,7 @@ fi

sleep 15

echo "show databases;" | mysql -u$node_user -p$node_password -h$maxscale_IP -P 4006 $ssl_options
echo "show databases;" | mysql -u$node_user -p$node_password -h${maxscale_000_network} -P 4006 $ssl_options
if [ $? -ne 0 ]
then
res=1
@ -46,13 +35,4 @@ fi
echo "Waiting for jobs"
wait

if [ $res -eq 1 ]
then
echo "Test FAILED"
else
echo "Test PASSED"
fi

$src_dir/copy_logs.sh run_session_hang

exit $res
@ -1,6 +1,7 @@
#!/bin/bash
for ((i=0 ; i<100 ; i++)) ;
do
mysql --host=$maxscale_IP -P 4006 -u $node_user -p$node_password --verbose --force --unbuffered=true --disable-reconnect $ssl_options > /dev/null < $src_dir/session_hang/setmix.sql >& /dev/null
echo "Iteration $i"
mysql --host=${maxscale_000_network} -P 4006 -u $node_user -p$node_password --verbose --force --unbuffered=true --disable-reconnect $ssl_options > /dev/null < $src_dir/session_hang/setmix.sql >& /dev/null
done

@ -1,5 +1,4 @@
#ifndef SQL_T1_H
#define SQL_T1_H
#pragma once

#include "mariadb_func.h"
#include "testconnections.h"
@ -72,6 +71,4 @@ int select_from_t1(MYSQL* conn, int N);
* @param conn MYSQL handler
* @return 0 if content of t1 is ok
*/
int check_if_t1_exists(MYSQL* conn);

#endif // SQL_T1_H
int check_if_t1_exists(MYSQL *conn);
@ -1,5 +1,4 @@
#ifndef SYSBENCH_COMMANDS_H
#define SYSBENCH_COMMANDS_H
#pragma once

/*const char * SYSBENCH_PREPARE =
* "sysbench oltp_read_write \
@ -73,5 +72,3 @@ const char* SYSBENCH_COMMAND_SHORT =
--mysql-db=test \
--threads=32 \
--max-requests=0 --report-interval=5 --time=300 run";

#endif // SYSBENCH_COMMANDS_H
@ -5,8 +5,9 @@ static struct
{
const char* test_name;
const char* test_template;
const char* test_labels;
} cnf_templates[] __attribute__((unused)) = {
@CNF_TEMPLATES@ {NULL, NULL}};
@CNF_TEMPLATES@ {NULL, NULL, NULL}};

/** The default template to use */
static const char * default_template __attribute__((unused)) = "replication";
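The table above is generated at build time (the `@CNF_TEMPLATES@` placeholder is substituted) and is terminated by an all-NULL sentinel entry, which is how the scan in `get_template_name()` later in this commit knows where to stop. As a stand-alone illustration of that lookup pattern, here is a small sketch with hand-written entries; the entry values are invented for the example, since the real table is machine-generated.

```
// Sketch only: linear lookup in a sentinel-terminated template table.
#include <cstring>
#include <cstdio>

struct CnfTemplate
{
    const char* test_name;
    const char* test_template;
    const char* test_labels;
};

// Hypothetical entries; the real array is generated from @CNF_TEMPLATES@.
static CnfTemplate cnf_templates[] =
{
    {"mxs682_cyrillic", "replication", "LABELS;REPL_BACKEND"},
    {"mxs791",          "replication", "LABELS;REPL_BACKEND"},
    {nullptr, nullptr, nullptr}     // sentinel marks the end of the table
};

const char* find_template(const char* test_name, const char** labels)
{
    *labels = nullptr;
    for (int i = 0; cnf_templates[i].test_name; i++)
    {
        if (std::strcmp(cnf_templates[i].test_name, test_name) == 0)
        {
            *labels = cnf_templates[i].test_labels;
            return cnf_templates[i].test_template;
        }
    }
    return "replication";   // fall back to the default template
}

int main()
{
    const char* labels = nullptr;
    std::printf("%s\n", find_template("mxs791", &labels));   // replication
    return 0;
}
```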
@ -1,5 +1,4 @@
#ifndef TEST_BINLOG_FNC_H
#define TEST_BINLOG_FNC_H
#pragma once

#include <iostream>
#include "testconnections.h"
@ -26,5 +25,3 @@ int start_transaction(TestConnections* Test);
* @param Test TestConnections object
*/
void test_binlog(TestConnections* Test);

#endif // TEST_BINLOG_FNC_H
@ -1,3 +1,3 @@
sleep 5
$maxscale_access_sudo /usr/bin/killall maxscale -s INT
sudo /usr/bin/killall maxscale -s INT
@ -1,6 +1,6 @@
#!/bin/bash

$maxscale_access_sudo service maxscale stop
sudo service maxscale stop

hm=`pwd`
$hm/start_killer.sh &
@ -10,7 +10,7 @@ fi

T="$(date +%s)"

$maxscale_access_sudo maxscale -d -U root
sudo maxscale -d -U root
if [ $? -ne 0 ] ; then
exit 1
fi
@ -9,13 +9,17 @@
|
||||
#include <sys/stat.h>
|
||||
#include <iostream>
|
||||
#include <sstream>
|
||||
#include <string>
|
||||
#include <fstream>
|
||||
#include <iostream>
|
||||
#include <maxbase/stacktrace.hh>
|
||||
|
||||
#include "mariadb_func.h"
|
||||
#include "maxadmin_operations.h"
|
||||
#include "sql_t1.h"
|
||||
#include "testconnections.h"
|
||||
#include "labels_table.h"
|
||||
#include "envv.h"
|
||||
|
||||
using namespace mxb;
|
||||
using std::cout;
|
||||
@ -145,14 +149,12 @@ TestConnections::TestConnections(int argc, char* argv[])
|
||||
#endif
|
||||
gettimeofday(&start_time, NULL);
|
||||
|
||||
read_env();
|
||||
repl = NULL;
|
||||
galera = NULL;
|
||||
maxscales = NULL;
|
||||
reinstall_maxscale = false;
|
||||
|
||||
char* gal_env = getenv("galera_000_network");
|
||||
if ((gal_env == NULL) || (strcmp(gal_env, "") == 0 ))
|
||||
{
|
||||
no_galera = true;
|
||||
tprintf("Galera backend variables are not defined, Galera won't be used\n");
|
||||
}
|
||||
read_env();
|
||||
|
||||
bool maxscale_init = true;
|
||||
|
||||
@ -170,6 +172,7 @@ TestConnections::TestConnections(int argc, char* argv[])
|
||||
{"no-timeouts", no_argument, 0, 'z' },
|
||||
{"no-galera", no_argument, 0, 'y' },
|
||||
{"local-maxscale", optional_argument, 0, 'l' },
|
||||
{"reinstall-maxscale",no_argument, 0, 'm' },
|
||||
{0, 0, 0, 0 }
|
||||
};
|
||||
|
||||
@ -257,21 +260,77 @@ TestConnections::TestConnections(int argc, char* argv[])
|
||||
}
|
||||
break;
|
||||
|
||||
case 'm':
|
||||
printf("Maxscale will be reinstalled");
|
||||
reinstall_maxscale = true;
|
||||
break;
|
||||
|
||||
default:
|
||||
printf("UNKNOWN OPTION: %c\n", c);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (optind < argc)
|
||||
test_name = basename(argv[0]);
|
||||
if (!strcmp(test_name, "non_native_setup"))
|
||||
{
|
||||
test_name = argv[optind];
|
||||
}
|
||||
else
|
||||
{
|
||||
test_name = basename(argv[0]);
|
||||
test_name = argv[1];
|
||||
}
|
||||
|
||||
const char * labels_string = NULL;
|
||||
template_name = get_template_name(test_name, &labels_string);
|
||||
labels = strstr(labels_string, "LABELS;");
|
||||
if (!labels)
|
||||
{
|
||||
labels = (char* ) "LABELS;REPL_BACKEND";
|
||||
}
|
||||
|
||||
mdbci_labels = get_mdbci_lables(labels);
|
||||
|
||||
std::string delimiter = std::string (",");
|
||||
size_t pos_start = 0, pos_end, delim_len = delimiter.length();
|
||||
std::string label;
|
||||
std::string mdbci_labels_c = mdbci_labels + delimiter;
|
||||
|
||||
bool mdbci_call_needed = false;
|
||||
|
||||
while ((pos_end = mdbci_labels_c.find (delimiter, pos_start)) != std::string::npos)
|
||||
{
|
||||
label = mdbci_labels_c.substr (pos_start, pos_end - pos_start);
|
||||
pos_start = pos_end + delim_len;
|
||||
if (configured_labels.find(label, 0) == std::string::npos)
|
||||
{
|
||||
mdbci_call_needed = true;
|
||||
tprintf("Machines with label '%s' are not running, MDBCI UP call is needed", label.c_str());
|
||||
}
|
||||
else
|
||||
{
|
||||
tprintf("Machines with label '%s' are running, MDBCI UP call is not needed", label.c_str());
|
||||
}
|
||||
}
|
||||
|
||||
if (mdbci_call_needed)
|
||||
{
|
||||
if (call_mdbci(""))
|
||||
{
|
||||
exit(MDBCI_FAUILT);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if (mdbci_labels.find(std::string("REPL_BACKEND")) == std::string::npos)
|
||||
{
|
||||
no_repl = true;
|
||||
tprintf("No need to use Master/Slave");
|
||||
}
|
||||
|
||||
if (mdbci_labels.find(std::string("GALERA_BACKEND")) == std::string::npos)
|
||||
{
|
||||
no_galera = true;
|
||||
tprintf("No need to use Galera");
|
||||
}
|
||||
|
||||
get_logs_command = (char *) malloc(strlen(test_dir) + 14);
|
||||
sprintf(get_logs_command, "%s/get_logs.sh", test_dir);
|
||||
|
||||
sprintf(ssl_options,
|
||||
@ -280,102 +339,146 @@ TestConnections::TestConnections(int argc, char* argv[])
|
||||
test_dir);
|
||||
setenv("ssl_options", ssl_options, 1);
|
||||
|
||||
if (no_galera && maxscale::require_galera)
|
||||
{
|
||||
cout << "Galera not in use, skipping test" << endl;
|
||||
exit(0);
|
||||
}
|
||||
|
||||
if (maxscale::require_columnstore)
|
||||
{
|
||||
cout << "ColumnStore testing is not yet implemented, skipping test" << endl;
|
||||
exit(0);
|
||||
}
|
||||
|
||||
repl = new Mariadb_nodes("node", test_dir, verbose);
|
||||
if (!no_repl)
|
||||
{
|
||||
repl = new Mariadb_nodes("node", test_dir, verbose, network_config);
|
||||
repl->use_ipv6 = use_ipv6;
|
||||
repl->take_snapshot_command = take_snapshot_command;
|
||||
repl->revert_snapshot_command = revert_snapshot_command;
|
||||
if (repl->check_nodes())
|
||||
{
|
||||
if (call_mdbci("--recreate"))
|
||||
{
|
||||
exit(MDBCI_FAUILT);
|
||||
}
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
repl = NULL;
|
||||
}
|
||||
|
||||
if (!no_galera)
|
||||
{
|
||||
galera = new Galera_nodes("galera", test_dir, verbose);
|
||||
// galera->use_ipv6 = use_ipv6;
|
||||
galera = new Galera_nodes("galera", test_dir, verbose, network_config);
|
||||
//galera->use_ipv6 = use_ipv6;
|
||||
galera->use_ipv6 = false;
|
||||
galera->take_snapshot_command = take_snapshot_command;
|
||||
galera->revert_snapshot_command = revert_snapshot_command;
|
||||
if (galera->check_nodes())
|
||||
{
|
||||
if (call_mdbci("--recreate"))
|
||||
{
|
||||
exit(MDBCI_FAUILT);
|
||||
}
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
galera = NULL;
|
||||
}
|
||||
|
||||
repl->use_ipv6 = use_ipv6;
|
||||
repl->take_snapshot_command = take_snapshot_command;
|
||||
repl->revert_snapshot_command = revert_snapshot_command;
|
||||
maxscales = new Maxscales("maxscale", test_dir, verbose, use_valgrind, network_config);
|
||||
if (maxscales->check_nodes() ||
|
||||
((maxscales->N < 2) && (mdbci_labels.find(std::string("SECOND_MAXSCALE")) != std::string::npos))
|
||||
)
|
||||
{
|
||||
if (call_mdbci("--recreate"))
|
||||
{
|
||||
exit(MDBCI_FAUILT);
|
||||
}
|
||||
}
|
||||
|
||||
if (reinstall_maxscale)
|
||||
{
|
||||
if (reinstall_maxscales())
|
||||
{
|
||||
tprintf("Failed to install Maxscale: target is %s", target);
|
||||
exit(MDBCI_FAUILT);
|
||||
}
|
||||
}
|
||||
|
||||
std::string src = std::string(test_dir) + "/mdbci/add_core_cnf.sh";
|
||||
maxscales->copy_to_node(0, src.c_str(), maxscales->access_homedir[0]);
|
||||
maxscales->ssh_node_f(0, true, "%s/add_core_cnf.sh %s", maxscales->access_homedir[0],
|
||||
verbose ? "verbose" : "");
|
||||
|
||||
maxscales = new Maxscales("maxscale", test_dir, verbose, use_valgrind);
|
||||
|
||||
maxscales->use_ipv6 = use_ipv6;
|
||||
maxscales->ssl = ssl;
|
||||
|
||||
if (maxscale::required_repl_version.length())
|
||||
// Stop MaxScale to prevent it from interfering with the replication setup process
|
||||
if (!maxscale::manual_debug)
|
||||
{
|
||||
int ver_repl_required = get_int_version(maxscale::required_repl_version);
|
||||
std::string ver_repl = repl->get_lowest_version();
|
||||
int int_ver_repl = get_int_version(ver_repl);
|
||||
|
||||
if (int_ver_repl < ver_repl_required)
|
||||
for (int i = 0; i < maxscales->N; i++)
|
||||
{
|
||||
tprintf("Test requires a higher version of backend servers, skipping test.");
|
||||
tprintf("Required version: %s", maxscale::required_repl_version.c_str());
|
||||
tprintf("Master-slave version: %s", ver_repl.c_str());
|
||||
exit(0);
|
||||
maxscales->stop(i);
|
||||
}
|
||||
}
|
||||
|
||||
if (maxscale::required_galera_version.length())
|
||||
if (repl)
|
||||
{
|
||||
int ver_galera_required = get_int_version(maxscale::required_galera_version);
|
||||
std::string ver_galera = galera->get_lowest_version();
|
||||
int int_ver_galera = get_int_version(ver_galera);
|
||||
|
||||
if (int_ver_galera < ver_galera_required)
|
||||
if (maxscale::required_repl_version.length())
|
||||
{
|
||||
tprintf("Test requires a higher version of backend servers, skipping test.");
|
||||
tprintf("Required version: %s", maxscale::required_galera_version.c_str());
|
||||
tprintf("Galera version: %s", ver_galera.c_str());
|
||||
exit(0);
|
||||
int ver_repl_required = get_int_version(maxscale::required_repl_version);
|
||||
std::string ver_repl = repl->get_lowest_version();
|
||||
int int_ver_repl = get_int_version(ver_repl);
|
||||
|
||||
if (int_ver_repl < ver_repl_required)
|
||||
{
|
||||
tprintf("Test requires a higher version of backend servers, skipping test.");
|
||||
tprintf("Required version: %s", maxscale::required_repl_version.c_str());
|
||||
tprintf("Master-slave version: %s", ver_repl.c_str());
|
||||
exit(0);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if ((maxscale::restart_galera) && (!no_galera))
|
||||
if (galera)
|
||||
{
|
||||
if (maxscale::required_galera_version.length())
|
||||
{
|
||||
int ver_galera_required = get_int_version(maxscale::required_galera_version);
|
||||
std::string ver_galera = galera->get_lowest_version();
|
||||
int int_ver_galera = get_int_version(ver_galera);
|
||||
|
||||
if (int_ver_galera < ver_galera_required)
|
||||
{
|
||||
tprintf("Test requires a higher version of backend servers, skipping test.");
|
||||
tprintf("Required version: %s", maxscale::required_galera_version.c_str());
|
||||
tprintf("Galera version: %s", ver_galera.c_str());
|
||||
exit(0);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if ((maxscale::restart_galera) && (galera))
|
||||
{
|
||||
galera->stop_nodes();
|
||||
galera->start_replication();
|
||||
}
|
||||
|
||||
bool snapshot_reverted = false;
|
||||
|
||||
if (use_snapshots)
|
||||
if (maxscale::check_nodes)
|
||||
{
|
||||
snapshot_reverted = revert_snapshot((char*) "clean");
|
||||
}
|
||||
|
||||
if (!snapshot_reverted && maxscale::check_nodes
|
||||
&& (repl->check_replication() || (!no_galera && galera->check_replication())))
|
||||
{
|
||||
// Stop MaxScale to prevent it from interfering with the replication setup process
|
||||
if (!maxscale::manual_debug)
|
||||
if (repl)
|
||||
{
|
||||
maxscales->stop_all();
|
||||
if (!repl->fix_replication() )
|
||||
{
|
||||
exit(BROKEN_VM_FAUILT);
|
||||
}
|
||||
}
|
||||
|
||||
if (!repl->fix_replication())
|
||||
{
|
||||
exit(200);
|
||||
}
|
||||
if (!no_galera)
|
||||
if (galera)
|
||||
{
|
||||
if (!galera->fix_replication())
|
||||
{
|
||||
exit(200);
|
||||
exit(BROKEN_VM_FAUILT);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -389,7 +492,7 @@ TestConnections::TestConnections(int argc, char* argv[])
|
||||
{
|
||||
tprintf("Configuring backends for ssl \n");
|
||||
repl->configure_ssl(true);
|
||||
if (!no_galera)
|
||||
if (galera)
|
||||
{
|
||||
galera->configure_ssl(false);
|
||||
galera->start_replication();
|
||||
@ -448,8 +551,11 @@ TestConnections::~TestConnections()
|
||||
* }
|
||||
*/
|
||||
|
||||
delete repl;
|
||||
if (!no_galera)
|
||||
if (repl)
|
||||
{
|
||||
delete repl;
|
||||
}
|
||||
if (galera)
|
||||
{
|
||||
delete galera;
|
||||
}
|
||||
@ -507,129 +613,80 @@ void TestConnections::expect(bool result, const char* format, ...)
|
||||
}
|
||||
}
|
||||
|
||||
void TestConnections::read_mdbci_info()
|
||||
{
|
||||
mdbci_vm_path = readenv("MDBCI_VM_PATH", "%s/vms/", getenv("HOME"));
|
||||
|
||||
if (system((std::string("mkdir -p ") +
|
||||
std::string(mdbci_vm_path)).c_str()))
|
||||
{
|
||||
tprintf("Unable to create MDBCI VMs direcory '%s', exiting", mdbci_vm_path);
|
||||
exit(MDBCI_FAUILT);
|
||||
}
|
||||
mdbci_template = readenv("template", "default");
|
||||
target = readenv("target", "develop");
|
||||
|
||||
mdbci_config_name = readenv("mdbci_config_name", "local");
|
||||
vm_path = std::string(mdbci_vm_path) + std::string(mdbci_config_name);
|
||||
if (mdbci_config_name != NULL)
|
||||
{
|
||||
std::ifstream nc_file;
|
||||
nc_file.open(vm_path + "_network_config");
|
||||
std::stringstream strStream;
|
||||
strStream << nc_file.rdbuf();
|
||||
network_config = strStream.str();
|
||||
nc_file.close();
|
||||
|
||||
nc_file.open(vm_path + "_configured_labels");
|
||||
std::stringstream strStream1;
|
||||
strStream1 << nc_file.rdbuf();
|
||||
configured_labels = strStream1.str();
|
||||
nc_file.close();
|
||||
}
|
||||
else
|
||||
{
|
||||
tprintf("The name of MDBCI configuration is not defined, exiting!");
|
||||
exit(1);
|
||||
}
|
||||
if (verbose)
|
||||
{
|
||||
tprintf(network_config.c_str());
|
||||
}
|
||||
}
|
||||
|
||||
void TestConnections::read_env()
|
||||
{
|
||||
char* env;
|
||||
|
||||
read_mdbci_info();
|
||||
if (verbose)
|
||||
{
|
||||
printf("Reading test setup configuration from environmental variables\n");
|
||||
}
|
||||
|
||||
// env = getenv("get_logs_command"); if (env != NULL) {sprintf(get_logs_command, "%s", env);}
|
||||
ssl = readenv_bool("ssl", true);
|
||||
|
||||
env = getenv("sysbench_dir");
|
||||
if (env != NULL)
|
||||
{
|
||||
sprintf(sysbench_dir, "%s", env);
|
||||
}
|
||||
|
||||
// env = getenv("test_dir"); if (env != NULL) {sprintf(test_dir, "%s", env);}
|
||||
|
||||
ssl = false;
|
||||
env = getenv("ssl");
|
||||
if ((env != NULL) && ((strcasecmp(env, "yes") == 0) || (strcasecmp(env, "true") == 0)))
|
||||
{
|
||||
ssl = true;
|
||||
}
|
||||
env = getenv("mysql51_only");
|
||||
if ((env != NULL) && ((strcasecmp(env, "yes") == 0) || (strcasecmp(env, "true") == 0)))
|
||||
if (readenv_bool("mysql51_only", false) || readenv_bool("no_nodes_check", false))
|
||||
{
|
||||
maxscale::check_nodes = false;
|
||||
}
|
||||
|
||||
env = getenv("no_nodes_check");
|
||||
if ((env != NULL) && ((strcasecmp(env, "yes") == 0) || (strcasecmp(env, "true") == 0)))
|
||||
{
|
||||
maxscale::check_nodes = false;
|
||||
}
|
||||
env = getenv("no_backend_log_copy");
|
||||
if ((env != NULL) && ((strcasecmp(env, "yes") == 0) || (strcasecmp(env, "true") == 0)))
|
||||
{
|
||||
no_backend_log_copy = true;
|
||||
}
|
||||
env = getenv("no_maxscale_log_copy");
|
||||
if ((env != NULL) && ((strcasecmp(env, "yes") == 0) || (strcasecmp(env, "true") == 0)))
|
||||
{
|
||||
no_maxscale_log_copy = true;
|
||||
}
|
||||
env = getenv("use_ipv6");
|
||||
if ((env != NULL) && ((strcasecmp(env, "yes") == 0) || (strcasecmp(env, "true") == 0)))
|
||||
{
|
||||
use_ipv6 = true;
|
||||
}
|
||||
|
||||
env = getenv("backend_ssl");
|
||||
if (env != NULL && ((strcasecmp(env, "yes") == 0) || (strcasecmp(env, "true") == 0)))
|
||||
{
|
||||
backend_ssl = true;
|
||||
}
|
||||
else
|
||||
{
|
||||
backend_ssl = false;
|
||||
}
|
||||
|
||||
env = getenv("smoke");
|
||||
if (env)
|
||||
{
|
||||
smoke = strcasecmp(env, "yes") == 0 || strcasecmp(env, "true") == 0;
|
||||
}
|
||||
|
||||
env = getenv("threads");
|
||||
if ((env != NULL))
|
||||
{
|
||||
sscanf(env, "%d", &threads);
|
||||
}
|
||||
else
|
||||
{
|
||||
threads = 4;
|
||||
}
|
||||
|
||||
env = getenv("use_snapshots");
|
||||
if (env != NULL && ((strcasecmp(env, "yes") == 0) || (strcasecmp(env, "true") == 0)))
|
||||
{
|
||||
use_snapshots = true;
|
||||
}
|
||||
else
|
||||
{
|
||||
use_snapshots = false;
|
||||
}
|
||||
env = getenv("take_snapshot_command");
|
||||
if (env != NULL)
|
||||
{
|
||||
sprintf(take_snapshot_command, "%s", env);
|
||||
}
|
||||
else
|
||||
{
|
||||
sprintf(take_snapshot_command, "exit 1");
|
||||
}
|
||||
env = getenv("revert_snapshot_command");
|
||||
if (env != NULL)
|
||||
{
|
||||
sprintf(revert_snapshot_command, "%s", env);
|
||||
}
|
||||
else
|
||||
{
|
||||
sprintf(revert_snapshot_command, "exit 1");
|
||||
}
|
||||
|
||||
env = getenv("no_maxscale_start");
|
||||
if (env != NULL && ((strcasecmp(env, "yes") == 0) || (strcasecmp(env, "true") == 0)))
|
||||
if (readenv_bool("no_maxscale_start", false))
|
||||
{
|
||||
maxscale::start = false;
|
||||
}
|
||||
|
||||
env = getenv("no_vm_revert");
|
||||
if ((env != NULL) && ((strcasecmp(env, "no") == 0) || (strcasecmp(env, "false") == 0)))
|
||||
{
|
||||
no_vm_revert = false;
|
||||
}
|
||||
|
||||
env = getenv("use_valgrind");
|
||||
if ((env != NULL) && ((strcasecmp(env, "yes") == 0) || (strcasecmp(env, "true") == 0)))
|
||||
{
|
||||
use_valgrind = true;
|
||||
}
|
||||
no_backend_log_copy = readenv_bool("no_backend_log_copy", false);
|
||||
no_maxscale_log_copy = readenv_bool("no_maxscale_log_copy", false);
|
||||
use_ipv6 = readenv_bool("use_ipv6", false);
|
||||
backend_ssl = readenv_bool("backend_ssl", false);
|
||||
smoke = readenv_bool("smoke", false);
|
||||
threads = readenv_int("threads", 4);
|
||||
use_snapshots = readenv_bool("use_snapshots", false);
|
||||
take_snapshot_command = readenv("take_snapshot_command",
|
||||
"mdbci snapshot take --path-to-nodes %s --snapshot-name ", mdbci_config_name);
|
||||
revert_snapshot_command = readenv("revert_snapshot_command",
|
||||
"mdbci snapshot revert --path-to-nodes %s --snapshot-name ", mdbci_config_name);
|
||||
no_vm_revert = readenv_bool("no_vm_revert", true);
|
||||
use_valgrind = readenv_bool("use_valgrind", false);
|
||||
}
|
||||
|
||||
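The refactor above replaces long `getenv()`/`strcasecmp()` chains with `readenv()`, `readenv_bool()` and `readenv_int()` helpers from `envv.h`. Their implementation is not part of this excerpt, so the following is only a guess at how such helpers might look, inferred from the call sites (a printf-style default that is also exported back into the environment); treat the names, signatures and behaviour as assumptions, not the actual `envv.h` code.

```
// Sketch only: environment helpers inferred from the call sites; not the real envv.h implementation.
#include <cstdarg>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <strings.h>

// Return the value of 'name'; otherwise build a printf-formatted default, export it and return it.
char* readenv_sketch(const char* name, const char* format, ...)
{
    char* env = getenv(name);
    if (env == nullptr)
    {
        char buf[1024];                 // simplified: the real helper may allocate dynamically
        va_list args;
        va_start(args, format);
        vsnprintf(buf, sizeof(buf), format, args);
        va_end(args);
        setenv(name, buf, 1);
        env = getenv(name);
    }
    return env;
}

bool readenv_bool_sketch(const char* name, bool dflt)
{
    const char* env = getenv(name);
    return env ? (strcasecmp(env, "yes") == 0 || strcasecmp(env, "true") == 0) : dflt;
}

int readenv_int_sketch(const char* name, int dflt)
{
    const char* env = getenv(name);
    return env ? atoi(env) : dflt;
}

int main()
{
    // Example: 'threads' falls back to 4 unless the variable is set in the environment.
    std::printf("threads=%d\n", readenv_int_sketch("threads", 4));
    return 0;
}
```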
void TestConnections::print_env()
@ -650,9 +707,10 @@ void TestConnections::print_env()
}
}

const char* get_template_name(char* test_name)
const char * get_template_name(char * test_name, const char ** labels)
{
int i = 0;
*labels = NULL;
while (cnf_templates[i].test_name && strcmp(cnf_templates[i].test_name, test_name) != 0)
{
i++;
@ -660,6 +718,7 @@ const char* get_template_name(char* test_name)

if (cnf_templates[i].test_name)
{
*labels = (char *) cnf_templates[i].test_labels;
return cnf_templates[i].test_template;
}

@ -680,72 +739,79 @@ void TestConnections::process_template(int m, const char* template_name, const c
|
||||
sprintf(template_file, "%s/cnf/maxscale.cnf.template.%s", test_dir, template_name);
|
||||
sprintf(extended_template_file, "%s.%03d", template_file, m);
|
||||
|
||||
if (stat(extended_template_file, &stb) == 0)
|
||||
if (stat((char*)extended_template_file, &stb) == 0)
|
||||
{
|
||||
strcpy(template_file, extended_template_file);
|
||||
}
|
||||
tprintf("Template file is %s\n", template_file);
|
||||
|
||||
std::stringstream ss;
|
||||
|
||||
ss << "sed ";
|
||||
sprintf(str, "cp %s maxscale.cnf", template_file);
|
||||
if (verbose)
|
||||
{
|
||||
tprintf("Executing '%s' command\n", str);
|
||||
}
|
||||
if (system(str) != 0)
|
||||
{
|
||||
tprintf("Error copying maxscale.cnf template\n");
|
||||
return;
|
||||
}
|
||||
|
||||
if (backend_ssl)
|
||||
{
|
||||
ss
|
||||
<<
|
||||
" -e \"s|type=server|type=server\\nssl=required\\nssl_cert=/###access_homedir###/certs/client-cert.pem\\nssl_key=/###access_homedir###/certs/client-key.pem\\nssl_ca_cert=/###access_homedir###/certs/ca.pem|g\" ";
|
||||
tprintf("Adding ssl settings\n");
|
||||
system("sed -i \"s|type=server|type=server\\nssl=required\\nssl_cert=/###access_homedir###/certs/client-cert.pem\\nssl_key=/###access_homedir###/certs/client-key.pem\\nssl_ca_cert=/###access_homedir###/certs/ca.pem|g\" maxscale.cnf");
|
||||
}
|
||||
|
||||
ss << " -e \"s/###threads###/" << threads << "/\" ";
|
||||
sprintf(str, "sed -i \"s/###threads###/%d/\" maxscale.cnf", threads);
|
||||
system(str);
|
||||
|
||||
Mariadb_nodes* mdn[2] {repl, galera};
char* IPcnf;
Mariadb_nodes * mdn[2];
char * IPcnf;
mdn[0] = repl;
mdn[1] = galera;
int i, j;
int mdn_n = galera ? 2 : 1;

for (int j = 0; j < mdn_n; j++)
for (j = 0; j < mdn_n; j++)
{
for (int i = 0; i < mdn[j]->N; i++)
if (mdn[j])
{
if (mdn[j]->use_ipv6)
for (i = 0; i < mdn[j]->N; i++)
{
IPcnf = mdn[j]->IP6[i];
}
else
{
IPcnf = mdn[j]->IP[i];
}
sprintf(str,
" -e \"s/###%s_server_IP_%0d###/%s/\" ",
mdn[j]->prefix,
i + 1,
IPcnf);
ss << str;
if (mdn[j]->use_ipv6)
{
IPcnf = mdn[j]->IP6[i];
}
else
{
IPcnf = mdn[j]->IP[i];
}
sprintf(str, "sed -i \"s/###%s_server_IP_%0d###/%s/\" maxscale.cnf",
mdn[j]->prefix, i + 1, IPcnf);
system(str);

sprintf(str,
" -e \"s/###%s_server_port_%0d###/%d/\" ",
mdn[j]->prefix,
i + 1,
mdn[j]->port[i]);
ss << str;
sprintf(str, "sed -i \"s/###%s_server_port_%0d###/%d/\" maxscale.cnf",
mdn[j]->prefix, i + 1, mdn[j]->port[i]);
system(str);
}

mdn[j]->connect();
execute_query(mdn[j]->nodes[0], (char *) "CREATE DATABASE IF NOT EXISTS test");
mdn[j]->close_connections();
}

mdn[j]->connect();
execute_query(mdn[j]->nodes[0], "CREATE DATABASE IF NOT EXISTS test");
mdn[j]->close_connections();
}

sprintf(str, " -e \"s/###access_user###/%s/g\" ", maxscales->access_user[m]);
|
||||
ss << str;
|
||||
sprintf(str, "sed -i \"s/###access_user###/%s/g\" maxscale.cnf", maxscales->access_user[m]);
|
||||
system(str);
|
||||
|
||||
sprintf(str, " -e \"s|###access_homedir###|%s|g\" ", maxscales->access_homedir[m]);
|
||||
ss << str;
|
||||
sprintf(str, "sed -i \"s|###access_homedir###|%s|g\" maxscale.cnf", maxscales->access_homedir[m]);
|
||||
system(str);
|
||||
|
||||
ss << template_file << " > maxscale.cnf";
|
||||
call_system(ss.str().c_str());
|
||||
|
||||
maxscales->copy_to_node_legacy("maxscale.cnf", dest, m);
|
||||
// The config will now be in ~/maxscale.cnf and is moved into /etc before restarting maxscale
|
||||
if (repl && repl->v51)
|
||||
{
|
||||
system("sed -i \"s/###repl51###/mysql51_replication=true/g\" maxscale.cnf");
|
||||
}
|
||||
maxscales->copy_to_node_legacy((char *) "maxscale.cnf", (char *) dest, m);
|
||||
}
|
||||
|
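The new process_template() code above replaces the long series of `sed -i` invocations with a single `sed` command whose `-e` expressions are accumulated in a `std::stringstream` and executed once against the template. A minimal stand-alone sketch of that pattern, using hypothetical placeholder names and a hypothetical `run_or_warn()` helper (none of these are part of the test framework), might look like this:

```
#include <cstdio>
#include <cstdlib>
#include <sstream>
#include <string>

// Hypothetical helper: run a shell command and warn on failure.
static void run_or_warn(const std::string& cmd)
{
    if (system(cmd.c_str()) != 0)
    {
        fprintf(stderr, "Command failed: %s\n", cmd.c_str());
    }
}

int main()
{
    std::stringstream ss;
    ss << "sed ";

    // Each placeholder becomes one more -e expression instead of a separate sed -i run.
    ss << " -e \"s/###threads###/4/\" ";
    ss << " -e \"s/###node_server_IP_1###/192.168.0.11/\" ";

    // One process, one pass over the template file.
    ss << "maxscale.cnf.template > maxscale.cnf";
    run_or_warn(ss.str());
    return 0;
}
```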
void TestConnections::init_maxscales()
@ -764,10 +830,7 @@ void TestConnections::init_maxscales()

void TestConnections::init_maxscale(int m)
{
const char* template_name = get_template_name(test_name);

process_template(m, template_name, maxscales->access_homedir[m]);

if (maxscales->ssh_node_f(m, true, "test -d %s/certs", maxscales->access_homedir[m]))
{
tprintf("SSL certificates not found, copying to maxscale");
@ -807,7 +870,7 @@ void TestConnections::init_maxscale(int m)
}
}

void TestConnections::copy_one_mariadb_log(int i, std::string filename)
void TestConnections::copy_one_mariadb_log(Mariadb_nodes* nrepl, int i, std::string filename)
{
auto log_retrive_commands =
{
@ -820,7 +883,7 @@ void TestConnections::copy_one_mariadb_log(int i, std::string filename)

for (auto cmd : log_retrive_commands)
{
auto output = repl->ssh_output(cmd, i).second;
auto output = nrepl->ssh_output(cmd, i).second;

if (!output.empty())
{
@ -834,22 +897,22 @@ void TestConnections::copy_one_mariadb_log(int i, std::string filename)
}
}

int TestConnections::copy_mariadb_logs(Mariadb_nodes* repl,
int TestConnections::copy_mariadb_logs(Mariadb_nodes* nrepl,
const char* prefix,
std::vector<std::thread>& threads)
{
int local_result = 0;

if (repl)
if (nrepl)
{
for (int i = 0; i < repl->N; i++)
for (int i = 0; i < nrepl->N; i++)
{
// Do not copy MariaDB logs in case of local backend
if (strcmp(repl->IP[i], "127.0.0.1") != 0)
if (strcmp(nrepl->IP[i], "127.0.0.1") != 0)
{
char str[4096];
sprintf(str, "LOGS/%s/%s%d_mariadb_log", test_name, prefix, i);
threads.emplace_back(&TestConnections::copy_one_mariadb_log, this, i, str);
threads.emplace_back(&TestConnections::copy_one_mariadb_log, this, nrepl, i, str);
}
}
}
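Because copy_one_mariadb_log() now also takes the node set as an argument, the std::thread constructor used through threads.emplace_back() must be given the member-function pointer, the object pointer, and then every argument in order. A minimal hedged sketch of this pattern, with invented Logger/Nodes stand-in types that are not part of the test framework:

```
#include <string>
#include <thread>
#include <vector>

struct Nodes { int N = 1; };                       // invented stand-in for a backend node set

struct Logger                                      // invented stand-in for the test class
{
    void copy_one_log(Nodes* nodes, int i, std::string file)
    {
        (void)nodes; (void)i; (void)file;          // the real code would run ssh/scp here
    }
};

int main()
{
    Logger logger;
    Nodes nodes;
    std::vector<std::thread> threads;

    // Member function pointer, then the object, then the member's arguments, in order.
    threads.emplace_back(&Logger::copy_one_log, &logger, &nodes, 0, std::string("node0.log"));

    for (auto& t : threads)
    {
        t.join();
    }
    return 0;
}
```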
@ -951,7 +1014,6 @@ int TestConnections::copy_all_logs_periodic()
int TestConnections::prepare_binlog(int m)
{
char version_str[1024] = "";

repl->connect();
find_field(repl->nodes[0], "SELECT @@version", "@@version", version_str);
tprintf("Master server version '%s'", version_str);
@ -1059,9 +1121,7 @@ int TestConnections::start_binlog(int m)
execute_query(repl->nodes[i], "reset slave all");
execute_query(repl->nodes[i], "reset master");
}

prepare_binlog(m);

tprintf("Testing binlog when MariaDB is started with '%s' option\n", cmd_opt);

if (!local_maxscale)
@ -2115,6 +2175,103 @@ bool TestConnections::test_bad_config(int m, const char* config)
"maxscale -U maxscale -lstdout &> /dev/null && sleep 1 && pkill -9 maxscale")
== 0;
}
int TestConnections::call_mdbci(const char * options)
{
struct stat buf;
if (stat(
(mdbci_vm_path + std::string("/") + mdbci_config_name).c_str(),
&buf)
)
{
if (process_mdbci_template())
{
tprintf("Failed to generate MDBCI virtual machines template");
return 1;
}
if (system((std::string("mdbci --override --template ") +
vm_path +
std::string(".json generate ") +
std::string(mdbci_config_name)).c_str() ))
{
tprintf("MDBCI failed to generate virtual machines description");
return 1;
}
if (system((std::string("cp -r ") +
std::string(test_dir) +
std::string("/mdbci/cnf ") +
std::string(vm_path) +
std::string("/")).c_str()))
{
tprintf("Failed to copy my.cnf files");
return 1;
}
}

if (system((std::string("mdbci up ") +
std::string(mdbci_config_name) +
std::string(" --labels ") +
mdbci_labels +
std::string(" ") +
std::string(options)).c_str() ))
{
tprintf("MDBCI failed to bring up virtual machines");
return 1;
}
read_env();
if (repl)
{
repl->read_basic_env();
}
if (galera)
{
galera->read_basic_env();
}
if (maxscales)
{
maxscales->read_basic_env();
}
return 0;
}

int TestConnections::process_mdbci_template()
{
char * product = readenv("product", "mariadb");
char * box = readenv("box", "centos_7_libvirt");
char * __attribute__((unused)) backend_box = readenv("backend_box", "%s", box);
char * version = readenv("version", "10.3");
char * __attribute__((unused)) target = readenv("target", "develop");
char * __attribute__((unused)) vm_memory = readenv("vm_memory", "2048");
char * __attribute__((unused)) galera_version = readenv("galera_version", "%s", version);

if (strcmp(product, "mysql") == 0 )
{
setenv("cnf_path",
(vm_path + std::string("/cnf/mysql56/")).c_str(),
1);
}
else
{
setenv("cnf_path",
(vm_path + std::string("/cnf/")).c_str(),
1);
}

std::string name = std::string(test_dir) +
std::string("/mdbci/templates/") +
std::string(mdbci_template) +
std::string(".json.template");

std::string sys = std::string("envsubst < ") +
name +
std::string(" > ") +
vm_path +
std::string(".json");
if (verbose)
{
std::cout << sys << std::endl;
}
return system(sys.c_str());
}

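process_mdbci_template() above leans on the process environment: values read with readenv() are exported with setenv() so that the external envsubst call can expand the ${...} placeholders in the JSON template. A minimal hedged sketch of that mechanism, with a hypothetical template path and variable (the file names are made up and the error handling is only illustrative):

```
#include <cstdlib>
#include <iostream>
#include <string>
#include <sys/wait.h>

int main()
{
    // Export the value the template refers to as ${cnf_path}.
    setenv("cnf_path", "/home/vagrant/cnf/", 1);

    // envsubst reads the template and writes the expanded copy.
    std::string cmd = "envsubst < my_setup.json.template > my_setup.json";   // hypothetical file names
    int rc = system(cmd.c_str());

    if (rc == -1 || !WIFEXITED(rc) || WEXITSTATUS(rc) != 0)
    {
        std::cerr << "envsubst failed" << std::endl;
        return 1;
    }
    return 0;
}
```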
std::string dump_status(const StringSet& current, const StringSet& expected)
{
@ -2137,3 +2294,31 @@ std::string dump_status(const StringSet& current, const StringSet& expected)

return ss.str();
}
int TestConnections::reinstall_maxscales()
{
char sys[strlen(target) +
strlen(mdbci_config_name) +
strlen(maxscales->prefix) +
70];
for (int i = 0; i < maxscales->N; i++)
{
printf("Installing Maxscale on node %d\n", i);
//TODO: make it via MDBCI and compatible with any distro
maxscales->ssh_node(i, "yum remove maxscale -y", true);
maxscales->ssh_node(i, "yum clean all", true);

sprintf(sys, "mdbci setup_repo --product maxscale_ci --product-version %s %s/%s_%03d",
target, mdbci_config_name, maxscales->prefix, i);
if (system(sys))
{
return 1;
}
sprintf(sys, "mdbci install_product --product maxscale_ci --product-version %s %s/%s_%03d",
target, mdbci_config_name, maxscales->prefix, i);
if (system(sys))
{
return 1;
}
}
return 0;
}
@ -1,5 +1,4 @@
#ifndef TESTCONNECTIONS_H
#define TESTCONNECTIONS_H
#pragma once

#include "mariadb_nodes.h"
#include "maxscales.h"
@ -17,6 +16,9 @@

typedef std::set<std::string> StringSet;

#define MDBCI_FAUILT 200 // Exit code for the case when a failure is caused by a non-zero MDBCI exit
#define BROKEN_VM_FAUILT 201 // Exit code for the case when a failure is caused by broken VMs

/**
* @brief Class contains references to Master/Slave and Galera test setups
* Test setup should consist of two setups: one Master/Slave and one Galera.
@ -95,7 +97,7 @@ public:
/**
* @brief galera Mariadb_nodes object containing references to Galera setup
*/
Mariadb_nodes* galera;
Galera_nodes * galera;

/**
* @brief repl Mariadb_nodes object containing references to Master/Slave setup
@ -107,21 +109,41 @@ public:
*/
Maxscales* maxscales;

/**
* @brief mdbci_config_name Name of MDBCI VMs set
*/
char * mdbci_config_name;

/**
* @brief mdbci_vm_path Path to directory with MDBCI VMs descriptions
*/
char * mdbci_vm_path;

/**
* @brief mdbci_template Name of the MDBCI VMs template file
*/
char * mdbci_template;

/**
* @brief target Name of Maxscale repository in the CI
*/
char * target;

/**
* @brief GetLogsCommand Command to copy log files from node virtual machines (should handle one
* parameter: IP address of virtual machine to kill)
*/
char get_logs_command[4096];
char * get_logs_command;

/**
* @brief make_snapshot_command Command line to create a snapshot of all VMs
*/
char take_snapshot_command[4096];
char * take_snapshot_command;

/**
* @brief revert_snapshot_command Command line to revert a snapshot of all VMs
*/
char revert_snapshot_command[4096];
char * revert_snapshot_command;

/**
* @brief use_snapshots if TRUE every test is trying to revert snapshot before running the test
@ -139,13 +161,18 @@ public:
* @param prefix file name prefix
* @return 0 if success
*/
int copy_mariadb_logs(Mariadb_nodes* repl, const char* prefix, std::vector<std::thread>& threads);
int copy_mariadb_logs(Mariadb_nodes* nrepl, const char* prefix, std::vector<std::thread>& threads);

/**
* @brief MaxScale runs locally, specified using -l.
*/
bool local_maxscale;

/**
* @brief network_config Content of MDBCI network_config file
*/
std::string network_config;

/**
* @brief no_backend_log_copy if true logs from backends are not copied
* (needed in case of Aurora RDS backend or similar)
@ -193,6 +220,11 @@ public:
*/
bool binlog_slave_gtid;

/**
* @brief no_repl Do not check, restart and use Master/Slave setup;
*/
bool no_repl;

/**
* @brief no_galera Do not check, restart and use Galera setup; all Galera tests will fail
*/
@ -250,6 +282,39 @@ public:
*/
bool use_ipv6;

/**
* @brief template_name Name of maxscale.cnf template
*/
const char * template_name;

/**
* @brief labels 'LABELS' string from CMakeLists.txt
*/
const char * labels;

/**
* @brief mdbci_labels labels to be passed to MDBCI
*/
std::string mdbci_labels;

/**
* @brief configured_labels List of labels for which nodes are configured
*/
std::string configured_labels;

/**
* @brief vm_path Path to the VM Vagrant directory
*/
std::string vm_path;

/**
* @brief reinstall_maxscale Flag that is set when 'reinstall_maxscale'
* option is provided;
* if true Maxscale will be removed and re-installed on all Maxscale nodes
* Used for 'run_test_snapshot'
*/
bool reinstall_maxscale;

/** Check whether all nodes are in a valid state */
static void check_nodes(bool value);

@ -291,6 +356,11 @@ public:
/** Same as add_result() but inverted */
void expect(bool result, const char* format, ...) __attribute__ ((format(printf, 3, 4)));

/**
* @brief read_mdbci_info Reads name of MDBCI config and tries to load all network info
*/
void read_mdbci_info();

/**
* @brief ReadEnv Reads all Maxscale and Master/Slave and Galera setups info from environmental variables
*/
@ -603,14 +673,34 @@ public:
m_on_destroy.push_back(func);
}

/**
* @brief process_mdbci_template Read template file from maxscale-system-test/mdbci/templates
* and replace all placeholders with actual values
* @return 0 in case of success
*/
int process_mdbci_template();

/**
* @brief call_mdbci Execute MDBCI to bring up nodes
* @return 0 if success
*/
int call_mdbci(const char *options);

/**
* @brief use_valgrind if true Maxscale will be executed under Valgrind
*/
bool use_valgrind;

/**
* @brief reinstall_maxscales Removes Maxscale from all nodes and installs new ones
* (to be used for run_test_snapshot)
* @return 0 in case of success
*/
int reinstall_maxscales();

private:
void report_result(const char* format, va_list argp);
void copy_one_mariadb_log(int i, std::string filename);
void copy_one_mariadb_log(Mariadb_nodes *nrepl, int i, std::string filename);

std::vector<std::function<void (void)>> m_on_destroy;
};
@ -639,4 +729,18 @@ void* log_copy_thread(void* ptr);
*/
std::string dump_status(const StringSet& current, const StringSet& expected);

#endif // TESTCONNECTIONS_H
/**
* @brief get_template_name Returns the name of maxscale.cnf template to use for given test
* @param test_name Name of the test
* @param labels pointer to string for storing all test labels
* @return Name of maxscale.cnf file template
*/
const char *get_template_name(char * test_name, const char **labels);

/**
* @brief readenv_and_set_default Read environmental variable and set default values if
* variable is not defined
* @param name Name of the environmental variable
* @param defaultenv Default values to be set
* @return Environmental variable value
*/
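The get_template_name() helper declared above returns the template name for a test and hands the test's label string back through the labels out-parameter. A minimal hedged sketch of how it is looked up and used; the table entry, the test name, and the "replication" fallback are invented for illustration and are not the framework's real data:

```
#include <cstdio>
#include <cstring>

// Stand-in for the generated template table; the real one is produced by CMake.
struct TestTemplate { const char* test_name; const char* test_template; const char* test_labels; };
static TestTemplate cnf_templates[] = {
    {"my_test", "my_config", "my_test.cpp;my_test;my_config;LABELS;REPL_BACKEND"},   // hypothetical entry
    {nullptr, nullptr, nullptr}
};

const char* get_template_name(char* test_name, const char** labels)
{
    *labels = nullptr;
    for (int i = 0; cnf_templates[i].test_name; i++)
    {
        if (strcmp(cnf_templates[i].test_name, test_name) == 0)
        {
            *labels = cnf_templates[i].test_labels;
            return cnf_templates[i].test_template;
        }
    }
    return "replication";   // assumed fallback; the real default may differ
}

int main()
{
    char name[] = "my_test";
    const char* labels = nullptr;
    const char* tmpl = get_template_name(name, &labels);
    printf("template=%s labels=%s\n", tmpl, labels ? labels : "(none)");
    return 0;
}
```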
@ -1,7 +1,13 @@

# Helper function to add a configuration template
function(add_template name template)
set(CNF_TEMPLATES "${CNF_TEMPLATES}{\"${name}\",\"${template}\"}," CACHE INTERNAL "")
function(add_template name template labels)
set(CNF_TEMPLATES "${CNF_TEMPLATES}{\"${name}\",\"${template}\", \"${labels}\"}," CACHE INTERNAL "")
endfunction()


# Helper function to add a configuration template
function(add_template_manual name template)
add_template(${name} ${template} "${name}.cpp;${name};${template};LABELS;CONFIG")
endfunction()

# Default test timeout
@ -18,7 +24,7 @@ set(TIMEOUT 900)
# test set, the function should be called as follows:
# add_test_executable(simple_test.cpp simple_test simple_config LABELS some_label)
function(add_test_executable source name template)
add_template(${name} ${template})
add_template(${name} ${template} "${ARGV}")
add_executable(${name} ${source})
target_link_libraries(${name} testcore)
add_test(NAME ${name} COMMAND ${CMAKE_CURRENT_BINARY_DIR}/${name} ${name} WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
@ -34,14 +40,14 @@ endfunction()

# Same as add_test_executable, but do not add executable into tests list
function(add_test_executable_notest source name template)
add_template(${name} ${template})
add_template(${name} ${template} "${ARGV}")
add_executable(${name} ${source})
target_link_libraries(${name} testcore)
endfunction()

# Add a test which uses another test as the executable
function(add_test_derived name executable template)
add_template(${name} ${template})
add_template(${name} ${template} "${ARGV}")
add_test(NAME ${name} COMMAND ${CMAKE_CURRENT_BINARY_DIR}/${executable} ${name} WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
set_property(TEST ${name} PROPERTY TIMEOUT ${TIMEOUT})

@ -57,8 +63,8 @@ endfunction()
# The naming of the templates follows the same principles as add_test_executable.
# also suitable for symlinks
function(add_test_script name script template labels)
add_template(${name} ${template})
add_test(NAME ${name} COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/${script} ${name} WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
add_template(${name} ${template} "${ARGV}")
add_test(NAME ${name} COMMAND non_native_setup ${name} ${script} WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})

list(REMOVE_AT ARGV 0 1 2)
@ -119,6 +119,7 @@ const char CN_INET[] = "inet";
const char CN_LINKS[] = "links";
const char CN_LISTENERS[] = "listeners";
const char CN_LISTENER[] = "listener";
const char CN_LOAD_PERSISTED_CONFIGS[] = "load_persisted_configs";
const char CN_LOCALHOST_MATCH_WILDCARD_HOST[] = "localhost_match_wildcard_host";
const char CN_LOCAL_ADDRESS[] = "local_address";
const char CN_LOG_AUTH_WARNINGS[] = "log_auth_warnings";
@ -1391,7 +1392,8 @@ static bool config_load_and_process(const char* filename, bool (* process_config
const char* persist_cnf = get_config_persistdir();
mxs_mkdir_all(persist_cnf, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);

if (is_directory(persist_cnf) && contains_cnf_files(persist_cnf))
if (config_get_global_options()->load_persisted_configs
&& is_directory(persist_cnf) && contains_cnf_files(persist_cnf))
{
/**
* Set the global flag that we are processing a persisted configuration.
@ -2729,6 +2731,20 @@ static int handle_global_item(const char* name, const char* value)
return 0;
}
}
else if (strcmp(name, CN_LOAD_PERSISTED_CONFIGS) == 0)
{
int b = config_truth_value(value);

if (b != -1)
{
gateway.load_persisted_configs = b;
}
else
{
MXS_ERROR("Invalid value for '%s': %s", CN_LOAD_PERSISTED_CONFIGS, value);
return 0;
}
}
else
{
bool found = false;
@ -2947,6 +2963,7 @@ void config_set_global_defaults()
gateway.query_retry_timeout = DEFAULT_QUERY_RETRY_TIMEOUT;
gateway.passive = false;
gateway.promoted_at = 0;
gateway.load_persisted_configs = true;

gateway.peer_hosts[0] = '\0';
gateway.peer_user[0] = '\0';
@ -972,7 +972,7 @@ bool RWSplitSession::should_migrate_trx(RWBackend* target)

bool RWSplitSession::start_trx_migration(RWBackend* target, GWBUF* querybuf)
{
MXS_INFO("Starting transaction migration from '%s' to '%s'", m_current_master->name(), target->name());
MXS_INFO("Starting transaction migration to '%s'", target->name());

/**
* Stash the current query so that the transaction replay treats
@ -126,12 +126,11 @@ int32_t RWSplitSession::routeQuery(GWBUF* querybuf)
{
MXS_INFO("New query received while transaction replay is active: %s",
mxs::extract_sql(querybuf).c_str());
mxb_assert(!m_interrupted_query.get());
m_interrupted_query.reset(querybuf);
m_query_queue.emplace_back(querybuf);
return 1;
}

if (can_route_queries())
if ((m_query_queue.empty() || GWBUF_IS_REPLAYED(querybuf)) && can_route_queries())
{
/** Gather the information required to make routing decisions */
if (!m_qc.large_query())
@ -432,6 +431,10 @@ void RWSplitSession::trx_replay_next_stmt()
MXS_INFO("Resuming execution: %s", mxs::extract_sql(m_interrupted_query.get()).c_str());
retry_query(m_interrupted_query.release(), 0);
}
else if (!m_query_queue.empty())
{
route_stored_query();
}
}
else
{
@ -270,10 +270,9 @@ private:

inline bool can_route_queries() const
{
return m_query_queue.empty()
&& (m_expected_responses == 0
|| m_qc.load_data_state() == mxs::QueryClassifier::LOAD_DATA_ACTIVE
|| m_qc.large_query());
return m_expected_responses == 0
|| m_qc.load_data_state() == mxs::QueryClassifier::LOAD_DATA_ACTIVE
|| m_qc.large_query();
}

inline mxs::QueryClassifier::current_target_t get_current_target() const
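The routeQuery()/can_route_queries() change above moves the "is the stored-query queue empty" check out of can_route_queries() and into the caller so that replayed statements can bypass the queue. A minimal hedged sketch of the underlying queue-or-route idea, with invented types that only illustrate the pattern and are not MaxScale's real API:

```
#include <deque>
#include <iostream>
#include <string>

// Invented stand-in for a router session.
class MiniSession
{
public:
    void route_query(const std::string& sql, bool replayed = false)
    {
        if ((m_query_queue.empty() || replayed) && can_route())
        {
            std::cout << "routing: " << sql << std::endl;
            ++m_expected_responses;
        }
        else
        {
            // A response is still pending, so store the query for later.
            m_query_queue.push_back(sql);
        }
    }

    void on_reply()
    {
        --m_expected_responses;
        if (!m_query_queue.empty() && can_route())
        {
            std::string next = m_query_queue.front();
            m_query_queue.pop_front();
            route_query(next);
        }
    }

private:
    bool can_route() const { return m_expected_responses == 0; }

    std::deque<std::string> m_query_queue;
    int m_expected_responses = 0;
};

int main()
{
    MiniSession s;
    s.route_query("SELECT 1");   // routed immediately
    s.route_query("SELECT 2");   // queued until the first reply arrives
    s.on_reply();                // drains the queue
    s.on_reply();
    return 0;
}
```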