Merge branch '2.4' of github.com:mariadb-corporation/MaxScale into 2.4

This commit is contained in:
Timofey Turenko 2019-07-03 13:08:08 +03:00
commit 73803fff70
9 changed files with 103 additions and 31 deletions

View File

@ -60,7 +60,7 @@ else
sudo zypper -n update
sudo zypper -n install gcc gcc-c++ ncurses-devel bison glibc-devel libgcc_s1 perl \
make libtool libopenssl-devel libaio libaio-devel flex \
pcre-devel git wget tcl libuuid-devel \
pcre-devel git wget tcl tcl-devel libuuid-devel \
xz-devel sqlite3 sqlite3-devel pkg-config lua lua-devel \
gnutls-devel libgcrypt-devel pam-devel systemd-devel libcurl-devel
sudo zypper -n install rpm-build
@ -77,7 +77,7 @@ else
sudo yum install -y --nogpgcheck gcc gcc-c++ ncurses-devel bison glibc-devel \
libgcc perl make libtool openssl-devel libaio libaio-devel libedit-devel \
libedit-devel systemtap-sdt-devel rpm-sign wget \
gnupg pcre-devel flex rpmdevtools git wget tcl openssl libuuid-devel xz-devel \
gnupg pcre-devel flex rpmdevtools git wget tcl tcl-devel openssl libuuid-devel xz-devel \
sqlite sqlite-devel pkgconfig lua lua-devel rpm-build createrepo yum-utils \
gnutls-devel libgcrypt-devel pam-devel libcurl-devel
@ -153,24 +153,35 @@ sudo make install
cd ../../
# TCL
mkdir tcl
cd tcl
wget -q --no-check-certificate http://prdownloads.sourceforge.net/tcl/tcl8.6.5-src.tar.gz
# Methods allow to compare software versions according to semantic versioning
# verlte VER1 VER2 — succeed (return 0) when VER1 <= VER2 under semantic
# version ordering ("sort -V" compares numeric components numerically,
# so e.g. 8.6.10 sorts after 8.6.5).
verlte() {
    # printf instead of `echo -e`: echo's -e flag is not portable across
    # shells (dash/posix echo would print the literal "-e").
    [ "$1" = "$(printf '%s\n%s\n' "$1" "$2" | sort -V | head -n1)" ]
}
if [ $? != 0 ]
# verlt VER1 VER2 — succeed (return 0) when VER1 is strictly less than VER2.
verlt() {
    # Equal versions are not "less than"; otherwise defer to verlte.
    # Arguments are quoted to avoid word-splitting/globbing of the values.
    [ "$1" = "$2" ] && return 1 || verlte "$1" "$2"
}
system_tcl_version=$(tclsh <<< 'puts [info patchlevel]')
if verlt "$system_tcl_version" "8.6.5"
then
echo "Error getting tcl"
sudo rm -rf $tmpdir
exit 1
mkdir tcl
cd tcl
wget -q --no-check-certificate http://prdownloads.sourceforge.net/tcl/tcl8.6.5-src.tar.gz
if [ $? != 0 ]
then
echo "Error getting tcl"
exit 1
fi
tar xzf tcl8.6.5-src.tar.gz
cd tcl8.6.5/unix
./configure
sudo make install
cd ../../..
fi
tar xzf tcl8.6.5-src.tar.gz
cd tcl8.6.5/unix
./configure
sudo make install
cd ../../..
# Jansson
git clone https://github.com/akheron/jansson.git
if [ $? != 0 ]

View File

@ -15,6 +15,7 @@
## Upgrading MariaDB MaxScale
- [Upgrading MariaDB MaxScale from 2.3 to 2.4](Upgrading/Upgrading-To-MaxScale-2.4.md)
- [Upgrading MariaDB MaxScale from 2.2 to 2.3](Upgrading/Upgrading-To-MaxScale-2.3.md)
- [Upgrading MariaDB MaxScale from 2.1 to 2.2](Upgrading/Upgrading-To-MaxScale-2.2.md)
- [Upgrading MariaDB MaxScale from 2.0 to 2.1](Upgrading/Upgrading-To-MaxScale-2.1.md)

View File

@ -10,6 +10,7 @@ Restart=on-abort
# Make sure /var/run/maxscale exists
PermissionsStartOnly=true
ExecStartPre=/usr/bin/install -d @MAXSCALE_VARDIR@/run/maxscale -o maxscale -g maxscale
ExecStartPre=/usr/bin/install -d @MAXSCALE_VARDIR@/lib/maxscale -o maxscale -g maxscale
PIDFile=@MAXSCALE_VARDIR@/run/maxscale/maxscale.pid

View File

@ -1,5 +1,6 @@
[maxscale]
threads=###threads###
log_info=1
[MySQL-Monitor]
type=monitor

View File

@ -415,9 +415,11 @@ TestConnections::TestConnections(int argc, char* argv[])
bool repl_ok = no_repl || repl_future.get();
bool galera_ok = no_galera || galera_future.get();
bool node_error = !maxscale_ok || !repl_ok || !galera_ok;
bool initialize = false;
if (node_error || too_many_maxscales())
{
initialize = true;
tprintf("Recreating VMs: %s", node_error ? "node check failed" : "too many maxscales");
if (call_mdbci("--recreate"))
@ -426,16 +428,24 @@ TestConnections::TestConnections(int argc, char* argv[])
}
}
if (reinstall_maxscale && reinstall_maxscales())
if (reinstall_maxscale)
{
tprintf("Failed to install Maxscale: target is %s", target);
exit(MDBCI_FAUILT);
initialize = true;
if (reinstall_maxscales())
{
tprintf("Failed to install Maxscale: target is %s", target);
exit(MDBCI_FAUILT);
}
}
std::string src = std::string(test_dir) + "/mdbci/add_core_cnf.sh";
maxscales->copy_to_node(0, src.c_str(), maxscales->access_homedir[0]);
maxscales->ssh_node_f(0, true, "%s/add_core_cnf.sh %s", maxscales->access_homedir[0],
verbose ? "verbose" : "");
if (initialize)
{
std::string src = std::string(test_dir) + "/mdbci/add_core_cnf.sh";
maxscales->copy_to_node(0, src.c_str(), maxscales->access_homedir[0]);
maxscales->ssh_node_f(0, true, "%s/add_core_cnf.sh %s", maxscales->access_homedir[0],
verbose ? "verbose" : "");
}
maxscales->use_ipv6 = use_ipv6;
@ -1885,6 +1895,22 @@ void TestConnections::tprintf(const char* format, ...)
fflush(stderr);
}
/**
 * Injects a printf-formatted message into maxscale.log on the MaxScale node
 * (so test output and the server log can be correlated) and also prints it
 * locally via tprintf().
 *
 * @param format printf-style format string followed by its arguments.
 */
void TestConnections::log_printf(const char* format, ...)
{
    // First pass: measure the formatted length without writing anything.
    va_list argp;
    va_start(argp, format);
    int n = vsnprintf(nullptr, 0, format, argp);
    va_end(argp);

    if (n < 0)
    {
        // Encoding/format error reported by vsnprintf; nothing sensible to log.
        return;
    }

    // std::string instead of a VLA: `char buf[n + 1]` is a non-standard
    // compiler extension in C++ and grows unbounded with the message size.
    std::string buf(n + 1, '\0');
    va_start(argp, format);
    vsnprintf(&buf[0], buf.size(), format, argp);
    va_end(argp);
    buf.resize(n);      // drop the terminating NUL written by vsnprintf

    // NOTE(review): a single quote in the message would terminate the shell
    // quoting below — assumed acceptable for test messages; confirm.
    maxscales->ssh_node_f(0, true, "echo '--- %s ---' >> /var/log/maxscale/maxscale.log", buf.c_str());
    tprintf("%s", buf.c_str());
}
int TestConnections::get_master_server_id(int m)
{
int master_id = -1;

View File

@ -505,6 +505,11 @@ public:
*/
void tprintf(const char* format, ...);
/**
* @brief Injects a message into maxscale.log
*/
void log_printf(const char* format, ...) mxb_attribute((format(printf, 2, 3)));
/**
* @brief Creates t1 table, inserts data into it and checks whether the data can be correctly read from all Maxscale
* services

View File

@ -172,6 +172,8 @@ public:
virtual long wait_no_more_than() const = 0;
virtual const std::vector<Result>& results() const = 0;
virtual const std::vector<std::string>& urls() const = 0;
};
/**
@ -258,6 +260,16 @@ public:
return m_sImp->results();
}
/**
* The URLs the async operation was invoked with.
*
* @return Vector of urls.
*/
const std::vector<std::string>& urls() const
{
return m_sImp->urls();
}
public:
Async(const std::shared_ptr<Imp>& sImp)
: m_sImp(sImp)

View File

@ -197,14 +197,20 @@ public:
return 0;
}
const std::vector<Result>& results() const
const vector<Result>& results() const
{
return m_results;
}
const vector<string>& urls() const
{
return m_urls;
}
private:
Async::status_t m_status;
vector<Result> m_results;
vector<string> m_urls;
};
class HttpImp : public Async::Imp
@ -248,6 +254,8 @@ public:
{
mxb_assert(m_status == Async::ERROR);
m_urls = urls;
m_results.reserve(urls.size());
m_errbufs.reserve(urls.size());
@ -417,11 +425,16 @@ public:
return m_wait_no_more_than;
}
const std::vector<Result>& results() const
const vector<Result>& results() const
{
return m_results;
}
const vector<string>& urls() const
{
return m_urls;
}
private:
void update_timeout()
{
@ -441,6 +454,7 @@ private:
unordered_map<CURL*, Context> m_curls;
int m_still_running;
long m_wait_no_more_than;
vector<string> m_urls;
};
}

View File

@ -408,11 +408,7 @@ void ClustrixMonitor::tick()
case http::Async::READY:
update_server_statuses();
if (!m_health_urls.empty())
{
make_health_check();
}
make_health_check();
break;
}
@ -743,7 +739,6 @@ bool ClustrixMonitor::refresh_nodes(MYSQL* pHub_con)
node.set_running(false, ClustrixNode::APPROACH_OVERRIDE);
}
update_http_urls();
cluster_checked();
}
else
@ -757,6 +752,11 @@ bool ClustrixMonitor::refresh_nodes(MYSQL* pHub_con)
MXS_ERROR("%s: Could not execute '%s' on %s: %s",
name(), ZQUERY, mysql_get_host_info(pHub_con), mysql_error(pHub_con));
}
// Since we are here, the call above to check_cluster_membership() succeeded. As that
// function may change the content of m_nodes_by_ids, we must always update the urls,
// irrespective of whether the SQL of this function succeeds or not.
update_http_urls();
}
return refreshed;
@ -1112,6 +1112,7 @@ bool ClustrixMonitor::check_http(Call::action_t action)
case http::Async::READY:
{
mxb_assert(m_health_urls == m_http.urls());
// There are as many results as there are nodes,
// and the results are in node order.
const vector<http::Result>& results = m_http.results();