Merge branch 'develop' into pull-102

This commit is contained in:
Markus Makela
2016-09-15 10:26:53 +03:00
28 changed files with 807 additions and 129 deletions

View File

@ -6,18 +6,8 @@ os: linux
env:
global:
- MARIADB_URL=https://downloads.mariadb.org/interstitial/mariadb-5.5.48/bintar-linux-glibc_214-x86_64/mariadb-5.5.48-linux-glibc_214-x86_64.tar.gz/from/http%3A//mirror.netinch.com/pub/mariadb/
- MARIADB_TAR=mariadb-5.5.48-linux-glibc_214-x86_64.tar.gz
- MARIADB_DIR=mariadb-5.5.48-linux-x86_64
- secure: "kfzqiIq1XhZ89XYsnqFhPKr5UWB+W4fYAYpOYOLgWMmqfjwqQTm1nN/A6TuFmdbTrzB6hLawsxIUrPS+QKs4TI8tTQMRZ8IZV4TIUQVa7SNQljwrKvnSu0fSoqpPrvXxjEjbTlvpo7X5EKCyCB0Xz6NaYVJIvE9bYnwCEAJw30k="
# prepare the environment
before_script:
# get mariadb packages from mariadb.org
- chmod +x .travis/download_mariadb.sh
- .travis/download_mariadb.sh
# actual compilation commands
script:
- chmod +x .travis/build_maxscale.sh

View File

@ -8,16 +8,15 @@
echo TRAVIS_BUILD_DIR: ${TRAVIS_BUILD_DIR}
echo MARIADB_DIR: ${MARIADB_DIR}
mkdir build
cd build
cmake .. -DCMAKE_INSTALL_PREFIX=/usr -DMYSQL_EMBEDDED_INCLUDE_DIR=${TRAVIS_BUILD_DIR}/${MARIADB_DIR}/include/ -DMYSQL_EMBEDDED_LIBRARIES=${TRAVIS_BUILD_DIR}/${MARIADB_DIR}/lib/libmysqld.a -DERRMSG=${TRAVIS_BUILD_DIR}/${MARIADB_DIR}/share/english/errmsg.sys
cmake .. -DCMAKE_INSTALL_PREFIX=/usr -DBUILD_TESTS=Y
make VERBOSE=1
make test
sudo make install
sudo make testcore
sudo ./postinst
maxscale --version

View File

@ -1,12 +0,0 @@
#/bin/sh -f
# these environment variables are set in .travis.yml
# MARIADB_URL=https://downloads.mariadb.org/interstitial/mariadb-5.5.48/bintar-linux-glibc_214-x86_64/mariadb-5.5.48-linux-glibc_214-x86_64.tar.gz/from/http%3A//mirror.netinch.com/pub/mariadb/
# MARIADB_TAR=mariadb-5.5.48-linux-glibc_214-x86_64.tar.gz
# MARIADB_DIR=mariadb-5.5.48-linux-x86_64
# get mariadb
wget --content-disposition ${MARIADB_URL}
# unompress
tar -axf ${MARIADB_TAR}

View File

@ -205,6 +205,8 @@ install_file(${CMAKE_BINARY_DIR}/ReleaseNotes.txt core)
install_file(${CMAKE_BINARY_DIR}/UpgradingToMaxScale12.txt core)
install_file(server/maxscale.cnf.template core)
install_file(server/maxscale_binlogserver_template.cnf core)
install_program(script/create_grants core)
install_file(script/create_roles.sql core)
# Install the template into /etc
if(WITH_MAXSCALE_CNF AND (NOT TARGET_COMPONENT OR "core" STREQUAL "${TARGET_COMPONENT}"))

View File

@ -62,9 +62,9 @@ storage_args=path=/usr/maxscale/cache/rocksdb
Specifies whether any or only fully qualified references are allowed in
queries stored to the cache.
```
allowed_references=[fully-qualified|any]
allowed_references=[qualified|any]
```
The default is `fully-qualified`, which means that only queries where
The default is `qualified`, which means that only queries where
the database name is included in the table name are subject to caching.
```
select col from db.tbl;
@ -87,13 +87,13 @@ The setting can be changed to `any`, provided fully qualified names
are always used or if the names of tables in different databases are
different.
#### `maximum_resultset_size`
#### `max_resultset_size`
Specifies the maximum size a resultset can have, measured in kibibytes,
in order to be stored in the cache. A resultset larger than this, will
not be stored.
```
maximum_resultset_size=64
max_resultset_size=64
```
The default value is TBD.

View File

@ -113,6 +113,15 @@ Enable or disable the high precision timestamps in logfiles. Enabling this adds
ms_timestamp=1
```
#### `skip_permission_checks`
Skip service and monitor user permission checks. This is useful when
you know the permissions are OK and you want to speed up the startup
process.
It is recommended to leave the permission checks on so that any
missing privileges are detected when MaxScale is starting up.
#### `syslog`
Enable or disable the logging of messages to *syslog*.

61
script/create_grants Executable file
View File

@ -0,0 +1,61 @@
#!/bin/bash
# Collects the grants of all non-local users on a backend server and rewrites
# them so that they apply to MaxScale's host, printing the resulting GRANT
# statements on stdout.

# Run a query on the backend; prints the silent (-s -s) result on stdout.
# Exits with a non-zero status if the query fails. NOTE: when called inside a
# $(...) substitution the exit only terminates that subshell, but the error
# message goes to stderr so it cannot pollute the captured grant output.
function runQuery(){
    mysql -s -s -h "$host" -P "$port" -u "$user" -p"$password" -e "$1"
    if [ $? -ne 0 ]
    then
        echo "Failed to execute query: $1" >&2
        exit 1
    fi
}

# Transform grants from external hosts to MaxScale's host
function getGrants(){
    result=$(runQuery "show grants for $1"|sed -e "s/@[^ ]*/@'$maxscalehost'/" -e "s/ *IDENTIFIED BY.*//" -e "s/$/;/")
    echo "$result"
}

# Defaults; overridable with -u/-h/-P below.
user=$(whoami)
host=$(hostname)
port=3306
include_root="and user <> 'root'"

if [ "$1" == "--help" ] || [ $# -eq 0 ]
then
    echo "$0 -u USER -p PASSWORD -h HOST -P PORT [-r]"
    exit
fi

while getopts "u:p:h:P:r" var
do
    case $var in
        u)
            user=$OPTARG
            ;;
        p)
            password=$OPTARG
            ;;
        h)
            host=$OPTARG
            ;;
        P)
            port=$OPTARG
            ;;
        r)
            # -r includes the root user in the generated grants.
            include_root=""
            ;;
    esac
done

# Get the MaxScale hostname from the backend server
maxscalehost=$(runQuery "select user()")
maxscalehost=${maxscalehost#*@}

# List all the users and emit the rewritten grants for each one.
runQuery "select concat(\"'\", user, \"'\", '@', \"'\", host, \"'\") from mysql.user where user <> '' and host <> '%' $include_root"|while read i
do
    getGrants "$i"
done

7
script/create_roles.sql Normal file
View File

@ -0,0 +1,7 @@
-- Roles granting the minimum backend privileges MaxScale components need.

-- Authentication: MaxScale reads the user/db/table grant tables to
-- authenticate clients on the backend's behalf.
CREATE ROLE proxy_authenticator;
GRANT SELECT ON mysql.user TO proxy_authenticator;
GRANT SELECT ON mysql.db TO proxy_authenticator;
GRANT SELECT ON mysql.tables_priv TO proxy_authenticator;
GRANT SHOW DATABASES ON *.* TO proxy_authenticator;

-- Monitoring: the monitors only need to inspect replication status.
CREATE ROLE proxy_monitor;
GRANT REPLICATION CLIENT ON *.* TO proxy_monitor;

View File

@ -1,6 +1,6 @@
add_library(maxscale-common SHARED adminusers.c alloc.c atomic.c buffer.c config.c dbusers.c dcb.c filter.c externcmd.c gwbitmask.c gwdirs.c gw_utils.c hashtable.c hint.c housekeeper.c listmanager.c load_utils.c log_manager.cc maxscale_pcre2.c memlog.c misc.c mlist.c modutil.c monitor.c queuemanager.c query_classifier.c poll.c random_jkiss.c resultset.c secrets.c server.c service.c session.c slist.c spinlock.c thread.c users.c utils.c skygw_utils.cc statistics.c listener.c gw_ssl.c mysql_utils.c mysql_binlog.c)
target_link_libraries(maxscale-common ${MARIADB_CONNECTOR_LIBRARIES} ${LZMA_LINK_FLAGS} ${PCRE2_LIBRARIES} ${CURL_LIBRARIES} ssl aio pthread crypt dl crypto inih z rt m stdc++)
target_link_libraries(maxscale-common ${MARIADB_CONNECTOR_LIBRARIES} ${LZMA_LINK_FLAGS} ${PCRE2_LIBRARIES} ${CURL_LIBRARIES} ssl pthread crypt dl crypto inih z rt m stdc++)
if(WITH_JEMALLOC)
target_link_libraries(maxscale-common ${JEMALLOC_LIBRARIES})

View File

@ -957,6 +957,10 @@ handle_global_item(const char *name, const char *value)
{
mxs_log_set_highprecision_enabled(config_truth_value((char*)value));
}
else if (strcmp(name, "skip_permission_checks") == 0)
{
gateway.skip_permission_checks = config_truth_value((char*)value);
}
else if (strcmp(name, "auth_connect_timeout") == 0)
{
char* endptr;
@ -1301,6 +1305,7 @@ global_defaults()
gateway.auth_conn_timeout = DEFAULT_AUTH_CONNECT_TIMEOUT;
gateway.auth_read_timeout = DEFAULT_AUTH_READ_TIMEOUT;
gateway.auth_write_timeout = DEFAULT_AUTH_WRITE_TIMEOUT;
gateway.skip_permission_checks = false;
if (version_string != NULL)
{
gateway.version_string = MXS_STRDUP_A(version_string);

View File

@ -2689,7 +2689,8 @@ static bool check_server_permissions(SERVICE *service, SERVER* server,
*/
bool check_service_permissions(SERVICE* service)
{
if (is_internal_service(service->routerModule))
if (is_internal_service(service->routerModule) ||
config_get_global_options()->skip_permission_checks)
{
return true;
}

View File

@ -491,88 +491,52 @@ int modutil_send_mysql_err_packet(DCB *dcb,
}
/**
* Buffer contains at least one of the following:
* complete [complete] [partial] mysql packet
* Return the first packet from a buffer.
*
* return pointer to gwbuf containing a complete packet or
* NULL if no complete packet was found.
* @param p_readbuf Pointer to pointer to GWBUF. If the GWBUF contains a
* complete packet, after the call it will have been updated
* to begin at the byte following the packet.
*
* @return Pointer to GWBUF if the buffer contained at least one complete packet,
* otherwise NULL.
*
* @attention The returned GWBUF is not necessarily contiguous.
*/
GWBUF* modutil_get_next_MySQL_packet(GWBUF** p_readbuf)
{
GWBUF* packetbuf;
GWBUF* readbuf;
size_t buflen;
size_t packetlen;
size_t totalbuflen;
uint8_t* data;
size_t nbytes_copied = 0;
uint8_t* target;
GWBUF *packet = NULL;
GWBUF *readbuf = *p_readbuf;
readbuf = *p_readbuf;
if (readbuf)
{
CHK_GWBUF(readbuf);
if (readbuf == NULL)
{
packetbuf = NULL;
goto return_packetbuf;
}
CHK_GWBUF(readbuf);
size_t totalbuflen = gwbuf_length(readbuf);
if (totalbuflen >= MYSQL_HEADER_LEN)
{
size_t packetlen;
if (GWBUF_EMPTY(readbuf))
{
packetbuf = NULL;
goto return_packetbuf;
}
totalbuflen = gwbuf_length(readbuf);
if (totalbuflen < MYSQL_HEADER_LEN)
{
packetbuf = NULL;
goto return_packetbuf;
if (GWBUF_LENGTH(readbuf) >= 3) // The length is in the 3 first bytes.
{
uint8_t *data = (uint8_t *)GWBUF_DATA((readbuf));
packetlen = MYSQL_GET_PACKET_LEN(data) + 4;
}
else
{
// The header is split between two GWBUFs.
uint8_t data[3];
gwbuf_copy_data(readbuf, 0, 3, data);
packetlen = MYSQL_GET_PACKET_LEN(data) + 4;
}
if (packetlen <= totalbuflen)
{
packet = gwbuf_split(p_readbuf, packetlen);
}
}
}
if (GWBUF_LENGTH(readbuf) >= 3) // The length is in the 3 first bytes.
{
data = (uint8_t *)GWBUF_DATA((readbuf));
packetlen = MYSQL_GET_PACKET_LEN(data) + 4;
}
else
{
// The header is split between two GWBUFs.
uint8_t length[3];
gwbuf_copy_data(readbuf, 0, 3, length);
packetlen = MYSQL_GET_PACKET_LEN(length) + 4;
}
/** packet is incomplete */
if (packetlen > totalbuflen)
{
packetbuf = NULL;
goto return_packetbuf;
}
packetbuf = gwbuf_alloc(packetlen);
target = GWBUF_DATA(packetbuf);
packetbuf->gwbuf_type = readbuf->gwbuf_type; /*< Copy the type too */
/**
* Copy first MySQL packet to packetbuf and leave posible other
* packets to read buffer.
*/
while (nbytes_copied < packetlen && totalbuflen > 0)
{
uint8_t* src = GWBUF_DATA((*p_readbuf));
size_t bytestocopy;
buflen = GWBUF_LENGTH((*p_readbuf));
bytestocopy = MIN(buflen, packetlen - nbytes_copied);
memcpy(target + nbytes_copied, src, bytestocopy);
*p_readbuf = gwbuf_consume((*p_readbuf), bytestocopy);
totalbuflen = gwbuf_length((*p_readbuf));
nbytes_copied += bytestocopy;
}
ss_dassert(buflen == 0 || nbytes_copied == packetlen);
return_packetbuf:
return packetbuf;
return packet;
}
/**

View File

@ -543,6 +543,11 @@ bool check_monitor_permissions(MONITOR* monitor, const char* query)
return false;
}
if (config_get_global_options()->skip_permission_checks)
{
return true;
}
char *user = monitor->user;
char *dpasswd = decryptPassword(monitor->password);
GATEWAY_CONF* cnf = config_get_global_options();

View File

@ -25,6 +25,10 @@
// To ensure that ss_info_assert asserts also when builing in non-debug mode.
#if !defined(SS_DEBUG)
#define SS_DEBUG
int debug_check_fail = 1;
#else
// This is defined in the queuemanager code but only in debug builds
extern int debug_check_fail;
#endif
#if defined(NDEBUG)
#undef NDEBUG
@ -44,8 +48,6 @@
*
*/
extern int debug_check_fail;
#define TEST_QUEUE_SIZE 5
#define HEARTBEATS_TO_EXPIRE 3
#define NUMBER_OF_THREADS 4

View File

@ -161,6 +161,9 @@ typedef struct gwbuf
/*< Check that the data in a buffer has the SQL marker*/
#define GWBUF_IS_SQL(b) (0x03 == GWBUF_DATA_CHAR(b,4))
/*< Check whether the buffer is contiguous*/
#define GWBUF_IS_CONTIGUOUS(b) (((b) == NULL) || ((b)->next == NULL))
/*< True if all bytes in the buffer have been consumed */
#define GWBUF_EMPTY(b) ((char *)(b)->start >= (char *)(b)->end)

View File

@ -121,6 +121,7 @@ typedef struct
unsigned int auth_conn_timeout; /**< Connection timeout for the user authentication */
unsigned int auth_read_timeout; /**< Read timeout for the user authentication */
unsigned int auth_write_timeout; /**< Write timeout for the user authentication */
bool skip_permission_checks; /**< Skip service and monitor permission checks */
char qc_name[PATH_MAX]; /**< The name of the query classifier to load */
char* qc_args; /**< Arguments for the query classifier */
} GATEWAY_CONF;

View File

@ -2,3 +2,5 @@ add_library(cache SHARED cache.c storage.c)
target_link_libraries(cache maxscale-common)
set_target_properties(cache PROPERTIES VERSION "1.0.0")
install_module(cache experimental)
add_subdirectory(storage)

View File

@ -22,7 +22,17 @@
static char VERSION_STRING[] = "V1.0.0";
static const int DEFAULT_TTL = 10;
typedef enum cache_references
{
CACHE_REFERENCES_ANY,
CACHE_REFERENCES_QUALIFIED
} cache_references_t;
#define DEFAULT_ALLOWED_REFERENCES CACHE_REFERENCES_QUALIFIED
// Bytes
#define DEFAULT_MAX_RESULTSET_SIZE 64 * 1024
// Seconds
#define DEFAULT_TTL 10
static FILTER *createInstance(const char *name, char **options, FILTER_PARAMETER **);
static void *newSession(FILTER *instance, SESSION *session);
@ -88,16 +98,32 @@ FILTER_OBJECT *GetModuleObject()
// Implementation
//
typedef struct cache_config
{
cache_references_t allowed_references;
uint32_t max_resultset_size;
const char *storage;
const char *storage_args;
uint32_t ttl;
} CACHE_CONFIG;
typedef struct cache_instance
{
const char *name;
const char *storage_name;
const char *storage_args;
uint32_t ttl; // Time to live in seconds.
CACHE_STORAGE_MODULE *module;
CACHE_STORAGE *storage;
const char *name;
CACHE_CONFIG config;
CACHE_STORAGE_MODULE *module;
CACHE_STORAGE *storage;
} CACHE_INSTANCE;
static const CACHE_CONFIG DEFAULT_CONFIG =
{
DEFAULT_ALLOWED_REFERENCES,
DEFAULT_MAX_RESULTSET_SIZE,
NULL,
NULL,
DEFAULT_TTL
};
typedef struct cache_session_data
{
CACHE_STORAGE_API *api; /**< The storage API to be used. */
@ -131,9 +157,7 @@ static bool route_using_cache(CACHE_INSTANCE *instance,
*/
static FILTER *createInstance(const char *name, char **options, FILTER_PARAMETER **params)
{
const char *storage_name = NULL;
const char *storage_args = NULL;
uint32_t ttl = DEFAULT_TTL;
CACHE_CONFIG config = DEFAULT_CONFIG;
bool error = false;
@ -141,13 +165,44 @@ static FILTER *createInstance(const char *name, char **options, FILTER_PARAMETER
{
const FILTER_PARAMETER *param = params[i];
if (strcmp(param->name, "storage_name") == 0)
if (strcmp(param->name, "allowed_references") == 0)
{
storage_name = param->value;
if (strcmp(param->value, "qualified") == 0)
{
config.allowed_references = CACHE_REFERENCES_QUALIFIED;
}
else if (strcmp(param->value, "any") == 0)
{
config.allowed_references = CACHE_REFERENCES_ANY;
}
else
{
MXS_ERROR("Unknown value '%s' for parameter '%s'.", param->value, param->name);
error = true;
}
}
else if (strcmp(param->name, "max_resultset_size") == 0)
{
int v = atoi(param->value);
if (v > 0)
{
config.max_resultset_size = v;
}
else
{
MXS_ERROR("The value of the configuration entry '%s' must "
"be an integer larger than 0.", param->name);
error = true;
}
}
else if (strcmp(param->name, "storage_args") == 0)
{
storage_args = param->value;
config.storage_args = param->value;
}
else if (strcmp(param->name, "storage") == 0)
{
config.storage = param->value;
}
else if (strcmp(param->name, "ttl") == 0)
{
@ -155,12 +210,12 @@ static FILTER *createInstance(const char *name, char **options, FILTER_PARAMETER
if (v > 0)
{
ttl = v;
config.ttl = v;
}
else
{
MXS_ERROR("The value of the configuration entry 'ttl' must "
"be an integer larger than 0.");
MXS_ERROR("The value of the configuration entry '%s' must "
"be an integer larger than 0.", param->name);
error = true;
}
}
@ -177,22 +232,20 @@ static FILTER *createInstance(const char *name, char **options, FILTER_PARAMETER
{
if ((cinstance = MXS_CALLOC(1, sizeof(CACHE_INSTANCE))) != NULL)
{
CACHE_STORAGE_MODULE *module = cache_storage_open(storage_name);
CACHE_STORAGE_MODULE *module = cache_storage_open(config.storage);
if (module)
{
CACHE_STORAGE *storage = module->api->createInstance(name, ttl, 0, NULL);
CACHE_STORAGE *storage = module->api->createInstance(name, config.ttl, 0, NULL);
if (storage)
{
cinstance->name = name;
cinstance->storage_name = storage_name;
cinstance->storage_args = storage_args;
cinstance->ttl = ttl;
cinstance->config = config;
cinstance->module = module;
cinstance->storage = storage;
MXS_NOTICE("Cache storage %s opened and initialized.", storage_name);
MXS_NOTICE("Cache storage %s opened and initialized.", config.storage);
}
else
{

View File

@ -0,0 +1 @@
add_subdirectory(storage_rocksdb)

View File

@ -0,0 +1,49 @@
# Build RocksDB as an external project. RocksDB itself requires GCC >= 4.7,
# so the build is skipped (and ROCKSDB_BUILT left unset) on older compilers.
if ((CMAKE_CXX_COMPILER_ID STREQUAL "GNU") AND (NOT (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.7)))
  message(STATUS "GCC >= 4.7, RocksDB is built.")

  set(ROCKSDB_REPO "https://github.com/facebook/rocksdb.git"
      CACHE STRING "RocksDB Git repository")

  # Release 4.9 of RocksDB
  set(ROCKSDB_TAG "v4.9"
      CACHE STRING "RocksDB Git tag")

  set(ROCKSDB_SUBPATH "/server/modules/filter/cache/storage/storage_rocksdb/RocksDB-prefix/src/RocksDB")
  set(ROCKSDB_ROOT "${CMAKE_BINARY_DIR}${ROCKSDB_SUBPATH}")

  # RocksDB uses a plain Makefile, so there is no configure step.
  # (The original had CONFIGURE_COMMAND "" listed twice; once is enough.)
  ExternalProject_Add(RocksDB
    GIT_REPOSITORY ${ROCKSDB_REPO}
    GIT_TAG ${ROCKSDB_TAG}
    BINARY_DIR ${ROCKSDB_ROOT}
    CONFIGURE_COMMAND ""
    BUILD_COMMAND make DISABLE_JEMALLOC=1 EXTRA_CXXFLAGS=-fPIC static_lib
    INSTALL_COMMAND "")

  set(ROCKSDB_BUILT TRUE CACHE INTERNAL "")
  set(ROCKSDB_INCLUDE_DIRS ${ROCKSDB_ROOT}/include ${ROCKSDB_ROOT})
  set(ROCKSDB_LIB_DIR ${ROCKSDB_ROOT})
  set(ROCKSDB_LIB librocksdb.a)

  # RocksDB supports several compression libraries and automatically
  # uses them if it finds the headers in the environment. Consequently,
  # we must ensure that a user of RocksDB can link to the needed
  # libraries.
  #
  # ROCKSDB_LINK_LIBS specifies the libraries a module using ROCKSDB_LIB
  # must link with.
  find_package(BZip2)
  if (BZIP2_FOUND)
    set(ROCKSDB_LINK_LIBS ${ROCKSDB_LINK_LIBS} ${BZIP2_LIBRARIES})
  endif()

  find_package(ZLIB)
  if (ZLIB_FOUND)
    set(ROCKSDB_LINK_LIBS ${ROCKSDB_LINK_LIBS} ${ZLIB_LIBRARIES})
  endif()
else()
  message(STATUS "RocksDB requires GCC >= 4.7, only ${CMAKE_CXX_COMPILER_VERSION} available.")
endif()

View File

@ -0,0 +1,26 @@
include(BuildRocksDB.cmake)

if (ROCKSDB_BUILT)
  message(STATUS "RocksDB is built, storage_rocksdb will be built.")

  link_directories(${ROCKSDB_LIB_DIR})

  add_library(storage_rocksdb SHARED
    rocksdbinternals.cc
    rocksdbstorage.cc
    storage_rocksdb.cc
  )

  # Target-scoped flags instead of mutating CMAKE_CXX_FLAGS and all three
  # per-config variants, which duplicated -std=c++11 and
  # -DROCKSDB_PLATFORM_POSIX on every compile line and leaked them to any
  # other target defined in this directory.
  target_compile_options(storage_rocksdb PRIVATE -std=c++11)
  target_compile_definitions(storage_rocksdb PRIVATE ROCKSDB_PLATFORM_POSIX)
  target_include_directories(storage_rocksdb PRIVATE ${ROCKSDB_INCLUDE_DIRS})

  # RocksDB is an ExternalProject, not a CMake target we can link against,
  # so build ordering must be expressed explicitly.
  add_dependencies(storage_rocksdb RocksDB)
  target_link_libraries(storage_rocksdb maxscale-common ${ROCKSDB_LIB} ${ROCKSDB_LINK_LIBS})
  set_target_properties(storage_rocksdb PROPERTIES VERSION "1.0.0")
  # -z,defs: fail the link if any symbol is left undefined.
  set_target_properties(storage_rocksdb PROPERTIES LINK_FLAGS -Wl,-z,defs)
  install_module(storage_rocksdb experimental)
else()
  message(STATUS "RocksDB not built, storage_rocksdb cannot be built.")
endif()

View File

@ -0,0 +1,46 @@
/*
* Copyright (c) 2016 MariaDB Corporation Ab
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file and at www.mariadb.com/bsl.
*
* Change Date: 2019-07-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2 or later of the General
* Public License.
*/
#include "rocksdbinternals.h"
#include <rocksdb/env.h>
#include <util/coding.h>
/**
 * Tell whether a stored value has outlived its time-to-live.
 *
 * @param value A value with the 32-bit write timestamp stashed at the end.
 * @param ttl   The time-to-live in seconds; non-positive disables expiry.
 * @param pEnv  The used RocksDB environment instance.
 *
 * @return True if the value is stale.
 *
 * Essentially a copy of RocksDB/utilities/ttl/db_ttl_impl.cc:160, but note
 * that here we claim the data is stale if we fail to get the current time,
 * while the original code claims it is fresh in that case.
 */
bool RocksDBInternals::IsStale(const rocksdb::Slice& value, int32_t ttl, rocksdb::Env* pEnv)
{
    bool stale = false;

    if (ttl > 0)
    {
        int64_t now;

        if (pEnv->GetCurrentTime(&now).ok())
        {
            // The write timestamp occupies the last TS_LENGTH bytes of the value.
            int32_t written = rocksdb::DecodeFixed32(value.data() + value.size() - TS_LENGTH);
            stale = (written + ttl) < now;
        }
        else
        {
            // Could not obtain the time; treat the data as stale.
            stale = true;
        }
    }

    return stale;
}

View File

@ -0,0 +1,41 @@
#pragma once
#ifndef _ROCKSDBINTERNALS_H
#define _ROCKSDBINTERNALS_H
/*
* Copyright (c) 2016 MariaDB Corporation Ab
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file and at www.mariadb.com/bsl.
*
* Change Date: 2019-07-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2 or later of the General
* Public License.
*/
#include "storage_rocksdb.h"
#include <rocksdb/env.h>
#include <rocksdb/version.h>
#include <rocksdb/slice.h>
#if (ROCKSDB_MAJOR != 4) || (ROCKSDB_MINOR != 9)
#error RocksDBStorage was created with knowledge of RocksDB 4.9 internals.\
The version used is something else. Ensure the knowledge is still applicable.
#endif
namespace RocksDBInternals
{

/**
 * The length of the timestamp when stashed after the actual value.
 *
 * See RocksDB/utilities/ttl/db_ttl_impl.h
 */
static const uint32_t TS_LENGTH = sizeof(int32_t);

// Tells whether a value (carrying its trailing timestamp) has expired.
// Defined in rocksdbinternals.cc.
bool IsStale(const rocksdb::Slice& slice, int32_t ttl, rocksdb::Env* pEnv);

}
#endif

View File

@ -0,0 +1,201 @@
/*
* Copyright (c) 2016 MariaDB Corporation Ab
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file and at www.mariadb.com/bsl.
*
* Change Date: 2019-07-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2 or later of the General
* Public License.
*/
#include "rocksdbstorage.h"
#include <openssl/sha.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <rocksdb/env.h>
#include <gwdirs.h>
#include "rocksdbinternals.h"
using std::string;
using std::unique_ptr;
namespace
{
string u_storageDirectory;
const size_t ROCKSDB_KEY_LENGTH = SHA512_DIGEST_LENGTH;
// See https://github.com/facebook/rocksdb/wiki/Basic-Operations#thread-pools
// These figures should perhaps depend upon the number of cache instances.
const size_t ROCKSDB_N_LOW_THREADS = 2;
const size_t ROCKSDB_N_HIGH_THREADS = 1;
}
//private
// Takes ownership of the opened database handle; only reachable via Create().
RocksDBStorage::RocksDBStorage(unique_ptr<rocksdb::DBWithTTL>& sDb,
                               const string& name,
                               const string& path,
                               uint32_t ttl)
    : m_sDb(std::move(sDb)) // Ownership transferred from the caller's unique_ptr.
    , m_name(name)
    , m_path(path)
    , m_ttl(ttl)
{
}

RocksDBStorage::~RocksDBStorage()
{
}
//static
/**
 * One-time initialization of the storage module: creates (or reuses) the
 * storage directory under the cache directory and sizes RocksDB's shared
 * background thread pools.
 *
 * @return True if the storage directory exists (created now or earlier),
 *         false if it could not be created.
 */
bool RocksDBStorage::Initialize()
{
    bool initialized = true;

    u_storageDirectory = get_cachedir();
    u_storageDirectory += "/storage_rocksdb";

    if (mkdir(u_storageDirectory.c_str(), S_IRWXU) == 0)
    {
        MXS_NOTICE("Created storage directory %s.", u_storageDirectory.c_str());
    }
    else if (errno != EEXIST)
    {
        initialized = false;

        char errbuf[STRERROR_BUFLEN];
        MXS_ERROR("Failed to create storage directory %s: %s",
                  u_storageDirectory.c_str(),
                  strerror_r(errno, errbuf, sizeof(errbuf)));
    }

    if (initialized)
    {
        // Size the thread pools whenever initialization succeeded. Previously
        // this was done only in the errno == EEXIST branch, so on a fresh
        // installation (directory just created) the pools were never sized.
        auto pEnv = rocksdb::Env::Default();
        pEnv->SetBackgroundThreads(ROCKSDB_N_LOW_THREADS, rocksdb::Env::LOW);
        pEnv->SetBackgroundThreads(ROCKSDB_N_HIGH_THREADS, rocksdb::Env::HIGH);
    }

    return initialized;
}
//static
/**
 * Creates a RocksDB-backed storage instance in its own subdirectory of the
 * common storage directory set up by Initialize().
 *
 * @param zName Name of the cache; used as the database directory name.
 * @param ttl   Time-to-live in seconds, handed to DBWithTTL.
 * @param argc  Currently unused.
 * @param argv  Currently unused.
 *
 * @return New storage instance, or nullptr if the database could not be opened.
 */
RocksDBStorage* RocksDBStorage::Create(const char* zName, uint32_t ttl, int argc, char* argv[])
{
    ss_dassert(zName);
    string path(u_storageDirectory);
    path += "/";
    path += zName;
    rocksdb::Options options;
    options.env = rocksdb::Env::Default();
    // Match the pool sizes configured in Initialize().
    options.max_background_compactions = ROCKSDB_N_LOW_THREADS;
    options.max_background_flushes = ROCKSDB_N_HIGH_THREADS;
    options.create_if_missing = true;
    rocksdb::DBWithTTL* pDb;
    rocksdb::Status status = rocksdb::DBWithTTL::Open(options, path, &pDb, ttl);
    RocksDBStorage* pStorage = nullptr;
    if (status.ok())
    {
        // Wrap in unique_ptr so pDb is released if the constructor throws.
        unique_ptr<rocksdb::DBWithTTL> sDb(pDb);
        pStorage = new RocksDBStorage(sDb, zName, path, ttl);
    }
    else
    {
        MXS_ERROR("Could not open RocksDB database %s using path %s: %s",
                  zName, path.c_str(), status.ToString().c_str());
    }
    return pStorage;
}
/**
 * Derives the cache key for a query: the SHA512 digest of the SQL text of
 * the MySQL COM_QUERY packet. pKey must have room for CACHE_KEY_MAXLEN bytes.
 * NOTE(review): assumes pQuery is a contiguous buffer containing a complete
 * packet — see the disabled assert; confirm callers guarantee this.
 */
cache_result_t RocksDBStorage::getKey(const GWBUF* pQuery, char* pKey)
{
    // ss_dassert(gwbuf_is_contiguous(pQuery));
    const uint8_t* pData = static_cast<const uint8_t*>(GWBUF_DATA(pQuery));
    size_t len = MYSQL_GET_PACKET_LEN(pData) - 1; // Subtract 1 for packet type byte.
    const uint8_t* pSql = &pData[5]; // 4-byte header + 1-byte command; symbolic constant for 5?
    memset(pKey, 0, CACHE_KEY_MAXLEN);
    SHA512(pSql, len, reinterpret_cast<unsigned char*>(pKey));
    return CACHE_RESULT_OK;
}
/**
 * Looks up a cached resultset by key.
 *
 * @param pKey     Key of ROCKSDB_KEY_LENGTH bytes.
 * @param ppResult On CACHE_RESULT_OK, set to a freshly allocated GWBUF with
 *                 the value (without the trailing timestamp). Caller frees.
 *
 * @return CACHE_RESULT_OK, CACHE_RESULT_NOT_FOUND, or CACHE_RESULT_ERROR.
 *         NOTE(review): a stale item also yields CACHE_RESULT_ERROR, not
 *         NOT_FOUND — confirm that is intended.
 */
cache_result_t RocksDBStorage::getValue(const char* pKey, GWBUF** ppResult)
{
    // Use the root DB so that we get the value *with* the timestamp at the end.
    rocksdb::DB* pDb = m_sDb->GetRootDB();
    rocksdb::Slice key(pKey, ROCKSDB_KEY_LENGTH);
    string value;
    rocksdb::Status status = pDb->Get(rocksdb::ReadOptions(), key, &value);
    cache_result_t result = CACHE_RESULT_ERROR;
    switch (status.code())
    {
    case rocksdb::Status::kOk:
        // A valid value is at least as long as the stashed timestamp.
        if (value.length() >= RocksDBInternals::TS_LENGTH)
        {
            if (!RocksDBInternals::IsStale(value, m_ttl, rocksdb::Env::Default()))
            {
                // Strip the trailing timestamp before handing the value out.
                size_t length = value.length() - RocksDBInternals::TS_LENGTH;
                *ppResult = gwbuf_alloc(length);
                if (*ppResult)
                {
                    memcpy(GWBUF_DATA(*ppResult), value.data(), length);
                    result = CACHE_RESULT_OK;
                }
            }
            else
            {
                MXS_NOTICE("Cache item is stale, not using.");
            }
        }
        else
        {
            MXS_ERROR("RocksDB value too short. Database corrupted?");
            result = CACHE_RESULT_ERROR;
        }
        break;
    case rocksdb::Status::kNotFound:
        result = CACHE_RESULT_NOT_FOUND;
        break;
    default:
        MXS_ERROR("Failed to look up value: %s", status.ToString().c_str());
    }
    return result;
}
/**
 * Stores the buffer contents under the given key; the DBWithTTL wrapper
 * appends the expiry timestamp itself.
 */
cache_result_t RocksDBStorage::putValue(const char* pKey, const GWBUF* pValue)
{
    // ss_dassert(gwbuf_is_contiguous(pValue));
    const char* pData = static_cast<const char*>(GWBUF_DATA(pValue));

    rocksdb::Status status = m_sDb->Put(rocksdb::WriteOptions(),
                                        rocksdb::Slice(pKey, ROCKSDB_KEY_LENGTH),
                                        rocksdb::Slice(pData, GWBUF_LENGTH(pValue)));

    if (status.ok())
    {
        return CACHE_RESULT_OK;
    }

    return CACHE_RESULT_ERROR;
}

View File

@ -0,0 +1,50 @@
#ifndef _ROCKSDBSTORAGE_H
#define _ROCKSDBSTORAGE_H
/*
* Copyright (c) 2016 MariaDB Corporation Ab
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file and at www.mariadb.com/bsl.
*
* Change Date: 2019-07-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2 or later of the General
* Public License.
*/
#include "storage_rocksdb.h"
#include <memory>
#include <string>
#include <rocksdb/utilities/db_ttl.h>
#include "../../cache_storage_api.h"
// RocksDB-backed cache storage. Instances are created via the static
// Create() factory after a single process-wide Initialize() call.
class RocksDBStorage
{
public:
    // One-time module setup: storage directory and thread pools.
    static bool Initialize();
    // Factory; returns nullptr on failure. argc/argv currently unused.
    static RocksDBStorage* Create(const char* zName, uint32_t ttl, int argc, char* argv[]);
    ~RocksDBStorage();

    // Compute the cache key for a query packet into pKey.
    cache_result_t getKey(const GWBUF* pQuery, char* pKey);
    // Fetch a cached value; allocates *ppResult on success.
    cache_result_t getValue(const char* pKey, GWBUF** ppResult);
    // Store a value under the key.
    cache_result_t putValue(const char* pKey, const GWBUF* pValue);

private:
    RocksDBStorage(std::unique_ptr<rocksdb::DBWithTTL>& sDb,
                   const std::string& name,
                   const std::string& path,
                   uint32_t ttl);

    // Non-copyable.
    RocksDBStorage(const RocksDBStorage&) = delete;
    RocksDBStorage& operator = (const RocksDBStorage&) = delete;

private:
    std::unique_ptr<rocksdb::DBWithTTL> m_sDb; // Owned database handle.
    std::string m_name;                        // Cache name.
    std::string m_path;                        // Database directory path.
    uint32_t m_ttl;                            // Time-to-live in seconds.
};
#endif

View File

@ -0,0 +1,151 @@
/*
* Copyright (c) 2016 MariaDB Corporation Ab
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file and at www.mariadb.com/bsl.
*
* Change Date: 2019-07-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2 or later of the General
* Public License.
*/
#include "storage_rocksdb.h"
#include "../../cache_storage_api.h"
#include "rocksdbstorage.h"
namespace
{
// Thin C-API wrapper; delegates to the C++ implementation.
bool initialize()
{
    return RocksDBStorage::Initialize();
}

// Creates a storage instance. This is a C API boundary, so no C++ exception
// may escape; all are caught and converted to a nullptr return.
CACHE_STORAGE* createInstance(const char* zName, uint32_t ttl, int argc, char* argv[])
{
    CACHE_STORAGE* pStorage = 0;
    try
    {
        pStorage = reinterpret_cast<CACHE_STORAGE*>(RocksDBStorage::Create(zName, ttl, argc, argv));
    }
    catch (const std::bad_alloc&)
    {
        MXS_OOM();
    }
    catch (const std::exception& x)
    {
        MXS_ERROR("Standard exception caught: %s", x.what());
    }
    catch (...)
    {
        MXS_ERROR("Unknown exception caught.");
    }
    return pStorage;
}

// Destroys an instance previously returned by createInstance.
void freeInstance(CACHE_STORAGE* pInstance)
{
    delete reinterpret_cast<RocksDBStorage*>(pInstance);
}
// C-API wrapper for RocksDBStorage::getKey. Exceptions are caught at this
// boundary and reported as CACHE_RESULT_ERROR.
cache_result_t getKey(CACHE_STORAGE* pStorage,
                      const GWBUF* pQuery,
                      char* pKey)
{
    cache_result_t result = CACHE_RESULT_ERROR;
    try
    {
        result = reinterpret_cast<RocksDBStorage*>(pStorage)->getKey(pQuery, pKey);
    }
    catch (const std::bad_alloc&)
    {
        MXS_OOM();
    }
    catch (const std::exception& x)
    {
        MXS_ERROR("Standard exception caught: %s", x.what());
    }
    catch (...)
    {
        MXS_ERROR("Unknown exception caught.");
    }
    return result;
}

// C-API wrapper for RocksDBStorage::getValue; same exception policy as above.
cache_result_t getValue(CACHE_STORAGE* pStorage, const char* pKey, GWBUF** ppResult)
{
    cache_result_t result = CACHE_RESULT_ERROR;
    try
    {
        result = reinterpret_cast<RocksDBStorage*>(pStorage)->getValue(pKey, ppResult);
    }
    catch (const std::bad_alloc&)
    {
        MXS_OOM();
    }
    catch (const std::exception& x)
    {
        MXS_ERROR("Standard exception caught: %s", x.what());
    }
    catch (...)
    {
        MXS_ERROR("Unknown exception caught.");
    }
    return result;
}
// C-API wrapper for RocksDBStorage::putValue; exceptions are caught at this
// boundary and reported as CACHE_RESULT_ERROR.
cache_result_t putValue(CACHE_STORAGE* pStorage,
                        const char* pKey,
                        const GWBUF* pValue)
{
    cache_result_t result = CACHE_RESULT_ERROR;
    try
    {
        result = reinterpret_cast<RocksDBStorage*>(pStorage)->putValue(pKey, pValue);
    }
    catch (const std::bad_alloc&)
    {
        MXS_OOM();
    }
    catch (const std::exception& x)
    {
        MXS_ERROR("Standard exception caught: %s", x.what());
    }
    catch (...)
    {
        MXS_ERROR("Unknown exception caught.");
    }
    return result;
}
}
extern "C"
{

// Module entry point: returns the vtable of storage callbacks this plugin
// exposes to the cache filter. The table is a function-local static, so it
// is initialized once and remains valid for the life of the process.
CACHE_STORAGE_API* CacheGetStorageAPI()
{
    static CACHE_STORAGE_API api =
    {
        initialize,
        createInstance,
        freeInstance,
        getKey,
        getValue,
        putValue
    };

    return &api;
}

}

View File

@ -0,0 +1,19 @@
#ifndef _STORAGE_ROCKSDB_H
#define _STORAGE_ROCKSDB_H
/*
* Copyright (c) 2016 MariaDB Corporation Ab
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file and at www.mariadb.com/bsl.
*
* Change Date: 2019-07-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2 or later of the General
* Public License.
*/
#define MXS_MODULE_NAME "storage_rocksdb"
#include <log_manager.h>
#endif

View File

@ -719,6 +719,7 @@ monitorDatabase(MONITOR *mon, MONITOR_SERVERS *database)
/* Also clear M/S state in both server and monitor server pending struct */
server_clear_status(database->server, SERVER_SLAVE);
server_clear_status(database->server, SERVER_MASTER);
server_clear_status(database->server, SERVER_RELAY_MASTER);
monitor_clear_pending_status(database, SERVER_SLAVE);
monitor_clear_pending_status(database, SERVER_MASTER);
monitor_clear_pending_status(database, SERVER_RELAY_MASTER);
@ -1207,7 +1208,8 @@ monitorMain(void *arg)
MYSQL_SERVER_INFO *serv_info = hashtable_fetch(handle->server_info, ptr->server->unique_name);
ss_dassert(serv_info);
if (getSlaveOfNodeId(mon->databases, ptr->server->node_id) &&
if (ptr->server->node_id > 0 && ptr->server->master_id > 0 &&
getSlaveOfNodeId(mon->databases, ptr->server->node_id) &&
getServerByNodeId(mon->databases, ptr->server->master_id) &&
(!handle->multimaster || serv_info->group == 0))
{