MXS-2067: Replace most SPINLOCKs
Replaced SPINLOCK with std::mutex where possible, leaving out the more complex cases. The big offenders remaining are the binlogrouter and the gateway.cc OpenSSL locks.
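The change follows one pattern throughout: a manual spinlock_acquire()/spinlock_release() pair around a critical section becomes a std::mutex guarded by an RAII lock. A minimal sketch of the before/after shape, with hypothetical names (counter_lock, counter, increment_counter are illustrations, not code from this commit):

    #include <mutex>

    // Before: manual acquire/release around the critical section.
    //   static SPINLOCK counter_lock = SPINLOCK_INIT;
    //   spinlock_acquire(&counter_lock);
    //   ++counter;
    //   spinlock_release(&counter_lock);

    // After: a std::mutex with an RAII guard; the lock is released
    // automatically on every return path, including exceptions.
    static std::mutex counter_lock;
    static int counter = 0;

    void increment_counter()
    {
        std::lock_guard<std::mutex> guard(counter_lock);
        ++counter;
    }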
parent 50451166bb
commit ab4f870927
@@ -18,10 +18,22 @@

MXS_BEGIN_DECLS

#define SPINLOCK pthread_mutex_t
typedef pthread_mutex_t SPINLOCK;
#define SPINLOCK_INIT PTHREAD_MUTEX_INITIALIZER
#define spinlock_init(a) pthread_mutex_init(a, NULL)
#define spinlock_acquire(a) pthread_mutex_lock((pthread_mutex_t*)a)
#define spinlock_release(a) pthread_mutex_unlock((pthread_mutex_t*)a)

static inline void spinlock_init(SPINLOCK* a)
{
    pthread_mutex_init(a, NULL);
}

static inline void spinlock_acquire(const SPINLOCK* a)
{
    pthread_mutex_lock((SPINLOCK*)a);
}

static inline void spinlock_release(const SPINLOCK* a)
{
    pthread_mutex_unlock((SPINLOCK*)a);
}

MXS_END_DECLS
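The header keeps the old C entry points as inline wrappers over pthread_mutex_t, so remaining spinlock_* call sites compile unchanged. A hypothetical caller, sketched against the definitions in the hunk above (list_lock and list_append are made-up names):

    static SPINLOCK list_lock = SPINLOCK_INIT;

    void list_append(int value)
    {
        spinlock_acquire(&list_lock);   /* now pthread_mutex_lock() underneath */
        /* ... mutate the shared structure ... */
        spinlock_release(&list_lock);
    }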
@@ -409,11 +409,10 @@ void mxs_crypt(const char* password, const char* salt, char* output)
    char* pw = crypt_r(password, salt, &cdata);
    snprintf(output, MXS_CRYPT_SIZE, "%s", pw);
#else
    static SPINLOCK mxs_crypt_lock = SPINLOCK_INIT;
    spinlock_acquire(&mxs_crypt_lock);
    static std::mutex mxs_crypt_lock;
    std::lock_guard<std::mutex> guard(mxs_crypt_lock);
    char* pw = crypt(password, salt);
    snprintf(output, MXS_CRYPT_SIZE, "%s", pw);
    spinlock_release(&mxs_crypt_lock);
#endif
}
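Declaring the lock inside the function works because C++11 guarantees thread-safe, once-only initialization of function-local statics; the lock still serializes the non-reentrant crypt(3) call. A standalone sketch of the same pattern, with a placeholder in place of the real crypt() call (serialized_crypt is a hypothetical name):

    #include <mutex>
    #include <string>

    // crypt(3) uses static storage and is not thread-safe, so callers must be
    // serialized. The static mutex is initialized exactly once even if several
    // threads enter concurrently.
    std::string serialized_crypt(const char* password, const char* salt)
    {
        static std::mutex crypt_lock;
        std::lock_guard<std::mutex> guard(crypt_lock);
        return std::string(password) + salt;    // placeholder for the crypt() call
    }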
@@ -960,17 +960,16 @@ static bool config_load_dir(const char* dir, DUPLICATE_CONTEXT* dcontext, CONFIG
    // Since there is no way to pass userdata to the callback, we need to store
    // the current context into a static variable. Consequently, we need lock.
    // Should not matter since config_load() is called once at startup.
    static SPINLOCK lock = SPINLOCK_INIT;
    static std::mutex lock;
    std::lock_guard<std::mutex> guard(lock);

    int nopenfd = 5;    // Maximum concurrently opened directory descriptors

    spinlock_acquire(&lock);
    current_dcontext = dcontext;
    current_ccontext = ccontext;
    int rv = nftw(dir, config_cb, nopenfd, FTW_PHYS);
    current_ccontext = NULL;
    current_dcontext = NULL;
    spinlock_release(&lock);

    return rv == 0;
}
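Because nftw() has no user-data argument, the walk context is parked in file-scope statics for the duration of the traversal, and the mutex keeps concurrent walkers from overwriting each other's context. A minimal standalone sketch of the same idea (count_cb, count_entries and current_counter are hypothetical names):

    #include <ftw.h>
    #include <sys/stat.h>
    #include <mutex>

    static std::mutex walk_lock;
    static int* current_counter = nullptr;    // stand-in for the real context

    static int count_cb(const char*, const struct stat*, int, struct FTW*)
    {
        ++*current_counter;    // the callback reaches the context via the static
        return 0;
    }

    // Counts entries under 'dir'; the guard serializes walkers so the static
    // context cannot be replaced in the middle of a walk.
    int count_entries(const char* dir)
    {
        std::lock_guard<std::mutex> guard(walk_lock);
        int count = 0;
        current_counter = &count;
        int rv = nftw(dir, count_cb, 5, FTW_PHYS);
        current_counter = nullptr;
        return rv == 0 ? count : -1;
    }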
@@ -49,7 +49,7 @@ typedef struct modulecmd_domain

/** The global list of registered domains */
static MODULECMD_DOMAIN* modulecmd_domains = NULL;
static SPINLOCK modulecmd_lock = SPINLOCK_INIT;
static std::mutex modulecmd_lock;

static inline void prepare_error()
{
@@ -434,7 +434,7 @@ bool modulecmd_register_command(const char* domain,
{
    reset_error();
    bool rval = false;
    spinlock_acquire(&modulecmd_lock);
    std::lock_guard<std::mutex> guard(modulecmd_lock);

    MODULECMD_DOMAIN* dm = get_or_create_domain(domain);

@@ -464,8 +464,6 @@ bool modulecmd_register_command(const char* domain,
        }
    }

    spinlock_release(&modulecmd_lock);

    return rval;
}

@@ -476,7 +474,7 @@ const MODULECMD* modulecmd_find_command(const char* domain, const char* identifi
    const char* effective_domain = mxs_module_get_effective_name(domain);

    MODULECMD* rval = NULL;
    spinlock_acquire(&modulecmd_lock);
    std::lock_guard<std::mutex> guard(modulecmd_lock);

    for (MODULECMD_DOMAIN* dm = modulecmd_domains; dm; dm = dm->next)
    {
@@ -494,8 +492,6 @@ const MODULECMD* modulecmd_find_command(const char* domain, const char* identifi
        }
    }

    spinlock_release(&modulecmd_lock);

    if (rval == NULL)
    {
        modulecmd_set_error("Command not found: %s::%s", domain, identifier);
@@ -641,7 +637,7 @@ bool modulecmd_foreach(const char* domain_re,
{
    bool rval = true;
    bool stop = false;
    spinlock_acquire(&modulecmd_lock);
    std::lock_guard<std::mutex> guard(modulecmd_lock);

    for (MODULECMD_DOMAIN* domain = modulecmd_domains; domain && rval && !stop; domain = domain->next)
    {
@@ -686,7 +682,6 @@ bool modulecmd_foreach(const char* domain_re,
        }
    }

    spinlock_release(&modulecmd_lock);
    return rval;
}
@@ -19,6 +19,7 @@
#include <strings.h>

#include <iterator>
#include <mutex>

#include <maxscale/alloc.h>
#include <maxscale/buffer.h>
@@ -30,7 +31,6 @@
#include <maxscale/mysql_utils.h>

/** These are used when converting MySQL wildcards to regular expressions */
static SPINLOCK re_lock = SPINLOCK_INIT;
static bool pattern_init = false;
static pcre2_code* re_percent = NULL;
static pcre2_code* re_single = NULL;
@@ -1113,7 +1113,9 @@ int modutil_count_packets(GWBUF* buffer)
 */
void prepare_pcre2_patterns()
{
    spinlock_acquire(&re_lock);
    static std::mutex re_lock;
    std::lock_guard<std::mutex> guard(re_lock);

    if (!pattern_init)
    {
        int err;
@@ -1158,7 +1160,6 @@ void prepare_pcre2_patterns()
            re_escape = NULL;
        }
    }
    spinlock_release(&re_lock);
}

/**
@@ -26,6 +26,7 @@
#include <zlib.h>
#include <sys/stat.h>
#include <vector>
#include <mutex>

#include <maxscale/alloc.h>
#include <maxbase/atomic.hh>
@@ -80,7 +81,7 @@ const char CN_SCRIPT[] = "script";
const char CN_SCRIPT_TIMEOUT[] = "script_timeout";

static MXS_MONITOR* allMonitors = NULL;
static SPINLOCK monLock = SPINLOCK_INIT;
static std::mutex monLock;

static void monitor_server_free_all(MXS_MONITORED_SERVER* servers);
static void remove_server_journal(MXS_MONITOR* monitor);
@@ -173,10 +174,9 @@ MXS_MONITOR* monitor_create(const char* name, const char* module, MXS_CONFIG_PAR
        return NULL;
    }

    spinlock_acquire(&monLock);
    std::lock_guard<std::mutex> guard(monLock);
    mon->next = allMonitors;
    allMonitors = mon;
    spinlock_release(&monLock);

    return mon;
}
@@ -191,7 +191,8 @@ void monitor_destroy(MXS_MONITOR* mon)
{
    MXS_MONITOR* ptr;

    spinlock_acquire(&monLock);
    std::unique_lock<std::mutex> guard(monLock);

    if (allMonitors == mon)
    {
        allMonitors = mon->next;
@@ -208,7 +209,9 @@ void monitor_destroy(MXS_MONITOR* mon)
            ptr->next = mon->next;
        }
    }
    spinlock_release(&monLock);

    guard.unlock();

    mon->api->destroyInstance(mon->instance);
    delete mon->disk_space_threshold;
    config_parameter_free(mon->parameters);
@@ -271,8 +274,8 @@ void monitor_start(MXS_MONITOR* monitor, const MXS_CONFIG_PARAMETER* params)
void monitor_start_all()
{
    MXS_MONITOR* ptr;
    std::lock_guard<std::mutex> guard(monLock);

    spinlock_acquire(&monLock);
    ptr = allMonitors;
    while (ptr)
    {
@@ -282,7 +285,6 @@ void monitor_start_all()
        }
        ptr = ptr->next;
    }
    spinlock_release(&monLock);
}

/**
@@ -319,9 +321,8 @@ void monitor_stop(MXS_MONITOR* monitor)

void monitor_deactivate(MXS_MONITOR* monitor)
{
    spinlock_acquire(&monLock);
    std::lock_guard<std::mutex> guard(monLock);
    monitor->active = false;
    spinlock_release(&monLock);
}

/**
@@ -329,19 +330,15 @@ void monitor_deactivate(MXS_MONITOR* monitor)
 */
void monitor_stop_all()
{
    spinlock_acquire(&monLock);
    std::lock_guard<std::mutex> guard(monLock);

    MXS_MONITOR* monitor = allMonitors;
    while (monitor)
    for (MXS_MONITOR* monitor = allMonitors; monitor; monitor = monitor->next)
    {
        if (monitor->active)
        {
            monitor_stop(monitor);
        }
        monitor = monitor->next;
    }

    spinlock_release(&monLock);
}

/**
@@ -515,19 +512,15 @@ void monitor_add_user(MXS_MONITOR* mon, const char* user, const char* passwd)
 */
void monitor_show_all(DCB* dcb)
{
    MXS_MONITOR* ptr;
    std::lock_guard<std::mutex> guard(monLock);

    spinlock_acquire(&monLock);
    ptr = allMonitors;
    while (ptr)
    for (MXS_MONITOR* ptr = allMonitors; ptr; ptr = ptr->next)
    {
        if (ptr->active)
        {
            monitor_show(dcb, ptr);
        }
        ptr = ptr->next;
    }
    spinlock_release(&monLock);
}

/**
@@ -585,7 +578,7 @@ void monitor_list(DCB* dcb)
{
    MXS_MONITOR* ptr;

    spinlock_acquire(&monLock);
    std::lock_guard<std::mutex> guard(monLock);
    ptr = allMonitors;
    dcb_printf(dcb, "---------------------+---------------------\n");
    dcb_printf(dcb, "%-20s | Status\n", "Monitor");
@@ -603,7 +596,6 @@ void monitor_list(DCB* dcb)
        ptr = ptr->next;
    }
    dcb_printf(dcb, "---------------------+---------------------\n");
    spinlock_release(&monLock);
}

/**
@@ -614,20 +606,17 @@ void monitor_list(DCB* dcb)
 */
MXS_MONITOR* monitor_find(const char* name)
{
    MXS_MONITOR* ptr;
    std::lock_guard<std::mutex> guard(monLock);

    spinlock_acquire(&monLock);
    ptr = allMonitors;
    while (ptr)
    for (MXS_MONITOR* ptr = allMonitors; ptr; ptr = ptr->next)
    {
        if (!strcmp(ptr->name, name) && ptr->active)
        {
            break;
            return ptr;
        }
        ptr = ptr->next;
    }
    spinlock_release(&monLock);
    return ptr;

    return nullptr;
}
/**
 * Find a destroyed monitor by name
@@ -638,8 +627,7 @@ MXS_MONITOR* monitor_find(const char* name)
MXS_MONITOR* monitor_repurpose_destroyed(const char* name, const char* module)
{
    MXS_MONITOR* rval = NULL;

    spinlock_acquire(&monLock);
    std::lock_guard<std::mutex> guard(monLock);

    for (MXS_MONITOR* ptr = allMonitors; ptr; ptr = ptr->next)
    {
@@ -651,8 +639,6 @@ MXS_MONITOR* monitor_repurpose_destroyed(const char* name, const char* module)
        }
    }

    spinlock_release(&monLock);

    return rval;
}

@@ -737,7 +723,7 @@ bool monitor_set_network_timeout(MXS_MONITOR* mon, int type, int value, const ch
std::unique_ptr<ResultSet> monitor_get_list()
{
    std::unique_ptr<ResultSet> set = ResultSet::create({"Monitor", "Status"});
    spinlock_acquire(&monLock);
    std::lock_guard<std::mutex> guard(monLock);

    for (MXS_MONITOR* ptr = allMonitors; ptr; ptr = ptr->next)
    {
@@ -745,7 +731,6 @@ std::unique_ptr<ResultSet> monitor_get_list()
        set->add_row({ptr->name, state});
    }

    spinlock_release(&monLock);
    return set;
}

@@ -1557,8 +1542,7 @@ static void mon_log_state_change(MXS_MONITORED_SERVER* ptr)
MXS_MONITOR* monitor_server_in_use(const SERVER* server)
{
    MXS_MONITOR* rval = NULL;

    spinlock_acquire(&monLock);
    std::lock_guard<std::mutex> guard(monLock);

    for (MXS_MONITOR* mon = allMonitors; mon && !rval; mon = mon->next)
    {
@@ -1578,8 +1562,6 @@ MXS_MONITOR* monitor_server_in_use(const SERVER* server)
        spinlock_release(&mon->lock);
    }

    spinlock_release(&monLock);

    return rval;
}

@@ -1865,7 +1847,7 @@ json_t* monitor_list_to_json(const char* host)
{
    json_t* rval = json_array();

    spinlock_acquire(&monLock);
    std::unique_lock<std::mutex> guard(monLock);

    for (MXS_MONITOR* mon = allMonitors; mon; mon = mon->next)
    {
@@ -1880,7 +1862,7 @@ json_t* monitor_list_to_json(const char* host)
        }
    }

    spinlock_release(&monLock);
    guard.unlock();

    return mxs_json_resource(host, MXS_JSON_API_MONITORS, rval);
}
@@ -1888,7 +1870,7 @@ json_t* monitor_list_to_json(const char* host)
json_t* monitor_relations_to_server(const SERVER* server, const char* host)
{
    std::vector<std::string> names;
    spinlock_acquire(&monLock);
    std::unique_lock<std::mutex> guard(monLock);

    for (MXS_MONITOR* mon = allMonitors; mon; mon = mon->next)
    {
@@ -1909,7 +1891,7 @@ json_t* monitor_relations_to_server(const SERVER* server, const char* host)
        spinlock_release(&mon->lock);
    }

    spinlock_release(&monLock);
    guard.unlock();

    json_t* rel = NULL;
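Where the critical section ends before the function does, as in monitor_destroy() and monitor_list_to_json() above, the patch uses std::unique_lock so the mutex can be dropped explicitly with guard.unlock() before the remaining work. A minimal sketch of that shape with hypothetical names (registry, snapshot_and_report):

    #include <mutex>
    #include <vector>

    static std::mutex registry_lock;
    static std::vector<int> registry;

    int snapshot_and_report()
    {
        std::unique_lock<std::mutex> guard(registry_lock);
        std::vector<int> copy = registry;    // work that needs the lock
        guard.unlock();                      // release before the slow part

        // Slow, lock-free post-processing (stands in for destroyInstance()
        // or building the JSON response in the real code).
        int sum = 0;
        for (int v : copy)
        {
            sum += v;
        }
        return sum;
    }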
@@ -983,8 +983,6 @@ void utils_end()
    replace_values_re = NULL;
}

SPINLOCK tmplock = SPINLOCK_INIT;

static bool configure_network_socket(int so)
{
    int sndbufsize = MXS_BACKEND_SO_SNDBUF;
@@ -203,7 +203,7 @@ typedef struct
    lua_State* global_lua_state;
    char* global_script;
    char* session_script;
    SPINLOCK lock;
    std::mutex lock;
} LUA_INSTANCE;

/**
@@ -249,17 +249,16 @@ void expose_functions(lua_State* state, GWBUF** active_buffer)
 */
static MXS_FILTER* createInstance(const char* name, MXS_CONFIG_PARAMETER* params)
{
    LUA_INSTANCE* my_instance;
    LUA_INSTANCE* my_instance = new (std::nothrow) LUA_INSTANCE;

    if ((my_instance = (LUA_INSTANCE*) MXS_CALLOC(1, sizeof(LUA_INSTANCE))) == NULL)
    if (my_instance == NULL)
    {
        return NULL;
    }

    spinlock_init(&my_instance->lock);

    my_instance->global_script = config_copy_string(params, "global_script");
    my_instance->session_script = config_copy_string(params, "session_script");
    my_instance->global_lua_state = nullptr;

    if (my_instance->global_script)
    {
@@ -366,7 +365,7 @@ static MXS_FILTER_SESSION* newSession(MXS_FILTER* instance, MXS_SESSION* session

    if (my_session && my_instance->global_lua_state)
    {
        spinlock_acquire(&my_instance->lock);
        std::lock_guard<std::mutex> guard(my_instance->lock);

        lua_getglobal(my_instance->global_lua_state, "newSession");
        lua_pushstring(my_instance->global_lua_state, session->client_dcb->user);
@@ -379,8 +378,6 @@ static MXS_FILTER_SESSION* newSession(MXS_FILTER* instance, MXS_SESSION* session
                      lua_tostring(my_instance->global_lua_state, -1));
            lua_pop(my_instance->global_lua_state, -1);    // Pop the error off the stack
        }

        spinlock_release(&my_instance->lock);
    }

    return (MXS_FILTER_SESSION*)my_session;
@@ -415,7 +412,7 @@ static void closeSession(MXS_FILTER* instance, MXS_FILTER_SESSION* session)

    if (my_instance->global_lua_state)
    {
        spinlock_acquire(&my_instance->lock);
        std::lock_guard<std::mutex> guard(my_instance->lock);

        lua_getglobal(my_instance->global_lua_state, "closeSession");

@@ -426,7 +423,6 @@ static void closeSession(MXS_FILTER* instance, MXS_FILTER_SESSION* session)
                      lua_tostring(my_instance->global_lua_state, -1));
            lua_pop(my_instance->global_lua_state, -1);
        }
        spinlock_release(&my_instance->lock);
    }
}

@@ -502,7 +498,7 @@ static int32_t clientReply(MXS_FILTER* instance, MXS_FILTER_SESSION* session, GW

    if (my_instance->global_lua_state)
    {
        spinlock_acquire(&my_instance->lock);
        std::lock_guard<std::mutex> guard(my_instance->lock);

        lua_getglobal(my_instance->global_lua_state, "clientReply");

@@ -512,8 +508,6 @@ static int32_t clientReply(MXS_FILTER* instance, MXS_FILTER_SESSION* session, GW
                      lua_tostring(my_session->lua_state, -1));
            lua_pop(my_instance->global_lua_state, -1);
        }

        spinlock_release(&my_instance->lock);
    }

    return my_session->up.clientReply(my_session->up.instance,
@@ -585,7 +579,7 @@ static int32_t routeQuery(MXS_FILTER* instance, MXS_FILTER_SESSION* session, GWB

    if (my_instance->global_lua_state)
    {
        spinlock_acquire(&my_instance->lock);
        std::lock_guard<std::mutex> guard(my_instance->lock);
        current_global_query = queue;

        lua_getglobal(my_instance->global_lua_state, "routeQuery");
@@ -612,7 +606,6 @@ static int32_t routeQuery(MXS_FILTER* instance, MXS_FILTER_SESSION* session, GWB
        }

        current_global_query = NULL;
        spinlock_release(&my_instance->lock);
    }

    MXS_FREE(fullquery);
@@ -652,7 +645,7 @@ static void diagnostic(MXS_FILTER* instance, MXS_FILTER_SESSION* fsession, DCB*
{
    if (my_instance->global_lua_state)
    {
        spinlock_acquire(&my_instance->lock);
        std::lock_guard<std::mutex> guard(my_instance->lock);

        lua_getglobal(my_instance->global_lua_state, "diagnostic");

@@ -672,7 +665,6 @@ static void diagnostic(MXS_FILTER* instance, MXS_FILTER_SESSION* fsession, DCB*
                      lua_tostring(my_instance->global_lua_state, -1));
            lua_pop(my_instance->global_lua_state, -1);
        }
        spinlock_release(&my_instance->lock);
    }
    if (my_instance->global_script)
    {
@@ -703,7 +695,7 @@ static json_t* diagnostic_json(const MXS_FILTER* instance, const MXS_FILTER_SESS
{
    if (my_instance->global_lua_state)
    {
        spinlock_acquire(&my_instance->lock);
        std::lock_guard<std::mutex> guard(my_instance->lock);

        lua_getglobal(my_instance->global_lua_state, "diagnostic");

@@ -721,7 +713,6 @@ static json_t* diagnostic_json(const MXS_FILTER* instance, const MXS_FILTER_SESS
        {
            lua_pop(my_instance->global_lua_state, -1);
        }
        spinlock_release(&my_instance->lock);
    }
    if (my_instance->global_script)
    {
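Giving LUA_INSTANCE a std::mutex member is also why createInstance() above switches from MXS_CALLOC to new (std::nothrow): a std::mutex has to be constructed, not just zero-filled. A reduced sketch of the difference, using a hypothetical Instance type:

    #include <mutex>
    #include <new>

    struct Instance
    {
        std::mutex lock;    // non-trivial member: needs a real constructor call
        int counter = 0;
    };

    Instance* create_instance()
    {
        // calloc()/MXS_CALLOC would only zero the bytes and never run the
        // std::mutex constructor, so locking it would be undefined behaviour.
        Instance* inst = new (std::nothrow) Instance;
        return inst;    // may be nullptr on allocation failure
    }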
@@ -213,8 +213,8 @@ typedef struct
    int conn_stat; /**state of the connection to the server*/
    int rconn_intv; /**delay for reconnects, in seconds*/
    time_t last_rconn; /**last reconnect attempt*/
    SPINLOCK rconn_lock;
    SPINLOCK msg_lock;
    std::mutex rconn_lock;
    std::mutex msg_lock;
    mqmessage* messages;
    enum log_trigger_t trgtype;
    SRC_TRIG* src_trg;
@@ -749,7 +749,6 @@ AvroSession::AvroSession(Avro* instance, MXS_SESSION* session)
    : dcb(session->client_dcb)
    , state(AVRO_CLIENT_UNREGISTERED)
    , format(AVRO_FORMAT_UNDEFINED)
    , catch_lock(SPINLOCK_INIT)
    , router(instance)
    , file_handle(NULL)
    , last_sent_pos(0)
@@ -153,7 +153,6 @@ public:
    int state; /*< The state of this client */
    enum avro_data_format format; /*< Stream JSON or Avro data */
    std::string uuid; /*< Client UUID */
    SPINLOCK catch_lock; /*< Event catchup lock */
    Avro* router; /*< Pointer to the owning router */
    MAXAVRO_FILE* file_handle; /*< Current open file handle */
    uint64_t last_sent_pos; /*< The last record we sent */
@@ -54,9 +54,6 @@ static uint64_t getCapabilities(MXS_ROUTER* instance);

extern int execute_cmd(CLI_SESSION* cli);

static SPINLOCK instlock;
static CLI_INSTANCE* instances;

/**
 * The module entry point routine. It is this routine that
 * must populate the structure that is referred to as the
@@ -68,8 +65,6 @@ static CLI_INSTANCE* instances;
extern "C" MXS_MODULE* MXS_CREATE_MODULE()
{
    MXS_NOTICE("Initialise CLI router module");
    spinlock_init(&instlock);
    instances = NULL;

    static MXS_ROUTER_OBJECT MyObject =
    {
@@ -129,16 +124,6 @@ static MXS_ROUTER* createInstance(SERVICE* service, MXS_CONFIG_PARAMETER* params
    spinlock_init(&inst->lock);
    inst->sessions = NULL;

    /*
     * We have completed the creation of the instance data, so now
     * insert this router instance into the linked list of routers
     * that have been created with this module.
     */
    spinlock_acquire(&instlock);
    inst->next = instances;
    instances = inst;
    spinlock_release(&instlock);

    return (MXS_ROUTER*)inst;
}
@@ -37,7 +37,6 @@
 */
struct ROUTER_CLIENT_SES : MXS_ROUTER_SESSION
{
    SPINLOCK rses_lock; /*< protects rses_deleted */
    int rses_versno; /*< even = no active update, else odd */
    bool rses_closed; /*< true when closeSession is called */
    SERVER_REF* backend; /*< Backend used by the client session */
@@ -62,7 +61,6 @@ struct ROUTER_STATS
struct ROUTER_INSTANCE : public MXS_ROUTER
{
    SERVICE* service; /*< Pointer to the service using this router */
    SPINLOCK lock; /*< Spinlock for the instance data */
    uint64_t bitmask_and_bitvalue; /*< Lower 32-bits for bitmask and upper for bitvalue */
    ROUTER_STATS stats; /*< Statistics for this router */
};
@@ -110,8 +110,6 @@ static void handleError(MXS_ROUTER* instance,
                        bool* succp);
static uint64_t getCapabilities(MXS_ROUTER* instance);
static bool configureInstance(MXS_ROUTER* instance, MXS_CONFIG_PARAMETER* params);
static bool rses_begin_locked_router_action(ROUTER_CLIENT_SES* rses);
static void rses_end_locked_router_action(ROUTER_CLIENT_SES* rses);
static SERVER_REF* get_root_master(SERVER_REF* servers);

/**
@@ -248,7 +246,6 @@ static MXS_ROUTER* createInstance(SERVICE* service, MXS_CONFIG_PARAMETER* params
{

    inst->service = service;
    spinlock_init(&inst->lock);
    inst->bitmask_and_bitvalue = 0;

    if (!configureInstance((MXS_ROUTER*)inst, params))
@@ -478,28 +475,11 @@ static void freeSession(MXS_ROUTER* router_instance, MXS_ROUTER_SESSION* router_
static void closeSession(MXS_ROUTER* instance, MXS_ROUTER_SESSION* router_session)
{
    ROUTER_CLIENT_SES* router_cli_ses = (ROUTER_CLIENT_SES*) router_session;
    DCB* backend_dcb;
    mxb_assert(router_cli_ses->backend_dcb);

    /**
     * Lock router client session for secure read and update.
     */
    if (rses_begin_locked_router_action(router_cli_ses))
    if (router_cli_ses->backend_dcb)
    {
        /* decrease server current connection counter */

        backend_dcb = router_cli_ses->backend_dcb;
        router_cli_ses->backend_dcb = NULL;
        router_cli_ses->rses_closed = true;
        /** Unlock */
        rses_end_locked_router_action(router_cli_ses);

        /**
         * Close the backend server connection
         */
        if (backend_dcb != NULL)
        {
            dcb_close(backend_dcb);
        }
        dcb_close(router_cli_ses->backend_dcb);
    }
}

@@ -596,33 +576,14 @@ static int routeQuery(MXS_ROUTER* instance, MXS_ROUTER_SESSION* router_session,
    DCB* backend_dcb;
    MySQLProtocol* proto = (MySQLProtocol*)router_cli_ses->client_dcb->protocol;
    mxs_mysql_cmd_t mysql_command = proto->current_command;
    bool rses_is_closed;
    bool rses_is_closed = router_cli_ses->rses_closed;

    inst->stats.n_queries++;

    // Due to the streaming nature of readconnroute, this is not accurate
    mxb::atomic::add(&router_cli_ses->backend->server->stats.packets, 1, mxb::atomic::RELAXED);

    /** Dirty read for quick check if router is closed. */
    if (router_cli_ses->rses_closed)
    {
        rses_is_closed = true;
    }
    else
    {
        /**
         * Lock router client session for secure read of DCBs
         */
        rses_is_closed = !(rses_begin_locked_router_action(router_cli_ses));
    }

    if (!rses_is_closed)
    {
        backend_dcb = router_cli_ses->backend_dcb;
        /** unlock */
        rses_end_locked_router_action(router_cli_ses);
    }

    backend_dcb = router_cli_ses->backend_dcb;
    bool valid;
    char* trc = NULL;

@@ -794,63 +755,6 @@ static void handleError(MXS_ROUTER* instance,
    *succp = false;
}

/** to be inline'd */

/**
 * @node Acquires lock to router client session if it is not closed.
 *
 * Parameters:
 * @param rses - in, use
 *
 *
 * @return true if router session was not closed. If return value is true
 * it means that router is locked, and must be unlocked later. False, if
 * router was closed before lock was acquired.
 *
 *
 * @details (write detailed description here)
 *
 */
static bool rses_begin_locked_router_action(ROUTER_CLIENT_SES* rses)
{
    bool succp = false;

    if (rses->rses_closed)
    {
        goto return_succp;
    }
    spinlock_acquire(&rses->rses_lock);
    if (rses->rses_closed)
    {
        spinlock_release(&rses->rses_lock);
        goto return_succp;
    }
    succp = true;

return_succp:
    return succp;
}

/** to be inline'd */

/**
 * @node Releases router client session lock.
 *
 * Parameters:
 * @param rses - <usage>
 * <description>
 *
 * @return void
 *
 *
 * @details (write detailed description here)
 *
 */
static void rses_end_locked_router_action(ROUTER_CLIENT_SES* rses)
{
    spinlock_release(&rses->rses_lock);
}

static uint64_t getCapabilities(MXS_ROUTER* instance)
{
    return RCAP_TYPE_RUNTIME_CONFIG;
@@ -47,7 +47,6 @@ SchemaRouter::SchemaRouter(SERVICE* service, SConfig config)
    , m_config(config)
    , m_service(service)
{
    spinlock_init(&m_lock);
}

SchemaRouter::~SchemaRouter()
@@ -14,6 +14,7 @@

#include "schemarouter.hh"

#include <mutex>
#include <set>
#include <string>

@@ -51,7 +52,7 @@ private:
    SConfig m_config; /*< expanded config info from SERVICE */
    ShardManager m_shard_manager; /*< Shard maps hashed by user name */
    SERVICE* m_service; /*< Pointer to service */
    SPINLOCK m_lock; /*< Lock for the instance data */
    std::mutex m_lock; /*< Lock for the instance data */
    Stats m_stats; /*< Statistics for this router */
};
}
@@ -111,7 +111,8 @@ void SchemaRouterSession::close()
        }
    }

    spinlock_acquire(&m_router->m_lock);
    std::lock_guard<std::mutex> guard(m_router->m_lock);

    if (m_router->m_stats.longest_sescmd < m_stats.longest_sescmd)
    {
        m_router->m_stats.longest_sescmd = m_stats.longest_sescmd;
@@ -129,8 +130,6 @@ void SchemaRouterSession::close()
        m_router->m_stats.ses_average =
            (ses_time + ((m_router->m_stats.sessions - 1) * m_router->m_stats.ses_average))
            / (m_router->m_stats.sessions);

        spinlock_release(&m_router->m_lock);
    }
}