Merge branch '2.2' into develop

Author: Markus Mäkelä
Date: 2018-07-03 21:13:41 +03:00

41 changed files with 233 additions and 110032 deletions

View File

@@ -87,6 +87,7 @@ gwbuf_alloc(unsigned int size)
     rval->hint = NULL;
     rval->properties = NULL;
     rval->gwbuf_type = GWBUF_TYPE_UNDEFINED;
+    rval->server = NULL;
     CHK_GWBUF(rval);
 retblock:
     if (rval == NULL)
@@ -303,6 +304,7 @@ gwbuf_clone_one(GWBUF *buf)
     }
     atomic_add(&buf->sbuf->refcount, 1);
+    rval->server = buf->server;
     rval->sbuf = buf->sbuf;
     rval->start = buf->start;
     rval->end = buf->end;
@@ -384,6 +386,7 @@ static GWBUF *gwbuf_clone_portion(GWBUF *buf,
         return NULL;
     }
     atomic_add(&buf->sbuf->refcount, 1);
+    clonebuf->server = buf->server;
     clonebuf->sbuf = buf->sbuf;
     clonebuf->gwbuf_type = buf->gwbuf_type; /*< clone info bits too */
     clonebuf->start = (void *)((char*)buf->start + start_offset);
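
The three hunks above follow one pattern: GWBUF has gained a server pointer, so gwbuf_alloc() must initialise it and both clone paths must copy it alongside the refcounted shared buffer, otherwise a clone carries an indeterminate pointer. Below is a minimal sketch of that shallow-clone pattern; SharedBuf and Buf are simplified stand-ins, not MaxScale's actual definitions.

    #include <atomic>
    #include <cstdlib>

    struct SharedBuf                      // stand-in for SHARED_BUF
    {
        std::atomic<int> refcount{1};     // clones share the payload via this count
        char* data = nullptr;
    };

    struct Buf                            // stand-in for GWBUF, heavily simplified
    {
        SharedBuf* sbuf;                  // shared, reference-counted payload
        void* start;                      // read position within the payload
        void* end;                        // one past the last valid byte
        void* server;                     // new member: originating server, NULL at alloc
    };

    // Shallow clone: bump the shared refcount, then copy *every* per-buffer
    // member. Forgetting a member (the bug these hunks fix) leaves that field
    // of the clone indeterminate, because the struct comes from malloc().
    Buf* buf_clone_one(Buf* b)
    {
        Buf* rval = static_cast<Buf*>(std::malloc(sizeof(Buf)));
        if (rval == nullptr)
        {
            return nullptr;
        }
        b->sbuf->refcount.fetch_add(1);
        rval->sbuf = b->sbuf;
        rval->start = b->start;
        rval->end = b->end;
        rval->server = b->server;         // the line each hunk adds
        return rval;
    }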

View File

@@ -3588,23 +3588,30 @@ int poll_add_dcb(DCB *dcb)
         events = EPOLLIN;
         new_state = DCB_STATE_LISTENING;
     }
-    else if (dcb->dcb_role == DCB_ROLE_CLIENT_HANDLER &&
-             (strcasecmp(dcb->service->routerModule, "cli") == 0 ||
-              strcasecmp(dcb->service->routerModule, "maxinfo") == 0))
+    else if (dcb->dcb_role == DCB_ROLE_CLIENT_HANDLER)
     {
-        // If the DCB refers to an accepted maxadmin/maxinfo socket, we force it
-        // to the main thread. That's done in order to prevent a deadlock
-        // that may happen if there are multiple concurrent administrative calls,
-        // handled by different worker threads.
-        // See: https://jira.mariadb.org/browse/MXS-1805 and https://jira.mariadb.org/browse/MXS-1833
+        if (strcasecmp(dcb->service->routerModule, "cli") == 0 ||
+            strcasecmp(dcb->service->routerModule, "maxinfo") == 0)
+        {
+            // If the DCB refers to an accepted maxadmin/maxinfo socket, we force it
+            // to the main thread. That's done in order to prevent a deadlock
+            // that may happen if there are multiple concurrent administrative calls,
+            // handled by different worker threads.
+            // See: https://jira.mariadb.org/browse/MXS-1805 and https://jira.mariadb.org/browse/MXS-1833
+            owner = RoutingWorker::get(RoutingWorker::MAIN);
+        }
+        else
+        {
+            // Round-robin the client connection worker assignment
+            owner = RoutingWorker::pick_worker();
+        }
         new_state = DCB_STATE_POLLING;
-        owner = RoutingWorker::get(RoutingWorker::MAIN);
         dcb->poll.owner = owner;
     }
     else
     {
-        ss_dassert(dcb->dcb_role == DCB_ROLE_CLIENT_HANDLER ||
-                   dcb->dcb_role == DCB_ROLE_BACKEND_HANDLER);
+        ss_dassert(dcb->dcb_role == DCB_ROLE_BACKEND_HANDLER);
         ss_dassert(RoutingWorker::get_current_id() != -1);
         ss_dassert(RoutingWorker::get_current() == dcb->poll.owner);
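
Distilled, the new owner-selection policy for accepted client DCBs is: administrative connections ("cli"/"maxinfo") are serialized on the main routing worker so concurrent administrative calls cannot deadlock across workers (MXS-1805, MXS-1833), while ordinary client connections are spread round-robin. A compact, self-contained restatement of that branch; Owner is a hypothetical stand-in for the RoutingWorker* the real code assigns.

    #include <strings.h>   // strcasecmp() (POSIX)

    enum class Owner { Main, RoundRobin };   // stand-in for RoutingWorker*

    // Mirrors the branch added above: admin routers stay on the main worker,
    // every other client handler gets a round-robined worker.
    Owner choose_client_owner(const char* router_module)
    {
        bool is_admin = strcasecmp(router_module, "cli") == 0
                        || strcasecmp(router_module, "maxinfo") == 0;
        return is_admin ? Owner::Main : Owner::RoundRobin;
    }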

View File

@@ -271,6 +271,13 @@ public:
      */
     static int64_t get_one_statistic(POLL_STAT what);

+    /**
+     * Get next worker
+     *
+     * @return The worker where work should be assigned
+     */
+    static RoutingWorker* pick_worker();
+
 private:
     const int m_id;           /*< The id of the worker. */
     SessionsById m_sessions;  /*< A mapping of session_id->MXS_SESSION. The map

View File

@@ -1014,3 +1014,11 @@ json_t* mxs_rworker_list_to_json(const char* host)
     RoutingWorker::execute_concurrently(task);
     return task.resource();
 }
+
+// static
+RoutingWorker* RoutingWorker::pick_worker()
+{
+    static int id_generator = 0;
+    int id = this_unit.id_min_worker + (atomic_add(&id_generator, 1) % this_unit.nWorkers);
+    return get(id);
+}
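
pick_worker() is a classic atomic round-robin: a process-wide counter is fetch-and-incremented, and the pre-increment value modulo the worker count selects the id. A runnable model of the same pattern follows; the worker ids stand in for the RoutingWorker instances the real get(id) returns, and an unsigned counter is used here because overflowing a signed int is undefined behaviour in C++.

    #include <atomic>
    #include <cstdio>

    static std::atomic<unsigned> id_generator{0};

    // fetch_add() returns the pre-increment value, matching atomic_add()'s
    // fetch-and-add semantics in the hunk above.
    int pick_worker_id(int id_min_worker, int n_workers)
    {
        return id_min_worker
               + static_cast<int>(id_generator.fetch_add(1) % static_cast<unsigned>(n_workers));
    }

    int main()
    {
        // With 4 workers whose ids start at 1, successive picks cycle 1,2,3,4,1,...
        for (int i = 0; i < 8; ++i)
        {
            std::printf("%d ", pick_worker_id(1, 4));
        }
        std::printf("\n");
        return 0;
    }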