Cache: Assume we need to handle one packet at a time

Author: Johan Wikman
Date:   2016-09-14 13:26:05 +03:00
Parent: 1001654987
Commit: 7e2a21de9e
3 changed files with 43 additions and 54 deletions


@@ -364,40 +364,26 @@ static int routeQuery(FILTER *instance, void *sdata, GWBUF *packets)
if (csdata->packets)
{
C_DEBUG("Old packets exist.");
gwbuf_append(csdata->packets, packets);
}
else
{
C_DEBUG("NO old packets exist.");
csdata->packets = packets;
}
packets = modutil_get_complete_packets(&csdata->packets);
GWBUF *packet = modutil_get_next_MySQL_packet(&csdata->packets);
int rv;
if (packets)
if (packet)
{
C_DEBUG("At least one complete packet exist.");
GWBUF *packet;
// TODO: Is it really possible to get more that one packet
// TODO: is this loop? If so, can those packets be sent
// TODO: after one and other, or do we need to wait for
// TODO: a replies? If there are more complete packets
// TODO: than one, then either CACHE_SESSION_DATA::key
// TODO: needs to be a queue
// TODO: modutil_get_next_MySQL_packet *copies* the data.
while ((packet = modutil_get_next_MySQL_packet(&packets)))
{
C_DEBUG("Processing packet.");
bool use_default = true;
// TODO: This returns the wrong result if GWBUF_LENGTH(packet) is < 5.
if (modutil_is_SQL(packet))
{
C_DEBUG("Is SQL.");
packet = gwbuf_make_contiguous(packet);
// We do not care whether the query was fully parsed or not.
// If a query cannot be fully parsed, the worst thing that can
// happen is that caching is not used, even though it would be
@@ -405,8 +391,6 @@ static int routeQuery(FILTER *instance, void *sdata, GWBUF *packets)
if (qc_get_operation(packet) == QUERY_OP_SELECT)
{
C_DEBUG("Is a SELECT");
GWBUF *result;
use_default = !route_using_cache(cinstance, csdata, packet, &result);
@@ -421,14 +405,6 @@ static int routeQuery(FILTER *instance, void *sdata, GWBUF *packets)
rv = dcb->func.write(dcb, result);
}
}
else
{
C_DEBUG("Is NOT a SELECT");
}
}
else
{
C_DEBUG("Is NOT SQL.");
}
if (use_default)
@@ -437,11 +413,9 @@ static int routeQuery(FILTER *instance, void *sdata, GWBUF *packets)
rv = csdata->down.routeQuery(csdata->down.instance, csdata->down.session, packet);
}
}
}
else
{
C_DEBUG("Not even one complete packet exist; more data needed.");
// Ok, we need more data before we can do something.
// We need more data before we can do something.
rv = 1;
}
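
Taken together, the hunks above replace the old modutil_get_complete_packets() loop with a single modutil_get_next_MySQL_packet() call: at most one complete packet is extracted per routeQuery() invocation, and anything left over stays buffered in csdata->packets. Below is a minimal sketch of the resulting control flow, condensed into one function; the name routeQuery_sketch, the cast of sdata and the omission of the cache/SELECT branch are simplifications for illustration, not the committed code.

static int routeQuery_sketch(FILTER *instance, void *sdata, GWBUF *packets)
{
    CACHE_SESSION_DATA *csdata = (CACHE_SESSION_DATA*)sdata; // assumed cast
    int rv = 1;

    // Queue the incoming data behind any partial packet already buffered.
    if (csdata->packets)
    {
        gwbuf_append(csdata->packets, packets);
    }
    else
    {
        csdata->packets = packets;
    }

    // Extract at most one complete MySQL packet; the remainder stays in
    // csdata->packets until more data arrives.
    GWBUF *packet = modutil_get_next_MySQL_packet(&csdata->packets);

    if (packet)
    {
        // The cache lookup and SELECT handling from the hunks above is
        // elided; the sketch simply routes the packet downstream.
        rv = csdata->down.routeQuery(csdata->down.instance, csdata->down.session, packet);
    }

    // With no complete packet, rv stays 1 and we wait for more data.
    return rv;
}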


@@ -168,6 +168,7 @@ cache_result_t RocksDBStorage::getValue(const char* pKey, GWBUF** ppResult)
else
{
MXS_NOTICE("Cache item is stale, not using.");
result = CACHE_RESULT_NOT_FOUND;
}
}
else
@@ -190,7 +191,7 @@ cache_result_t RocksDBStorage::getValue(const char* pKey, GWBUF** ppResult)
cache_result_t RocksDBStorage::putValue(const char* pKey, const GWBUF* pValue)
{
// ss_dassert(gwbuf_is_contiguous(pValue));
ss_dassert(GWBUF_IS_CONTIGUOUS(pValue));
rocksdb::Slice key(pKey, ROCKSDB_KEY_LENGTH);
rocksdb::Slice value(static_cast<const char*>(GWBUF_DATA(pValue)), GWBUF_LENGTH(pValue));
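
These two hunks tighten the storage contract from the caller's point of view: getValue() now reports a stale item as CACHE_RESULT_NOT_FOUND instead of handing it back, and putValue() asserts that the buffer it receives is contiguous rather than leaving the check commented out. The sketch below shows a hypothetical caller; the helper name, the storage reference and the reply buffer are invented for illustration, while getValue(), putValue(), gwbuf_make_contiguous() and the result codes come from the hunks above.

// Hypothetical caller-side helper, not part of the commit.
void store_reply_if_needed(RocksDBStorage& storage, const char* pKey, GWBUF* pServerReply)
{
    GWBUF *pCached = NULL;

    // A stale entry now comes back as CACHE_RESULT_NOT_FOUND, so a single
    // check covers both "never cached" and "cached but too old".
    if (storage.getValue(pKey, &pCached) == CACHE_RESULT_NOT_FOUND)
    {
        // putValue() asserts GWBUF_IS_CONTIGUOUS(), so the reply buffer is
        // flattened first, using the helper already seen in the filter code.
        GWBUF *pValue = gwbuf_make_contiguous(pServerReply);
        storage.putValue(pKey, pValue);
    }
    // A cache hit (pCached != NULL) would be consumed by the caller; that
    // path is omitted here.
}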


@@ -25,6 +25,8 @@ bool initialize()
CACHE_STORAGE* createInstance(const char* zName, uint32_t ttl, int argc, char* argv[])
{
ss_dassert(zName);
CACHE_STORAGE* pStorage = 0;
try
@@ -56,6 +58,10 @@ cache_result_t getKey(CACHE_STORAGE* pStorage,
const GWBUF* pQuery,
char* pKey)
{
ss_dassert(pStorage);
ss_dassert(pQuery);
ss_dassert(pKey);
cache_result_t result = CACHE_RESULT_ERROR;
try
@@ -80,6 +86,10 @@ cache_result_t getKey(CACHE_STORAGE* pStorage,
cache_result_t getValue(CACHE_STORAGE* pStorage, const char* pKey, GWBUF** ppResult)
{
ss_dassert(pStorage);
ss_dassert(pKey);
ss_dassert(ppResult);
cache_result_t result = CACHE_RESULT_ERROR;
try
@@ -106,6 +116,10 @@ cache_result_t putValue(CACHE_STORAGE* pStorage,
const char* pKey,
const GWBUF* pValue)
{
ss_dassert(pStorage);
ss_dassert(pKey);
ss_dassert(pValue);
cache_result_t result = CACHE_RESULT_ERROR;
try
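
The remaining hunks add the same precondition checks to every C entry point of the storage module: each function now asserts its pointer arguments before doing any work. The shape of one such entry point is sketched below for putValue(); only the asserts, the result variable and the try scaffolding appear in the hunks above, while the forwarding cast to RocksDBStorage and the empty catch block are assumptions about the elided body.

cache_result_t putValue(CACHE_STORAGE* pStorage,
                        const char* pKey,
                        const GWBUF* pValue)
{
    ss_dassert(pStorage);
    ss_dassert(pKey);
    ss_dassert(pValue);

    cache_result_t result = CACHE_RESULT_ERROR;

    try
    {
        // Assumed: pStorage wraps the RocksDBStorage instance created by
        // createInstance(), so the call is forwarded to the member function
        // shown in the previous file.
        result = reinterpret_cast<RocksDBStorage*>(pStorage)->putValue(pKey, pValue);
    }
    catch (...)
    {
        // Assumed: exceptions are swallowed and result stays CACHE_RESULT_ERROR.
    }

    return result;
}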