Use simpler function for calculating hash
The key is now a 64-bit integer. It is created by the cache component itself and no longer by the storage.
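In outline, the new key is built from two CRC32 checksums: one over the names of the accessed databases and one over the statement text. A minimal standalone sketch of that construction, using zlib's crc32 as the diff below does (the helper name make_cache_key is illustrative only):

    #include <cstdint>
    #include <string>
    #include <zlib.h>

    // Sketch of the key construction used in Cache::get_default_key below:
    // databases in the upper 32 bits, statement in the lower 32 bits.
    static uint64_t make_cache_key(const std::string& db_tag, const std::string& sql)
    {
        uint64_t table_crc = crc32(0, Z_NULL, 0);
        table_crc = crc32(table_crc,
                          reinterpret_cast<const unsigned char*>(db_tag.data()),
                          db_tag.length());

        uint64_t stmt_crc = crc32(0, Z_NULL, 0);
        stmt_crc = crc32(stmt_crc,
                         reinterpret_cast<const unsigned char*>(sql.data()),
                         sql.length());

        // Identical statements against different default databases get different keys.
        return (table_crc << 32) | stmt_crc;
    }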
server/modules/filter/cache/cache.cc (80 changes)

@@ -14,11 +14,19 @@
#define MXS_MODULE_NAME "cache"
#include "cache.hh"
#include <new>
#include <set>
#include <string>
#include <zlib.h>
#include <maxscale/alloc.h>
#include <maxscale/buffer.h>
#include <maxscale/modutil.h>
#include <maxscale/query_classifier.h>
#include <maxscale/paths.h>
#include "storagefactory.hh"
#include "storage.hh"

using namespace std;

Cache::Cache(const std::string& name,
             const CACHE_CONFIG* pConfig,
             SCacheRules sRules,

@@ -102,6 +110,78 @@ void Cache::show(DCB* pDcb) const
    }
}

cache_result_t Cache::get_key(const char* zDefault_db,
                              const GWBUF* pQuery,
                              CACHE_KEY* pKey) const
{
    // TODO: Take config into account.
    return get_default_key(zDefault_db, pQuery, pKey);
}

//static
cache_result_t Cache::get_default_key(const char* zDefault_db,
                                      const GWBUF* pQuery,
                                      CACHE_KEY* pKey)
{
    ss_dassert(GWBUF_IS_CONTIGUOUS(pQuery));

    int n;
    bool fullnames = true;
    char** pzTables = qc_get_table_names(const_cast<GWBUF*>(pQuery), &n, fullnames);

    set<string> dbs; // Elements in set are sorted.

    for (int i = 0; i < n; ++i)
    {
        char *zTable = pzTables[i];
        char *zDot = strchr(zTable, '.');

        if (zDot)
        {
            *zDot = 0;
            dbs.insert(zTable);
        }
        else if (zDefault_db)
        {
            // If zdefault_db is NULL, then there will be a table for which we
            // do not know the database. However, that will fail in the server,
            // so nothing will be stored.
            dbs.insert(zDefault_db);
        }
        MXS_FREE(zTable);
    }
    MXS_FREE(pzTables);

    // dbs now contain each accessed database in sorted order. Now copy them to a single string.
    string tag;
    for (set<string>::const_iterator i = dbs.begin(); i != dbs.end(); ++i)
    {
        tag.append(*i);
    }

    const unsigned char* pData;

    // We hash the databases in the first half of the key. That will ensure that
    // identical queries targeting different default databases will not clash.
    pData = reinterpret_cast<const unsigned char*>(tag.data());
    uint64_t table_crc = crc32(0, Z_NULL, 0);
    table_crc = crc32(table_crc, pData, tag.length());

    char *pSql;
    int length;

    modutil_extract_SQL(const_cast<GWBUF*>(pQuery), &pSql, &length);

    // Then we hash the query itself in the second half of the key.
    pData = reinterpret_cast<const unsigned char*>(pSql);
    uint64_t stmt_crc = crc32(0, Z_NULL, 0);
    stmt_crc = crc32(stmt_crc, pData, length);

    pKey->data = (table_crc << 32) | stmt_crc;

    return CACHE_RESULT_OK;
}

bool Cache::should_store(const char* zDefaultDb, const GWBUF* pQuery)
{
    return m_sRules->should_store(zDefaultDb, pQuery);
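For orientation, a hypothetical caller of the new entry point (not part of this commit; "mydb" is a placeholder default database, and modutil_create_query, gwbuf_free and MXS_NOTICE are assumed from the surrounding MaxScale utilities):

    // The query buffer must be contiguous, as get_default_key asserts above.
    GWBUF* pQuery = modutil_create_query("SELECT a FROM t1");

    CACHE_KEY key;
    if (Cache::get_default_key("mydb", pQuery, &key) == CACHE_RESULT_OK)
    {
        // cache_key_to_string() now simply prints the 64-bit value.
        MXS_NOTICE("Key: %s", cache_key_to_string(key).c_str());
    }

    gwbuf_free(pQuery);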
server/modules/filter/cache/cache.hh (26 changes)

@@ -87,11 +87,31 @@ public:
    virtual void refreshed(const CACHE_KEY& key, const CacheFilterSession* pSession) = 0;

    /**
     * See @Storage::get_key
     * Returns a key for the statement. Takes the current config into account.
     *
     * @param zDefault_db  The default database, can be NULL.
     * @param pQuery       A statement.
     * @param pKey         On output a key.
     *
     * @return CACHE_RESULT_OK if a key could be created.
     */
    virtual cache_result_t get_key(const char* zDefaultDb,
    cache_result_t get_key(const char* zDefault_db,
                            const GWBUF* pQuery,
                            CACHE_KEY* pKey) const = 0;
                            CACHE_KEY* pKey) const;

    /**
     * Returns a key for the statement. Does not take the current config
     * into account.
     *
     * @param zDefault_db  The default database, can be NULL.
     * @param pQuery       A statement.
     * @param pKey         On output a key.
     *
     * @return CACHE_RESULT_OK if a key could be created.
     */
    static cache_result_t get_default_key(const char* zDefault_db,
                                          const GWBUF* pQuery,
                                          CACHE_KEY* pKey);

    /**
     * See @Storage::get_value
server/modules/filter/cache/cache_storage_api.c (17 changes)

@@ -17,20 +17,9 @@
size_t cache_key_hash(const CACHE_KEY* key)
{
    ss_dassert(key);
    ss_dassert(sizeof(key->data) == sizeof(size_t));

    size_t hash = 0;

    const char* i = key->data;
    const char* end = i + CACHE_KEY_MAXLEN;

    while (i < end)
    {
        int c = *i;
        hash = c + (hash << 6) + (hash << 16) - hash;
        ++i;
    }

    return hash;
    return key->data;
}

bool cache_key_equal_to(const CACHE_KEY* lhs, const CACHE_KEY* rhs)

@@ -38,7 +27,7 @@ bool cache_key_equal_to(const CACHE_KEY* lhs, const CACHE_KEY* rhs)
    ss_dassert(lhs);
    ss_dassert(rhs);

    return memcmp(lhs->data, rhs->data, CACHE_KEY_MAXLEN) == 0;
    return lhs->data == rhs->data;
}
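With the key reduced to a single integer, hashing and equality become trivial. A sketch of how a hash container could be parameterized with these functions (the functor and typedef names are hypothetical, not part of the module):

    #include <unordered_map>

    struct CacheKeyHash
    {
        size_t operator()(const CACHE_KEY& key) const
        {
            return cache_key_hash(&key);            // now simply key.data
        }
    };

    struct CacheKeyEqual
    {
        bool operator()(const CACHE_KEY& lhs, const CACHE_KEY& rhs) const
        {
            return cache_key_equal_to(&lhs, &rhs);  // now simply lhs.data == rhs.data
        }
    };

    typedef std::unordered_map<CACHE_KEY, GWBUF*, CacheKeyHash, CacheKeyEqual> CacheEntries;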
server/modules/filter/cache/cache_storage_api.cc (19 changes)

@@ -14,24 +14,15 @@
#define MXS_MODULE_NAME "cache"
#include "cache_storage_api.hh"
#include <ctype.h>
#include <sstream>

using std::string;
using std::stringstream;

std::string cache_key_to_string(const CACHE_KEY& key)
{
    string s;
    stringstream ss;
    ss << key.data;

    for (int i = 0; i < CACHE_KEY_MAXLEN; ++i)
    {
        char c = key.data[i];

        if (!isprint(c))
        {
            c = '.';
        }

        s += c;
    }

    return s;
    return ss.str();
}
server/modules/filter/cache/cache_storage_api.h (19 changes)

@@ -60,14 +60,9 @@ typedef enum cache_thread_model

typedef void* CACHE_STORAGE;

enum
{
    CACHE_KEY_MAXLEN = 128
};

typedef struct cache_key
{
    char data[CACHE_KEY_MAXLEN];
    uint64_t data;
} CACHE_KEY;

/**

@@ -176,18 +171,6 @@ typedef struct cache_storage_api
                              const CACHE_STORAGE_CONFIG* config,
                              int argc, char* argv[]);

    /**
     * Create a key for a GWBUF.
     *
     * @param query  An SQL query. Must be one contiguous buffer.
     * @param key    Pointer to key.
     *
     * @return CACHE_RESULT_OK if a key was created, otherwise some error code.
     */
    cache_result_t (*getKey)(const char* default_db,
                             const GWBUF* query,
                             CACHE_KEY* key);

    /**
     * Frees an CACHE_STORAGE instance earlier created with createInstance.
     *
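Since CACHE_KEY now wraps a single uint64_t, it behaves as a plain value: it can be copied, compared and stored directly. An illustrative compile-time check (assuming a C++11 translation unit; this is not part of the header itself):

    #include <cstdint>
    #include <type_traits>

    static_assert(sizeof(CACHE_KEY) == sizeof(uint64_t),
                  "the key is a plain 64-bit value");
    static_assert(std::is_trivially_copyable<CACHE_KEY>::value,
                  "the key can be copied byte for byte");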
@@ -51,7 +51,7 @@ std::string cache_key_to_string(const CACHE_KEY& key);

inline bool operator == (const CACHE_KEY& lhs, const CACHE_KEY& rhs)
{
    return memcmp(lhs.data, rhs.data, sizeof(lhs.data)) == 0;
    return lhs.data == rhs.data;
}

inline bool operator != (const CACHE_KEY& lhs, const CACHE_KEY& rhs)

@@ -64,7 +64,7 @@ class CacheKey : public CACHE_KEY
public:
    CacheKey()
    {
        memset(data, 0, sizeof(data));
        data = 0;
    }
};
@@ -146,7 +146,7 @@ CacheFilterSession::CacheFilterSession(MXS_SESSION* pSession, Cache* pCache, cha
    , m_refreshing(false)
    , m_is_read_only(true)
{
    memset(m_key.data, 0, CACHE_KEY_MAXLEN);
    m_key.data = 0;

    reset_response_state();
}
server/modules/filter/cache/cachesimple.cc (7 changes)

@@ -50,13 +50,6 @@ bool CacheSimple::Create(const CACHE_CONFIG& config,
    return pRules != NULL;
}

cache_result_t CacheSimple::get_key(const char* zDefaultDb,
                                    const GWBUF* pQuery,
                                    CACHE_KEY* pKey) const
{
    return m_pStorage->get_key(zDefaultDb, pQuery, pKey);
}

cache_result_t CacheSimple::get_value(const CACHE_KEY& key,
                                      uint32_t flags,
                                      GWBUF** ppValue) const
server/modules/filter/cache/cachesimple.hh (2 changes)

@@ -25,8 +25,6 @@ class CacheSimple : public Cache
public:
    ~CacheSimple();

    cache_result_t get_key(const char* zDefaultDb, const GWBUF* pQuery, CACHE_KEY* pKey) const;

    cache_result_t get_value(const CACHE_KEY& key, uint32_t flags, GWBUF** ppValue) const;

    cache_result_t put_value(const CACHE_KEY& key, const GWBUF* pValue);
server/modules/filter/cache/lrustorage.cc (7 changes)

@@ -41,13 +41,6 @@ void LRUStorage::get_config(CACHE_STORAGE_CONFIG* pConfig)
    *pConfig = m_config;
}

cache_result_t LRUStorage::get_key(const char* zDefault_db,
                                   const GWBUF* pQuery,
                                   CACHE_KEY* pKey) const
{
    return m_pStorage->get_key(zDefault_db, pQuery, pKey);
}

cache_result_t LRUStorage::do_get_info(uint32_t what,
                                       json_t** ppInfo) const
{
server/modules/filter/cache/lrustorage.hh (7 changes)

@@ -28,13 +28,6 @@ public:
     */
    void get_config(CACHE_STORAGE_CONFIG* pConfig);

    /**
     * @see Storage::get_key
     */
    cache_result_t get_key(const char* zDefault_db,
                           const GWBUF* pQuery,
                           CACHE_KEY* pKey) const;

protected:
    LRUStorage(const CACHE_STORAGE_CONFIG& config, Storage* pStorage);

server/modules/filter/cache/storage.hh (13 changes)

@@ -44,19 +44,6 @@ public:
     */
    virtual cache_result_t get_info(uint32_t what, json_t** ppInfo) const = 0;

    /**
     * Create a key for a GWBUF.
     *
     * @param zDefaultDb  The default DB or NULL.
     * @param query       An SQL query. Must be one contiguous buffer.
     * @param pKey        Pointer to object where key will be stored.
     *
     * @return CACHE_RESULT_OK if a key was created, otherwise some error code.
     */
    virtual cache_result_t get_key(const char* zDefaultDb,
                                   const GWBUF* pQuery,
                                   CACHE_KEY* pKey) const = 0;

    /**
     * Get a value from the cache.
     *
@@ -13,10 +13,6 @@

#define MXS_MODULE_NAME "storage_inmemory"
#include "inmemorystorage.hh"
#include <openssl/sha.h>
#include <algorithm>
#include <memory>
#include <set>
#include <maxscale/alloc.h>
#include <maxscale/modutil.h>
#include <maxscale/query_classifier.h>

@@ -24,7 +20,6 @@
#include "inmemorystoragemt.hh"

using std::auto_ptr;
using std::set;
using std::string;

@@ -96,67 +91,6 @@ InMemoryStorage* InMemoryStorage::Create_instance(const char* zName,
    return sStorage.release();
}

cache_result_t InMemoryStorage::Get_key(const char* zDefault_db, const GWBUF& query, CACHE_KEY* pKey)
{
    ss_dassert(GWBUF_IS_CONTIGUOUS(&query));

    int n;
    bool fullnames = true;
    char** pzTables = qc_get_table_names(const_cast<GWBUF*>(&query), &n, fullnames);

    set<string> dbs; // Elements in set are sorted.

    for (int i = 0; i < n; ++i)
    {
        char *zTable = pzTables[i];
        char *zDot = strchr(zTable, '.');

        if (zDot)
        {
            *zDot = 0;
            dbs.insert(zTable);
        }
        else if (zDefault_db)
        {
            // If zdefault_db is NULL, then there will be a table for which we
            // do not know the database. However, that will fail in the server,
            // so nothing will be stored.
            dbs.insert(zDefault_db);
        }
        MXS_FREE(zTable);
    }
    MXS_FREE(pzTables);

    // dbs now contain each accessed database in sorted order. Now copy them to a single string.
    string tag;
    for (set<string>::const_iterator i = dbs.begin(); i != dbs.end(); ++i)
    {
        tag.append(*i);
    }

    memset(pKey->data, 0, CACHE_KEY_MAXLEN);

    const unsigned char* pData;

    // We store the databases in the first half of the key. That will ensure that
    // identical queries targeting different default databases will not clash.
    // This will also mean that entries related to the same databases will
    // be placed near each other.
    pData = reinterpret_cast<const unsigned char*>(tag.data());
    SHA512(pData, tag.length(), reinterpret_cast<unsigned char*>(pKey->data));

    char *pSql;
    int length;

    modutil_extract_SQL(const_cast<GWBUF*>(&query), &pSql, &length);

    // Then we store the query itself in the second half of the key.
    pData = reinterpret_cast<const unsigned char*>(pSql);
    SHA512(pData, length, reinterpret_cast<unsigned char*>(pKey->data) + SHA512_DIGEST_LENGTH);

    return CACHE_RESULT_OK;
}

void InMemoryStorage::get_config(CACHE_STORAGE_CONFIG* pConfig)
{
    *pConfig = m_config;
@@ -30,8 +30,6 @@ public:
                                          const CACHE_STORAGE_CONFIG& config,
                                          int argc, char* argv[]);

    static cache_result_t Get_key(const char* zDefault_db, const GWBUF& query, CACHE_KEY* pKey);

    void get_config(CACHE_STORAGE_CONFIG* pConfig);
    virtual cache_result_t get_info(uint32_t what, json_t** ppInfo) const = 0;
    virtual cache_result_t get_value(const CACHE_KEY& key, uint32_t flags, GWBUF** ppResult) = 0;
@@ -13,12 +13,10 @@

#define MXS_MODULE_NAME "storage_rocksdb"
#include "rocksdbstorage.hh"
#include <openssl/sha.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <fts.h>
#include <algorithm>
#include <set>
#include <rocksdb/env.h>
#include <rocksdb/statistics.h>
#include <maxscale/alloc.h>

@@ -28,7 +26,6 @@
#include "rocksdbinternals.hh"

using std::for_each;
using std::set;
using std::string;
using std::unique_ptr;

@@ -36,12 +33,6 @@ using std::unique_ptr;
namespace
{

const size_t ROCKSDB_KEY_LENGTH = 2 * SHA512_DIGEST_LENGTH;

#if ROCKSDB_KEY_LENGTH > CACHE_KEY_MAXLEN
#error storage_rocksdb key is too long.
#endif

// See https://github.com/facebook/rocksdb/wiki/Basic-Operations#thread-pools
// These figures should perhaps depend upon the number of cache instances.
const size_t ROCKSDB_N_LOW_THREADS = 2;

@@ -318,67 +309,6 @@ RocksDBStorage* RocksDBStorage::Create(const char* zName,
    return sStorage.release();
}

cache_result_t RocksDBStorage::Get_key(const char* zDefault_db, const GWBUF& query, CACHE_KEY* pKey)
{
    ss_dassert(GWBUF_IS_CONTIGUOUS(&query));

    int n;
    bool fullnames = true;
    char** pzTables = qc_get_table_names(const_cast<GWBUF*>(&query), &n, fullnames);

    set<string> dbs; // Elements in set are sorted.

    for (int i = 0; i < n; ++i)
    {
        char *zTable = pzTables[i];
        char *zDot = strchr(zTable, '.');

        if (zDot)
        {
            *zDot = 0;
            dbs.insert(zTable);
        }
        else if (zDefault_db)
        {
            // If zDefaultDB is NULL, then there will be a table for which we
            // do not know the database. However, that will fail in the server,
            // so nothing will be stored.
            dbs.insert(zDefault_db);
        }
        MXS_FREE(zTable);
    }
    MXS_FREE(pzTables);

    // dbs now contain each accessed database in sorted order. Now copy them to a single string.
    string tag;
    for_each(dbs.begin(), dbs.end(), [&tag](const string & db)
    {
        tag.append(db);
    });

    memset(pKey->data, 0, CACHE_KEY_MAXLEN);

    const unsigned char* pData;

    // We store the databases in the first half of the key. That will ensure that
    // identical queries targeting different default databases will not clash.
    // This will also mean that entries related to the same databases will
    // be placed near each other.
    pData = reinterpret_cast<const unsigned char*>(tag.data());
    SHA512(pData, tag.length(), reinterpret_cast<unsigned char*>(pKey->data));

    char *pSql;
    int length;

    modutil_extract_SQL(const_cast<GWBUF*>(&query), &pSql, &length);

    // Then we store the query itself in the second half of the key.
    pData = reinterpret_cast<const unsigned char*>(pSql);
    SHA512(pData, length, reinterpret_cast<unsigned char*>(pKey->data) + SHA512_DIGEST_LENGTH);

    return CACHE_RESULT_OK;
}

void RocksDBStorage::get_config(CACHE_STORAGE_CONFIG* pConfig)
{
    *pConfig = m_config;

@@ -414,7 +344,7 @@ cache_result_t RocksDBStorage::get_value(const CACHE_KEY& key, uint32_t flags, G
{
    // Use the root DB so that we get the value *with* the timestamp at the end.
    rocksdb::DB* pDb = m_sDb->GetRootDB();
    rocksdb::Slice rocksdb_key(key.data, ROCKSDB_KEY_LENGTH);
    rocksdb::Slice rocksdb_key(reinterpret_cast<const char*>(&key.data), sizeof(key.data));
    string value;

    rocksdb::Status status = pDb->Get(rocksdb::ReadOptions(), rocksdb_key, &value);

@@ -501,7 +431,7 @@ cache_result_t RocksDBStorage::put_value(const CACHE_KEY& key, const GWBUF& valu
{
    ss_dassert(GWBUF_IS_CONTIGUOUS(&value));

    rocksdb::Slice rocksdb_key(key.data, ROCKSDB_KEY_LENGTH);
    rocksdb::Slice rocksdb_key(reinterpret_cast<const char*>(&key.data), sizeof(key.data));
    rocksdb::Slice rocksdb_value((char*)GWBUF_DATA(&value), GWBUF_LENGTH(&value));

    rocksdb::Status status = m_sDb->Put(Write_options(), rocksdb_key, rocksdb_value);

@@ -511,7 +441,7 @@ cache_result_t RocksDBStorage::put_value(const CACHE_KEY& key, const GWBUF& valu

cache_result_t RocksDBStorage::del_value(const CACHE_KEY& key)
{
    rocksdb::Slice rocksdb_key(key.data, ROCKSDB_KEY_LENGTH);
    rocksdb::Slice rocksdb_key(reinterpret_cast<const char*>(&key.data), sizeof(key.data));

    rocksdb::Status status = m_sDb->Delete(Write_options(), rocksdb_key);
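The RocksDB backend now views the 64-bit key directly as an 8-byte slice, as the three hunks above show. A small sketch of that pattern (the helper name is illustrative; note that the key bytes are in host byte order):

    #include <cstdint>
    #include <rocksdb/slice.h>

    // The returned slice only references the caller's key, so the key must
    // outlive the slice, as it does in get_value/put_value/del_value above.
    inline rocksdb::Slice key_to_slice(const uint64_t& key)
    {
        return rocksdb::Slice(reinterpret_cast<const char*>(&key), sizeof(key));
    }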
@@ -30,8 +30,6 @@ public:
                              int argc, char* argv[]);
    ~RocksDBStorage();

    static cache_result_t Get_key(const char* zDefault_db, const GWBUF& query, CACHE_KEY* pKey);

    void get_config(CACHE_STORAGE_CONFIG* pConfig);
    cache_result_t get_info(uint32_t flags, json_t** ppInfo) const;
    cache_result_t get_value(const CACHE_KEY& key, uint32_t flags, GWBUF** ppResult);
@@ -37,21 +37,6 @@ public:
        return reinterpret_cast<CACHE_STORAGE*>(pStorage);
    }

    static cache_result_t getKey(const char* zDefault_db,
                                 const GWBUF* pQuery,
                                 CACHE_KEY* pKey)
    {
        // zdefault_db may be NULL.
        ss_dassert(pQuery);
        ss_dassert(pKey);

        cache_result_t result = CACHE_RESULT_ERROR;

        MXS_EXCEPTION_GUARD(result = StorageType::Get_key(zDefault_db, *pQuery, pKey));

        return result;
    }

    static void freeInstance(CACHE_STORAGE* pInstance)
    {
        MXS_EXCEPTION_GUARD(delete reinterpret_cast<StorageType*>(pInstance));

@@ -196,7 +181,6 @@ CACHE_STORAGE_API StorageModule<StorageType>::s_api =
{
    &StorageModule<StorageType>::initialize,
    &StorageModule<StorageType>::createInstance,
    &StorageModule<StorageType>::getKey,
    &StorageModule<StorageType>::freeInstance,
    &StorageModule<StorageType>::getConfig,
    &StorageModule<StorageType>::getInfo,
@@ -223,11 +223,3 @@ Storage* StorageFactory::createRawStorage(const char* zName,

    return pStorage;
}

cache_result_t StorageFactory::get_key(const char* zDefaultDb,
                                       const GWBUF* pQuery,
                                       CACHE_KEY* pKey) const
{
    return m_pApi->getKey(zDefaultDb, pQuery, pKey);
}

server/modules/filter/cache/storagefactory.hh (13 changes)

@@ -85,19 +85,6 @@ public:
                              const CACHE_STORAGE_CONFIG& config,
                              int argc = 0, char* argv[] = NULL);

    /**
     * Create a key for a GWBUF.
     *
     * @param zDefaultDb  The default DB or NULL.
     * @param query       An SQL query. Must be one contiguous buffer.
     * @param pKey        Pointer to object where key will be stored.
     *
     * @return CACHE_RESULT_OK if a key was created, otherwise some error code.
     */
    cache_result_t get_key(const char* zDefaultDb,
                           const GWBUF* pQuery,
                           CACHE_KEY* pKey) const;

private:
    StorageFactory(void* handle, CACHE_STORAGE_API* pApi, uint32_t capabilities);

server/modules/filter/cache/storagereal.cc (7 changes)

@@ -38,13 +38,6 @@ cache_result_t StorageReal::get_info(uint32_t flags, json_t** ppInfo) const
    return m_pApi->getInfo(m_pStorage, flags, ppInfo);
}

cache_result_t StorageReal::get_key(const char* zDefaultDb,
                                    const GWBUF* pQuery,
                                    CACHE_KEY* pKey) const
{
    return m_pApi->getKey(zDefaultDb, pQuery, pKey);
}

cache_result_t StorageReal::get_value(const CACHE_KEY& key,
                                      uint32_t flags,
                                      GWBUF** ppValue) const
server/modules/filter/cache/storagereal.hh (4 changes)

@@ -25,10 +25,6 @@ public:
    cache_result_t get_info(uint32_t flags,
                            json_t** ppInfo) const;

    cache_result_t get_key(const char* zDefaultDb,
                           const GWBUF* pQuery,
                           CACHE_KEY* pKey) const;

    cache_result_t get_value(const CACHE_KEY& key,
                             uint32_t flags,
                             GWBUF** ppValue) const;
server/modules/filter/cache/test/tester.cc (3 changes)

@@ -15,6 +15,7 @@
#include <algorithm>
#include <iostream>
#include <set>
#include "cache.hh"
#include "storagefactory.hh"
// TODO: Move this to a common place.
#include "../../../../../query_classifier/test/testreader.hh"

@@ -245,7 +246,7 @@ bool Tester::get_cache_items(const Statements& statements,
        if (pQuery)
        {
            CACHE_KEY key;
            cache_result_t result = factory.get_key(NULL, pQuery, &key);
            cache_result_t result = Cache::get_default_key(NULL, pQuery, &key);

            if (result == CACHE_RESULT_OK)
            {

@@ -178,7 +178,7 @@ int TesterStorage::run(size_t n_threads,

        CacheKey key;

        sprintf(key.data, "%lu", i);
        key.data = i;

        vector<uint8_t> value(size, static_cast<uint8_t>(i));

@@ -18,6 +18,7 @@
#include <maxscale/query_classifier.h>
#include <maxscale/log_manager.h>
#include "storagefactory.hh"
#include "cache.hh"
#include "cache_storage_api.hh"
#include "tester.hh"

@@ -60,7 +61,7 @@ int test(StorageFactory& factory, istream& in)
    if (pQuery)
    {
        CACHE_KEY key;
        cache_result_t result = factory.get_key(NULL, pQuery, &key);
        cache_result_t result = Cache::get_default_key(NULL, pQuery, &key);

        if (result == CACHE_RESULT_OK)
        {