init push

This commit is contained in:
oceanbase-admin
2021-05-31 22:56:52 +08:00
commit cea7de1475
7020 changed files with 5689869 additions and 0 deletions

4
unittest/share/cache/CMakeLists.txt vendored Normal file
View File

@ -0,0 +1,4 @@
# Cache unit tests; the commented-out entries are currently disabled builds.
#ob_unittest(test_kv_storecache)
ob_unittest(test_cache_utils)
#ob_unittest(test_working_set_mgr)
#ob_unittest(test_cache_working_set)

View File

@ -0,0 +1,735 @@
/**
* Copyright (c) 2021 OceanBase
* OceanBase CE is licensed under Mulan PubL v2.
* You can use this software according to the terms and conditions of the Mulan PubL v2.
* You may obtain a copy of Mulan PubL v2 at:
* http://license.coscl.org.cn/MulanPubL-2.0
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PubL v2 for more details.
*/
#ifndef OCEANBASE_SHARE_OB_CACHE_TEST_UTILS_H_
#define OCEANBASE_SHARE_OB_CACHE_TEST_UTILS_H_
#include "share/ob_define.h"
#include "lib/atomic/ob_atomic.h"
#include "lib/queue/ob_lighty_queue.h"
#include "lib/alloc/ob_malloc_allocator.h"
namespace oceanbase {
using namespace lib;
namespace common {
// Test key for ObKVCache with a tunable footprint: buf_ pads the object so
// size() roughly tracks the SIZE template parameter.  Identity (hash and
// equality) is carried only by v_ and tenant_id_; the padding just inflates
// the bytes stored per key.
template <int64_t SIZE>
struct TestKVCacheKey : public ObIKVCacheKey {
TestKVCacheKey(void) : v_(0), tenant_id_(0)
{
memset(buf_, 0, sizeof(buf_));
}
virtual bool operator==(const ObIKVCacheKey& other) const;
virtual uint64_t get_tenant_id() const
{
return tenant_id_;
}
// Hash is simply the raw key value.
virtual uint64_t hash() const
{
return v_;
}
virtual int64_t size() const
{
return sizeof(*this);
}
virtual int deep_copy(char* buf, const int64_t buf_len, ObIKVCacheKey*& key) const;
uint64_t v_;
uint64_t tenant_id_;
// Padding; zero-length (GNU extension) when SIZE <= sizeof(v_).
char buf_[SIZE > sizeof(v_) ? SIZE - sizeof(v_) : 0];
};
// Test value for ObKVCache: a single payload word plus padding so size()
// roughly tracks the SIZE template parameter.
template <int64_t SIZE>
struct TestKVCacheValue : public ObIKVCacheValue {
TestKVCacheValue(void) : v_(0)
{
memset(buf_, 0, sizeof(buf_));
}
virtual int64_t size() const
{
return sizeof(*this);
}
virtual int deep_copy(char* buf, const int64_t buf_len, ObIKVCacheValue*& value) const;
uint64_t v_;
// Padding; zero-length (GNU extension) when SIZE <= sizeof(v_).
char buf_[SIZE > sizeof(v_) ? SIZE - sizeof(v_) : 0];
};
// Equality compares logical identity only (v_ and tenant_id_); the padding
// buffer is ignored.  Uses static_cast for the downcast: unlike the original
// reinterpret_cast, static_cast is verified against the class hierarchy at
// compile time (the cache stores exactly one concrete key type per cache, so
// the downcast itself is assumed valid here).
template <int64_t SIZE>
bool TestKVCacheKey<SIZE>::operator==(const ObIKVCacheKey& other) const
{
  const TestKVCacheKey& other_key = static_cast<const TestKVCacheKey&>(other);
  return v_ == other_key.v_ && tenant_id_ == other_key.tenant_id_;
}
// Deep copy: placement-construct a duplicate of this key into the
// caller-supplied buffer and return it through |key|.
// Returns OB_INVALID_ARGUMENT when the buffer is missing or too small.
template <int64_t SIZE>
int TestKVCacheKey<SIZE>::deep_copy(char* buf, const int64_t buf_len, ObIKVCacheKey*& key) const
{
  int ret = OB_SUCCESS;
  if (NULL != buf && buf_len >= size()) {
    TestKVCacheKey<SIZE>* copied = new (buf) TestKVCacheKey<SIZE>();
    copied->v_ = v_;
    copied->tenant_id_ = tenant_id_;
    key = copied;
  } else {
    ret = OB_INVALID_ARGUMENT;
  }
  return ret;
}
// Deep copy: placement-construct a duplicate of this value into the
// caller-supplied buffer and return it through |value|.
// Returns OB_INVALID_ARGUMENT when the buffer is missing or too small.
template <int64_t SIZE>
int TestKVCacheValue<SIZE>::deep_copy(char* buf, const int64_t buf_len, ObIKVCacheValue*& value) const
{
  int ret = OB_SUCCESS;
  if (NULL != buf && buf_len >= size()) {
    TestKVCacheValue<SIZE>* copied = new (buf) TestKVCacheValue<SIZE>();
    copied->v_ = v_;
    value = copied;
  } else {
    ret = OB_INVALID_ARGUMENT;
  }
  return ret;
}
// Intrusive singly-linked list node recording one ob_malloc'ed buffer.  The
// node is placement-constructed at the head of the allocation it records, so
// freeing ptr_ also releases the node itself.
struct AllocBuf {
void* ptr_;
AllocBuf* next_;
};
class ObCacheTestTask;
// Callback interface stress tasks use to report failures and to pair an
// "alloc" task with its matching "free" task.
class ObICacheTestStat {
public:
  // Polymorphic interface: provide a virtual destructor so deleting an
  // implementation through a base pointer is well-defined (the original
  // omitted it).
  virtual ~ObICacheTestStat()
  {}
  virtual void add_task(ObCacheTestTask* task) = 0;
  virtual ObCacheTestTask* pop_oppo_task(ObCacheTestTask* task) = 0;
  virtual void inc_fail_count() = 0;
};
// One allocator-stress work item.  With is_alloc_ == true it ob_mallocs
// alloc_count_ buffers of alloc_size_ bytes for tenant_id_ and publishes
// itself via stat_; with is_alloc_ == false it pops the matching alloc task
// from stat_ and frees every buffer that task recorded.
class ObCacheTestTask {
public:
ObCacheTestTask(const int64_t tenant_id, const bool is_alloc, const int64_t alloc_size, const int64_t alloc_count,
ObICacheTestStat* stat)
: tenant_id_(tenant_id),
is_alloc_(is_alloc),
alloc_size_(alloc_size),
alloc_count_(alloc_count),
next_(NULL),
stat_(stat),
alloc_list_(NULL)
{}
virtual ~ObCacheTestTask()
{}
// Run the task; any failure dumps the tenant's memory usage and bumps the
// shared fail counter before returning the error code.
virtual int process()
{
int ret = OB_SUCCESS;
if (is_alloc_) {
ObMemAttr attr;
attr.tenant_id_ = tenant_id_;
attr.label_ = 1; // arbitrary non-zero label for test accounting — TODO confirm intended label
for (int64_t i = 0; i < alloc_count_; ++i) {
void* ptr = ob_malloc(alloc_size_, attr);
if (NULL == ptr) {
ret = OB_ALLOCATE_MEMORY_FAILED;
COMMON_LOG(WARN, "ob_malloc failed", K(ret), K_(alloc_size));
ObMallocAllocator::get_instance()->print_tenant_memory_usage(tenant_id_);
ObMallocAllocator::get_instance()->print_tenant_ctx_memory_usage(tenant_id_);
break;
} else {
append_alloc_list(ptr);
}
}
if (OB_SUCC(ret)) {
// publish this task so a later "free" task can release the buffers
stat_->add_task(this);
}
} else {
ObCacheTestTask* oppo_task = stat_->pop_oppo_task(this);
if (NULL == oppo_task) {
ret = OB_ERR_UNEXPECTED;
COMMON_LOG(WARN, "oppo_task not exist", K(ret), "this", *this);
} else {
int64_t free_count = 0;
AllocBuf* buf = oppo_task->alloc_list_;
AllocBuf* next = NULL;
// Each list node lives inside the buffer it records, so read next_
// BEFORE freeing — the free releases the node too.
while (NULL != buf) {
next = buf->next_;
ob_free(buf->ptr_);
buf = next;
++free_count;
}
if (free_count != alloc_count_) {
ret = OB_ERR_UNEXPECTED;
COMMON_LOG(WARN, "free_count != alloc_count_", K(ret), K(free_count), K_(alloc_count));
}
// oppo_task was placement-new'ed into ob_malloc'ed memory by
// ObAllocatorStress::add_task; its destructor does no work, so a plain
// ob_free releases it without leaking.
ob_free(oppo_task);
}
}
if (OB_FAIL(ret)) {
COMMON_LOG(WARN, "task process failed", K(ret));
ObMallocAllocator::get_instance()->print_tenant_memory_usage(tenant_id_);
ObMallocAllocator::get_instance()->print_tenant_ctx_memory_usage(tenant_id_);
stat_->inc_fail_count();
}
return ret;
}
void set_next(ObCacheTestTask* task)
{
next_ = task;
}
// Record one allocated buffer by placement-constructing the list node at the
// head of the buffer itself, then pushing it on the front of alloc_list_.
void append_alloc_list(void* ptr)
{
AllocBuf* buf = new (ptr) AllocBuf();
buf->ptr_ = ptr;
buf->next_ = alloc_list_;
alloc_list_ = buf;
}
TO_STRING_KV(K_(tenant_id), K_(is_alloc), K_(alloc_size), K_(alloc_count));
public:
uint64_t tenant_id_;
bool is_alloc_; // alloc or free
int64_t alloc_size_;
int64_t alloc_count_;
ObCacheTestTask* next_; // link in CacheTestStat's pending-task list
ObICacheTestStat* stat_; // shared result sink (not owned)
AllocBuf* alloc_list_; // buffers allocated by this task
};
// Collects allocator-stress results: an atomic failure counter plus a singly
// linked list of completed "alloc" tasks waiting to be paired with a "free"
// task.  add_task()/pop_oppo_task() themselves are not synchronized; they
// appear to rely on the single consumer thread in ObAllocatorStress — confirm
// before adding more consumers.
class CacheTestStat : public ObICacheTestStat {
public:
  CacheTestStat() : fail_count_(0), task_list_(NULL)
  {}
  virtual ~CacheTestStat()
  {}
  // Atomically bump the failure counter (may be called from worker threads).
  virtual void inc_fail_count()
  {
    ATOMIC_AAF(&fail_count_, 1);
  }
  virtual int64_t get_fail_count() const
  {
    return ATOMIC_LOAD(&fail_count_);
  }
  // Push a finished alloc-task on the head of the pending list.
  virtual void add_task(ObCacheTestTask* task)
  {
    if (NULL != task) {
      task->next_ = task_list_;
      task_list_ = task;
    }
  }
  // Find and unlink the alloc-task matching the given free-task (same tenant,
  // alloc size and alloc count).  Returns NULL when no match exists.
  virtual ObCacheTestTask* pop_oppo_task(ObCacheTestTask* task)
  {
    ObCacheTestTask* oppo_task = NULL;
    ObCacheTestTask* prev_task = NULL;
    ObCacheTestTask* cur_task = NULL;
    if (NULL != task && !task->is_alloc_) {
      cur_task = task_list_;
      while (NULL != cur_task) {
        if (cur_task->is_alloc_ && cur_task->tenant_id_ == task->tenant_id_ &&
            cur_task->alloc_size_ == task->alloc_size_ && cur_task->alloc_count_ == task->alloc_count_) {
          oppo_task = cur_task;
          // unlink the matched node from the list
          if (NULL != prev_task) {
            prev_task->next_ = cur_task->next_;
          } else {
            task_list_ = cur_task->next_;
          }
          cur_task->next_ = NULL;
          break;
        }
        // BUGFIX: advance prev_task BEFORE stepping cur_task.  The original
        // updated prev_task after cur_task, leaving prev_task == cur_task on
        // every iteration; unlinking a non-head match then truncated the list
        // and left the predecessor pointing at the freed task.
        prev_task = cur_task;
        cur_task = cur_task->next_;
      }
    }
    return oppo_task;
  }
private:
  int64_t fail_count_;  // total failed tasks (atomic access)
  ObCacheTestTask* task_list_;  // head of pending alloc-task list
};
// Worker thread that drains a queue of ObCacheTestTask items and processes
// them, dumping tenant memory stats after each task.  Tasks are copied into
// ob_malloc'ed memory on add_task() and released by the paired free-task
// (see ObCacheTestTask::process).
class ObAllocatorStress : public share::ObThreadPool {
public:
  ObAllocatorStress() : inited_(false), stat_(), queue_()
  {}
  virtual ~ObAllocatorStress()
  {}
  int init()
  {
    int ret = OB_SUCCESS;
    if (inited_) {
      ret = OB_INIT_TWICE;
      COMMON_LOG(WARN, "init twice", K(ret));
    } else if (OB_FAIL(queue_.init(1024))) {
      COMMON_LOG(WARN, "queue init failed", K(ret));
    } else {
      inited_ = true;
    }
    return ret;
  }
  // Thread body: keep popping and processing tasks; after stop is requested,
  // continue until the queue reports empty so no queued task is abandoned.
  virtual void run1()
  {
    int ret = OB_SUCCESS;
    COMMON_LOG(INFO, "allocator stress thread start");
    if (!inited_) {
      ret = OB_NOT_INIT;
      COMMON_LOG(WARN, "not init", K(ret));
    } else {
      // will process all task before exit
      while (!has_set_stop() || OB_ENTRY_NOT_EXIST != ret) {
        ObCacheTestTask* task = NULL;
        if (OB_FAIL(pop(task))) {
          if (OB_ENTRY_NOT_EXIST != ret) {
            COMMON_LOG(WARN, "pop task failed", K(ret));
          }
        } else {
          if (OB_FAIL(task->process())) {
            COMMON_LOG(WARN, "task process failed", K(ret));
          } else {
            COMMON_LOG(INFO, "task process succeed", "task", *task);
          }
          // dump both the fixed test tenant (500) and the task's tenant
          ObMallocAllocator::get_instance()->print_tenant_memory_usage(500);
          ObMallocAllocator::get_instance()->print_tenant_ctx_memory_usage(500);
          ObMallocAllocator::get_instance()->print_tenant_memory_usage(task->tenant_id_);
          ObMallocAllocator::get_instance()->print_tenant_ctx_memory_usage(task->tenant_id_);
        }
      }
    }
    COMMON_LOG(INFO, "allocator stress thread end");
  }
  // Copy the task into ob_malloc'ed memory and enqueue it.  Ownership of the
  // copy passes to the queue on success.
  int add_task(const ObCacheTestTask& task)
  {
    int ret = OB_SUCCESS;
    const int64_t buf_size = sizeof(task);
    void* ptr = NULL;
    if (NULL == (ptr = ob_malloc(buf_size))) {
      ret = OB_ALLOCATE_MEMORY_FAILED;
      COMMON_LOG(WARN, "ob_malloc failed", K(ret), K(buf_size));
    } else {
      ObCacheTestTask* copy_task =
          new (ptr) ObCacheTestTask(task.tenant_id_, task.is_alloc_, task.alloc_size_, task.alloc_count_, task.stat_);
      if (OB_FAIL(queue_.push(copy_task))) {
        COMMON_LOG(WARN, "push task failed", K(ret));
        // BUGFIX: the original leaked the copy when push failed; destroy and
        // release it here since the queue never took ownership.
        copy_task->~ObCacheTestTask();
        ob_free(ptr);
      }
    }
    return ret;
  }
  // Blocking pop with a 1s timeout; returns OB_ENTRY_NOT_EXIST when empty.
  int pop(ObCacheTestTask*& task)
  {
    int ret = OB_SUCCESS;
    void* vp = NULL;
    const int64_t timeout = 1000 * 1000;  // 1 second, in microseconds
    if (!inited_) {
      ret = OB_NOT_INIT;
      COMMON_LOG(WARN, "not init", K(ret));
    } else {
      ret = queue_.pop(vp, timeout);
      if (OB_FAIL(ret)) {
        if (OB_ENTRY_NOT_EXIST != ret) {
          COMMON_LOG(WARN, "queue pop failed", K(ret));
        }
      } else {
        task = static_cast<ObCacheTestTask*>(vp);
      }
    }
    return ret;
  }
  int64_t get_fail_count()
  {
    return stat_.get_fail_count();
  }
  CacheTestStat* get_stat()
  {
    return &stat_;
  }
private:
  bool inited_;
  CacheTestStat stat_;           // shared result sink for all tasks
  common::LightyQueue queue_;    // pending task queue (owns queued copies)
};
// Cache put/get stress worker: each loop iteration puts a fresh key for the
// configured tenant and immediately reads it back, counting iterations and
// failures.  put_count_/fail_count_ are plain int64_t, so run with a single
// worker thread — TODO confirm intended thread count.
template <int64_t K_SIZE, int64_t V_SIZE>
class ObCacheStress : public share::ObThreadPool {
public:
typedef TestKVCacheKey<K_SIZE> TestKey;
typedef TestKVCacheValue<V_SIZE> TestValue;
ObCacheStress() : inited_(false), tenant_id_(OB_INVALID_ID), put_count_(0), fail_count_(0), cache_()
{}
virtual ~ObCacheStress()
{}
// Creates a cache named "test_<index>" so several instances can coexist.
int init(const uint64_t tenant_id, int64_t index)
{
int ret = OB_SUCCESS;
char cache_name[1024];
snprintf(cache_name, 1024, "%s_%ld", "test", index);
if (inited_) {
ret = OB_INIT_TWICE;
COMMON_LOG(WARN, "init twice", K(ret));
} else if (OB_FAIL(cache_.init(cache_name))) {
COMMON_LOG(WARN, "cache init failed", K(ret));
} else {
tenant_id_ = tenant_id;
inited_ = true;
}
return ret;
}
// Thread body: put then get a monotonically increasing key until stopped.
// NOTE(review): 'arg' is not declared in this scope; it presumably comes
// from the legacy thread base class — confirm this still compiles.
virtual void run1()
{
UNUSED(arg);
int ret = OB_SUCCESS;
COMMON_LOG(INFO, "cache stress thread start");
TestKey key;
TestValue value;
if (!inited_) {
ret = OB_NOT_INIT;
COMMON_LOG(WARN, "not init", K(ret));
} else {
while (!has_set_stop()) {
key.tenant_id_ = tenant_id_;
key.v_ = put_count_;
if (OB_FAIL(cache_.put(key, value))) {
COMMON_LOG(WARN, "cache put failed", K(ret));
} else {
const TestValue* get_value = NULL;
ObKVCacheHandle handle;
if (OB_FAIL(cache_.get(key, get_value, handle))) {
COMMON_LOG(WARN, "cache get failed", K(ret));
}
}
++put_count_;
if (OB_FAIL(ret)) {
++fail_count_;
}
}
}
COMMON_LOG(INFO, "cache stress thread exit");
}
uint64_t get_tenant_id() const
{
return tenant_id_;
}
int64_t get_put_count() const
{
return put_count_;
}
int64_t get_fail_count() const
{
return fail_count_;
}
private:
bool inited_;
uint64_t tenant_id_;
int64_t put_count_; // iterations attempted (also the next key value)
int64_t fail_count_; // iterations where put or get failed
ObKVCache<TestKey, TestValue> cache_;
};
// Multi-threaded cache read stress used for hit-ratio experiments.  Thread 0
// is a monitor that prints the rolling hit ratio once per second; the other
// threads scan the preloaded key space, reading the last 20% of keys ("hot"
// keys) four times per pass.
template <int64_t K_SIZE, int64_t V_SIZE>
class ObCacheGetStress : public share::ObThreadPool {
public:
  typedef TestKVCacheKey<K_SIZE> TestKey;
  typedef TestKVCacheValue<V_SIZE> TestValue;
  ObCacheGetStress()
      : inited_(false), tenant_id_(OB_INVALID_ID), kv_cnt_(0), hit_cnt_(0), total_cnt_(0), fail_cnt_(0), cache_()
  {}
  // Create the cache and preload kv_cnt key/value pairs for tenant_id.
  int init(const uint64_t tenant_id, const int64_t kv_cnt)
  {
    int ret = OB_SUCCESS;
    if (inited_) {
      ret = OB_INIT_TWICE;
      COMMON_LOG(WARN, "init twice", K(ret));
    } else if (OB_INVALID_ID == tenant_id || kv_cnt <= 0) {
      ret = OB_INVALID_ARGUMENT;
      COMMON_LOG(WARN, "invalid arguments", K(ret), K(tenant_id), K(kv_cnt));
    } else if (OB_FAIL(cache_.init("test_cache"))) {
      COMMON_LOG(WARN, "cache init failed", K(ret));
    } else {
      // put kv pairs to cache
      TestKey key;
      TestValue value;
      for (int64_t i = 0; OB_SUCC(ret) && i < kv_cnt; ++i) {
        key.tenant_id_ = tenant_id;  // FIX: removed a stray empty statement here
        key.v_ = i;
        if (OB_FAIL(cache_.put(key, value))) {
          COMMON_LOG(WARN, "put failed", K(ret));
        }
      }
      tenant_id_ = tenant_id;
      kv_cnt_ = kv_cnt;
      hit_cnt_ = 0;
      total_cnt_ = 0;
      fail_cnt_ = 0;
      inited_ = true;
    }
    return ret;
  }
  // will create monitor thread print hit ratio per second
  virtual void set_thread_count(const int64_t thread_count)
  {
    // extra thread for monitor thread
    share::ObThreadPool::set_thread_count(static_cast<int32_t>(thread_count + 1));
  }
  // Thread 0 becomes the monitor; every other thread is a worker.
  // NOTE(review): 'arg' is not declared in this scope; it presumably comes
  // from the legacy thread base class as the per-thread index — confirm.
  virtual void run1()
  {
    int64_t thread_id = (int64_t)(arg);
    if (0 == thread_id) {
      do_monitor();
    } else {
      do_work();
    }
  }
  ObKVCache<TestKey, TestValue>& get_cache()
  {
    return cache_;
  }
  // One full scan of the key space; fraction of keys still resident.
  double get_hit_ratio()
  {
    int ret = OB_SUCCESS;
    int64_t hit_cnt = 0;
    TestKey key;
    const TestValue* pvalue = NULL;
    ObKVCacheHandle handle;
    for (int64_t i = 0; i < kv_cnt_; ++i) {
      key.tenant_id_ = tenant_id_;
      key.v_ = i;
      if (OB_FAIL(cache_.get(key, pvalue, handle))) {
        if (OB_ENTRY_NOT_EXIST != ret) {
          COMMON_LOG(ERROR, "get failed", K(ret));
        }
      } else {
        ++hit_cnt;
      }
    }
    return (double)hit_cnt / (double)kv_cnt_;
  }
private:
  // Once per second: log the hit ratio accumulated since the last report and
  // reset the counters.
  void do_monitor()
  {
    while (!has_set_stop()) {
      double hit_ratio = 0;
      const int64_t hit_cnt = ATOMIC_LOAD(&hit_cnt_);
      const int64_t total_cnt = ATOMIC_LOAD(&total_cnt_);
      ATOMIC_STORE(&hit_cnt_, 0);
      ATOMIC_STORE(&total_cnt_, 0);
      if (total_cnt > 0) {
        hit_ratio = (double)hit_cnt / (double)total_cnt;
      }
      COMMON_LOG(INFO, "get stress stat", K(hit_ratio));
      sleep(1);
    }
  }
  // Worker loop: scan all keys repeatedly; the hot tail is fetched 4x.
  void do_work()
  {
    int ret = OB_SUCCESS;
    TestKey key;
    const TestValue* pvalue = NULL;
    ObKVCacheHandle handle;
    while (!has_set_stop()) {
      for (int64_t i = 0; i < kv_cnt_ && !has_set_stop(); ++i) {
        // last 20% of the key space is "hot": read 4 times per pass
        const int64_t get_cnt = i < (int64_t)((double)kv_cnt_ * 0.8) ? 1 : 4;
        key.tenant_id_ = tenant_id_;
        key.v_ = i;
        // FIX: inner index renamed to 'j'; the original reused 'i', shadowing
        // the outer loop variable.
        for (int64_t j = 0; j < get_cnt; ++j) {
          ATOMIC_INC(&total_cnt_);
          if (OB_FAIL(cache_.get(key, pvalue, handle))) {
            if (OB_ENTRY_NOT_EXIST != ret) {
              COMMON_LOG(ERROR, "get failed", K(ret));
            } else {
              // a miss counts as a "failure" for this stress — TODO confirm
              ATOMIC_INC(&fail_cnt_);
            }
          } else {
            ATOMIC_INC(&hit_cnt_);
          }
        }
      }
    }
  }
  bool inited_;
  uint64_t tenant_id_;
  int64_t kv_cnt_;     // number of preloaded kv pairs
  int64_t hit_cnt_;    // hits since last monitor report (atomic)
  int64_t total_cnt_;  // gets since last monitor report (atomic)
  int64_t fail_cnt_;   // cache misses observed by workers (atomic)
  ObKVCache<TestKey, TestValue> cache_;
};
// Working-set put/get stress.  Depending on which init() is used, writes go
// either through an ObCacheWorkingSet wrapped around a cache, or directly to
// the cache; each worker thread covers a disjoint slice of the key space.
template <int64_t K_SIZE, int64_t V_SIZE>
class ObWorkingSetStress : public share::ObThreadPool {
public:
  typedef TestKVCacheKey<K_SIZE> TestKey;
  typedef TestKVCacheValue<V_SIZE> TestValue;
  // BUGFIX: only_put_, pcache_ and start_key_ were left uninitialized by the
  // original constructor; run1() reads start_key_ even when the first init()
  // overload (which never assigns it) was used.
  ObWorkingSetStress()
      : inited_(false),
        tenant_id_(OB_INVALID_ID),
        put_count_(0),
        fail_count_(0),
        only_put_(false),
        pcache_(NULL),
        start_key_(0)
  {}
  virtual ~ObWorkingSetStress()
  {}
  // Own-cache mode: create an internal cache and route all traffic through
  // the working set.
  int init(const uint64_t tenant_id, const bool only_put)
  {
    int ret = OB_SUCCESS;
    if (inited_) {
      ret = OB_INIT_TWICE;
      COMMON_LOG(WARN, "init twice", K(ret));
    } else if (OB_FAIL(cache_.init("test_cache"))) {
      COMMON_LOG(WARN, "cache init failed", K(ret));
    } else if (OB_FAIL(ws_.init(tenant_id, cache_))) {
      COMMON_LOG(WARN, "init ws failed", K(ret), K(tenant_id));
    } else {
      tenant_id_ = tenant_id;
      put_count_ = 0;
      fail_count_ = 0;
      only_put_ = only_put;
      pcache_ = &ws_;
      inited_ = true;
    }
    return ret;
  }
  // Shared-cache mode: stress an external cache, optionally through a
  // working set, starting at start_key.
  int init(
      const uint64_t tenant_id, ObKVCache<TestKey, TestValue>& cache, const bool use_ws, const int64_t start_key = 0)
  {
    int ret = OB_SUCCESS;
    if (inited_) {
      ret = OB_INIT_TWICE;
      COMMON_LOG(WARN, "init twice", K(ret));
    } else {
      tenant_id_ = tenant_id;
      put_count_ = 0;
      fail_count_ = 0;
      only_put_ = false;
      pcache_ = &cache;
      start_key_ = start_key;
      if (use_ws) {
        if (OB_FAIL(ws_.init(tenant_id, cache))) {
          COMMON_LOG(WARN, "init ws failed", K(ret), K(tenant_id));
        } else {
          pcache_ = &ws_;
        }
      }
      if (OB_SUCC(ret)) {
        inited_ = true;
      }
    }
    return ret;
  }
  // Thread body: each thread writes the arithmetic progression
  // start_key_ + thread_id + k * thread_count, so threads never collide.
  // NOTE(review): 'arg' and '_threadCount' are not declared in this scope;
  // they presumably come from the legacy thread base class — confirm.
  virtual void run1()
  {
    const int64_t thread_id = (int64_t)(arg);
    const int64_t count = _threadCount;
    int64_t put_count = 0;
    int64_t fail_count = 0;
    int ret = OB_SUCCESS;
    COMMON_LOG(INFO, "working set stress thread start");
    TestKey key;
    TestValue value;
    if (!inited_) {
      ret = OB_NOT_INIT;
      COMMON_LOG(WARN, "not init", K(ret));
    } else {
      while (!has_set_stop()) {
        key.tenant_id_ = tenant_id_;
        key.v_ = start_key_ + thread_id + put_count * count;
        if (OB_FAIL(pcache_->put(key, value))) {
          COMMON_LOG(WARN, "cache put failed", K(ret));
        } else if (!only_put_) {
          const TestValue* get_value = NULL;
          ObKVCacheHandle handle;
          if (OB_FAIL(pcache_->get(key, get_value, handle))) {
            // a racing wash may already have evicted the key; not a failure
            if (OB_ENTRY_NOT_EXIST == ret) {
              ret = OB_SUCCESS;
            } else {
              COMMON_LOG(WARN, "cache get failed", K(ret));
            }
          }
        }
        ++put_count;
        if (OB_FAIL(ret)) {
          ++fail_count;
          ObMallocAllocator::get_instance()->print_tenant_memory_usage(tenant_id_);
          ObMallocAllocator::get_instance()->print_tenant_ctx_memory_usage(tenant_id_);
        }
      }
    }
    // fold the thread-local counters into the shared totals once, at exit
    ATOMIC_AAF(&put_count_, put_count);
    ATOMIC_AAF(&fail_count_, fail_count);
    COMMON_LOG(INFO, "working set stress thread exit");
  }
  uint64_t get_tenant_id() const
  {
    return tenant_id_;
  }
  int64_t get_put_count() const
  {
    return put_count_;
  }
  int64_t get_fail_count() const
  {
    return fail_count_;
  }
  int64_t get_used() const
  {
    return ws_.working_set_->get_used();
  }
  int64_t get_limit() const
  {
    return ws_.working_set_->get_limit();
  }
private:
  bool inited_;
  uint64_t tenant_id_;
  int64_t put_count_;   // total puts across threads (atomic add at exit)
  int64_t fail_count_;  // total failures across threads (atomic add at exit)
  bool only_put_;       // skip the read-back when true
  ObIKVCache<TestKey, TestValue>* pcache_;  // traffic target: &ws_ or external cache
  ObKVCache<TestKey, TestValue> cache_;     // internal cache (own-cache mode)
  ObCacheWorkingSet<TestKey, TestValue> ws_;
  int64_t start_key_;   // first key value for this stress instance
};
} // end namespace common
} // end namespace oceanbase
#endif // OCEANBASE_SHARE_OB_CACHE_TEST_UTILS_H_

View File

@ -0,0 +1,148 @@
/**
* Copyright (c) 2021 OceanBase
* OceanBase CE is licensed under Mulan PubL v2.
* You can use this software according to the terms and conditions of the Mulan PubL v2.
* You may obtain a copy of Mulan PubL v2 at:
* http://license.coscl.org.cn/MulanPubL-2.0
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PubL v2 for more details.
*/
#define USING_LOG_PREFEX SHARE
#include <gtest/gtest.h>
#include <gmock/gmock.h>
#include "share/ob_define.h"
#define private public
#include "share/cache/ob_cache_utils.h"
using ::testing::_;
namespace oceanbase {
using namespace common;
using namespace lib;
namespace share {
// Exercises ObFixedHashMap end to end: fill to capacity, duplicate and
// overflow handling, lookup, in-order iteration, reuse(), and erase()
// bookkeeping via the internal size_ member (#define private public above).
TEST(TestFixedHashMap, basic)
{
ObFixedHashMap<int64_t, int64_t> fix_map;
ASSERT_EQ(OB_SUCCESS, fix_map.init(1024, 1024, "1"));
for (int64_t i = 0; i < 1024; ++i) {
ASSERT_EQ(OB_SUCCESS, fix_map.set(i, i));
// Re-inserting an existing key reports ENTRY_EXIST until the map is full;
// at capacity the overflow check apparently fires first — TODO confirm
// this ordering is intended.
if (i != 1023) {
ASSERT_EQ(OB_ENTRY_EXIST, fix_map.set(i, i));
} else {
ASSERT_EQ(OB_SIZE_OVERFLOW, fix_map.set(i, i));
}
}
ASSERT_EQ(OB_SIZE_OVERFLOW, fix_map.set(5000, 5000));
for (int64_t i = 0; i < 1024; ++i) {
int64_t value = 0;
ASSERT_EQ(OB_SUCCESS, fix_map.get(i, value));
ASSERT_EQ(value, i);
}
int64_t value = 0;
ASSERT_EQ(OB_ENTRY_NOT_EXIST, fix_map.get(5000, value));
// iteration yields entries in insertion order
ObFixedHashMap<int64_t, int64_t>::iterator iter = fix_map.begin();
int64_t i = 0;
for (; iter != fix_map.end(); ++iter) {
ASSERT_EQ(i, iter->first);
ASSERT_EQ(i, iter->second);
++i;
}
// reuse() empties the map but keeps its capacity
fix_map.reuse();
for (int64_t i = 0; i < 1024; ++i) {
int64_t value = 0;
ASSERT_EQ(OB_ENTRY_NOT_EXIST, fix_map.get(i, value));
}
ASSERT_EQ(fix_map.begin(), fix_map.end());
for (int64_t i = 0; i < 1024; ++i) {
ASSERT_EQ(OB_SUCCESS, fix_map.set(i, i));
}
for (int64_t i = 0; i < 1024; ++i) {
int64_t value = 0;
ASSERT_EQ(OB_SUCCESS, fix_map.get(i, value));
ASSERT_EQ(value, i);
}
ASSERT_EQ(OB_SIZE_OVERFLOW, fix_map.set(5000, 5000));
iter = fix_map.begin();
i = 0;
for (; iter != fix_map.end(); ++iter) {
ASSERT_EQ(i, iter->first);
ASSERT_EQ(i, iter->second);
++i;
}
// erase every entry, then verify double-erase and size_ bookkeeping
for (int64_t j = 0; j < i; ++j) {
ASSERT_EQ(OB_SUCCESS, fix_map.erase(j));
}
ASSERT_EQ(OB_ENTRY_NOT_EXIST, fix_map.erase(0));
ASSERT_EQ(0, fix_map.size_);
for (int64_t i = 0; i < 1024; ++i) {
int64_t value = 0;
ASSERT_EQ(OB_ENTRY_NOT_EXIST, fix_map.get(i, value));
ASSERT_EQ(OB_ENTRY_NOT_EXIST, fix_map.erase(i));
}
ASSERT_EQ(0, fix_map.size_);
// refill after erase to prove slots are recycled
for (int64_t i = 0; i < 1024; ++i) {
ASSERT_EQ(OB_SUCCESS, fix_map.set(i, i));
int64_t value = 0;
ASSERT_EQ(OB_SUCCESS, fix_map.get(i, value));
ASSERT_EQ(value, i);
}
ASSERT_EQ(1024, fix_map.size_);
ASSERT_EQ(OB_SIZE_OVERFLOW, fix_map.set(5000, 5000));
}
// Exercises ObFreeHeap: bump-allocate (sbrk) until capacity, overflow, then
// reuse() and multi-element sbrk up to exactly the remaining capacity.
TEST(TestFreeHeap, basic)
{
ObFreeHeap<int64_t> heap;
ASSERT_EQ(OB_SUCCESS, heap.init(1024, "1"));
for (int64_t i = 0; i < 1024; ++i) {
int64_t* ptr = NULL;
ASSERT_EQ(OB_SUCCESS, heap.sbrk(ptr));
MEMSET(ptr, 0, sizeof(int64_t));
}
int64_t* ptr = NULL;
ASSERT_EQ(OB_BUF_NOT_ENOUGH, heap.sbrk(ptr));
// reuse() resets the allocation cursor, keeping capacity
heap.reuse();
for (int64_t i = 0; i < 512; ++i) {
int64_t* ptr = NULL;
ASSERT_EQ(OB_SUCCESS, heap.sbrk(ptr));
MEMSET(ptr, 0, sizeof(int64_t));
}
// 512 + 500 + 12 == 1024: consume exactly the remaining capacity
ASSERT_EQ(OB_SUCCESS, heap.sbrk(500, ptr));
MEMSET(ptr, 0, sizeof(int64_t) * 500);
ASSERT_EQ(OB_SUCCESS, heap.sbrk(12, ptr));
MEMSET(ptr, 0, sizeof(int64_t) * 12);
ASSERT_EQ(OB_BUF_NOT_ENOUGH, heap.sbrk(ptr));
}
// Exercises ObSimpleFixedArray: fill to capacity, overflow on push_back,
// indexed read-back, then reuse() and refill.
TEST(TestFixArray, basic)
{
ObSimpleFixedArray<int64_t> fix_array;
ASSERT_EQ(OB_SUCCESS, fix_array.init(1024, "1"));
for (int64_t i = 0; i < 1024; ++i) {
ASSERT_EQ(OB_SUCCESS, fix_array.push_back(i));
}
ASSERT_EQ(OB_SIZE_OVERFLOW, fix_array.push_back(1025));
for (int64_t i = 0; i < fix_array.count(); ++i) {
ASSERT_EQ(fix_array.at(i), i);
}
// reuse() empties the array but keeps its capacity
fix_array.reuse();
for (int64_t i = 0; i < 1024; ++i) {
ASSERT_EQ(OB_SUCCESS, fix_array.push_back(i));
}
}
} // end namespace share
} // end namespace oceanbase
// Test entry point: route logging at INFO and run all registered gtest cases.
int main(int argc, char** argv)
{
oceanbase::common::ObLogger::get_logger().set_log_level("INFO");
// NOTE(review): OB_LOGGER usually expands to the same singleton as the line
// above, which would make one of the two calls redundant — confirm.
OB_LOGGER.set_log_level("INFO");
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

View File

@ -0,0 +1,256 @@
/**
* Copyright (c) 2021 OceanBase
* OceanBase CE is licensed under Mulan PubL v2.
* You can use this software according to the terms and conditions of the Mulan PubL v2.
* You may obtain a copy of Mulan PubL v2 at:
* http://license.coscl.org.cn/MulanPubL-2.0
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PubL v2 for more details.
*/
#define USING_LOG_PREFEX COMMON
#include <gtest/gtest.h>
#include <gmock/gmock.h>
#include "share/ob_define.h"
#include "lib/container/ob_array.h"
#define private public
#include "share/ob_tenant_mgr.h"
#include "share/cache/ob_working_set_mgr.h"
#include "share/cache/ob_kv_storecache.h"
#include "ob_cache_test_utils.h"
#include "observer/ob_signal_handle.h"
using ::testing::_;
namespace oceanbase {
using namespace share;
namespace common {
// Fixture: boots the global KV cache and tenant manager, registers tenant
// 1001 with a 1 GB memory limit, and tears both singletons down after each
// test so cases start from a clean slate.
class TestCacheWorkingSet : public ::testing::Test {
public:
static const int64_t K_SIZE = 10;
static const int64_t V_SIZE = 16 * 1024;
typedef TestKVCacheKey<K_SIZE> Key;
typedef TestKVCacheValue<V_SIZE> Value;
TestCacheWorkingSet();
virtual ~TestCacheWorkingSet();
virtual void SetUp();
virtual void TearDown();
protected:
uint64_t tenant_id_;
int64_t mem_limit_;
};
TestCacheWorkingSet::TestCacheWorkingSet() : tenant_id_(OB_INVALID_ID), mem_limit_(0)
{}
TestCacheWorkingSet::~TestCacheWorkingSet()
{}
void TestCacheWorkingSet::SetUp()
{
ASSERT_EQ(OB_SUCCESS, ObKVGlobalCache::get_instance().init());
ObTenantManager& tenant_mgr = ObTenantManager::get_instance();
mem_limit_ = 1 * 1024 * 1024 * 1024;  // 1 GB per-tenant memory limit
tenant_id_ = 1001;
ASSERT_EQ(OB_SUCCESS, tenant_mgr.init());
ASSERT_EQ(OB_SUCCESS, tenant_mgr.add_tenant(tenant_id_));
ASSERT_EQ(OB_SUCCESS, tenant_mgr.set_tenant_mem_limit(tenant_id_, mem_limit_, mem_limit_));
}
void TestCacheWorkingSet::TearDown()
{
ObTenantManager::get_instance().destroy();
ObKVGlobalCache::get_instance().destroy();
}
// Basic ObCacheWorkingSet API coverage: every call fails with OB_NOT_INIT
// before init, then put/get/erase/put_and_fetch are driven with 3x the
// working-set limit so some entries are expected to be washed (hence
// OB_ENTRY_NOT_EXIST is tolerated on get/erase).
TEST_F(TestCacheWorkingSet, basic)
{
CHUNK_MGR.set_limit(2L * 1024L * 1024L * 1024L);
Key key;
Value value;
const TestKVCacheValue<V_SIZE>* pvalue = NULL;
ObKVCacheHandle handle;
ObCacheWorkingSet<Key, Value> ws;
// not init
ASSERT_EQ(OB_NOT_INIT, ws.put(key, value));
ASSERT_EQ(OB_NOT_INIT, ws.put_and_fetch(key, value, pvalue, handle));
ASSERT_EQ(OB_NOT_INIT, ws.get(key, pvalue, handle));
ASSERT_EQ(OB_NOT_INIT, ws.erase(key));
ObKVCache<Key, Value> test_cache;
ASSERT_EQ(OB_SUCCESS, test_cache.init("test_cache"));
ASSERT_EQ(OB_SUCCESS, ws.init(tenant_id_, test_cache));
// 3x the limit guarantees the working set overflows during the test
const int64_t count = ws.get_limit() / V_SIZE * 3;
// put
for (int64_t i = 0; i < count; ++i) {
key.tenant_id_ = tenant_id_;
key.v_ = i;
ASSERT_EQ(OB_SUCCESS, ws.put(key, value));
}
// get
int64_t succeed_cnt = 0;
for (int64_t i = 0; i < count; ++i) {
key.tenant_id_ = tenant_id_;
key.v_ = i;
int ret = ws.get(key, pvalue, handle);
succeed_cnt += (OB_SUCCESS == ret ? 1 : 0);
ASSERT_TRUE(OB_SUCCESS == ret || OB_ENTRY_NOT_EXIST == ret);
}
ASSERT_TRUE(succeed_cnt > 0);
COMMON_LOG(INFO, "stat", K(succeed_cnt));
// erase
succeed_cnt = 0;
for (int64_t i = 0; i < count; ++i) {
key.tenant_id_ = tenant_id_;
key.v_ = i;
int ret = ws.erase(key);
succeed_cnt += (OB_SUCCESS == ret ? 1 : 0);
ASSERT_TRUE(OB_SUCCESS == ret || OB_ENTRY_NOT_EXIST == ret);
}
ASSERT_TRUE(succeed_cnt > 0);
COMMON_LOG(INFO, "stat", K(succeed_cnt));
// put_and_fetch
for (int64_t i = 0; i < count; ++i) {
key.tenant_id_ = tenant_id_;
key.v_ = i;
ASSERT_EQ(OB_SUCCESS, ws.put_and_fetch(key, value, pvalue, handle));
}
}
// Runs 10 ObWorkingSetStress writer threads for 20s (put-only mode) and
// requires zero failures; logs throughput and working-set usage at the end.
TEST_F(TestCacheWorkingSet, parallel)
{
CHUNK_MGR.set_limit(5L * 1024L * 1024L * 1024L);
ObWorkingSetStress<K_SIZE, V_SIZE> ws_stress;
ASSERT_EQ(OB_SUCCESS, ws_stress.init(tenant_id_, true));
ws_stress.set_thread_count(10);
ws_stress.start();
sleep(20);
ws_stress.stop();
ws_stress.wait();
const int64_t put_count = ws_stress.get_put_count();
const int64_t fail_count = ws_stress.get_fail_count();
ASSERT_EQ(0, fail_count);
COMMON_LOG(INFO, "put speed", K(put_count));
COMMON_LOG(INFO, "stat", "used", ws_stress.get_used(), "limit", ws_stress.get_limit());
}
// With the background wash timer disabled, putting the same volume through a
// working set must grow the underlying cache much less (< 1/2) than putting
// directly into a cache, because the working set bounds its own footprint.
TEST_F(TestCacheWorkingSet, cache_size_increase)
{
// close background wash timer task
ObKVGlobalCache::get_instance().wash_timer_.stop();
ObKVGlobalCache::get_instance().wash_timer_.wait();
Key key;
Value value;
ObKVCache<Key, Value> test_cache;
ASSERT_EQ(OB_SUCCESS, test_cache.init("test_cache"));
const int64_t count = mem_limit_ / 2 / V_SIZE;
// baseline: put directly into the cache
for (int64_t i = 0; i < count; ++i) {
key.tenant_id_ = tenant_id_;
key.v_ = i;
ASSERT_EQ(OB_SUCCESS, test_cache.put(key, value));
}
const int64_t direct_size = test_cache.store_size(tenant_id_);
// same volume routed through a working set
ObKVCache<Key, Value> ws_cache;
ASSERT_EQ(OB_SUCCESS, ws_cache.init("ws_cache"));
ObCacheWorkingSet<Key, Value> ws;
ASSERT_EQ(OB_SUCCESS, ws.init(tenant_id_, ws_cache));
for (int64_t i = 0; i < count; ++i) {
key.tenant_id_ = tenant_id_;
key.v_ = i;
ASSERT_EQ(OB_SUCCESS, ws.put(key, value));
}
const int64_t ws_cache_size = ws_cache.store_size(tenant_id_);
ASSERT_TRUE(ws_cache_size * 2 < direct_size);
COMMON_LOG(INFO, "result", K(direct_size), K(ws_cache_size), K(ws.get_used()));
}
// End-to-end hit-ratio comparison (runs ~2+ minutes): readers hammer a
// preloaded cache while a writer floods it with fresh keys, first without and
// then with a working set isolating the writer.  The working set must yield a
// strictly better reader hit ratio.
TEST_F(TestCacheWorkingSet, hit_ratio)
{
mem_limit_ = 2L * 1024L * 1024L * 1024L;
ASSERT_EQ(OB_SUCCESS, ObTenantManager::get_instance().set_tenant_mem_limit(tenant_id_, mem_limit_, mem_limit_));
const int64_t kv_cnt = mem_limit_ / 2 / V_SIZE;
double hit_ratio = 0.0;
double ws_hit_ratio = 0.0;
// test hit ratio when not using working set
{
ObCacheGetStress<K_SIZE, V_SIZE> get_stress;
ASSERT_EQ(OB_SUCCESS, get_stress.init(tenant_id_, kv_cnt));
get_stress.set_thread_count(10);
get_stress.start();
sleep(10);
// writer uses keys beyond the readers' range (start_key = kv_cnt + 1)
ObWorkingSetStress<K_SIZE, V_SIZE> ws_stress;
ASSERT_EQ(OB_SUCCESS, ws_stress.init(tenant_id_, get_stress.get_cache(), false, kv_cnt + 1));
ws_stress.set_thread_count(1);
ws_stress.start();
sleep(30);
ws_stress.stop();
ws_stress.wait();
sleep(30);
get_stress.stop();
get_stress.wait();
hit_ratio = get_stress.get_hit_ratio();
COMMON_LOG(INFO, "without working set stat", K(hit_ratio));
// reset the global cache so the second phase starts clean
ObKVGlobalCache::get_instance().destroy();
}
// test hit ratio when using working set
{
ASSERT_EQ(OB_SUCCESS, ObKVGlobalCache::get_instance().init());
ObCacheGetStress<K_SIZE, V_SIZE> get_stress;
ASSERT_EQ(OB_SUCCESS, get_stress.init(tenant_id_, kv_cnt));
get_stress.set_thread_count(10);
get_stress.start();
sleep(10);
ObWorkingSetStress<K_SIZE, V_SIZE> ws_stress;
ASSERT_EQ(OB_SUCCESS, ws_stress.init(tenant_id_, get_stress.get_cache(), true, kv_cnt + 1));
ws_stress.set_thread_count(1);
ws_stress.start();
sleep(30);
ws_stress.stop();
ws_stress.wait();
sleep(30);
get_stress.stop();
get_stress.wait();
ws_hit_ratio = get_stress.get_hit_ratio();
COMMON_LOG(INFO, "with working set stat", K(ws_hit_ratio));
}
ASSERT_TRUE(ws_hit_ratio > hit_ratio);
}
} // end namespace common
} // end namespace oceanbase
// Test entry point: install the observer signal handler (the stress tests run
// for minutes and need clean signal handling), set INFO logging, run gtest.
int main(int argc, char** argv)
{
oceanbase::observer::ObSignalHandle signal_handle;
oceanbase::observer::ObSignalHandle::change_signal_mask();
signal_handle.start();
oceanbase::common::ObLogger::get_logger().set_log_level("INFO");
// NOTE(review): OB_LOGGER usually expands to the same singleton as the line
// above, which would make one of the two calls redundant — confirm.
OB_LOGGER.set_log_level("INFO");
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

View File

@ -0,0 +1,851 @@
/**
* Copyright (c) 2021 OceanBase
* OceanBase CE is licensed under Mulan PubL v2.
* You can use this software according to the terms and conditions of the Mulan PubL v2.
* You may obtain a copy of Mulan PubL v2 at:
* http://license.coscl.org.cn/MulanPubL-2.0
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PubL v2 for more details.
*/
#include <gtest/gtest.h>
#define private public
#define protected public
#include "share/cache/ob_kv_storecache.h"
#include "share/ob_tenant_mgr.h"
#include "lib/utility/ob_tracepoint.h"
//#include "ob_cache_get_stressor.h"
#include "observer/ob_signal_handle.h"
#include "ob_cache_test_utils.h"
namespace oceanbase {
using namespace lib;
using namespace observer;
namespace common {
// Fixture for KV store-cache tests: registers tenant 1234 with an 8-16 MB
// memory range, boots the global cache with explicit bucket/size/block
// parameters, and caps observer memory at 5 GB.  Everything is torn down
// after each case.
class TestKVCache : public ::testing::Test {
public:
TestKVCache();
virtual ~TestKVCache();
virtual void SetUp();
virtual void TearDown();
private:
// disallow copy
DISALLOW_COPY_AND_ASSIGN(TestKVCache);
protected:
// function members
protected:
// data members
int64_t tenant_id_;
int64_t lower_mem_limit_;
int64_t upper_mem_limit_;
};
TestKVCache::TestKVCache() : tenant_id_(1234), lower_mem_limit_(8 * 1024 * 1024), upper_mem_limit_(16 * 1024 * 1024)
{}
TestKVCache::~TestKVCache()
{}
void TestKVCache::SetUp()
{
int ret = OB_SUCCESS;
const int64_t bucket_num = 1024;
const int64_t max_cache_size = 1024 * 1024 * 1024;
const int64_t block_size = lib::ACHUNK_SIZE;
ret = ObTenantManager::get_instance().init(100000);
ASSERT_EQ(OB_SUCCESS, ret);
ret = ObTenantManager::get_instance().add_tenant(tenant_id_);
ASSERT_EQ(OB_SUCCESS, ret);
ret = ObTenantManager::get_instance().set_tenant_mem_limit(tenant_id_, lower_mem_limit_, upper_mem_limit_);
ASSERT_EQ(OB_SUCCESS, ret);
ret = ObKVGlobalCache::get_instance().init(bucket_num, max_cache_size, block_size);
ASSERT_EQ(OB_SUCCESS, ret);
// set observer memory limit
CHUNK_MGR.set_limit(5L * 1024L * 1024L * 1024L);
}
void TestKVCache::TearDown()
{
ObKVGlobalCache::get_instance().destroy();
ObTenantManager::get_instance().destroy();
}
// ObKVCacheInstMap::init argument validation: zero capacity and NULL config
// must fail, a valid call succeeds, and a second init is rejected.
TEST(ObKVCacheInstMap, normal)
{
int ret = OB_SUCCESS;
ObKVCacheInstMap inst_map;
ObKVCacheConfig config;
// invalid argument
ret = inst_map.init(0, &config, ObTenantManager::get_instance());
ASSERT_NE(OB_SUCCESS, ret);
ret = inst_map.init(1000, NULL, ObTenantManager::get_instance());
ASSERT_NE(OB_SUCCESS, ret);
// normal argument
ret = inst_map.init(1000, &config, ObTenantManager::get_instance());
ASSERT_EQ(OB_SUCCESS, ret);
// repeat init
ret = inst_map.init(1000, &config, ObTenantManager::get_instance());
ASSERT_NE(OB_SUCCESS, ret);
inst_map.destroy();
}
// ObKVCacheInstMap under memory pressure: with the ERRSIM allocation-failure
// tracepoint armed, get_cache_inst must fail cleanly every time; with the
// tracepoint cleared, repeated lookups of the same inst key must succeed.
TEST(ObKVCacheInstMap, memory)
{
int ret = OB_SUCCESS;
int64_t cache_inst_cnt = 1000;
ObKVCacheInstMap inst_map;
ObKVCacheConfig configs[MAX_CACHE_NUM];
ObKVCacheInstKey inst_key;
ObKVCacheInstHandle inst_handle;
inst_key.cache_id_ = 0;
inst_key.tenant_id_ = 1;
// normal argument
ret = inst_map.init(cache_inst_cnt, configs, ObTenantManager::get_instance());
ASSERT_EQ(OB_SUCCESS, ret);
#ifdef ERRSIM
// force every internal allocation to fail
TP_SET_EVENT(EventTable::EN_4, OB_ALLOCATE_MEMORY_FAILED, 0, 1);
for (int64_t i = 0; i < cache_inst_cnt * 10; ++i) {
ret = inst_map.get_cache_inst(inst_key, inst_handle);
ASSERT_NE(OB_SUCCESS, ret);
}
TP_SET_EVENT(EventTable::EN_4, OB_SUCCESS, 0, 0);
#endif
for (int64_t i = 0; i < cache_inst_cnt; ++i) {
ret = inst_map.get_cache_inst(inst_key, inst_handle);
ASSERT_EQ(OB_SUCCESS, ret);
}
inst_map.destroy();
}
// ObKVGlobalCache lifecycle: invalid bucket count is rejected, a valid init
// succeeds, and a repeated init fails until destroy() is called.
TEST(ObKVGlobalCache, normal)
{
int ret = OB_SUCCESS;
// invalid argument
ret = ObKVGlobalCache::get_instance().init(-1);
ASSERT_NE(OB_SUCCESS, ret);
// repeat init
ret = ObKVGlobalCache::get_instance().init();
ASSERT_EQ(OB_SUCCESS, ret);
ret = ObKVGlobalCache::get_instance().init();
ASSERT_NE(OB_SUCCESS, ret);
ObKVGlobalCache::get_instance().destroy();
}
/*
TEST(TestKVCacheValue, wash_stress)
{
int ret = OB_SUCCESS;
const int64_t bucket_num = 1024L * 1024L;
const int64_t max_cache_size = 100L * 1024L * 1024L * 1024L;
const int64_t block_size = common::OB_MALLOC_BIG_BLOCK_SIZE;
const uint64_t tenant_id = 1234;
const int64_t lower_mem_limit = 40L * 1024L * 1024L * 1024L;
const int64_t upper_mem_limit = 60L * 1024L * 1024L * 1024L;
CHUNK_MGR.set_limit(upper_mem_limit * 3 / 2);
ret = ObTenantManager::get_instance().init(100000);
ASSERT_EQ(OB_SUCCESS, ret);
ret = ObTenantManager::get_instance().add_tenant(tenant_id);
ASSERT_EQ(OB_SUCCESS, ret);
ret = ObTenantManager::get_instance().set_tenant_mem_limit(tenant_id, lower_mem_limit, upper_mem_limit);
ASSERT_EQ(OB_SUCCESS, ret);
ret = ObKVGlobalCache::get_instance().init(bucket_num, max_cache_size, block_size);
ASSERT_EQ(OB_SUCCESS, ret);
ObKVGlobalCache::get_instance().wash_timer_.cancel_all();
static const int64_t K_SIZE = 16;
static const int64_t V_SIZE = 16 * 1024;
typedef TestKVCacheKey<K_SIZE> TestKey;
typedef TestKVCacheValue<V_SIZE> TestValue;
ObKVCache<TestKey, TestValue> cache;
ASSERT_EQ(OB_SUCCESS, cache.init("test"));
const int64_t count = 64;
const int64_t kv_count = upper_mem_limit / V_SIZE;
ObCacheGetStressor<K_SIZE, V_SIZE> getters[64];
ObCacheGetStressor<K_SIZE, V_SIZE>::make_cache_full(cache, tenant_id, kv_count);
for (int64_t i = 0; i < count; ++i) {
ASSERT_EQ(OB_SUCCESS, getters[i].init(cache, tenant_id, i, count, kv_count));
getters[i].start();
}
int64_t wash_count = 64;
while (--wash_count > 0) {
ObCacheGetStressor<K_SIZE, V_SIZE>::make_cache_full(cache, tenant_id, kv_count);
sleep(1);
ObKVGlobalCache::get_instance().wash();
}
for (int64_t i = 0; i < count; ++i) {
getters[i].start();
getters[i].stop();
getters[i].wait();
}
}
*/
// End-to-end functional coverage of ObKVCache: every public entry must
// fail before init, then put/get, erase, alloc+put, iteration, and
// re-init after destroy are exercised in order.
TEST_F(TestKVCache, test_func)
{
  static const int64_t K_SIZE = 16;
  static const int64_t V_SIZE = 64;
  typedef TestKVCacheKey<K_SIZE> TestKey;
  typedef TestKVCacheValue<V_SIZE> TestValue;
  int ret = OB_SUCCESS;
  ObKVCache<TestKey, TestValue> cache;
  TestKey key;
  TestValue value;
  const TestValue* pvalue = NULL;
  ObKVCachePair* kvpair = NULL;
  ObKVCacheHandle handle;
  ObKVCacheIterator iter;
  key.v_ = 1234;
  key.tenant_id_ = tenant_id_;
  value.v_ = 4321;
  // invalid invoke when not init: every entry point must reject
  ret = cache.set_priority(1);
  ASSERT_NE(OB_SUCCESS, ret);
  ret = cache.put(key, value);
  ASSERT_NE(OB_SUCCESS, ret);
  ret = cache.get(key, pvalue, handle);
  ASSERT_NE(OB_SUCCESS, ret);
  ret = cache.put_and_fetch(key, value, pvalue, handle);
  ASSERT_NE(OB_SUCCESS, ret);
  ret = cache.get_iterator(iter);
  ASSERT_NE(OB_SUCCESS, ret);
  ret = cache.erase(key);
  ASSERT_NE(OB_SUCCESS, ret);
  ret = cache.alloc(key, value, kvpair, handle);
  ASSERT_NE(OB_SUCCESS, ret);
  ret = cache.put(kvpair, handle);
  ASSERT_NE(OB_SUCCESS, ret);
  // test init (second init must fail)
  ret = cache.init("test");
  ASSERT_EQ(OB_SUCCESS, ret);
  ret = cache.init("test");
  ASSERT_NE(OB_SUCCESS, ret);
  // invalid argument: priority 0 and NULL kvpair
  ret = cache.set_priority(0);
  ASSERT_NE(OB_SUCCESS, ret);
  ret = cache.put(kvpair, handle);
  ASSERT_NE(OB_SUCCESS, ret);
  // test put and get
  ret = cache.put(key, value);
  ASSERT_EQ(OB_SUCCESS, ret);
  ret = cache.get(key, pvalue, handle);
  ASSERT_EQ(OB_SUCCESS, ret);
  if (OB_SUCC(ret)) {
    ASSERT_TRUE(value.v_ == pvalue->v_);
  }
  // test erase: second erase of the same key reports not-exist
  ret = cache.erase(key);
  ASSERT_EQ(OB_SUCCESS, ret);
  ret = cache.erase(key);
  ASSERT_EQ(OB_ENTRY_NOT_EXIST, ret);
  // test alloc and put (pair allocated inside the cache, then published)
  ret = cache.alloc(key, value, kvpair, handle);
  ASSERT_EQ(OB_SUCCESS, ret);
  ret = cache.put(kvpair, handle, true);
  ASSERT_EQ(OB_SUCCESS, ret);
  // test iterator: exactly one pair is present, then iteration ends
  handle.reset();
  ret = cache.get_iterator(iter);
  ASSERT_EQ(OB_SUCCESS, ret);
  const TestKey* pkey = NULL;
  ret = iter.get_next_kvpair(pkey, pvalue, handle);
  ASSERT_EQ(OB_SUCCESS, ret);
  ret = iter.get_next_kvpair(pkey, pvalue, handle);
  ASSERT_EQ(OB_ITER_END, ret);
  // test destroy: after re-init under a new name, old data is gone
  cache.destroy();
  ret = cache.init("test2");
  ASSERT_EQ(OB_SUCCESS, ret);
  ret = cache.get(key, pvalue, handle);
  ASSERT_NE(OB_SUCCESS, ret);
}
// Verify that a value larger than the default memory block (2MB payload)
// can be stored and fetched back intact.
// Cleanup: removed the unused local `ObKVCacheIterator iter`.
TEST_F(TestKVCache, test_large_kv)
{
  static const int64_t K_SIZE = 16;
  static const int64_t V_SIZE = 2 * 1024 * 1024;  // 2MB value
  typedef TestKVCacheKey<K_SIZE> TestKey;
  typedef TestKVCacheValue<V_SIZE> TestValue;
  int ret = OB_SUCCESS;
  ObKVCache<TestKey, TestValue> cache;
  TestKey key;
  TestValue value;
  const TestValue* pvalue = NULL;
  ObKVCacheHandle handle;
  key.v_ = 1234;
  key.tenant_id_ = tenant_id_;
  value.v_ = 4321;
  // test init
  ret = cache.init("test");
  ASSERT_EQ(OB_SUCCESS, ret);
  // test put and get of the oversized KV
  ret = cache.put(key, value);
  ASSERT_EQ(OB_SUCCESS, ret);
  ret = cache.get(key, pvalue, handle);
  ASSERT_EQ(OB_SUCCESS, ret);
  if (OB_SUCC(ret)) {
    ASSERT_TRUE(value.v_ == pvalue->v_);
  }
}
// Put roughly 10x the tenant's upper memory limit into the cache and
// verify the wash mechanism keeps the cache size below that limit.
// Cleanup: removed the unused locals `ObKVCacheHandle handle` and
// `ObKVCacheIterator iter`.
TEST_F(TestKVCache, test_wash)
{
  static const int64_t K_SIZE = 16;
  static const int64_t V_SIZE = 2 * 1024 * 1024;
  typedef TestKVCacheKey<K_SIZE> TestKey;
  typedef TestKVCacheValue<V_SIZE> TestValue;
  int ret = OB_SUCCESS;
  ObKVCache<TestKey, TestValue> cache;
  TestKey key;
  TestValue value;
  key.v_ = 1234;
  key.tenant_id_ = tenant_id_;
  value.v_ = 4321;
  // test init
  ret = cache.init("test");
  ASSERT_EQ(OB_SUCCESS, ret);
  // put far more data than the tenant limit; older blocks must be washed
  for (int64_t i = 0; i < upper_mem_limit_ / V_SIZE * 10; ++i) {
    key.v_ = i;
    ret = cache.put(key, value);
    ASSERT_EQ(OB_SUCCESS, ret);
  }
  // give the background wash a moment to settle before checking the size
  sleep(1);
  ASSERT_TRUE(cache.size(tenant_id_) < upper_mem_limit_);
}
// Verify set_hold_size/get_hold_size semantics and that the wash never
// shrinks a cache below its configured hold size.
TEST_F(TestKVCache, test_hold_size)
{
  static const int64_t K_SIZE = 16;
  static const int64_t V_SIZE = 2 * 1024 * 1024;
  typedef TestKVCacheKey<K_SIZE> TestKey;
  typedef TestKVCacheValue<V_SIZE> TestValue;
  ObKVCache<TestKey, TestValue> cache;
  ASSERT_EQ(OB_SUCCESS, cache.init("test"));
  int64_t hold_size = 0;
  // before the tenant's cache inst exists, hold size is not accessible
  ASSERT_EQ(OB_ENTRY_NOT_EXIST, ObKVGlobalCache::get_instance().set_hold_size(tenant_id_, "test", hold_size));
  ASSERT_EQ(OB_ENTRY_NOT_EXIST, ObKVGlobalCache::get_instance().get_hold_size(tenant_id_, "test", hold_size));
  TestKey key;
  TestValue value;
  key.v_ = 1234;
  key.tenant_id_ = tenant_id_;
  value.v_ = 4321;
  // first put creates the cache inst; default hold size is 0
  ASSERT_EQ(OB_SUCCESS, cache.put(key, value));
  hold_size = -1;
  ASSERT_EQ(OB_SUCCESS, ObKVGlobalCache::get_instance().get_hold_size(tenant_id_, "test", hold_size));
  ASSERT_EQ(0, hold_size);
  // set/get round-trip
  int64_t new_hold_size = 2 * 1024 * 1024;
  ASSERT_EQ(OB_SUCCESS, ObKVGlobalCache::get_instance().set_hold_size(tenant_id_, "test", new_hold_size));
  ASSERT_EQ(OB_SUCCESS, ObKVGlobalCache::get_instance().get_hold_size(tenant_id_, "test", hold_size));
  ASSERT_EQ(new_hold_size, hold_size);
  ASSERT_EQ(OB_SUCCESS, ObKVGlobalCache::get_instance().set_hold_size(tenant_id_, "test", 0));
  // overflow the tenant limit so the wash kicks in
  for (int64_t i = 0; i < upper_mem_limit_ / V_SIZE * 10; ++i) {
    key.v_ = i;
    ASSERT_EQ(OB_SUCCESS, cache.put(key, value));
  }
  sleep(1);
  SHARE_LOG(INFO, "store_size", "cache_size", cache.store_size(tenant_id_));
  // check hold_size works: with an 8MB hold size the wash must leave at
  // least that much in the store
  new_hold_size = 8 * 1024 * 1024;
  ASSERT_EQ(OB_SUCCESS, ObKVGlobalCache::get_instance().set_hold_size(tenant_id_, "test", new_hold_size));
  for (int64_t i = 0; i < upper_mem_limit_ / V_SIZE * 10; ++i) {
    key.v_ = i;
    ASSERT_EQ(OB_SUCCESS, cache.put(key, value));
  }
  sleep(1);
  // BUGFIX: the original compared store_size against the stale `hold_size`
  // local (still 2MB from the earlier get) instead of the 8MB hold size
  // configured just above; compare against the current configuration.
  ASSERT_EQ(OB_SUCCESS, ObKVGlobalCache::get_instance().get_hold_size(tenant_id_, "test", hold_size));
  ASSERT_EQ(new_hold_size, hold_size);
  ASSERT_TRUE(cache.store_size(tenant_id_) >= new_hold_size);
  SHARE_LOG(INFO, "store_size", "cache_size", cache.store_size(tenant_id_));
}
// With the background wash disabled, fill the cache until the server-wide
// chunk limit is reached, then verify that plain allocations trigger the
// synchronous wash path and reclaim nearly all cache memory.
// Cleanup: removed the `ObMemAttr attr` built inside the allocation loop;
// it was never passed to alloc() and was dead code.
TEST_F(TestKVCache, sync_wash_mbs)
{
  CHUNK_MGR.set_limit(512 * 1024 * 1024);
  // close background wash timer task
  ObKVGlobalCache::get_instance().wash_timer_.cancel_all();
  // put to cache until the cache uses all available memory
  static const int64_t K_SIZE = 16;
  static const int64_t V_SIZE = 2 * 1024;
  typedef TestKVCacheKey<K_SIZE> TestKey;
  typedef TestKVCacheValue<V_SIZE> TestValue;
  ObKVCache<TestKey, TestValue> cache;
  ASSERT_EQ(OB_SUCCESS, cache.init("test"));
  int ret = OB_SUCCESS;
  TestKey key;
  TestValue value;
  key.v_ = 1234;
  key.tenant_id_ = tenant_id_;
  value.v_ = 4321;
  int64_t i = 0;
  while (OB_SUCC(ret)) {
    key.v_ = i;
    if (OB_FAIL(cache.put(key, value))) {
      SHARE_LOG(WARN, "put to cache failed", K(ret), K(i));
    } else {
      ++i;
    }
    // stop once the server chunk limit is fully held
    if (CHUNK_MGR.get_hold() >= CHUNK_MGR.get_limit()) {
      break;
    }
  }
  const int64_t cache_total_size = i * V_SIZE;
  SHARE_LOG(INFO, "stat", K(cache_total_size));
  // try to allocate memory; expected to succeed by synchronously washing
  // cache memory blocks
  ObResourceMgr::get_instance().set_cache_washer(ObKVGlobalCache::get_instance());
  ObArenaAllocator allocator(ObNewModIds::OB_KVSTORE_CACHE, 512 * 1024, tenant_id_);
  int64_t j = 0;
  void* ptr = NULL;
  ret = OB_SUCCESS;
  while (OB_SUCC(ret)) {
    ptr = allocator.alloc(V_SIZE);
    if (NULL == ptr) {
      ret = OB_ALLOCATE_MEMORY_FAILED;
      SHARE_LOG(WARN, "allocate memory failed", K(ret), K(j));
    } else {
      ++j;
    }
  }
  const int64_t malloc_total_size = j * V_SIZE;
  SHARE_LOG(INFO, "stat", K(malloc_total_size));
  // after being squeezed out by normal allocations the cache should hold
  // almost nothing (< 3MB)
  int64_t cache_size = 0;
  ObTenantResourceMgrHandle resource_handle;
  ASSERT_EQ(OB_SUCCESS, ObResourceMgr::get_instance().get_tenant_resource_mgr(tenant_id_, resource_handle));
  cache_size = resource_handle.get_memory_mgr()->get_cache_hold();
  ASSERT_TRUE(cache_size < 3 * 1024 * 1024);
}
// With background timers stopped and a 512MB tenant limit, keep putting
// into the cache past the limit: each put beyond the limit must succeed
// by the cache synchronously washing its own memory blocks, and the value
// just put must still be readable.
TEST_F(TestKVCache, cache_wash_self)
{
  CHUNK_MGR.set_limit(1024 * 1024 * 1024);
  ObResourceMgr::get_instance().set_cache_washer(ObKVGlobalCache::get_instance());
  ObTenantResourceMgrHandle resource_handle;
  ASSERT_EQ(OB_SUCCESS, ObResourceMgr::get_instance().get_tenant_resource_mgr(tenant_id_, resource_handle));
  resource_handle.get_memory_mgr()->set_limit(512 * 1024 * 1024);
  // close background wash timer task
  ObKVGlobalCache::get_instance().wash_timer_.stop();
  ObKVGlobalCache::get_instance().wash_timer_.wait();
  ObKVGlobalCache::get_instance().replace_timer_.stop();
  ObKVGlobalCache::get_instance().replace_timer_.wait();
  // put to cache make cache use all memory
  static const int64_t K_SIZE = 16;
  static const int64_t V_SIZE = 2 * 1024;
  typedef TestKVCacheKey<K_SIZE> TestKey;
  typedef TestKVCacheValue<V_SIZE> TestValue;
  ObKVCache<TestKey, TestValue> cache;
  ASSERT_EQ(OB_SUCCESS, cache.init("test"));
  int ret = OB_SUCCESS;
  TestKey key;
  TestValue value;
  key.v_ = 1234;
  key.tenant_id_ = tenant_id_;
  value.v_ = 4321;
  int64_t i = 0;
  // target roughly 2x the chunk limit worth of puts
  const int64_t put_count = CHUNK_MGR.get_limit() / V_SIZE * 2;
  while (OB_SUCC(ret)) {
    key.v_ = i;
    if (OB_FAIL(cache.put(key, value))) {
      SHARE_LOG(WARN, "put to cache failed", K(ret), K(i));
    } else {
      ++i;
    }
    // try get: the key just put must be readable
    if (OB_SUCC(ret)) {
      const TestValue* get_value = NULL;
      ObKVCacheHandle handle;
      ASSERT_EQ(OB_SUCCESS, cache.get(key, get_value, handle));
      if (i >= put_count) {
        break;
      }
      if (i % 10000 == 0) {
        SHARE_LOG(INFO, "xx", K(i), K(put_count));
      }
    } else {
      // dump memory state to help diagnose a failed put
      ObMallocAllocator::get_instance()->print_tenant_memory_usage(tenant_id_);
      ObMallocAllocator::get_instance()->print_tenant_ctx_memory_usage(tenant_id_);
    }
  }
  const int64_t cache_put_size = put_count * V_SIZE;
  SHARE_LOG(INFO, "stat", K(cache_put_size));
}
// Mixed workload without background wash: one cache stressor and one
// allocator stressor share a 512MB tenant; alloc/free tasks totalling
// 400MB run against a full cache, and neither side may see a failure.
TEST_F(TestKVCache, mix_mode_without_backgroup)
{
  CHUNK_MGR.set_limit(1024 * 1024 * 1024);
  ObResourceMgr::get_instance().set_cache_washer(ObKVGlobalCache::get_instance());
  ObTenantResourceMgrHandle resource_handle;
  ASSERT_EQ(OB_SUCCESS, ObResourceMgr::get_instance().get_tenant_resource_mgr(tenant_id_, resource_handle));
  resource_handle.get_memory_mgr()->set_limit(512 * 1024 * 1024);
  // close background wash timer task
  ObKVGlobalCache::get_instance().wash_timer_.stop();
  ObKVGlobalCache::get_instance().wash_timer_.wait();
  ObKVGlobalCache::get_instance().replace_timer_.stop();
  ObKVGlobalCache::get_instance().replace_timer_.wait();
  ObAllocatorStress alloc_stress;
  ObCacheStress<16, 2 * 1024> cache_stress;
  ASSERT_EQ(OB_SUCCESS, alloc_stress.init());
  ASSERT_EQ(OB_SUCCESS, cache_stress.init(tenant_id_, 0));
  alloc_stress.start();
  cache_stress.start();
  // wait cache use all memory
  sleep(10);
  // add alloc/free task to alloc_stress, total 400MB alloc then free
  const int64_t alloc_size = 1024;
  const int64_t alloc_count = 50 * 1024;
  const int64_t task_count = 4;
  for (int64_t i = 0; i < task_count; ++i) {
    // alloc task (second argument true => allocate)
    ObCacheTestTask task(tenant_id_, true, alloc_size, alloc_count, alloc_stress.get_stat());
    ASSERT_EQ(OB_SUCCESS, alloc_stress.add_task(task));
    sleep(1);
  }
  for (int64_t i = 0; i < task_count; ++i) {
    // free task (second argument false => free)
    ObCacheTestTask task(tenant_id_, false, alloc_size, alloc_count, alloc_stress.get_stat());
    ASSERT_EQ(OB_SUCCESS, alloc_stress.add_task(task));
    sleep(1);
  }
  alloc_stress.stop();
  cache_stress.stop();
  alloc_stress.wait();
  cache_stress.wait();
  ObMallocAllocator::get_instance()->print_tenant_memory_usage(tenant_id_);
  ObMallocAllocator::get_instance()->print_tenant_ctx_memory_usage(tenant_id_);
  // neither the allocator nor the cache stressor may have failed
  ASSERT_EQ(0, alloc_stress.get_fail_count());
  ASSERT_EQ(0, cache_stress.get_fail_count());
}
// Mixed workload with background wash timers left running: three cache
// stressors plus three allocator stressors contend for memory; all
// alloc/free tasks and cache operations must succeed.
TEST_F(TestKVCache, mix_mode_with_backgroup)
{
  CHUNK_MGR.set_limit(1024 * 1024 * 1024);
  ObResourceMgr::get_instance().set_cache_washer(ObKVGlobalCache::get_instance());
  ObAllocatorStress alloc_stress_array[3];
  ObCacheStress<16, 2 * 1024> cache_stress_array[3];
  for (int i = 0; i < 3; ++i) {
    ASSERT_EQ(OB_SUCCESS, alloc_stress_array[i].init());
    ASSERT_EQ(OB_SUCCESS, cache_stress_array[i].init(tenant_id_, i));
  }
  for (int i = 0; i < 3; ++i) {
    alloc_stress_array[i].start();
    cache_stress_array[i].start();
  }
  // wait cache use all memory
  sleep(10);
  // add alloc/free task to alloc_stress
  const int64_t alloc_size = 1024;
  const int64_t alloc_count = 20 * 1024;
  const int64_t task_count = 4;
  for (int64_t i = 0; i < task_count; ++i) {
    for (int j = 0; j < 3; ++j) {
      // alloc task (second argument true => allocate)
      ObCacheTestTask task(tenant_id_, true, alloc_size, alloc_count, alloc_stress_array[j].get_stat());
      ASSERT_EQ(OB_SUCCESS, alloc_stress_array[j].add_task(task));
    }
  }
  for (int64_t i = 0; i < task_count; ++i) {
    for (int j = 0; j < 3; ++j) {
      // free task (second argument false => free)
      ObCacheTestTask task(tenant_id_, false, alloc_size, alloc_count, alloc_stress_array[j].get_stat());
      ASSERT_EQ(OB_SUCCESS, alloc_stress_array[j].add_task(task));
    }
  }
  for (int i = 0; i < 3; ++i) {
    alloc_stress_array[i].stop();
    cache_stress_array[i].stop();
    alloc_stress_array[i].wait();
    cache_stress_array[i].wait();
  }
  ObMallocAllocator::get_instance()->print_tenant_memory_usage(tenant_id_);
  ObMallocAllocator::get_instance()->print_tenant_ctx_memory_usage(tenant_id_);
  // no stressor may have failed
  for (int i = 0; i < 3; ++i) {
    ASSERT_EQ(0, alloc_stress_array[i].get_fail_count());
    ASSERT_EQ(0, cache_stress_array[i].get_fail_count());
  }
}
// Same mixed workload as mix_mode_without_backgroup but with 20MB
// allocations, forcing large-chunk requests to be satisfied by washing
// cache memory blocks; no operation may fail.
TEST_F(TestKVCache, large_chunk_wash_mb)
{
  CHUNK_MGR.set_limit(1024 * 1024 * 1024);
  ObResourceMgr::get_instance().set_cache_washer(ObKVGlobalCache::get_instance());
  ObTenantResourceMgrHandle resource_handle;
  ASSERT_EQ(OB_SUCCESS, ObResourceMgr::get_instance().get_tenant_resource_mgr(tenant_id_, resource_handle));
  resource_handle.get_memory_mgr()->set_limit(512 * 1024 * 1024);
  // close background wash timer task
  ObKVGlobalCache::get_instance().wash_timer_.stop();
  ObKVGlobalCache::get_instance().wash_timer_.wait();
  ObKVGlobalCache::get_instance().replace_timer_.stop();
  ObKVGlobalCache::get_instance().replace_timer_.wait();
  ObAllocatorStress alloc_stress;
  ObCacheStress<16, 2 * 1024> cache_stress;
  ASSERT_EQ(OB_SUCCESS, alloc_stress.init());
  ASSERT_EQ(OB_SUCCESS, cache_stress.init(tenant_id_, 0));
  alloc_stress.start();
  cache_stress.start();
  // wait cache use all memory
  sleep(10);
  // add alloc/free task to alloc_stress: 20MB x 2 per task, 4 tasks
  const int64_t alloc_size = 20 * 1024 * 1024;
  const int64_t alloc_count = 2;
  const int64_t task_count = 4;
  for (int64_t i = 0; i < task_count; ++i) {
    // alloc task (second argument true => allocate)
    ObCacheTestTask task(tenant_id_, true, alloc_size, alloc_count, alloc_stress.get_stat());
    ASSERT_EQ(OB_SUCCESS, alloc_stress.add_task(task));
    sleep(1);
  }
  for (int64_t i = 0; i < task_count; ++i) {
    // free task (second argument false => free)
    ObCacheTestTask task(tenant_id_, false, alloc_size, alloc_count, alloc_stress.get_stat());
    ASSERT_EQ(OB_SUCCESS, alloc_stress.add_task(task));
    sleep(1);
  }
  alloc_stress.stop();
  cache_stress.stop();
  alloc_stress.wait();
  cache_stress.wait();
  ObMallocAllocator::get_instance()->print_tenant_memory_usage(tenant_id_);
  ObMallocAllocator::get_instance()->print_tenant_ctx_memory_usage(tenant_id_);
  // neither stressor may have failed
  ASSERT_EQ(0, alloc_stress.get_fail_count());
  ASSERT_EQ(0, cache_stress.get_fail_count());
}
// Fill the cache via a stressor, then put 10MB values (larger than a
// normal memory block); each oversized put must succeed by washing
// existing cache memory blocks.
// Cleanup: removed the duplicated cache_stress.stop()/wait() pair at the
// end of the test — the stressor is already stopped and joined right
// after the warm-up sleep.
TEST_F(TestKVCache, large_mb_wash_mb)
{
  CHUNK_MGR.set_limit(1024 * 1024 * 1024);
  ObResourceMgr::get_instance().set_cache_washer(ObKVGlobalCache::get_instance());
  ObTenantResourceMgrHandle resource_handle;
  ASSERT_EQ(OB_SUCCESS, ObResourceMgr::get_instance().get_tenant_resource_mgr(tenant_id_, resource_handle));
  resource_handle.get_memory_mgr()->set_limit(512 * 1024 * 1024);
  // close background wash timer task
  ObKVGlobalCache::get_instance().wash_timer_.stop();
  ObKVGlobalCache::get_instance().wash_timer_.wait();
  ObKVGlobalCache::get_instance().replace_timer_.stop();
  ObKVGlobalCache::get_instance().replace_timer_.wait();
  ObCacheStress<16, 2 * 1024> cache_stress;
  ASSERT_EQ(OB_SUCCESS, cache_stress.init(tenant_id_, 0));
  cache_stress.start();
  // wait cache use all memory, then stop the stressor
  sleep(10);
  cache_stress.stop();
  cache_stress.wait();
  static const int64_t K_SIZE = 16;
  static const int64_t V_SIZE = 10 * 1024 * 1024;
  typedef TestKVCacheKey<K_SIZE> TestKey;
  typedef TestKVCacheValue<V_SIZE> TestValue;
  ObKVCache<TestKey, TestValue> cache;
  ASSERT_EQ(OB_SUCCESS, cache.init("test_big_mb"));
  TestKey key;
  TestValue value;
  // put 80 times, total 800MB — exceeds the tenant limit, so every put
  // relies on the wash freeing earlier blocks
  for (int64_t i = 0; i < 80; ++i) {
    key.tenant_id_ = tenant_id_;
    key.v_ = i;
    ASSERT_EQ(OB_SUCCESS, cache.put(key, value));
    SHARE_LOG(INFO, "put big mb succeed");
    ObMallocAllocator::get_instance()->print_tenant_memory_usage(tenant_id_);
  }
}
// Regression: when tenant memory usage sits just above the lower limit,
// the intermediate "min wash" computation can go negative; the resulting
// tenant wash size must still be positive.
// Robustness fix: the original ignored the tenant_wash_map_.get() return
// code and dereferenced the pointer unchecked; a lookup miss would crash
// the test instead of failing it. Both are now asserted first.
TEST_F(TestKVCache, compute_wash_size_when_min_wash_negative)
{
  // close background wash timer task so the computation below is the only
  // wash activity
  ObKVGlobalCache::get_instance().wash_timer_.stop();
  ObKVGlobalCache::get_instance().wash_timer_.wait();
  ObKVGlobalCache::get_instance().replace_timer_.stop();
  ObKVGlobalCache::get_instance().replace_timer_.wait();
  const uint64_t min_memory = 6L * 1024L * 1024L * 1024L;
  const uint64_t max_memory = 12L * 1024L * 1024L * 1024L;
  // just above min_memory, the shape that used to underflow the min wash
  const uint64_t memory_usage = 6L * 1024L * 1024L * 1024L + 100L + 1024L + 1024L;
  ObTenantResourceMgrHandle resource_handle;
  ASSERT_EQ(OB_SUCCESS, ObResourceMgr::get_instance().get_tenant_resource_mgr(tenant_id_, resource_handle));
  resource_handle.get_memory_mgr()->set_limit(max_memory);
  resource_handle.get_memory_mgr()->sum_hold_ = memory_usage;
  // set tenant memory limit
  ObTenantManager::get_instance().set_tenant_mem_limit(tenant_id_, min_memory, max_memory);
  // set cache size directly on the inst status
  ObKVCacheInstKey inst_key;
  inst_key.tenant_id_ = tenant_id_;
  inst_key.cache_id_ = 1;
  ObKVCacheInstHandle inst_handle;
  ASSERT_EQ(OB_SUCCESS, ObKVGlobalCache::get_instance().insts_.get_cache_inst(inst_key, inst_handle));
  inst_handle.inst_->status_.store_size_ = memory_usage;
  inst_handle.inst_->status_.map_size_ = 0;
  CHUNK_MGR.set_limit(10L * 1024L * 1024L * 1024L);
  CHUNK_MGR.hold_bytes_ = 10L * 1024L * 1024L * 1024L;
  CHUNK_MGR.set_urgent(1L * 1024L * 1024L * 1024L);
  // compute
  ObKVGlobalCache::get_instance().store_.compute_tenant_wash_size();
  // check tenant wash size — verify the lookup before dereferencing
  ObKVCacheStore::TenantWashInfo* tenant_wash_info = NULL;
  ASSERT_EQ(OB_SUCCESS,
      ObKVGlobalCache::get_instance().store_.tenant_wash_map_.get(tenant_id_, tenant_wash_info));
  ASSERT_TRUE(NULL != tenant_wash_info);
  COMMON_LOG(INFO, "xxx", "wash_size", tenant_wash_info->wash_size_);
  ASSERT_TRUE(tenant_wash_info->wash_size_ > 0);
}
// Exercise the per-tenant memblock-list pool: handles drain the pool,
// resetting handles returns lists, duplicate handles for the same tenant
// share one list, and a map-insert failure must not leak a pooled list.
TEST_F(TestKVCache, get_mb_list)
{
  ObKVCacheInstMap& inst_map = ObKVGlobalCache::get_instance().insts_;
  ObTenantMBListHandle handles_[MAX_TENANT_NUM_PER_SERVER];
  // pool starts full
  ASSERT_EQ(MAX_TENANT_NUM_PER_SERVER, inst_map.list_pool_.get_total());
  // one distinct tenant per handle drains the pool completely
  for (int64_t i = 0; i < MAX_TENANT_NUM_PER_SERVER; ++i) {
    ASSERT_EQ(OB_SUCCESS, inst_map.get_mb_list(i + 1, handles_[i]));
  }
  ASSERT_EQ(0, inst_map.list_pool_.get_total());
  // a new tenant cannot get a list once the pool is empty
  ObTenantMBListHandle handle;
  ASSERT_EQ(OB_ENTRY_NOT_EXIST, inst_map.get_mb_list(5000, handle));
  // resetting all handles returns every list to the pool
  for (int64_t i = 0; i < MAX_TENANT_NUM_PER_SERVER; ++i) {
    handles_[i].reset();
  }
  ASSERT_EQ(MAX_TENANT_NUM_PER_SERVER, inst_map.list_pool_.get_total());
  for (int64_t i = 0; i < MAX_TENANT_NUM_PER_SERVER; ++i) {
    ASSERT_EQ(OB_SUCCESS, inst_map.get_mb_list(i + 1, handles_[i]));
  }
  // a second handle for an existing tenant shares the same pooled list,
  // so the pool still reaches zero, not negative
  ObTenantMBListHandle second_handles_[MAX_TENANT_NUM_PER_SERVER];
  for (int64_t i = 0; i < MAX_TENANT_NUM_PER_SERVER; ++i) {
    ASSERT_EQ(OB_SUCCESS, inst_map.get_mb_list(i + 1, second_handles_[i]));
  }
  ASSERT_EQ(0, inst_map.list_pool_.get_total());
  for (int64_t i = 0; i < MAX_TENANT_NUM_PER_SERVER; ++i) {
    handles_[i].reset();
    second_handles_[i].reset();
  }
  ASSERT_EQ(MAX_TENANT_NUM_PER_SERVER, inst_map.list_pool_.get_total());
  // make list_map set fail (size forced past capacity); the popped list
  // must be pushed back so nothing leaks from the pool
  inst_map.list_map_.size_ = 5000;
  ASSERT_EQ(MAX_TENANT_NUM_PER_SERVER, inst_map.list_pool_.get_total());
  ASSERT_EQ(OB_SIZE_OVERFLOW, inst_map.get_mb_list(5000, handle));
  ASSERT_EQ(MAX_TENANT_NUM_PER_SERVER, inst_map.list_pool_.get_total());
}
/*
TEST(ObSyncWashRt, sync_wash_mb_rt)
{
const int64_t max_cache_size = 512L * 1024L * 1024L * 1024L;
ASSERT_EQ(OB_SUCCESS, ObKVGlobalCache::get_instance().init(ObKVGlobalCache::DEFAULT_BUCKET_NUM, max_cache_size));
ObKVGlobalCache::get_instance().wash_timer_.stop();
ObKVGlobalCache::get_instance().wash_timer_.wait();
ObKVCacheInst inst;
inst.tenant_id_ = OB_SYS_TENANT_ID;
ObKVCacheStore &store = ObKVGlobalCache::get_instance().store_;
for (int64_t i = 0; i < store.max_mb_num_; ++i) {
store.mb_handles_[i].handle_ref_.inc_ref_cnt();
store.mb_handles_[i].inst_ = &inst;
}
const int64_t start = ObTimeUtility::current_time();
const int64_t sync_wash_count = 1000;
for (int64_t i = 0; i < sync_wash_count; ++i) {
ObICacheWasher::ObCacheMemBlock *wash_blocks = NULL;
ASSERT_EQ(OB_CACHE_FREE_BLOCK_NOT_ENOUGH, ObKVGlobalCache::get_instance().sync_wash_mbs(
OB_SYS_TENANT_ID, 2 * 1024 * 1024, false, wash_blocks));
}
const int64_t end = ObTimeUtility::current_time();
STORAGE_LOG(INFO, "wash cost time", "avg", (end - start) / sync_wash_count);
}
*/
} // namespace common
} // namespace oceanbase
// Test entry point: start the signal-handling thread so crashes are
// reported, set the log level, and run all registered gtest cases.
int main(int argc, char** argv)
{
  oceanbase::observer::ObSignalHandle signal_handle;
  oceanbase::observer::ObSignalHandle::change_signal_mask();
  signal_handle.start();
  oceanbase::common::ObLogger::get_logger().set_log_level("INFO");
  testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
// ===========================================================================
// End of test_kv_storecache.cpp. The code below is a separate source file
// (unittest/share/cache/test_working_set_mgr.cpp); git web-view residue
// ("View File", "@ -0,0 +1,203 @@") removed from between the two files.
// ===========================================================================
/**
* Copyright (c) 2021 OceanBase
* OceanBase CE is licensed under Mulan PubL v2.
* You can use this software according to the terms and conditions of the Mulan PubL v2.
* You may obtain a copy of Mulan PubL v2 at:
* http://license.coscl.org.cn/MulanPubL-2.0
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PubL v2 for more details.
*/
#define USING_LOG_PREFEX COMMON
#include <gtest/gtest.h>
#include <gmock/gmock.h>
#include "share/ob_define.h"
#include "lib/container/ob_array.h"
#define private public
#include "share/cache/ob_working_set_mgr.h"
#include "share/cache/ob_kv_storecache.h"
#include "ob_cache_test_utils.h"
using ::testing::_;
namespace oceanbase {
namespace common {
// ObWorkingSet basics: init with a memblock handle, store twice the limit
// worth of KVs, and verify used_ stays within limit plus one block; then
// reset and repeat to prove the working set is reusable.
TEST(TestWorkingSet, common)
{
  ASSERT_EQ(OB_SUCCESS, ObKVGlobalCache::get_instance().init());
  ObWorkingSetMgr ws_mgr;
  ASSERT_EQ(OB_SUCCESS, ws_mgr.init(ObKVGlobalCache::get_instance().store_));
  ObFixedQueue<WorkingSetMB>& ws_mb_pool = ws_mgr.ws_mb_pool_;
  ObWorkingSet working_set;
  // not init
  const int64_t SIZE = 1024;
  WSListKey ws_list_key;
  ws_list_key.tenant_id_ = 1;
  ws_list_key.cache_id_ = 0;
  ObKVCacheInstHandle inst_handle;
  ASSERT_EQ(OB_SUCCESS, ObKVGlobalCache::get_instance().insts_.get_cache_inst(ws_list_key, inst_handle));
  ObKVCacheInst& inst = *inst_handle.get_inst();
  TestKVCacheKey<SIZE> key;
  TestKVCacheValue<SIZE> value;
  ObKVCachePair* kvpair = NULL;
  ObKVMemBlockHandle* mb_handle = NULL;
  WorkingSetMB* mb_wrapper = NULL;
  const int64_t limit = 10 * 1024 * 1024;  // 10MB
  ASSERT_EQ(OB_SUCCESS, ObKVGlobalCache::get_instance().store_.alloc_mbhandle(ws_list_key, mb_handle));
  ASSERT_EQ(
      OB_SUCCESS, working_set.init(ws_list_key, limit, mb_handle, ws_mb_pool, ObKVGlobalCache::get_instance().store_));
  ASSERT_TRUE(working_set.is_valid());
  // store 2x the limit worth of KVs; used_ must be bounded by the limit
  // plus at most one block
  const int64_t kv_cnt = 2 * limit / SIZE;
  for (int64_t i = 0; i < kv_cnt; ++i) {
    key.v_ = i;
    ASSERT_EQ(OB_SUCCESS, working_set.store(inst, key, value, kvpair, mb_wrapper));
    // release the memblock ref taken by store()
    ObKVGlobalCache::get_instance().revert(mb_wrapper->mb_handle_);
  }
  COMMON_LOG(INFO, "xxx", "used", working_set.used_, K(limit));
  ASSERT_TRUE(working_set.used_ < limit + working_set.cur_->block_size_);
  ASSERT_TRUE(NULL != working_set.get_curr_mb());
  working_set.reset();
  // do again: a reset working set must be fully reusable
  ASSERT_EQ(
      OB_SUCCESS, working_set.init(ws_list_key, limit, mb_handle, ws_mb_pool, ObKVGlobalCache::get_instance().store_));
  ASSERT_TRUE(working_set.is_valid());
  for (int64_t i = 0; i < kv_cnt; ++i) {
    key.v_ = i;
    ASSERT_EQ(OB_SUCCESS, working_set.store(inst, key, value, kvpair, mb_wrapper));
    ObKVGlobalCache::get_instance().revert(mb_wrapper->mb_handle_);
  }
  ASSERT_TRUE(working_set.used_ < limit + working_set.cur_->block_size_);
  COMMON_LOG(INFO, "xxx", "used", working_set.used_, K(limit));
  inst_handle.reset();
  ObKVGlobalCache::get_instance().destroy();
}
// WorkingSetList contract: all operations fail before init, init
// validates its key and rejects double-init, add_ws/del_ws maintain both
// the list size and the limit sum, and the free memblock array behaves as
// a bounded stack (overflow on push past FREE_ARRAY_SIZE, not-exist on
// pop when empty).
TEST(TestWorkingSetList, common)
{
  ASSERT_EQ(OB_SUCCESS, ObKVGlobalCache::get_instance().init());
  ObWorkingSetMgr::WorkingSetList ws_list;
  ObWorkingSetMgr ws_mgr;
  ASSERT_EQ(OB_SUCCESS, ws_mgr.init(ObKVGlobalCache::get_instance().store_));
  ObKVCacheStore& store = ObKVGlobalCache::get_instance().store_;
  // not init: every operation must report OB_NOT_INIT
  ObWorkingSet ws;
  ObKVMemBlockHandle* mb_handle = NULL;
  ASSERT_EQ(OB_NOT_INIT, ws_list.add_ws(&ws));
  ASSERT_EQ(OB_NOT_INIT, ws_list.del_ws(&ws));
  ASSERT_EQ(OB_NOT_INIT, ws_list.pop_mb_handle(mb_handle));
  ASSERT_EQ(OB_NOT_INIT, ws_list.push_mb_handle(mb_handle));
  // init: invalid tenant rejected, then success, then init-twice
  WSListKey list_key;
  list_key.tenant_id_ = OB_INVALID_ID;
  ASSERT_EQ(OB_INVALID_ARGUMENT, ws_list.init(list_key, store));
  list_key.tenant_id_ = OB_SYS_TENANT_ID;
  list_key.cache_id_ = 1;
  ASSERT_EQ(OB_SUCCESS, ws_list.init(list_key, store));
  ASSERT_EQ(OB_INIT_TWICE, ws_list.init(list_key, store));
  ObArenaAllocator allocator;
  ObArray<ObWorkingSet*> working_sets;
  const int64_t limit = 1024;
  const int64_t count = 100;
  int64_t sum_limit = 0;
  ObKVMemBlockHandle new_mb_handle;
  // hold a ref so the handle stays usable while pushed/popped below
  new_mb_handle.handle_ref_.inc_ref_cnt();
  ObFixedQueue<WorkingSetMB>& ws_mb_pool = ws_mgr.ws_mb_pool_;
  // build `count` working sets, each with its own memblock handle
  for (int64_t i = 0; i < count; ++i) {
    void* buf = allocator.alloc(sizeof(ObWorkingSet));
    ASSERT_TRUE(NULL != buf);
    ObWorkingSet* ws = new (buf) ObWorkingSet();
    ObKVMemBlockHandle* mb_handle = NULL;
    ASSERT_EQ(OB_SUCCESS, ObKVGlobalCache::get_instance().store_.alloc_mbhandle(list_key, mb_handle));
    ASSERT_EQ(OB_SUCCESS, ws->init(list_key, limit, mb_handle, ws_mb_pool, ObKVGlobalCache::get_instance().store_));
    ASSERT_EQ(OB_SUCCESS, working_sets.push_back(ws));
    sum_limit += limit;
  }
  // add_ws accumulates both list size and limit sum
  for (int64_t i = 0; i < count; ++i) {
    ASSERT_EQ(OB_SUCCESS, ws_list.add_ws(working_sets.at(i)));
  }
  ASSERT_EQ(count, ws_list.list_.get_size());
  ASSERT_EQ(sum_limit, ws_list.limit_sum_);
  // del_ws drains them back down to zero
  for (int64_t i = 0; i < count; ++i) {
    ASSERT_EQ(OB_SUCCESS, ws_list.del_ws(working_sets.at(i)));
  }
  ASSERT_EQ(0, ws_list.list_.get_size());
  ASSERT_EQ(0, ws_list.limit_sum_);
  // free array holds exactly FREE_ARRAY_SIZE handles, then overflows
  for (int64_t i = 0; i < ObWorkingSetMgr::WorkingSetList::FREE_ARRAY_SIZE; ++i) {
    ASSERT_EQ(OB_SUCCESS, ws_list.push_mb_handle(&new_mb_handle));
  }
  ASSERT_EQ(OB_SIZE_OVERFLOW, ws_list.push_mb_handle(&new_mb_handle));
  // pop returns the same handle back; empty pop reports not-exist
  ObKVMemBlockHandle* rt_handle = NULL;
  for (int64_t i = 0; i < ObWorkingSetMgr::WorkingSetList::FREE_ARRAY_SIZE; ++i) {
    ASSERT_EQ(OB_SUCCESS, ws_list.pop_mb_handle(rt_handle));
    ASSERT_EQ(rt_handle, &new_mb_handle);
    new_mb_handle.status_ = FULL;
  }
  ASSERT_EQ(OB_ENTRY_NOT_EXIST, ws_list.pop_mb_handle(rt_handle));
  for (int64_t i = 0; i < ObWorkingSetMgr::WorkingSetList::FREE_ARRAY_SIZE; ++i) {
    ASSERT_EQ(OB_SUCCESS, ws_list.push_mb_handle(&new_mb_handle));
  }
  ObKVGlobalCache::get_instance().destroy();
}
// ObWorkingSetMgr contract: create/delete fail before init, invalid
// arguments are rejected after init, and working sets can be created,
// deleted and re-created across 20 tenants.
TEST(TestWorkingSetMgr, common)
{
  ObWorkingSetMgr mgr;
  ASSERT_EQ(OB_SUCCESS, ObKVGlobalCache::get_instance().init());
  uint64_t tenant_id = 1;
  uint64_t cache_id = 1;
  ObArray<ObWorkingSet*> ws_array;
  ObWorkingSet* ws = NULL;
  const int64_t limit = 1024;
  WSListKey key;
  // not init
  ASSERT_EQ(OB_NOT_INIT, mgr.create_working_set(key, limit, ws));
  ASSERT_EQ(OB_NOT_INIT, mgr.delete_working_set(ws));
  ASSERT_EQ(OB_SUCCESS, mgr.init(ObKVGlobalCache::get_instance().store_));
  // invalid argument: default (invalid) key and NULL working set
  ASSERT_EQ(OB_INVALID_ARGUMENT, mgr.create_working_set(key, limit, ws));
  ASSERT_EQ(OB_INVALID_ARGUMENT, mgr.delete_working_set(NULL));
  // create one working set per tenant 1..20
  for (int64_t i = 0; i < 20; ++i) {
    tenant_id = i + 1;
    key.tenant_id_ = tenant_id;
    key.cache_id_ = cache_id;
    ASSERT_EQ(OB_SUCCESS, mgr.create_working_set(key, limit, ws));
    ASSERT_EQ(OB_SUCCESS, ws_array.push_back(ws));
  }
  // delete them all
  for (int64_t i = 0; i < ws_array.count(); ++i) {
    ASSERT_EQ(OB_SUCCESS, mgr.delete_working_set(ws_array.at(i)));
  }
  // re-create for the same tenants to prove slots are reusable; these are
  // reclaimed when the global cache is destroyed below
  for (int64_t i = 0; i < 20; ++i) {
    tenant_id = i % 20 + 1;
    key.tenant_id_ = tenant_id;
    key.cache_id_ = cache_id;
    ASSERT_EQ(OB_SUCCESS, mgr.create_working_set(key, limit, ws));
    ASSERT_EQ(OB_SUCCESS, ws_array.push_back(ws));
  }
  ObKVGlobalCache::get_instance().destroy();
}
} // end namespace common
} // end namespace oceanbase
// Test entry point: set the log level and run all registered gtest cases.
int main(int argc, char** argv)
{
  oceanbase::common::ObLogger::get_logger().set_log_level("INFO");
  // NOTE(review): OB_LOGGER appears to refer to the same global logger,
  // which would make this second call redundant — confirm before removing.
  OB_LOGGER.set_log_level("INFO");
  testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}