[FEAT MERGE] performance optimization for OLTP

Co-authored-by: dimstars <liangjinrongcm@gmail.com>
Co-authored-by: pe-99y <315053752@qq.com>
Naynahs
2024-04-10 07:32:27 +00:00
committed by ob-robot
parent 054f5a5a80
commit 3d4ef9741d
177 changed files with 7111 additions and 9708 deletions

View File

@@ -10,6 +10,11 @@
* See the Mulan PubL v2 for more details.
*/
#include <gtest/gtest.h>
#include <thread>
#define private public
#define protected public
#include "storage/memtable/mvcc/ob_keybtree.h"
#include "common/object/ob_object.h"
@@ -19,9 +24,6 @@
#include "storage/memtable/ob_memtable_key.h"
#include "storage/memtable/mvcc/ob_mvcc_row.h"
#include <gtest/gtest.h>
#include <thread>
namespace oceanbase
{
namespace unittest
@@ -78,21 +80,22 @@ int BIND_CPU(pthread_t thread)
const char *attr = ObModIds::TEST;
void init_key(BtreeKey *ptr, int64_t key)
void init_key(ObStoreRowkeyWrapper *ptr, int64_t key)
{
ptr->get_rowkey()->get_rowkey().get_obj_ptr()[0].set_int(key);
}
int alloc_key(BtreeKey *&ret_key, int64_t key)
int alloc_key(ObStoreRowkeyWrapper *&ret_key, int64_t key)
{
int ret = OB_SUCCESS;
ObObj *obj_ptr = nullptr;
ObStoreRowkey *storerowkey = nullptr;
if (OB_ISNULL(obj_ptr = (ObObj *)ob_malloc(sizeof(ObObj), attr)) || OB_ISNULL(new(obj_ptr)ObObj(key))) {
ret = OB_ALLOCATE_MEMORY_FAILED;
} else if (OB_ISNULL(storerowkey = (ObStoreRowkey *)ob_malloc(sizeof(ObStoreRowkey), attr)) || OB_ISNULL(new(storerowkey)ObStoreRowkey(obj_ptr, 1))) {
} else if (OB_ISNULL(storerowkey = (ObStoreRowkey *)ob_malloc(sizeof(ObStoreRowkey), attr))
|| OB_ISNULL(new(storerowkey)ObStoreRowkey(obj_ptr, 1))) {
ret = OB_ALLOCATE_MEMORY_FAILED;
} else if (OB_ISNULL(ret_key = (BtreeKey *)ob_malloc(sizeof(BtreeKey), attr)) || OB_ISNULL(new(ret_key)BtreeKey(storerowkey))) {
} else if (OB_ISNULL(ret_key = (ObStoreRowkeyWrapper *)ob_malloc(sizeof(ObStoreRowkeyWrapper), attr)) || OB_ISNULL(new(ret_key)ObStoreRowkeyWrapper(storerowkey))) {
ret = OB_ALLOCATE_MEMORY_FAILED;
}
return ret;
@@ -115,14 +118,14 @@ public:
}
};
int64_t get_v(BtreeKey *ptr)
int64_t get_v(ObStoreRowkeyWrapper *ptr)
{
int64_t tmp = 0;
IS_EQ(OB_SUCCESS, ptr->get_rowkey()->get_rowkey().get_obj_ptr()[0].get_int(tmp));
return tmp;
}
typedef ObKeyBtree Btree;
typedef ObKeyBtree<ObStoreRowkeyWrapper, ObMvccRow *> Btree;
constexpr int64_t THREAD_COUNT = (1 << 6);
@@ -130,10 +133,6 @@ constexpr int64_t ORDER_INSERT_THREAD_COUNT = THREAD_COUNT;
constexpr int64_t RANDOM_INSERT_THREAD_COUNT = THREAD_COUNT >> 3;
constexpr int64_t INSERT_COUNT_PER_THREAD = (1 << 20);
constexpr int64_t DELETE_THREAD_COUNT = 2;//THREAD_COUNT;
constexpr int64_t REINSERT_THREAD_COUNT = 2;//THREAD_COUNT;
constexpr int64_t SCAN_THREAD_COUNT = THREAD_COUNT;
constexpr int64_t MAX_INSERT_NUM = ORDER_INSERT_THREAD_COUNT * INSERT_COUNT_PER_THREAD * 4;
@@ -147,46 +146,42 @@ TEST(TestKeyBtree, smoke_test)
constexpr int64_t SCAN_THREAD_COUNT = THREAD_COUNT;
constexpr int64_t DELETE_THREAD_COUNT = THREAD_COUNT;
constexpr int64_t DELETE_RANGE_COUNT_PER_THREAD = (1 << 4);
constexpr int64_t DELETE_RANGE_SIZE = (1 << 10);
lib::set_memory_limit(200 * 1024 * 1024 * 1024L);
IS_EQ(INSERT_THREAD_COUNT * INSERT_COUNT_PER_THREAD
>= DELETE_THREAD_COUNT * DELETE_RANGE_COUNT_PER_THREAD * DELETE_RANGE_SIZE, true);
BtreeNodeAllocator allocator(*FakeAllocator::get_instance());
BtreeNodeAllocator<ObStoreRowkeyWrapper, ObMvccRow *> allocator(*FakeAllocator::get_instance());
Btree btree(allocator);
int ret = OB_SUCCESS;
IS_EQ(OB_SUCCESS, btree.init());
// naughty thread
// naughty threads: build separate trees to test the interference between trees sharing the retire station
std::thread normal_threads[2];
CACHE_ALIGNED bool should_stop = false;
for (int64_t i = 0; i < 2; ++i) {
normal_threads[i] = std::thread([&]() {
BtreeNodeAllocator allocator(*FakeAllocator::get_instance());
Btree btree(allocator);
IS_EQ(OB_SUCCESS, btree.init());
BtreeKey *key = nullptr;
for (int64_t j = 0; !ATOMIC_LOAD(&should_stop); ++j) {
auto v = (BtreeVal)(j << 3);
IS_EQ(OB_SUCCESS, alloc_key(key, j));
IS_EQ(OB_SUCCESS, btree.insert(*key, v));
}
btree.destroy();
});
normal_threads[i] = std::thread(
[&]() {
BtreeNodeAllocator<ObStoreRowkeyWrapper, ObMvccRow *> allocator(*FakeAllocator::get_instance());
Btree btree(allocator);
IS_EQ(OB_SUCCESS, btree.init());
ObStoreRowkeyWrapper *key = nullptr;
for (int64_t j = 0; !ATOMIC_LOAD(&should_stop); ++j) {
auto v = (ObMvccRow *)(j << 3);
IS_EQ(OB_SUCCESS, alloc_key(key, j));
IS_EQ(OB_SUCCESS, btree.insert(*key, v));
}
btree.destroy();
});
}
// keep inserting at left bound
// keep inserting at left bound, keys descending from -1
std::thread head_insert_thread[2];
CACHE_ALIGNED int64_t head_num = -1;
for (int64_t i = 0; i < 2; ++i) {
head_insert_thread[i] = std::thread([&]() {
BtreeKey *key = nullptr;
ObStoreRowkeyWrapper *key = nullptr;
while (!ATOMIC_LOAD(&should_stop)) {
int64_t j = ATOMIC_FAA(&head_num, -1);
auto v = (BtreeVal)(j << 3);
auto v = (ObMvccRow *)(j << 3);
IS_EQ(OB_SUCCESS, alloc_key(key, j));
IS_EQ(OB_SUCCESS, btree.insert(*key, v));
}
@@ -197,25 +192,25 @@ TEST(TestKeyBtree, smoke_test)
for (int64_t i = 0; i < 2; ++i) {
bad_scan_threads[i] = std::thread([&]() {
int ret = OB_SUCCESS;
BtreeKey *start_key = nullptr;
BtreeKey *end_key = nullptr;
BtreeKey *tmp_key = nullptr;
BtreeVal tmp_value = nullptr;
ObStoreRowkeyWrapper *start_key = nullptr;
ObStoreRowkeyWrapper *end_key = nullptr;
ObStoreRowkeyWrapper *tmp_key = nullptr;
ObMvccRow *tmp_value = nullptr;
IS_EQ(OB_SUCCESS, alloc_key(start_key, 0));
IS_EQ(OB_SUCCESS, alloc_key(end_key, 0));
IS_EQ(OB_SUCCESS, alloc_key(tmp_key, 0));
while (!ATOMIC_LOAD(&should_stop)) {
BtreeIterator iter;
BtreeIterator<ObStoreRowkeyWrapper, ObMvccRow *> iter;
init_key(start_key, MAX_INSERT_NUM);
init_key(end_key, INT64_MAX);
ret = btree.set_key_range(iter, *start_key, false, *end_key, false, 2);
ret = btree.set_key_range(iter, *start_key, false, *end_key, false);
IS_EQ(OB_SUCCESS, ret);
ret = iter.get_next(*tmp_key, tmp_value);
IS_EQ(OB_ITER_END, ret);
iter.reset();
init_key(start_key, INT64_MIN);
init_key(end_key, INT64_MIN + 1);
ret = btree.set_key_range(iter, *start_key, false, *end_key, false, 2);
ret = btree.set_key_range(iter, *start_key, false, *end_key, false);
IS_EQ(OB_SUCCESS, ret);
ret = iter.get_next(*tmp_key, tmp_value);
IS_EQ(OB_ITER_END, ret);
@@ -228,19 +223,19 @@ TEST(TestKeyBtree, smoke_test)
for (int64_t i = 0; i < 2; ++i) {
scan_all_threads[i] = std::thread([&]() {
int ret = OB_SUCCESS;
BtreeKey *start_key = nullptr;
BtreeKey *end_key = nullptr;
BtreeKey *tmp_key = nullptr;
BtreeVal tmp_value = nullptr;
BtreeKey *last = nullptr;
ObStoreRowkeyWrapper *start_key = nullptr;
ObStoreRowkeyWrapper *end_key = nullptr;
ObStoreRowkeyWrapper *tmp_key = nullptr;
ObMvccRow *tmp_value = nullptr;
ObStoreRowkeyWrapper *last = nullptr;
IS_EQ(OB_SUCCESS, alloc_key(start_key, INT64_MIN));
IS_EQ(OB_SUCCESS, alloc_key(end_key, INT64_MAX));
IS_EQ(OB_SUCCESS, alloc_key(tmp_key, 0));
IS_EQ(OB_SUCCESS, alloc_key(last, 0));
while (!ATOMIC_LOAD(&should_stop)) {
BtreeIterator iter;
BtreeIterator<ObStoreRowkeyWrapper, ObMvccRow *> iter;
init_key(last, INT64_MIN);
ret = btree.set_key_range(iter, *start_key, false, *end_key, false, 2);
ret = btree.set_key_range(iter, *start_key, false, *end_key, false);
IS_EQ(OB_SUCCESS, ret);
while (OB_SUCC(iter.get_next(*tmp_key, tmp_value))) {
int cmp = 0;
@@ -261,11 +256,11 @@ TEST(TestKeyBtree, smoke_test)
for (int64_t i = 0; i < ORDER_INSERT_THREAD_COUNT; ++i) {
order_insert_threads[i] = std::thread([&]() {
int ret = OB_SUCCESS;
BtreeKey *tmp_key = nullptr;
BtreeVal tmp_value = nullptr;
ObStoreRowkeyWrapper *tmp_key = nullptr;
ObMvccRow *tmp_value = nullptr;
for (int64_t j = 0; j < INSERT_COUNT_PER_THREAD; ++j) {
int64_t key = ATOMIC_FAA(&global_key, 1);
auto v = (BtreeVal)(key << 3);
auto v = (ObMvccRow *)(key << 3);
IS_EQ(OB_SUCCESS, alloc_key(tmp_key, key));
if (OB_SUCC(btree.get(*tmp_key, tmp_value))) {
// do nothing
@@ -285,10 +280,10 @@ TEST(TestKeyBtree, smoke_test)
random_insert_threads[i] = std::thread([&]() {
int ret = OB_SUCCESS;
for (int64_t j = 0; j < INSERT_COUNT_PER_THREAD * 4; ++j) {
BtreeKey *tmp_key = nullptr;
BtreeVal tmp_value = nullptr;
ObStoreRowkeyWrapper *tmp_key = nullptr;
ObMvccRow *tmp_value = nullptr;
int64_t key = ObRandom::rand(0, MAX_INSERT_NUM - 1);
auto v = (BtreeVal)(key << 3);
auto v = (ObMvccRow *)(key << 3);
IS_EQ(OB_SUCCESS, alloc_key(tmp_key, key));
if (OB_SUCC(btree.get(*tmp_key, tmp_value))) {
// do nothing
@@ -303,48 +298,6 @@ TEST(TestKeyBtree, smoke_test)
});
}
_OB_LOG(INFO, "del inorder");
std::thread delete_threads[DELETE_THREAD_COUNT];
CACHE_ALIGNED int64_t del_key = 0;
for (int64_t i = 0; i < DELETE_THREAD_COUNT; ++i) {
delete_threads[i] = std::thread([&]() {
int ret = OB_SUCCESS;
BtreeKey *tmp_key = nullptr;
BtreeVal tmp_value = nullptr;
int64_t key = 0;
IS_EQ(OB_SUCCESS, alloc_key(tmp_key, 0));
while ((key = ATOMIC_FAA(&del_key, 1)) < ORDER_INSERT_THREAD_COUNT * INSERT_COUNT_PER_THREAD) {
init_key(tmp_key, key);
while (OB_FAIL(btree.del(*tmp_key, tmp_value, 3))) {
IS_EQ(OB_ENTRY_NOT_EXIST, ret);
}
judge(tmp_key, tmp_value);
}
});
}
_OB_LOG(INFO, "reinsert inorder");
std::thread reinsert_threads[REINSERT_THREAD_COUNT];
CACHE_ALIGNED int64_t reinsert_key = 0;
for (int64_t i = 0; i < REINSERT_THREAD_COUNT; ++i) {
reinsert_threads[i] = std::thread([&]() {
int ret = OB_SUCCESS;
BtreeKey *tmp_key = nullptr;
BtreeVal tmp_value = nullptr;
int64_t key = 0;
IS_EQ(OB_SUCCESS, alloc_key(tmp_key, 0));
while ((key = ATOMIC_FAA(&reinsert_key, 1)) < ORDER_INSERT_THREAD_COUNT * INSERT_COUNT_PER_THREAD) {
init_key(tmp_key, key);
while (OB_FAIL(btree.re_insert(*tmp_key, (BtreeVal)(key << 3)))) {
IS_EQ(OB_ENTRY_NOT_EXIST, ret);
}
ret = btree.del(*tmp_key, tmp_value, 3);
IS_EQ(OB_SUCCESS, ret);
judge(tmp_key, tmp_value);
}
});
}
for (int64_t i = 0; i < RANDOM_INSERT_THREAD_COUNT; ++i) {
random_insert_threads[i].join();
_OB_LOG(INFO, "random insert end");
@@ -353,45 +306,38 @@ TEST(TestKeyBtree, smoke_test)
order_insert_threads[i].join();
_OB_LOG(INFO, "order insert end");
}
for (int64_t i = 0; i < DELETE_THREAD_COUNT; ++i) {
delete_threads[i].join();
_OB_LOG(INFO, "delete end");
}
for (int64_t i = 0; i < REINSERT_THREAD_COUNT; ++i) {
reinsert_threads[i].join();
_OB_LOG(INFO, "reinsert end");
}
_OB_LOG(INFO, "cal sum");
std::thread scan_threads[SCAN_THREAD_COUNT];
CACHE_ALIGNED int64_t sum = 0;
for (int64_t i = 0; i < SCAN_THREAD_COUNT; ++i) {
scan_threads[i] = std::thread([&, i]() {
BtreeIterator iter1, iter2;
BtreeKey *start_key = nullptr;
BtreeKey *end_key = nullptr;
BtreeKey *tmp_key = nullptr;
BtreeVal tmp_value = nullptr;
int64_t len = ORDER_INSERT_THREAD_COUNT * INSERT_COUNT_PER_THREAD / SCAN_THREAD_COUNT;
IS_EQ(OB_SUCCESS, alloc_key(start_key, i * len));
IS_EQ(OB_SUCCESS, alloc_key(end_key, (i + 1) * len));
IS_EQ(OB_SUCCESS, alloc_key(tmp_key, 0));
IS_EQ(OB_SUCCESS, btree.set_key_range(iter1, *start_key, false, *end_key, true, 2));
IS_EQ(OB_SUCCESS, btree.set_key_range(iter2, *start_key, false, *end_key, true, 4));
for (int64_t j = 0; j < len; ++j) {
IS_EQ(OB_SUCCESS, iter1.get_next(*tmp_key, tmp_value));
IS_EQ((uint64_t)tmp_value & 1, 0);
IS_EQ(get_v(tmp_key), i * len + j);
judge(tmp_key, tmp_value);
IS_EQ(OB_SUCCESS, iter2.get_next(*tmp_key, tmp_value));
IS_EQ((uint64_t)tmp_value & 1, 1);
IS_EQ(get_v(tmp_key), i * len + j);
judge(tmp_key, tmp_value);
ATOMIC_AAF(&sum, get_v(tmp_key));
}
IS_EQ(OB_ITER_END, iter1.get_next(*tmp_key, tmp_value));
IS_EQ(OB_ITER_END, iter2.get_next(*tmp_key, tmp_value));
});
scan_threads[i] = std::thread(
[&, i]() {
BtreeIterator<ObStoreRowkeyWrapper, ObMvccRow *> iter1, iter2;
ObStoreRowkeyWrapper *start_key = nullptr;
ObStoreRowkeyWrapper *end_key = nullptr;
ObStoreRowkeyWrapper *tmp_key = nullptr;
ObMvccRow *tmp_value = nullptr;
int64_t len = ORDER_INSERT_THREAD_COUNT * INSERT_COUNT_PER_THREAD / SCAN_THREAD_COUNT;
IS_EQ(OB_SUCCESS, alloc_key(start_key, i * len));
IS_EQ(OB_SUCCESS, alloc_key(end_key, (i + 1) * len));
IS_EQ(OB_SUCCESS, alloc_key(tmp_key, 0));
IS_EQ(OB_SUCCESS, btree.set_key_range(iter1, *start_key, false, *end_key, true));
IS_EQ(OB_SUCCESS, btree.set_key_range(iter2, *start_key, false, *end_key, true));
for (int64_t j = 0; j < len; ++j) {
IS_EQ(OB_SUCCESS, iter1.get_next(*tmp_key, tmp_value));
IS_EQ((uint64_t)tmp_value & 1, 0);
IS_EQ(get_v(tmp_key), i * len + j);
judge(tmp_key, tmp_value);
IS_EQ(OB_SUCCESS, iter2.get_next(*tmp_key, tmp_value));
IS_EQ((uint64_t)tmp_value & 1, 0);
IS_EQ(get_v(tmp_key), i * len + j);
judge(tmp_key, tmp_value);
ATOMIC_AAF(&sum, get_v(tmp_key));
}
IS_EQ(OB_ITER_END, iter1.get_next(*tmp_key, tmp_value));
IS_EQ(OB_ITER_END, iter2.get_next(*tmp_key, tmp_value));
});
}
for (int64_t i = 0; i < SCAN_THREAD_COUNT; ++i) {
scan_threads[i].join();
@@ -399,15 +345,15 @@ TEST(TestKeyBtree, smoke_test)
IS_EQ((ORDER_INSERT_THREAD_COUNT * INSERT_COUNT_PER_THREAD - 1) * ORDER_INSERT_THREAD_COUNT * INSERT_COUNT_PER_THREAD / 2, sum);
_OB_LOG(INFO, "cal sum end");
BtreeIterator iter;
BtreeKey *start_key = nullptr;
BtreeKey *end_key = nullptr;
BtreeKey *tmp_key = nullptr;
BtreeVal tmp_value = nullptr;
BtreeIterator<ObStoreRowkeyWrapper, ObMvccRow *> iter;
ObStoreRowkeyWrapper *start_key = nullptr;
ObStoreRowkeyWrapper *end_key = nullptr;
ObStoreRowkeyWrapper *tmp_key = nullptr;
ObMvccRow *tmp_value = nullptr;
IS_EQ(OB_SUCCESS, alloc_key(start_key, 0));
IS_EQ(OB_SUCCESS, alloc_key(end_key, INT64_MAX));
IS_EQ(OB_SUCCESS, alloc_key(tmp_key, 0));
IS_EQ(OB_SUCCESS, btree.set_key_range(iter, *start_key, false, *end_key, true, 5));
IS_EQ(OB_SUCCESS, btree.set_key_range(iter, *start_key, false, *end_key, true));
for (int64_t key = 0; OB_SUCC(iter.get_next(*tmp_key, tmp_value)); ++key) {
judge(tmp_key, tmp_value);
if (get_v(tmp_key) < ORDER_INSERT_THREAD_COUNT * INSERT_COUNT_PER_THREAD) {
@@ -419,26 +365,6 @@ TEST(TestKeyBtree, smoke_test)
IS_EQ(OB_ITER_END, ret);
IS_EQ(random_sum, 0);
_OB_LOG(INFO, "reinsert");
ATOMIC_STORE(&global_key, 0);
for (int64_t i = 0; i < SCAN_THREAD_COUNT; ++i) {
scan_threads[i] = std::thread([&]() {
int ret = OB_SUCCESS;
BtreeKey *tmp_key = nullptr;
int64_t key = 0;
IS_EQ(OB_SUCCESS, alloc_key(tmp_key, 0));
while ((key = ATOMIC_FAA(&global_key, 1)) < ORDER_INSERT_THREAD_COUNT * INSERT_COUNT_PER_THREAD) {
init_key(tmp_key, key);
while (OB_FAIL(btree.re_insert(*tmp_key, (BtreeVal)(key << 3)))) {
IS_EQ(OB_ENTRY_NOT_EXIST, ret);
}
}
});
}
for (int64_t i = 0; i < SCAN_THREAD_COUNT; ++i) {
scan_threads[i].join();
}
_OB_LOG(INFO, "reinsert end");
int32_t pos = btree.update_split_info(7);
_OB_LOG(INFO, "btree split info %d", pos);
IS_EQ(OB_SUCCESS, btree.destroy());

View File

@@ -0,0 +1,645 @@
/**
* Copyright (c) 2021 OceanBase
* OceanBase CE is licensed under Mulan PubL v2.
* You can use this software according to the terms and conditions of the Mulan PubL v2.
* You may obtain a copy of Mulan PubL v2 at:
* http://license.coscl.org.cn/MulanPubL-2.0
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PubL v2 for more details.
*/
#include "storage/memtable/mvcc/ob_keybtree.h"
#include "lib/allocator/ob_malloc.h"
#include "lib/oblog/ob_log.h"
#include "lib/random/ob_random.h"
#include "common/object/ob_object.h"
#include <gtest/gtest.h>
#include <thread>
#include <algorithm>
#include <iostream>
#include <vector>
#include <unordered_set>
#include <atomic>
namespace oceanbase {
namespace unittest {
using namespace oceanbase::keybtree;
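// debug helper: dump the whole btree into dump_btree.txt via btree.dump()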
#define DUMP_BTREE \
{ \
FILE *file = fopen("dump_btree.txt", "w+"); \
btree.dump(file); \
fclose(file); \
}
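// test allocator: set_remain() caps the remaining bytes so later allocations fail, simulating memory exhaustion; unset_limited() lifts the cap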
class FakeAllocator : public ObIAllocator {
public:
FakeAllocator() : remain_(0), is_limited_(false)
{}
const char *attr = ObModIds::TEST;
void *alloc(int64_t size) override
{
void *block = nullptr;
if (is_limited_) {
if (remain_ > 0) {
block = ob_malloc(size, attr);
remain_ -= size;
}
} else {
block = ob_malloc(size, attr);
}
return block;
}
void *alloc_key(int64_t size)
{
return ob_malloc(size, attr);
}
void *alloc(const int64_t size, const ObMemAttr &attr) override
{
void *block = nullptr;
UNUSED(attr);
if (is_limited_) {
if (remain_ > 0) {
block = alloc(size);  // alloc(size) already enforces and decrements the budget
}
} else {
block = alloc(size);
}
return block;
}
void free(void *ptr) override
{
ob_free(ptr);
}
void set_remain(int remain)
{
remain_ = remain;
is_limited_ = true;
}
void unset_limited()
{
is_limited_ = false;
}
static FakeAllocator *get_instance()
{
static FakeAllocator allocator;
return &allocator;
}
private:
std::atomic<int64_t> remain_;
bool is_limited_;
};
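// minimal key type for the templated btree: wraps a single ObObj and exposes compare()/to_string()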
class FakeKey {
public:
FakeKey() : obj_(nullptr)
{}
FakeKey(ObObj *obj) : obj_(obj)
{}
void set_int(int64_t data)
{
obj_->set_int(data);
}
int compare(FakeKey other, int &cmp) const
{
return obj_->compare(*other.obj_, cmp);
}
int64_t to_string(char *buf, const int64_t limit) const
{
return obj_->to_string(buf, limit);
}
ObObj *get_ptr() const
{
return obj_;
}
ObObj *obj_;
};
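// helpers to allocate and free a FakeKey holding an int64 value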
FakeKey build_int_key(int64_t key)
{
auto alloc = FakeAllocator::get_instance();
void *block = alloc->alloc_key(sizeof(ObObj));
EXPECT_TRUE(OB_NOT_NULL(block));
ObObj *obj = new (block) ObObj(key);
return FakeKey(obj);
}
void free_key(FakeKey &key)
{
auto alloc = FakeAllocator::get_instance();
alloc->free((void *)key.obj_);
}
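// instantiate the btree with FakeKey keys and int64_t* values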
using BtreeNode = BtreeNode<FakeKey, int64_t *>;
using ObKeyBtree = ObKeyBtree<FakeKey, int64_t *>;
using BtreeIterator = BtreeIterator<FakeKey, int64_t *>;
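// scan from start_key to end_key with the given boundary flags and assert the returned keys match answer exactly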
void judge_tree_scan(ObKeyBtree *btree, FakeKey start_key, FakeKey end_key, bool exclude_start_key,
bool exclude_end_key, bool is_backward, std::vector<int64_t> &answer)
{
FakeKey key;
int64_t *val = nullptr;
int i = 0;
BtreeIterator iter;
btree->set_key_range(iter, start_key, exclude_start_key, end_key, exclude_end_key);
while (iter.get_next(key, val) == OB_SUCCESS) {
ASSERT_EQ(key.get_ptr()->get_int(), answer[i]);
i++;
}
ASSERT_EQ(i, answer.size());
}
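// free every key still referenced by the tree, then destroy the tree itself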
void free_btree(ObKeyBtree &btree)
{
FakeKey start_key = build_int_key(INT64_MIN);
FakeKey end_key = build_int_key(INT64_MAX);
FakeKey key;
int64_t *val = nullptr;
FakeAllocator *allocator = FakeAllocator::get_instance();
BtreeIterator iter;
btree.set_key_range(iter, start_key, true, end_key, true);
std::vector<FakeKey> keys;
while (iter.get_next(key, val) == OB_SUCCESS) {
keys.push_back(key);
}
for (auto &key : keys) {
allocator->free(key.get_ptr());
}
btree.destroy(false /*is_batch_destroy*/);
}
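// single-threaded smoke test: insert even keys in random order, verify point lookups, then check forward/backward scans with inclusive/exclusive bounds against computed answers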
TEST(TestBtree, smoke_test)
{
constexpr int64_t KEY_NUM = 100000;
std::vector<int64_t> data(KEY_NUM);
FakeAllocator *allocator = FakeAllocator::get_instance();
BtreeNodeAllocator<FakeKey, int64_t *> node_allocator(*allocator);
ObKeyBtree btree(node_allocator);
FakeKey search_key = build_int_key(0);
ASSERT_EQ(btree.init(), OB_SUCCESS);
for (int i = 0; i < KEY_NUM; i++) {
data[i] = i * 2;
}
std::random_shuffle(data.begin(), data.end());
// test insert and search
for (int len = 1; len <= KEY_NUM; len++) {
int64_t cur = data[len - 1];
FakeKey key = build_int_key(cur);
int64_t *val = &data[len - 1];
btree.insert(key, val);
if (len % (KEY_NUM / 49) == 0) {
for (int i = 0; i < len * 2; i++) {
search_key.set_int(data[i / 2] + i % 2);
if (i % 2 == 0) {
ASSERT_EQ(btree.get(search_key, val), OB_SUCCESS);
} else {
ASSERT_EQ(btree.get(search_key, val), OB_ENTRY_NOT_EXIST);
}
}
}
}
std::sort(data.begin(), data.end());
search_key.set_int(-1);
int64_t *val = nullptr;
ASSERT_EQ(btree.get(search_key, val), OB_ENTRY_NOT_EXIST);
FakeKey start_key = build_int_key(0);
FakeKey end_key = build_int_key(0);
// test scan
int REPEAT_COUNT = 100;
// forward include
REPEAT_COUNT = 100;
while (REPEAT_COUNT--) {
int64_t start_int = ObRandom::rand(-KEY_NUM, KEY_NUM * 3);
int64_t end_int = ObRandom::rand(start_int + 1, KEY_NUM * 3);
start_key.set_int(start_int);
end_key.set_int(end_int);
std::vector<int64_t> ans;
for (int i = max(0, (start_int + 1) / 2 * 2); i <= min((KEY_NUM - 1) * 2, min(end_int / 2 * 2, end_int)); i += 2) {
ans.push_back(i);
}
judge_tree_scan(&btree, start_key, end_key, false, false, false, ans);
}
// forward exclude
REPEAT_COUNT = 100;
while (REPEAT_COUNT--) {
int64_t start_int = ObRandom::rand(-KEY_NUM, KEY_NUM * 3);
int64_t end_int = ObRandom::rand(start_int + 1, KEY_NUM * 3);
start_key.set_int(start_int);
end_key.set_int(end_int);
std::vector<int64_t> ans;
for (int i = max(0, (start_int + 2) / 2 * 2); i <= min((KEY_NUM - 1) * 2, min((end_int - 1) / 2 * 2, end_int - 1));
i += 2) {
ans.push_back(i);
}
judge_tree_scan(&btree, start_key, end_key, true, true, false, ans);
}
// backward include
REPEAT_COUNT = 100;
while (REPEAT_COUNT--) {
int64_t start_int = ObRandom::rand(-KEY_NUM, KEY_NUM * 3);
int64_t end_int = ObRandom::rand(start_int + 1, KEY_NUM * 3);
start_key.set_int(start_int);
end_key.set_int(end_int);
std::vector<int64_t> ans;
for (int i = min((KEY_NUM - 1) * 2, min(end_int / 2 * 2, end_int)); i >= max(0, (start_int + 1) / 2 * 2); i -= 2) {
ans.push_back(i);
}
judge_tree_scan(&btree, end_key, start_key, false, false, true, ans);
}
// backward exclude
REPEAT_COUNT = 100;
while (REPEAT_COUNT--) {
int64_t start_int = ObRandom::rand(-KEY_NUM, KEY_NUM * 3);
int64_t end_int = ObRandom::rand(start_int + 1, KEY_NUM * 3);
start_key.set_int(start_int);
end_key.set_int(end_int);
std::vector<int64_t> ans;
for (int i = min((KEY_NUM - 1) * 2, min((end_int - 1) / 2 * 2, end_int - 1)); i >= max(0, (start_int + 2) / 2 * 2);
i -= 2) {
ans.push_back(i);
}
judge_tree_scan(&btree, end_key, start_key, true, true, true, ans);
}
free_btree(btree);
allocator->free(search_key.get_ptr());
allocator->free(start_key.get_ptr());
allocator->free(end_key.get_ptr());
}
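// 64 threads insert disjoint key sets concurrently; after they join, a full scan must see every key exactly once and in order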
TEST(TestEventualConsistency, smoke_test)
{
constexpr uint64_t KEY_NUM = 6400000;
constexpr uint64_t THREAD_COUNT = 64;
constexpr uint64_t PER_THREAD_INSERT_COUNT = KEY_NUM / THREAD_COUNT;
FakeAllocator *allocator = FakeAllocator::get_instance();
BtreeNodeAllocator<FakeKey, int64_t *> node_allocator(*allocator);
ObKeyBtree btree(node_allocator);
std::thread threads[THREAD_COUNT];
ASSERT_EQ(btree.init(), OB_SUCCESS);
// prepare insert keys
std::vector<std::vector<int64_t>> data(THREAD_COUNT, std::vector<int64_t>(PER_THREAD_INSERT_COUNT));
for (int i = 0; i < THREAD_COUNT; i++) {
for (int j = 0; j < PER_THREAD_INSERT_COUNT; j++) {
data[i][j] = THREAD_COUNT * j + i;
}
std::random_shuffle(data[i].begin(), data[i].end());
}
// concurrent insert
for (int thread_id = 0; thread_id < THREAD_COUNT; thread_id++) {
threads[thread_id] = std::thread(
[&](int i) {
for (int j = 0; j < PER_THREAD_INSERT_COUNT; j++) {
int64_t *val = &(data[i][j]);
btree.insert(build_int_key(data[i][j]), val);
}
},
thread_id);
}
for (int thread_id = 0; thread_id < THREAD_COUNT; thread_id++) {
threads[thread_id].join();
}
// evaluate the tree
FakeKey start_key = build_int_key(0);
FakeKey end_key = build_int_key(KEY_NUM);
FakeKey key;
int64_t *val;
BtreeIterator iter;
btree.set_key_range(iter, start_key, false, end_key, false);
int i = 0;
while (iter.get_next(key, val) == OB_SUCCESS) {
ASSERT_EQ(key.get_ptr()->get_int(), i);
i++;
}
ASSERT_EQ(i, KEY_NUM);
free_btree(btree);
allocator->free(start_key.get_ptr());
allocator->free(end_key.get_ptr());
}
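// writers insert while scanners run concurrently: verifies monotonic writes and monotonic reads (each scan sees a superset of the previous one), see the in-body comments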
TEST(TestMonotonicReadWrite, smoke_test)
{
constexpr int KEY_NUM = 6400000;
constexpr int WRITE_THREAD_COUNT = 32;
constexpr int PER_THREAD_INSERT_COUNT = KEY_NUM / WRITE_THREAD_COUNT;
constexpr int SCAN_THREAD_COUNT = 32;
constexpr int PER_THREAD_SCAN_COUNT = 8;
FakeAllocator *allocator = FakeAllocator::get_instance();
BtreeNodeAllocator<FakeKey, int64_t *> node_allocator(*allocator);
ObKeyBtree btree(node_allocator);
ASSERT_EQ(btree.init(), OB_SUCCESS);
// constructing insert keys
std::vector<std::vector<int64_t>> data(WRITE_THREAD_COUNT, std::vector<int64_t>(PER_THREAD_INSERT_COUNT));
for (int i = 0; i < WRITE_THREAD_COUNT; i++) {
for (int j = 0; j < PER_THREAD_INSERT_COUNT; j++) {
data[i][j] = WRITE_THREAD_COUNT * j + i;
}
std::random_shuffle(data[i].begin(), data[i].end());
}
std::thread write_threads[WRITE_THREAD_COUNT];
for (int thread_id = 0; thread_id < WRITE_THREAD_COUNT; thread_id++) {
write_threads[thread_id] = std::thread(
[&](int i) {
// insert in order
for (int j = 0; j < PER_THREAD_INSERT_COUNT; j++) {
int64_t *val = &(data[i][j]);
btree.insert(build_int_key(data[i][j]), val);
usleep(1);
}
},
thread_id);
}
std::thread scan_threads[SCAN_THREAD_COUNT];
for (int thread_id = 0; thread_id < SCAN_THREAD_COUNT; thread_id++) {
scan_threads[thread_id] = std::thread(
[&](int thread_id) {
FakeKey start_key = build_int_key(-1);
FakeKey end_key = build_int_key(KEY_NUM + 1);
int scan_count = PER_THREAD_SCAN_COUNT;
std::unordered_set<int64_t *> last_results;
while (scan_count--) {
std::unordered_set<int64_t *> results;
FakeKey key;
int64_t *val;
if (thread_id % 2 == 0) {
// scan forward
BtreeIterator iter;
btree.set_key_range(iter, start_key, false, end_key, false);
int64_t last = -1;
while (iter.get_next(key, val) == OB_SUCCESS) {
results.insert(val);
ASSERT_GT(key.get_ptr()->get_int(), last);
last = key.get_ptr()->get_int();
}
} else {
// scan backward
BtreeIterator iter;
btree.set_key_range(iter, end_key, false, start_key, false);
int64_t last = KEY_NUM + 1;
while (iter.get_next(key, val) == OB_SUCCESS) {
results.insert(val);
ASSERT_LT(key.get_ptr()->get_int(), last);
last = key.get_ptr()->get_int();
}
}
// test monotonic write: if a thread sees a key A, it should also see all keys inserted before A
for (int i = 0; i < WRITE_THREAD_COUNT; i++) {
if (thread_id % 2 == 0) {
int64_t min = KEY_NUM + 1;
for (int j = PER_THREAD_INSERT_COUNT - 1; j >= 0; j--) {
ASSERT_TRUE(data[i][j] < min || results.count(&data[i][j]) == 1);
if (results.count(&data[i][j]) == 1) {
min = std::min(min, data[i][j]);
}
}
} else {
int64_t max = -1;
for (int j = PER_THREAD_INSERT_COUNT - 1; j >= 0; j--) {
ASSERT_TRUE(data[i][j] > max || results.count(&data[i][j]) == 1);
if (results.count(&data[i][j]) == 1) {
max = std::max(max, data[i][j]);
}
}
}
}
// test monotonic read: if a thread does two scans, the first scan's result should be a subset of
// the second scan's result.
for (auto i : last_results) {
ASSERT_TRUE(results.count(i) == 1);
}
last_results = results;
}
allocator->free(start_key.get_ptr());
allocator->free(end_key.get_ptr());
},
thread_id);
}
for (int thread_id = 0; thread_id < WRITE_THREAD_COUNT; thread_id++) {
write_threads[thread_id].join();
}
for (int thread_id = 0; thread_id < SCAN_THREAD_COUNT; thread_id++) {
scan_threads[thread_id].join();
}
free_btree(btree);
}
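// two writers insert key pairs in lockstep, gated by a shared progress counter; readers record which element of each pair became visible first, and no pair may be observed in both orders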
TEST(TestSequentialConsistency, smoke_test)
{
constexpr int PER_THREAD_INSERT_COUNT = 200000;
constexpr int READ_THREAD_COUNT = 16;
std::atomic<int> progress(-1);
FakeAllocator *allocator = FakeAllocator::get_instance();
BtreeNodeAllocator<FakeKey, int64_t *> node_allocator(*allocator);
ObKeyBtree btree(node_allocator);
ASSERT_EQ(btree.init(), OB_SUCCESS);
std::thread main_thread([&] {
for (; progress < PER_THREAD_INSERT_COUNT; progress++) {
usleep(1);
}
});
std::vector<int64_t> insert_keys(PER_THREAD_INSERT_COUNT * 2);
for (int i = 0; i < insert_keys.size(); i++) {
insert_keys[i] = i;
}
std::random_shuffle(insert_keys.begin(), insert_keys.end());
std::thread write_threads[2];
for (int thread_id = 0; thread_id < 2; thread_id++) {
write_threads[thread_id] = std::thread(
[&](int thread_id) {
int last = -1;
int insert_id = 0;
while (last < PER_THREAD_INSERT_COUNT) {
while (last >= progress) {}
last++;
insert_id = last * 2 + thread_id;
int64_t *val = &(insert_keys[insert_id]);
btree.insert(build_int_key(insert_keys[insert_id]), val);
}
},
thread_id);
}
std::vector<std::vector<bool>> read_results(READ_THREAD_COUNT, std::vector<bool>(PER_THREAD_INSERT_COUNT));
std::thread read_threads[READ_THREAD_COUNT];
for (int thread_id = 0; thread_id < READ_THREAD_COUNT; thread_id++) {
read_threads[thread_id] = std::thread(
[&](int thread_id) {
int64_t *val;
for (int i = 0; i < PER_THREAD_INSERT_COUNT; i++) {
FakeKey search_key1 = build_int_key(insert_keys[i * 2]);
FakeKey search_key2 = build_int_key(insert_keys[i * 2 + 1]);
if (thread_id % 2 == 0) {
while (btree.get(search_key1, val) != OB_SUCCESS) {}
if (btree.get(search_key2, val) == OB_ENTRY_NOT_EXIST) {
// the order this thread saw is: search_key1 -> search_key2
read_results[thread_id][i] = true;
}
} else {
while (btree.get(search_key2, val) != OB_SUCCESS) {}
if (btree.get(search_key1, val) == OB_ENTRY_NOT_EXIST) {
// the order this thread saw is: search_key2 -> search_key1
read_results[thread_id][i] = true;
}
}
allocator->free(search_key1.get_ptr());
allocator->free(search_key2.get_ptr());
}
},
thread_id);
}
main_thread.join();
write_threads[0].join();
write_threads[1].join();
for (int i = 0; i < READ_THREAD_COUNT; i++) {
read_threads[i].join();
}
int count = 0;
for (int j = 0; j < PER_THREAD_INSERT_COUNT; j++) {
for (int i = 0; i < READ_THREAD_COUNT; i++) {
read_results[i % 2][j] = read_results[i % 2][j] || read_results[i][j];
}
// threads shouldn't see different order
ASSERT_FALSE(read_results[0][j] && read_results[1][j]);
}
free_btree(btree);
}
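// cap the allocator at max_nodes_cnt nodes, let 64 threads insert until allocation fails, then verify every successfully inserted key is readable and a full scan is still ordered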
void test_memory_not_enough(int max_nodes_cnt)
{
constexpr uint64_t KEY_NUM = 6400000;
constexpr uint64_t THREAD_COUNT = 64;
constexpr uint64_t PER_THREAD_INSERT_COUNT = KEY_NUM / THREAD_COUNT;
FakeAllocator *allocator = FakeAllocator::get_instance();
BtreeNodeAllocator<FakeKey, int64_t *> node_allocator(*allocator);
ObKeyBtree btree(node_allocator);
int64_t max_size = max_nodes_cnt * sizeof(BtreeNode);
ASSERT_EQ(btree.init(), OB_SUCCESS);
std::thread threads[THREAD_COUNT];
std::vector<std::vector<int64_t>> data(THREAD_COUNT, std::vector<int64_t>(PER_THREAD_INSERT_COUNT));
for (int i = 0; i < THREAD_COUNT; i++) {
for (int j = 0; j < PER_THREAD_INSERT_COUNT; j++) {
data[i][j] = THREAD_COUNT * j + i;
}
std::random_shuffle(data[i].begin(), data[i].end());
}
allocator->set_remain(max_size);
int insert_progress[THREAD_COUNT];
// concurrent insert
for (int thread_id = 0; thread_id < THREAD_COUNT; thread_id++) {
threads[thread_id] = std::thread(
[&](int i) {
int ret = OB_SUCCESS;
int64_t *val = &data[i][0];
insert_progress[i] = -1;
for (int j = 0; j < PER_THREAD_INSERT_COUNT && OB_SUCC(btree.insert(build_int_key(data[i][j]), val));
j++, val = &data[i][j]) {
insert_progress[i] = j;
}
ASSERT_EQ(ret, OB_ALLOCATE_MEMORY_FAILED);
},
thread_id);
}
for (int thread_id = 0; thread_id < THREAD_COUNT; thread_id++) {
threads[thread_id].join();
}
allocator->unset_limited();
// evaluate the tree
FakeKey key = build_int_key(0);
int64_t *val = nullptr;
std::unordered_set<int64_t> results;
for (int i = 0; i < THREAD_COUNT; i++) {
for (int j = 0; j <= insert_progress[i]; j++) {
key.set_int(data[i][j]);
ASSERT_EQ(btree.get(key, val), OB_SUCCESS);
ASSERT_EQ(*val, data[i][j]);
results.insert(*val);
}
}
free_key(key);
FakeKey start_key = build_int_key(0);
FakeKey end_key = build_int_key(KEY_NUM);
BtreeIterator iter;
btree.set_key_range(iter, start_key, false, end_key, false);
int64_t last = -1;
while (iter.get_next(key, val) == OB_SUCCESS) {
ASSERT_GT(*val, last);
last = *val;
results.erase(*val);
}
ASSERT_EQ(results.size(), 0);
free_btree(btree);
free_key(start_key);
free_key(end_key);
}
TEST(TestMemoryNotEnough, smoke_test)
{
int nodes_cnt[20] = {1, 2, 3, 15, 16, 17, 18, 19, 20, 225, 227, 229, 230, 500, 1000, 2000, 3375, 5000, 8000, 10000};
for (int i = 0; i < 20; i++) {
test_memory_not_enough(nodes_cnt[i]);
}
}
} // namespace unittest
} // namespace oceanbase
int main(int argc, char **argv)
{
// oceanbase::unittest::BIND_CPU(pthread_self());
oceanbase::common::ObLogger::get_logger().set_file_name("test_keybtreeV2.log", true);
oceanbase::common::ObLogger::get_logger().set_log_level("INFO");
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

View File

@@ -102,7 +102,7 @@ public:
mt_counter_(0),
mt_ctx_(),
cb_allocator_(),
mgr_(mt_ctx_, cb_allocator_),
mgr_(mt_ctx_, cb_allocator_, mt_ctx_.mem_ctx_obj_pool_),
callback_list_(mgr_, 101) { }
virtual void SetUp() override
{
@@ -1275,7 +1275,7 @@ TEST_F(TestTxCallbackList, log_cursor) {
namespace memtable
{
void ObMemtableCtx::callback_free(ObITransCallback *cb)
void ObMemtableCtx::free_mvcc_row_callback(ObITransCallback *cb)
{
if (OB_ISNULL(cb)) {
TRANS_LOG_RET(ERROR, OB_ERR_UNEXPECTED, "cb is null, unexpected error", KP(cb), K(*this));

View File

@@ -61,32 +61,31 @@ TEST(TestObQueryEngine, smoke_test)
};
auto test_scan = [&](int64_t start, bool include_start, int64_t end, bool include_end) {
ObIQueryEngineIterator *iter = nullptr;
ret = qe.scan(mtk[start], !include_start, mtk[end], !include_end, 1, iter);
bool skip_purge_memtable = false;
ret = qe.scan(mtk[start], !include_start, mtk[end], !include_end, iter);
EXPECT_EQ(OB_SUCCESS, ret);
if (start <= end) {
for (int64_t i = (include_start ? start : (start + 1)); i <= (include_end ? end : (end - 1)); i++) {
ret = iter->next(skip_purge_memtable);
ret = iter->next();
EXPECT_EQ(OB_SUCCESS, ret);
assert(0 == mtk[i]->compare(*iter->get_key()));
EXPECT_EQ(&mtv[i], iter->get_value());
}
} else {
for (int64_t i = (include_start ? start : (start - 1)); i >= (include_end ? end : (end + 1)); i--) {
ret = iter->next(skip_purge_memtable);
ret = iter->next();
EXPECT_EQ(OB_SUCCESS, ret);
assert(0 == mtk[i]->compare(*iter->get_key()));
EXPECT_EQ(&mtv[i], iter->get_value());
}
}
ret = iter->next(skip_purge_memtable);
ret = iter->next();
EXPECT_EQ(OB_ITER_END, ret);
// if QueryEngine::Iterator returns ITER_END, inner iter will be freed.
ret = iter->next(skip_purge_memtable);
ret = iter->next();
EXPECT_EQ(OB_ITER_END, ret);
};
ret = qe.init(1);
ret = qe.init();
EXPECT_EQ(OB_SUCCESS, ret);
INIT_MTK(allocator, mtk[0], V("aaaa", 4), I(1024), N("1234567890.01234567890"));