init push

oceanbase-admin
2021-05-31 22:56:52 +08:00
commit cea7de1475
7020 changed files with 5689869 additions and 0 deletions

@@ -0,0 +1,249 @@
/**
* Copyright (c) 2021 OceanBase
* OceanBase CE is licensed under Mulan PubL v2.
* You can use this software according to the terms and conditions of the Mulan PubL v2.
* You may obtain a copy of Mulan PubL v2 at:
* http://license.coscl.org.cn/MulanPubL-2.0
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PubL v2 for more details.
*/
#include <gtest/gtest.h>
#include "lib/alloc/ob_malloc_allocator.h"
#define private public
#include "lib/allocator/ob_page_manager.h"
#undef private
#include "lib/allocator/ob_allocator_v2.h"
using namespace std;
using namespace oceanbase::common;
using namespace oceanbase::lib;
const uint64_t tenant_id = 100;
const uint64_t ctx_id = 2;
const int64_t limit = 1 << 30;
const lib::ObLabel& label = "1";
const uint64_t new_tenant_id = 101;
static bool has_unfree = false;
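// Presumably invoked by the allocator when it is destroyed with memory still
// outstanding; the reveal_unfree tests below assert on has_unfree to observe this.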
void has_unfree_callback()
{
has_unfree = true;
}
class TestAllocator : public ::testing::Test {
public:
virtual void SetUp()
{
ObMallocAllocator* ma = ObMallocAllocator::get_instance();
ASSERT_EQ(OB_SUCCESS, ma->create_tenant_ctx_allocator(tenant_id, ctx_id));
ASSERT_EQ(OB_SUCCESS, ma->set_tenant_limit(tenant_id, limit));
ObTenantCtxAllocator* ta = ma->get_tenant_ctx_allocator(tenant_id, ctx_id);
ASSERT_TRUE(NULL != ta);
ASSERT_EQ(OB_SUCCESS, ma->create_tenant_ctx_allocator(new_tenant_id, ctx_id));
ta = ma->get_tenant_ctx_allocator(new_tenant_id, ctx_id);
ASSERT_TRUE(NULL != ta);
}
// virtual void TearDown();
};
// ObAllocator has no state and no logic of its own; only its basic functions are tested here
TEST_F(TestAllocator, basic)
{
ObMallocAllocator* ma = ObMallocAllocator::get_instance();
ObTenantCtxAllocator* ta = ma->get_tenant_ctx_allocator(tenant_id, ctx_id);
ObMemAttr attr(tenant_id, label, ctx_id);
ObAllocator a(attr);
int64_t sz = 100;
void* p[128] = {};
int64_t cnt = 1L << 18;
sz = 1L << 4;
while (cnt--) {
int i = 0;
p[i++] = a.alloc(sz);
p[i++] = a.alloc(sz);
p[i++] = a.alloc(sz);
p[i++] = a.alloc(sz);
p[i++] = a.alloc(sz);
p[i++] = a.alloc(sz);
p[i++] = a.alloc(sz);
p[i++] = a.alloc(sz);
p[i++] = a.alloc(sz);
p[i++] = a.alloc(sz);
p[i++] = a.alloc(sz);
p[i++] = a.alloc(sz);
p[i++] = a.alloc(sz);
p[i++] = a.alloc(sz);
p[i++] = a.alloc(sz);
p[i++] = a.alloc(sz);
int64_t hold = a.used();
ASSERT_GT(hold, 0);
while (i--) {
a.free(p[i]);
}
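// Derive the next allocation size pseudo-randomly from the returned pointer's
// low bits, keeping it below 8KB.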
sz = ((sz | reinterpret_cast<size_t>(p[0])) & ((1 << 13) - 1));
}
cout << "done" << endl;
}
TEST_F(TestAllocator, reveal_unfree)
{
ObMallocAllocator* ma = ObMallocAllocator::get_instance();
ObTenantCtxAllocator* ta = ma->get_tenant_ctx_allocator(tenant_id, ctx_id);
ObMemAttr attr(tenant_id, label, ctx_id);
has_unfree = false;
// no unfree
{
ObAllocator a(attr);
const int64_t hold = a.used();
void* ptr = a.alloc(100);
ASSERT_NE(ptr, nullptr);
ASSERT_GT(a.used(), hold);
a.free(ptr);
a.~ObAllocator();
ASSERT_FALSE(has_unfree);
ASSERT_EQ(a.used(), hold);
}
// has unfree
{
ObAllocator a(attr);
const int64_t hold = a.used();
void* ptr = a.alloc(100);
ASSERT_NE(ptr, nullptr);
ASSERT_GT(a.used(), hold);
// a.free(ptr);
a.~ObAllocator();
ASSERT_TRUE(has_unfree);
ASSERT_EQ(a.used(), hold);
}
}
TEST_F(TestAllocator, reset)
{
ObMallocAllocator* ma = ObMallocAllocator::get_instance();
ObTenantCtxAllocator* ta = ma->get_tenant_ctx_allocator(tenant_id, ctx_id);
ObMemAttr attr(tenant_id, label, ctx_id);
const int64_t hold = 0;
ObAllocator a(attr);
void* ptr = a.alloc(100);
ASSERT_NE(ptr, nullptr);
ASSERT_GT(a.used(), hold);
// reset
a.reset();
ASSERT_EQ(a.used(), hold);
// alloc after reset
ptr = a.alloc(100);
ASSERT_NE(ptr, nullptr);
ASSERT_GT(a.used(), hold);
a.~ObAllocator();
ASSERT_EQ(a.used(), hold);
}
TEST_F(TestAllocator, pm_basic)
{
ObPageManager pm;
// use default
void* page = pm.alloc_page(100);
ASSERT_NE(nullptr, page);
ASSERT_EQ(pm.get_hold(), pm.set_.get_total_hold());
pm.free_page(page);
ASSERT_EQ(OB_SUCCESS, pm.set_tenant_ctx(tenant_id, ctx_id));
int64_t ps = 1024;
void* ptr = nullptr;
void* p[128] = {};
ps = 8L << 10;
int i = 0;
// For a tenant that has been switched away from, chunk release is lazy: it is
// triggered by the next allocation (i.e. the first allocation of the new tenant),
// so hold > 0 here.
int64_t hold = pm.get_hold();
while (i < 30) {
p[i] = pm.alloc_page(ps);
ASSERT_NE(nullptr, p[i++]);
ps = (8L << 10) * i;
}
ASSERT_GT(pm.get_hold(), hold);
while (i--) {
pm.free_page(p[i]);
}
ASSERT_EQ(pm.get_hold(), 0);
// freelist
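// large_size nearly fills a whole chunk, so each page maps to its own chunk;
// set_max_chunk_cache_cnt() decides how many freed chunks stay cached (hold
// unchanged after free) versus being returned immediately (hold drops), which
// is what the assertions below exercise.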
int large_size = INTACT_ACHUNK_SIZE - 200;
pm.set_max_chunk_cache_cnt(1);
ptr = pm.alloc_page(large_size);
hold = pm.get_hold();
ASSERT_GT(hold, 0);
pm.free_page(ptr);
ASSERT_EQ(pm.get_hold(), hold);
pm.set_max_chunk_cache_cnt(0);
ptr = pm.alloc_page(large_size);
ASSERT_EQ(pm.get_hold(), hold);
pm.free_page(ptr);
ASSERT_LT(pm.get_hold(), hold);
hold = pm.get_hold();
ptr = pm.alloc_page(large_size);
ASSERT_GT(pm.get_hold(), hold);
pm.free_page(ptr);
ASSERT_EQ(pm.get_hold(), hold);
pm.set_max_chunk_cache_cnt(2);
pm.alloc_page(large_size);
pm.alloc_page(large_size);
pm.alloc_page(large_size);
// switch tenant
hold = pm.get_hold();
ASSERT_EQ(OB_SUCCESS, pm.set_tenant_ctx(new_tenant_id, ctx_id));
ptr = pm.alloc_page(100);
ASSERT_LT(pm.get_hold(), hold);
ASSERT_GT(pm.get_hold(), 0);
cout << "done" << endl;
}
TEST_F(TestAllocator, pm_reveal_unfree)
{
ObMallocAllocator* ma = ObMallocAllocator::get_instance();
ObTenantCtxAllocator* ta = ma->get_tenant_ctx_allocator(tenant_id, ctx_id);
has_unfree = false;
int64_t ps = 8L << 10;
// no unfree
{
const int64_t hold = ta->get_hold();
ObPageManager pm;
ASSERT_EQ(OB_SUCCESS, pm.set_tenant_ctx(tenant_id, ctx_id));
void* ptr = pm.alloc_page(ps);
ASSERT_NE(ptr, nullptr);
ASSERT_GT(ta->get_hold(), hold);
pm.free_page(ptr);
ASSERT_EQ(ta->get_hold(), hold);
pm.~ObPageManager();
ASSERT_FALSE(has_unfree);
}
// has unfree
{
const int64_t hold = ta->get_hold();
ObPageManager pm;
ASSERT_EQ(OB_SUCCESS, pm.set_tenant_ctx(tenant_id, ctx_id));
void* ptr = pm.alloc_page(100);
ASSERT_NE(ptr, nullptr);
ASSERT_GT(ta->get_hold(), hold);
pm.~ObPageManager();
ASSERT_TRUE(has_unfree);
ASSERT_EQ(ta->get_hold(), hold);
}
}
int main(int argc, char** argv)
{
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

@@ -0,0 +1,263 @@
/**
* Copyright (c) 2021 OceanBase
* OceanBase CE is licensed under Mulan PubL v2.
* You can use this software according to the terms and conditions of the Mulan PubL v2.
* You may obtain a copy of Mulan PubL v2 at:
* http://license.coscl.org.cn/MulanPubL-2.0
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PubL v2 for more details.
*/
#include <gtest/gtest.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <algorithm>
#include "lib/allocator/ob_buddy_allocator.h"
#include "lib/allocator/page_arena.h"
#include "lib/allocator/ob_malloc.h"
#include "lib/oblog/ob_log_module.h"
using namespace oceanbase::common;
class BuddyAllocatorTest : public ::testing::Test {
public:
BuddyAllocatorTest();
virtual ~BuddyAllocatorTest();
virtual void SetUp();
virtual void TearDown();
void assign_ptr(char* ptr, int64_t ptr_len, int64_t seed);
bool is_valid_ptr(const char* ptr, int64_t ptr_len, int64_t seed);
private:
// disallow copy
BuddyAllocatorTest(const BuddyAllocatorTest& other);
BuddyAllocatorTest& operator=(const BuddyAllocatorTest& other);
private:
// data members
};
BuddyAllocatorTest::BuddyAllocatorTest()
{}
BuddyAllocatorTest::~BuddyAllocatorTest()
{}
void BuddyAllocatorTest::SetUp()
{}
void BuddyAllocatorTest::TearDown()
{}
void BuddyAllocatorTest::assign_ptr(char* ptr, int64_t ptr_len, int64_t seed)
{
OB_ASSERT(ptr_len > 0);
for (int64_t i = 0, j = seed % 255; i < ptr_len - 1; ++i, ++j) {
ptr[i] = static_cast<char>((j % 255) + 1);
}
ptr[ptr_len - 1] = 0;
}
bool BuddyAllocatorTest::is_valid_ptr(const char* ptr, int64_t ptr_len, int64_t seed)
{
OB_ASSERT(ptr_len > 0);
for (int64_t i = 0, j = seed % 255; i < ptr_len - 1; ++i, ++j) {
if (ptr[i] != static_cast<char>((j % 255) + 1)) {
COMMON_LOG(ERROR, "not equal", K(seed % 255), K(i), K((int)ptr[i]), K((j % 255) + 1), K(ptr_len));
_COMMON_LOG(ERROR, "ptr=%.*s", (int)ptr_len, ptr);
return false;
}
}
if (ptr[ptr_len - 1] != 0) {
COMMON_LOG(ERROR, "last is not 0", K(seed % 255), K((int)ptr[ptr_len - 1]), K(ptr_len));
_COMMON_LOG(ERROR, "ptr=%.*s", (int)ptr_len, ptr);
return false;
}
return true;
}
TEST_F(BuddyAllocatorTest, basic_test)
{
ObMalloc a;
void* ptr1 = NULL;
void* ptr2 = NULL;
void* ptr3 = NULL;
char* tmp = NULL;
int32_t tmp_order = 0;
int64_t tmp_size = 0;
const int64_t block_size = 128;
// int32_t block_order = 7;
void* pointer_array[1000];
ObBuddyAllocator alloc(a);
alloc.init(block_size);
// test 1: alloc 1120B
ASSERT_TRUE(NULL != (ptr1 = alloc.alloc(1120)));
_OB_LOG(INFO, "alloc 1120,ptr1 point to it");
tmp = static_cast<char*>(ptr1);
tmp--;
tmp_order = (int)(*tmp);
_OB_LOG(INFO, "order you write to memory is %d", tmp_order);
tmp_size = (1 << tmp_order) * block_size;
_OB_LOG(INFO, "you can use %ldB with pointer ptr1", tmp_size - 1);
if (ptr1 != NULL) {
alloc.free(ptr1);
}
// test 2
_OB_LOG(INFO, "info:test 2 alloc 10^5B");
ptr2 = alloc.alloc(100000);
_OB_LOG(INFO, "alloc 100000 with address is %p", ptr2);
if (ptr2 != NULL) {
alloc.free(ptr2);
}
// test 3
_OB_LOG(INFO, "info:test 3 alloc 1024B");
ptr3 = alloc.alloc(1024);
_OB_LOG(INFO, "alloc 1024 with address is %p, with %d basic page(128B)", ptr3, 16);
if (ptr3 != NULL) {
alloc.free(ptr3);
}
// test 4: alloc(negative number)
for (int i = 0; i < 20; i++) {
ASSERT_TRUE(NULL == (pointer_array[i] = alloc.alloc(-(1 << i))));
}
for (int i = 0; i < 20; i++) {
if (pointer_array[i] != NULL) {
alloc.free(pointer_array[i]);
}
}
// test 5: alloc(2^i)
ASSERT_TRUE(NULL == (pointer_array[0] = alloc.alloc(0)));
for (int i = 1; i < 18; i++) {
ASSERT_TRUE(NULL != (pointer_array[i] = alloc.alloc((1L << i))));
if (pointer_array[i] != NULL) {
_OB_LOG(INFO, "alloc(%ld) succeed", 1L << i);
} else {
_OB_LOG(INFO, "alloc(%ld) fail", 1L << i);
}
}
ASSERT_TRUE(NULL == (pointer_array[19] = alloc.alloc((1L << 19))));
ASSERT_TRUE(NULL == (pointer_array[20] = alloc.alloc((1L << 20))));
for (int i = 0; i < 20; i++) {
if (pointer_array[i] != NULL) {
alloc.free(pointer_array[i]);
}
}
// test 6: alloc(2^i + 1)
// ASSERT_TRUE(NULL == (pointer_array[0] = alloc.alloc(1L << 0)+1));
for (int i = 0; i < 18; i++) {
ASSERT_TRUE(NULL != (pointer_array[i] = alloc.alloc((1L << i) + 1)));
if (pointer_array[i] != NULL) {
_OB_LOG(INFO, "alloc(%ld) succeed", 1L << i);
}
}
ASSERT_TRUE(NULL == (pointer_array[19] = alloc.alloc((1L << 19) + 1)));
ASSERT_TRUE(NULL == (pointer_array[20] = alloc.alloc((1L << 20) + 1)));
for (int i = 0; i <= 20; i++) {
if (pointer_array[i] != NULL) {
alloc.free(pointer_array[i]);
}
}
// test 7: alloc(2^[20,30])
for (int i = 20; i < 30; i++) {
ASSERT_TRUE(NULL == (pointer_array[i] = alloc.alloc((1L << i))));
if (pointer_array[i] != NULL) {
_OB_LOG(INFO, "alloc(%ld) succeed", 1L << i);
}
}
for (int i = 20; i < 30; i++) {
if (pointer_array[i] != NULL) {
alloc.free(pointer_array[i]);
}
}
_OB_LOG(INFO, "free ptr");
}
class ObPtrStore {
public:
int64_t seed_;
int64_t ptr_len_;
char* ptr_;
};
TEST_F(BuddyAllocatorTest, random_test)
{
srand(static_cast<unsigned int>(time(NULL)));
ObArenaAllocator base_allocator(ObModIds::TEST);
ObBuddyAllocator alloc(base_allocator);
ASSERT_TRUE(OB_SUCCESS == alloc.init(2));
const static int64_t MAX_PTR_ARRAY_LEN = 1024;
ObPtrStore ptr_store_arr[MAX_PTR_ARRAY_LEN];
for (int64_t i = 0; i < MAX_PTR_ARRAY_LEN; ++i) {
int64_t random_alloc_size = rand() % 511 + 2;
char* tmp_ptr = static_cast<char*>(alloc.alloc(random_alloc_size));
ASSERT_TRUE(NULL != tmp_ptr);
int64_t random_seed = rand();
assign_ptr(tmp_ptr, random_alloc_size, random_seed);
ObPtrStore tmp_ptr_store;
tmp_ptr_store.seed_ = random_seed;
tmp_ptr_store.ptr_len_ = random_alloc_size;
tmp_ptr_store.ptr_ = tmp_ptr;
ptr_store_arr[i] = tmp_ptr_store;
}
for (int64_t i = 0; i < MAX_PTR_ARRAY_LEN; ++i) {
bool ptr_is_valid = is_valid_ptr(ptr_store_arr[i].ptr_, ptr_store_arr[i].ptr_len_, ptr_store_arr[i].seed_);
if (!ptr_is_valid) {
COMMON_LOG(ERROR, "ptr is not valid", K(ptr_is_valid), K(i));
}
ASSERT_TRUE(true == ptr_is_valid);
}
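// Stress loop: shuffle the live pointers, free the oldest one, shift the array
// forward, allocate a fresh randomly-sized buffer into the last slot, and then
// re-verify the contents of every outstanding buffer.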
for (int64_t time = 0; time < 10000; ++time) {
// shuffle the live pointers
for (int64_t i = 0; i < MAX_PTR_ARRAY_LEN; ++i) {
int64_t select_pos = rand() % (MAX_PTR_ARRAY_LEN - i) + i;
if (select_pos != i) {
ObPtrStore tmp_store = ptr_store_arr[i];
ptr_store_arr[i] = ptr_store_arr[select_pos];
ptr_store_arr[select_pos] = tmp_store;
}
}
// Pop the pointer at the head of the queue and free it
ASSERT_TRUE(NULL != ptr_store_arr[0].ptr_);
alloc.free(ptr_store_arr[0].ptr_);
// Shift the whole array forward by one slot
for (int64_t i = 0; i < MAX_PTR_ARRAY_LEN - 1; ++i) {
ptr_store_arr[i] = ptr_store_arr[i + 1];
}
// Allocate a fresh buffer into the last slot of the array
int64_t random_alloc_size = rand() % 511 + 2;
char* tmp_ptr = static_cast<char*>(alloc.alloc(random_alloc_size));
ASSERT_TRUE(NULL != tmp_ptr);
int64_t random_seed = rand();
assign_ptr(tmp_ptr, random_alloc_size, random_seed);
ObPtrStore tmp_ptr_store;
tmp_ptr_store.seed_ = random_seed;
tmp_ptr_store.ptr_len_ = random_alloc_size;
tmp_ptr_store.ptr_ = tmp_ptr;
ptr_store_arr[MAX_PTR_ARRAY_LEN - 1] = tmp_ptr_store;
// Check again
for (int64_t i = 0; i < MAX_PTR_ARRAY_LEN; ++i) {
ASSERT_TRUE(true == is_valid_ptr(ptr_store_arr[i].ptr_, ptr_store_arr[i].ptr_len_, ptr_store_arr[i].seed_));
}
}
_OB_LOG(INFO, "free ptr");
}
int main(int argc, char** argv)
{
OB_LOGGER.set_log_level("DEBUG");
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

@@ -0,0 +1,242 @@
/**
* Copyright (c) 2021 OceanBase
* OceanBase CE is licensed under Mulan PubL v2.
* You can use this software according to the terms and conditions of the Mulan PubL v2.
* You may obtain a copy of Mulan PubL v2 at:
* http://license.coscl.org.cn/MulanPubL-2.0
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PubL v2 for more details.
*/
#include <pthread.h>
#include "lib/allocator/ob_concurrent_fifo_allocator.h"
#include "lib/allocator/ob_mod_define.h"
#include "gtest/gtest.h"
#include "lib/coro/testing.h"
using namespace oceanbase;
using namespace oceanbase::common;
static const int64_t TOTAL_SIZE = 1024l * 1024l * 1024l * 8l;
static const int64_t PAGE_SIZE = 64 * 1024;
class TestC {
public:
TestC()
{}
virtual ~TestC()
{}
public:
// a successful virtual function invocation indicates an intact vtable
virtual void set_mem_a(int64_t value)
{
mem_a_ = value;
}
virtual int64_t get_mem_a()
{
return mem_a_;
}
virtual void set_mem_b(int64_t value)
{
mem_b_ = value;
}
virtual int64_t get_mem_b()
{
return mem_b_;
}
private:
int64_t mem_a_;
int64_t mem_b_;
};
ObConcurrentFIFOAllocator allocator;
TEST(TestConcurrentFIFOAllocator, single_thread)
{
LIB_LOG(INFO, "start single thread test");
static const int64_t loop = 4096;
TestC* ptr_buffer[loop];
ASSERT_EQ(OB_SUCCESS, allocator.init(TOTAL_SIZE, TOTAL_SIZE, PAGE_SIZE));
for (int64_t i = 0; i < loop; ++i) {
ptr_buffer[i] = NULL;
}
for (int64_t i = 0; i < loop; ++i) {
void* ptr = allocator.alloc(sizeof(TestC));
ASSERT_TRUE(NULL != ptr);
TestC* test_c = new (ptr) TestC();
test_c->set_mem_a(i);
test_c->set_mem_b(i);
ptr_buffer[i] = test_c;
}
for (int64_t i = 0; i < loop; ++i) {
ptr_buffer[i]->~TestC();
allocator.free(ptr_buffer[i]);
ptr_buffer[i] = NULL;
}
allocator.destroy();
}
TEST(TestConcurrentFIFOAllocator, single_thread2)
{
LIB_LOG(INFO, "start single thread test2");
static const int64_t MALLOC_PER_LOOP = 1024;
static const int64_t LOOP = 32 * 512;
void* ptr_buffer[MALLOC_PER_LOOP];
ASSERT_EQ(OB_SUCCESS, allocator.init(TOTAL_SIZE, TOTAL_SIZE, PAGE_SIZE));
for (int64_t i = 0; i < MALLOC_PER_LOOP; ++i) {
ptr_buffer[i] = NULL;
}
for (int64_t loop = 0; loop < LOOP; ++loop) {
for (int64_t i = 0; i < MALLOC_PER_LOOP; ++i) {
void* ptr = allocator.alloc(sizeof(TestC));
ASSERT_TRUE(NULL != ptr);
ptr_buffer[i] = ptr;
}
for (int64_t i = 0; i < MALLOC_PER_LOOP; ++i) {
allocator.free(ptr_buffer[i]);
ptr_buffer[i] = NULL;
}
}
allocator.destroy();
}
ObConcurrentFIFOAllocator allocator1;
pthread_barrier_t barrier1;
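// Each thread requests 64KB per allocation, i.e. a full page, so the FIFO
// allocator presumably has to satisfy every request directly rather than
// carving it out of a cached page, hence "direct alloc".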
void* th_direct_alloc_func(void* arg)
{
UNUSED(arg);
static const int64_t MALLOC_TIMES_PER_THREAD = 1024;
void* ptr_buffer[MALLOC_TIMES_PER_THREAD];
for (int64_t i = 0; i < MALLOC_TIMES_PER_THREAD; ++i) {
ptr_buffer[i] = NULL;
}
pthread_barrier_wait(&barrier1);
for (int64_t times = 0; times < MALLOC_TIMES_PER_THREAD; ++times) {
void* ptr = allocator1.alloc(65536);
EXPECT_TRUE(NULL != ptr);
ptr_buffer[times] = ptr;
}
for (int64_t times = 0; times < MALLOC_TIMES_PER_THREAD; ++times) {
allocator1.free(ptr_buffer[times]);
ptr_buffer[times] = NULL;
}
return NULL;
}
TEST(TestConcurrentFIFOAllocator, multipe_threads_direct_alloc)
{
LIB_LOG(INFO, "start multiple threads direct alloc test");
static const int64_t THREAD_NUM = 16;
pthread_t work_thread[THREAD_NUM];
ASSERT_EQ(OB_SUCCESS, allocator1.init(TOTAL_SIZE, TOTAL_SIZE, PAGE_SIZE));
ASSERT_EQ(0, pthread_barrier_init(&barrier1, NULL, THREAD_NUM));
for (int64_t i = 0; i < THREAD_NUM; ++i) {
int ret = pthread_create(&work_thread[i], NULL, &th_direct_alloc_func, NULL);
ASSERT_EQ(0, ret);
}
for (int64_t i = 0; i < THREAD_NUM; ++i) {
ASSERT_EQ(0, pthread_join(work_thread[i], NULL));
}
ASSERT_EQ(0, pthread_barrier_destroy(&barrier1));
allocator1.destroy();
}
void* th_normal_alloc_func(int64_t size)
{
static const int64_t MALLOC_PER_LOOP = 1024;
static const int64_t LOOP = 512;
void* ptr_buffer[MALLOC_PER_LOOP];
for (int64_t i = 0; i < MALLOC_PER_LOOP; ++i) {
ptr_buffer[i] = NULL;
}
pthread_barrier_wait(&barrier1);
for (int64_t i = 0; i < LOOP; ++i) {
for (int64_t times = 0; times < MALLOC_PER_LOOP; ++times) {
void* ptr = allocator1.alloc(size);
EXPECT_TRUE(NULL != ptr);
ptr_buffer[times] = ptr;
}
for (int64_t times = 0; times < MALLOC_PER_LOOP; ++times) {
allocator1.free(ptr_buffer[times]);
ptr_buffer[times] = NULL;
}
}
return NULL;
}
TEST(TestConcurrentFIFOAllocator, multiple_threads_normal_alloc_32B)
{
LIB_LOG(INFO, "start multipe threads normal alloc test");
static const int THREAD_NUM = 8;
static int64_t ALLOC_SIZE = 32;
ASSERT_EQ(OB_SUCCESS, allocator1.init(TOTAL_SIZE, TOTAL_SIZE, PAGE_SIZE));
ASSERT_EQ(0, pthread_barrier_init(&barrier1, NULL, THREAD_NUM));
cotesting::FlexPool([] { th_normal_alloc_func(ALLOC_SIZE); }, THREAD_NUM).start();
ASSERT_EQ(0, pthread_barrier_destroy(&barrier1));
allocator1.destroy();
}
TEST(TestConcurrentFIFOAllocator, multiple_threads_normal_alloc_128B)
{
LIB_LOG(INFO, "start multipe threads normal alloc test");
static const int THREAD_NUM = 8;
static int64_t ALLOC_SIZE = 128;
ASSERT_EQ(OB_SUCCESS, allocator1.init(TOTAL_SIZE, TOTAL_SIZE, PAGE_SIZE));
ASSERT_EQ(0, pthread_barrier_init(&barrier1, NULL, THREAD_NUM));
cotesting::FlexPool([] { th_normal_alloc_func(ALLOC_SIZE); }, THREAD_NUM).start();
ASSERT_EQ(0, pthread_barrier_destroy(&barrier1));
allocator1.destroy();
}
TEST(TestConcurrentFIFOAllocator, multiple_threads_normal_alloc_1K)
{
LIB_LOG(INFO, "start multipe threads normal alloc test");
static const int THREAD_NUM = 8;
static int64_t ALLOC_SIZE = 1024;
ASSERT_EQ(OB_SUCCESS, allocator1.init(TOTAL_SIZE, TOTAL_SIZE, PAGE_SIZE));
ASSERT_EQ(0, pthread_barrier_init(&barrier1, NULL, THREAD_NUM));
cotesting::FlexPool([] { th_normal_alloc_func(ALLOC_SIZE); }, THREAD_NUM).start();
ASSERT_EQ(0, pthread_barrier_destroy(&barrier1));
allocator1.destroy();
}
TEST(TestConcurrentFIFOAllocator, multiple_threads_normal_alloc_4K)
{
LIB_LOG(INFO, "start multipe threads normal alloc test");
static const int THREAD_NUM = 8;
static int64_t ALLOC_SIZE = 4 * 1024;
ASSERT_EQ(OB_SUCCESS, allocator1.init(TOTAL_SIZE, TOTAL_SIZE, PAGE_SIZE));
ASSERT_EQ(0, pthread_barrier_init(&barrier1, NULL, THREAD_NUM));
cotesting::FlexPool([] { th_normal_alloc_func(ALLOC_SIZE); }, THREAD_NUM).start();
ASSERT_EQ(0, pthread_barrier_destroy(&barrier1));
allocator1.destroy();
}
TEST(TestConcurrentFIFOAllocator, multiple_threads_normal_alloc_16K)
{
LIB_LOG(INFO, "start multipe threads normal alloc test");
static const int THREAD_NUM = 8;
static int64_t ALLOC_SIZE = 16 * 1024;
ASSERT_EQ(OB_SUCCESS, allocator1.init(TOTAL_SIZE, PAGE_SIZE, PAGE_SIZE));
ASSERT_EQ(0, pthread_barrier_init(&barrier1, NULL, THREAD_NUM));
cotesting::FlexPool([] { th_normal_alloc_func(ALLOC_SIZE); }, THREAD_NUM).start();
ASSERT_EQ(0, pthread_barrier_destroy(&barrier1));
allocator1.destroy();
}
int main(int argc, char** argv)
{
system("rm -f test_lf_fifo_allocator.log*");
ObLogger& logger = ObLogger::get_logger();
logger.set_file_name("test_lf_fifo_allocator.log", true);
logger.set_log_level("info");
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

File diff suppressed because it is too large.

@@ -0,0 +1,390 @@
/**
* Copyright (c) 2021 OceanBase
* OceanBase CE is licensed under Mulan PubL v2.
* You can use this software according to the terms and conditions of the Mulan PubL v2.
* You may obtain a copy of Mulan PubL v2 at:
* http://license.coscl.org.cn/MulanPubL-2.0
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PubL v2 for more details.
*/
#include <pthread.h>
#include "gtest/gtest.h"
#include "lib/allocator/ob_lf_fifo_allocator.h"
#include "lib/allocator/ob_mod_define.h"
#include "lib/hash/ob_concurrent_hash_map.h"
#include "lib/hash/ob_hashmap.h"
#include "lib/coro/testing.h"
using namespace oceanbase;
using namespace oceanbase::common;
static const int64_t PAGE_SIZE = 64 * 1024;
class TestC {
public:
TestC()
{}
virtual ~TestC()
{}
public:
// a successful virtual function invocation indicates an intact vtable
virtual void set_mem_a(int64_t value)
{
mem_a_ = value;
}
virtual int64_t get_mem_a()
{
return mem_a_;
}
virtual void set_mem_b(int64_t value)
{
mem_b_ = value;
}
virtual int64_t get_mem_b()
{
return mem_b_;
}
private:
int64_t mem_a_;
int64_t mem_b_;
};
TEST(TestLfFIFOAllocator, single_thread)
{
LIB_LOG(INFO, "start single thread test");
static const int64_t loop = 4096;
TestC* ptr_buffer[loop];
ObLfFIFOAllocator allocator;
ASSERT_EQ(OB_SUCCESS, allocator.init(PAGE_SIZE, ObModIds::OB_PARTITION_STORAGE));
for (int64_t i = 0; i < loop; ++i) {
ptr_buffer[i] = NULL;
}
for (int64_t i = 0; i < loop; ++i) {
void* ptr = allocator.alloc(sizeof(TestC));
ASSERT_TRUE(NULL != ptr);
TestC* test_c = new (ptr) TestC();
test_c->set_mem_a(i);
test_c->set_mem_b(i);
ptr_buffer[i] = test_c;
}
for (int64_t i = 0; i < loop; ++i) {
ptr_buffer[i]->~TestC();
allocator.free(ptr_buffer[i]);
ptr_buffer[i] = NULL;
}
}
TEST(TestLfFIFOAllocator, single_thread2)
{
LIB_LOG(INFO, "start single thread test2");
static const int64_t MALLOC_PER_LOOP = 1024;
static const int64_t LOOP = 32 * 512;
void* ptr_buffer[MALLOC_PER_LOOP];
ObLfFIFOAllocator allocator;
ASSERT_EQ(OB_SUCCESS, allocator.init(PAGE_SIZE, ObModIds::OB_PARTITION_STORAGE));
for (int64_t i = 0; i < MALLOC_PER_LOOP; ++i) {
ptr_buffer[i] = NULL;
}
for (int64_t loop = 0; loop < LOOP; ++loop) {
for (int64_t i = 0; i < MALLOC_PER_LOOP; ++i) {
void* ptr = allocator.alloc(sizeof(TestC));
ASSERT_TRUE(NULL != ptr);
ptr_buffer[i] = ptr;
}
for (int64_t i = 0; i < MALLOC_PER_LOOP; ++i) {
allocator.free(ptr_buffer[i]);
ptr_buffer[i] = NULL;
}
}
}
ObLfFIFOAllocator allocator1;
pthread_barrier_t barrier1;
// typedef common::ObConcurrentHashMap<int64_t, int64_t> PointerContainer;
typedef common::hash::ObHashMap<int64_t, int64_t> PointerContainer;
PointerContainer pc;
void* th_direct_alloc_func(void* arg)
{
UNUSED(arg);
static const int64_t MALLOC_TIMES_PER_THREAD = 1024;
void* ptr_buffer[MALLOC_TIMES_PER_THREAD];
for (int64_t i = 0; i < MALLOC_TIMES_PER_THREAD; ++i) {
ptr_buffer[i] = NULL;
}
pthread_barrier_wait(&barrier1);
for (int64_t times = 0; times < MALLOC_TIMES_PER_THREAD; ++times) {
void* ptr = allocator1.alloc(65536);
EXPECT_TRUE(NULL != ptr);
ptr_buffer[times] = ptr;
}
for (int64_t times = 0; times < MALLOC_TIMES_PER_THREAD; ++times) {
allocator1.free(ptr_buffer[times]);
ptr_buffer[times] = NULL;
}
return NULL;
}
TEST(TestLfFIFOAllocator, multipe_threads_direct_alloc)
{
LIB_LOG(INFO, "start multiple threads direct alloc test");
static const int64_t THREAD_NUM = 16;
pthread_t work_thread[THREAD_NUM];
ASSERT_EQ(OB_SUCCESS, allocator1.init(PAGE_SIZE, ObModIds::OB_PARTITION_STORAGE));
ASSERT_EQ(0, pthread_barrier_init(&barrier1, NULL, THREAD_NUM));
for (int64_t i = 0; i < THREAD_NUM; ++i) {
int ret = pthread_create(&work_thread[i], NULL, &th_direct_alloc_func, NULL);
ASSERT_EQ(0, ret);
}
for (int64_t i = 0; i < THREAD_NUM; ++i) {
ASSERT_EQ(0, pthread_join(work_thread[i], NULL));
}
ASSERT_EQ(0, pthread_barrier_destroy(&barrier1));
allocator1.destroy();
}
void* th_normal_alloc_func(void* arg)
{
int64_t size = *(int64_t*)arg;
static const int64_t MALLOC_PER_LOOP = 1024;
static const int64_t LOOP = 8192;
void* ptr_buffer[MALLOC_PER_LOOP];
for (int64_t i = 0; i < MALLOC_PER_LOOP; ++i) {
ptr_buffer[i] = NULL;
}
pthread_barrier_wait(&barrier1);
for (int64_t i = 0; i < LOOP; ++i) {
for (int64_t times = 0; times < MALLOC_PER_LOOP; ++times) {
void* ptr = allocator1.alloc(size);
EXPECT_TRUE(NULL != ptr);
ptr_buffer[times] = ptr;
}
for (int64_t times = 0; times < MALLOC_PER_LOOP; ++times) {
allocator1.free(ptr_buffer[times]);
ptr_buffer[times] = NULL;
}
}
return NULL;
}
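// book_alloc/book_free record every live pointer in a hash map so that a
// double-allocation or double-free would be detected; both calls are currently
// commented out in the timing loop below.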
int book_alloc(const int64_t size, void*& ptr)
{
ptr = allocator1.alloc(size);
int64_t ptr_key = reinterpret_cast<int64_t>(ptr);
int64_t ptr_value = 0;
int ret = pc.get_refactored(ptr_key, ptr_value);
if (OB_SUCCESS != ret) {
ret = pc.set_refactored(ptr_key, 1, 0);
if (OB_SUCCESS == ret) {
} else {
printf("put key[%ld], value[%ld] ret[%d].\n", ptr_key, ptr_value, ret);
ret = OB_ERR_UNEXPECTED;
}
} else {
if (0 != ptr_value) {
printf("bug here! found same ptr_key[%ld] not free.", ptr_key);
ret = OB_ERR_UNEXPECTED;
} else {
// overwrite;
ret = pc.set_refactored(ptr_key, 1, 1);
if (OB_SUCCESS != ret) {
printf("system error, cannot overwrite ptr_key[%ld] with 1,ret[%d]\n", ptr_key, ret);
ret = OB_ERR_UNEXPECTED;
} else {
ret = OB_SUCCESS;
}
}
}
return ret;
}
int book_free(void* ptr)
{
int64_t ptr_key = reinterpret_cast<int64_t>(ptr);
int64_t ptr_value = 0;
int ret = pc.get_refactored(ptr_key, ptr_value);
if (OB_SUCCESS != ret) {
printf("bug here! ptr_key[%ld] has not allocated[%d].\n", ptr_key, ret);
} else {
if (1 != ptr_value) {
printf("bug here! found ptr_key[%ld] has freed[%ld].\n", ptr_key, ptr_value);
} else {
// overwrite;
allocator1.free(ptr);
ret = pc.set_refactored(ptr_key, 0, 1);
if (OB_SUCCESS != ret) {
printf("system error, cannot overwrite ptr_key[%ld] with 0,ret[%d]\n", ptr_key, ret);
ret = OB_ERR_UNEXPECTED;
} else {
ret = OB_SUCCESS;
}
}
}
return ret;
}
void* th_normal_alloc_free_func(void* arg)
{
int64_t size = *(int64_t*)arg;
UNUSED(size);
static const int64_t MALLOC_PER_LOOP = 16;
static const int64_t LOOP = 8192;
void* ptr_buffer[MALLOC_PER_LOOP];
int64_t alloc_time = 0;
int64_t free_time = 0;
int64_t start = 0;
int64_t end = 0;
int ret = 0;
for (int64_t i = 0; i < MALLOC_PER_LOOP; ++i) {
ptr_buffer[i] = NULL;
}
pthread_barrier_wait(&barrier1);
for (int64_t i = 0; i < LOOP; ++i) {
for (int64_t times = 0; times < MALLOC_PER_LOOP; ++times) {
start = ObTimeUtility::current_time();
// ret = book_alloc(32, ptr_buffer[times]);
ptr_buffer[times] = allocator1.alloc(32);
end = ObTimeUtility::current_time();
alloc_time += (end - start);
EXPECT_TRUE(ret == 0);
EXPECT_TRUE(NULL != ptr_buffer[times]);
}
for (int64_t times = 0; times < MALLOC_PER_LOOP; ++times) {
start = ObTimeUtility::current_time();
// ret = book_free(ptr_buffer[times]);
allocator1.free(ptr_buffer[times]);
end = ObTimeUtility::current_time();
free_time += (end - start);
EXPECT_TRUE(ret == 0);
ptr_buffer[times] = NULL;
}
}
printf("average alloc[%ld], free[%ld]\n", alloc_time / LOOP / MALLOC_PER_LOOP, free_time / LOOP / MALLOC_PER_LOOP);
return NULL;
}
TEST(TestLfFIFOAllocator, multiple_threads_normal_alloc_32B)
{
LIB_LOG(INFO, "start multipe threads normal alloc test");
static const int64_t THREAD_NUM = 8;
static int64_t ALLOC_SIZE = 32;
pthread_t work_thread[THREAD_NUM];
ASSERT_EQ(OB_SUCCESS, allocator1.init(PAGE_SIZE, ObModIds::OB_PARTITION_STORAGE));
ASSERT_EQ(0, pthread_barrier_init(&barrier1, NULL, THREAD_NUM));
for (int64_t i = 0; i < THREAD_NUM; ++i) {
int ret = pthread_create(&work_thread[i], NULL, &th_normal_alloc_func, &ALLOC_SIZE);
ASSERT_EQ(0, ret);
}
for (int64_t i = 0; i < THREAD_NUM; ++i) {
ASSERT_EQ(0, pthread_join(work_thread[i], NULL));
}
ASSERT_EQ(0, pthread_barrier_destroy(&barrier1));
allocator1.destroy();
}
TEST(TestLfFIFOAllocator, multiple_threads_normal_alloc_128B)
{
LIB_LOG(INFO, "start multipe threads normal alloc test");
static const int64_t THREAD_NUM = 8;
static int64_t ALLOC_SIZE = 128;
pthread_t work_thread[THREAD_NUM];
ASSERT_EQ(OB_SUCCESS, allocator1.init(PAGE_SIZE, ObModIds::OB_PARTITION_STORAGE));
ASSERT_EQ(0, pthread_barrier_init(&barrier1, NULL, THREAD_NUM));
for (int64_t i = 0; i < THREAD_NUM; ++i) {
int ret = pthread_create(&work_thread[i], NULL, &th_normal_alloc_func, &ALLOC_SIZE);
ASSERT_EQ(0, ret);
}
for (int64_t i = 0; i < THREAD_NUM; ++i) {
ASSERT_EQ(0, pthread_join(work_thread[i], NULL));
}
ASSERT_EQ(0, pthread_barrier_destroy(&barrier1));
allocator1.destroy();
}
TEST(TestLfFIFOAllocator, multiple_threads_normal_alloc_1K)
{
LIB_LOG(INFO, "start multipe threads normal alloc test");
static const int64_t THREAD_NUM = 8;
static int64_t ALLOC_SIZE = 1024;
pthread_t work_thread[THREAD_NUM];
ASSERT_EQ(OB_SUCCESS, allocator1.init(PAGE_SIZE, ObModIds::OB_PARTITION_STORAGE));
ASSERT_EQ(0, pthread_barrier_init(&barrier1, NULL, THREAD_NUM));
for (int64_t i = 0; i < THREAD_NUM; ++i) {
int ret = pthread_create(&work_thread[i], NULL, &th_normal_alloc_func, &ALLOC_SIZE);
ASSERT_EQ(0, ret);
}
for (int64_t i = 0; i < THREAD_NUM; ++i) {
ASSERT_EQ(0, pthread_join(work_thread[i], NULL));
}
ASSERT_EQ(0, pthread_barrier_destroy(&barrier1));
allocator1.destroy();
}
TEST(TestLfFIFOAllocator, multiple_threads_normal_alloc_4K)
{
LIB_LOG(INFO, "start multipe threads normal alloc test");
static const int64_t THREAD_NUM = 8;
static int64_t ALLOC_SIZE = 4 * 1024;
pthread_t work_thread[THREAD_NUM];
ASSERT_EQ(OB_SUCCESS, allocator1.init(PAGE_SIZE, ObModIds::OB_PARTITION_STORAGE));
ASSERT_EQ(0, pthread_barrier_init(&barrier1, NULL, THREAD_NUM));
for (int64_t i = 0; i < THREAD_NUM; ++i) {
int ret = pthread_create(&work_thread[i], NULL, &th_normal_alloc_func, &ALLOC_SIZE);
ASSERT_EQ(0, ret);
}
for (int64_t i = 0; i < THREAD_NUM; ++i) {
ASSERT_EQ(0, pthread_join(work_thread[i], NULL));
}
ASSERT_EQ(0, pthread_barrier_destroy(&barrier1));
allocator1.destroy();
}
TEST(TestLfFIFOAllocator, multiple_threads_normal_alloc_16K)
{
LIB_LOG(INFO, "start multipe threads normal alloc test");
static const int64_t THREAD_NUM = 8;
static int64_t ALLOC_SIZE = 16 * 1024;
pthread_t work_thread[THREAD_NUM];
ASSERT_EQ(OB_SUCCESS, allocator1.init(PAGE_SIZE, ObModIds::OB_PARTITION_STORAGE));
ASSERT_EQ(0, pthread_barrier_init(&barrier1, NULL, THREAD_NUM));
for (int64_t i = 0; i < THREAD_NUM; ++i) {
int ret = pthread_create(&work_thread[i], NULL, &th_normal_alloc_func, &ALLOC_SIZE);
ASSERT_EQ(0, ret);
}
for (int64_t i = 0; i < THREAD_NUM; ++i) {
ASSERT_EQ(0, pthread_join(work_thread[i], NULL));
}
ASSERT_EQ(0, pthread_barrier_destroy(&barrier1));
allocator1.destroy();
}
TEST(TestLfFIFOAllocator, multiple_threads_normal_alloc_free_32B)
{
LIB_LOG(INFO, "start multipe threads normal alloc test");
static const int64_t THREAD_NUM = 128;
static int64_t ALLOC_SIZE = 32;
ASSERT_EQ(OB_SUCCESS, pc.create(10240, ObModIds::OB_CS_COMMON, ObModIds::OB_CS_COMMON));
ASSERT_EQ(OB_SUCCESS, allocator1.init(PAGE_SIZE, ObModIds::OB_PARTITION_STORAGE));
ASSERT_EQ(0, pthread_barrier_init(&barrier1, NULL, THREAD_NUM));
cotesting::FlexPool([&] { th_normal_alloc_free_func(&ALLOC_SIZE); }, THREAD_NUM).start();
ASSERT_EQ(0, pthread_barrier_destroy(&barrier1));
allocator1.destroy();
}
int main(int argc, char** argv)
{
system("rm -f test_lf_fifo_allocator.log*");
ObLogger& logger = ObLogger::get_logger();
logger.set_file_name("test_lf_fifo_allocator.log", true);
logger.set_log_level("info");
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

@@ -0,0 +1,150 @@
/**
* Copyright (c) 2021 OceanBase
* OceanBase CE is licensed under Mulan PubL v2.
* You can use this software according to the terms and conditions of the Mulan PubL v2.
* You may obtain a copy of Mulan PubL v2 at:
* http://license.coscl.org.cn/MulanPubL-2.0
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PubL v2 for more details.
*/
#include <gtest/gtest.h>
#include "lib/allocator/page_arena.h"
using namespace oceanbase::common;
using namespace oceanbase::lib;
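// Counting page allocator: forwards to ob_malloc/ob_free while recording how
// many pages the arena requests and releases, so the tests can assert on page
// traffic via the CHECK/RESET macros below.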
struct MyPageAllocator : public ObIAllocator {
void* alloc(const int64_t sz)
{
alloc_count_++;
return ob_malloc(sz);
}
void free(void* p)
{
free_count_++;
ob_free(p);
}
void freed(const int64_t sz)
{
UNUSED(sz);
}
void set_label(const lib::ObLabel& label)
{
UNUSED(label);
}
void set_tenant_id(uint64_t tenant_id)
{
UNUSED(tenant_id);
}
lib::ObLabel get_label() const
{
return 0;
}
static int64_t alloc_count_;
static int64_t free_count_;
};
typedef PageArena<char, MyPageAllocator> MyModuleArena;
int64_t MyPageAllocator::alloc_count_;
int64_t MyPageAllocator::free_count_;
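// CHECK asserts the cumulative page alloc/free counts; RESET additionally
// requires them to balance before zeroing both counters.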
#define CHECK(expect_ac, expect_fc) \
do { \
int64_t& ac = MyPageAllocator::alloc_count_; \
int64_t& fc = MyPageAllocator::free_count_; \
EXPECT_EQ(expect_ac, ac); \
EXPECT_EQ(expect_fc, fc); \
} while (0)
#define RESET() \
do { \
int64_t& ac = MyPageAllocator::alloc_count_; \
int64_t& fc = MyPageAllocator::free_count_; \
EXPECT_EQ(ac, fc); \
ac = fc = 0; \
} while (0)
TEST(TestPageArena, Basic)
{
{
CHECK(0, 0);
MyModuleArena ma;
// The default page size is 8K-32 and each page carries a 32-byte header, so a
// normal page holds at most 8K-32-32 = 8128 bytes of payload. Allocating
// 8K-32-31 bytes (one byte more than fits) therefore allocates both the
// default normal page and an extra big page.
ma.alloc(8192 - 32 - 31);
CHECK(2, 0);
ma.free();
CHECK(2, 2);
RESET();
// An allocation of exactly 8K-32-32 bytes, by contrast, fits in the first
// normal page.
ma.alloc(8192 - 32 - 32);
CHECK(1, 0);
}
CHECK(1, 1);
RESET();
}
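// set_tracer() records the arena's current position; revert_tracer() frees
// every page obtained after that mark, which the CHECK counts below observe.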
TEST(TestPageArena, Tracer1)
{
MyModuleArena ma;
CHECK(0, 0);
ma.alloc(10);
CHECK(1, 0);
EXPECT_TRUE(ma.set_tracer());
ma.alloc(8192);
ma.alloc(8192);
ma.alloc(8192);
CHECK(4, 0);
constexpr auto N = 8192 - 32 - 32;
for (int i = 0; i < N; i++) {
ma.alloc(1);
}
CHECK(5, 0);
EXPECT_TRUE(ma.revert_tracer());
CHECK(5, 4);
ma.free();
RESET();
}
TEST(TestPageArena, Tracer2)
{
MyModuleArena ma;
CHECK(0, 0);
for (int i = 0; i < 2; i++) {
EXPECT_TRUE(ma.set_tracer());
CHECK(3 * i, 3 * i);
// a batch of small allocations
constexpr auto N = 8192 - 32 - 32;
for (int j = 0; j < N; j++) {
ma.alloc(1);
}
CHECK(1 + 3 * i, 3 * i);
// a couple of large allocations
ma.alloc(8192);
ma.alloc(8192);
EXPECT_TRUE(ma.revert_tracer());
CHECK(3 + 3 * i, 3 + 3 * i);
}
ma.free();
RESET();
}
int main(int argc, char** argv)
{
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

@@ -0,0 +1,319 @@
/**
* Copyright (c) 2021 OceanBase
* OceanBase CE is licensed under Mulan PubL v2.
* You can use this software according to the terms and conditions of the Mulan PubL v2.
* You may obtain a copy of Mulan PubL v2 at:
* http://license.coscl.org.cn/MulanPubL-2.0
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PubL v2 for more details.
*/
#include "lib/allocator/ob_slice_alloc.h"
#include "lib/allocator/ob_vslice_alloc.h"
#include "lib/allocator/ob_small_allocator.h"
#include "lib/allocator/ob_simple_fifo_alloc.h"
#include "lib/objectpool/ob_concurrency_objpool.h"
#include "lib/queue/ob_link_queue.h"
#include "lib/time/ob_time_utility.h"
#include "lib/metrics/ob_counter.h"
using namespace oceanbase;
using namespace oceanbase::common;
#define ISIZE 64
struct AllocInterface {
public:
virtual void* alloc() = 0;
virtual void free(void* p) = 0;
virtual int64_t hold()
{
return 0;
}
virtual void set_nway(int nway)
{
UNUSED(nway);
}
};
struct MallocWrapper : public AllocInterface {
public:
void* alloc()
{
return ::malloc(ISIZE);
}
void free(void* p)
{
::free(p);
}
};
struct ObMallocWrapper : public AllocInterface {
public:
void* alloc()
{
return common::ob_malloc(ISIZE);
}
void free(void* p)
{
common::ob_free(p);
}
};
struct SmallAllocWrapper : public AllocInterface, public ObSmallAllocator {
public:
SmallAllocWrapper()
{
alloc_.init(ISIZE);
}
void* alloc()
{
return alloc_.alloc();
}
void free(void* p)
{
alloc_.free(p);
}
private:
ObSmallAllocator alloc_;
};
struct OpAllocWrapper : public AllocInterface {
public:
struct Item {
char data[ISIZE];
};
void* alloc()
{
return (void*)op_alloc(Item);
}
void free(void* p)
{
return op_free((Item*)p);
}
};
struct OpReclaimAllocWrapper : public AllocInterface {
public:
struct Item {
char data[ISIZE];
};
void* alloc()
{
return (void*)op_reclaim_alloc(Item);
}
void free(void* p)
{
return op_reclaim_free((Item*)p);
}
};
ObMemAttr mem_attr;
struct SliceAllocWrapper : public AllocInterface {
public:
SliceAllocWrapper() : alloc_(ISIZE, mem_attr, OB_MALLOC_BIG_BLOCK_SIZE)
{
alloc_.set_nway(64);
}
void* alloc()
{
return alloc_.alloc();
}
void free(void* p)
{
return alloc_.free(p);
}
void set_nway(int nway)
{
alloc_.set_nway(nway);
}
int64_t hold()
{
return alloc_.hold();
}
private:
ObSliceAlloc alloc_;
};
struct VSliceAllocWrapper : public AllocInterface {
public:
VSliceAllocWrapper() : alloc_(mem_attr, OB_MALLOC_BIG_BLOCK_SIZE)
{
alloc_.set_nway(64);
}
void* alloc()
{
return alloc_.alloc(ISIZE);
}
void free(void* p)
{
return alloc_.free(p);
}
void set_nway(int nway)
{
alloc_.set_nway(nway);
}
int64_t hold()
{
return alloc_.hold();
}
private:
ObVSliceAlloc alloc_;
};
struct SimpleFifoAllocWrapper : public AllocInterface {
public:
SimpleFifoAllocWrapper() : alloc_(mem_attr, OB_MALLOC_BIG_BLOCK_SIZE)
{
alloc_.set_nway(64);
}
void* alloc()
{
return alloc_.alloc(ISIZE);
}
void free(void* p)
{
return alloc_.free(p);
}
void set_nway(int nway)
{
alloc_.set_nway(nway);
}
int64_t hold()
{
return alloc_.hold();
}
private:
ObSimpleFifoAlloc alloc_;
};
ObTCCounter gcounter;
class FixedStack {
public:
FixedStack() : top_(base_)
{}
void push(void* p)
{
if (NULL == p)
abort();
*top_++ = p;
}
void* pop()
{
return *--top_;
}
private:
void** top_;
void* base_[4096];
};
FixedStack gstack[65];
FixedStack& get_stack()
{
return gstack[get_itid()];
}
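// 64-bit finalizer-style bit mixer (the constants match MurmurHash3's fmix64)
// used below as a cheap thread-local pseudo-random generator.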
inline uint64_t rand64(uint64_t h)
{
h ^= h >> 33;
h *= 0xff51afd7ed558ccd;
h ^= h >> 33;
h *= 0xc4ceb9fe1a85ec53;
h ^= h >> 33;
return h;
}
int64_t next_seq()
{
static int64_t init_seq = 0;
static __thread int64_t seq = 0;
if (seq <= 0) {
seq = ATOMIC_AAF(&init_seq, 1);
}
return seq += 1024;
}
int64_t rand_gen()
{
return rand64(next_seq());
}
inline void do_alloc(AllocInterface* ga, int64_t count = 1)
{
for (int i = 0; i < count; i++) {
get_stack().push(ga->alloc());
}
}
inline void do_free(AllocInterface* ga, int64_t count = 1)
{
for (int i = 0; i < count; i++) {
ga->free(get_stack().pop());
}
}
volatile bool g_stop CACHE_ALIGNED;
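// Each worker repeatedly allocates a random batch of 1..1024 items, frees them
// all, and adds the batch size to the shared throughput counter.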
void* thread_func(AllocInterface* ga)
{
while (!g_stop) {
int64_t x = 1 + (rand_gen() & 1023);
do_alloc(ga, x);
do_free(ga, x);
gcounter.inc(x);
}
return NULL;
}
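// Read an integer knob from the environment, falling back to the literal
// default (relies on the GNU "?:" extension).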
#define cfgi(k, d) atoi(getenv(k) ?: #d)
void do_perf(AllocInterface* ga)
{
int n_sec = cfgi("n_sec", 1);
int n_way = cfgi("n_way", 1);
int n_thread = cfgi("n_thread", 8);
pthread_t thread[128];
g_stop = false;
ga->set_nway(n_way);
for (int i = 0; i < n_thread; i++) {
pthread_create(thread + i, NULL, (void* (*)(void*))thread_func, ga);
}
sleep(1);
int64_t last = gcounter.value();
while (n_sec--) {
sleep(1);
int64_t cur = gcounter.value();
fprintf(stderr, "tps=%'ld hold=%ld\n", cur - last, ga->hold());
last = cur;
}
g_stop = true;
for (int i = 0; i < n_thread; i++) {
pthread_join(thread[i], NULL);
}
}
// int64_t get_us() { return ObTimeUtility::current_time(); }
#define PERF(x) \
{ \
fprintf(stderr, #x "\n"); \
x##Wrapper ga; \
do_perf(&ga); \
}
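// PERF(x) prints the allocator name, instantiates the matching xWrapper, and
// runs the benchmark loop against it.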
#include <locale.h>
int main()
{
setlocale(LC_ALL, "");
PERF(SliceAlloc);
PERF(VSliceAlloc);
PERF(Malloc);
PERF(ObMalloc);
PERF(SmallAlloc);
PERF(OpAlloc);
PERF(OpReclaimAlloc);
PERF(SimpleFifoAlloc);
return 0;
}

@@ -0,0 +1,154 @@
/**
* Copyright (c) 2021 OceanBase
* OceanBase CE is licensed under Mulan PubL v2.
* You can use this software according to the terms and conditions of the Mulan PubL v2.
* You may obtain a copy of Mulan PubL v2 at:
* http://license.coscl.org.cn/MulanPubL-2.0
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PubL v2 for more details.
*/
#include "lib/allocator/ob_sql_arena_allocator.h"
#include "lib/ob_define.h"
#include <gtest/gtest.h>
#include "lib/utility/ob_test_util.h"
#include "lib/alloc/ob_malloc_allocator.h"
using namespace oceanbase::common;
using namespace oceanbase::lib;
class TestSQLArenaAllocator : public ::testing::Test {
public:
TestSQLArenaAllocator();
virtual ~TestSQLArenaAllocator();
virtual void SetUp();
virtual void TearDown();
private:
// disallow copy
DISALLOW_COPY_AND_ASSIGN(TestSQLArenaAllocator);
protected:
// function members
protected:
// data members
};
TestSQLArenaAllocator::TestSQLArenaAllocator()
{}
TestSQLArenaAllocator::~TestSQLArenaAllocator()
{}
void TestSQLArenaAllocator::SetUp()
{}
void TestSQLArenaAllocator::TearDown()
{}
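// GET_DEFAULT / GET_AREA read the current hold of the OB_SQL_ARENA label in the
// tenant's DEFAULT_CTX_ID and WORK_AREA contexts respectively.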
#define GET_DEFAULT() \
({ \
ma->get_tenant_ctx_mod_usage(tenant_id, ObCtxIds::DEFAULT_CTX_ID, label, item); \
int64_t hold = item.hold_; \
hold; \
})
#define GET_AREA() \
({ \
ma->get_tenant_ctx_mod_usage(tenant_id, ObCtxIds::WORK_AREA, label, item); \
int64_t hold = item.hold_; \
hold; \
})
TEST_F(TestSQLArenaAllocator, basic)
{
uint64_t tenant_id = OB_SERVER_TENANT_ID;
const lib::ObLabel& label = ObModIds::OB_SQL_ARENA;
ObMallocAllocator* ma = ObMallocAllocator::get_instance();
ObModItem item;
ObSQLArenaAllocator sql_arena(tenant_id);
int64_t default_hold = GET_DEFAULT();
int64_t area_hold = GET_AREA();
ASSERT_EQ(OB_SUCCESS, ma->create_tenant_ctx_allocator(tenant_id, ObCtxIds::WORK_AREA));
void* ptr = sql_arena.alloc((1 << 20) - 1);
ASSERT_NE(nullptr, ptr);
ASSERT_GT(GET_DEFAULT(), default_hold);
default_hold = GET_DEFAULT();
int64_t new_area_hold = GET_AREA();
ASSERT_EQ(new_area_hold, area_hold);
ptr = sql_arena.alloc(1 << 20);
ASSERT_NE(nullptr, ptr);
int64_t new_default_hold = GET_DEFAULT();
ASSERT_EQ(new_default_hold, default_hold);
ASSERT_GT(GET_AREA(), area_hold);
area_hold = GET_AREA();
// reset_remain_one_page: free everything except one page
int64_t total = sql_arena.total();
sql_arena.reset_remain_one_page();
ASSERT_GT(total, sql_arena.total());
total = sql_arena.total();
// reset()
sql_arena.reset();
ASSERT_EQ(0, sql_arena.total());
default_hold = GET_DEFAULT();
area_hold = GET_AREA();
ASSERT_EQ(0, default_hold);
ASSERT_EQ(0, area_hold);
int64_t last_default_hold = 0;
int64_t last_area_hold = 0;
bool flag = false;
int cnt = 0;
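// Keep allocating 512KB blocks: the first few land in the DEFAULT ctx, and once
// cumulative usage grows large enough the allocator starts routing them to
// WORK_AREA instead, which is what the loop below detects before breaking out.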
do {
last_default_hold = default_hold;
last_area_hold = area_hold;
ptr = sql_arena.alloc((1 << 20) / 2);
ASSERT_NE(nullptr, ptr);
default_hold = GET_DEFAULT();
area_hold = GET_AREA();
if (area_hold > last_area_hold) {
ASSERT_EQ(last_default_hold, default_hold);
flag = true;
ptr = sql_arena.alloc(2 << 20);
ASSERT_NE(nullptr, ptr);
ASSERT_GT(GET_AREA(), area_hold);
break;
} else {
cnt++;
ASSERT_EQ(last_area_hold, area_hold);
}
} while (true);
ASSERT_TRUE(flag);
ASSERT_GT(cnt, 3);
// tracer
total = sql_arena.total();
ASSERT_TRUE(sql_arena.set_tracer());
ptr = sql_arena.alloc(4 << 20);
ASSERT_NE(nullptr, ptr);
ASSERT_GT(sql_arena.total(), total);
ASSERT_TRUE(sql_arena.revert_tracer());
ASSERT_EQ(sql_arena.total(), total);
// explicit destruction releases everything
ptr = sql_arena.alloc(1 << 20);
ASSERT_NE(nullptr, ptr);
sql_arena.~ObSQLArenaAllocator();
ASSERT_EQ(0, sql_arena.total());
default_hold = GET_DEFAULT();
area_hold = GET_AREA();
ASSERT_EQ(default_hold, 0);
ASSERT_EQ(area_hold, 0);
}
int main(int argc, char** argv)
{
OB_LOGGER.set_log_level("INFO");
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}