Fix memory usage of thread_local on heap & remove tc_alloc

This commit is contained in:
nroskill
2022-11-16 13:07:58 +00:00
committed by wangzelin.wzl
parent cd0687afbf
commit 6710671db5
19 changed files with 108 additions and 370 deletions

View File

@ -30,13 +30,16 @@ bool ObMallocAllocator::is_inited_ = false;
ObMallocAllocator::ObMallocAllocator() : locks_(), allocators_(), reserved_(0), urgent_(0)
{
for (int64_t i = 0; i < PRESERVED_TENANT_COUNT; ++i) {
locks_[i].enable_record_stat(false);
}
set_root_allocator();
int ret = OB_SUCCESS;
for (int64_t i = 0; OB_SUCC(ret) && i < ObCtxIds::MAX_CTX_ID; i++) {
if (OB_FAIL(create_tenant_ctx_allocator(OB_SYS_TENANT_ID, i))) {
LOG_ERROR("create tenant allocator fail", K(ret), K(i));
//LOG_ERROR("create tenant allocator fail", K(ret), K(i));
} else if (OB_FAIL(create_tenant_ctx_allocator(OB_SERVER_TENANT_ID, i))) {
LOG_ERROR("create tenant allocator fail", K(ret), K(i));
//LOG_ERROR("create tenant allocator fail", K(ret), K(i));
}
}
is_inited_ = true;
@ -174,7 +177,7 @@ ObTenantCtxAllocator *ObMallocAllocator::get_tenant_ctx_allocator(uint64_t tenan
} else {
// TODO: lock slot
const int64_t slot = tenant_id % PRESERVED_TENANT_COUNT;
obsys::ObRLockGuard guard(locks_[slot]);
SpinRLockGuard guard(locks_[slot]);
ObTenantCtxAllocator * const *cur = &allocators_[slot][ctx_id];
while (NULL != *cur && (*cur)->get_tenant_id() < tenant_id) {
cur = &(*cur)->get_next();
@ -224,7 +227,7 @@ int ObMallocAllocator::create_tenant_ctx_allocator(
cas_succeed = ATOMIC_BCAS(&allocators_[tenant_id][ctx_id], NULL, allocator);
} else {
const int64_t slot = tenant_id % PRESERVED_TENANT_COUNT;
obsys::ObWLockGuard guard(locks_[slot]);
SpinWLockGuard guard(locks_[slot]);
ObTenantCtxAllocator **cur = &allocators_[slot][ctx_id];
while ((NULL != *cur) && (*cur)->get_tenant_id() < tenant_id) {
cur = &((*cur)->get_next());
@ -242,7 +245,7 @@ int ObMallocAllocator::create_tenant_ctx_allocator(
if (OB_FAIL(ret) || !cas_succeed) {
allocator->~ObTenantCtxAllocator();
allocer->free(buf);
} else {
} else if (OB_SYS_TENANT_ID != tenant_id && OB_SERVER_TENANT_ID != tenant_id) {
LOG_INFO("tenant ctx allocator was created", K(tenant_id), K(ctx_id), KCSTRING(lbt()));
}
}
@ -256,7 +259,7 @@ void ObMallocAllocator::set_root_allocator()
int ret = OB_SUCCESS;
static ObTenantCtxAllocator allocator(OB_SERVER_TENANT_ID);
if (OB_FAIL(allocator.set_tenant_memory_mgr())) {
LOG_ERROR("set_tenant_memory_mgr failed", K(ret));
//LOG_ERROR("set_tenant_memory_mgr failed", K(ret));
} else {
allocators_[OB_SERVER_TENANT_ID][0] = &allocator;
}
@ -452,7 +455,7 @@ int ObMallocAllocator::get_chunks(AChunk **chunks, int cap, int &cnt)
{
int ret = OB_SUCCESS;
for (int64_t slot = 0; OB_SUCC(ret) && slot < PRESERVED_TENANT_COUNT; ++slot) {
obsys::ObRLockGuard guard(locks_[slot]);
SpinRLockGuard guard(locks_[slot]);
for (int64_t ctx_id = 0; OB_SUCC(ret) && ctx_id < ObCtxIds::MAX_CTX_ID; ctx_id++) {
ObTenantCtxAllocator *ta = allocators_[slot][ctx_id];
while (OB_SUCC(ret) && ta != nullptr) {

View File

@ -77,7 +77,7 @@ private:
DISALLOW_COPY_AND_ASSIGN(ObMallocAllocator);
private:
obsys::ObRWLock locks_[PRESERVED_TENANT_COUNT];
common::SpinRWLock locks_[PRESERVED_TENANT_COUNT];
ObTenantCtxAllocator *allocators_[PRESERVED_TENANT_COUNT][common::ObCtxIds::MAX_CTX_ID];
int64_t reserved_;
int64_t urgent_;

View File

@ -18,6 +18,7 @@
#include "lib/allocator/ob_allocator.h"
#include "lib/allocator/ob_mod_define.h"
#include "lib/list/ob_free_list.h"
#include "lib/utility/ob_tracepoint.h"
#include "lib/utility/utility.h"
#include "lib/hash_func/ob_hash_func.h"
#include "lib/allocator/ob_mem_leak_checker.h"
@ -131,6 +132,8 @@ const ObCtxInfo &get_global_ctx_info()
void __attribute__((constructor(MALLOC_INIT_PRIORITY))) init_global_memory_pool()
{
auto& t = EventTable::instance();
auto& c = get_mem_leak_checker();
global_default_allocator = ObMallocAllocator::get_instance();
#ifndef OB_USE_ASAN
abort_unless(OB_SUCCESS == install_ob_signal_handler());

View File

@ -543,7 +543,7 @@ void ObLockDiagnose::print()
#endif
ObLatch::ObLatch()
: lock_(0)
: lock_(0), record_stat_(true)
{
}
@ -590,7 +590,7 @@ int ObLatch::try_rdlock(const uint32_t latch_id)
PAUSE();
} while (true);
TRY_LOCK_RECORD_STAT(latch_id, i, ret, true);
TRY_LOCK_RECORD_STAT(latch_id, i, ret, record_stat_);
}
HOLD_LOCK_INC();
return ret;
@ -610,7 +610,7 @@ int ObLatch::try_wrlock(const uint32_t latch_id, const uint32_t *puid)
ret = OB_EAGAIN;
}
TRY_LOCK_RECORD_STAT(latch_id, 1, ret, true);
TRY_LOCK_RECORD_STAT(latch_id, 1, ret, record_stat_);
}
HOLD_LOCK_INC();
return ret;
@ -791,7 +791,7 @@ OB_INLINE int ObLatch::low_lock(
}
}
LOCK_RECORD_STAT(latch_id, waited, spin_cnt, yield_cnt, true);
LOCK_RECORD_STAT(latch_id, waited, spin_cnt, yield_cnt, record_stat_);
}
return ret;
}

View File

@ -228,6 +228,7 @@ public:
inline bool is_wrlocked_by(const uint32_t *puid = NULL) const;
inline uint32_t get_wid() const;
int64_t to_string(char* buf, const int64_t buf_len) const;
void enable_record_stat(bool enable) { record_stat_ = enable; }
private:
template<typename LowTryLock>
@ -260,6 +261,7 @@ private:
static const uint32_t WAIT_MASK = 1<<31;
static const uint32_t MAX_READ_LOCK_CNT = 1<<24;
volatile uint32_t lock_;
bool record_stat_;
};
struct ObLDLockType

View File

@ -50,6 +50,7 @@ public:
inline int unlock() { return latch_.unlock(); }
inline void rdunlock() { unlock(); }
inline void wrunlock() { unlock(); }
inline void enable_record_stat(bool enable) { latch_.enable_record_stat(enable); }
private:
ObLatch latch_;
uint32_t latch_id_;

View File

@ -2116,7 +2116,6 @@ enum ObWFRemoveMode
#define DIO_ALIGN_SIZE 4096
#define DIO_READ_ALIGN_SIZE 4096
#define DIO_ALLOCATOR_CACHE_BLOCK_SIZE (OB_DEFAULT_MACRO_BLOCK_SIZE + DIO_READ_ALIGN_SIZE)
#define CORO_INIT_PRIORITY 120
#define MALLOC_INIT_PRIORITY 128
#define NORMAL_INIT_PRIORITY (MALLOC_INIT_PRIORITY + 1)
@ -2266,22 +2265,22 @@ OB_INLINE int64_t ob_gettid()
return tid;
}
OB_INLINE uint64_t &ob_get_tenant_id()
OB_INLINE uint64_t& ob_get_tenant_id()
{
RLOCAL_INLINE(uint64_t, tenant_id);
thread_local uint64_t tenant_id = 0;;
return tenant_id;
}
OB_INLINE char *ob_get_tname()
OB_INLINE char* ob_get_tname()
{
struct TNameBuf {
TNameBuf() {
snprintf(v_, oceanbase::OB_THREAD_NAME_BUF_LEN, "%s", "");
}
char v_[oceanbase::OB_THREAD_NAME_BUF_LEN];
};
RLOCAL_INLINE(TNameBuf, tname);
return tname.v_;
thread_local char tname[oceanbase::OB_THREAD_NAME_BUF_LEN] = {0};
return tname;
}
OB_INLINE const char*& ob_get_origin_thread_name()
{
thread_local const char* tname = nullptr;
return tname;
}
#define GETTID() ob_gettid()

View File

@ -369,7 +369,7 @@ int ObObjFreeList::init(const char *name, const int64_t obj_size,
alignment_ = alignment;
obj_count_base_ = (OP_GLOBAL == cache_type) ? 0 : obj_count;
type_size_base_ = obj_size;
only_global_ = (OP_RECLAIM != cache_type);
only_global_ = (OP_GLOBAL == cache_type);
name_ = name;
// Make sure we align *all* the objects in the allocation,
@ -631,22 +631,6 @@ void *ObObjFreeList::reclaim_alloc(ObThreadCache *thread_cache)
return ret;
}
void *ObObjFreeList::tc_alloc(ObThreadCache *thread_cache)
{
void *ret = NULL;
if (OB_LIKELY(NULL != thread_cache)){
if (NULL != (ret = thread_cache->inner_free_list_.pop())) {
thread_cache->nr_free_--;
thread_cache->nr_malloc_++;
} else if (only_global_) {
if (NULL != (ret = global_alloc())) {
thread_cache->nr_malloc_++;
}
}
}
return ret;
}
void *ObObjFreeList::alloc()
{
void *ret = NULL;
@ -661,7 +645,9 @@ void *ObObjFreeList::alloc()
}
if (only_global_) {
ret = tc_alloc(thread_cache);
if (OB_NOT_NULL(ret = global_alloc())) {
thread_cache->nr_malloc_++;
}
} else {
ret = reclaim_alloc(thread_cache);
}
@ -691,36 +677,6 @@ void ObObjFreeList::reclaim_free(ObThreadCache *cur_thread_cache, void *item)
rcu_read_unlock(cur_thread_cache);
}
void ObObjFreeList::tc_free(ObThreadCache *cur_thread_cache, void *item)
{
if (obj_count_base_ > 0) {
// free all thread cache obj upto global free list if it's overflow
if (OB_UNLIKELY(cur_thread_cache->nr_free_ >= obj_count_base_)) {
void *next = NULL;
obj_free_list_.push(item);
// keep half of obj_count_base_
int64_t low_watermark = obj_count_base_ / 2;
while (cur_thread_cache->nr_free_ > low_watermark
&& NULL != (next = cur_thread_cache->inner_free_list_.pop())) {
obj_free_list_.push(next);
cur_thread_cache->nr_free_--;
}
} else {
cur_thread_cache->inner_free_list_.push(reinterpret_cast<ObFreeObject *>(item));
cur_thread_cache->nr_free_++;
}
} else {
global_free(item);
}
/**
* For global allocate mode, maybe thread A allocates memory and thread B frees it.
* So when thread B frees, B's thread cache maybe NULL. The thread_cache->nr_malloc_
* isn't the actual malloc number of this thread, maybe it's negative.
*/
cur_thread_cache->nr_malloc_--;
}
void ObObjFreeList::free(void *item)
{
ObThreadCache *thread_cache = NULL;
@ -734,7 +690,8 @@ void ObObjFreeList::free(void *item)
if (OB_LIKELY(NULL != thread_cache)) {
if (only_global_) {
tc_free(thread_cache, item);
global_free(item);
thread_cache->nr_malloc_--;
} else {
reclaim_free(thread_cache, item);
}

View File

@ -43,7 +43,6 @@ extern void thread_shutdown_cleanup(void *);
enum ObMemCacheType
{
OP_GLOBAL,
OP_TC,
OP_RECLAIM
};
@ -177,8 +176,6 @@ private:
void global_free(void *item);
void *reclaim_alloc(ObThreadCache *thread_cache);
void reclaim_free(ObThreadCache *cur_thread_cache, void *item);
void *tc_alloc(ObThreadCache *thread_cache);
void tc_free(ObThreadCache *cur_thread_cache, void *item);
ObThreadCache *init_thread_cache();
void privatize_thread_cache(ObThreadCache *cur_thread_cache, ObThreadCache *src_thread_cache);
@ -796,16 +793,14 @@ inline void call_destructor(T *ptr) {
// 3. because object pool uses singleton, please only use one of global,
// tc or reclaim interfaces for each object type in the whole project.
// Note:
// op_alloc,op_tc_alloc and op_reclaim_alloc call the default constructor if it exist,
// op_alloc and op_reclaim_alloc call the default constructor if it exist,
// else it just reinterpret_cast ptr.
//
// op_alloc_args,op_tc_alloc_args and op_reclaim_args call the constructor with args.
// op_alloc_args and op_reclaim_args call the constructor with args.
// It uses placement new to construct instance, if args is null and there isn't public
// default constructor, compiler isn't happy.
//
// op_alloc_args uses the global object freelist; it saves memory but performance is poor.
// op_tc_alloc_args uses a thread-local object free list; performance is better but
// it wastes some memory.
// op_reclaim_alloc_args uses a thread-local object free list with memory reclaim;
// performance is good and objects waste less memory.
@ -887,44 +882,6 @@ inline void call_destructor(T *ptr) {
} \
})
// thread cache pool allocator interface
#define op_tc_alloc_args(type, args...) \
({ \
type *ret = NULL; \
common::ObClassAllocator<type> *instance = \
common::ObClassAllocator<type>::get(common::OPNum<type>::LOCAL_NUM, common::OP_TC, \
common::OPNum<type>::LABEL); \
if (OB_LIKELY(NULL != instance)) { \
void *tmp = instance->alloc_void(); \
if (OB_LIKELY(NULL != tmp)) { \
ret = new (tmp) type(args); \
} \
} \
ret; \
})
#define op_tc_alloc(type) \
({ \
type *ret = NULL; \
common::ObClassAllocator<type> *instance = \
common::ObClassAllocator<type>::get(common::OPNum<type>::LOCAL_NUM, common::OP_TC, \
common::OPNum<type>::LABEL); \
if (OB_LIKELY(NULL != instance)) { \
ret = instance->alloc(); \
} \
ret; \
})
#define op_tc_free(ptr) \
({ \
common::ObClassAllocator<__typeof__(*ptr)> *instance = \
common::ObClassAllocator<__typeof__(*ptr)>::get(common::OPNum<__typeof__(*ptr)>::LOCAL_NUM, common::OP_TC, \
common::OPNum<__typeof__(*ptr)>::LABEL); \
if (OB_LIKELY(NULL != instance)) { \
instance->free(ptr); \
} \
})
// thread cache pool and reclaim allocator interface
#define op_reclaim_alloc_args(type, args...) \
({ \
@ -941,29 +898,9 @@ inline void call_destructor(T *ptr) {
ret; \
})
#define op_reclaim_alloc(type) \
({ \
OLD_STATIC_ASSERT((std::is_default_constructible<type>::value), "type is not default constructible"); \
type *ret = NULL; \
common::ObClassAllocator<type> *instance = \
common::ObClassAllocator<type>::get(common::OPNum<type>::LOCAL_NUM, common::OP_RECLAIM, \
common::OPNum<type>::LABEL); \
if (OB_LIKELY(NULL != instance)) { \
ret = instance->alloc(); \
} \
ret; \
})
#define op_reclaim_alloc(type) op_alloc(type)
#define op_reclaim_free(ptr) \
({ \
common::ObClassAllocator<__typeof__(*ptr)> *instance = \
common::ObClassAllocator<__typeof__(*ptr)>::get(common::OPNum<__typeof__(*ptr)>::LOCAL_NUM, \
common::OP_RECLAIM, \
common::OPNum<__typeof__(*ptr)>::LABEL); \
if (OB_LIKELY(NULL != instance)) { \
instance->free(ptr); \
} \
})
#define op_reclaim_free(ptr) op_free(ptr)
} // end of namespace common
} // end of namespace oceanbase

View File

@ -23,7 +23,6 @@
#include "lib/alloc/alloc_struct.h"
#include "lib/alloc/alloc_failed_reason.h"
#include "lib/alloc/memory_sanity.h"
#include "lib/stat/ob_diagnose_info.h"
using namespace oceanbase::lib;
@ -68,8 +67,6 @@ void *AChunkMgr::direct_alloc(const uint64_t size, const bool can_use_huge_page,
{
common::ObTimeGuard time_guard(__func__, 1000 * 1000);
int orig_errno = errno;
EVENT_INC(MMAP_COUNT);
EVENT_ADD(MMAP_SIZE, size);
void *ptr = nullptr;
ptr = low_alloc(size, can_use_huge_page, huge_page_used, alloc_shadow);
@ -114,8 +111,7 @@ void *AChunkMgr::direct_alloc(const uint64_t size, const bool can_use_huge_page,
void AChunkMgr::direct_free(const void *ptr, const uint64_t size)
{
common::ObTimeGuard time_guard(__func__, 1000 * 1000);
EVENT_INC(MUNMAP_COUNT);
EVENT_ADD(MUNMAP_SIZE, size);
ATOMIC_FAA(&unmaps_, 1);
if (size > INTACT_ACHUNK_SIZE) {
ATOMIC_FAA(&large_unmaps_, 1);

View File

@ -507,6 +507,9 @@ int ObTenantResourceMgrList::pop(ObTenantResourceMgr *&tenant_resource_mgr)
ObResourceMgr::ObResourceMgr()
: inited_(false), cache_washer_(NULL), locks_(), tenant_resource_mgrs_(), free_list_()
{
for (int64_t i = 0; i < MAX_TENANT_COUNT; ++i) {
locks_[i].enable_record_stat(false);
}
}
ObResourceMgr::~ObResourceMgr()

View File

@ -14,6 +14,7 @@
#define OB_DI_TLS_H_
#include "lib/ob_define.h"
#include "lib/allocator/ob_malloc.h"
namespace oceanbase
{
@ -24,92 +25,44 @@ template <class T>
class ObDITls
{
public:
static ObDITls &get_di_tls();
void destroy();
T *new_instance();
static T *get_instance();
static T* get_instance();
private:
ObDITls() : key_(INT32_MAX)
{
if (0 != pthread_key_create(&key_, destroy_thread_data_)) {
}
}
~ObDITls() { destroy(); }
static void destroy_thread_data_(void *ptr);
~ObDITls();
private:
pthread_key_t key_;
static TLOCAL(T *, instance_);
static TLOCAL(bool, disable_);
T* instance_;
bool disable_;
};
// NOTE: thread local diagnose information
// TODO: check if multi-query execute within one thread.
template <class T>
TLOCAL(T *, ObDITls<T>::instance_);
template <class T>
TLOCAL(bool, ObDITls<T>::disable_);
template <class T>
void ObDITls<T>::destroy_thread_data_(void *ptr)
ObDITls<T>::~ObDITls()
{
if (NULL != ptr) {
T *tls = (T *)ptr;
instance_ = NULL;
if (OB_NOT_NULL(instance_)) {
T* tls = instance_;
instance_ = nullptr;
disable_ = true;
delete tls;
//delete tls;
ob_delete(tls);
}
}
template <class T>
ObDITls<T> &ObDITls<T>::get_di_tls()
T* ObDITls<T>::get_instance()
{
static ObDITls<T> di_tls;
return di_tls;
}
template <class T>
void ObDITls<T>::destroy()
{
if (INT32_MAX != key_) {
void *ptr = pthread_getspecific(key_);
destroy_thread_data_(ptr);
if (0 != pthread_key_delete(key_)) {
} else {
key_ = INT32_MAX;
static thread_local ObDITls<T> di_tls;
if (OB_ISNULL(di_tls.instance_)) {
if (OB_LIKELY(!di_tls.disable_)) {
const char* label = "DITls";
//if (OB_NOT_NULL(ob_get_origin_thread_name())) {
// label = ob_get_origin_thread_name();
//}
di_tls.disable_ = true;
// add tenant
di_tls.instance_ = OB_NEW(T, label);
//instance_ = new T();
di_tls.disable_ = false;
}
}
}
template <class T>
T *ObDITls<T>::new_instance()
{
T *instance = NULL;
if (INT32_MAX != key_) {
T *tls = (T *)pthread_getspecific(key_);
if (NULL == tls) {
tls = new (std::nothrow) T();
if (NULL != tls && 0 != pthread_setspecific(key_, tls)) {
delete tls;
tls = NULL;
}
}
if (NULL != tls) {
instance = tls;
}
}
return instance;
}
template <class T>
T *ObDITls<T>::get_instance()
{
if (OB_UNLIKELY(NULL == instance_)) {
if (OB_LIKELY(!disable_)) {
disable_ = true;
instance_ = get_di_tls().new_instance();
disable_ = false;
}
}
return instance_;
return di_tls.instance_;
}
}

View File

@ -67,9 +67,5 @@ ObSessionStatEstGuard::~ObSessionStatEstGuard()
}
}
void __attribute__((constructor(101))) init_SessionDIBuffer()
{
oceanbase::common::ObDITls<ObSessionDIBuffer>::get_instance();
}
} /* namespace common */
} /* namespace oceanbase */

View File

@ -196,10 +196,10 @@ STAT_EVENT_ADD_DEF(LOCATION_CACHE_NONBLOCK_MISS, "location nonblock get miss", O
STAT_EVENT_ADD_DEF(LOCATION_CACHE_RPC_CHECK, "location cache rpc renew count", ObStatClassIds::CACHE, "location cache rpc renew count", 50021, true, true)
STAT_EVENT_ADD_DEF(LOCATION_CACHE_RENEW, "location cache renew", ObStatClassIds::CACHE, "location cache renew", 50022, true, true)
STAT_EVENT_ADD_DEF(LOCATION_CACHE_RENEW_IGNORED, "location cache renew ignored", ObStatClassIds::CACHE, "location cache renew ignored", 50023, true, true)
STAT_EVENT_ADD_DEF(MMAP_COUNT, "mmap count", ObStatClassIds::CACHE, "mmap count", 50024, true, true)
STAT_EVENT_ADD_DEF(MUNMAP_COUNT, "munmap count", ObStatClassIds::CACHE, "munmap count", 50025, true, true)
STAT_EVENT_ADD_DEF(MMAP_SIZE, "mmap size", ObStatClassIds::CACHE, "mmap size", 50026, true, true)
STAT_EVENT_ADD_DEF(MUNMAP_SIZE, "munmap size", ObStatClassIds::CACHE, "munmap size", 50027, true, true)
//STAT_EVENT_ADD_DEF(MMAP_COUNT, "mmap count", ObStatClassIds::CACHE, "mmap count", 50024, true, true)
//STAT_EVENT_ADD_DEF(MUNMAP_COUNT, "munmap count", ObStatClassIds::CACHE, "munmap count", 50025, true, true)
//STAT_EVENT_ADD_DEF(MMAP_SIZE, "mmap size", ObStatClassIds::CACHE, "mmap size", 50026, true, true)
//STAT_EVENT_ADD_DEF(MUNMAP_SIZE, "munmap size", ObStatClassIds::CACHE, "munmap size", 50027, true, true)
STAT_EVENT_ADD_DEF(KVCACHE_SYNC_WASH_TIME, "kvcache sync wash time", ObStatClassIds::CACHE, "kvcache sync wash time", 50028, true, true)
STAT_EVENT_ADD_DEF(KVCACHE_SYNC_WASH_COUNT, "kvcache sync wash count", ObStatClassIds::CACHE, "kvcache sync wash count", 50029, true, true)
STAT_EVENT_ADD_DEF(LOCATION_CACHE_RPC_RENEW_FAIL, "location cache rpc renew fail count", ObStatClassIds::CACHE, "location cache rpc renew fail count", 50030, true, true)

View File

@ -32,6 +32,7 @@ inline void set_thread_name(const char* type, uint64_t idx)
{
char *name = ob_get_tname();
uint64_t tenant_id = ob_get_tenant_id();
ob_get_origin_thread_name() = type;
if (tenant_id == 0) {
snprintf(name, OB_THREAD_NAME_BUF_LEN, "%s%ld", type, idx);
} else {
@ -44,6 +45,7 @@ inline void set_thread_name(const char* type)
{
char *name = ob_get_tname();
uint64_t tenant_id = ob_get_tenant_id();
ob_get_origin_thread_name() = type;
if (tenant_id == 0) {
snprintf(name, OB_THREAD_NAME_BUF_LEN, "%s", type);
} else {

View File

@ -112,6 +112,6 @@ oblib_addtest(utility/test_sample_rate_limiter.cpp)
oblib_addtest(utility/test_utility.cpp)
oblib_addtest(wait_event/test_wait_event.cpp)
oblib_addtest(utility/test_fast_convert.cpp)
oblib_addtest(objectpool/test_concurrency_pool.cpp)
#oblib_addtest(objectpool/test_concurrency_pool.cpp)
oblib_addtest(utility/test_defer.cpp)
oblib_addtest(hash/test_ob_ref_mgr.cpp)

View File

@ -188,39 +188,6 @@ private:
DISALLOW_COPY_AND_ASSIGN(ObjPoolAllocator);
};
template <uint64_t size>
class ObjPoolTCAllocator : public BaseAllocator
{
public:
ObjPoolTCAllocator()
{
}
virtual void reset()
{
}
virtual void *alloc()
{
return(op_tc_alloc(TestObj<size>));
}
virtual void free(void *p)
{
op_tc_free((TestObj<size>*)p);
}
virtual ~ObjPoolTCAllocator()
{
}
private:
int64_t size_;
DISALLOW_COPY_AND_ASSIGN(ObjPoolTCAllocator);
};
class DirectAllocator : public BaseAllocator
{
public:
@ -732,18 +699,6 @@ TEST(TestSimpleAllocate, objpool)
ASSERT_TRUE(engine.run() >= 0);
}
TEST(TestSimpleAllocate, objpool_tc)
{
int64_t max_thread = get_cpu_num() * 2;
Params params;
params.simple_param.times = ALLOC_TIME_PER_THREAD;
ObjPoolTCAllocator<ALLOC_SIZE> allocator;
TestEngine engine(&allocator, max_thread, &simple_worker, params);
ASSERT_TRUE(engine.run() >= 0);
}
TEST(TestSimpleAllocate, ob_malloc)
{
int64_t max_thread = get_cpu_num() * 2;
@ -795,19 +750,6 @@ TEST(TestWindowAllocate, objpool)
ASSERT_TRUE(engine.run() >= 0);
}
TEST(TestWindowAllocate, objpool_tc)
{
int64_t max_thread = get_cpu_num() * 2;
Params params;
params.window_param.times = ALLOC_TIME_PER_THREAD;
params.window_param.window_len = WINDOW_SIZE;
ObjPoolTCAllocator<ALLOC_SIZE> allocator;
TestEngine engine(&allocator, max_thread, &window_worker, params);
ASSERT_TRUE(engine.run() >= 0);
}
TEST(TestWindowAllocate, ob_malloc)
{
int64_t max_thread = get_cpu_num() * 2;
@ -836,21 +778,6 @@ TEST(TestPairwiseAllocate, lf_fifo)
free(params.pairwise_param.addr_queue);
}
TEST(TestPairwiseAllocate, objpool_tc)
{
int64_t max_thread = get_core_num() * 2;
Params params;
params.pairwise_param.times = ALLOC_TIME_PER_THREAD;
params.pairwise_param.addr_queue = (unsigned long int *)malloc(sizeof(unsigned long int) * MAX_THREAD * QUEUE_SIZE);
ObjPoolTCAllocator<ALLOC_SIZE> allocator;
TestEngine engine(&allocator, max_thread, &pairwise_worker, params, true);
ASSERT_TRUE(engine.run() >= 0);
free(params.pairwise_param.addr_queue);
}
TEST(TestPairwiseAllocate, objpool)
{
int64_t max_thread = get_core_num() * 2;

View File

@ -27,9 +27,6 @@ class COP
#define obj_alloc(type) op_alloc(type)
#define obj_free(ptr) op_free(ptr)
#define obj_tc_alloc(type) op_tc_alloc(type)
#define obj_tc_free(ptr) op_tc_free(ptr)
#define obj_reclaim_alloc(type) op_reclaim_alloc(type)
#define obj_reclaim_free(ptr) op_reclaim_free(ptr)
@ -38,8 +35,6 @@ struct FixedMemStruct
{
char data_[size];
};
#define fixed_mem_alloc(size) obj_tc_alloc(FixedMemStruct<size>)
#define fixed_mem_free(ptr, size) obj_tc_free((FixedMemStruct<size> *)ptr)
struct TestObject1
{
@ -98,13 +93,6 @@ volatile int64_t prepared3 = 0;
volatile int64_t prepared4 = 0;
static const int64_t THREAD_COUNT = 15;
TEST(COP, test_fixed_mem_op)
{
void *ptr = fixed_mem_alloc(2048);
OB_ASSERT(NULL != ptr);
fixed_mem_free(ptr, 2048);
}
TEST(COP, test_global_op)
{
TestObject1 *obj = NULL;
@ -115,47 +103,20 @@ TEST(COP, test_global_op)
ObObjFreeListList::get_freelists().dump();
}
TEST(COP, test_tc_op)
{
TestObject2 *obj2 = NULL;
obj2 = obj_tc_alloc(TestObject2);
OB_ASSERT(NULL != obj2);
obj_tc_free(obj2);
// reuse freelist of TestObject2
TestObject3 *obj3 = obj_tc_alloc(TestObject3);
OB_ASSERT(NULL != obj3);
obj_tc_free(obj3);
//double free
//obj_tc_free(obj3);
TestObject4 *obj4 = obj_reclaim_alloc(TestObject4);
OB_ASSERT(NULL != obj4);
obj_reclaim_free(obj4);
//double free
//obj_reclaim_free(obj4);
ObObjFreeListList::get_freelists().dump();
}
enum AllocType
{
GLOBAL,
TC,
RECLAIM
};
template <class T>
void alloc_objs(T &head, int64_t count, AllocType type = TC)
void alloc_objs(T &head, int64_t count, AllocType type = RECLAIM)
{
T *obj = NULL;
for (int64_t i = 0; i < count; i++) {
if (GLOBAL == type) {
obj = obj_alloc(T);
} else if (TC == type) {
obj = obj_tc_alloc(T);
} else {
obj = obj_reclaim_alloc(T);
}
@ -166,7 +127,7 @@ void alloc_objs(T &head, int64_t count, AllocType type = TC)
}
template <class T>
void free_objs(T &head, int64_t count, AllocType type = TC)
void free_objs(T &head, int64_t count, AllocType type = RECLAIM)
{
T *obj = NULL;
for (int64_t i = 0; i < count; i++) {
@ -177,8 +138,6 @@ void free_objs(T &head, int64_t count, AllocType type = TC)
head.next_ = obj->next_;
if (GLOBAL == type) {
obj_free(obj);
} else if (TC == type) {
obj_tc_free(obj);
} else {
obj_reclaim_free(obj);
}

View File

@ -37,7 +37,7 @@ ObTabletMemtableMgr::ObTabletMemtableMgr()
schema_recorder_()
{
#if defined(__x86_64__)
static_assert(sizeof(ObTabletMemtableMgr) <= 352, "The size of ObTabletMemtableMgr will affect the meta memory manager, and the necessity of adding new fields needs to be considered.");
static_assert(sizeof(ObTabletMemtableMgr) <= 360, "The size of ObTabletMemtableMgr will affect the meta memory manager, and the necessity of adding new fields needs to be considered.");
#endif
}