Use tenant-level object pool instead of sop_borrow

Author: obdev
Date: 2023-05-26 10:41:16 +00:00
Committed by: ob-robot
parent 982112f7bc
commit c9cccf08db
21 changed files with 102 additions and 80 deletions

View File

@ -11,6 +11,7 @@
*/
#include "lib/lock/ob_rwlock.h"
#include "lib/allocator/ob_malloc.h"
using namespace oceanbase;
using namespace obsys;
@ -55,13 +56,14 @@ ObRWLock::ObRWLock(LockMode lockMode)
pthread_rwlockattr_setkind_np(&attr, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
}
pthread_rwlock_init(&rwlock_, &attr);
rlock_ = new ObRLock(&rwlock_);
wlock_ = new ObWLock(&rwlock_);
auto mattr = SET_USE_500("RWLock");
rlock_ = OB_NEW(ObRLock, mattr, &rwlock_);
wlock_ = OB_NEW(ObWLock, mattr, &rwlock_);
}
ObRWLock::~ObRWLock()
{
pthread_rwlock_destroy(&rwlock_);
delete rlock_;
delete wlock_;
OB_DELETE(ObRLock, "unused", rlock_);
OB_DELETE(ObWLock, "unused", wlock_);
}
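
For context, a minimal sketch of the allocation pattern adopted here, assuming the OB_NEW/OB_DELETE macros from lib/allocator/ob_malloc.h behave as they are used in the hunk above (the Foo type, the label "Foo", and the example function are hypothetical): objects are constructed through the tenant-attributed allocator, tagged to the 500 (server) tenant with a module label, instead of going through untracked global new/delete.

#include "lib/allocator/ob_malloc.h"

struct Foo
{
  explicit Foo(int v) : v_(v) {}
  int v_;
};

void example()
{
  // Label allocations with the 500 tenant and a module name,
  // mirroring SET_USE_500("RWLock") above; "Foo" is a placeholder label.
  auto attr = SET_USE_500("Foo");
  Foo *p = OB_NEW(Foo, attr, 42);   // allocate and construct via the ob_malloc-based allocator
  if (nullptr != p) {
    OB_DELETE(Foo, "unused", p);    // destroy and free; label mirrors the diff above
  }
}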

View File

@ -283,19 +283,21 @@ public:
* Because it is a global singleton, this work is done at program startup
* TODO: change to on-demand allocation
*/
ObServerObjectPool(const int64_t tenant_id, const bool regist, const bool is_mini_mode)
: tenant_id_(tenant_id), regist_(regist), is_mini_mode_(is_mini_mode), arena_num_(0),
ObServerObjectPool(const int64_t tenant_id, const bool regist, const bool is_mini_mode,
const int64_t cpu_count)
: tenant_id_(tenant_id), regist_(regist), is_mini_mode_(is_mini_mode),
cpu_count_(cpu_count), arena_num_(0),
arena_(NULL), cnt_per_arena_(0), item_size_(0), buf_(nullptr), is_inited_(false)
{}
int init()
{
int ret = OB_SUCCESS;
const bool is_mini = (lib::is_mini_mode() || is_mini_mode_);
arena_num_ = static_cast<int32_t>(is_mini ? get_cpu_count()/2 : get_cpu_count());
const bool is_mini = is_mini_mode_;
arena_num_ = min(64/*upper_bound*/, max(4/*lower_bound*/, static_cast<int32_t>(cpu_count_) * 2));
//If the assignment logic of buf_ below is not reached, buf_ will not be initialized
buf_ = NULL;
cnt_per_arena_ = is_mini ? 16 : 128;
cnt_per_arena_ = is_mini ? 8 : 64;
int64_t s = (sizeof(T) + sizeof(Meta)); // Each cached object header has a Meta field to store necessary information and linked list pointers
item_size_ = upper_align(s, CACHE_ALIGN_SIZE); // Align according to the cache line to ensure that there will be no false sharing between objects
ObMemAttr attr(tenant_id_, ObModIds::OB_SERVER_OBJECT_POOL);
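
To illustrate the new sizing rule (a standalone sketch, not part of the commit): arena_num_ is now derived from the CPU count passed in by the tenant, doubled and clamped to [4, 64], instead of from the process-wide CPU count, and each arena caches fewer objects than before (8/64 instead of 16/128).

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Same clamp as above: 2 * cpu_count, bounded to the range [4, 64].
static int32_t compute_arena_num(const int64_t cpu_count)
{
  return std::min(64, std::max(4, static_cast<int32_t>(cpu_count) * 2));
}

int main()
{
  // cpu_count = 1  -> 4  (lower bound)
  // cpu_count = 16 -> 32
  // cpu_count = 64 -> 64 (upper bound)
  for (const int cpu : {1, 16, 64}) {
    std::printf("cpu=%d arenas=%d\n", cpu, compute_arena_num(cpu));
  }
  return 0;
}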
@ -400,6 +402,7 @@ private:
const int64_t tenant_id_;
const bool regist_;
const bool is_mini_mode_;
const int64_t cpu_count_;
int32_t arena_num_;
ObPoolArenaHead *arena_;
int64_t cnt_per_arena_;
@ -413,7 +416,8 @@ inline ObServerObjectPool<T>& get_server_object_pool() {
class Wrapper {
public:
Wrapper()
: instance_(OB_SERVER_TENANT_ID, true/*regist*/, false/*is_mini_mode*/)
: instance_(OB_SERVER_TENANT_ID, true/*regist*/, lib::is_mini_mode(),
get_cpu_count())
{
instance_.init(); // is_inited_ will be checked on every invocation
}
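
The surrounding function (truncated in this hunk) appears to rely on a function-local static wrapper so the pool is built lazily on first use, now capturing the real mini-mode flag and CPU count. The sketch below shows that general pattern with placeholder types; the static declaration and return statement are assumptions, since they are outside the hunk.

#include <cstdint>

// Placeholder pool type; the real class is ObServerObjectPool<T>.
struct PoolSketch
{
  PoolSketch(const bool /*is_mini_mode*/, const int64_t /*cpu_count*/) {}
  void init() { /* allocate arenas once */ }
};

inline PoolSketch &get_pool_sketch()
{
  class Wrapper
  {
  public:
    // Capture the mini-mode flag and CPU count at first use,
    // matching the constructor change shown above (values here are examples).
    Wrapper() : instance_(false/*is_mini_mode*/, 8/*cpu_count*/)
    {
      instance_.init();
    }
    PoolSketch instance_;
  };
  static Wrapper w;   // constructed exactly once; thread-safe since C++11
  return w.instance_;
}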

View File

@ -65,7 +65,8 @@ int ObBaseLogWriter::init(
} else if (OB_UNLIKELY(!log_cfg.is_valid())) {
ret = OB_INVALID_ARGUMENT;
LOG_STDERR("Invalid argument.\n");
} else if (NULL == (log_items_ = (ObIBaseLogItem**) malloc(sizeof(ObIBaseLogItem*) * log_cfg.max_buffer_item_cnt_))) {
} else if (NULL == (log_items_ = (ObIBaseLogItem**) ob_malloc(sizeof(ObIBaseLogItem*) * log_cfg.max_buffer_item_cnt_,
attr))) {
ret = OB_ALLOCATE_MEMORY_FAILED;
LOG_STDERR("Fail to allocate memory, max_buffer_item_cnt=%lu.\n", log_cfg.max_buffer_item_cnt_);
} else if (0 != pthread_mutex_init(&thread_mutex_, NULL)) {
@ -183,7 +184,7 @@ void ObBaseLogWriter::destroy()
is_inited_ = false;
if (NULL != log_items_) {
free(log_items_);
ob_free(log_items_);
log_items_ = NULL;
}
if (OB_NOT_NULL(log_write_cond_)) {
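
A minimal sketch of the switch from libc malloc/free to the attributed allocator, assuming attr is an ObMemAttr set up earlier in init() (the tenant and label below are placeholders, not the commit's values):

#include "lib/allocator/ob_malloc.h"

// Hypothetical helper showing the pattern: the pointer array is now
// accounted against a tenant/module instead of being an untracked malloc.
int alloc_log_item_array(ObIBaseLogItem **&log_items, const int64_t max_cnt)
{
  int ret = OB_SUCCESS;
  ObMemAttr attr(OB_SERVER_TENANT_ID, "BaseLogWriter");  // placeholder attr
  log_items = static_cast<ObIBaseLogItem **>(
      ob_malloc(sizeof(ObIBaseLogItem *) * max_cnt, attr));
  if (NULL == log_items) {
    ret = OB_ALLOCATE_MEMORY_FAILED;
  }
  return ret;
}

void free_log_item_array(ObIBaseLogItem **&log_items)
{
  if (NULL != log_items) {
    ob_free(log_items);   // pairs with ob_malloc, not libc free
    log_items = NULL;
  }
}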

View File

@ -198,19 +198,19 @@ void Thread::wait()
int ret = 0;
if (pth_ != 0) {
if (2 <= ATOMIC_AAF(&join_concurrency_, 1)) {
abort();
ob_abort();
}
if (0 != (ret = pthread_join(pth_, nullptr))) {
LOG_ERROR("pthread_join failed", K(ret), K(errno));
#ifndef OB_USE_ASAN
dump_pth();
abort();
ob_abort();
#endif
}
destroy_stack();
runnable_ = nullptr;
if (1 <= ATOMIC_AAF(&join_concurrency_, -1)) {
abort();
ob_abort();
}
}
}
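
The counter logic around the calls replaced above acts as a guard against two threads joining concurrently; below is a portable sketch of the same idea (std::abort() stands in for ob_abort(), and the names are hypothetical).

#include <atomic>
#include <cstdlib>

static std::atomic<int> join_concurrency{0};

void wait_sketch()
{
  // Add-and-fetch on entry: a result of 2 or more means another thread
  // is already inside wait(), so joining again would be unsafe.
  if (join_concurrency.fetch_add(1) + 1 >= 2) {
    std::abort();
  }
  // ... pthread_join(), destroy_stack(), etc. would run here ...
  // Add-and-fetch on exit: anything still above 0 means a racing joiner.
  if (join_concurrency.fetch_add(-1) - 1 >= 1) {
    std::abort();
  }
}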