[FEAT MERGE] del_tenant_memleak branch

Co-authored-by: HaHaJeff <jeffzhouhhh@gmail.com>
This commit is contained in:
obdev
2023-01-28 19:29:23 +08:00
committed by ob-robot
parent e3b89cd741
commit ba19ba90e0
179 changed files with 3235 additions and 2291 deletions

View File

@ -25,20 +25,57 @@
using namespace oceanbase::lib;
using namespace oceanbase::common;
uint64_t ObMallocAllocator::max_used_tenant_id_ = 0;
bool ObMallocAllocator::is_inited_ = false;
ObMallocAllocator::ObMallocAllocator() : locks_(), allocators_(), reserved_(0), urgent_(0)
namespace oceanbase
{
namespace lib
{
__thread ObTenantCtxAllocator *tl_ta = NULL;
// Default-construct an inactive guard: no tenant switch has happened yet,
// so destruction (revert()) will be a no-op.
ObTLTaGuard::ObTLTaGuard()
    : restore_(false)
{
}
// Construct and immediately bind the thread-local allocator cache to
// `tenant_id` (see switch_to()); the binding is reverted on destruction.
ObTLTaGuard::ObTLTaGuard(const int64_t tenant_id)
    : restore_(false)
{
  switch_to(tenant_id);
}
// Restore the previous thread-local allocator binding, if this guard made one.
ObTLTaGuard::~ObTLTaGuard()
{
  revert();
}
// Re-bind the thread-local tenant allocator cache (tl_ta) to `tenant_id`.
// Any binding previously made by this guard is undone first. When tl_ta
// already belongs to the requested tenant the cache is left untouched and
// nothing needs restoring later (restore_ stays false).
void ObTLTaGuard::switch_to(const int64_t tenant_id)
{
  revert();
  const bool cache_hit = (NULL != tl_ta && tenant_id == tl_ta->get_tenant_id());
  if (!cache_hit) {
    // Save the current binding, pin a reference to the target tenant's
    // default-ctx allocator, and publish it thread-locally.
    ta_bak_ = tl_ta;
    ta_ = lib::ObMallocAllocator::get_instance()->get_tenant_ctx_allocator_without_tlcache(
        tenant_id, ObCtxIds::DEFAULT_CTX_ID);
    tl_ta = ta_.ref_allocator();
    restore_ = true;
  }
}
// Undo the effect of switch_to(): restore the saved thread-local pointer and
// drop the reference held by ta_. Safe to call repeatedly; only the first
// call after a successful switch does any work.
void ObTLTaGuard::revert()
{
  if (!restore_) {
    return;
  }
  tl_ta = ta_bak_;
  ta_.revert();
  restore_ = false;
}
// Construct the process-wide malloc allocator: zero/default all bookkeeping
// members, then bootstrap the server tenant's allocator set via
// set_root_allocator().
ObMallocAllocator::ObMallocAllocator()
: locks_(), allocators_(), unrecycled_lock_(), unrecycled_allocators_(),
reserved_(0), urgent_(0), max_used_tenant_id_(0), create_on_demand_(false)
{
set_root_allocator();
// NOTE(review): the loop below calls create_tenant_ctx_allocator(), which is
// replaced by create_and_add_tenant_allocator() elsewhere in this file. These
// lines look like stale pre-refactor code merged in by the diff rendering --
// confirm against the upstream commit before relying on them.
int ret = OB_SUCCESS;
for (int64_t i = 0; OB_SUCC(ret) && i < ObCtxIds::MAX_CTX_ID; i++) {
if (OB_FAIL(create_tenant_ctx_allocator(OB_SYS_TENANT_ID, i))) {
LOG_ERROR("create tenant allocator fail", K(ret), K(i));
} else if (OB_FAIL(create_tenant_ctx_allocator(OB_SERVER_TENANT_ID, i))) {
LOG_ERROR("create tenant allocator fail", K(ret), K(i));
}
}
is_inited_ = true;
}
@ -62,7 +99,7 @@ void *ObMallocAllocator::alloc(const int64_t size, const oceanbase::lib::ObMemAt
SANITY_DISABLE_CHECK_RANGE(); // prevent sanity_check_range
int ret = OB_E(EventTable::EN_4) OB_SUCCESS;
void *ptr = NULL;
ObIAllocator *allocator = NULL;
ObTenantCtxAllocatorGuard allocator = NULL;
oceanbase::lib::ObMemAttr inner_attr = attr;
if (inner_attr.ctx_id_ == ObCtxIds::STORAGE_LONG_TERM_META_CTX_ID
|| inner_attr.ctx_id_ == ObCtxIds::STORAGE_SHORT_TERM_META_CTX_ID) {
@ -75,10 +112,17 @@ void *ObMallocAllocator::alloc(const int64_t size, const oceanbase::lib::ObMemAt
LOG_ERROR("invalid argument", K(inner_attr.tenant_id_), K(ret));
} else if (OB_NOT_NULL(allocator = get_tenant_ctx_allocator(inner_attr.tenant_id_, inner_attr.ctx_id_))) {
// do nothing
} else if (OB_FAIL(create_tenant_ctx_allocator(inner_attr.tenant_id_, inner_attr.ctx_id_))) {
LOG_ERROR("create tenant allocator fail", K(ret));
} else {
allocator = get_tenant_ctx_allocator(inner_attr.tenant_id_, inner_attr.ctx_id_);
} else if (inner_attr.tenant_id_ <= OB_USER_TENANT_ID || OB_UNLIKELY(create_on_demand_)) {
if (OB_FAIL(create_and_add_tenant_allocator(inner_attr.tenant_id_))) {
LOG_ERROR("create and add tenant allocator failed", K(ret), K(inner_attr.tenant_id_));
} else {
allocator = get_tenant_ctx_allocator(inner_attr.tenant_id_, inner_attr.ctx_id_);
}
}
if (OB_ISNULL(allocator)) {
ret = OB_ENTRY_NOT_EXIST;
LOG_ERROR("tenant allocator not exist", K(inner_attr.tenant_id_), K(inner_attr.ctx_id_),
K(ret));
}
if (OB_SUCC(ret)) {
@ -117,7 +161,7 @@ void *ObMallocAllocator::realloc(
get_mem_leak_checker().on_free(*obj);
}
oceanbase::lib::ObMemAttr inner_attr = attr;
ObIAllocator *allocator = nullptr;
ObTenantCtxAllocatorGuard allocator = NULL;
if (inner_attr.ctx_id_ == ObCtxIds::STORAGE_LONG_TERM_META_CTX_ID
|| inner_attr.ctx_id_ == ObCtxIds::STORAGE_SHORT_TERM_META_CTX_ID) {
inner_attr.tenant_id_ = OB_SERVER_TENANT_ID;
@ -166,100 +210,178 @@ void ObMallocAllocator::free(void *ptr)
#endif // PERF_MODE
}
ObTenantCtxAllocator *ObMallocAllocator::get_tenant_ctx_allocator(uint64_t tenant_id, uint64_t ctx_id) const
ObTenantCtxAllocatorGuard ObMallocAllocator::get_tenant_ctx_allocator_without_tlcache(
uint64_t tenant_id, uint64_t ctx_id) const
{
ObTenantCtxAllocator *allocator = nullptr;
if (OB_LIKELY(tenant_id < PRESERVED_TENANT_COUNT)) {
allocator = allocators_[tenant_id][ctx_id];
} else {
// TODO: lock slot
const int64_t slot = tenant_id % PRESERVED_TENANT_COUNT;
obsys::ObRLockGuard guard(locks_[slot]);
ObTenantCtxAllocator * const *cur = &allocators_[slot][ctx_id];
while (NULL != *cur && (*cur)->get_tenant_id() < tenant_id) {
cur = &(*cur)->get_next();
}
if (NULL != *cur && (*cur)->get_tenant_id() == tenant_id) {
allocator = *cur;
STATIC_ASSERT(OB_USER_TENANT_ID < PRESERVED_TENANT_COUNT, "preserved count is too small");
if (tenant_id <= OB_USER_TENANT_ID) {
const bool lock = false;
if (OB_NOT_NULL(allocators_[tenant_id])) {
return ObTenantCtxAllocatorGuard(&allocators_[tenant_id][ctx_id], lock);
} else {
return ObTenantCtxAllocatorGuard();
}
}
return allocator;
const int64_t slot = tenant_id % PRESERVED_TENANT_COUNT;
BucketRLockGuard guard(const_cast<BucketLock&>(locks_[slot]),
GETTID() % BucketLock::BUCKET_COUNT);
if (OB_LIKELY(tenant_id < PRESERVED_TENANT_COUNT)) {
if (OB_LIKELY(allocators_[slot] != NULL && allocators_[slot]->get_tenant_id() == tenant_id)) {
return ObTenantCtxAllocatorGuard(&allocators_[slot][ctx_id]);
} else {
return ObTenantCtxAllocatorGuard();
}
}
ObTenantCtxAllocator *allocator = nullptr;
ObTenantCtxAllocator * const *cur = &allocators_[slot];
while (NULL != *cur && (*cur)->get_tenant_id() < tenant_id) {
cur = &(*cur)->get_next();
}
if (NULL != *cur && (*cur)->get_tenant_id() == tenant_id) {
allocator = *cur;
}
if (OB_NOT_NULL(allocator)) {
return ObTenantCtxAllocatorGuard(&allocator[ctx_id]);
} else {
return ObTenantCtxAllocatorGuard();
}
}
int ObMallocAllocator::create_tenant_ctx_allocator(
uint64_t tenant_id,
uint64_t ctx_id)
ObTenantCtxAllocatorGuard ObMallocAllocator::get_tenant_ctx_allocator(uint64_t tenant_id,
uint64_t ctx_id) const
{
if (OB_LIKELY(tl_ta != NULL && tl_ta->get_tenant_id() == tenant_id)) {
const bool lock = false;
return ObTenantCtxAllocatorGuard(&tl_ta[ctx_id], lock);;
}
return get_tenant_ctx_allocator_without_tlcache(tenant_id, ctx_id);
}
// Create the full set of ctx allocators for `tenant_id` and publish it into
// the bucketed allocator table.
// Returns OB_SUCCESS when the allocator is published, or was already present
// for a reserved tenant id (a benign creation race); any other failure is
// returned to the caller.
// (This range previously interleaved the removed create_tenant_ctx_allocator
// body with the new implementation; the stale lines are dropped.)
int ObMallocAllocator::create_and_add_tenant_allocator(uint64_t tenant_id)
{
  int ret = OB_SUCCESS;
  ObTenantCtxAllocator *allocator = NULL;
  if (OB_FAIL(create_tenant_allocator(tenant_id, allocator))) {
    LOG_ERROR("create tenant allocator failed", K(ret), K(tenant_id));
  } else if (OB_FAIL(add_tenant_allocator(allocator))) {
    if (OB_ENTRY_EXIST == ret && tenant_id <= OB_USER_TENANT_ID) {
      // Reserved tenants may be created concurrently from several paths;
      // losing the publish race is not an error.
      LOG_INFO("tenant allocator already exists", K(ret), K(tenant_id));
      ret = OB_SUCCESS;
    } else {
      LOG_ERROR("add tenant allocator failed", K(ret), K(tenant_id));
    }
    // The freshly created allocator was never published: destroy it here,
    // otherwise it leaks whenever add fails (including the swallowed
    // OB_ENTRY_EXIST race above).
    destroy_tenant_allocator(allocator);
    allocator = NULL;
  }
  return ret;
}
// Obtain (or build) the per-ctx allocator array for `tenant_id`.
// Prefers reviving an allocator parked on the unrecycled list, so a tenant's
// memory bookkeeping survives a drop/create cycle; otherwise allocates a
// fresh array of MAX_CTX_ID allocators out of the server tenant's default
// ctx allocator. On success `allocator` points at the array; on failure it
// stays NULL and no memory is leaked.
int ObMallocAllocator::create_tenant_allocator(uint64_t tenant_id, ObTenantCtxAllocator *&allocator)
{
int ret = OB_SUCCESS;
allocator = NULL;
// Reuse beats re-creation: take the tenant's allocator off the unrecycled
// list if one is parked there.
ObTenantCtxAllocator *unrecycled_allocator = take_off_tenant_allocator_unrecycled(tenant_id);
if (unrecycled_allocator != NULL) {
allocator = unrecycled_allocator;
} else {
auto allocer = get_tenant_ctx_allocator(OB_SERVER_TENANT_ID, ObCtxIds::DEFAULT_CTX_ID);
void *buf = NULL;
if (OB_ISNULL(allocer)) {
ret = OB_ENTRY_NOT_EXIST;
LOG_ERROR("get root tenant allocator failed", K(ret));
} else if (OB_ISNULL(buf = allocer->alloc(sizeof(ObTenantCtxAllocator) * ObCtxIds::MAX_CTX_ID,
ObMemAttr(OB_SERVER_TENANT_ID, ObModIds::OB_TENANT_CTX_ALLOCATOR)))) {
ret = OB_ALLOCATE_MEMORY_FAILED;
LOG_ERROR("alloc tenant allocator failed", K(ret));
} else if (OB_FAIL(create_tenant_allocator(tenant_id, buf, allocator))) {
LOG_ERROR("create tenant allocator failed", K(ret), K(tenant_id));
}
// Failure after the buffer was carved out: return it to the root
// allocator so nothing leaks.
if (OB_FAIL(ret) && buf != NULL) {
allocer->free(buf);
buf = NULL;
}
}
return ret;
}
// Tear down a tenant's allocator array: run the destructor of every ctx
// allocator in place, then hand the backing buffer back to the server
// tenant's default ctx allocator that originally provided it.
void ObMallocAllocator::destroy_tenant_allocator(ObTenantCtxAllocator *allocator)
{
  for (int64_t i = 0; i < ObCtxIds::MAX_CTX_ID; i++) {
    allocator[i].~ObTenantCtxAllocator();
  }
  auto root = get_tenant_ctx_allocator(OB_SERVER_TENANT_ID, ObCtxIds::DEFAULT_CTX_ID);
  if (NULL != root) {
    root->free(allocator);
  }
}
// Placement-construct one ObTenantCtxAllocator per ctx id for `tenant_id`
// inside the caller-provided buffer `buf` (which must hold MAX_CTX_ID
// allocators). On success `allocator` aliases `buf`; on failure it stays
// NULL and the caller remains the owner of `buf`.
int ObMallocAllocator::create_tenant_allocator(uint64_t tenant_id, void *buf,
                                               ObTenantCtxAllocator *&allocator)
{
  int ret = OB_SUCCESS;
  allocator = NULL;
  // Advance the high-water tenant id with a CAS loop. The previous
  // check-then-BCAS (re-reading the member as the expected value) could
  // silently lose a racing update and leave max_used_tenant_id_ too small,
  // which would make sync_wash() skip tenants.
  uint64_t cur_max = ATOMIC_LOAD(&max_used_tenant_id_);
  while (tenant_id > cur_max &&
         !ATOMIC_BCAS(&max_used_tenant_id_, cur_max, tenant_id)) {
    cur_max = ATOMIC_LOAD(&max_used_tenant_id_);
  }
  for (int ctx_id = 0; OB_SUCC(ret) && ctx_id < ObCtxIds::MAX_CTX_ID; ctx_id++) {
    auto *ctx_allocator = new ((char*)buf + ctx_id * sizeof(ObTenantCtxAllocator))
        ObTenantCtxAllocator(tenant_id, ctx_id);
    if (OB_FAIL(ctx_allocator->set_tenant_memory_mgr())) {
      LOG_ERROR("set_tenant_memory_mgr failed", K(ret));
    }
  }
  if (OB_SUCC(ret)) {
    allocator = (ObTenantCtxAllocator*)buf;
  }
  return ret;
}
// Insert `allocator` into the singly-linked list of its hash slot. Lists are
// kept sorted by tenant_id so lookups can stop early at the first node whose
// id is >= the target. Returns OB_ENTRY_EXIST when a node with the same
// tenant_id is already linked (the caller decides how to handle the race).
int ObMallocAllocator::add_tenant_allocator(ObTenantCtxAllocator *allocator)
{
int ret = OB_SUCCESS;
uint64_t tenant_id = allocator->get_tenant_id();
const int64_t slot = tenant_id % PRESERVED_TENANT_COUNT;
// critical area is extremely small, just wait without trylock
BucketWLockGuard guard(locks_[slot]);
// Walk link pointers (pointer-to-pointer) so a splice needs no back-pointer.
ObTenantCtxAllocator **cur = &allocators_[slot];
while ((NULL != *cur) && (*cur)->get_tenant_id() < tenant_id) {
cur = &((*cur)->get_next());
}
if (OB_ISNULL(*cur)) {
// Reached the tail: append.
*cur = allocator;
} else if ((*cur)->get_tenant_id() > tenant_id) {
// Found the first larger node: splice in front of it.
ObTenantCtxAllocator *next_allocator = *cur;
*cur = allocator;
((*cur)->get_next()) = next_allocator;
} else {
// Same tenant_id already present.
ret = OB_ENTRY_EXIST;
}
return ret;
}
// Unlink and return the allocator array of `tenant_id` from its hash slot,
// or NULL when the tenant is not present. Ownership passes to the caller.
ObTenantCtxAllocator *ObMallocAllocator::take_off_tenant_allocator(uint64_t tenant_id)
{
  ObTenantCtxAllocator *removed = NULL;
  const int64_t slot = tenant_id % PRESERVED_TENANT_COUNT;
  BucketWLockGuard guard(locks_[slot]);
  // Walk link pointers so the unlink is a single store.
  ObTenantCtxAllocator **link = &allocators_[slot];
  while (NULL != *link && (*link)->get_tenant_id() < tenant_id) {
    link = &(*link)->get_next();
  }
  if (NULL != *link && tenant_id == (*link)->get_tenant_id()) {
    removed = *link;
    *link = removed->get_next();
  }
  return removed;
}
// Bootstrap the server tenant's allocator set. The backing storage is a
// function-local static buffer because the root allocators must exist before
// any dynamic allocation is possible. Failure here is unrecoverable, hence
// abort_unless.
// (This range previously interleaved the removed single-static-allocator body
// with the new buffer-based one, redeclaring `allocator`; the stale lines are
// dropped.)
void ObMallocAllocator::set_root_allocator()
{
  static char buf[sizeof(ObTenantCtxAllocator) * ObCtxIds::MAX_CTX_ID];
  ObTenantCtxAllocator *allocator = NULL;
  abort_unless(OB_SUCCESS == create_tenant_allocator(OB_SERVER_TENANT_ID, buf, allocator));
  abort_unless(OB_SUCCESS == add_tenant_allocator(allocator));
}
ObMallocAllocator *ObMallocAllocator::get_instance()
@ -325,7 +447,7 @@ int64_t ObMallocAllocator::get_tenant_remain(uint64_t tenant_id)
int64_t ObMallocAllocator::get_tenant_ctx_hold(const uint64_t tenant_id, const uint64_t ctx_id) const
{
int64_t hold = 0;
ObTenantCtxAllocator *allocator = nullptr;
ObTenantCtxAllocatorGuard allocator = NULL;
if (OB_ISNULL(allocator = get_tenant_ctx_allocator(tenant_id, ctx_id))) {
// do nothing
} else {
@ -337,7 +459,7 @@ int64_t ObMallocAllocator::get_tenant_ctx_hold(const uint64_t tenant_id, const u
void ObMallocAllocator::get_tenant_label_usage(
uint64_t tenant_id, ObLabel &label, ObLabelItem &item) const
{
ObTenantCtxAllocator *allocator = nullptr;
ObTenantCtxAllocatorGuard allocator = NULL;
for (int64_t i = 0; i < ObCtxIds::MAX_CTX_ID; i++) {
if (OB_ISNULL(allocator = get_tenant_ctx_allocator(tenant_id, i))) {
// do nothing
@ -349,11 +471,16 @@ void ObMallocAllocator::get_tenant_label_usage(
// Print memory usage for every ctx allocator of `tenant_id`. When a ctx is
// not found in the live table, fall back to the unrecycled list so that
// diagnostics remain available for tenants that were dropped but could not
// yet be fully recycled.
// (Dropped: stale raw-pointer declaration and duplicate loop header that were
// pre-patch diff residue.)
void ObMallocAllocator::print_tenant_ctx_memory_usage(uint64_t tenant_id) const
{
  ObTenantCtxAllocatorGuard allocator = NULL;
  for (int64_t ctx_id = 0; ctx_id < ObCtxIds::MAX_CTX_ID; ctx_id++) {
    allocator = get_tenant_ctx_allocator(tenant_id, ctx_id);
    if (OB_LIKELY(NULL != allocator)) {
      allocator->print_memory_usage();
    } else {
      allocator = get_tenant_ctx_allocator_unrecycled(tenant_id, ctx_id);
      if (OB_LIKELY(NULL != allocator)) {
        allocator->print_memory_usage();
      }
    }
  }
}
@ -402,16 +529,6 @@ void ObMallocAllocator::print_tenant_memory_usage(uint64_t tenant_id) const
UNUSED(ret);
}
// NOTE(review): per the hunk header this function was deleted by the commit
// ("@ -402,16 +529,6"), and it still calls the raw-pointer flavor of
// get_tenant_ctx_allocator() that the refactor replaced with a guard type --
// confirm whether this body is stale pre-patch residue before keeping it.
IBlockMgr *ObMallocAllocator::get_tenant_ctx_block_mgr(uint64_t tenant_id, uint64_t ctx_id)
{
IBlockMgr *blk_mgr = nullptr;
ObTenantCtxAllocator *allocator = get_tenant_ctx_allocator(tenant_id, ctx_id);
// NOTE(review): OB_UNLIKELY on the found-allocator path looks inverted --
// an existing allocator should be the common case; verify the branch hint.
if (OB_UNLIKELY(allocator != nullptr)) {
blk_mgr = &allocator->get_block_mgr();
}
return blk_mgr;
}
void ObMallocAllocator::set_urgent(int64_t bytes)
{
CHUNK_MGR.set_urgent(bytes);
@ -438,7 +555,7 @@ int ObMallocAllocator::set_tenant_ctx_idle(const uint64_t tenant_id,
const bool reserve /*=false*/)
{
int ret = OB_SUCCESS;
ObTenantCtxAllocator *allocator = get_tenant_ctx_allocator(tenant_id, ctx_id);
auto allocator = get_tenant_ctx_allocator(tenant_id, ctx_id);
if (NULL == allocator) {
ret = OB_TENANT_NOT_EXIST;
LOG_WARN("tenant or ctx not exist", K(ret), K(tenant_id), K(ctx_id));
@ -452,15 +569,27 @@ int ObMallocAllocator::get_chunks(AChunk **chunks, int cap, int &cnt)
{
int ret = OB_SUCCESS;
for (int64_t slot = 0; OB_SUCC(ret) && slot < PRESERVED_TENANT_COUNT; ++slot) {
obsys::ObRLockGuard guard(locks_[slot]);
for (int64_t ctx_id = 0; OB_SUCC(ret) && ctx_id < ObCtxIds::MAX_CTX_ID; ctx_id++) {
ObTenantCtxAllocator *ta = allocators_[slot][ctx_id];
while (OB_SUCC(ret) && ta != nullptr) {
ta->get_chunks(chunks, cap, cnt);
ObTenantCtxAllocatorGuard tas[16]; // TODO: should be dynamic array, but enough so far
int tas_cnt = 0;
{
BucketRLockGuard guard(locks_[slot], GETTID() % BucketLock::BUCKET_COUNT);
ObTenantCtxAllocator *ta = allocators_[slot];
while (OB_SUCC(ret) && ta != nullptr && tas_cnt < ARRAYSIZEOF(tas)) {
tas[tas_cnt++] = ObTenantCtxAllocatorGuard(ta);
ta = ta->get_next();
}
if (tas_cnt >= ARRAYSIZEOF(tas)) {
LOG_WARN("array size not enough");
// ignore ret
}
}
while (OB_SUCC(ret) && tas_cnt--) {
auto ta = tas[tas_cnt].ref_allocator();
for (int64_t ctx_id = 0; OB_SUCC(ret) &&ctx_id < ObCtxIds::MAX_CTX_ID; ctx_id++) {
ta[ctx_id].get_chunks(chunks, cap, cnt);
if (cnt >= cap) {
ret = OB_SIZE_OVERFLOW;
}
ta = ta->get_next();
}
}
}
@ -474,7 +603,7 @@ int64_t ObMallocAllocator::sync_wash(uint64_t tenant_id, uint64_t from_ctx_id, i
washed_size < wash_size && i < ObCtxIds::MAX_CTX_ID;
i++) {
int64_t ctx_id = (from_ctx_id + i) % ObCtxIds::MAX_CTX_ID;
ObTenantCtxAllocator *allocator = get_tenant_ctx_allocator(tenant_id, ctx_id);
auto allocator = get_tenant_ctx_allocator(tenant_id, ctx_id);
if (NULL == allocator) {
// do-nothing
} else {
@ -487,8 +616,191 @@ int64_t ObMallocAllocator::sync_wash(uint64_t tenant_id, uint64_t from_ctx_id, i
// Wash (reclaim) idle chunks from every tenant created so far, up to the
// recorded high-water tenant id. Returns the total number of bytes washed.
// (Dropped: the removed pre-patch loop header that read the old static
// accessor -- the range contained two consecutive `for` statements sharing
// one body, which could not compile.)
int64_t ObMallocAllocator::sync_wash()
{
  int64_t washed_size = 0;
  for (uint64_t tenant_id = 1; tenant_id <= max_used_tenant_id_; ++tenant_id) {
    washed_size += sync_wash(tenant_id, 0, INT64_MAX);
  }
  return washed_size;
}
// Look up (tenant_id, ctx_id) on the unrecycled list -- allocators parked by
// add_tenant_allocator_unrecycled() after a failed recycle. Returns an empty
// guard when the tenant is not parked there.
ObTenantCtxAllocatorGuard ObMallocAllocator::get_tenant_ctx_allocator_unrecycled(
    uint64_t tenant_id, uint64_t ctx_id) const
{
  ObTenantCtxAllocatorGuard ta;
  ObLatchRGuard guard(const_cast<ObLatch&>(unrecycled_lock_), ObLatchIds::OB_ALLOCATOR_LOCK);
  for (ObTenantCtxAllocator *node = unrecycled_allocators_;
       NULL != node;
       node = node->get_next()) {
    if (node->get_tenant_id() == tenant_id) {
      ta = ObTenantCtxAllocatorGuard(&node[ctx_id]);
      break;
    }
  }
  return ta;
}
// Park a taken-off tenant allocator on the unrecycled list so that its leak
// diagnostics stay reachable and a later re-creation of the tenant can
// revive it (see take_off_tenant_allocator_unrecycled()).
void ObMallocAllocator::add_tenant_allocator_unrecycled(ObTenantCtxAllocator *allocator)
{
#ifdef ENABLE_SANITY
// Poison the tenant's memory before parking it, so late accesses through
// leftover references trip the sanity checker (opt-in via this flag).
if (enable_tenant_leak_memory_protection_) {
modify_tenant_memory_access_permission(allocator, false);
}
#endif
ObLatchWGuard guard(unrecycled_lock_, ObLatchIds::OB_ALLOCATOR_LOCK);
// Push-front onto the singly linked unrecycled list.
allocator->get_next() = unrecycled_allocators_;
unrecycled_allocators_ = allocator;
}
// Remove and return the parked allocator of `tenant_id` from the unrecycled
// list (NULL if absent). Under ENABLE_SANITY the tenant's memory is made
// accessible again before handing it back, reversing the poisoning applied
// by add_tenant_allocator_unrecycled().
ObTenantCtxAllocator *ObMallocAllocator::take_off_tenant_allocator_unrecycled(uint64_t tenant_id)
{
ObTenantCtxAllocator *ta = NULL;
{
ObLatchWGuard guard(unrecycled_lock_, ObLatchIds::OB_ALLOCATOR_LOCK);
ObTenantCtxAllocator **cur = &unrecycled_allocators_;
while (*cur) {
if ((*cur)->get_tenant_id() == tenant_id) {
ta = *cur;
break;
}
cur = &(*cur)->get_next();
}
if (ta != NULL) {
// cur still addresses the link that points at ta; unlink with one store.
*cur = (*cur)->get_next();
}
}
#ifdef ENABLE_SANITY
// Unpoison outside the latch: flipping permissions walks every chunk and
// would otherwise lengthen the critical section.
if (ta != NULL) {
modify_tenant_memory_access_permission(ta, true);
}
#endif
return ta;
}
// Recycle (destroy, or park for later) the allocator set of a dropped tenant.
// Flow: take the allocator off the live table (or the unrecycled list), wash
// idle chunks, wait briefly for outstanding references to drain, verify no
// objects remain unfreed, then either destroy the set or -- if references or
// leaked objects remain -- park it on the unrecycled list so the memory stays
// diagnosable instead of being freed underneath users.
// Returns OB_OP_NOT_ALLOW for reserved tenant ids, OB_ENTRY_NOT_EXIST when
// the tenant has no allocator, OB_ERROR when immediate recycling failed.
int ObMallocAllocator::recycle_tenant_allocator(uint64_t tenant_id)
{
int ret = OB_SUCCESS;
ObTenantCtxAllocator *ta = NULL;
if (create_on_demand_) {
// do-nothing
} else if (tenant_id <= OB_USER_TENANT_ID) {
// Reserved tenants are never recycled.
ret = OB_OP_NOT_ALLOW;
} else if (OB_ISNULL(ta = take_off_tenant_allocator(tenant_id)) &&
OB_ISNULL(ta = take_off_tenant_allocator_unrecycled(tenant_id))) {
ret = OB_ENTRY_NOT_EXIST;
LOG_WARN("tenant allocator not exist", K(ret), K(tenant_id));
} else {
// wash idle chunks
for (int64_t ctx_id = 0; ctx_id < ObCtxIds::MAX_CTX_ID; ctx_id++) {
ta[ctx_id].set_idle(0);
}
// tas[] tracks the ctx allocators still blocking recycling; entries are
// cleared (set NULL) as their refcount reaches zero.
ObTenantCtxAllocator *tas[ObCtxIds::MAX_CTX_ID] = {NULL};
for (int64_t ctx_id = 0; ctx_id < ObCtxIds::MAX_CTX_ID; ctx_id++) {
tas[ctx_id] = &ta[ctx_id];
}
// check references
int wait_times = 3;
int waiting_cnt = ObCtxIds::MAX_CTX_ID;
while (wait_times--) {
for (int64_t ctx_id = 0; ctx_id < ObCtxIds::MAX_CTX_ID; ctx_id++) {
if (NULL == tas[ctx_id]) continue;
int64_t ref_cnt = tas[ctx_id]->get_ref_cnt();
if (0 == ref_cnt) {
LOG_INFO("wait tenant ctx allocator success", K(tenant_id), K(ctx_id),
K(get_global_ctx_info().get_ctx_name(ctx_id)));
tas[ctx_id] = NULL;
waiting_cnt--;
}
}
if (waiting_cnt <= 0) break;
usleep(1 * 1000 * 1000L); // 1s
}
// Anything still in tas[] is held by an upper-layer module.
for (int64_t ctx_id = 0; ctx_id < ObCtxIds::MAX_CTX_ID; ctx_id++) {
ObTenantCtxAllocator *ctx_allocator = tas[ctx_id];
if (ctx_allocator != NULL) {
LOG_ERROR("tenant ctx allocator is still refered by upper-layer modules",
K(tenant_id), K(ctx_id),
K(get_global_ctx_info().get_ctx_name(ctx_id)),
K(ctx_allocator->get_ref_cnt()));
}
}
// check unfree
// For ctxs whose references drained, look for leaked (never-freed)
// objects; a leaker re-marks its slot in tas[] so recycling is deferred.
for (int64_t ctx_id = 0; ctx_id < ObCtxIds::MAX_CTX_ID; ctx_id++) {
ObTenantCtxAllocator *ctx_allocator = tas[ctx_id];
if (NULL == ctx_allocator) {
ctx_allocator = &ta[ctx_id];
const char *first_label = NULL;
bool has_unfree = ctx_allocator->check_has_unfree(&first_label);
if (has_unfree) {
LOG_ERROR("tenant ctx allocator has unfree objects", K(tenant_id),
K(ctx_id), K(get_global_ctx_info().get_ctx_name(ctx_id)), K(first_label));
tas[ctx_id] = ctx_allocator;
}
}
}
bool all_ready = true;
for (int64_t ctx_id = 0; ctx_id < ObCtxIds::MAX_CTX_ID; ctx_id++) {
ObTenantCtxAllocator *ctx_allocator = tas[ctx_id];
if (ctx_allocator != NULL) {
all_ready = false;
}
}
if (!all_ready) {
// Defer: park the whole set instead of freeing memory that is still
// referenced or leaked.
ret = OB_ERROR;
LOG_WARN("failed to recycle tenant allocator immediately", K(ret), K(tenant_id));
add_tenant_allocator_unrecycled(ta);
} else {
destroy_tenant_allocator(ta);
ta = NULL;
LOG_INFO("recycle tenant allocator success", K(tenant_id));
}
}
return ret;
}
// Copy the tenant ids currently parked on the unrecycled list into `ids`,
// writing at most `cap` entries; `cnt` reports how many were written.
void ObMallocAllocator::get_unrecycled_tenant_ids(uint64_t *ids, int cap, int &cnt) const
{
  cnt = 0;
  ObLatchRGuard guard(const_cast<ObLatch&>(unrecycled_lock_), ObLatchIds::OB_ALLOCATOR_LOCK);
  for (ObTenantCtxAllocator *node = unrecycled_allocators_;
       NULL != node && cnt < cap;
       node = node->get_next()) {
    ids[cnt++] = node->get_tenant_id();
  }
}
#ifdef ENABLE_SANITY
// Collect the chunks of every ctx allocator in the tenant's array `ta` into
// `chunks` (capacity `cap`, running count `cnt`). Stops with
// OB_SIZE_OVERFLOW once the output array is full.
int ObMallocAllocator::get_chunks(ObTenantCtxAllocator *ta, AChunk **chunks, int cap, int &cnt)
{
  int ret = OB_SUCCESS;
  for (int64_t ctx_id = 0; ctx_id < ObCtxIds::MAX_CTX_ID && OB_SUCC(ret); ctx_id++) {
    ta[ctx_id].get_chunks(chunks, cap, cnt);
    if (cnt >= cap) {
      ret = OB_SIZE_OVERFLOW;
    }
  }
  return ret;
}
// Flip the sanity-poisoning state of every chunk owned by any ctx allocator
// of this tenant: accessible=false poisons (parked tenants), accessible=true
// unpoisons (revived tenants).
void ObMallocAllocator::modify_tenant_memory_access_permission(ObTenantCtxAllocator *ta, bool accessible)
{
  AChunk *chunk_list[1024] = {nullptr};
  int collected = 0;
  abort_unless(OB_SUCCESS == get_chunks(ta, chunk_list,
                                        sizeof(chunk_list)/sizeof(chunk_list[0]), collected));
  for (int idx = 0; idx < collected; idx++) {
    AChunk *chunk = chunk_list[idx];
    if (nullptr == chunk) {
      continue;
    }
    if (accessible) {
      SANITY_UNPOISON(chunk, chunk->aligned());
    } else {
      SANITY_POISON(chunk, chunk->aligned());
    }
  }
}
#endif
} // end of namespace lib
} // end of namespace oceanbase