remove tenant io manager hash in io manager
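This change removes the tenant_map_ hash and its tenant_map_lock_ from ObIOManager, and drops add_tenant_io_manager/remove_tenant_io_manager/get_tenant_ids from its interface. Each ObTenantIOManager is now created and torn down as an MTL module (mtl_new/mtl_init/mtl_destroy), the server tenant keeps a dedicated server_io_manager_, and code that used to iterate the hash map (adjust_tenant_clock, ObIOTuner::try_release_thread, ObIOTuner::print_io_status) now walks the tenant list from GCTX.omt_ and resolves each tenant through get_tenant_io_manager().

The sketch below is a condensed reading of the new lookup path, pieced together from the hunks in this diff; it is for illustration only and is not a drop-in copy of the final source:

    // get_tenant_io_manager() no longer consults a hash map; it resolves the
    // holder from the server instance or the tenant's MTL module.
    int ObIOManager::get_tenant_io_manager(const uint64_t tenant_id,
                                           ObRefHolder<ObTenantIOManager> &tenant_holder)
    {
      int ret = OB_SUCCESS;
      if (OB_SERVER_TENANT_ID == tenant_id) {
        tenant_holder.hold(server_io_manager_);        // tenant 500 is owned by ObIOManager itself
      } else if (is_virtual_tenant_id(tenant_id)) {
        // virtual tenants have no tenant io manager
      } else if (MTL_ID() == tenant_id) {
        tenant_holder.hold(MTL(ObTenantIOManager*));   // already inside the target tenant
      } else {
        MTL_SWITCH(tenant_id) {                        // switch tenant context, then read its MTL module
          tenant_holder.hold(MTL(ObTenantIOManager*));
        }
      }
      if (OB_SUCC(ret) && OB_ISNULL(tenant_holder.get_ptr())) {
        ret = OB_HASH_NOT_EXIST;                       // keep the old hash-miss error code for callers
      }
      return ret;
    }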
@@ -30,8 +30,7 @@ ObIOManager::ObIOManager()
io_config_(),
allocator_(),
fault_detector_(io_config_),
io_scheduler_(io_config_, allocator_),
tenant_map_lock_(ObLatchIds::TENANT_IO_MANAGE_LOCK)
io_scheduler_(io_config_, allocator_)
{
}

@@ -63,20 +62,24 @@ int ObIOManager::init(const int64_t memory_limit,
LOG_WARN("init io allocator failed", K(ret));
} else if (OB_FAIL(channel_map_.create(7, "IO_CHANNEL_MAP"))) {
LOG_WARN("create channel map failed", K(ret));
} else if (OB_FAIL(tenant_map_.create(7, "IO_TENANT_MAP"))) {
LOG_WARN("create tenant map failed", K(ret));
} else if (OB_FAIL(io_scheduler_.init(schedule_queue_count, schedule_media_id))) {
LOG_WARN("init io scheduler failed", K(ret));
} else if (OB_FAIL(fault_detector_.init())) {
LOG_WARN("init io fault detector failed", K(ret));
} else if (OB_ISNULL(server_io_manager_ = OB_NEW(ObTenantIOManager, "IO_MGR"))) {
} else if (OB_FAIL(server_io_manager_->init(OB_SERVER_TENANT_ID, ObTenantIOConfig::default_instance(), &io_scheduler_))) {
LOG_WARN("init server tenant io mgr failed", K(ret));
} else {
ObMemAttr attr(OB_SERVER_TENANT_ID, "IO_MGR");
SET_USE_500(attr);
allocator_.set_attr(attr);
io_config_.set_default_value();
is_inited_ = true;
if (OB_FAIL(server_io_manager_->start())) {
LOG_WARN("init server tenant io mgr start failed", K(ret));
}
}
if (OB_UNLIKELY(!is_inited_)) {
if (OB_FAIL(ret)) {
destroy();
}
return ret;
@@ -97,21 +100,6 @@ private:
ObIAllocator &allocator_;
};

struct DestroyTenantMapFn
{
public:
DestroyTenantMapFn(ObIAllocator &allocator) : allocator_(allocator) {}
int operator () (oceanbase::common::hash::HashMapPair<uint64_t, ObTenantIOManager *> &entry) {
if (nullptr != entry.second) {
entry.second->~ObTenantIOManager();
allocator_.free(entry.second);
}
return OB_SUCCESS;
}
private:
ObIAllocator &allocator_;
};

void ObIOManager::destroy()
{
stop();
@@ -120,9 +108,8 @@ void ObIOManager::destroy()
DestroyChannelMapFn destry_channel_map_fn(allocator_);
channel_map_.foreach_refactored(destry_channel_map_fn);
channel_map_.destroy();
DestroyTenantMapFn destry_tenant_map_fn(allocator_);
tenant_map_.foreach_refactored(destry_tenant_map_fn);
tenant_map_.destroy();
OB_DELETE(ObTenantIOManager, "IO_MGR", server_io_manager_);
server_io_manager_ = nullptr;
allocator_.destroy();
is_inited_ = false;
}
@@ -146,6 +133,9 @@ int ObIOManager::start()
void ObIOManager::stop()
{
is_working_ = false;
if (OB_NOT_NULL(server_io_manager_)) {
server_io_manager_->stop();
}
io_scheduler_.stop();
}

@@ -343,17 +333,21 @@ int ObIOManager::adjust_tenant_clock()
} else {
ObTenantIOManager *tenant_io_mgr = nullptr;
ObArray<ObTenantIOClock *> io_clocks;
DRWLock::RDLockGuard guard(tenant_map_lock_);
hash::ObHashMap<uint64_t, ObTenantIOManager *>::iterator iter = tenant_map_.begin();
for (; OB_SUCC(ret) && iter != tenant_map_.end(); ++iter) {
if (OB_ISNULL(tenant_io_mgr = iter->second)) {
ret = OB_ERR_UNEXPECTED;
LOG_WARN("tenant io manager is null", K(ret));
ObVector<uint64_t> tenant_ids;
if (OB_NOT_NULL(GCTX.omt_)) {
GCTX.omt_->get_tenant_ids(tenant_ids);
}
for (int64_t i = 0; i < tenant_ids.size(); ++i) {
const uint64_t cur_tenant_id = tenant_ids.at(i);
ObRefHolder<ObTenantIOManager> tenant_holder;
if (OB_FAIL(get_tenant_io_manager(cur_tenant_id, tenant_holder))) {
LOG_WARN("get tenant io manager failed", K(ret), K(cur_tenant_id));
} else if (FALSE_IT(tenant_io_mgr = tenant_holder.get_ptr())) {
} else if (OB_FAIL(io_clocks.push_back(tenant_io_mgr->get_io_clock()))) {
LOG_WARN("push back io clock failed", K(ret), K(tenant_map_.size()));
LOG_WARN("push back io clock failed", K(ret), K(tenant_ids.size()));
}
}
if (OB_SUCC(ret) && !io_clocks.empty()) {
if (!io_clocks.empty()) {
if (OB_FAIL(io_clocks.at(0)->sync_clocks(io_clocks))) {
LOG_WARN("sync io clocks failed", K(ret), K(io_clocks));
}
@@ -473,75 +467,6 @@ int ObIOManager::get_device_channel(const ObIODevice *device_handle, ObDeviceCha
return ret;
}

int ObIOManager::add_tenant_io_manager(const uint64_t tenant_id, const ObTenantIOConfig &tenant_io_config)
{
int ret = OB_SUCCESS;
ObTenantIOManager *tenant_io_mgr = nullptr;
void *buf = nullptr;
if (OB_UNLIKELY(!is_inited_)) {
ret = OB_NOT_INIT;
LOG_WARN("not init", K(ret), K(is_inited_));
} else if (OB_UNLIKELY(tenant_id <= 0 || !tenant_io_config.is_valid())) {
ret = OB_INVALID_ARGUMENT;
LOG_WARN("invalid argument", K(ret), K(tenant_id), K(tenant_io_config));
} else if (OB_ISNULL(buf = allocator_.alloc(sizeof(ObTenantIOManager)))) {
ret = OB_ALLOCATE_MEMORY_FAILED;
LOG_WARN("alloc memory failed", K(ret));
} else if (FALSE_IT(tenant_io_mgr = new (buf) ObTenantIOManager())) {
} else if (OB_FAIL(tenant_io_mgr->init(tenant_id, tenant_io_config, &io_scheduler_))) {
LOG_WARN("init tenant io manager failed", K(ret), K(tenant_id), K(tenant_io_config));
} else if (OB_FAIL(tenant_io_mgr->start())) {
LOG_WARN("start tenant io manager failed", K(ret), K(tenant_id));
} else {
tenant_io_mgr->inc_ref();
DRWLock::WRLockGuard guard(tenant_map_lock_);
if (OB_FAIL(tenant_map_.set_refactored(tenant_id, tenant_io_mgr))) {
LOG_WARN("put into tenant map failed", K(ret), K(tenant_id), KP(tenant_io_mgr));
} else {
LOG_INFO("add tenant io manager success", K(tenant_id), KPC(tenant_io_mgr));
tenant_io_mgr = nullptr;
}
}
if (OB_SUCC(ret)) {
int tmp_ret = adjust_tenant_clock();
if (OB_UNLIKELY(OB_SUCCESS != tmp_ret)) {
LOG_WARN("adjust tenant clock failed", K(tmp_ret));
}
}
if (OB_UNLIKELY(nullptr != tenant_io_mgr)) {
tenant_io_mgr->~ObTenantIOManager();
allocator_.free(tenant_io_mgr);
}
return ret;
}

int ObIOManager::remove_tenant_io_manager(const uint64_t tenant_id)
{
int ret = OB_SUCCESS;
ObTenantIOManager *tenant_io_mgr = nullptr;
if (OB_UNLIKELY(!is_inited_)) {
ret = OB_NOT_INIT;
LOG_WARN("not init", K(ret), K(is_inited_));
} else if (OB_UNLIKELY(tenant_id <= 0)) {
ret = OB_INVALID_ARGUMENT;
LOG_WARN("invalid argument", K(ret), K(tenant_id));
} else if (OB_FAIL(io_scheduler_.remove_phyqueues(tenant_id))) {
LOG_WARN("remove phy_queues from map failed", K(ret), K(tenant_id));
} else {
DRWLock::WRLockGuard guard(tenant_map_lock_);
if (OB_FAIL(tenant_map_.erase_refactored(tenant_id, &tenant_io_mgr))) {
LOG_WARN("remove tenant io manager failed", K(ret), K(tenant_id), KP(tenant_io_mgr));
} else {
LOG_INFO("remove tenant io manager success", K(tenant_id), KP(tenant_io_mgr));
}
}
if (OB_SUCC(ret) && nullptr != tenant_io_mgr) {
tenant_io_mgr->stop();
tenant_io_mgr->dec_ref();
}
return ret;
}

int ObIOManager::refresh_tenant_io_config(const uint64_t tenant_id, const ObTenantIOConfig &tenant_io_config)
{
int ret = OB_SUCCESS;
@@ -588,27 +513,22 @@ int ObIOManager::modify_group_io_config(const uint64_t tenant_id,
int ObIOManager::get_tenant_io_manager(const uint64_t tenant_id, ObRefHolder<ObTenantIOManager> &tenant_holder)
{
int ret = OB_SUCCESS;
ObTenantIOManager *tenant_io_mgr = nullptr;
DRWLock::RDLockGuard guard(tenant_map_lock_);
if (OB_FAIL(tenant_map_.get_refactored(tenant_id, tenant_io_mgr))) {
LOG_WARN("get tenant io manager failed", K(ret), K(tenant_id));
} else {
if (OB_SERVER_TENANT_ID == tenant_id) {
tenant_holder.hold(server_io_manager_);
} else if (is_virtual_tenant_id(tenant_id)) {
// DO NOT SUPPORT
} else if (MTL_ID() == tenant_id) {
ObTenantIOManager *tenant_io_mgr = MTL(ObTenantIOManager*);
tenant_holder.hold(tenant_io_mgr);
}
return ret;
}

int ObIOManager::get_tenant_ids(ObIArray<uint64_t> &tenant_ids)
{
int ret = OB_SUCCESS;
tenant_ids.reset();
DRWLock::RDLockGuard guard(tenant_map_lock_);
hash::ObHashMap<uint64_t, ObTenantIOManager *>::iterator iter = tenant_map_.begin();
for (; OB_SUCC(ret) && iter != tenant_map_.end(); ++iter) {
if (OB_FAIL(tenant_ids.push_back(iter->first))) {
LOG_WARN("push back tenant id failed", K(ret), K(iter->first));
} else {
MTL_SWITCH(tenant_id) {
ObTenantIOManager *tenant_io_mgr = MTL(ObTenantIOManager*);
tenant_holder.hold(tenant_io_mgr);
}
}
if (OB_SUCC(ret) && OB_ISNULL(tenant_holder.get_ptr())) {
ret = OB_HASH_NOT_EXIST; // for compatibility
}
return ret;
}

@@ -618,27 +538,38 @@ ObIOScheduler *ObIOManager::get_scheduler()
}

/****************** TenantIOManager **********************/

int ObTenantIOManager::mtl_new(ObTenantIOManager *&io_service)
{
int ret = OB_SUCCESS;
void *buf = nullptr;
io_service = nullptr;
if (is_virtual_tenant_id(MTL_ID())) {
// do nothing
} else if (OB_ISNULL(buf = OB_IO_MANAGER.allocator_.alloc(sizeof(ObTenantIOManager)))) {
ret = OB_ALLOCATE_MEMORY_FAILED;
FLOG_WARN("failed to alloc tenant io mgr", K(ret));
} else {
io_service = new (buf) ObTenantIOManager();
}
return ret;
}

int ObTenantIOManager::mtl_init(ObTenantIOManager *&io_service)
{
int ret = OB_SUCCESS;
io_service = nullptr;
const uint64_t tenant_id = MTL_ID();
ObRefHolder<ObTenantIOManager> holder;
if (OB_FAIL(OB_IO_MANAGER.add_tenant_io_manager(tenant_id, ObTenantIOConfig::default_instance()))) {
if (OB_HASH_EXIST != ret) {
LOG_WARN("add tenant io manager failed", K(ret), K(tenant_id));
if (OB_ISNULL(io_service)) {
if (is_virtual_tenant_id(tenant_id)) {
// do nothing
} else {
ret = OB_SUCCESS;
ret = OB_INVALID_ARGUMENT;
}
}
if (OB_SUCC(ret)) {
if (OB_FAIL(OB_IO_MANAGER.get_tenant_io_manager(tenant_id, holder))) {
LOG_WARN("get tenant io manager failed", K(ret), K(tenant_id));
} else {
io_service = holder.get_ptr();
}
}
if (OB_SUCC(ret)) {
} else if (OB_FAIL(io_service->init(tenant_id,
ObTenantIOConfig::default_instance(),
&OB_IO_MANAGER.io_scheduler_))) {
FLOG_WARN("mtl iit tenant io manager failed", K(tenant_id));
} else {
FLOG_INFO("mtl init tenant io manager success", K(tenant_id), KPC(io_service));
}
return ret;
@@ -647,36 +578,11 @@ int ObTenantIOManager::mtl_init(ObTenantIOManager *&io_service)
void ObTenantIOManager::mtl_destroy(ObTenantIOManager *&io_service)
{
int ret = OB_SUCCESS;
const int64_t start_ts = ObTimeUtility::current_time();
while (OB_NOT_NULL(io_service) && OB_SUCC(ret)) {
if (io_service->get_ref_cnt() == 1) {
break;
} else {
if (REACH_TIME_INTERVAL(1000L * 1000L)) { //1s
LOG_INFO("wait tenant io manager quit", K(MTL_ID()), K(start_ts), K(io_service->get_ref_cnt()));
}
ob_usleep((useconds_t)10L * 1000L); //10ms
}
}

const uint64_t tenant_id = MTL_ID();
if (OB_FAIL(OB_IO_MANAGER.remove_tenant_io_manager(tenant_id))) {
if (OB_HASH_NOT_EXIST != ret) {
LOG_WARN("remove tenant io manager failed", K(ret), K(tenant_id));
} else {
ret = OB_SUCCESS;
}
}
if (OB_SUCC(ret)) {
if (OB_NOT_NULL(io_service) && io_service->get_ref_cnt() == 0) {
io_service->~ObTenantIOManager();
OB_IO_MANAGER.allocator_.free(io_service);
io_service = nullptr;
FLOG_INFO("mtl destroy tenant io manager success", K(tenant_id));
} else if (OB_NOT_NULL(io_service) && io_service->get_ref_cnt() != 0) {
ret = OB_ERR_UNEXPECTED;
LOG_ERROR("ERROR: tenant io manager ref_cnt is not zero", K(ret));
}
if (OB_NOT_NULL(io_service)) {
io_service->~ObTenantIOManager();
OB_IO_MANAGER.allocator_.free(io_service);
io_service = nullptr;
FLOG_INFO("mtl destroy tenant io manager success", K(MTL_ID()));
}
}

@@ -740,6 +646,7 @@ int ObTenantIOManager::init(const uint64_t tenant_id,
} else {
tenant_id_ = tenant_id;
io_scheduler_ = io_scheduler;
inc_ref();
is_inited_ = true;
}
if (OB_UNLIKELY(!is_inited_)) {
@@ -750,7 +657,21 @@ int ObTenantIOManager::init(const uint64_t tenant_id,

void ObTenantIOManager::destroy()
{
ATOMIC_SET(&is_working_, false);
ATOMIC_STORE(&is_working_, false);

const int64_t start_ts = ObTimeUtility::current_time();
while (1 != get_ref_cnt()) {
if (REACH_TIME_INTERVAL(1000L * 1000L)) { //1s
LOG_INFO("wait tenant io manager quit", K(MTL_ID()), K(start_ts), K(get_ref_cnt()));
}
ob_usleep((useconds_t)10L * 1000L); //10ms
}
dec_ref();

int ret = OB_SUCCESS;
if (OB_NOT_NULL(io_scheduler_) && OB_FAIL(io_scheduler_->remove_phyqueues(MTL_ID()))) {
LOG_WARN("remove phy_queues from map failed", K(ret), K(MTL_ID()));
}

if (OB_NOT_NULL(io_clock_)) {
io_clock_->destroy();
@@ -785,13 +706,17 @@ int ObTenantIOManager::start()
LOG_WARN("init callback manager failed", K(ret), K(tenant_id_), K(callback_thread_count));
} else {
is_working_ = true;
int tmp_ret = OB_IO_MANAGER.adjust_tenant_clock();
if (OB_UNLIKELY(OB_SUCCESS != tmp_ret)) {
LOG_WARN("adjust tenant clock failed", K(tmp_ret));
}
}
return ret;
}

void ObTenantIOManager::stop()
{
ATOMIC_SET(&is_working_, false);
ATOMIC_STORE(&is_working_, false);
callback_mgr_.destroy();
}

@@ -1672,7 +1597,7 @@ void ObTenantIOManager::dec_ref()
int64_t tmp_ref = ATOMIC_SAF(&ref_cnt_, 1);
if (tmp_ref < 0) {
ret = OB_ERR_UNEXPECTED;
LOG_ERROR("bug: ref_cnt < 0", K(ret), K(tmp_ref), KCSTRING(lbt()));
LOG_ERROR("bug: ref_cnt < 0", K(ret), K(tmp_ref));
abort();
}
}

@@ -68,11 +68,8 @@ public:
int get_device_channel(const ObIODevice *device_handle, ObDeviceChannel *&device_channel);

// tenant management
int add_tenant_io_manager(const uint64_t tenant_id, const ObTenantIOConfig &tenant_io_config);
int remove_tenant_io_manager(const uint64_t tenant_id);
int refresh_tenant_io_config(const uint64_t tenant_id, const ObTenantIOConfig &tenant_io_config);
int get_tenant_io_manager(const uint64_t tenant_id, ObRefHolder<ObTenantIOManager> &tenant_holder);
int get_tenant_ids(ObIArray<uint64_t> &tenant_ids);
int modify_group_io_config(const uint64_t tenant_id,
const uint64_t index,
const int64_t min_percent,
@@ -98,13 +95,13 @@ private:
hash::ObHashMap<int64_t /*device_handle*/, ObDeviceChannel *> channel_map_;
ObIOFaultDetector fault_detector_;
ObIOScheduler io_scheduler_;
DRWLock tenant_map_lock_;
hash::ObHashMap<uint64_t /*tenant_id*/, ObTenantIOManager *> tenant_map_;
ObTenantIOManager *server_io_manager_;
};

class ObTenantIOManager final
{
public:
static int mtl_new(ObTenantIOManager *&io_service);
static int mtl_init(ObTenantIOManager *&io_service);
static void mtl_destroy(ObTenantIOManager *&io_service);
public:

@@ -772,13 +772,12 @@ int ObIOTuner::init()
if (OB_UNLIKELY(is_inited_)) {
ret = OB_INIT_TWICE;
LOG_WARN("init twice", K(ret));
} else if (OB_FAIL(TG_SET_RUNNABLE_AND_START(lib::TGDefIDs::IO_TUNING, *this))) {
LOG_WARN("start io scheduler failed", K(ret));
} else {
is_inited_ = true;
if (OB_FAIL(TG_SET_RUNNABLE_AND_START(lib::TGDefIDs::IO_TUNING, *this))) {
LOG_WARN("start io scheduler failed", K(ret));
}
}
if (OB_UNLIKELY(!is_inited_)) {
if (OB_FAIL(ret)) {
destroy();
}
return ret;
@@ -862,14 +861,17 @@ void ObIOTuner::print_sender_status()
int ObIOTuner::try_release_thread()
{
int ret = OB_SUCCESS;
ObArray<uint64_t> tenant_ids;
if (OB_FAIL(OB_IO_MANAGER.get_tenant_ids(tenant_ids))) {
LOG_WARN("get tenant id failed", K(ret));
} else if (tenant_ids.count() > 0) {
for (int64_t i = 0; OB_SUCC(ret) && i < tenant_ids.count(); ++i) {
ObVector<uint64_t> tenant_ids;
if (OB_NOT_NULL(GCTX.omt_)) {
GCTX.omt_->get_tenant_ids(tenant_ids);
}
if (tenant_ids.size() > 0) {
for (int64_t i = 0; OB_SUCC(ret) && i < tenant_ids.size(); ++i) {
const uint64_t cur_tenant_id = tenant_ids.at(i);
ObRefHolder<ObTenantIOManager> tenant_holder;
if (OB_FAIL(OB_IO_MANAGER.get_tenant_io_manager(cur_tenant_id, tenant_holder))) {
if (is_virtual_tenant_id(cur_tenant_id)) {
// do nothing
} else if (OB_FAIL(OB_IO_MANAGER.get_tenant_io_manager(cur_tenant_id, tenant_holder))) {
LOG_WARN("get tenant io manager failed", K(ret), K(cur_tenant_id));
} else {
tenant_holder.get_ptr()->get_callback_mgr().try_release_thread();
@@ -882,14 +884,17 @@ int ObIOTuner::try_release_thread()
void ObIOTuner::print_io_status()
{
int ret = OB_SUCCESS;
ObArray<uint64_t> tenant_ids;
if (OB_FAIL(OB_IO_MANAGER.get_tenant_ids(tenant_ids))) {
LOG_WARN("get tenant id failed", K(ret));
} else if (tenant_ids.count() > 0) {
for (int64_t i = 0; OB_SUCC(ret) && i < tenant_ids.count(); ++i) {
ObVector<uint64_t> tenant_ids;
if (OB_NOT_NULL(GCTX.omt_)) {
GCTX.omt_->get_tenant_ids(tenant_ids);
}
if (tenant_ids.size() > 0) {
for (int64_t i = 0; OB_SUCC(ret) && i < tenant_ids.size(); ++i) {
const uint64_t cur_tenant_id = tenant_ids.at(i);
ObRefHolder<ObTenantIOManager> tenant_holder;
if (OB_FAIL(OB_IO_MANAGER.get_tenant_io_manager(cur_tenant_id, tenant_holder))) {
if (is_virtual_tenant_id(cur_tenant_id)) {
// do nothing
} else if (OB_FAIL(OB_IO_MANAGER.get_tenant_io_manager(cur_tenant_id, tenant_holder))) {
if (OB_HASH_NOT_EXIST != ret) {
LOG_WARN("get tenant io manager failed", K(ret), K(cur_tenant_id));
} else {
@@ -1042,7 +1047,7 @@ public:
LOG_WARN("get tenant io manager failed", K(ret), K(entry.first));
} else {
entry.second->~ObIOGroupQueues();
if (OB_NOT_NULL(tenant_holder.get_ptr()->get_tenant_io_allocator())) {
if (OB_NOT_NULL(tenant_holder.get_ptr()) && OB_NOT_NULL(tenant_holder.get_ptr()->get_tenant_io_allocator())) {
tenant_holder.get_ptr()->get_tenant_io_allocator()->free(entry.second);
}
}