Interm Result Management is Split into Tenants
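
Every hunk in this commit makes the same substitution: the process-wide singleton dtl::ObDTLIntermResultManager::getInstance() is replaced with a tenant-local instance fetched through MTL(dtl::ObDTLIntermResultManager*), so each tenant manages its own intermediate results independently. The snippet below is a minimal standalone sketch of that pattern; the IntermResultManager class and the tenant_local() registry are illustrative stand-ins, not OceanBase's MTL framework.

#include <cstdint>
#include <iostream>
#include <map>
#include <memory>
#include <mutex>
#include <string>
#include <unordered_map>

// Illustrative stand-in for an intermediate-result manager: one instance per
// tenant instead of one process-wide singleton.
class IntermResultManager {
public:
  int insert_interm_result_info(int64_t channel_id, const std::string &info) {
    std::lock_guard<std::mutex> guard(lock_);
    results_[channel_id] = info;
    return 0;
  }
  int erase_interm_result_info(int64_t channel_id) {
    std::lock_guard<std::mutex> guard(lock_);
    return results_.erase(channel_id) == 1 ? 0 : -1;  // -1: key not found
  }
private:
  std::mutex lock_;
  std::unordered_map<int64_t, std::string> results_;
};

// Hypothetical per-tenant registry playing the role of MTL(): each tenant id
// resolves to its own manager, so tenants never share one result map.
IntermResultManager *tenant_local(uint64_t tenant_id) {
  static std::mutex registry_lock;
  static std::map<uint64_t, std::unique_ptr<IntermResultManager>> registry;
  std::lock_guard<std::mutex> guard(registry_lock);
  auto &slot = registry[tenant_id];
  if (!slot) {
    slot = std::make_unique<IntermResultManager>();
  }
  return slot.get();
}

int main() {
  // Before the commit every tenant shared getInstance(); after it, each call
  // site first resolves the manager that belongs to the current tenant.
  tenant_local(1001)->insert_interm_result_info(42, "tenant 1001, channel 42");
  tenant_local(1002)->insert_interm_result_info(42, "tenant 1002, channel 42");
  std::cout << tenant_local(1001)->erase_interm_result_info(42) << "\n";  // 0
  std::cout << tenant_local(1001)->erase_interm_result_info(42) << "\n";  // -1
  return 0;
}
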
@@ -365,7 +365,7 @@ int ObTempTableAccessOp::locate_interm_result(int64_t result_id)
   // The current operation of obtaining intermediate results and
   // the operation of the background thread of dumping intermediate results
   // are mutually exclusive
-  if (OB_FAIL(dtl::ObDTLIntermResultManager::getInstance().atomic_get_interm_result_info(
+  if (OB_FAIL(MTL(dtl::ObDTLIntermResultManager*)->atomic_get_interm_result_info(
                  dtl_int_key, result_info_guard_))) {
     LOG_WARN("failed to create row store.", K(ret));
   } else if (FALSE_IT(result_info = result_info_guard_.result_info_)) {

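The comment in the hunk above states why the lookup goes through atomic_get_interm_result_info: a reader picking up an intermediate result and the background thread that dumps results to disk must be mutually exclusive. A minimal sketch of that shape, assuming the atomic get runs a callback while holding the same lock the dump thread takes (the real manager's locking and reference counting are not visible in this diff):

#include <cstdint>
#include <mutex>
#include <unordered_map>
#include <vector>

struct ResultInfo {
  std::vector<int64_t> rows;   // stand-in for the row store
  bool dumped = false;
};

// Sketch only: atomic_get and the background dump both run under one lock,
// so a result can never be dumped while a reader is in the middle of
// grabbing it.
class ResultMap {
public:
  template <typename Fn>
  int atomic_get(int64_t key, Fn &&fn) {
    std::lock_guard<std::mutex> guard(lock_);
    auto it = map_.find(key);
    if (it == map_.end()) {
      return -1;               // not found
    }
    fn(it->second);            // reader callback, executed under the lock
    return 0;
  }

  void background_dump_all() {
    std::lock_guard<std::mutex> guard(lock_);
    for (auto &entry : map_) {
      entry.second.dumped = true;  // a real dump would spill rows to disk here
    }
  }

private:
  std::mutex lock_;
  std::unordered_map<int64_t, ResultInfo> map_;
};
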
@@ -260,7 +260,7 @@ int ObTempTableInsertOp::init_chunk_row_store(ObDTLIntermResultInfo *&chunk_row_
   uint64_t tenant_id = ctx_.get_my_session()->get_effective_tenant_id();
   ObMemAttr mem_attr(tenant_id, "TempTableInsert", ObCtxIds::WORK_AREA);
   dtl::ObDTLIntermResultInfoGuard result_info_guard;
-  if (OB_FAIL(dtl::ObDTLIntermResultManager::getInstance().create_interm_result_info(
+  if (OB_FAIL(MTL(dtl::ObDTLIntermResultManager*)->create_interm_result_info(
                  mem_attr,
                  result_info_guard,
                  dtl::ObDTLIntermResultMonitorInfo(

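In init_chunk_row_store above, the new result is created with an ObMemAttr carrying the effective tenant id, and ownership is handed to a dtl::ObDTLIntermResultInfoGuard. The guard's exact API is not part of this diff; the sketch below only illustrates the assumed RAII idea of pinning a reference for the enclosing scope and releasing it automatically:

#include <cstdint>

struct IntermResultInfo {
  uint64_t tenant_id = 0;
  int64_t ref_count = 0;
};

// Hypothetical scope guard: keeps a result info pinned while an operator uses
// it and drops the reference when the guard goes out of scope.
class ResultInfoGuard {
public:
  ~ResultInfoGuard() { reset(); }
  void set(IntermResultInfo *info) {
    reset();
    info_ = info;
    if (info_ != nullptr) {
      ++info_->ref_count;
    }
  }
  IntermResultInfo *result_info() const { return info_; }
private:
  void reset() {
    if (info_ != nullptr) {
      --info_->ref_count;      // the owning manager could free it at zero
      info_ = nullptr;
    }
  }
  IntermResultInfo *info_ = nullptr;
};
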
@@ -305,11 +305,11 @@ int ObTempTableInsertOp::insert_chunk_row_store()
   if (OB_ISNULL(phy_plan_ctx = GET_PHY_PLAN_CTX(ctx_))) {
     ret = OB_ERR_UNEXPECTED;
     LOG_WARN("phy_plan_ctx is NULL", K(ret));
-  } else if (!MY_SPEC.is_distributed_ &&
+  } else if (!MY_SPEC.is_distributed_ &&
              all_datum_store_.empty() &&
              OB_FAIL(init_chunk_row_store(chunk_row_store))) {
     // a local temp table needs an empty row store as a placeholder
-    LOG_WARN("failed to init chunk row store", K(ret));
+    LOG_WARN("failed to init chunk row store", K(ret));
   } else if (!MY_SPEC.is_distributed_ && all_datum_store_.count() != 1) {
     ret = OB_ERR_UNEXPECTED;
     LOG_WARN("local temp table shoud have one chunk row store", K(ret));

@@ -334,12 +334,12 @@ int ObTempTableInsertOp::insert_chunk_row_store()
     row_store->set_eof(true);
     // the chunk row store does not need to manage the dump logic
     row_store->is_read_ = true;
-    if (OB_FAIL(dtl::ObDTLIntermResultManager::getInstance().insert_interm_result_info(
+    if (OB_FAIL(MTL(dtl::ObDTLIntermResultManager*)->insert_interm_result_info(
                    dtl_int_key, row_store))) {
       LOG_WARN("failed to insert row store.", K(ret), K(dtl_int_key.channel_id_));
     } else if (OB_FAIL(keys_insert.push_back(dtl_int_key))) {
       LOG_WARN("failed to push back key", K(ret));
-      dtl::ObDTLIntermResultManager::getInstance().erase_interm_result_info(dtl_int_key);
+      MTL(dtl::ObDTLIntermResultManager*)->erase_interm_result_info(dtl_int_key);
     } else {
       row_store->datum_store_->reset_callback();
       ObPxSqcHandler *handler = ctx_.get_sqc_handler();

@@ -361,7 +361,7 @@ int ObTempTableInsertOp::insert_chunk_row_store()
   if (OB_FAIL(ret)) {
     // error handling
     for (int64_t i = 0; i < keys_insert.count(); ++i) {
-      dtl::ObDTLIntermResultManager::getInstance().erase_interm_result_info(keys_insert.at(i));
+      MTL(dtl::ObDTLIntermResultManager*)->erase_interm_result_info(keys_insert.at(i));
     }
   } else {
     clear_all_datum_store();

@@ -240,7 +240,7 @@ int ObTempTableTransformationOp::destory_local_interm_results(ObIArray<uint64_t>
   LOG_TRACE("destory interm results", K(get_exec_ctx().get_addr()), K(result_ids));
   for (int64_t i = 0; OB_SUCC(ret) && i < result_ids.count(); ++i) {
     dtl_int_key.channel_id_ = result_ids.at(i);
-    if (OB_FAIL(dtl::ObDTLIntermResultManager::getInstance().erase_interm_result_info(
+    if (OB_FAIL(MTL(dtl::ObDTLIntermResultManager*)->erase_interm_result_info(
                    dtl_int_key))) {
       if (OB_HASH_NOT_EXIST == ret) {
         ret = OB_SUCCESS;

@@ -382,7 +382,7 @@ int ObPxReceiveOp::inner_rescan()
     channel->reset_state();
     channel->set_batch_id(ctx_.get_px_batch_id());
     channel->reset_px_row_iterator();
-    release_channel_ret = ObDTLIntermResultManager::getInstance().erase_interm_result_info(key);
+    release_channel_ret = MTL(ObDTLIntermResultManager*)->erase_interm_result_info(key);
     if (release_channel_ret != common::OB_SUCCESS) {
       LOG_WARN("fail to release recieve internal result", KR(release_channel_ret), K(ret));
     }

@@ -541,7 +541,7 @@ int ObPxReceiveOp::erase_dtl_interm_result()
   for (int64_t batch_id = ctx_.get_px_batch_id();
        batch_id < PX_RESCAN_BATCH_ROW_COUNT && OB_SUCC(ret); batch_id++) {
     key.batch_id_ = batch_id;
-    if (OB_FAIL(ObDTLIntermResultManager::getInstance().erase_interm_result_info(key))) {
+    if (OB_FAIL(MTL(ObDTLIntermResultManager*)->erase_interm_result_info(key))) {
       if (OB_HASH_NOT_EXIST == ret) {
         ret = OB_SUCCESS;
         break;

@@ -1121,7 +1121,7 @@ int ObPxCoordOp::erase_dtl_interm_result()
     key.channel_id_ = ci.chid_;
     for (int j = 0; j < last_px_batch_rescan_size_; ++j) {
       key.batch_id_ = j;
-      if (OB_FAIL(ObDTLIntermResultManager::getInstance().erase_interm_result_info(key))) {
+      if (OB_FAIL(MTL(ObDTLIntermResultManager*)->erase_interm_result_info(key))) {
        LOG_TRACE("fail to release recieve internal result", K(ret));
       }
     }

@@ -660,7 +660,7 @@ int ObPxCleanDtlIntermResP::process()
       key.channel_id_ = ch_set.get_ch_info_set().at(ch_idx).chid_;
       for (int64_t batch_id = 0; batch_id < batch_size && OB_SUCC(ret); batch_id++) {
         key.batch_id_= batch_id;
-        if (OB_FAIL(dtl::ObDTLIntermResultManager::getInstance().erase_interm_result_info(key))) {
+        if (OB_FAIL(MTL(dtl::ObDTLIntermResultManager*)->erase_interm_result_info(key))) {
           if (OB_HASH_NOT_EXIST == ret) {
             // interm result is written from batch_id = 0 to batch_size,
             // if some errors happen when batch_id = i, no interm result of batch_id > i will be written.

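The trailing comment above is the invariant that makes the erase loops in this commit safe to cut short: batches are written in order starting from batch_id 0, so the first batch id that is not found marks the end of everything that was ever produced, and the loop can stop there (as the OB_HASH_NOT_EXIST handling in ObPxReceiveOp::erase_dtl_interm_result does). A small sketch of that early-exit cleanup, with an ordinary set standing in for the tenant's result map:

#include <cstdint>
#include <iostream>
#include <set>

// Erase the per-batch results of one channel; stop at the first batch id that
// was never written, relying on the "written in order from 0" invariant.
int64_t erase_channel_batches(std::set<int64_t> &stored_batches, int64_t batch_size) {
  int64_t erased = 0;
  for (int64_t batch_id = 0; batch_id < batch_size; ++batch_id) {
    if (stored_batches.erase(batch_id) == 0) {
      break;                   // nothing with a larger batch_id can exist
    }
    ++erased;
  }
  return erased;
}

int main() {
  // Batches 0..2 were written before an error stopped the producer at batch 3.
  std::set<int64_t> stored = {0, 1, 2};
  std::cout << erase_channel_batches(stored, 8) << " batches erased\n";  // 3
  return 0;
}
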