fix tenant_role: stop treating user tenants as PRIMARY by default
This commit is contained in:
parent 03bb761572
commit ceaef5c7da
@@ -803,7 +803,7 @@ int ObGCHandler::try_check_and_set_wait_gc_when_log_archive_is_off_(
     ls_status = ObGarbageCollector::LSStatus::LS_NEED_DELETE_ENTRY;
     CLOG_LOG(INFO, "Tenant is dropped and the log stream can be removed, try_check_and_set_wait_gc_ success",
         K(tenant_id), K(ls_id), K(gc_state), K(offline_scn), K(readable_scn));
-  } else if (offline_scn.is_valid() && MTL_GET_TENANT_ROLE() == share::ObTenantRole::RESTORE_TENANT) {
+  } else if (offline_scn.is_valid() && MTL_GET_TENANT_ROLE_CACHE() == share::ObTenantRole::RESTORE_TENANT) {
     // restore tenant, not need gc delay
     if (OB_FAIL(ls_->set_gc_state(LSGCState::WAIT_GC))) {
       CLOG_LOG(WARN, "set_gc_state failed", K(ls_id), K(gc_state), K(ret));
@@ -80,7 +80,7 @@ int ObLogRestoreScheduler::modify_thread_count_(const share::ObLogRestoreSourceT
   int64_t restore_concurrency = 0;
   // for primary tenant, set restore_concurrency to 1.
   // otherwise, set restore_concurrency to tenant config.
-  if (MTL_GET_TENANT_ROLE() == share::ObTenantRole::PRIMARY_TENANT
+  if (MTL_GET_TENANT_ROLE_CACHE() == share::ObTenantRole::PRIMARY_TENANT
       || !share::is_location_log_source_type(source_type)) {
     restore_concurrency = 1;
   } else {
@@ -167,7 +167,7 @@ void ObLogRestoreService::run1()
   } else {
     while (! has_set_stop()) {
       int64_t begin_stamp = ObTimeUtility::current_time();
-      const bool is_primary = MTL_GET_TENANT_ROLE() == share::ObTenantRole::PRIMARY_TENANT;
+      const bool is_primary = MTL_GET_TENANT_ROLE_CACHE() == share::ObTenantRole::PRIMARY_TENANT;
       const int64_t thread_interval = is_primary ? PRIMARY_THREAD_RUN_INTERVAL : STANDBY_THREAD_RUN_INTERVAL;
       do_thread_task_();
       int64_t end_tstamp = ObTimeUtility::fast_current_time();
@@ -102,7 +102,7 @@ int ObRemoteLocationAdaptor::update_upstream(share::ObLogRestoreSourceItem &sour
 
 bool ObRemoteLocationAdaptor::is_tenant_primary_()
 {
-  return MTL_GET_TENANT_ROLE() == share::ObTenantRole::PRIMARY_TENANT;
+  return MTL_GET_TENANT_ROLE_CACHE() == share::ObTenantRole::PRIMARY_TENANT;
 }
 
 int ObRemoteLocationAdaptor::do_update_(const bool is_add_source, const share::ObLogRestoreSourceItem &item)
@@ -69,7 +69,7 @@ int ObAllVirtualTimestampService::get_next_tenant_id_info_()
       tenant_ids_index_++;
     } else {
       MTL(ObTimestampAccess *)->get_virtual_info(ts_value_, service_role_, role_, service_epoch_);
-      is_primary_ = MTL_IS_PRIMARY_TENANT();
+      is_primary_ = MTL_TENANT_ROLE_CACHE_IS_PRIMARY_OR_INVALID();
       tenant_ids_index_++;
     }
   } else {
@@ -264,7 +264,7 @@ int ObLSRecoveryReportor::update_ls_recovery_stat_()
         LOG_WARN("failed to update_ls_replayable_point", KR(tmp_ret), KPC(ls), K(replayable_scn));
       }
 
-      if (ls->is_sys_ls() && !MTL_IS_PRIMARY_TENANT()) {
+      if (ls->is_sys_ls() && !MTL_TENANT_ROLE_CACHE_IS_PRIMARY()) {
        // nothing todo
        // sys ls of user standby/restore tenant is in ls_recovery
      } else if (OB_FAIL(update_ls_recovery(ls, sql_proxy_))) {
@@ -96,6 +96,8 @@ int ObTenantInfoLoader::start()
     ret = OB_NOT_INIT;
     LOG_WARN("not init", KR(ret));
   } else if (!is_user_tenant(tenant_id_)) {
+    //meta and sys tenant is primary
+    MTL_SET_TENANT_ROLE_CACHE(ObTenantRole::PRIMARY_TENANT);
     LOG_INFO("not user tenant no need load", K(tenant_id_));
   } else if (OB_FAIL(logical_start())) {
     LOG_WARN("failed to start", KR(ret));
@@ -592,7 +594,7 @@ int ObAllTenantInfoCache::refresh_tenant_info(const uint64_t tenant_id,
     SpinWLockGuard guard(lock_);
     if (ora_rowscn >= ora_rowscn_) {
       if (ora_rowscn > ora_rowscn_) {
-        MTL_SET_TENANT_ROLE(new_tenant_info.get_tenant_role().value());
+        MTL_SET_TENANT_ROLE_CACHE(new_tenant_info.get_tenant_role().value());
         (void)tenant_info_.assign(new_tenant_info);
         ora_rowscn_ = ora_rowscn;
         content_changed = true;
@@ -631,7 +633,7 @@ int ObAllTenantInfoCache::update_tenant_info_cache(
     ret = OB_EAGAIN;
     LOG_WARN("my tenant_info is invalid, don't refresh", KR(ret), K_(tenant_info), K_(ora_rowscn));
   } else if (new_ora_rowscn > ora_rowscn_) {
-    MTL_SET_TENANT_ROLE(new_tenant_info.get_tenant_role().value());
+    MTL_SET_TENANT_ROLE_CACHE(new_tenant_info.get_tenant_role().value());
     (void)tenant_info_.assign(new_tenant_info);
     ora_rowscn_ = new_ora_rowscn;
     refreshed = true;
@@ -304,7 +304,7 @@ int ObTenantThreadHelper::check_can_do_recovery_(const uint64_t tenant_id)
     LOG_WARN("only user tenant need check recovery", KR(ret), K(tenant_id));
   } else {
     MTL_SWITCH(tenant_id) {
-      share::ObTenantRole::Role tenant_role = MTL_GET_TENANT_ROLE();
+      share::ObTenantRole::Role tenant_role = MTL_GET_TENANT_ROLE_CACHE();
       if (is_primary_tenant(tenant_role) || is_standby_tenant(tenant_role)) {
       } else if (is_restore_tenant(tenant_role)) {
         //need to check success to create init ls
@@ -324,6 +324,9 @@ int ObTenantThreadHelper::check_can_do_recovery_(const uint64_t tenant_id)
           ret = OB_NEED_WAIT;
           LOG_WARN("restore tenant not valid to recovery", KR(ret), K(job_info));
         }
+      } else if (is_invalid_tenant(tenant_role)) {
+        ret = OB_NEED_WAIT;
+        LOG_WARN("tenant role not ready, need wait", KR(ret), K(tenant_role));
       } else {
         ret = OB_ERR_UNEXPECTED;
         LOG_WARN("unexpected tenant role", KR(ret), K(tenant_role));
@@ -929,7 +929,7 @@ int ObDDLUtil::ddl_get_tablet(
 
 bool ObDDLUtil::need_remote_write(const int ret_code)
 {
-  return ObTenantRole::PRIMARY_TENANT == MTL_GET_TENANT_ROLE()
+  return ObTenantRole::PRIMARY_TENANT == MTL_GET_TENANT_ROLE_CACHE()
     && (OB_NOT_MASTER == ret_code
         || OB_NOT_RUNNING == ret_code
         || OB_LS_LOCATION_LEADER_NOT_EXIST == ret_code
@@ -50,7 +50,7 @@ ObTenantBase::ObTenantBase(const uint64_t id, bool enable_tenant_ctx_check)
     inited_(false),
     created_(false),
     mtl_init_ctx_(nullptr),
-    tenant_role_value_(share::ObTenantRole::Role::PRIMARY_TENANT),
+    tenant_role_value_(share::ObTenantRole::Role::INVALID_TENANT),
    unit_max_cpu_(0),
    unit_min_cpu_(0),
    unit_memory_size_(0),
@@ -323,13 +323,17 @@ using ObTableScanIteratorObjPool = common::ObServerObjectPool<oceanbase::storage
 // get the tenant ID
 #define MTL_ID() share::ObTenantEnv::get_tenant_local()->id()
 // whether this is the primary tenant
-#define MTL_IS_PRIMARY_TENANT() share::ObTenantEnv::get_tenant()->is_primary_tenant()
+#define MTL_TENANT_ROLE_CACHE_IS_PRIMARY() share::ObTenantEnv::get_tenant()->is_primary_tenant()
+//compatibility form: tenants used to default to primary, so INVALID is still treated as primary here
+#define MTL_TENANT_ROLE_CACHE_IS_PRIMARY_OR_INVALID() share::ObTenantEnv::get_tenant()->is_primary_or_invalid_tenant()
+//whether the tenant role cache has not been successfully initialized yet (still invalid)
+#define MTL_TENANT_ROLE_CACHE_IS_INVALID() share::ObTenantEnv::get_tenant()->is_invalid_tenant()
 // whether the tenant is restoring
-#define MTL_IS_RESTORE_TENANT() share::ObTenantEnv::get_tenant()->is_restore_tenant()
+#define MTL_TENANT_ROLE_CACHE_IS_RESTORE() share::ObTenantEnv::get_tenant()->is_restore_tenant()
 // update the tenant role
-#define MTL_SET_TENANT_ROLE(tenant_role) share::ObTenantEnv::get_tenant()->set_tenant_role(tenant_role)
+#define MTL_SET_TENANT_ROLE_CACHE(tenant_role) share::ObTenantEnv::get_tenant()->set_tenant_role(tenant_role)
 // get the tenant role
-#define MTL_GET_TENANT_ROLE() share::ObTenantEnv::get_tenant()->get_tenant_role()
+#define MTL_GET_TENANT_ROLE_CACHE() share::ObTenantEnv::get_tenant()->get_tenant_role()
 // get the tenant module
 #define MTL_CTX() (share::ObTenantEnv::get_tenant())
 // get the tenant init parameters, only used during initialization
@@ -514,11 +518,23 @@ public:
     return share::is_primary_tenant(ATOMIC_LOAD(&tenant_role_value_));
   }
 
+  bool is_primary_or_invalid_tenant()
+  {
+    share::ObTenantRole::Role tenant_role = get_tenant_role();
+    return share::is_primary_tenant(tenant_role)
+           or share::is_invalid_tenant(tenant_role);
+  }
+
   bool is_restore_tenant()
   {
     return share::is_restore_tenant(ATOMIC_LOAD(&tenant_role_value_));
   }
 
+  bool is_invalid_tenant()
+  {
+    return share::is_invalid_tenant(ATOMIC_LOAD(&tenant_role_value_));
+  }
+
   template<class T>
   T get() { return inner_get(Identity<T>()); }
 
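Note: to make the intent of the new predicates above easier to follow, here is a minimal, self-contained C++ sketch of the three-way pattern this commit introduces. It is illustrative only; TenantRoleCache and decide_path are invented names, not OceanBase code. The role cache now starts out INVALID, compatibility call sites treat INVALID as if it were primary (matching MTL_TENANT_ROLE_CACHE_IS_PRIMARY_OR_INVALID), and correctness-sensitive call sites treat INVALID as "not ready yet, retry".

#include <atomic>
#include <cstdio>

enum class TenantRole { INVALID, PRIMARY, STANDBY, RESTORE };

struct TenantRoleCache {
  // Mirrors the new default in ObTenantBase: the cache starts INVALID until the
  // tenant info loader refreshes it.
  std::atomic<TenantRole> role{TenantRole::INVALID};

  bool is_primary() const { return role.load() == TenantRole::PRIMARY; }
  bool is_invalid() const { return role.load() == TenantRole::INVALID; }
  // Compatibility check: callers that used to rely on "primary by default"
  // keep working while the cache is still unrefreshed.
  bool is_primary_or_invalid() const { return is_primary() || is_invalid(); }
};

// The branching used by correctness-sensitive callers in this patch
// (e.g. check_ls_transfer_scn_validity_): INVALID means "not ready, retry";
// it is neither silently treated as primary nor as standby.
const char *decide_path(const TenantRoleCache &cache) {
  if (cache.is_invalid()) {
    return "retry later";            // role not loaded yet
  } else if (cache.is_primary()) {
    return "primary path";
  } else {
    return "standby/restore path";
  }
}

int main() {
  TenantRoleCache cache;
  std::printf("%s\n", decide_path(cache));  // retry later
  cache.role = TenantRole::PRIMARY;
  std::printf("%s\n", decide_path(cache));  // primary path
  return 0;
}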
@@ -1352,9 +1352,9 @@ int ObTableLocation::get_is_weak_read(const ObDMLStmt &dml_stmt,
              dml_stmt.get_query_ctx()->is_contain_select_for_update_ ||
              dml_stmt.get_query_ctx()->is_contain_inner_table_) {
     is_weak_read = false;
-  } else if (share::ObTenantEnv::get_tenant() == nullptr) { //table api can't invoke MTL_IS_PRIMARY_TENANT
+  } else if (share::ObTenantEnv::get_tenant() == nullptr) { //table api can't invoke MTL_TENANT_ROLE_CACHE_IS_PRIMARY_OR_INVALID
     is_weak_read = false;
-  } else if (!MTL_IS_PRIMARY_TENANT()) {
+  } else if (!MTL_TENANT_ROLE_CACHE_IS_PRIMARY_OR_INVALID()) {
     is_weak_read = true;
   } else {
     ObConsistencyLevel consistency_level = INVALID_CONSISTENCY;
@@ -1163,7 +1163,7 @@ int ObResolver::resolve(IsPrepared if_prepared, const ParseNode &parse_tree, ObS
 
   if (OB_SUCC(ret)) {
     if (ObStmt::is_write_stmt(stmt->get_stmt_type(), stmt->has_global_variable())
-        && !MTL_IS_PRIMARY_TENANT()) {
+        && !MTL_TENANT_ROLE_CACHE_IS_PRIMARY_OR_INVALID()) {
       ret = OB_STANDBY_READ_ONLY;
       TRANS_LOG(WARN, "standby tenant support read only", K(ret), K(stmt));
     }
@@ -1118,7 +1118,7 @@ int ObMediumCompactionScheduleFunc::schedule_tablet_medium_merge(
     LOG_WARN("failed to load medium info list", K(ret), K(tablet));
   } else if (ObMediumCompactionInfo::MAJOR_COMPACTION == medium_list->get_last_compaction_type()
       && inner_table_merged_version < medium_list->get_last_compaction_scn()
-      && !MTL_IS_PRIMARY_TENANT()) { // for STANDBY/RESTORE TENANT
+      && !MTL_TENANT_ROLE_CACHE_IS_PRIMARY_OR_INVALID()) { // for STANDBY/RESTORE TENANT
     ObTabletCompactionScnInfo ret_info;
     // for standby/restore tenant, need select inner_table to check RS status before schedule new round
     if (!scheduler_called) { // should not visit inner table, wait for scheduler loop
@@ -1338,7 +1338,7 @@ int ObDDLSSTableRedoWriter::write_redo_log(const ObDDLMacroBlockRedoInfo &redo_i
           LOG_WARN("fail to switch to remote write", K(ret));
         }
       } else {
-        LOG_WARN("fail to write ddl redo clog", K(ret), K(MTL_GET_TENANT_ROLE()));
+        LOG_WARN("fail to write ddl redo clog", K(ret), K(MTL_GET_TENANT_ROLE_CACHE()));
       }
     }
   }
@@ -1114,7 +1114,7 @@ int ObStartCompleteMigrationTask::wait_log_replay_sync_()
   SCN last_replay_scn;
   bool need_wait = false;
   bool is_done = false;
-  const bool is_primay_tenant = MTL_IS_PRIMARY_TENANT();
+  const bool is_primay_tenant = MTL_TENANT_ROLE_CACHE_IS_PRIMARY_OR_INVALID();
   share::SCN readable_scn;
   ObTimeoutCtx timeout_ctx;
   int64_t timeout = 10_min;
@@ -206,7 +206,7 @@ int ObLSMemberListService::get_max_tablet_transfer_scn(share::SCN &transfer_scn)
       STORAGE_LOG(INFO, "committed tablet_status does not exist", K(ret), K(key));
       ret = OB_SUCCESS;
     } else if (OB_ERR_SHARED_LOCK_CONFLICT == ret) {
-      if (MTL_IS_PRIMARY_TENANT()) {
+      if (MTL_TENANT_ROLE_CACHE_IS_PRIMARY_OR_INVALID()) {
         STORAGE_LOG(INFO, "committed tablet_status does not exist", K(ret), K(tablet_id));
         break;
       } else {
@@ -325,11 +325,14 @@ int ObLSMemberListService::get_ls_member_list_(common::ObIArray<common::ObAddr>
 int ObLSMemberListService::check_ls_transfer_scn_validity_(palf::LogConfigVersion &leader_config_version)
 {
   int ret = OB_SUCCESS;
-  if (MTL_IS_PRIMARY_TENANT()) {
+  if (MTL_TENANT_ROLE_CACHE_IS_INVALID()) {
+    ret = OB_NEED_RETRY;
+    STORAGE_LOG(WARN, "tenant role is invalid, need retry", KR(ret));
+  } else if (MTL_TENANT_ROLE_CACHE_IS_PRIMARY()) {
     if (OB_FAIL(check_ls_transfer_scn_validity_for_primary_(leader_config_version))) {
       STORAGE_LOG(WARN, "failed to check ls transfer scn validity for primary", K(ret), KP_(ls));
     }
-  } else {
+  } else {//standby restore
     if (OB_FAIL(check_ls_transfer_scn_validity_for_standby_(leader_config_version))) {
       STORAGE_LOG(WARN, "failed to check ls transfer scn validity for standby", K(ret), KP_(ls));
     }
@@ -254,7 +254,10 @@ int ObStorageHAUtils::check_transfer_ls_can_rebuild(
   if (!replay_scn.is_valid()) {
     ret = OB_INVALID_ARGUMENT;
     LOG_WARN("argument invalid", K(ret), K(replay_scn));
-  } else if (MTL_IS_PRIMARY_TENANT()) {
+  } else if (MTL_TENANT_ROLE_CACHE_IS_INVALID()) {
+    ret = OB_NEED_RETRY;
+    LOG_WARN("tenant role is invalid, need retry", KR(ret), K(replay_scn));
+  } else if (MTL_TENANT_ROLE_CACHE_IS_PRIMARY()) {
     need_rebuild = true;
   } else if (OB_FAIL(get_readable_scn_(readable_scn))) {
     LOG_WARN("failed to get readable scn", K(ret), K(replay_scn));
@@ -1328,7 +1328,7 @@ int ObLS::get_ls_info(ObLSVTInfo &ls_info)
   } else {
     // The readable point of the primary tenant is weak read ts,
     // and the readable point of the standby tenant is readable scn
-    if (MTL_IS_PRIMARY_TENANT()) {
+    if (MTL_TENANT_ROLE_CACHE_IS_PRIMARY_OR_INVALID()) {
       ls_info.weak_read_scn_ = ls_wrs_handler_.get_ls_weak_read_ts();
     } else if (OB_FAIL(get_ls_replica_readable_scn(ls_info.weak_read_scn_))) {
       TRANS_LOG(WARN, "get ls replica readable scn fail", K(ret), KPC(this));
@@ -58,7 +58,7 @@ void ObTimestampAccess::get_virtual_info(int64_t &ts_value,
                                          int64_t &proposal_id)
 {
   service_type = service_type_;
-  if (MTL_IS_PRIMARY_TENANT()) {
+  if (MTL_TENANT_ROLE_CACHE_IS_PRIMARY_OR_INVALID()) {
     MTL(ObTimestampService *)->get_virtual_info(ts_value, role, proposal_id);
   } else {
     MTL(ObStandbyTimestampService *)->get_virtual_info(ts_value, role, proposal_id);
@@ -815,7 +815,7 @@ public:
     } else if (OB_FAIL(tx_stat_iter_.push(tx_stat))) {
       TRANS_LOG_RET(WARN, ret, "ObTxStatIterator push trans stat error", K(ret));
     } else if (!tx_stat.xid_.empty() && tx_stat.coord_ == tx_stat.ls_id_ && (int64_t)ObTxState::REDO_COMPLETE == tx_stat.state_
-        && (!MTL_IS_PRIMARY_TENANT() || (TxCtxRoleState::LEADER == tx_stat.role_state_
+        && (!MTL_TENANT_ROLE_CACHE_IS_PRIMARY_OR_INVALID() || (TxCtxRoleState::LEADER == tx_stat.role_state_
             && tx_stat.last_request_ts_ < ObClockGenerator::getClock() - INSERT_INTERNAL_FOR_PRIMARY))) {
       (void)MTL(ObXAService *)->insert_record_for_standby(tx_stat.tenant_id_,
                                                           tx_stat.xid_,
@@ -1367,7 +1367,7 @@ public:
     } else if (!tx_ctx->is_inited()) {
       // not inited, don't need to traverse
     } else if (tx_ctx->is_xa_trans() && tx_ctx->is_root() && ObTxState::REDO_COMPLETE == tx_ctx->exec_info_.state_
-        && (!MTL_IS_PRIMARY_TENANT() || TxCtxRoleState::LEADER == tx_ctx->role_state_)) {
+        && (!MTL_TENANT_ROLE_CACHE_IS_PRIMARY_OR_INVALID() || TxCtxRoleState::LEADER == tx_ctx->role_state_)) {
       ret = MTL(ObXAService *)->insert_record_for_standby(tx_ctx->tenant_id_, tx_ctx->exec_info_.xid_, tx_id,
                                                           tx_ctx->ls_id_, tx_ctx->exec_info_.scheduler_);
     }
@@ -1000,7 +1000,7 @@ int ObTransService::get_read_store_ctx(const ObTxReadSnapshot &snapshot,
     if (OB_TRANS_CTX_NOT_EXIST == ret && !exist) {
       ret = OB_SUCCESS;
     } else {
-      if (!MTL_IS_PRIMARY_TENANT()) {
+      if (!MTL_TENANT_ROLE_CACHE_IS_PRIMARY_OR_INVALID()) {
        ret = OB_STANDBY_READ_ONLY;
      }
      TRANS_LOG(WARN, "get tx ctx fail",
@@ -1125,7 +1125,7 @@ int ObTransService::get_write_store_ctx(ObTxDesc &tx,
   }
   // fail, rollback
   if (OB_FAIL(ret)) {
-    if (!MTL_IS_PRIMARY_TENANT()) {
+    if (!MTL_TENANT_ROLE_CACHE_IS_PRIMARY_OR_INVALID()) {
      ret = OB_STANDBY_READ_ONLY;
    }
    if (OB_NOT_NULL(tx_ctx)) {
@@ -1460,7 +1460,7 @@ int ObTransService::check_replica_readable_(const ObTxReadSnapshot &snapshot,
     } else {
       if (OB_SUCC(wait_follower_readable_(ls, expire_ts, snapshot.core_.version_, src))) {
         TRANS_LOG(INFO, "read from follower", K(snapshot), K(snapshot), K(ls));
-      } else if (MTL_IS_PRIMARY_TENANT()) {
+      } else if (MTL_TENANT_ROLE_CACHE_IS_PRIMARY_OR_INVALID()) {
         ret = OB_NOT_MASTER;
       } else {
         ret = OB_REPLICA_NOT_READABLE;
@@ -1479,7 +1479,7 @@ bool ObTransService::check_ls_readable_(ObLS &ls,
   int ret = OB_SUCCESS;
   bool readable = false;
   SCN scn;
-  if (ObTxReadSnapshot::SRC::WEAK_READ_SERVICE == src || MTL_IS_PRIMARY_TENANT()) {
+  if (ObTxReadSnapshot::SRC::WEAK_READ_SERVICE == src || MTL_TENANT_ROLE_CACHE_IS_PRIMARY_OR_INVALID()) {
     readable = snapshot <= ls.get_ls_wrs_handler()->get_ls_weak_read_ts();
   } else if (OB_FAIL(ls.get_ls_replica_readable_scn(scn))) {
     TRANS_LOG(WARN, "get ls replica readable scn fail", K(ret), K(ls.get_ls_id()));
@@ -1606,7 +1606,7 @@ OB_NOINLINE int ObTransService::acquire_local_snapshot_(const share::ObLSID &ls_
   SCN snapshot0;
   SCN snapshot1;
   ObLSTxCtxMgr *ls_tx_ctx_mgr = NULL;
-  const bool can_elr = MTL_IS_PRIMARY_TENANT() ? true : false;
+  const bool can_elr = MTL_TENANT_ROLE_CACHE_IS_PRIMARY() ? true : false;
   ObLSHandle ls_handle;
   if (OB_FAIL(MTL(ObLSService *)->get_ls(ls_id, ls_handle, ObLSGetMod::TRANS_MOD))) {
     TRANS_LOG(WARN, "get ls fail", K(ret), K(ls_id));
@@ -1632,10 +1632,10 @@ OB_NOINLINE int ObTransService::acquire_local_snapshot_(const share::ObLSID &ls_
       && OB_NOT_NULL(ls_handle.get_ls())) {
     dup_trx_status =
         ls_handle.get_ls()->get_tx_svr()->get_tx_ls_log_adapter()->get_committing_dup_trx_cnt(committing_dup_trx_cnt);
-    if (!MTL_IS_PRIMARY_TENANT()) {
+    if (!MTL_TENANT_ROLE_CACHE_IS_PRIMARY()) {
       ret = OB_NOT_MASTER;
       TRANS_LOG(DEBUG, "the max_commmit_ts can not be used as a snapshot in standby tenant ",
-                K(ret), K(ls_id), K(snapshot), K(MTL_IS_PRIMARY_TENANT()),
+                K(ret), K(ls_id), K(snapshot), K(MTL_TENANT_ROLE_CACHE_IS_PRIMARY()),
                 K(committing_dup_trx_cnt));
     } else if (!ls_handle.get_ls()
                    ->get_tx_svr()
@@ -2393,7 +2393,7 @@ int ObTransService::gen_trans_id(ObTransID &trans_id)
   int ret = OB_SUCCESS;
 
   int retry_times = 0;
-  if (!MTL_IS_PRIMARY_TENANT()) {
+  if (!MTL_TENANT_ROLE_CACHE_IS_PRIMARY_OR_INVALID()) {
     ret = OB_STANDBY_READ_ONLY;
     TRANS_LOG(WARN, "standby tenant support read only", K(ret));
   } else {
@@ -780,7 +780,7 @@ int ObTransService::get_weak_read_snapshot_version(const int64_t max_read_stale_
     if (OB_FAIL(OB_TS_MGR.get_gts(tenant_id_, NULL, gts_cache))) {
       TRANS_LOG(WARN, "get ts sync error", K(ret), K(max_read_stale_us_for_user));
     } else {
-      const int64_t current_time_us = MTL_IS_PRIMARY_TENANT()
+      const int64_t current_time_us = MTL_TENANT_ROLE_CACHE_IS_PRIMARY_OR_INVALID()
          ? std::max(ObTimeUtility::current_time(), gts_cache.convert_to_ts())
          : gts_cache.convert_to_ts();
      current_scn.convert_from_ts(current_time_us - max_read_stale_us_for_user);
@@ -344,7 +344,7 @@ int LockForReadFunctor::operator()(const ObTxData &tx_data, ObTxCCCtx *tx_cc_ctx
     } else if (ObTimeUtility::current_time() + MIN(i, MAX_SLEEP_US) >= lock_expire_ts) {
       ret = OB_ERR_SHARED_LOCK_CONFLICT;
       break;
-    } else if (!MTL_IS_PRIMARY_TENANT() && OB_SUCC(check_for_standby(tx_data.tx_id_))) {
+    } else if (!MTL_TENANT_ROLE_CACHE_IS_PRIMARY_OR_INVALID() && OB_SUCC(check_for_standby(tx_data.tx_id_))) {
       TRANS_LOG(INFO, "read by standby tenant success", K(tx_data), KPC(tx_cc_ctx), KPC(this));
       break;
     } else if (i < 10) {
@@ -23,7 +23,7 @@ namespace transaction
 int ObTxELRUtil::check_and_update_tx_elr_info(ObTxDesc &tx)
 {
   int ret = OB_SUCCESS;
-  if (OB_SYS_TENANT_ID != MTL_ID() && MTL_IS_PRIMARY_TENANT()) {
+  if (OB_SYS_TENANT_ID != MTL_ID() && MTL_TENANT_ROLE_CACHE_IS_PRIMARY()) {
     if (can_tenant_elr_) { // tenant config enable elr
       tx.set_can_elr(true);
       TX_STAT_ELR_ENABLE_TRANS_INC(MTL_ID());
@@ -281,7 +281,7 @@ bool ObBLService::check_need_skip_leader_(const uint64_t tenant_id)
   bool need_skip = true;
   int ret = OB_SUCCESS;
   MTL_SWITCH(tenant_id) {
-    if (!MTL_IS_PRIMARY_TENANT()) {
+    if (!MTL_TENANT_ROLE_CACHE_IS_PRIMARY()) {
       need_skip = false;
     }
   }
@@ -63,9 +63,10 @@ int ObWeakReadUtil::generate_min_weak_read_version(const uint64_t tenant_id, SCN
       tenant_id,
       oceanbase::omt::ObTenantConfigMgr::default_fallback_tenant_id(),
       /* success */ [buffer_time, &tenant_config_exist, &max_stale_time](const omt::ObTenantConfig &config) mutable {
-        if (MTL_IS_PRIMARY_TENANT()) {
+        if (MTL_TENANT_ROLE_CACHE_IS_PRIMARY()) {
          max_stale_time = config.max_stale_time_for_weak_consistency - buffer_time;
        } else {
+          //standby, restore, invalid
          max_stale_time = config.max_stale_time_for_weak_consistency + transaction::ObTimestampService::PREALLOCATE_RANGE_FOR_SWITHOVER - buffer_time;
        }
        tenant_config_exist = true;
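As a hedged illustration of the staleness arithmetic in the lambda above: only a PRIMARY role cache uses the tight bound, while standby, restore, and still-INVALID roles get the GTS range preallocated for switchover added back before the buffer is subtracted. The constants below are placeholders invented for this example, not OceanBase's real configuration values or PREALLOCATE_RANGE_FOR_SWITHOVER.

#include <cstdint>
#include <cstdio>

// Placeholder values for illustration only.
constexpr int64_t kMaxStaleTimeForWeakConsistencyUs = 5 * 1000 * 1000;  // hypothetical 5s config
constexpr int64_t kPreallocateRangeForSwitchoverUs = 2 * 1000 * 1000;   // hypothetical 2s range
constexpr int64_t kBufferTimeUs = 500 * 1000;                           // hypothetical 0.5s buffer

int64_t max_stale_time(bool role_cache_is_primary) {
  // Primary: configured staleness minus the buffer.
  // Standby/restore/invalid: widen the bound by the preallocated switchover range.
  return role_cache_is_primary
      ? kMaxStaleTimeForWeakConsistencyUs - kBufferTimeUs
      : kMaxStaleTimeForWeakConsistencyUs + kPreallocateRangeForSwitchoverUs - kBufferTimeUs;
}

int main() {
  std::printf("primary bound:           %lld us\n", (long long)max_stale_time(true));
  std::printf("standby/restore/invalid: %lld us\n", (long long)max_stale_time(false));
  return 0;
}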
@@ -1343,7 +1343,11 @@ int ObLSService::get_restore_status_(
     LOG_WARN("not init", K(ret));
   } else if (is_sys_tenant(tenant_id) || is_meta_tenant(tenant_id)) {
     restore_status = ObLSRestoreStatus::RESTORE_NONE;
-  } else if (FALSE_IT(restore_status = MTL_IS_RESTORE_TENANT() ?
+  } else if (share::ObTenantRole::INVALID_TENANT == MTL_GET_TENANT_ROLE_CACHE()) {
+    //tenant role not ready, need wait
+    ret = OB_NEED_WAIT;
+    LOG_WARN("tenant role is invalid now, need wait", KR(ret), K(tenant_id));
+  } else if (FALSE_IT(restore_status = MTL_TENANT_ROLE_CACHE_IS_RESTORE() ?
       ObLSRestoreStatus::RESTORE_START : ObLSRestoreStatus::RESTORE_NONE)) {
   }
   return ret;
@@ -752,7 +752,9 @@ int ObTxDataTable::get_recycle_scn(SCN &recycle_scn)
     min_end_scn = std::min(min_end_scn_from_old_tablets, min_end_scn_from_latest_tablets);
     if (!min_end_scn.is_max()) {
       recycle_scn = min_end_scn;
-      if (!MTL_IS_PRIMARY_TENANT()) {
+      //Regardless of whether the primary or standby tenant is unified, refer to GTS.
+      //If the tenant role in memory is deferred,
+      //it may cause the standby tenant to commit and recycle when the primary is switched to standby.
       SCN snapshot_version;
       MonotonicTs unused_ts(0);
       if (OB_FAIL(OB_TS_MGR.get_gts(MTL_ID(), MonotonicTs(1), NULL, snapshot_version, unused_ts))) {
@@ -762,7 +764,6 @@ int ObTxDataTable::get_recycle_scn(SCN &recycle_scn)
         }
       }
     }
-  }
 
   FLOG_INFO("get tx data recycle scn finish.",
             KR(ret),