[CP] fix rulescan bugs
parent 848c4c4fd2
commit 8924627121
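The [CP] prefix conventionally marks a cherry-picked fix. The hunks below all address one family of defects flagged by rule scanning of OceanBase's error-handling conventions: fallible calls not wrapped in OB_FAIL (so their return code never reaches ret), loops that keep iterating after a failure, warnings logged before any error code is set, and hand-rolled real_ret bookkeeping replaced by the standard OB_TMP_FAIL idiom.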
@@ -511,7 +511,7 @@ int main(int argc, char *argv[])
   opts.log_level_ = OB_LOG_LEVEL_WARN;
   parse_opts(argc, argv, opts);
 
-  if (OB_FAIL(check_uid_before_start(CONF_DIR))) {
+  if (OB_SUCC(ret) && OB_FAIL(check_uid_before_start(CONF_DIR))) {
     MPRINT("Fail check_uid_before_start, please use the initial user to start observer!");
   } else if (OB_FAIL(FileDirectoryUtils::create_full_path(PID_DIR))) {
     MPRINT("create pid dir fail: ./run/");
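Note: the added guard means check_uid_before_start() runs only if nothing earlier set ret to an error; without it, OB_FAIL would overwrite a prior failure code with this call's result. A minimal sketch of the convention, with the OB_* macros paraphrased as plain C++ (the real macros also add branch-prediction hints, so treat these definitions as approximations):

    #include <cstdio>

    constexpr int OB_SUCCESS = 0;

    // Approximations: the real OB_FAIL assigns the call's code into the
    // enclosing `ret`; OB_SUCC(ret) merely tests it.
    #define OB_FAIL(stmt) (OB_SUCCESS != (ret = (stmt)))
    #define OB_SUCC(r)    (OB_SUCCESS == (r))

    int step_a() { return -1; }        // pretend an earlier step fails
    int step_b() { return OB_SUCCESS; }

    int main()
    {
      int ret = OB_SUCCESS;
      if (OB_FAIL(step_a())) {
        std::printf("step_a failed: %d\n", ret);
      }
      // Without the OB_SUCC(ret) guard, a successful step_b() would reset
      // ret and swallow step_a's failure code.
      if (OB_SUCC(ret) && OB_FAIL(step_b())) {
        std::printf("step_b failed: %d\n", ret);
      }
      return ret;  // still carries step_a's error
    }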
@@ -243,14 +243,14 @@ int ObMPConnect::init_process_single_stmt(const ObMultiStmtItem &multi_stmt_item
   } else if (OB_FAIL(gctx_.sql_engine_->stmt_query(sql, ctx, result))) {
     LOG_WARN("sql execute failed", K(multi_stmt_item), K(sql), K(ret));
   } else {
-    if (OB_FAIL(result.open())) {
-      LOG_WARN("failed to do result set open", K(ret));
+    int open_ret = result.open();
+    if (open_ret) {
+      LOG_WARN("failed to do result set open", K(open_ret));
     }
-    int save_ret = ret;
     if (OB_FAIL(result.close())) {
       LOG_WARN("result close failed, disconnect.", K(ret));
     }
-    ret = (save_ret != OB_SUCCESS) ? save_ret : ret;
+    ret = (open_ret != OB_SUCCESS) ? open_ret : ret;
   }
   if (enable_trace_log) {
     ObThreadLogLevelUtils::clear();
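Note: before and after are nearly equivalent here, since both versions always close the result set and let the first error win. But the old code routed open()'s failure through ret and rescued it via save_ret, a save/restore dance that rule scanning flags; naming the intermediate open_ret makes the data flow explicit. A sketch of the shape (hypothetical Result type, plain ints for codes):

    constexpr int OB_SUCCESS = 0;

    // Hypothetical stand-in for the SQL result set.
    struct Result {
      int open()  { return -1; }        // pretend open fails
      int close() { return OB_SUCCESS; }
    };

    int process(Result &result)
    {
      int ret = OB_SUCCESS;
      const int open_ret = result.open();  // keep open's code out of ret
      if (OB_SUCCESS != open_ret) {
        // LOG_WARN with K(open_ret), the variable that actually holds the code
      }
      if (OB_SUCCESS != (ret = result.close())) {  // close runs unconditionally
        // LOG_WARN with K(ret)
      }
      // First error wins: a clean close must not mask a failed open.
      ret = (open_ret != OB_SUCCESS) ? open_ret : ret;
      return ret;
    }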
@@ -92,19 +92,18 @@ int ObNetEndpointIngressManager::collect_predict_bw(ObNetEndpointKVArray &update
   const int64_t current_time = ObTimeUtility::current_time();
   {
     ObSpinLockGuard guard(lock_);
-    int tmp_ret = OB_SUCCESS;
-    for (ObIngressPlanMap::iterator iter = ingress_plan_map_.begin(); iter != ingress_plan_map_.end(); ++iter) {
+    for (ObIngressPlanMap::iterator iter = ingress_plan_map_.begin(); OB_SUCC(ret) && iter != ingress_plan_map_.end(); ++iter) {
       const ObNetEndpointKey &endpoint_key = iter->first;
       ObNetEndpointValue *endpoint_value = iter->second;
       if (endpoint_value->expire_time_ < current_time) {
         LOG_INFO("endpoint expired", K(endpoint_key), K(endpoint_value->expire_time_), K(current_time));
-        if (OB_TMP_FAIL(delete_keys.push_back(endpoint_key))) {
+        if (OB_FAIL(delete_keys.push_back(endpoint_key))) {
           LOG_WARN("fail to push back arrays", K(ret), K(endpoint_key));
         } else {
           ob_free(endpoint_value);
         }
       } else {
-        if (OB_TMP_FAIL(update_kvs.push_back(ObNetEndpointKeyValue(endpoint_key, endpoint_value)))) {
+        if (OB_FAIL(update_kvs.push_back(ObNetEndpointKeyValue(endpoint_key, endpoint_value)))) {
           LOG_WARN("fail to push back arrays", K(ret), K(endpoint_key));
         } else {
           endpoint_value->predicted_bw_ = -1;
@@ -112,11 +111,10 @@ int ObNetEndpointIngressManager::collect_predict_bw(ObNetEndpointKVArray &update
       }
     }
 
-    for (int64_t i = 0; i < delete_keys.count(); i++) {
+    for (int64_t i = 0; OB_SUCC(ret) && i < delete_keys.count(); i++) {
       const ObNetEndpointKey &endpoint_key = delete_keys[i];
       if (OB_FAIL(ingress_plan_map_.erase_refactored(endpoint_key))) {
         LOG_ERROR("failed to erase endpoint", K(ret), K(endpoint_key));
-        ret = OB_SUCCESS; // ignore error
       }
     }
   }
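Note: two fixes in collect_predict_bw(). First, push_back failures landed in tmp_ret via OB_TMP_FAIL while the accompanying LOG_WARN printed K(ret), the wrong variable, and the function's return value never reflected them; they now flow through ret. Second, the erase loop used to paper over failures with ret = OB_SUCCESS, and both loops now stop early via OB_SUCC(ret) in the loop condition. A paraphrased sketch of the propagation:

    #include <vector>

    constexpr int OB_SUCCESS = 0;

    int push_back_may_fail(std::vector<int> &v, int x)
    {
      v.push_back(x);
      return (x == 3) ? -1 : OB_SUCCESS;  // pretend item 3 hits an allocation error
    }

    int collect(std::vector<int> &out)
    {
      int ret = OB_SUCCESS;
      // After the fix: the condition re-checks ret, so the first failure
      // stops the scan and reaches the caller instead of being parked in a
      // tmp_ret that the return path never reads.
      for (int i = 0; OB_SUCCESS == ret && i < 5; ++i) {
        ret = push_back_may_fail(out, i);
      }
      return ret;  // -1, not OB_SUCCESS
    }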
@@ -2149,7 +2149,7 @@ int ObServer::init_io()
   int64_t data_disk_percentage = 0;
   int64_t log_disk_percentage = 0;
 
-  if (OB_FAIL(log_block_mgr_.init(storage_env_.clog_dir_))) {
+  if (OB_SUCC(ret) && OB_FAIL(log_block_mgr_.init(storage_env_.clog_dir_))) {
    LOG_ERROR("log block mgr init failed", KR(ret));
   } else if (OB_FAIL(ObServerUtils::cal_all_part_disk_size(config_.datafile_size,
                                                            config_.log_disk_size,
@@ -88,54 +88,44 @@ ObServerReloadConfig::~ObServerReloadConfig()
 
 int ObServerReloadConfig::operator()()
 {
-  int ret = OB_SUCCESS;
-  int real_ret = ret;
+  int tmp_ret = OB_SUCCESS;
+  int ret = tmp_ret;
   const bool is_arbitration_mode = OBSERVER.is_arbitration_mode();
 
   if (!gctx_.is_inited()) {
-    real_ret = ret = OB_INNER_STAT_ERROR;
-    LOG_WARN("gctx not init", "gctx inited", gctx_.is_inited(), K(ret));
+    ret = tmp_ret = OB_INNER_STAT_ERROR;
+    LOG_WARN("gctx not init", "gctx inited", gctx_.is_inited(), K(tmp_ret));
   } else {
-    if (OB_FAIL(ObReloadConfig::operator()())) {
-      real_ret = ret;
-      LOG_WARN("ObReloadConfig operator() failed", K(ret));
+    if (OB_TMP_FAIL(ObReloadConfig::operator()())) {
+      LOG_WARN("ObReloadConfig operator() failed", K(tmp_ret));
     }
-    if (OB_FAIL(gctx_.root_service_->reload_config())) {
-      real_ret = ret;
-      LOG_WARN("root_service_ reload_config failed", K(ret));
+    if (OB_TMP_FAIL(gctx_.root_service_->reload_config())) {
+      LOG_WARN("root_service_ reload_config failed", K(tmp_ret));
    }
-    if (OB_FAIL(gctx_.location_service_->reload_config())) {
-      real_ret = ret;
-      LOG_WARN("location service reload config failed", KR(ret));
+    if (OB_TMP_FAIL(gctx_.location_service_->reload_config())) {
+      LOG_WARN("location service reload config failed", KR(tmp_ret));
     }
-    if (OB_FAIL(ObClusterVersion::get_instance().reload_config())) {
-      real_ret = ret;
-      LOG_WARN("cluster version reload config failed", K(ret));
+    if (OB_TMP_FAIL(ObClusterVersion::get_instance().reload_config())) {
+      LOG_WARN("cluster version reload config failed", K(tmp_ret));
     }
 
-    if (OB_FAIL(OBSERVER.reload_config())) {
-      real_ret = ret;
-      LOG_WARN("reload configuration for ob service fail", K(ret));
+    if (OB_TMP_FAIL(OBSERVER.reload_config())) {
+      LOG_WARN("reload configuration for ob service fail", K(tmp_ret));
    }
-    if (OB_FAIL(OBSERVER.get_net_frame().reload_config())) {
-      real_ret = ret;
-      LOG_WARN("reload configuration for net frame fail", K(ret));
+    if (OB_TMP_FAIL(OBSERVER.get_net_frame().reload_config())) {
+      LOG_WARN("reload configuration for net frame fail", K(tmp_ret));
     }
-    if (OB_FAIL(OBSERVER.get_net_frame().reload_ssl_config())) {
-      real_ret = ret;
-      LOG_WARN("reload ssl config for net frame fail", K(ret));
+    if (OB_TMP_FAIL(OBSERVER.get_net_frame().reload_ssl_config())) {
+      LOG_WARN("reload ssl config for net frame fail", K(tmp_ret));
     }
-    if (OB_FAIL(OBSERVER.get_rl_mgr().reload_config())) {
-      real_ret = ret;
-      LOG_WARN("reload config for ratelimit manager fail", K(ret));
+    if (OB_TMP_FAIL(OBSERVER.get_rl_mgr().reload_config())) {
+      LOG_WARN("reload config for ratelimit manager fail", K(tmp_ret));
     }
-    if (OB_FAIL(ObTdeEncryptEngineLoader::get_instance().reload_config())) {
-      real_ret = ret;
-      LOG_WARN("reload config for tde encrypt engine fail", K(ret));
+    if (OB_TMP_FAIL(ObTdeEncryptEngineLoader::get_instance().reload_config())) {
+      LOG_WARN("reload config for tde encrypt engine fail", K(tmp_ret));
     }
-    if (OB_FAIL(ObSrvNetworkFrame::reload_rpc_auth_method())) {
-      real_ret = ret;
-      LOG_WARN("reload config for rpc auth method fail", K(ret));
+    if (OB_TMP_FAIL(ObSrvNetworkFrame::reload_rpc_auth_method())) {
+      LOG_WARN("reload config for rpc auth method fail", K(tmp_ret));
    }
   }
   {
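Note: the old real_ret pattern cost three lines per step, and forgetting real_ret = ret in any block silently dropped that step's failure. The rewrite treats configuration reload as best-effort: every subsystem is attempted regardless of earlier failures, each failure is logged from tmp_ret, and ret is reserved for the one fatal precondition (gctx_ not initialized). A paraphrased sketch:

    #include <cstdio>

    constexpr int OB_SUCCESS = 0;

    // Approximation: OB_TMP_FAIL assigns into tmp_ret and leaves ret alone.
    #define OB_TMP_FAIL(stmt) (OB_SUCCESS != (tmp_ret = (stmt)))

    int reload_subsystem_a() { return -1; }  // pretend this reload fails
    int reload_subsystem_b() { return OB_SUCCESS; }

    int reload_all()
    {
      int ret = OB_SUCCESS;      // overall verdict, fatal errors only
      int tmp_ret = OB_SUCCESS;  // per-step scratch code
      // Best-effort: every subsystem is attempted even if an earlier one
      // failed; each failure is logged from tmp_ret, none is propagated.
      if (OB_TMP_FAIL(reload_subsystem_a())) {
        std::printf("subsystem a reload failed: %d\n", tmp_ret);
      }
      if (OB_TMP_FAIL(reload_subsystem_b())) {
        std::printf("subsystem b reload failed: %d\n", tmp_ret);
      }
      return ret;
    }

Judging from the diff, this deliberately changes the return contract: per-step reload failures are logged but no longer returned, which suits a periodic reload that retries on the next tick.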
@@ -143,9 +133,8 @@ int ObServerReloadConfig::operator()()
     const int64_t limit_memory = GMEMCONF.get_server_memory_limit();
     OB_LOGGER.set_info_as_wdiag(GET_MIN_CLUSTER_VERSION() < CLUSTER_VERSION_4_1_0_0);
     // reload log config again after get MIN_CLUSTER_VERSION
-    if (OB_FAIL(ObReloadConfig::operator()())) {
-      real_ret = ret;
-      LOG_WARN("ObReloadConfig operator() failed", K(ret));
+    if (OB_TMP_FAIL(ObReloadConfig::operator()())) {
+      LOG_WARN("ObReloadConfig operator() failed", K(tmp_ret));
     }
     const int64_t reserved_memory = GCONF.cache_wash_threshold;
     const int64_t reserved_urgent_memory = GCONF.memory_reserved;
@@ -173,9 +162,8 @@ int ObServerReloadConfig::operator()()
     io_config.data_storage_warning_tolerance_time_ = GCONF.data_storage_warning_tolerance_time;
     io_config.data_storage_error_tolerance_time_ = GCONF.data_storage_error_tolerance_time;
     if (!is_arbitration_mode
-        && OB_FAIL(ObIOManager::get_instance().set_io_config(io_config))) {
-      real_ret = ret;
-      LOG_WARN("reload io manager config fail, ", K(ret));
+        && OB_TMP_FAIL(ObIOManager::get_instance().set_io_config(io_config))) {
+      LOG_WARN("reload io manager config fail, ", K(tmp_ret));
     }
 
     (void)reload_diagnose_info_config(GCONF.enable_perf_event);
@@ -329,7 +317,7 @@ int ObServerReloadConfig::operator()()
   {
     ObMallocAllocator::get_instance()->force_malloc_for_absent_tenant_ = GCONF._force_malloc_for_absent_tenant;
   }
-  return real_ret;
+  return ret;
 }
 
 void ObServerReloadConfig::reload_tenant_scheduler_config_()
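Note: with real_ret gone, the function returns ret, which after this change is only ever set by the gctx_ initialization check above; transient reload failures now influence the logs but not the return value.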
@@ -156,7 +156,7 @@ int ObSrvNetworkFrame::init()
     LOG_ERROR("net keepalive register fail", K(ret));
   } else if (hp_io_cnt > 0 && OB_FAIL(net_.high_prio_rpc_net_register(rpc_handler_, high_prio_rpc_transport_))) {
     LOG_ERROR("high prio rpc net register fail", K(ret));
-  } else if (ingress_service_.init(GCONF.cluster_id)) {
+  } else if (OB_FAIL(ingress_service_.init(GCONF.cluster_id))) {
     LOG_ERROR("endpoint ingress service init fail", K(ret));
   }
 #ifdef OB_USE_BABASSL
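Note: this is the commit's most common defect. A bare call in an else-if condition does branch on failure, but ret is never assigned, so LOG_ERROR prints a stale success code and the function returns OB_SUCCESS to its caller. The same shape recurs in convert_real_to_hidden_sys_tenant(), build_errsim_module_(), ObHTableLockMgr::init(), and handle_parallel_merge_success() below. A paraphrased before/after:

    constexpr int OB_SUCCESS = 0;
    #define OB_FAIL(stmt) (OB_SUCCESS != (ret = (stmt)))

    int init_service() { return -1; }  // pretend init fails

    int init_buggy()
    {
      int ret = OB_SUCCESS;
      if (init_service()) {           // branch taken, but ret never assigned
        // LOG_ERROR would print ret == OB_SUCCESS here
      }
      return ret;                     // reports success despite the failure
    }

    int init_fixed()
    {
      int ret = OB_SUCCESS;
      if (OB_FAIL(init_service())) {  // the code lands in ret first
        // LOG_ERROR now prints the real failure code
      }
      return ret;                     // -1
    }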
@@ -1931,8 +1931,8 @@ int ObMultiTenant::convert_real_to_hidden_sys_tenant()
   }
 
   if (OB_FAIL(ret)) {
-  } else if (ObServerCheckpointSlogHandler::get_instance()
-                 .write_tenant_super_block_slog(tenant_meta.super_block_)) {
+  } else if (OB_FAIL(ObServerCheckpointSlogHandler::get_instance()
+                 .write_tenant_super_block_slog(tenant_meta.super_block_))) {
     LOG_WARN("fail to write_tenant_super_block_slog", K(ret), K(tenant_meta));
   } else {
     tenant->set_tenant_super_block(tenant_meta.super_block_);
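Note: two idioms here. An if (OB_FAIL(ret)) branch with an empty body is the house style for short-circuiting an else-if chain once an earlier step has failed; and when a call expression spans lines, the whole expression must sit inside OB_FAIL (the original wrapped nothing, losing the slog write's return code). Paraphrased:

    constexpr int OB_SUCCESS = 0;
    #define OB_FAIL(stmt) (OB_SUCCESS != (ret = (stmt)))

    int write_slog() { return OB_SUCCESS; }  // hypothetical slog write

    int convert(const int prior_ret)
    {
      int ret = prior_ret;
      if (OB_FAIL(ret)) {
        // empty on purpose: an earlier step failed, skip the rest of the chain
      } else if (OB_FAIL(write_slog())) {
        // LOG_WARN with K(ret); the whole multi-line call sits inside OB_FAIL
      } else {
        // apply side effects only after the write has succeeded
      }
      return ret;
    }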
@@ -1778,6 +1778,7 @@ void ObTenant::check_das()
   if (!is_virtual_tenant_id(id_)) {
     ObTenantSwitchGuard guard(this);
     if (OB_ISNULL(MTL(ObDataAccessService *))) {
+      ret = OB_ERR_UNEXPECTED;
       LOG_WARN("failed to get das ptr", K(MTL_ID()));
     } else {
       double min_cpu = .0;
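Note: check_das() is void, so the point of the added assignment is local: later logic that consults ret, and any K(ret) in the logs, should see the lookup failure rather than a leftover OB_SUCCESS. Rule scanning flags a LOG_WARN on an error path where no code has been set. A sketch (the error-code value is illustrative):

    constexpr int OB_SUCCESS = 0;
    constexpr int OB_ERR_UNEXPECTED = -1;  // illustrative value

    struct DataAccessService { double min_cpu = 0.0; };

    DataAccessService *get_das() { return nullptr; }  // pretend the lookup fails

    void check()
    {
      int ret = OB_SUCCESS;
      DataAccessService *das = get_das();
      if (nullptr == das) {
        // Set an explicit code before logging: otherwise everything after
        // this point still believes the lookup succeeded.
        ret = OB_ERR_UNEXPECTED;
        // LOG_WARN("failed to get das ptr", ...)
      } else {
        // safe to use das here
      }
      (void)ret;
    }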
@@ -543,7 +543,7 @@ int ObTenantConfig::build_errsim_module_()
   if (OB_SUCC(ret)) {
     const int64_t percentage = this->errsim_module_error_percentage;
 
-    if (build_tenant_errsim_moulde(tenant_id_, current_version_, module_array, percentage)) {
+    if (OB_FAIL(build_tenant_errsim_moulde(tenant_id_, current_version_, module_array, percentage))) {
       LOG_WARN("failed to build tenant module", K(ret), K(tenant_id_));
     }
   }
@@ -43,7 +43,7 @@ int ObHTableLockMgr::init()
     LOG_WARN("init twice", K(ret));
   } else if (OB_FAIL(lock_map_.create(DEFAULT_BUCKET_NUM, ObModIds::TABLE_PROC, ObModIds::TABLE_PROC, MTL_ID()))) {
     LOG_WARN("fail to create htable lock map", K(ret));
-  } else if (allocator_.init(ObMallocAllocator::get_instance(), OB_MALLOC_MIDDLE_BLOCK_SIZE, attr)) {
+  } else if (OB_FAIL(allocator_.init(ObMallocAllocator::get_instance(), OB_MALLOC_MIDDLE_BLOCK_SIZE, attr))) {
     LOG_WARN("fail to init allocator", K(ret));
   } else {
     is_inited_ = true;
@@ -414,6 +414,7 @@ int ObTableLoadGeneralTableCompactor::create_tablet_compactor_task(int32_t sessi
   if (OB_FAIL(create_tablet_table_compactor(session_id, tablet_id, table_compactor))) {
     LOG_WARN("fail to create tablet table compactor", KR(ret));
   } else if (OB_ISNULL(compactor_task = OB_NEWx(CompactorTask, (&allocator_), table_compactor))) {
+    ret = OB_ALLOCATE_MEMORY_FAILED;
     LOG_WARN("fail to new CompactorTask", KR(ret));
   } else if (OB_FAIL(compactor_task_iter_.add(compactor_task))) {
     LOG_WARN("fail to add compactor task", KR(ret));
@@ -132,7 +132,7 @@ void ObTableLoadParallelMergeTableCompactor::stop()
 int ObTableLoadParallelMergeTableCompactor::handle_parallel_merge_success()
 {
   int ret = OB_SUCCESS;
-  if (build_result()) {
+  if (OB_FAIL(build_result())) {
     LOG_WARN("fail to build result", KR(ret));
   } else if (OB_FAIL(compact_ctx_->handle_table_compact_success())) {
     LOG_WARN("fail to notify table compact success", KR(ret));
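Note: taken together, the rules this commit enforces produce one canonical shape, composed below as a hedged sketch (macros paraphrased, helper names hypothetical, code values made up):

    constexpr int OB_SUCCESS = 0;
    constexpr int OB_ALLOCATE_MEMORY_FAILED = -2;  // illustrative value

    #define OB_FAIL(stmt)     (OB_SUCCESS != (ret = (stmt)))
    #define OB_TMP_FAIL(stmt) (OB_SUCCESS != (tmp_ret = (stmt)))
    #define OB_SUCC(r)        (OB_SUCCESS == (r))

    int prepare() { return OB_SUCCESS; }   // hypothetical steps
    int do_work() { return OB_SUCCESS; }
    int cleanup() { return OB_SUCCESS; }
    void *alloc_obj() { return nullptr; }

    int run()
    {
      int ret = OB_SUCCESS;
      int tmp_ret = OB_SUCCESS;
      void *obj = nullptr;
      if (OB_FAIL(prepare())) {            // wrap every fallible call in OB_FAIL
        // LOG_WARN with K(ret)
      } else if (nullptr == (obj = alloc_obj())) {
        ret = OB_ALLOCATE_MEMORY_FAILED;   // set a code before logging the failure
        // LOG_WARN with K(ret)
      }
      if (OB_SUCC(ret) && OB_FAIL(do_work())) {  // later step: guard on prior ret
        // LOG_WARN with K(ret)
      }
      if (OB_TMP_FAIL(cleanup())) {        // best-effort step: log, don't clobber ret
        // LOG_WARN with K(tmp_ret)
      }
      (void)obj;
      return ret;
    }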