diff --git a/deps/oblib/src/lib/mysqlclient/ob_mysql_connection_pool.cpp b/deps/oblib/src/lib/mysqlclient/ob_mysql_connection_pool.cpp index 674402a494..e0a9d5a4a7 100644 --- a/deps/oblib/src/lib/mysqlclient/ob_mysql_connection_pool.cpp +++ b/deps/oblib/src/lib/mysqlclient/ob_mysql_connection_pool.cpp @@ -464,8 +464,7 @@ int ObMySQLConnectionPool::acquire(const uint64_t tenant_id, ObMySQLConnection * } if (OB_ISNULL(connection)) { - //overwrite ret - ret = OB_ERR_UNEXPECTED; + ret = OB_SUCC(ret) ? OB_ERR_UNEXPECTED: ret; LOG_WARN("failed to acquire connection", K(this), K(tenant_id), K(server_count), K(busy_conn_count_), K(ret)); obsys::ObRLockGuard lock(get_lock_); diff --git a/deps/oblib/src/lib/ob_define.h b/deps/oblib/src/lib/ob_define.h index 457f8d571d..277c7f22bf 100644 --- a/deps/oblib/src/lib/ob_define.h +++ b/deps/oblib/src/lib/ob_define.h @@ -409,6 +409,7 @@ const int64_t OB_MAX_DIRECTORY_NAME_LENGTH = 128; // Compatible with Oracle const int64_t OB_MAX_DIRECTORY_PATH_LENGTH = 4000; // Compatible with Oracle const uint64_t OB_MAX_INTERVAL_PARTITIONS = 1048575; // interval parted table max partitions const int64_t OB_MAX_BALANCE_GROUP_NAME_LENGTH = 512; +const int64_t OB_SERVICE_NAME_LENGTH = 64; //plan cache const int64_t OB_PC_NOT_PARAM_COUNT = 8; @@ -1525,6 +1526,11 @@ const char *const OB_MYSQL_PROXY_VEERSION = "__proxy_version"; const char *const OB_MYSQL_CLIENT_VERSION = "__ob_client_version"; const char *const OB_MYSQL_CLIENT_NAME = "__ob_client_name"; +const char *const OB_MYSQL_FAILOVER_MODE = "__proxy_failover_mode"; +const char *const OB_MYSQL_FAILOVER_MODE_OFF = "off"; +const char *const OB_MYSQL_FAILOVER_MODE_ON = "on"; +const char *const OB_MYSQL_SERVICE_NAME = "__proxy_service_name"; + const char *const OB_MYSQL_JDBC_CLIENT_NAME = "OceanBase Connector/J"; const char *const OB_MYSQL_OCI_CLIENT_NAME = "OceanBase Connector/C"; // for java client diff --git a/deps/oblib/src/rpc/obmysql/obsm_struct.h b/deps/oblib/src/rpc/obmysql/obsm_struct.h index 020b694478..a895ae7484 100644 --- a/deps/oblib/src/rpc/obmysql/obsm_struct.h +++ b/deps/oblib/src/rpc/obmysql/obsm_struct.h @@ -80,6 +80,7 @@ public: client_sessid_ = INVALID_SESSID; client_addr_port_ = 0; client_create_time_ = 0; + has_service_name_ = false; } obmysql::ObCompressType get_compress_type() { @@ -215,6 +216,7 @@ public: uint32_t client_sessid_; int32_t client_addr_port_; int64_t client_create_time_; + bool has_service_name_; }; } // end of namespace observer } // end of namespace oceanbase diff --git a/deps/oblib/src/rpc/obrpc/ob_rpc_packet_list.h b/deps/oblib/src/rpc/obrpc/ob_rpc_packet_list.h index e75227d430..e181d48912 100644 --- a/deps/oblib/src/rpc/obrpc/ob_rpc_packet_list.h +++ b/deps/oblib/src/rpc/obrpc/ob_rpc_packet_list.h @@ -285,7 +285,7 @@ PCODE_DEF(OB_REMOVE_CLUSTER_INFO_FROM_ARB_SERVER, 0x2A8) #endif PCODE_DEF(OB_DROP_LOB, 0x2A9) PCODE_DEF(OB_EXCHANGE_PARTITION, 0x2AA) -// PCODE_DEF(OB_REFRESH_SERVICE_NAME, 0x2AB) +PCODE_DEF(OB_REFRESH_SERVICE_NAME, 0x2AB) PCODE_DEF(OB_CREATE_OUTLINE, 0x350) PCODE_DEF(OB_DROP_OUTLINE, 0x351) diff --git a/src/logservice/restoreservice/ob_log_restore_driver_base.cpp b/src/logservice/restoreservice/ob_log_restore_driver_base.cpp index 2d831a369d..7282c9ceea 100644 --- a/src/logservice/restoreservice/ob_log_restore_driver_base.cpp +++ b/src/logservice/restoreservice/ob_log_restore_driver_base.cpp @@ -16,6 +16,7 @@ #include "lib/ob_define.h" #include "storage/tx_storage/ob_ls_map.h" // ObLSIterator #include "storage/tx_storage/ob_ls_service.h" // ObLSService +#include 
"rootserver/ob_tenant_info_loader.h" // ObTenantInfoLoader #include "logservice/ob_log_service.h" namespace oceanbase @@ -126,8 +127,13 @@ int ObLogRestoreDriverBase::get_upper_resotore_scn(share::SCN &scn) { int ret = OB_SUCCESS; SCN replayable_point; - if (OB_FAIL(log_service_->get_replayable_point(replayable_point))) { - ARCHIVE_LOG(WARN, "get replayable point failed", K(ret)); + bool restore_log_limit = true; + if (OB_FAIL(check_fetch_log_unlimited_(restore_log_limit))) { + LOG_WARN("check_fetch_log_unlimited_ failed"); + } else if (! restore_log_limit) { + scn = share::SCN::max_scn(); + } else if (OB_FAIL(log_service_->get_replayable_point(replayable_point))) { + LOG_WARN("get replayable point failed", K(ret)); } else { share::SCN advance_scn = share::SCN::plus(replayable_point, FETCH_LOG_AHEAD_THRESHOLD_NS); scn = global_recovery_scn_ <= advance_scn ? global_recovery_scn_ : advance_scn; @@ -135,5 +141,24 @@ int ObLogRestoreDriverBase::get_upper_resotore_scn(share::SCN &scn) return ret; } +int ObLogRestoreDriverBase::check_fetch_log_unlimited_(bool &limit) +{ + int ret = OB_SUCCESS; + share::ObAllTenantInfo tenant_info; + rootserver::ObTenantInfoLoader *tenant_info_loader = MTL(rootserver::ObTenantInfoLoader*); + limit = true; + bool is_prepare = false; + + if (OB_ISNULL(tenant_info_loader)) { + ret = OB_ERR_UNEXPECTED; + LOG_ERROR("tenant_info_loader is NULL", K(tenant_info_loader)); + } else if(OB_FAIL(tenant_info_loader->check_is_prepare_flashback_for_switch_to_primary_status(is_prepare))) { + LOG_WARN("fail to check tenant status", KR(ret), KPC(tenant_info_loader)); + } else if (is_prepare) { + limit = false; + } + return ret; +} + } // namespace logservice } // namespace oceanbase diff --git a/src/logservice/restoreservice/ob_log_restore_driver_base.h b/src/logservice/restoreservice/ob_log_restore_driver_base.h index 941e055217..ff99ff4a20 100644 --- a/src/logservice/restoreservice/ob_log_restore_driver_base.h +++ b/src/logservice/restoreservice/ob_log_restore_driver_base.h @@ -42,6 +42,8 @@ protected: virtual int do_fetch_log_(ObLS &ls) = 0; int check_replica_status_(storage::ObLS &ls, bool &can_fetch_log); int get_upper_resotore_scn(share::SCN &scn); +private: + int check_fetch_log_unlimited_(bool &limit); protected: bool inited_; uint64_t tenant_id_; diff --git a/src/logservice/restoreservice/ob_log_restore_handler.cpp b/src/logservice/restoreservice/ob_log_restore_handler.cpp index abd0dc633e..e8fba4f459 100644 --- a/src/logservice/restoreservice/ob_log_restore_handler.cpp +++ b/src/logservice/restoreservice/ob_log_restore_handler.cpp @@ -856,7 +856,7 @@ bool ObLogRestoreHandler::restore_to_end() const RLockGuard guard(lock_); return restore_to_end_unlock_(); } - +ERRSIM_POINT_DEF(ERRSIM_LS_STATE_NOT_MATCH); int ObLogRestoreHandler::check_restore_to_newest_from_service_( const share::ObRestoreSourceServiceAttr &service_attr, const share::SCN &end_scn, @@ -864,27 +864,10 @@ int ObLogRestoreHandler::check_restore_to_newest_from_service_( { int ret = OB_SUCCESS; bool offline_log_exist = false; - share::ObTenantRole tenant_role; - share::schema::ObTenantStatus tenant_status; palf::AccessMode access_mode; - const char *db_name = service_attr.user_.mode_ == common::ObCompatibilityMode::MYSQL_MODE ? 
OB_SYS_DATABASE_NAME : OB_ORA_SYS_SCHEMA_NAME; - ObSqlString user; - char passwd[OB_MAX_PASSWORD_LENGTH + 1] = {0}; SMART_VAR(share::ObLogRestoreProxyUtil, proxy_util) { - if (!service_attr.is_valid()) { - ret = OB_ERR_UNEXPECTED; - } else if (OB_FAIL(service_attr.get_password(passwd, sizeof(passwd)))) { - CLOG_LOG(WARN, "get_password failed", K(id_), K(service_attr)); - } else if (OB_FAIL(service_attr.get_user_str_(user))) { - CLOG_LOG(WARN, "get user str failed", K(service_attr)); - } else if (OB_FAIL(proxy_util.init(MTL_ID(), service_attr.addr_, - user.ptr(), passwd, db_name))) { - CLOG_LOG(WARN, "proxy_util init failed", K(id_)); - } else if (OB_FAIL(proxy_util.get_tenant_info(tenant_role, tenant_status))) { - CLOG_LOG(WARN, "get tenant info failed", K(id_), K(service_attr)); - } else if (! tenant_role.is_standby() || share::schema::ObTenantStatus::TENANT_STATUS_NORMAL != tenant_status) { - ret = OB_SOURCE_TENANT_STATE_NOT_MATCH; - CLOG_LOG(WARN, "tenant role or status not match", K(id_), K(tenant_role), K(tenant_status), K(service_attr)); + if (OB_FAIL(proxy_util.init_with_service_attr(MTL_ID(), &service_attr))) { + CLOG_LOG(WARN, "proxy_util init failed", K(id_), K(service_attr)); } else if (OB_FAIL(proxy_util.get_max_log_info(share::ObLSID(id_), access_mode, archive_scn))) { // OB_ENTRY_NOT_EXIST, ls not exist in gv$ob_log_stat, a) ls has no leader; b) access virtual table failed; c) ls gc if (OB_ENTRY_NOT_EXIST == ret) { @@ -921,7 +904,10 @@ int ObLogRestoreHandler::check_restore_to_newest_from_service_( CLOG_LOG(INFO, "check_restore_to_newest succ", K(id_), K(archive_scn), K(end_scn)); } } - + if (OB_UNLIKELY(ERRSIM_LS_STATE_NOT_MATCH)) { + ret = OB_SUCC(ret) ? OB_SOURCE_LS_STATE_NOT_MATCH : ret; + CLOG_LOG(WARN, "ERRSIM_LS_STATE_NOT_MATCH is on", KR(ret)); + } // if connect to source tenant denied, rewrite ret_code if (-ER_ACCESS_DENIED_ERROR == ret) { ret = OB_PASSWORD_WRONG; diff --git a/src/observer/dbms_job/ob_dbms_job_utils.cpp b/src/observer/dbms_job/ob_dbms_job_utils.cpp index 62b3bb82f2..b6c2beed3c 100644 --- a/src/observer/dbms_job/ob_dbms_job_utils.cpp +++ b/src/observer/dbms_job/ob_dbms_job_utils.cpp @@ -218,10 +218,11 @@ int ObDBMSJobUtils::check_job_can_running(int64_t tenant_id, bool &can_running) OZ (GCTX.schema_service_->get_tenant_schema_guard(tenant_id, guard)); OZ (guard.check_tenant_is_restore(tenant_id, is_restore)); - // job can not run in standy cluster and restore. - if (OB_SUCC(ret) && job_queue_processor > 0 - && !GCTX.is_standby_cluster() - && !is_restore) { + // job can not run in standby and restore tenant.
+ bool is_primary = false; + if (FAILEDx(ObShareUtil::table_check_if_tenant_role_is_primary(tenant_id, is_primary))) { + LOG_WARN("fail to execute table_check_if_tenant_role_is_primary", KR(ret), K(tenant_id)); + } else if (is_primary && job_queue_processor > 0) { SMART_VAR(ObMySQLProxy::MySQLResult, result) { if (OB_FAIL(sql_proxy_->read(result, tenant_id, sql.ptr()))) { LOG_WARN("execute query failed", K(ret), K(sql), K(tenant_id)); diff --git a/src/observer/mysql/obmp_connect.cpp b/src/observer/mysql/obmp_connect.cpp index 178204602a..5507750554 100644 --- a/src/observer/mysql/obmp_connect.cpp +++ b/src/observer/mysql/obmp_connect.cpp @@ -312,6 +312,8 @@ int ObMPConnect::process() uint64_t tenant_id = OB_INVALID_ID; ObSQLSessionInfo *session = NULL; bool autocommit = false; + ObString service_name; + bool failover_mode = false; MAKE_TENANT_SWITCH_SCOPE_GUARD(guard); THIS_WORKER.set_timeout_ts(INT64_MAX); // avoid see a former timeout value if (THE_TRACE != nullptr) { @@ -342,8 +344,14 @@ int ObMPConnect::process() } else if (SS_STOPPING == GCTX.status_) { ret = OB_SERVER_IS_STOPPING; LOG_WARN("server is stopping", K(ret)); + } else if (OB_FAIL(extract_service_name(*conn, service_name, failover_mode))) { + LOG_WARN("fail to extract service name", KR(ret)); } else if (OB_FAIL(check_update_tenant_id(*conn, tenant_id))) { - LOG_WARN("fail to check update tenant id", K(ret)); + LOG_WARN("fail to check update tenant id", KR(ret), K(tenant_name_)); + if (OB_ERR_INVALID_TENANT_NAME == ret && !service_name.empty()) { + ret = OB_SERVICE_NAME_NOT_FOUND; + LOG_WARN("login via service_name but tenant not exist", KR(ret), K(service_name), K(tenant_name_)); + } } else if (OB_FAIL(guard.switch_to(tenant_id))) { LOG_WARN("switch to tenant fail", K(ret), K(tenant_id)); } else if (OB_FAIL(check_client_property(*conn))) { @@ -355,6 +363,8 @@ int ObMPConnect::process() } else if (OB_ISNULL(session)) { ret = OB_ERR_UNEXPECTED; LOG_ERROR("null session", K(ret), K(session)); + } else if (OB_FAIL(set_service_name(tenant_id, *session, service_name, failover_mode))) { + LOG_WARN("fail to set service_name", KR(ret), KPC(session), K(service_name), K(failover_mode)); } else if (OB_FAIL(verify_identify(*conn, *session, tenant_id))) { LOG_WARN("fail to verify_identify", K(ret)); } else if (OB_FAIL(process_kill_client_session(*session, true))) { @@ -2341,3 +2351,74 @@ int ObMPConnect::set_client_version(ObSMConnection &conn) } return ret; } +ERRSIM_POINT_DEF(ERRSIM_MOCK_SERVICE_NAME); +int ObMPConnect::extract_service_name(ObSMConnection &conn, ObString &service_name, bool &failover_mode) +{ + int ret = OB_SUCCESS; + ObString failover_mode_key(OB_MYSQL_FAILOVER_MODE); + ObString failover_mode_off(OB_MYSQL_FAILOVER_MODE_OFF); + ObString failover_mode_on(OB_MYSQL_FAILOVER_MODE_ON); + ObString service_name_key(OB_MYSQL_SERVICE_NAME); + bool is_found_failover_mode = false; + bool is_found_service_name = false; + conn.has_service_name_ = false; + // extract failover_mode and service_name + for (int64_t i = 0; OB_SUCC(ret) && !is_found_failover_mode && i < hsr_.get_connect_attrs().count(); ++i) { + const ObStringKV &kv = hsr_.get_connect_attrs().at(i); + if (failover_mode_key == kv.key_) { + if (failover_mode_off == kv.value_) { + failover_mode = false; + } else if (failover_mode_on == kv.value_) { + failover_mode = true; + } else { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("failover_mode should be on or off", KR(ret), K(kv)); + } + is_found_failover_mode = true; + } + } + for (int64_t i = 0; OB_SUCC(ret) &&
!is_found_service_name && i < hsr_.get_connect_attrs().count(); ++i) { + const ObStringKV &kv = hsr_.get_connect_attrs().at(i); + if (service_name_key == kv.key_) { + if (kv.value_.empty()) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("service_name should not be empty", KR(ret), K(kv)); + } else { + conn.has_service_name_ = true; + (void) service_name.assign_ptr(kv.value_.ptr(), kv.value_.length()); + } + is_found_service_name = true; + } + } + if (OB_SUCC(ret) && is_found_failover_mode != is_found_service_name) { + // The 'failover_mode' and 'service_name' must both be specified at the same time. + // The 'failover_mode' only matters if 'service_name' is not empty. + // If 'failover_mode' is 'on', it allows connection only to the main tenant. + ret = OB_ERR_UNEXPECTED; + LOG_WARN("failover_mode or service_name is missing", KR(ret), K(is_found_failover_mode), K(is_found_service_name)); + } + if (OB_SUCC(ret) && ERRSIM_MOCK_SERVICE_NAME && !tenant_name_.empty() && 0 != tenant_name_.compare(OB_SYS_TENANT_NAME)) { + service_name = ObString::make_string("test_service"); + conn.has_service_name_ = true; + failover_mode = true; + LOG_INFO("ERRSIM_MOCK_SERVICE_NAME opened", KR(ret), K(service_name), K(tenant_name_)); + } + return ret; +} +int ObMPConnect::set_service_name(const uint64_t tenant_id, ObSQLSessionInfo &session, + const ObString &service_name, const bool failover_mode) +{ + int ret = OB_SUCCESS; + (void) session.set_failover_mode(failover_mode); + if (OB_FAIL(ret) || service_name.empty()) { + // If the connection is not established via 'service_name', the 'connection_attr' + // will not contain 'failover_mode' and 'service_name'. Consequently, 'service_name' + // in 'session_info' will be empty, indicating that any 'service_name' related logic + // will not be triggered. 
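// ---------------------------------------------------------------------------
// Reviewer note (illustrative only, not part of this patch): the handshake
// parsing above expects the client or proxy to send the two connect attributes
// defined in ob_define.h (OB_MYSQL_FAILOVER_MODE = "__proxy_failover_mode",
// OB_MYSQL_SERVICE_NAME = "__proxy_service_name"). A minimal sketch of how a
// caller could supply them with the stock MySQL C API follows; the host, user,
// password, port and the "my_service" value are placeholders.
#include <mysql.h>
int connect_with_service_name(const char *host, const char *user, const char *pass)
{
  MYSQL *conn = mysql_init(NULL);
  if (NULL == conn) {
    return -1;
  }
  // the attribute keys must match what ObMPConnect::extract_service_name parses
  mysql_options4(conn, MYSQL_OPT_CONNECT_ATTR_ADD, "__proxy_failover_mode", "on");
  mysql_options4(conn, MYSQL_OPT_CONNECT_ATTR_ADD, "__proxy_service_name", "my_service");
  if (NULL == mysql_real_connect(conn, host, user, pass, NULL, 2881, NULL, 0)) {
    mysql_close(conn);
    return -1;
  }
  mysql_close(conn);
  return 0;
}
// ---------------------------------------------------------------------------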
+ } else if (OB_FAIL(session.set_service_name(service_name))) { + LOG_WARN("fail to set service_name", KR(ret), K(service_name), K(tenant_id)); + } else if (OB_FAIL(session.check_service_name_and_failover_mode(tenant_id))) { + LOG_WARN("fail to execute check_service_name_and_failover_mode", KR(ret), K(service_name), K(tenant_id)); + } + return ret; +} \ No newline at end of file diff --git a/src/observer/mysql/obmp_connect.h b/src/observer/mysql/obmp_connect.h index 07dcbb58f2..e68739cefa 100644 --- a/src/observer/mysql/obmp_connect.h +++ b/src/observer/mysql/obmp_connect.h @@ -126,6 +126,9 @@ private: int set_proxy_version(ObSMConnection &conn); int set_client_version(ObSMConnection &conn); + int extract_service_name(ObSMConnection &conn, ObString &service_name, bool &failover_mode); + int set_service_name(const uint64_t tenant_id, sql::ObSQLSessionInfo &session, + const ObString &service_name, const bool failover_mode); int get_proxy_user_name(ObString &real_user); private: DISALLOW_COPY_AND_ASSIGN(ObMPConnect); diff --git a/src/observer/mysql/obsm_handler.cpp b/src/observer/mysql/obsm_handler.cpp index 8546d82410..1318701758 100644 --- a/src/observer/mysql/obsm_handler.cpp +++ b/src/observer/mysql/obsm_handler.cpp @@ -164,6 +164,7 @@ int ObSMHandler::on_disconnect(easy_connection_t *c) } else { sess_info->set_session_state(sql::SESSION_KILLED); sess_info->set_shadow(true); + conn->has_service_name_ = false; } } LOG_INFO("kill and revert session", K(conn->sessid_), diff --git a/src/observer/ob_rpc_processor_simple.cpp b/src/observer/ob_rpc_processor_simple.cpp index 61e9e97c6f..5e21338034 100644 --- a/src/observer/ob_rpc_processor_simple.cpp +++ b/src/observer/ob_rpc_processor_simple.cpp @@ -85,7 +85,7 @@ #include "storage/tenant_snapshot/ob_tenant_snapshot_service.h" #include "storage/high_availability/ob_storage_ha_utils.h" #include "share/ob_rpc_struct.h" -#include "rootserver/ob_recovery_ls_service.h" +#include "rootserver/standby/ob_recovery_ls_service.h" #include "logservice/ob_server_log_block_mgr.h" #include "rootserver/ob_admin_drtask_util.h" #include "storage/ddl/ob_tablet_ddl_kv.h" @@ -3308,6 +3308,17 @@ int ObForceDumpServerUsageP::process() return ret; } +int ObRefreshServiceNameP::process() +{ + int ret = OB_SUCCESS; + if (OB_ISNULL(gctx_.ob_service_)) { + ret = OB_ERR_UNEXPECTED; + COMMON_LOG(WARN, "ob_service is null", KR(ret)); + } else if (OB_FAIL(gctx_.ob_service_->refresh_service_name(arg_, result_))) { + COMMON_LOG(WARN, "fail to refresh_service_name", KR(ret), K(arg_)); + } + return ret; +} int ObResourceLimitCalculatorP::process() { int ret = OB_SUCCESS; diff --git a/src/observer/ob_rpc_processor_simple.h b/src/observer/ob_rpc_processor_simple.h index 7424f13f6c..d0ef8a3a23 100644 --- a/src/observer/ob_rpc_processor_simple.h +++ b/src/observer/ob_rpc_processor_simple.h @@ -279,6 +279,7 @@ OB_DEFINE_PROCESSOR_S(Srv, OB_TABLET_LOCATION_BROADCAST, ObTabletLocationReceive OB_DEFINE_PROCESSOR_S(Srv, OB_CANCEL_GATHER_STATS, ObCancelGatherStatsP); OB_DEFINE_PROCESSOR_OBADMIN(Srv, OB_LOG_FORCE_SET_TENANT_LOG_DISK, ObForceSetTenantLogDiskP); OB_DEFINE_PROCESSOR_OBADMIN(Srv, OB_FORCE_DUMP_SERVER_USAGE, ObForceDumpServerUsageP); +OB_DEFINE_PROCESSOR_S(Srv, OB_REFRESH_SERVICE_NAME, ObRefreshServiceNameP); OB_DEFINE_PROCESSOR_S(Srv, OB_CAL_UNIT_PHY_RESOURCE, ObResourceLimitCalculatorP); OB_DEFINE_PROCESSOR_S(Srv, OB_CHECK_AND_CANCEL_DDL_COMPLEMENT_DAG, ObRpcCheckandCancelDDLComplementDagP); diff --git a/src/observer/ob_server.cpp b/src/observer/ob_server.cpp index 
ee898e0bc7..c3782a7b2a 100644 --- a/src/observer/ob_server.cpp +++ b/src/observer/ob_server.cpp @@ -102,7 +102,7 @@ #include "share/ash/ob_active_sess_hist_task.h" #include "share/ash/ob_active_sess_hist_list.h" #include "share/ob_server_blacklist.h" -#include "share/ob_primary_standby_service.h" // ObPrimaryStandbyService +#include "rootserver/standby/ob_standby_service.h" // ObStandbyService #include "share/scheduler/ob_dag_warning_history_mgr.h" #include "share/longops_mgr/ob_longops_mgr.h" #include "logservice/palf/election/interface/election.h" @@ -503,7 +503,7 @@ int ObServer::init(const ObServerOptions &opts, const ObPLogWriterCfg &log_cfg) } else if (OB_FAIL(imc_tasks_.init())) { LOG_ERROR("init imc tasks failed", KR(ret)); #endif - } else if (OB_FAIL(OB_PRIMARY_STANDBY_SERVICE.init(&sql_proxy_, &schema_service_))) { + } else if (OB_FAIL(OB_STANDBY_SERVICE.init(&sql_proxy_, &schema_service_))) { LOG_ERROR("init OB_PRIMARY_STANDBY_SERVICE failed", KR(ret)); } else if (OB_FAIL(init_px_target_mgr())) { LOG_ERROR("init px target mgr failed", KR(ret)); @@ -823,9 +823,9 @@ void ObServer::destroy() ObVirtualTenantManager::get_instance().destroy(); FLOG_INFO("virtual tenant manager destroyed"); - FLOG_INFO("begin to destroy OB_PRIMARY_STANDBY_SERVICE"); - OB_PRIMARY_STANDBY_SERVICE.destroy(); - FLOG_INFO("OB_PRIMARY_STANDBY_SERVICE destroyed"); + FLOG_INFO("begin to destroy OB_STANDBY_SERVICE"); + OB_STANDBY_SERVICE.destroy(); + FLOG_INFO("OB_STANDBY_SERVICE destroyed"); FLOG_INFO("begin to destroy rootservice event history"); ROOTSERVICE_EVENT_INSTANCE.destroy(); diff --git a/src/observer/ob_server_struct.cpp b/src/observer/ob_server_struct.cpp index d6ff9d80fc..2438af4053 100644 --- a/src/observer/ob_server_struct.cpp +++ b/src/observer/ob_server_struct.cpp @@ -35,16 +35,6 @@ ObGlobalContext &global_context() bool ObGlobalContext::is_observer() const { return !share::schema::ObSchemaService::g_liboblog_mode_; } -bool ObGlobalContext::is_primary_cluster() const -{ - return true;; -} - -bool ObGlobalContext::is_standby_cluster() const -{ - return false; -} - common::ObClusterRole ObGlobalContext::get_cluster_role() const { return PRIMARY_CLUSTER; @@ -147,39 +137,5 @@ ObGlobalContext &ObGlobalContext::operator=(const ObGlobalContext &other) return *this; } -ObUseWeakGuard::ObUseWeakGuard() -{ - auto *tsi_value = GET_TSI(TSIUseWeak); - if (NULL != tsi_value) { - tsi_value->inited_ = true; - tsi_value->did_use_weak_ = GCTX.is_standby_cluster_and_started();; - } else { - LOG_ERROR_RET(OB_ALLOCATE_MEMORY_FAILED, "tsi value is NULL"); - } -} - -ObUseWeakGuard::~ObUseWeakGuard() -{ - auto *tsi_value = GET_TSI(TSIUseWeak); - if (NULL != tsi_value) { - tsi_value->inited_ = false; - } -} - -bool ObUseWeakGuard::did_use_weak() -{ - bool use_weak = false; - auto *tsi_value = GET_TSI(TSIUseWeak); - if (NULL == tsi_value) { - LOG_ERROR_RET(OB_ALLOCATE_MEMORY_FAILED, "tsi value is NULL"); - use_weak = GCTX.is_standby_cluster_and_started(); - } else if (tsi_value->inited_) { - use_weak = tsi_value->did_use_weak_; - } else { - use_weak = GCTX.is_standby_cluster_and_started(); - } - return use_weak; -} - } // end of namespace observer } // end of namespace oceanbase diff --git a/src/observer/ob_server_struct.h b/src/observer/ob_server_struct.h index 7a27e767f9..be14149c54 100644 --- a/src/observer/ob_server_struct.h +++ b/src/observer/ob_server_struct.h @@ -298,10 +298,6 @@ struct ObGlobalContext // Refer to the high availability zone design document // bool is_observer() const; - bool 
is_standby_cluster_and_started() { return is_observer() && is_standby_cluster() && has_start_service(); } - bool is_started_and_can_weak_read() { return is_observer() && has_start_service(); } - bool is_primary_cluster() const; - bool is_standby_cluster() const; common::ObClusterRole get_cluster_role() const; share::ServerServiceStatus get_server_service_status() const; void set_upgrade_stage(obrpc::ObUpgradeStage upgrade_stage) { upgrade_stage_ = upgrade_stage; } @@ -324,22 +320,6 @@ struct ObThreadContext }; ObGlobalContext &global_context(); - -struct ObUseWeakGuard -{ - ObUseWeakGuard(); - ~ObUseWeakGuard(); - static bool did_use_weak(); -private: - struct TSIUseWeak - { - bool inited_; - bool did_use_weak_; - TSIUseWeak() - :inited_(false), did_use_weak_(false) - {} - }; -}; } // end of namespace observer } // end of namespace oceanbase diff --git a/src/observer/ob_service.cpp b/src/observer/ob_service.cpp index 878ca92815..29dbe8457f 100644 --- a/src/observer/ob_service.cpp +++ b/src/observer/ob_service.cpp @@ -48,6 +48,7 @@ #include "sql/optimizer/ob_join_order.h" #include "rootserver/ob_bootstrap.h" #include "rootserver/ob_tenant_info_loader.h" // ObTenantInfoLoader +#include "rootserver/ob_tenant_event_history_table_operator.h" // TENANT_EVENT_INSTANCE #include "observer/ob_server.h" #include "observer/ob_dump_task_generator.h" #include "observer/ob_server_schema_updater.h" @@ -78,6 +79,7 @@ #include "rootserver/backup/ob_backup_task_scheduler.h" // ObBackupTaskScheduler #include "rootserver/backup/ob_backup_schedule_task.h" // ObBackupScheduleTask #include "rootserver/ob_ls_recovery_stat_handler.h"//get_all_ls_replica_readbable_scn +#include "rootserver/ob_service_name_command.h" #ifdef OB_BUILD_TDE_SECURITY #include "share/ob_master_key_getter.h" #endif @@ -231,6 +233,8 @@ int ObService::init(common::ObMySQLProxy &sql_proxy, FLOG_WARN("client_manager_.initialize failed", "self_addr", gctx_.self_addr(), KR(ret)); } else if (OB_FAIL(CLUSTER_EVENT_INSTANCE.init(sql_proxy))) { FLOG_WARN("init cluster event history table failed", KR(ret)); + } else if (OB_FAIL(TENANT_EVENT_INSTANCE.init(sql_proxy, gctx_.self_addr()))) { + FLOG_WARN("init tenant event history table failed", KR(ret), K(gctx_.self_addr())); } else if (OB_FAIL(SERVER_EVENT_INSTANCE.init(sql_proxy, gctx_.self_addr()))) { FLOG_WARN("init server event history table failed", KR(ret)); } else if (OB_FAIL(DEALOCK_EVENT_INSTANCE.init(sql_proxy))) { @@ -359,6 +363,10 @@ void ObService::stop() FLOG_INFO("begin to stop cluster event instance"); CLUSTER_EVENT_INSTANCE.stop(); FLOG_INFO("cluster event instance stopped"); + + FLOG_INFO("begin to stop tenant event instance"); + TENANT_EVENT_INSTANCE.stop(); + FLOG_INFO("tenant event instance stopped"); } FLOG_INFO("[OBSERVICE_NOTICE] observice finish stop", K_(stopped)); } @@ -396,6 +404,10 @@ void ObService::wait() FLOG_INFO("begin to wait cluster event instance"); CLUSTER_EVENT_INSTANCE.wait(); FLOG_INFO("wait cluster event instance success"); + + FLOG_INFO("begin to wait tenant event instance"); + TENANT_EVENT_INSTANCE.wait(); + FLOG_INFO("wait tenant event instance success"); } FLOG_INFO("[OBSERVICE_NOTICE] wait ob_service end"); } @@ -420,6 +432,10 @@ int ObService::destroy() CLUSTER_EVENT_INSTANCE.destroy(); FLOG_INFO("cluster event instance destroyed"); + FLOG_INFO("begin to destroy tenant event instance"); + TENANT_EVENT_INSTANCE.destroy(); + FLOG_INFO("tenant event instance destroyed"); + FLOG_INFO("begin to destroy server event instance"); SERVER_EVENT_INSTANCE.destroy(); 
FLOG_INFO("server event instance destroyed"); @@ -3020,7 +3036,8 @@ int ObService::estimate_tablet_block_count(const obrpc::ObEstBlockArg &arg, } return ret; } - +ERRSIM_POINT_DEF(ERRSIM_GET_LS_SYNC_SCN_ERROR); +ERRSIM_POINT_DEF(ERRSIM_GET_SYS_LS_SYNC_SCN_ERROR); int ObService::get_ls_sync_scn( const ObGetLSSyncScnArg &arg, ObGetLSSyncScnRes &result) @@ -3089,6 +3106,16 @@ int ObService::get_ls_sync_scn( LOG_WARN("the ls not master", KR(ret), K(ls_id), K(first_leader_epoch), K(second_leader_epoch), K(role)); } + if (OB_SUCC(ret) && ERRSIM_GET_LS_SYNC_SCN_ERROR) { + cur_sync_scn = ls_id.is_sys_ls() ? cur_sync_scn : SCN::minus(cur_sync_scn, 1000); + ret = result.init(arg.get_tenant_id(), ls_id, cur_sync_scn, cur_restore_source_max_scn); + LOG_WARN("user ls errsim enabled", KR(ret), K(arg.get_tenant_id()), K(ls_id), K(cur_sync_scn), K(cur_restore_source_max_scn)); + } + if (OB_SUCC(ret) && ERRSIM_GET_SYS_LS_SYNC_SCN_ERROR) { + cur_sync_scn = ls_id.is_sys_ls() ? SCN::minus(cur_sync_scn, 1000) : cur_sync_scn; + ret = result.init(arg.get_tenant_id(), ls_id, cur_sync_scn, cur_restore_source_max_scn); + LOG_WARN("sys ls errsim enabled", KR(ret), K(arg.get_tenant_id()), K(ls_id), K(cur_sync_scn), K(cur_restore_source_max_scn)); + } } LOG_INFO("finish get_ls_sync_scn", KR(ret), K(cur_sync_scn), K(cur_restore_source_max_scn), K(arg), K(result)); return ret; @@ -3331,6 +3358,60 @@ int ObService::ob_admin_unlock_member_list( return ret; } +int ObService::refresh_service_name( + const ObRefreshServiceNameArg &arg, + ObRefreshServiceNameRes &result) +{ + // 1. epoch: + // 1.1 if the arg's epoch <= the tenant_info_loader's epoch, do nothing + // 1.2 otherwise, replace cache with the arg's service_name_list + // 2. kill local connections when the arg's service_op is STOP SERVICE + // and the target service_name's status in the tenant_info_loader is STOPPING + int ret = OB_SUCCESS; + const uint64_t tenant_id = arg.get_tenant_id(); + MAKE_TENANT_SWITCH_SCOPE_GUARD(guard); + if (OB_UNLIKELY(!inited_)) { + ret = OB_NOT_INIT; + LOG_WARN("not init", KR(ret), K(inited_), K(arg)); + } else if (OB_UNLIKELY(!arg.is_valid())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("arg is invaild", KR(ret), K(arg)); + } else if (tenant_id != MTL_ID() && OB_FAIL(guard.switch_to(tenant_id))) { + LOG_WARN("switch tenant failed", KR(ret), K(arg)); + } + + if (OB_SUCC(ret)) { + rootserver::ObTenantInfoLoader *tenant_info_loader = MTL(rootserver::ObTenantInfoLoader*); + if (OB_ISNULL(tenant_info_loader)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("tenant_info_loader should not be null", KR(ret), KP(tenant_info_loader)); + } else if (OB_FAIL(tenant_info_loader->update_service_name(arg.get_epoch(), arg.get_service_name_list()))) { + LOG_WARN("fail to execute update_service_name", KR(ret), K(arg)); + } else if (arg.is_start_service()) { + // When starting the service, it is expected that `service_name` is utilized. + // However, the ability for users to connect via `service_name` also depends on `tenant_info`, + // so it's crucial to ensure that `tenant_info` is up-to-date. 
+ const ObUpdateTenantInfoCacheArg &u_arg = arg.get_update_tenant_info_arg(); + if (OB_FAIL(tenant_info_loader->update_tenant_info_cache(u_arg.get_ora_rowscn(), u_arg.get_tenant_info(), + u_arg.get_finish_data_version(), u_arg.get_data_version_barrier_scn()))) { + LOG_WARN("fail to execute update_tenant_info_cache", KR(ret), K(u_arg), K(arg)); + } + } else if (arg.is_stop_service()) { + ObServiceName service_name; + if (OB_FAIL(tenant_info_loader->get_service_name(arg.get_target_service_name_id(), service_name))) { + LOG_WARN("fail to get service name", KR(ret), K(arg)); + } else if (service_name.is_stopping() + && OB_FAIL(ObServiceNameCommand::kill_local_connections(tenant_id, service_name))) { + LOG_WARN("fail to kill local connections", KR(ret), K(arg), K(service_name)); + } + } + } + if (FAILEDx(result.init(tenant_id))) { + LOG_WARN("failed to init res", KR(ret), K(tenant_id)); + } + FLOG_INFO("finish refresh_service_name", KR(ret), K(arg), K(result)); + return ret; +} }// end namespace observer }// end namespace oceanbase diff --git a/src/observer/ob_service.h b/src/observer/ob_service.h index c7f3dfea17..41fa94f62b 100644 --- a/src/observer/ob_service.h +++ b/src/observer/ob_service.h @@ -162,6 +162,8 @@ public: obrpc::ObEstBlockRes &res) const; int update_tenant_info_cache(const obrpc::ObUpdateTenantInfoCacheArg &arg, obrpc::ObUpdateTenantInfoCacheRes &result); + int refresh_service_name(const obrpc::ObRefreshServiceNameArg &arg, + obrpc::ObRefreshServiceNameRes &result); //////////////////////////////////////////////////////////////// // ObRpcMinorFreezeP @RS minor freeze int minor_freeze(const obrpc::ObMinorFreezeArg &arg, diff --git a/src/observer/ob_srv_xlator.cpp b/src/observer/ob_srv_xlator.cpp index bff67ad7ef..1fc4d32304 100644 --- a/src/observer/ob_srv_xlator.cpp +++ b/src/observer/ob_srv_xlator.cpp @@ -203,6 +203,8 @@ int ObSrvMySQLXlator::translate(rpc::ObRequest &req, ObReqProcessor *&processor) } else { if (req.is_in_connected_phase()) { ret = get_mp_connect_processor(processor); + } else if (OB_FAIL(check_service_name_(req))) { + LOG_WARN("fail to execute check_service_name_", KR(ret)); } else { const ObMySQLRawPacket &pkt = reinterpret_cast(req.get_packet()); if (pkt.get_cmd() == obmysql::COM_QUERY) { @@ -338,6 +340,36 @@ int ObSrvMySQLXlator::translate(rpc::ObRequest &req, ObReqProcessor *&processor) return ret; } +int ObSrvMySQLXlator::check_service_name_(rpc::ObRequest &req) +{ + int ret = OB_SUCCESS; + ObSMConnection *conn = reinterpret_cast(SQL_REQ_OP.get_sql_session(&req)); + ObSQLSessionInfo *session = NULL; + uint32_t sess_id = 0; + uint64_t tenant_id = OB_INVALID_TENANT_ID; + if (OB_ISNULL(conn) || OB_ISNULL(GCTX.session_mgr_)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("get unexpected null", KR(ret), KP(conn), KP(GCTX.session_mgr_), K(tenant_id)); + } else if (!conn->has_service_name_) { + // do nothing + } else if (FALSE_IT(sess_id = conn->sessid_)) { + } else if (FALSE_IT(tenant_id = conn->tenant_id_)) { + } else if (!is_user_tenant(tenant_id)) { + // do nothing + } else if (OB_FAIL(GCTX.session_mgr_->get_session(sess_id, session))) { + LOG_WARN("fail to get session", KR(ret), K(sess_id), K(tenant_id)); + } else if (OB_ISNULL(session)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("get unexpected null", KR(ret), KP(session), K(tenant_id)); + } else if (OB_FAIL(session->check_service_name_and_failover_mode())) { + LOG_WARN("fail to execute check_service_name_and_failover_mode", KR(ret), K(tenant_id)); + } + if (NULL != session) { + 
GCTX.session_mgr_->revert_session(session); + } + return ret; +} + ObReqProcessor *ObSrvXlator::get_processor(ObRequest &req) { int ret = OB_SUCCESS; diff --git a/src/observer/ob_srv_xlator.h b/src/observer/ob_srv_xlator.h index 390cb6bbcb..ed026df9cb 100644 --- a/src/observer/ob_srv_xlator.h +++ b/src/observer/ob_srv_xlator.h @@ -157,6 +157,7 @@ protected: int get_mp_connect_processor(ObReqProcessor *&ret_proc); private: + int check_service_name_(rpc::ObRequest &req); const ObGlobalContext &gctx_; DISALLOW_COPY_AND_ASSIGN(ObSrvMySQLXlator); }; // end of class ObSrvMySQLXlator diff --git a/src/observer/ob_srv_xlator_partition.cpp b/src/observer/ob_srv_xlator_partition.cpp index 7dbf83ab73..c6001cda13 100644 --- a/src/observer/ob_srv_xlator_partition.cpp +++ b/src/observer/ob_srv_xlator_partition.cpp @@ -294,6 +294,7 @@ void oceanbase::observer::init_srv_xlator_for_others(ObSrvRpcXlator *xlator) { RPC_PROCESSOR(ObRefreshTenantInfoP, gctx_); RPC_PROCESSOR(ObRpcGetLSReplayedScnP, gctx_); RPC_PROCESSOR(ObUpdateTenantInfoCacheP, gctx_); + RPC_PROCESSOR(ObRefreshServiceNameP, gctx_); RPC_PROCESSOR(ObSyncRewriteRulesP, gctx_); diff --git a/src/observer/omt/ob_multi_tenant.cpp b/src/observer/omt/ob_multi_tenant.cpp index a4da3d6a89..c89d771622 100644 --- a/src/observer/omt/ob_multi_tenant.cpp +++ b/src/observer/omt/ob_multi_tenant.cpp @@ -110,7 +110,7 @@ #include "rootserver/ob_tenant_info_loader.h"//ObTenantInfoLoader #include "rootserver/ob_create_standby_from_net_actor.h" // ObCreateStandbyFromNetActor #include "rootserver/ob_primary_ls_service.h"//ObLSService -#include "rootserver/ob_recovery_ls_service.h"//ObRecoveryLSService +#include "rootserver/standby/ob_recovery_ls_service.h"//ObRecoveryLSService #include "rootserver/ob_common_ls_service.h"//ObCommonLSService #include "rootserver/restore/ob_restore_service.h" //ObRestoreService #include "rootserver/ob_tenant_transfer_service.h" // ObTenantTransferService diff --git a/src/observer/omt/ob_tenant_node_balancer.cpp b/src/observer/omt/ob_tenant_node_balancer.cpp index c0a75298f0..c3c5e2283b 100644 --- a/src/observer/omt/ob_tenant_node_balancer.cpp +++ b/src/observer/omt/ob_tenant_node_balancer.cpp @@ -242,13 +242,6 @@ int ObTenantNodeBalancer::notify_create_tenant(const obrpc::TenantServerUnitConf } } - // In standby cluster, may repeat create tenant, if if_not_grant_ is true, ignore OB_TENANT_EXIST - if (OB_TENANT_EXIST == ret && unit.if_not_grant_) { - if (GCTX.is_standby_cluster()) { - ret = OB_SUCCESS; - } - } - return ret; } diff --git a/src/observer/virtual_table/ob_all_virtual_ls_log_restore_status.cpp b/src/observer/virtual_table/ob_all_virtual_ls_log_restore_status.cpp index 8fe9b0ec9d..9b4aaf1ad0 100644 --- a/src/observer/virtual_table/ob_all_virtual_ls_log_restore_status.cpp +++ b/src/observer/virtual_table/ob_all_virtual_ls_log_restore_status.cpp @@ -22,7 +22,7 @@ #include "lib/mysqlclient/ob_mysql_proxy.h" #include "storage/tx_storage/ob_ls_map.h" // ObLSIterator #include "storage/tx_storage/ob_ls_service.h" // ObLSService -#include "rootserver/ob_recovery_ls_service.h" //ObLSRecoveryService +#include "rootserver/standby/ob_recovery_ls_service.h" //ObLSRecoveryService using namespace oceanbase::share; diff --git a/src/observer/virtual_table/ob_show_processlist.cpp b/src/observer/virtual_table/ob_show_processlist.cpp index 0e66a7be5d..7a5015e359 100644 --- a/src/observer/virtual_table/ob_show_processlist.cpp +++ b/src/observer/virtual_table/ob_show_processlist.cpp @@ -487,7 +487,12 @@ bool 
ObShowProcesslist::FillScanner::operator()(sql::ObSQLSessionMgr::Key key, O break; } case SERVICE_NAME: { - cur_row_->cells_[cell_idx].set_null(); + if (!sess_info->get_service_name().is_empty()) { + cur_row_->cells_[cell_idx].set_varchar(sess_info->get_service_name().ptr()); + cur_row_->cells_[cell_idx].set_collation_type(default_collation); + } else { + cur_row_->cells_[cell_idx].set_null(); + } break; } case TOTAL_CPU_TIME: { diff --git a/src/pl/sys_package/ob_dbms_stats.cpp b/src/pl/sys_package/ob_dbms_stats.cpp index 8c88848e7e..6ec9edd7bf 100644 --- a/src/pl/sys_package/ob_dbms_stats.cpp +++ b/src/pl/sys_package/ob_dbms_stats.cpp @@ -5358,18 +5358,17 @@ int ObDbmsStats::flush_database_monitoring_info(sql::ObExecContext &ctx, int ObDbmsStats::check_statistic_table_writeable(sql::ObExecContext &ctx) { int ret = OB_SUCCESS; - share::schema::ObSchemaGetterGuard *schema_guard = ctx.get_virtual_table_ctx().schema_guard_; - bool in_restore = false; - if (OB_ISNULL(schema_guard) || OB_ISNULL(ctx.get_my_session())) { + uint64_t tenant_id = OB_INVALID_TENANT_ID; + bool is_primary = true; + if (OB_ISNULL(ctx.get_my_session())) { ret = OB_ERR_UNEXPECTED; - LOG_WARN("get unexpected null", K(ret), K(schema_guard)); - } else if (OB_FAIL(schema_guard->check_tenant_is_restore(ctx.get_my_session()->get_effective_tenant_id(), - in_restore))) { - LOG_WARN("failed to check tenant is restore", K(ret)); - } else if (OB_UNLIKELY(in_restore) || - GCTX.is_standby_cluster()) { + LOG_WARN("get unexpected null", KR(ret), KP(ctx.get_my_session())); + } else if (FALSE_IT(tenant_id = ctx.get_my_session()->get_effective_tenant_id())) { + } else if (OB_FAIL(ObShareUtil::mtl_check_if_tenant_role_is_primary(tenant_id, is_primary))) { + LOG_WARN("fail to execute mtl_check_if_tenant_role_is_primary", KR(ret), K(tenant_id)); + } else if (OB_UNLIKELY(!is_primary)) { ret = OB_NOT_SUPPORTED; - LOG_USER_ERROR(OB_NOT_SUPPORTED, "use dbms_stats during restore or standby cluster"); + LOG_USER_ERROR(OB_NOT_SUPPORTED, "use dbms_stats during non-primary tenant"); } return ret; } diff --git a/src/rootserver/CMakeLists.txt b/src/rootserver/CMakeLists.txt index 4aa3609ae2..cd45ae23f8 100644 --- a/src/rootserver/CMakeLists.txt +++ b/src/rootserver/CMakeLists.txt @@ -54,6 +54,7 @@ ob_set_subtarget(ob_rootserver common ob_root_utils.cpp ob_root_inspection.cpp ob_rs_event_history_table_operator.cpp + ob_tenant_event_history_table_operator.cpp ob_rs_job_table_operator.cpp ob_rs_reentrant_thread.cpp ob_rs_thread_checker.cpp @@ -81,11 +82,9 @@ ob_set_subtarget(ob_rootserver common ob_tenant_info_loader.cpp ob_create_standby_from_net_actor.cpp ob_primary_ls_service.cpp - ob_recovery_ls_service.cpp ob_balance_ls_primary_zone.cpp ob_common_ls_service.cpp ob_ls_service_helper.cpp - ob_tenant_role_transition_service.cpp ob_tenant_transfer_service.cpp ob_transfer_partition_task.cpp ob_tenant_balance_service.cpp @@ -103,6 +102,7 @@ ob_set_subtarget(ob_rootserver common ob_shrink_expand_resource_pool_checker.cpp ob_transfer_partition_command.cpp ob_partition_exchange.cpp + ob_service_name_command.cpp ) ob_set_subtarget(ob_rootserver balance @@ -195,4 +195,10 @@ ob_set_subtarget(ob_rootserver mview mview/ob_mview_dependency_service.cpp ) +ob_set_subtarget(ob_rootserver standby + standby/ob_standby_service.cpp + standby/ob_recovery_ls_service.cpp + standby/ob_tenant_role_transition_service.cpp +) + ob_server_add_target(ob_rootserver) diff --git a/src/rootserver/backup/ob_backup_data_set_task_mgr.cpp 
b/src/rootserver/backup/ob_backup_data_set_task_mgr.cpp index c7da0efe00..e3f20dedee 100644 --- a/src/rootserver/backup/ob_backup_data_set_task_mgr.cpp +++ b/src/rootserver/backup/ob_backup_data_set_task_mgr.cpp @@ -1104,7 +1104,7 @@ int ObBackupSetTaskMgr::get_backup_end_scn_(share::SCN &end_scn) const LOG_WARN("failed to get tenant info", K(ret), K(tenant_id)); } else if (OB_FAIL(ObBackupDataScheduler::get_backup_scn(*sql_proxy_, tenant_id, false/*is backup start*/, end_scn))) { LOG_WARN("failed to get end scn", K(ret), K(tenant_id)); - } else if (tenant_info.is_standby() && end_scn > tenant_info.get_standby_scn()) { + } else if (tenant_info.is_standby() && end_scn > tenant_info.get_readable_scn()) { // For standby tenant, make sure snapshot of end_scn is readable. Otherwise, we // can not backup table list. int64_t abs_timeout = ObTimeUtility::current_time() + 10 * 60 * 1000 * 1000; @@ -1115,7 +1115,7 @@ int ObBackupSetTaskMgr::get_backup_end_scn_(share::SCN &end_scn) const } else if (!tenant_info.is_standby()) { ret = OB_STATE_NOT_MATCH; LOG_WARN("tenant is not standby", K(ret), K(tenant_info)); - } else if (end_scn <= tenant_info.get_standby_scn()) { + } else if (end_scn <= tenant_info.get_readable_scn()) { break; } else if (ObTimeUtility::current_time() > abs_timeout) { ret = OB_TIMEOUT; diff --git a/src/rootserver/ddl_task/ob_build_mview_task.cpp b/src/rootserver/ddl_task/ob_build_mview_task.cpp index b614d83aa9..49838c141a 100644 --- a/src/rootserver/ddl_task/ob_build_mview_task.cpp +++ b/src/rootserver/ddl_task/ob_build_mview_task.cpp @@ -484,9 +484,12 @@ int ObBuildMViewTask::enable_mview() ObSchemaGetterGuard schema_guard; const ObTableSchema *mview_schema = nullptr; bool mview_table_exist = false; - if (GCTX.is_standby_cluster()) { + bool is_primary = false; + if (OB_FAIL(ObShareUtil::table_check_if_tenant_role_is_primary(tenant_id_, is_primary))) { + LOG_WARN("fail to execute table_check_if_tenant_role_is_primary", KR(ret), K(tenant_id_)); + } else if (!is_primary) { ret = OB_OP_NOT_ALLOW; - LOG_WARN("create mview in slave cluster is not allowed", KR(ret), K(mview_table_id_)); + LOG_WARN("create mview in non-primary tenant is not allowed", KR(ret), K(tenant_id_), K(is_primary), K(mview_table_id_)); } else if (OB_FAIL(schema_service.get_tenant_schema_guard(tenant_id_, schema_guard))) { LOG_WARN("failed to get schema guard", KR(ret), K(tenant_id_)); } else if (OB_FAIL(schema_guard.check_table_exist(tenant_id_, mview_table_id_, mview_table_exist))) { diff --git a/src/rootserver/ddl_task/ob_index_build_task.cpp b/src/rootserver/ddl_task/ob_index_build_task.cpp index ba7c7bbd98..b0ee65b10e 100755 --- a/src/rootserver/ddl_task/ob_index_build_task.cpp +++ b/src/rootserver/ddl_task/ob_index_build_task.cpp @@ -1459,9 +1459,12 @@ int ObIndexBuildTask::enable_index() schema_status.tenant_id_ = tenant_id_; int64_t version_in_inner_table = OB_INVALID_VERSION; int64_t local_schema_version = OB_INVALID_VERSION; - if (GCTX.is_standby_cluster()) { + bool is_standby = false; + if (OB_FAIL(ObShareUtil::table_check_if_tenant_role_is_standby(tenant_id_, is_standby))) { + LOG_WARN("fail to execute table_check_if_tenant_role_is_standby", KR(ret), K(tenant_id_)); + } else if (is_standby) { ret = OB_OP_NOT_ALLOW; - LOG_WARN("create global index in slave cluster is not allowed", K(ret), K(index_table_id_)); + LOG_WARN("create global index in standby tenant is not allowed", K(ret), K(index_table_id_)); } else if (OB_FAIL(schema_service.get_tenant_schema_guard(tenant_id_, schema_guard))) { LOG_WARN("fail 
to get schema guard", K(ret), K(tenant_id_)); } else if (OB_FAIL(schema_guard.check_table_exist(tenant_id_, index_table_id_, index_table_exist))) { diff --git a/src/rootserver/freeze/ob_major_merge_scheduler.cpp b/src/rootserver/freeze/ob_major_merge_scheduler.cpp index 129d6594ad..bcf1207ddf 100644 --- a/src/rootserver/freeze/ob_major_merge_scheduler.cpp +++ b/src/rootserver/freeze/ob_major_merge_scheduler.cpp @@ -844,7 +844,7 @@ void ObMajorMergeScheduler::check_merge_interval_time(const bool is_merging) false, tenant_info))) { LOG_WARN("fail to load tenant info", KR(ret), K_(tenant_id)); } else if (tenant_info.is_standby() - && (tenant_info.get_standby_scn() >= tenant_info.get_recovery_until_scn())) { + && (tenant_info.get_readable_scn() >= tenant_info.get_recovery_until_scn())) { LOG_INFO("standby tenant do not sync from primary tenant any more, and do not" " major freeze any more"); } else { diff --git a/src/rootserver/ob_ddl_service.cpp b/src/rootserver/ob_ddl_service.cpp index 4ee875a8c6..ae6f55d6a4 100755 --- a/src/rootserver/ob_ddl_service.cpp +++ b/src/rootserver/ob_ddl_service.cpp @@ -52,7 +52,7 @@ #include "share/ob_global_stat_proxy.h" #include "share/ob_freeze_info_proxy.h" #include "share/ob_service_epoch_proxy.h" -#include "share/ob_primary_standby_service.h" // ObPrimaryStandbyService +#include "rootserver/standby/ob_standby_service.h" // ObStandbyService #include "sql/resolver/ob_stmt_type.h" #include "sql/resolver/ddl/ob_ddl_resolver.h" #include "sql/resolver/expr/ob_raw_expr_modify_column_name.h" @@ -257,7 +257,6 @@ int ObDDLService::get_zones_in_region( int ObDDLService::get_tenant_schema_guard_with_version_in_inner_table(const uint64_t tenant_id, ObSchemaGetterGuard &schema_guard) { int ret = OB_SUCCESS; - bool is_standby = false; bool is_restore = false; bool use_local = false; int64_t version_in_inner_table = OB_INVALID_VERSION; @@ -268,11 +267,9 @@ int ObDDLService::get_tenant_schema_guard_with_version_in_inner_table(const uint } else if (OB_ISNULL(schema_service_)) { ret = OB_ERR_UNEXPECTED; LOG_WARN("schema_service is null", K(ret)); - } else if (OB_FAIL(get_is_standby_cluster(is_standby))) { - LOG_WARN("failed to get is standby cluster", K(ret)); } else if (OB_FAIL(schema_service_->check_tenant_is_restore(NULL, tenant_id, is_restore))) { LOG_WARN("fail to check tenant is restore", KR(ret), K(tenant_id)); - } else if ((is_standby || is_restore) && OB_SYS_TENANT_ID != tenant_id) { + } else if (is_restore && OB_SYS_TENANT_ID != tenant_id) { ObSchemaStatusProxy *schema_status_proxy = GCTX.schema_status_proxy_; if (OB_ISNULL(schema_status_proxy)) { ret = OB_ERR_UNEXPECTED; @@ -280,12 +277,8 @@ int ObDDLService::get_tenant_schema_guard_with_version_in_inner_table(const uint } else if (OB_FAIL(schema_status_proxy->get_refresh_schema_status(tenant_id, schema_status))) { LOG_WARN("failed to get tenant refresh schema status", KR(ret), K(tenant_id)); } else if (OB_INVALID_VERSION == schema_status.readable_schema_version_) { - // 1. The standalone cluster: the schema status has been reset, it can use the internal table to refresh, - // in this time the standalone cluster already has a leader - // 2. 
The second of physical recovery, after reset schema status, modify_schema can be modified + // The second of physical recovery, after reset schema status, modify_schema can be modified use_local = false; - } else if (is_standby) { - use_local = true; } else if (is_restore) { ret = OB_NOT_SUPPORTED; LOG_WARN("tenant is still restoring, ddl not supported", KR(ret), K(tenant_id), K(schema_status)); @@ -2466,11 +2459,8 @@ int ObDDLService::create_tables_in_trans(const bool if_not_exist, uint64_t tenant_data_version = 0; ObArenaAllocator allocator(ObModIds::OB_RS_PARTITION_TABLE_TEMP); RS_TRACE(create_tables_in_trans_begin); - bool is_standby = false; if (OB_FAIL(check_inner_stat())) { LOG_WARN("variable is not init"); - } else if (OB_FAIL(get_is_standby_cluster(is_standby))) { - LOG_WARN("faile to get is standby cluster", K(ret)); } else if (table_schemas.count() < 1) { ret = OB_ERR_UNEXPECTED; LOG_WARN("table_schemas have no element", K(ret)); @@ -17271,7 +17261,6 @@ int ObDDLService::truncate_oracle_temp_table(const ObString &db_name, int ObDDLService::maintain_obj_dependency_info(const obrpc::ObDependencyObjDDLArg &arg) { int ret = OB_SUCCESS; - bool is_standby = false; const uint64_t tenant_id = arg.tenant_id_; ObSchemaService *schema_service = schema_service_->get_schema_service(); ObDDLOperator ddl_operator(*schema_service_, *sql_proxy_); @@ -17283,8 +17272,6 @@ int ObDDLService::maintain_obj_dependency_info(const obrpc::ObDependencyObjDDLAr } else if (OB_ISNULL(schema_service)) { ret = OB_ERR_UNEXPECTED; LOG_WARN("schema_service must not null", K(ret)); - } else if (OB_FAIL(get_is_standby_cluster(is_standby))) { - LOG_WARN("failed to get is standby cluster", K(ret)); } else { ObDDLSQLTransaction trans(schema_service_); ObSchemaGetterGuard schema_guard; @@ -17296,15 +17283,15 @@ int ObDDLService::maintain_obj_dependency_info(const obrpc::ObDependencyObjDDLAr } else if (OB_FAIL(trans.start(sql_proxy_, tenant_id, refreshed_schema_version))) { LOG_WARN("failed to start trans, ", KR(ret), K(tenant_id), K(refreshed_schema_version)); } else if (!arg.update_dep_objs_.empty() - && OB_FAIL(process_schema_object_dependency(tenant_id, is_standby, arg.update_dep_objs_, + && OB_FAIL(process_schema_object_dependency(tenant_id, arg.update_dep_objs_, schema_guard, trans, ddl_operator, ObReferenceObjTable::UPDATE_OP))) { LOG_WARN("failed to process update object dependency", K(ret)); } else if (!arg.insert_dep_objs_.empty() - && OB_FAIL(process_schema_object_dependency(tenant_id, is_standby, arg.insert_dep_objs_, + && OB_FAIL(process_schema_object_dependency(tenant_id, arg.insert_dep_objs_, schema_guard, trans, ddl_operator, ObReferenceObjTable::INSERT_OP))) { LOG_WARN("failed to process insert object dependency", K(ret)); } else if (!arg.delete_dep_objs_.empty() - && OB_FAIL(process_schema_object_dependency(tenant_id, is_standby, arg.delete_dep_objs_, + && OB_FAIL(process_schema_object_dependency(tenant_id, arg.delete_dep_objs_, schema_guard, trans, ddl_operator, ObReferenceObjTable::DELETE_OP))) { LOG_WARN("failed to process delete object dependency", K(ret)); } else if (arg.schema_.is_valid() && OB_FAIL(recompile_view(arg.schema_, arg.reset_view_column_infos_, trans))) { @@ -17331,7 +17318,6 @@ int ObDDLService::maintain_obj_dependency_info(const obrpc::ObDependencyObjDDLAr int ObDDLService::process_schema_object_dependency( const uint64_t tenant_id, - const bool is_standby, const ObReferenceObjTable::DependencyObjKeyItemPairs &dep_objs, ObSchemaGetterGuard &schema_guard, ObMySQLTransaction &trans, @@ 
-17348,12 +17334,11 @@ int ObDDLService::process_schema_object_dependency( switch (op) { case ObReferenceObjTable::INSERT_OP: case ObReferenceObjTable::UPDATE_OP: - OZ (ObReferenceObjTable::batch_execute_insert_or_update_obj_dependency(tenant_id, is_standby, + OZ (ObReferenceObjTable::batch_execute_insert_or_update_obj_dependency(tenant_id, new_schema_version, dep_objs, trans, schema_guard, ddl_operator)); break; case ObReferenceObjTable::DELETE_OP: - OZ (ObReferenceObjTable::batch_execute_delete_obj_dependency(tenant_id, is_standby, - dep_objs, trans)); + OZ (ObReferenceObjTable::batch_execute_delete_obj_dependency(tenant_id, dep_objs, trans)); break; default: break; @@ -23826,30 +23811,23 @@ int ObDDLService::purge_recyclebin_tenant( ddl_stmt.reset(); const ObRecycleObject &recycle_obj = recycle_objs.at(i); if (ObRecycleObject::TENANT == recycle_obj.get_type()) { - bool is_standby = false; - if (OB_FAIL(get_is_standby_cluster(is_standby))) { - LOG_WARN("fail to get", K(ret)); - } else if (!is_standby) { - if (tenant_id != OB_SYS_TENANT_ID) { - ret = OB_ERR_UNEXPECTED; - LOG_WARN("purge tenant only in sys tenant", K(ret)); - } else if (OB_FAIL(ddl_stmt.assign_fmt("PURGE TENANT %.*s", - recycle_obj.get_object_name().length(), - recycle_obj.get_object_name().ptr()))) { - LOG_WARN("append sql failed", K(ret)); + if (tenant_id != OB_SYS_TENANT_ID) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("purge tenant only in sys tenant", K(ret)); + } else if (OB_FAIL(ddl_stmt.assign_fmt("PURGE TENANT %.*s", + recycle_obj.get_object_name().length(), + recycle_obj.get_object_name().ptr()))) { + LOG_WARN("append sql failed", K(ret)); + } else { + ObPurgeTenantArg purge_tenant_arg; + purge_tenant_arg.tenant_id_ = OB_SYS_TENANT_ID; + purge_tenant_arg.tenant_name_ = recycle_obj.get_object_name(); + purge_tenant_arg.ddl_stmt_str_ = ddl_stmt.string(); + if (OB_FAIL(purge_tenant(purge_tenant_arg))) { + LOG_WARN("purge tenant failed", K(purge_tenant_arg), K(recycle_obj), K(ret)); } else { - ObPurgeTenantArg purge_tenant_arg; - purge_tenant_arg.tenant_id_ = OB_SYS_TENANT_ID; - purge_tenant_arg.tenant_name_ = recycle_obj.get_object_name(); - purge_tenant_arg.ddl_stmt_str_ = ddl_stmt.string(); - if (OB_FAIL(purge_tenant(purge_tenant_arg))) { - LOG_WARN("purge tenant failed", K(purge_tenant_arg), K(recycle_obj), K(ret)); - } else { - ++purged_objects; - } + ++purged_objects; } - } else { // standalone cluster is not executed, but it should be counted normally - ++purged_objects; } } } @@ -25330,7 +25308,8 @@ int ObDDLService::create_sys_tenant( 0, /*freeze_service_epoch*/ 0, /*arbitration_service_epoch*/ 0, /*server_zone_op_service_epoch*/ - 0 /*heartbeat_service_epoch*/))) { + 0, /*heartbeat_service_epoch*/ + 0 /* service_name_epoch */))) { LOG_WARN("fail to init service epoch", KR(ret)); } if (trans.is_started()) { @@ -26766,11 +26745,11 @@ int ObDDLService::init_tenant_schema( LOG_WARN("fail to set tenant init global stat", KR(ret), K(tenant_id), K(core_schema_version), K(baseline_schema_version), K(snapshot_gc_scn), K(ddl_epoch), K(data_version)); - } else if (is_user_tenant(tenant_id) && OB_FAIL(OB_PRIMARY_STANDBY_SERVICE.write_upgrade_barrier_log( + } else if (is_user_tenant(tenant_id) && OB_FAIL(OB_STANDBY_SERVICE.write_upgrade_barrier_log( trans, tenant_id, data_version))) { LOG_WARN("fail to write_upgrade_barrier_log", KR(ret), K(tenant_id), K(data_version)); } else if (is_user_tenant(tenant_id) && - OB_FAIL(OB_PRIMARY_STANDBY_SERVICE.write_upgrade_data_version_barrier_log( + 
OB_FAIL(OB_STANDBY_SERVICE.write_upgrade_data_version_barrier_log( trans, tenant_id, data_version))) { LOG_WARN("fail to write_upgrade_data_version_barrier_log", KR(ret), K(tenant_id), K(data_version)); @@ -26815,7 +26794,8 @@ int ObDDLService::init_tenant_schema( 0, /*freeze_service_epoch*/ 0, /*arbitration_service_epoch*/ 0, /*server_zone_op_service_epoch*/ - 0 /*heartbeat_service_epoch*/))) { + 0, /*heartbeat_service_epoch*/ + 0 /* service_name_epoch */))) { LOG_WARN("fail to init service epoch", KR(ret)); } else if (is_creating_standby && OB_FAIL(set_log_restore_source(gen_user_tenant_id(tenant_id), log_restore_source, trans))) { LOG_WARN("fail to set_log_restore_source", KR(ret), K(tenant_id), K(log_restore_source)); @@ -28094,11 +28074,8 @@ int ObDDLService::modify_tenant(const ObModifyTenantArg &arg) const ObTenantSchema *orig_tenant_schema = NULL; const ObString &tenant_name = arg.tenant_schema_.get_tenant_name(); bool is_restore = false; - bool is_standby = false; if (OB_FAIL(check_inner_stat())) { LOG_WARN("variable is not init"); - } else if (OB_FAIL(get_is_standby_cluster(is_standby))) { - LOG_WARN("failed to get is standby", K(ret)); } else if (0 != arg.sys_var_list_.count() && !arg.alter_option_bitset_.is_empty()) { // After the schema is split, because __all_sys_variable is split under the tenant, in order to avoid @@ -28138,13 +28115,13 @@ int ObDDLService::modify_tenant(const ObModifyTenantArg &arg) } if (OB_FAIL(ret)) { - } else if (OB_FAIL(modify_tenant_inner_phase(arg, orig_tenant_schema, schema_guard, is_standby, is_restore))) { + } else if (OB_FAIL(modify_tenant_inner_phase(arg, orig_tenant_schema, schema_guard, is_restore))) { LOG_WARN("modify_tenant_inner_phase fail", K(ret)); } return ret; } -int ObDDLService::modify_tenant_inner_phase(const ObModifyTenantArg &arg, const ObTenantSchema *orig_tenant_schema, ObSchemaGetterGuard &schema_guard, bool is_standby, bool is_restore) +int ObDDLService::modify_tenant_inner_phase(const ObModifyTenantArg &arg, const ObTenantSchema *orig_tenant_schema, ObSchemaGetterGuard &schema_guard, bool is_restore) { int ret = OB_SUCCESS; if (OB_GTS_TENANT_ID == orig_tenant_schema->get_tenant_id()) { @@ -28159,7 +28136,7 @@ int ObDDLService::modify_tenant_inner_phase(const ObModifyTenantArg &arg, const ObDDLSQLTransaction trans(schema_service_); ObDDLOperator ddl_operator(*schema_service_, *sql_proxy_); bool value_changed = false; - if ((is_standby || is_restore) && is_user_tenant(tenant_id)) { + if (is_restore && is_user_tenant(tenant_id)) { ret = OB_OP_NOT_ALLOW; LOG_WARN("ddl operation is not allowed in standby cluster", K(ret)); LOG_USER_ERROR(OB_OP_NOT_ALLOW, "ddl operation in standby cluster"); @@ -28337,11 +28314,6 @@ int ObDDLService::modify_tenant_inner_phase(const ObModifyTenantArg &arg, const LOG_WARN("rename tenant while tenant is in physical restore status is not allowed", KR(ret), KPC(orig_tenant_schema)); LOG_USER_ERROR(OB_OP_NOT_ALLOW, "rename tenant while tenant is in physical restore status is"); - } else if (is_standby && is_user_tenant(orig_tenant_schema->get_tenant_id()) - && !arg.is_sync_from_primary()) { - ret = OB_OP_NOT_ALLOW; - LOG_WARN("rename user tenant in standby is not allowed", KR(ret), K(arg)); - LOG_USER_ERROR(OB_OP_NOT_ALLOW, "rename user tenant in standby cluster"); } else if (orig_tenant_schema->get_tenant_id() <= OB_MAX_RESERVED_TENANT_ID) { ret = OB_NOT_SUPPORTED; LOG_WARN("rename special tenant not supported", @@ -36231,13 +36203,9 @@ int ObDDLService::check_create_schema_replica_options( } if 
(OB_SUCC(ret)) { int64_t paxos_num = 0; - bool is_standby = false; if (OB_FAIL(schema.get_paxos_replica_num(schema_guard, paxos_num))) { LOG_WARN("fail to get paxos replica num", K(ret)); - } else if (OB_FAIL(get_is_standby_cluster(is_standby))) { - LOG_WARN("failed to get is standby cluster", K(ret)); - } else if ((!is_standby && paxos_num <= 0) - || paxos_num > common::OB_MAX_MEMBER_NUMBER) { + } else if (paxos_num <= 0 || paxos_num > common::OB_MAX_MEMBER_NUMBER) { ret = OB_INVALID_ARGUMENT; LOG_WARN("invalid paxos replica num", K(ret), K(schema)); LOG_USER_ERROR(OB_INVALID_ARGUMENT, "locality paxos replica num"); @@ -36288,13 +36256,9 @@ int ObDDLService::check_alter_schema_replica_options( if (OB_SUCC(ret)) { int64_t paxos_num = 0; - bool is_standby = false; if (OB_FAIL(new_schema.get_paxos_replica_num(schema_guard, paxos_num))) { LOG_WARN("fail to get paxos replica num", K(ret)); - } else if (OB_FAIL(get_is_standby_cluster(is_standby))) { - LOG_WARN("failed to get is standby cluster", K(ret)); - } else if ((!is_standby && paxos_num <= 0) - || paxos_num > common::OB_MAX_MEMBER_NUMBER) { + } else if (paxos_num <= 0 || paxos_num > common::OB_MAX_MEMBER_NUMBER) { ret = OB_INVALID_ARGUMENT; LOG_WARN("invalid paxos replica num", K(ret)); LOG_USER_ERROR(OB_INVALID_ARGUMENT, "locality paxos replica num"); @@ -36472,13 +36436,6 @@ int ObDDLService::construct_zone_region_list( return ret; } -int ObDDLService::get_is_standby_cluster(bool &is_standby) const -{ - int ret = OB_SUCCESS; - is_standby = false; - return ret; -} - template int ObDDLService::set_schema_replica_num_options( SCHEMA &schema, diff --git a/src/rootserver/ob_ddl_service.h b/src/rootserver/ob_ddl_service.h index f67c701443..77f99faa85 100644 --- a/src/rootserver/ob_ddl_service.h +++ b/src/rootserver/ob_ddl_service.h @@ -654,7 +654,6 @@ public: int maintain_obj_dependency_info(const obrpc::ObDependencyObjDDLArg &arg); int process_schema_object_dependency( const uint64_t tenant_id, - const bool is_standby, const share::schema::ObReferenceObjTable::DependencyObjKeyItemPairs &dep_objs, share::schema::ObSchemaGetterGuard &schema_guard, ObMySQLTransaction &trans, @@ -1308,7 +1307,6 @@ private: int modify_tenant_inner_phase(const obrpc::ObModifyTenantArg &arg, const ObTenantSchema *orig_tenant_schema, ObSchemaGetterGuard &schema_guard, - bool is_standby, bool is_restore); int update_global_index(obrpc::ObAlterTableArg &arg, const uint64_t tenant_id, @@ -2629,7 +2627,6 @@ private: const share::schema::ObSysVariableSchema &sys_variable, share::schema::ObSysParam *sys_params, int64_t params_capacity); - int get_is_standby_cluster(bool &is_standby) const; int check_can_alter_column( const int64_t tenant_id, const share::schema::AlterTableSchema &alter_table_schema, diff --git a/src/rootserver/ob_empty_server_checker.cpp b/src/rootserver/ob_empty_server_checker.cpp index 5ce28b3927..cc0fbc41f6 100644 --- a/src/rootserver/ob_empty_server_checker.cpp +++ b/src/rootserver/ob_empty_server_checker.cpp @@ -290,16 +290,34 @@ int ObEmptyServerChecker::check_server_emtpy_by_ls_( LOG_WARN("NULL replica pointer", K(ret)); } else { // check whether has member on empty servers - FOREACH_CNT_X(m, replica->get_member_list(), OB_SUCC(ret)) { + FOREACH_CNT_X(m, replica->get_member_list(), OB_SUCC(ret) && empty_servers.count() > 0) { const ObAddr &addr = m->get_server(); if (has_exist_in_array(empty_servers, addr, &idx)) { //has member in server - LOG_INFO("ls replica has member on sever", K(ls_info), K(addr), K(empty_servers)); + LOG_INFO("ls replica has 
member on server", K(ls_info), K(addr), K(empty_servers)); if (OB_FAIL(empty_servers.remove(idx))) { LOG_WARN("failed to remove addr from empty servers", KR(ret), K(idx), K(empty_servers)); } } } // end FORECAH member_list + ObMember learner; + for (int64_t index = 0; + OB_SUCC(ret) && index < replica->get_learner_list().get_member_number() && empty_servers.count() > 0; + ++index) { + learner.reset(); + if (OB_FAIL(replica->get_learner_list().get_member_by_index(index, learner))) { + LOG_WARN("fail to get learner by index", KR(ret), K(index)); + } else { + const ObAddr &addr = learner.get_server(); + if (has_exist_in_array(empty_servers, addr, &idx)) { + //has learner in server + LOG_INFO("ls replica has learner on server", K(ls_info), K(addr), K(empty_servers)); + if (OB_FAIL(empty_servers.remove(idx))) { + LOG_WARN("failed to remove addr from empty servers", KR(ret), K(idx), K(empty_servers)); + } + } + } + } } // filter server of replicas for (int64_t i = 0; i < replica_array.count() && OB_SUCC(ret); ++i) { diff --git a/src/rootserver/ob_ls_recovery_reportor.cpp b/src/rootserver/ob_ls_recovery_reportor.cpp index 94e3fc7966..c42a2029fe 100755 --- a/src/rootserver/ob_ls_recovery_reportor.cpp +++ b/src/rootserver/ob_ls_recovery_reportor.cpp @@ -14,7 +14,7 @@ #include "rootserver/ob_ls_recovery_reportor.h" #include "rootserver/ob_tenant_info_loader.h" -#include "rootserver/ob_tenant_role_transition_service.h"//ObTenantRoleTransitionConstants +#include "rootserver/standby/ob_tenant_role_transition_service.h"//ObTenantRoleTransitionConstants #include "rootserver/ob_rs_async_rpc_proxy.h" //ObGetLSReplayedScnProxy #include "rootserver/ob_ls_recovery_stat_handler.h" //ObLSRecoveryStatHandler #include "rootserver/ob_ls_service_helper.h"//update_ls_stat_in_trans diff --git a/src/rootserver/ob_ls_service_helper.cpp b/src/rootserver/ob_ls_service_helper.cpp index 4462062d1a..3ce7ce8c64 100755 --- a/src/rootserver/ob_ls_service_helper.cpp +++ b/src/rootserver/ob_ls_service_helper.cpp @@ -29,9 +29,9 @@ #include "share/ob_upgrade_utils.h"//ObUpgradeChecker #include "share/rc/ob_tenant_base.h"//MTL_SWITCH #include "observer/ob_server_struct.h"//GCTX -#include "rootserver/ob_recovery_ls_service.h"//ObRecoveryLSHelper +#include "rootserver/standby/ob_recovery_ls_service.h"//ObRecoveryLSHelper #include "rootserver/ob_tenant_thread_helper.h"//get_zone_priority -#include "rootserver/ob_tenant_role_transition_service.h"//get_checkpoint_by_rpc +#include "rootserver/standby/ob_tenant_role_transition_service.h"//get_checkpoint_by_rpc #include "storage/tx_storage/ob_ls_map.h" #include "storage/tx_storage/ob_ls_service.h" #include "storage/tx_storage/ob_ls_handle.h" //ObLSHandle diff --git a/src/rootserver/ob_root_inspection.cpp b/src/rootserver/ob_root_inspection.cpp index c713e394ff..cadd67e751 100755 --- a/src/rootserver/ob_root_inspection.cpp +++ b/src/rootserver/ob_root_inspection.cpp @@ -128,8 +128,6 @@ int ObTenantChecker::check_create_tenant_end_() LOG_WARN("schema service not init", K(ret)); } else if (!schema_service_->is_sys_full_schema()) { // skip - } else if (GCTX.is_standby_cluster()) { - // skip } else if (OB_FAIL(schema_service_->get_tenant_ids(tenant_ids))) { LOG_WARN("get_tenant_ids failed", K(ret)); } else if (OB_ISNULL(GCTX.root_service_)) { @@ -991,8 +989,6 @@ int ObRootInspection::calc_diff_names(const uint64_t tenant_id, ObIArray &miss_names /* data inner table less than hard code*/) { int ret = OB_SUCCESS; - ObRefreshSchemaStatus schema_status; - schema_status.tenant_id_ = tenant_id; 
fetch_names.reset(); if (!inited_) { ret = OB_NOT_INIT; @@ -1007,28 +1003,21 @@ int ObRootInspection::calc_diff_names(const uint64_t tenant_id, ret = OB_INVALID_ARGUMENT; LOG_WARN("table_name is null or names is empty", KR(ret), K(tenant_id), KP(table_name), K(names)); - } else if (GCTX.is_standby_cluster() && is_user_tenant(tenant_id)) { - if (OB_ISNULL(GCTX.schema_status_proxy_)) { - ret = OB_ERR_UNEXPECTED; - LOG_WARN("schema status proxy is null", K(ret)); - } else if (OB_FAIL(GCTX.schema_status_proxy_->get_refresh_schema_status(tenant_id, schema_status))) { - LOG_WARN("fail to get schema status", KR(ret), K(tenant_id)); - } } if (OB_SUCC(ret)) { - const uint64_t exec_tenant_id = schema_status.tenant_id_; - int64_t snapshot_timestamp = schema_status.snapshot_timestamp_; + const uint64_t exec_tenant_id = tenant_id; ObSqlString sql; - ObSQLClientRetryWeak sql_client_retry_weak(sql_proxy_, - snapshot_timestamp); if (OB_FAIL(sql.append_fmt("SELECT name FROM %s%s%s", table_name, (extra_cond.empty()) ? "" : " WHERE ", extra_cond.ptr()))) { LOG_WARN("append_fmt failed", KR(ret), K(tenant_id), K(table_name), K(extra_cond)); + } else if (OB_ISNULL(GCTX.sql_proxy_)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("GCTX.sql_proxy_ is null", KR(ret), KP(GCTX.sql_proxy_)); } else { SMART_VAR(ObMySQLProxy::MySQLResult, res) { ObMySQLResult *result = NULL; - if (OB_FAIL(sql_client_retry_weak.read(res, exec_tenant_id, sql.ptr()))) { + if (OB_FAIL(GCTX.sql_proxy_->read(res, exec_tenant_id, sql.ptr()))) { LOG_WARN("execute sql failed", KR(ret), K(tenant_id), K(sql)); can_retry_ = true; } else if (OB_ISNULL(result = res.get_result())) { @@ -1063,7 +1052,7 @@ int ObRootInspection::calc_diff_names(const uint64_t tenant_id, if (OB_SUCC(ret)) { if (fetch_names.count() <= 0) { LOG_WARN("maybe tenant or zone has been deleted, ignore it", - KR(ret), K(schema_status), K(table_name), K(extra_cond)); + KR(ret), K(table_name), K(extra_cond)); } else { extra_names.reset(); miss_names.reset(); diff --git a/src/rootserver/ob_root_service.cpp b/src/rootserver/ob_root_service.cpp index c2ee2855e5..af5ea20b9e 100755 --- a/src/rootserver/ob_root_service.cpp +++ b/src/rootserver/ob_root_service.cpp @@ -3029,19 +3029,6 @@ int ObRootService::alter_database(const ObAlterDatabaseArg &arg) } else if (!arg.is_valid()) { ret = OB_INVALID_ARGUMENT; LOG_WARN("invalid arg", K(arg), K(ret)); - } else if (common::STANDBY_CLUSTER == ObClusterInfoGetter::get_cluster_role_v2()) { - const int64_t tenant_id = arg.database_schema_.get_tenant_id(); - ObSchemaGetterGuard schema_guard; - uint64_t database_id = OB_INVALID_ID; - if (OB_FAIL(ddl_service_.get_tenant_schema_guard_with_version_in_inner_table( - tenant_id, schema_guard))) { - LOG_WARN("get_schema_guard with version in inner table failed", K(ret), K(tenant_id)); - } else if (OB_FAIL(schema_guard.get_database_id(tenant_id, - arg.database_schema_.get_database_name_str(), database_id))) { - LOG_WARN("failed to get database id", K(ret), K(tenant_id), K(arg)); - } - } - if (OB_FAIL(ret)) { } else if (OB_FAIL(ddl_service_.alter_database(arg))) { LOG_WARN("alter database failed", K(arg), K(ret)); } @@ -9493,7 +9480,6 @@ int ObRootService::admin_rolling_upgrade_cmd(const obrpc::ObAdminRollingUpgradeA int ObRootService::physical_restore_tenant(const obrpc::ObPhysicalRestoreTenantArg &arg, obrpc::Int64 &res_job_id) { int ret = OB_SUCCESS; - bool has_standby_cluster = false; res_job_id = OB_INVALID_ID; int64_t current_timestamp = ObTimeUtility::current_time(); int64_t start_ts = 
ObTimeUtility::current_time(); @@ -9515,12 +9501,10 @@ int ObRootService::physical_restore_tenant(const obrpc::ObPhysicalRestoreTenantA } else if (!arg.is_valid()) { ret = OB_INVALID_ARGUMENT; LOG_WARN("invalid arg", K(arg), K(ret)); - } else if (GCTX.is_standby_cluster() || GCONF.in_upgrade_mode()) { + } else if (GCONF.in_upgrade_mode()) { ret = OB_OP_NOT_ALLOW; - LOG_WARN("restore tenant while in standby cluster or " - "in upgrade mode is not allowed", KR(ret)); - LOG_USER_ERROR(OB_OP_NOT_ALLOW, - "restore tenant while in standby cluster or in upgrade mode"); + LOG_WARN("restore tenant while in upgrade mode is not allowed", KR(ret)); + LOG_USER_ERROR(OB_OP_NOT_ALLOW, "restore tenant while in upgrade mode"); } else if (0 == restore_concurrency) { ret = OB_OP_NOT_ALLOW; LOG_WARN("restore tenant when restore_concurrency is 0 not allowed", KR(ret)); @@ -11036,7 +11020,6 @@ int ObRootService::get_recycle_schema_versions( { int ret = OB_SUCCESS; LOG_INFO("receive get recycle schema versions request", K(arg)); - bool is_standby = GCTX.is_standby_cluster(); bool in_service = is_full_service(); if (OB_UNLIKELY(!inited_)) { ret = OB_NOT_INIT; @@ -11044,10 +11027,10 @@ } else if (!arg.is_valid()) { ret = OB_INVALID_ARGUMENT; LOG_WARN("arg is invalid", K(ret), K(arg)); - } else if (!is_standby || !in_service) { + } else if (!in_service) { ret = OB_STATE_NOT_MATCH; - LOG_WARN("should be standby cluster and rs in service", - KR(ret), K(is_standby), K(in_service)); + LOG_WARN("rs should be in service", + KR(ret), K(in_service)); } else if (OB_FAIL(schema_history_recycler_.get_recycle_schema_versions(arg, result))) { LOG_WARN("fail to get recycle schema versions", KR(ret), K(arg)); } @@ -11831,10 +11814,6 @@ int ObRootService::handle_recover_table(const obrpc::ObRecoverTableArg &arg) } else if (!arg.is_valid()) { ret = OB_INVALID_ARGUMENT; LOG_WARN("invalid argument", K(ret), K(arg)); - } else if (GCTX.is_standby_cluster()) { - ret = OB_OP_NOT_ALLOW; - LOG_WARN("recover table in standby tenant is not allowed", K(ret), K(arg)); - LOG_USER_ERROR(OB_OP_NOT_ALLOW, "recover table in standby tenant"); } else if (GCONF.in_upgrade_mode()) { ret = OB_OP_NOT_ALLOW; LOG_WARN("recover table in upgrade mode is not allowed", K(ret), K(arg)); diff --git a/src/rootserver/ob_rs_async_rpc_proxy.h index ab4315cce7..4175a3c56c 100644 --- a/src/rootserver/ob_rs_async_rpc_proxy.h +++ b/src/rootserver/ob_rs_async_rpc_proxy.h @@ -90,6 +90,7 @@ RPC_F(obrpc::OB_TRIM_KEY_LIST, obrpc::ObTrimKeyListArg, obrpc::ObTrimKeyListResu RPC_F(obrpc::OB_INNER_CREATE_TENANT_SNAPSHOT, obrpc::ObInnerCreateTenantSnapshotArg, obrpc::ObInnerCreateTenantSnapshotResult, ObTenantSnapshotCreatorProxy); RPC_F(obrpc::OB_INNER_DROP_TENANT_SNAPSHOT, obrpc::ObInnerDropTenantSnapshotArg, obrpc::ObInnerDropTenantSnapshotResult, ObTenantSnapshotDropperProxy); RPC_F(obrpc::OB_FLUSH_LS_ARCHIVE, obrpc::ObFlushLSArchiveArg, obrpc::Int64, ObFlushLSArchiveProxy); +RPC_F(obrpc::OB_REFRESH_SERVICE_NAME, obrpc::ObRefreshServiceNameArg, obrpc::ObRefreshServiceNameRes, ObRefreshServiceNameProxy); RPC_F(obrpc::OB_CAL_STANDBY_TENANT_PHY_RESOURCE, obrpc::ObGetTenantResArg, obrpc::ObTenantLogicalRes, ObGetTenantResProxy); RPC_F(obrpc::OB_KILL_QUERY_CLIENT_SESSION, obrpc::ObKillQueryClientSessionArg, obrpc::Int64, ObKillQueryClientSessionProxy); diff --git a/src/rootserver/ob_rs_rpc_processor.h index 1d68251cde..8f305165b4 100644 --- a/src/rootserver/ob_rs_rpc_processor.h +++ 
b/src/rootserver/ob_rs_rpc_processor.h @@ -124,11 +124,9 @@ protected: } else if (OB_INVALID_TENANT_ID == ddl_arg_->exec_tenant_id_) { ret = OB_INVALID_ARGUMENT; RS_LOG(WARN, "exec tenant id is invalid", K(ret), "arg", *ddl_arg_); - } else if (common::STANDBY_CLUSTER == ObClusterInfoGetter::get_cluster_role_v2() - && !ddl_arg_->is_allow_in_standby()) { - ret = OB_OP_NOT_ALLOW; - RS_LOG(WARN, "ddl operation not allow in standby", KR(ret), KPC(ddl_arg_)); } else { + // TODO (linqiucen.lqc): check whether the tenant is standby + // it will be done after DDL is executed by the tenant itself rather than sys tenant auto *tsi_value = GET_TSI(share::schema::TSIDDLVar); // used for parallel ddl auto *tsi_generator = GET_TSI(share::schema::TSISchemaVersionGenerator); diff --git a/src/rootserver/ob_schema_history_recycler.cpp b/src/rootserver/ob_schema_history_recycler.cpp index f965ba0e00..af25bf3fde 100644 --- a/src/rootserver/ob_schema_history_recycler.cpp +++ b/src/rootserver/ob_schema_history_recycler.cpp @@ -180,9 +180,6 @@ int ObSchemaHistoryRecycler::check_stop() int ret = OB_SUCCESS; if (OB_FAIL(check_inner_stat())) { LOG_WARN("fail to check inner stat", KR(ret)); - } else if (GCTX.is_standby_cluster()) { - ret = OB_CANCELED; - LOG_WARN("schema history recycler should stopped", KR(ret)); } return ret; } @@ -269,10 +266,20 @@ int ObSchemaHistoryRecycler::try_recycle_schema_history() LOG_WARN("fail to check inner stat", KR(ret)); } else if (OB_FAIL(schema_service_->get_tenant_ids(tenant_ids))) { LOG_WARN("fail to get schema_guard", KR(ret)); + } else { + for (int64_t i = tenant_ids.count() - 1; OB_SUCC(ret) && i >= 0; i--) { + const uint64_t tenant_id = tenant_ids.at(i); + bool skip = true; + if (OB_FAIL(check_can_skip_tenant(tenant_id, skip))) { + LOG_WARN("fail to check tenant can skip", KR(ret), K(tenant_id)); + } else if (skip && OB_FAIL(tenant_ids.remove(i))) { + LOG_WARN("fail to remove tenant_id", KR(ret), K(tenant_ids), K(i)); + } + } + } + if (OB_FAIL(ret) || tenant_ids.count() <= 0 ) { } else if (OB_FAIL(calc_recycle_schema_versions(tenant_ids))) { LOG_WARN("fail to fetch recycle schema version", KR(ret)); - } else if (GCTX.is_standby_cluster()) { - // standby cluster only calc recycle schema versions } else if (OB_FAIL(try_recycle_schema_history(tenant_ids))) { LOG_WARN("fail to recycle schema history", KR(ret)); } @@ -288,13 +295,8 @@ int ObSchemaHistoryRecycler::try_recycle_schema_history( } else { for (int64_t i = 0; OB_SUCC(ret) && i < tenant_ids.count(); i++) { const uint64_t tenant_id = tenant_ids.at(i); - bool skip = true; if (OB_FAIL(check_stop())) { LOG_WARN("schema history recycler is stopped", KR(ret)); - } else if (OB_FAIL(check_can_skip_tenant(tenant_id, skip))) { - LOG_WARN("fail to check tenant can skip", KR(ret), K(tenant_id)); - } else if (skip) { - // pass } else { int64_t recycle_schema_version = OB_INVALID_VERSION; if (OB_FAIL(recycle_schema_versions_.get_refactored(tenant_id, recycle_schema_version))) { @@ -319,6 +321,7 @@ int ObSchemaHistoryRecycler::check_can_skip_tenant( bool &skip) { int ret = OB_SUCCESS; + bool is_primary = false; skip = false; if (!inited_) { ret = OB_NOT_INIT; @@ -328,6 +331,10 @@ int ObSchemaHistoryRecycler::check_can_skip_tenant( // Additional schema history of system tenant should be recycled: // 1. Other tenant's schema history(except tenant schema history and system table's schema history) generated before schema split. 
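// The tenant filter added above walks tenant_ids from back to front so that
// ObArray::remove(i) never shifts an element that has not been visited yet.
// A minimal standalone sketch of that reverse-iteration filter, with
// std::vector standing in for ObArray and can_skip() as a hypothetical
// predicate (illustration only):
#include <cstdint>
#include <vector>

static void remove_skippable_tenants(std::vector<uint64_t> &tenant_ids,
                                     bool (*can_skip)(uint64_t tenant_id))
{
  // Erasing index i only moves elements after i, all of which have already
  // been examined, so no tenant is skipped or checked twice.
  for (int64_t i = static_cast<int64_t>(tenant_ids.size()) - 1; i >= 0; --i) {
    if (can_skip(tenant_ids[i])) {
      tenant_ids.erase(tenant_ids.begin() + i);
    }
  }
}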
skip = true; + } else if (OB_FAIL(ObShareUtil::table_check_if_tenant_role_is_primary(tenant_id, is_primary))) { + LOG_WARN("fail to execute table_check_if_tenant_role_is_primary", KR(ret), K(tenant_id)); + } else if (!is_primary) { + skip = true; } else { ObSchemaGetterGuard schema_guard; const ObSimpleTenantSchema *tenant_schema = NULL; @@ -479,7 +486,6 @@ int ObSchemaHistoryRecycler::get_recycle_schema_version_by_global_stat( if (OB_FAIL(check_inner_stat())) { LOG_WARN("fail to check inner stat", KR(ret)); } else { - bool is_standby = GCTX.is_standby_cluster(); if (OB_SUCC(ret)) { // step 1. calc by schema_history_expire_time int64_t conf_expire_time = GCONF.schema_history_expire_time; @@ -493,13 +499,7 @@ int ObSchemaHistoryRecycler::get_recycle_schema_version_by_global_stat( const uint64_t tenant_id = tenant_ids.at(i); int64_t expire_schema_version = OB_INVALID_VERSION; ObRefreshSchemaStatus schema_status; - if (!is_standby) { - schema_status.tenant_id_ = tenant_id; // use strong read - } else { - if (OB_FAIL(schema_status_proxy->get_refresh_schema_status(tenant_id, schema_status))) { - LOG_WARN("fail to get refresh schema status", KR(ret), K(tenant_id)); - } - } + schema_status.tenant_id_ = tenant_id; // use strong read if (FAILEDx(schema_service_->get_schema_version_by_timestamp( schema_status, tenant_id, expire_time, expire_schema_version))) { LOG_WARN("fail to get schema version by timestamp", diff --git a/src/rootserver/ob_service_name_command.cpp b/src/rootserver/ob_service_name_command.cpp new file mode 100644 index 0000000000..5bc7a32a58 --- /dev/null +++ b/src/rootserver/ob_service_name_command.cpp @@ -0,0 +1,460 @@ +/** + * Copyright (c) 2022 OceanBase + * OceanBase CE is licensed under Mulan PubL v2. + * You can use this software according to the terms and conditions of the Mulan PubL v2. + * You may obtain a copy of Mulan PubL v2 at: + * http://license.coscl.org.cn/MulanPubL-2.0 + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PubL v2 for more details. 
+ */ +#define USING_LOG_PREFIX RS + +#include "ob_service_name_command.h" + +#include "lib/oblog/ob_log_module.h" +#include "lib/string/ob_sql_string.h" +#include "lib/mysqlclient/ob_mysql_transaction.h" +#include "share/ob_unit_table_operator.h" +#include "share/ob_all_server_tracer.h" +#include "observer/ob_server_struct.h" +#include "rootserver/ob_rs_async_rpc_proxy.h" +#include "rootserver/ob_root_utils.h" +#include "rootserver/ob_tenant_event_def.h" + +using namespace oceanbase::common; +using namespace oceanbase::share; +using namespace oceanbase::tenant_event; +namespace oceanbase +{ +namespace rootserver +{ +int ObServiceNameKillSessionFunctor::init( + const uint64_t tenant_id, + const share::ObServiceNameString &service_name, + ObArray *killed_connection_list) +{ + int ret = OB_SUCCESS; + if (!is_valid_tenant_id(tenant_id) || !service_name.is_valid() || OB_ISNULL(killed_connection_list)) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid arg", KR(ret), K(tenant_id), K(service_name), K(killed_connection_list)); + } else if (OB_FAIL(service_name_.assign(service_name))) { + LOG_WARN("fail to assign service_name", KR(ret), K(service_name)); + } + else { + tenant_id_ = tenant_id; + killed_connection_list_ = killed_connection_list; + killed_connection_list_->reset(); + } + return ret; +} +bool ObServiceNameKillSessionFunctor::operator()(sql::ObSQLSessionMgr::Key key, sql::ObSQLSessionInfo *sess_info) +{ + int ret = OB_SUCCESS; + if (OB_ISNULL(sess_info) || OB_ISNULL(GCTX.session_mgr_) || OB_ISNULL(killed_connection_list_)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("unexpected null pointer", KR(ret), KP(sess_info), KP(GCTX.session_mgr_), + KP(killed_connection_list_)); + } else if (sess_info->get_effective_tenant_id() == tenant_id_ + && sess_info->get_service_name().equal_to(service_name_)) { + uint64_t sess_id = sess_info->get_sessid(); + if (OB_FAIL(GCTX.session_mgr_->kill_session(*sess_info))) { + LOG_WARN("fail to kill session", KR(ret), K(sess_id)); + } else if (OB_FAIL(killed_connection_list_->push_back(sess_id))) { + LOG_WARN("fail to push back", KR(ret), K(sess_id)); + } + } + return OB_SUCCESS == ret; +} + +int ObServiceNameCommand::create_service( + const uint64_t tenant_id, + const ObServiceNameString &service_name_str) +{ + int ret = OB_SUCCESS; + ObArray target_servers; + ObServiceName service_name; + int64_t epoch = 0; + ObArray all_service_names; + int64_t begin_ts = ObTimeUtility::current_time(); + if (OB_UNLIKELY(!is_valid_tenant_id(tenant_id) || !service_name_str.is_valid())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid arg", KR(ret), K(tenant_id), K(service_name_str)); + } else if (OB_FAIL(check_and_get_tenants_servers_(tenant_id, true /* include_temp_offline */, target_servers))) { + LOG_WARN("fail to execute check_and_get_tenants_online_servers_", KR(ret), K(tenant_id)); + } else if (OB_FAIL(ObServiceNameProxy::insert_service_name(tenant_id, service_name_str, + epoch, all_service_names))) { + // insert service_name into __all_service if the tenant's so_status is NORMAL + LOG_WARN("fail to insert service_name", KR(ret), K(tenant_id), K(service_name_str)); + } else if (OB_FAIL(extract_service_name_(all_service_names, service_name_str, service_name))) { + LOG_WARN("fail to execute extract_service_name_", KR(ret), K(all_service_names), K(service_name_str)); + if (OB_SERVICE_NAME_NOT_FOUND == ret) { + ret = OB_ERR_UNEXPECTED; + } + } else if (OB_FAIL(broadcast_refresh_( + tenant_id, + service_name.get_service_name_id(), + ObServiceNameArg::CREATE_SERVICE, + 
target_servers, + epoch, + all_service_names))) { + LOG_WARN("fail to broadcast", KR(ret), K(tenant_id), K(service_name), K(target_servers), + K(epoch), K(all_service_names)); + } + int64_t end_ts = ObTimeUtility::current_time(); + TENANT_EVENT(tenant_id, SERVICE_NAME, CREATE_SERVICE, end_ts, ret, end_ts - begin_ts, + service_name_str.ptr(), service_name); + return ret; +} +int ObServiceNameCommand::delete_service( + const uint64_t tenant_id, + const ObServiceNameString &service_name_str) +{ + int ret = OB_SUCCESS; + ObServiceName service_name; + int64_t begin_ts = ObTimeUtility::current_time(); + if (OB_ISNULL(GCTX.sql_proxy_)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("GCTX.sql_proxy_ is null", KR(ret), KP(GCTX.sql_proxy_)); + } else if (OB_UNLIKELY(!is_valid_tenant_id(tenant_id) || !service_name_str.is_valid())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid arg", KR(ret), K(tenant_id), K(service_name_str)); + } else if (OB_FAIL(ObServiceNameProxy::select_service_name(*GCTX.sql_proxy_, tenant_id, service_name_str, service_name))) { + LOG_WARN("fail to select service_name", KR(ret), K(tenant_id), K(service_name_str)); + } else if (!service_name.is_stopped()) { + // simple check at first + // status will be checked again when the service_name is removed from the table + ret = OB_OP_NOT_ALLOW; + LOG_WARN("service_status is not STOPPED, delete_service is not allowed", KR(ret), K(service_name)); + LOG_USER_ERROR(OB_OP_NOT_ALLOW, "The service_status is not STOPPED, DELETE SERVICE is"); + } else if (OB_FAIL(ObServiceNameProxy::delete_service_name(service_name))) { + LOG_WARN("fail to delete service_name", KR(ret), K(tenant_id), K(service_name)); + } + int64_t end_ts = ObTimeUtility::current_time(); + TENANT_EVENT(tenant_id, SERVICE_NAME, DELETE_SERVICE, end_ts, ret, end_ts - begin_ts, service_name); + return ret; +} +int ObServiceNameCommand::start_service( + const uint64_t tenant_id, + const ObServiceNameString &service_name_str) +{ + int ret = OB_SUCCESS; + ObArray target_servers; + ObServiceName service_name; + ObServiceName service_name_before; + int64_t epoch = 0; + ObArray all_service_names; + int64_t begin_ts = ObTimeUtility::current_time(); + if (OB_ISNULL(GCTX.sql_proxy_)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("GCTX.sql_proxy_ is null", KR(ret), KP(GCTX.sql_proxy_)); + } else if (OB_UNLIKELY(!is_valid_tenant_id(tenant_id) || !service_name_str.is_valid())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid arg", KR(ret), K(tenant_id), K(service_name_str)); + } else if (OB_FAIL(check_and_get_tenants_servers_(tenant_id, true /* include_temp_offline */, target_servers))) { + LOG_WARN("fail to execute check_and_get_tenants_servers_", KR(ret), K(tenant_id)); + } else if (OB_FAIL(ObServiceNameProxy::select_all_service_names_with_epoch(tenant_id, epoch, all_service_names))) { + LOG_WARN("fail to select service_name", KR(ret), K(tenant_id), K(service_name_str)); + } else if (OB_FAIL(extract_service_name_(all_service_names, service_name_str, service_name))) { + LOG_WARN("fail to execute extract_service_name_", KR(ret), K(all_service_names), K(service_name_str)); + } else if (OB_FAIL(service_name_before.assign(service_name))) { + LOG_WARN("fail to assign service_name_before", KR(ret), K(service_name)); + } else if (!service_name.is_started()) { + if (OB_FAIL(ObServiceNameProxy::update_service_status(service_name, ObServiceName::STARTED, + epoch, all_service_names))) { + LOG_WARN("fail to update service_status", KR(ret), K(tenant_id), K(service_name)); + } else if 
(OB_FAIL(extract_service_name_(all_service_names, service_name_str, service_name))) { + LOG_WARN("fail to execute extract_service_name_", KR(ret), K(all_service_names), K(service_name_str)); + if (OB_SERVICE_NAME_NOT_FOUND == ret) { + ret = OB_ERR_UNEXPECTED; + } + } + } + if (FAILEDx(broadcast_refresh_( + tenant_id, + service_name.get_service_name_id(), + ObServiceNameArg::START_SERVICE, + target_servers, + epoch, + all_service_names))) { + LOG_WARN("fail to broadcast", KR(ret), K(tenant_id), K(service_name), K(target_servers), + K(epoch), K(all_service_names)); + } + int64_t end_ts = ObTimeUtility::current_time(); + TENANT_EVENT(tenant_id, SERVICE_NAME, START_SERVICE, end_ts, ret, end_ts - begin_ts, service_name_before, service_name); + return ret; +} +int ObServiceNameCommand::stop_service( + const uint64_t tenant_id, + const ObServiceNameString &service_name_str) +{ + int ret = OB_SUCCESS; + ObArray tenant_online_servers; + ObServiceName service_name; + ObServiceName service_name_before; + int64_t epoch = 0; + ObArray all_service_names; + int64_t begin_ts = ObTimeUtility::current_time(); + if (OB_ISNULL(GCTX.sql_proxy_)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("GCTX.sql_proxy_ is null", KR(ret), KP(GCTX.sql_proxy_)); + } else if (OB_UNLIKELY(!is_valid_tenant_id(tenant_id) || !service_name_str.is_valid())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid arg", KR(ret), K(tenant_id), K(service_name_str)); + } else if (OB_FAIL(ObServiceNameProxy::select_all_service_names_with_epoch(tenant_id, epoch, all_service_names))) { + LOG_WARN("fail to select service_name", KR(ret), K(tenant_id), K(service_name_str)); + } else if (OB_FAIL(extract_service_name_(all_service_names, service_name_str, service_name))) { + LOG_WARN("fail to execute extract_service_name_", KR(ret), K(all_service_names), K(service_name_str)); + } else if (OB_FAIL(service_name_before.assign(service_name))) { + LOG_WARN("fail to assign service_name_before", KR(ret), K(service_name)); + } else if (service_name.is_stopped()) { + // status has been already stopped, do nothing + } else if (OB_FAIL(check_and_get_tenants_servers_(tenant_id, false /* include_temp_offline */, tenant_online_servers))) { + // also ensure the tenant has no units on temp. 
offline servers + LOG_WARN("fail to execute check_and_get_tenants_online_servers_", KR(ret), K(tenant_id)); + } else { + if (service_name.is_started()) { + if (OB_FAIL(ObServiceNameProxy::update_service_status(service_name, ObServiceName::STOPPING, + epoch, all_service_names))) { + LOG_WARN("fail to update service_status", KR(ret), K(tenant_id), K(service_name)); + } else if (OB_FAIL(extract_service_name_(all_service_names, service_name_str, service_name))) { + LOG_WARN("fail to execute extract_service_name_", KR(ret), K(all_service_names), K(service_name_str)); + if (OB_SERVICE_NAME_NOT_FOUND == ret) { + ret = OB_ERR_UNEXPECTED; + } + } + } + if (FAILEDx(broadcast_refresh_( + tenant_id, + service_name.get_service_name_id(), + ObServiceNameArg::STOP_SERVICE, + tenant_online_servers, + epoch, + all_service_names))) { + LOG_WARN("fail to broadcast", KR(ret), K(tenant_id), K(service_name), K(tenant_online_servers), + K(epoch), K(all_service_names)); + } else if (OB_FAIL(ObServiceNameProxy::update_service_status(service_name, ObServiceName::STOPPED, + epoch, all_service_names))) { + LOG_WARN("fail to update service_status", KR(ret), K(tenant_id), K(service_name)); + } else if (OB_FAIL(extract_service_name_(all_service_names, service_name_str, service_name))) { + LOG_WARN("fail to execute extract_service_name_", KR(ret), K(all_service_names), K(service_name_str)); + if (OB_SERVICE_NAME_NOT_FOUND == ret) { + ret = OB_ERR_UNEXPECTED; + } + } + } + int64_t end_ts = ObTimeUtility::current_time(); + TENANT_EVENT(tenant_id, SERVICE_NAME, STOP_SERVICE, end_ts, ret, end_ts - begin_ts, service_name_before, service_name); + return ret; +} + +int ObServiceNameCommand::kill_local_connections( + const uint64_t tenant_id, + const share::ObServiceName &service_name) +{ + int ret = OB_SUCCESS; + ObArray killed_connection_list; + int64_t begin_ts = ObTimeUtility::current_time(); + ObServiceNameKillSessionFunctor kill_session_functor; + const ObServiceNameString & service_name_str = service_name.get_service_name_str(); + if (OB_ISNULL(GCTX.session_mgr_)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("GCTX.session_mgr_ is null", KR(ret), KP(GCTX.session_mgr_)); + } else if (OB_FAIL(kill_session_functor.init( + tenant_id, + service_name_str, + &killed_connection_list))) { + LOG_WARN("fail to init kill_session_functor", KR(ret), K(tenant_id), K(service_name_str)); + } else if (OB_FAIL(GCTX.session_mgr_->for_each_session(kill_session_functor))) { + LOG_WARN("fail to kill local sessions", KR(ret)); + } + if (killed_connection_list.count() > 0) { + int64_t end_ts = ObTimeUtility::current_time(); + TENANT_EVENT(tenant_id, SERVICE_NAME, KILL_CONNECTIONS_OF_SERVICE_NAME, end_ts, ret, end_ts - begin_ts, + service_name, killed_connection_list.count(), killed_connection_list); + } + return ret; +} + +int ObServiceNameCommand::check_and_get_tenants_servers_( + const uint64_t tenant_id, + const bool include_temp_offline, + common::ObIArray &target_servers) +{ + int ret = OB_SUCCESS; + ObArray units; + target_servers.reset(); + ObUnitTableOperator unit_operator; + if (OB_ISNULL(GCTX.sql_proxy_)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("GCTX.sql_proxy_ is null", KR(ret), KP(GCTX.sql_proxy_)); + } else if (OB_FAIL(unit_operator.init(*GCTX.sql_proxy_))) { + LOG_WARN("failed to init unit operator", KR(ret)); + } else if (OB_FAIL(unit_operator.get_units_by_tenant(tenant_id, units))) { + LOG_WARN("failed to get tenant unit", KR(ret), K(tenant_id)); + } + for (int64_t i = 0; OB_SUCC(ret) && i < units.count(); i++) { + const ObUnit &unit = 
units.at(i); + if (OB_FAIL(server_check_and_push_back_(unit.server_, include_temp_offline, target_servers))) { + LOG_WARN("fail to execute server_check_and_push_back_", KR(ret), K(unit.server_)); + } else if (unit.migrate_from_server_.is_valid() && + OB_FAIL(server_check_and_push_back_(unit.migrate_from_server_, include_temp_offline, target_servers))) { + LOG_WARN("fail to execute server_check_and_push_back_", KR(ret), K(unit.migrate_from_server_)); + } + } + return ret; +} + +int ObServiceNameCommand::server_check_and_push_back_( + const common::ObAddr &server, + const bool include_temp_offline, + common::ObIArray &target_servers) +{ + int ret = OB_SUCCESS; + ObServerInfoInTable server_info; + if (OB_UNLIKELY(!server.is_valid())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid server", KR(ret), K(server)); + } else if (OB_FAIL(SVR_TRACER.get_server_info(server, server_info))) { + LOG_WARN("fail to execute get_server_info", KR(ret), K(server)); + } else if (server_info.is_permanent_offline()) { + // skip + } else if (!include_temp_offline && server_info.is_temporary_offline()) { + ret = OB_OP_NOT_ALLOW; + LOG_WARN("the tenant has units on temporary offline servers", KR(ret), K(server_info)); + LOG_USER_ERROR(OB_OP_NOT_ALLOW, "The tenant has units on temporary offline servers, STOP SERVICE is"); + } else if (OB_FAIL(target_servers.push_back(server))) { + LOG_WARN("fail to push back", KR(ret), K(server), K(server_info)); + } + return ret; +} + +int ObServiceNameCommand::broadcast_refresh_( + const uint64_t tenant_id, + const share::ObServiceNameID &target_service_name_id, + const share::ObServiceNameArg::ObServiceOp &service_op, + const common::ObIArray &target_servers, + const int64_t epoch, + const ObArray &all_service_names) +{ + // 1. Broadcasting to all servers restricts executing commands related to service_name + // when any server is permanently offline. + // 2. Users are required to ensure that processes on permanently offline servers are terminated. + // 3. Broadcasts target only online servers, following verification that no servers are temporarily offline. + int ret = OB_SUCCESS; + const ObAddr &from_server = GCTX.self_addr(); + ObTimeoutCtx ctx; + ObArray return_code_array; + obrpc::ObRefreshServiceNameArg arg; + share::ObAllTenantInfo tenant_info; + int64_t ora_rowscn = 0; + common::ObArray success_servers; + int64_t begin_ts = ObTimeUtility::current_time(); + if (OB_ISNULL(GCTX.srv_rpc_proxy_) || OB_ISNULL(GCTX.sql_proxy_)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("unexpected null ptr", KR(ret), KP(GCTX.srv_rpc_proxy_), KP(GCTX.sql_proxy_)); + } else if (OB_UNLIKELY(!is_valid_tenant_id(tenant_id))) { + // the validity of epoch and all_service_names will be checked when we init arg + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid arg", KR(ret), K(tenant_id)); + } else if (0 == target_servers.count()) { + LOG_INFO("no target servers, no need to broadcast", KR(ret), K(tenant_id), + K(all_service_names), K(target_servers)); + } else if (ObServiceNameArg::START_SERVICE == service_op && + OB_FAIL(ObAllTenantInfoProxy::load_tenant_info(tenant_id, GCTX.sql_proxy_, false, ora_rowscn, tenant_info))) { + // When starting the service, it is expected that `service_name` is utilized. + // However, the ability for users to connect via `service_name` also depends on `tenant_info`, + // so it's crucial to ensure that `tenant_info` is up-to-date. 
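// broadcast_refresh_() below is deliberately best-effort: it keeps calling the
// remaining target servers even after one RPC fails, and only reports the
// first error once every server has been tried, so as many observers as
// possible learn the new service_name status. A minimal standalone sketch of
// that send-to-all, return-first-error pattern; send_to() is a hypothetical
// stand-in for the async proxy call and 0 stands in for OB_SUCCESS:
#include <vector>

struct TargetServer { int id; };

static int broadcast_best_effort(const std::vector<TargetServer> &targets,
                                 int (*send_to)(const TargetServer &server))
{
  int first_err = 0;
  for (const TargetServer &server : targets) {
    const int err = send_to(server);  // keep going even if an earlier call failed
    if (0 == first_err && 0 != err) {
      first_err = err;                // remember only the first failure
    }
  }
  return first_err;                   // success only if every send succeeded
}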
+ LOG_WARN("fail to load tenant info", KR(ret), K(tenant_id)); + } else if (OB_FAIL(arg.init(tenant_id, epoch, from_server, target_service_name_id, + all_service_names, service_op, tenant_info, ora_rowscn))) { + LOG_WARN("failed to init arg", KR(ret), K(tenant_id), K(from_server), K(target_service_name_id), + K(all_service_names), K(service_op), K(tenant_info), K(ora_rowscn)); + } else { + const uint64_t group_id = share::OBCG_DBA_COMMAND; + ObRefreshServiceNameProxy proxy(*GCTX.srv_rpc_proxy_, &obrpc::ObSrvRpcProxy::refresh_service_name); + int tmp_ret = OB_SUCCESS; + // Try to send to all target servers, but return failure if at least one fails. + // The intention is to maximize the number of observers aware of the STARTED status when starting the service. + for (int64_t i = 0; OB_SUCC(ret) && i < target_servers.count(); i++) { + const ObAddr &server = target_servers.at(i); + if (OB_FAIL(ObRootUtils::get_rs_default_timeout_ctx(ctx))) { + LOG_WARN("fail to get timeout ctx", KR(ret), K(ctx)); + } else if (OB_TMP_FAIL(proxy.call(server, ctx.get_timeout(), GCONF.cluster_id, tenant_id, group_id, arg))) { + LOG_WARN("failed to send rpc", KR(ret), KR(tmp_ret), K(server), K(ctx), K(tenant_id), K(arg)); + } + } + + if (OB_TMP_FAIL(proxy.wait_all(return_code_array))) { + LOG_WARN("wait all batch result failed", KR(ret), KR(tmp_ret)); + ret = OB_SUCCESS == ret ? tmp_ret : ret; + } + int first_ret = OB_SUCCESS; + if (FAILEDx(proxy.check_return_cnt(return_code_array.count()))) { + LOG_WARN("fail to check return cnt", KR(ret), "return_cnt", return_code_array.count()); + } + ARRAY_FOREACH_X(proxy.get_results(), idx, cnt, OB_SUCC(ret)) { + const obrpc::ObRefreshServiceNameRes *result = proxy.get_results().at(idx); + const ObAddr &dest_addr = proxy.get_dests().at(idx); + tmp_ret = return_code_array.at(idx); + if (OB_TENANT_NOT_EXIST == tmp_ret) { + LOG_WARN("tenant not exist", KR(ret), KR(tmp_ret), K(dest_addr)); + tmp_ret = OB_SUCCESS; + } + if (OB_SUCCESS != tmp_ret) { + LOG_WARN("fail to send rpc", KR(ret), KR(tmp_ret), K(dest_addr), K(idx)); + } else if (OB_ISNULL(result)) { + tmp_ret = OB_ERR_UNEXPECTED; + LOG_WARN("result is null", KR(ret), KR(tmp_ret), KR(first_ret), KP(result)); + } else if (OB_UNLIKELY(!result->is_valid())) { + tmp_ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid result", + KR(ret), KR(tmp_ret), KR(first_ret), KPC(result), K(dest_addr)); + } else { + LOG_INFO("refresh_service_name success", KR(ret), KR(tmp_ret), KR(first_ret), K(dest_addr), K(arg), KPC(result)); + if (OB_FAIL(success_servers.push_back(dest_addr))) { + LOG_WARN("fail to push back", KR(ret), K(dest_addr)); + } + } + first_ret = OB_SUCCESS == first_ret ? tmp_ret : first_ret; + } + ret = OB_SUCCESS == ret ? first_ret : ret; + + if (OB_FAIL(ret)) { + int prev_ret = ret; + ret = ObServiceNameArg::STOP_SERVICE == service_op ? 
OB_NEED_RETRY : OB_SERVICE_NOT_FULLY_STARTED; + LOG_WARN("fail to broadcast to the tenant's all servers", KR(ret), KR(prev_ret), + K(target_servers), K(success_servers), K(service_op)); + } + int64_t end_ts = ObTimeUtility::current_time(); + TENANT_EVENT(tenant_id, SERVICE_NAME, BROADCAST_SERVICE_NAME, end_ts, ret, end_ts - begin_ts, + epoch, target_service_name_id, all_service_names, ObServiceNameArg::service_op_to_str(service_op), + target_servers, success_servers); + } + return ret; +} + +int ObServiceNameCommand::extract_service_name_( + const ObArray &all_service_names, + const share::ObServiceNameString &service_name_str, + share::ObServiceName &service_name) +{ + int ret = OB_SUCCESS; + bool is_found = false; + for (int64_t i = 0; OB_SUCC(ret) && i < all_service_names.count() && !is_found; ++i) { + if (all_service_names.at(i).get_service_name_str().equal_to(service_name_str)) { + is_found = true; + if (OB_FAIL(service_name.assign(all_service_names.at(i)))) { + LOG_WARN("fail to assign service_name", KR(ret), K(all_service_names.at(i))); + } + } + } + if (OB_SUCC(ret) && !is_found) { + ret = OB_SERVICE_NAME_NOT_FOUND; + LOG_WARN("service_name_str is not found", KR(ret), K(service_name_str), K(all_service_names)); + } + return ret; +} +} // end namespace rootserver +} // end namespace oceanbase \ No newline at end of file diff --git a/src/rootserver/ob_service_name_command.h b/src/rootserver/ob_service_name_command.h new file mode 100644 index 0000000000..06db93d83d --- /dev/null +++ b/src/rootserver/ob_service_name_command.h @@ -0,0 +1,82 @@ +/** + * Copyright (c) 2022 OceanBase + * OceanBase CE is licensed under Mulan PubL v2. + * You can use this software according to the terms and conditions of the Mulan PubL v2. + * You may obtain a copy of Mulan PubL v2 at: + * http://license.coscl.org.cn/MulanPubL-2.0 + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PubL v2 for more details. 
+ */ + +#ifndef OCEANBASE_ROOTSERVER_OB_SERVICE_NAME_COMMAND_H +#define OCEANBASE_ROOTSERVER_OB_SERVICE_NAME_COMMAND_H + +#include "lib/ob_define.h" +#include "share/ob_define.h" +#include "share/ob_service_name_proxy.h" +#include "sql/session/ob_sql_session_mgr.h" +namespace oceanbase +{ +namespace rootserver +{ +class ObServiceNameKillSessionFunctor +{ +public: + ObServiceNameKillSessionFunctor() + : tenant_id_(OB_INVALID_TENANT_ID), service_name_(), killed_connection_list_(NULL) {}; + ~ObServiceNameKillSessionFunctor() {}; + int init(const uint64_t tenant_id, + const share::ObServiceNameString &service_name, + ObArray<uint64_t> *killed_connection_list); + bool operator()(sql::ObSQLSessionMgr::Key key, sql::ObSQLSessionInfo *sess_info); +private: + uint64_t tenant_id_; + ObServiceNameString service_name_; + ObArray<uint64_t> *killed_connection_list_; +}; +class ObServiceNameCommand +{ +public: + ObServiceNameCommand(); + ~ObServiceNameCommand(); + static int create_service( + const uint64_t tenant_id, + const share::ObServiceNameString &service_name_str); + static int delete_service( + const uint64_t tenant_id, + const share::ObServiceNameString &service_name_str); + static int start_service( + const uint64_t tenant_id, + const share::ObServiceNameString &service_name_str); + static int stop_service( + const uint64_t tenant_id, + const share::ObServiceNameString &service_name_str); + static int kill_local_connections( + const uint64_t tenant_id, + const share::ObServiceName &service_name); +private: + static int check_and_get_tenants_servers_( + const uint64_t tenant_id, + const bool include_temp_offline, + common::ObIArray<common::ObAddr> &target_servers); + static int server_check_and_push_back_( + const common::ObAddr &server, + const bool include_temp_offline, + common::ObIArray<common::ObAddr> &target_servers); + static int broadcast_refresh_( + const uint64_t tenant_id, + const share::ObServiceNameID &target_service_name_id, + const share::ObServiceNameArg::ObServiceOp &service_op, + const common::ObIArray<common::ObAddr> &target_servers, + const int64_t epoch, + const ObArray<share::ObServiceName> &all_service_names); + static int extract_service_name_( + const ObArray<share::ObServiceName> &all_service_names, + const share::ObServiceNameString &service_name_str, + share::ObServiceName &service_name); +}; +} // end namespace rootserver +} // end namespace oceanbase +#endif \ No newline at end of file diff --git a/src/rootserver/ob_system_admin_util.cpp index 0f69e0b5ab..d88304d8c8 100644 --- a/src/rootserver/ob_system_admin_util.cpp +++ b/src/rootserver/ob_system_admin_util.cpp @@ -1435,26 +1435,36 @@ int ObAdminUpgradeVirtualSchema::execute() if (OB_UNLIKELY(!ctx_.is_inited())) { ret = OB_NOT_INIT; LOG_WARN("not init", KR(ret)); - } else if (GCTX.is_standby_cluster()) { - // standby cluster cannot upgrade virtual schema independently, - // need to get these information from the primary cluster - ret = OB_OP_NOT_ALLOW; - LOG_WARN("upgrade virtual schema in standby cluster not allow", KR(ret)); - LOG_USER_ERROR(OB_OP_NOT_ALLOW, "upgrade virtual schema in standby cluster"); - } else if (OB_ISNULL(ctx_.root_inspection_) - || OB_ISNULL(ctx_.ddl_service_)) { + } else if (OB_ISNULL(ctx_.root_inspection_) || OB_ISNULL(ctx_.ddl_service_) || OB_ISNULL(GCTX.sql_proxy_)) { ret = OB_ERR_UNEXPECTED; - LOG_WARN("ptr is null", KR(ret), KP(ctx_.root_inspection_), KP(ctx_.ddl_service_)); + LOG_WARN("ptr is null", KR(ret), KP(ctx_.root_inspection_), KP(ctx_.ddl_service_), KP(GCTX.sql_proxy_)); } else if 
(OB_FAIL(ctx_.ddl_service_->get_tenant_schema_guard_with_version_in_inner_table( OB_SYS_TENANT_ID, schema_guard))) { LOG_WARN("get_schema_guard failed", KR(ret)); } else if (OB_FAIL(schema_guard.get_tenant_ids(tenant_ids))) { LOG_WARN("fail to get tenant ids", KR(ret)); } else { + share::ObTenantRole tenant_role; FOREACH(tenant_id, tenant_ids) { // ignore ret int tmp_ret = OB_SUCCESS; - if (OB_SUCCESS != (tmp_ret = execute(*tenant_id, upgrade_cnt))) { - LOG_WARN("fail to execute upgrade virtual table by tenant", KR(tmp_ret), K(*tenant_id)); + if (OB_TMP_FAIL(ObAllTenantInfoProxy::get_tenant_role(GCTX.sql_proxy_, *tenant_id, tenant_role))) { + LOG_WARN("fail to get tenant role", KR(ret), KP(GCTX.sql_proxy_), K(*tenant_id)); + } else if (tenant_role.is_invalid()) { + tmp_ret = OB_NEED_WAIT; + LOG_WARN("tenant role is not ready, need wait", KR(ret), K(*tenant_id), KR(tmp_ret), K(tenant_role)); + } else if (tenant_role.is_restore()) { + tmp_ret = OB_OP_NOT_ALLOW; + LOG_WARN("restore tenant cannot upgrade virtual schema", KR(ret), K(*tenant_id), KR(tmp_ret), K(tenant_role)); + } else if (tenant_role.is_standby()) { + // skip + } else if (tenant_role.is_primary()) { + if (OB_TMP_FAIL(execute(*tenant_id, upgrade_cnt))) { + LOG_WARN("fail to execute upgrade virtual table by tenant", KR(ret), K(*tenant_id), KR(tmp_ret), K(tenant_role)); + } + } else { + // Currently, clone tenant is not available, but it may be added later. + tmp_ret = OB_ERR_UNEXPECTED; + LOG_WARN("unknown tenant_role", KR(ret), K(*tenant_id), KR(tmp_ret), K(tenant_role)); } ret = OB_SUCC(ret) ? tmp_ret : ret; } diff --git a/src/rootserver/ob_tenant_event_def.h b/src/rootserver/ob_tenant_event_def.h new file mode 100644 index 0000000000..d8c5e49594 --- /dev/null +++ b/src/rootserver/ob_tenant_event_def.h @@ -0,0 +1,252 @@ +/** + * Copyright (c) 2021 OceanBase + * OceanBase CE is licensed under Mulan PubL v2. + * You can use this software according to the terms and conditions of the Mulan PubL v2. + * You may obtain a copy of Mulan PubL v2 at: + * http://license.coscl.org.cn/MulanPubL-2.0 + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PubL v2 for more details. + */ + /** + * @description: + * Use the DEF_MODULE macro in this file to define MODULE. Use the DEF_EVENT macro to define EVENT. + * Each MODULE can define multiple EVENTS of the same type. + * + * ****** NOTICE ********* + * To ensure compatibility, the already defined MODULE and EVENT cannot be modified. + * Only MODULE, EVENT, and EVENT fields can be added. + * + * DEF_MODULE(MODULE, MODULE_STR) + * @param[in] MODULE The label of MODULE + * @param[in] MODULE_STR The corresponding string of MODULE + * + * DEF_EVENT(MODULE, EVENT, EVENT_STR, NAME1 ...) + * @param[in] MODULE The label of MODULE + * @param[in] EVENT The label of EVENT + * @param[in] EVENT_STR The corresponding string of EVENT + * @param[in] NAME1 The field name of the key information of EVENT, + * up to 6 key information fields are supported + * + * Use the TENANT_EVENT macro to record the EVENT of the tenant, + * and fill in the field values in the order defined by DEF_EVENT. + * TENANT_EVENT(tenant_id, MODULE, EVENT, event_timestamp, user_ret, cost_us, VALUE1 ...) 
+ * @param[in] tenant_id The tenant ID of the ordinary tenant + * @param[in] MODULE The label of MODULE + * @param[in] EVENT The label of EVENT + * @param[in] event_timestamp the timestamp when the event occurs + * @param[in] user_ret return code + * @param[in] cost_us cost time + * @param[in] VALUE1 The value of the field of the key information of EVENT, + * fill in the value of the field according to the order defined by DEF_EVENT + * The EVENT recorded by TENANT_EVENT will be stored in the inner table + * (__all_tenant_event_history) under the META tenant space corresponding to tenant_id, + * and displayed through the views CDB_OB_TENANT_EVENT_HISTORY/DBA_OB_TENANT_EVENT_HISTORY. + * + * ****** NOTICE ********* + * TENANT_EVENT is inserted into the table asynchronously, gmt_create is the input arg event_timestamp + * The EVENTS recorded in the inner table (__all_tenant_event_history) under the META tenant space + * will not be cleared until the tenant is deleted. + * Therefore, in order to prevent too many EVENTS, the interval between recording EVENTS should not be too frequent + */ +#ifdef DEF_MODULE +#ifdef DEF_EVENT + /** + * @description: + * Log events related to tenant role change + * failover to primary/switchover to primary/switchover to standby + */ + class TENANT_ROLE_CHANGE { + public: + DEF_MODULE(TENANT_ROLE_CHANGE, "TENANT ROLE CHANGE"); + DEF_EVENT(TENANT_ROLE_CHANGE, SWITCHOVER_TO_PRIMARY_START, "SWITCHOVER TO PRIMARY START", + STMT_STR, + TENANT_INFO); + DEF_EVENT(TENANT_ROLE_CHANGE, SWITCHOVER_TO_PRIMARY_END, "SWITCHOVER TO PRIMARY END", + STMT_STR, + TENANT_INFO, + SWITCHOVER_SCN, + COST_DETAIL, + ALL_LS); + DEF_EVENT(TENANT_ROLE_CHANGE, SWITCHOVER_TO_STANDBY_START, "SWITCHOVER TO STANDBY START", + STMT_STR, + TENANT_INFO); + DEF_EVENT(TENANT_ROLE_CHANGE, SWITCHOVER_TO_STANDBY_END, "SWITCHOVER TO STANDBY END", + STMT_STR, + TENANT_INFO, + SWITCHOVER_SCN, + COST_DETAIL, + ALL_LS); + DEF_EVENT(TENANT_ROLE_CHANGE, FAILOVER_TO_PRIMARY_START, "FAILOVER TO PRIMARY START", + STMT_STR, + TENANT_INFO); + DEF_EVENT(TENANT_ROLE_CHANGE, FAILOVER_TO_PRIMARY_END, "FAILOVER TO PRIMARY END", + STMT_STR, + TENANT_INFO, + FAILOVER_SCN, + COST_DETAIL, + ALL_LS); + DEF_EVENT(TENANT_ROLE_CHANGE, WAIT_LOG_SYNC, "WAIT LOG SYNC", + IS_SYS_LS_SYNCED, + IS_ALL_LS_SYNCED, + NON_SYNC_INFO); + }; + + class SERVICE_NAME { + public: + DEF_MODULE(SERVICE_NAME, "SERVICE NAME"); + + DEF_EVENT(SERVICE_NAME, CREATE_SERVICE, "CREATE SERVICE", + SERVICE_NAME_STRING, + CREATED_SERVICE_NAME); + + DEF_EVENT(SERVICE_NAME, DELETE_SERVICE, "DELETE SERVICE", + DELETED_SERVICE_NAME); + + DEF_EVENT(SERVICE_NAME, START_SERVICE, "START SERVICE", + SERVICE_NAME_BEFORE, + SERVICE_NAME_AFTER); + + DEF_EVENT(SERVICE_NAME, STOP_SERVICE, "STOP SERVICE", + SERVICE_NAME_BEFORE, + SERVICE_NAME_AFTER); + + DEF_EVENT(SERVICE_NAME, KILL_CONNECTIONS_OF_SERVICE_NAME, "KILL CONNECTIONS OF SERVICE NAME", + SERVICE_NAME, + KILLED_CONNECTIONS_COUNT, + KILLED_CONNECTIONS_LIST); + + DEF_EVENT(SERVICE_NAME, BROADCAST_SERVICE_NAME, "BROADCAST SERVICE NAME", + EPOCH, + TARGET_SERVICE_NAME_ID, + SERVICE_NAME_LIST, + SERVICE_NAME_COMMAND_TYPE, + TARGET_SERVERS_LIST, + SUCCESS_SERVERS_LIST); + }; +#endif +#endif +//////////////////////////////////////////////////////////////// +#ifndef _OB_TENANT_EVENT_DEF_H +#define _OB_TENANT_EVENT_DEF_H 1 +#include +#include "rootserver/ob_tenant_event_history_table_operator.h" // TENANT_EVENT_ADD +namespace oceanbase +{ +namespace tenant_event +{ +#define DEF_MODULE(MODULE, MODULE_STR) \ + static 
constexpr const char* const MODULE##_NAME = #MODULE; \ + static constexpr const char* const MODULE##_STR = MODULE_STR; +#define DEF_EVENT_COMMON(EVENT, EVENT_STR) \ + static constexpr const char* const EVENT##_NAME = #EVENT; \ + static constexpr const char* const EVENT##_STR = EVENT_STR; +#define OneArguments(MODULE) +#define TwoArguments(MODULE, EVENT) +#define ThreeArguments(MODULE, EVENT, EVENT_STR) \ + DEF_EVENT_COMMON(EVENT, EVENT_STR) \ + template<> \ + static void MODULE##_##EVENT##_func(const uint64_t tenant_id, const char * const module, const char * const event, \ + const int64_t event_timestamp, const int user_ret, const int64_t cost_us) \ + { \ + TENANT_EVENT_ADD(tenant_id, module, event, event_timestamp, user_ret, cost_us); \ + return ;\ + } +#define FourArguments(MODULE, EVENT, EVENT_STR, NAME1) \ + DEF_EVENT_COMMON(EVENT, EVENT_STR) \ + template \ + static void MODULE##_##EVENT##_func(const uint64_t tenant_id, const char * const module, const char * const event, \ + const int64_t event_timestamp, const int user_ret, const int64_t cost_us, \ + const T1 &value1) \ + { \ + TENANT_EVENT_ADD(tenant_id, module, event, event_timestamp, user_ret, cost_us, #NAME1, value1); \ + return ;\ + } +#define FiveArguments(MODULE, EVENT, EVENT_STR, NAME1, NAME2) \ + DEF_EVENT_COMMON(EVENT, EVENT_STR) \ + template \ + static void MODULE##_##EVENT##_func(const uint64_t tenant_id, const char * const module, const char * const event, \ + const int64_t event_timestamp, const int user_ret, const int64_t cost_us, \ + const T1 &value1, const T2 &value2) \ + { \ + TENANT_EVENT_ADD(tenant_id, module, event, event_timestamp, user_ret, cost_us, #NAME1, value1, #NAME2, value2); \ + return ;\ + } +#define SixArguments(MODULE, EVENT, EVENT_STR, NAME1, NAME2, NAME3) \ + DEF_EVENT_COMMON(EVENT, EVENT_STR) \ + template \ + static void MODULE##_##EVENT##_func(const uint64_t tenant_id, const char * const module, const char * const event, \ + const int64_t event_timestamp, const int user_ret, const int64_t cost_us, \ + const T1 &value1, const T2 &value2, \ + const T3 &value3) \ + { \ + TENANT_EVENT_ADD(tenant_id, module, event, event_timestamp, user_ret, cost_us, #NAME1, value1, #NAME2, value2, \ + #NAME3, value3); \ + return ;\ + } +#define SevenArguments(MODULE, EVENT, EVENT_STR, NAME1, NAME2, NAME3, NAME4) \ + DEF_EVENT_COMMON(EVENT, EVENT_STR) \ + template \ + static void MODULE##_##EVENT##_func(const uint64_t tenant_id, const char * const module, const char * const event, \ + const int64_t event_timestamp, const int user_ret, const int64_t cost_us, \ + const T1 &value1, const T2 &value2, \ + const T3 &value3, const T4 &value4) \ + { \ + TENANT_EVENT_ADD(tenant_id, module, event, event_timestamp, user_ret, cost_us, #NAME1, value1, #NAME2, value2, \ + #NAME3, value3, #NAME4, value4); \ + return ;\ + } +#define EightArguments(MODULE, EVENT, EVENT_STR, NAME1, NAME2, NAME3, NAME4, NAME5) \ + DEF_EVENT_COMMON(EVENT, EVENT_STR) \ + template \ + static void MODULE##_##EVENT##_func(const uint64_t tenant_id, const char * const module, const char * const event, \ + const int64_t event_timestamp, const int user_ret, const int64_t cost_us, \ + const T1 &value1, const T2 &value2, \ + const T3 &value3, const T4 &value4, \ + const T5 &value5) \ + { \ + TENANT_EVENT_ADD(tenant_id, module, event, event_timestamp, user_ret, cost_us, #NAME1, value1, #NAME2, value2, \ + #NAME3, value3, #NAME4, value4, #NAME5, value5); \ + return ;\ + } +#define NineArguments(MODULE, EVENT, EVENT_STR, NAME1, NAME2, NAME3, NAME4, NAME5, NAME6) \ + 
DEF_EVENT_COMMON(EVENT, EVENT_STR) \ + template \ + static void MODULE##_##EVENT##_func(const uint64_t tenant_id, const char * const module, const char * const event, \ + const int64_t event_timestamp, const int user_ret, const int64_t cost_us, \ + const T1 &value1, const T2 &value2, \ + const T3 &value3, const T4 &value4, \ + const T5 &value5, const T6 &value6) \ + { \ + TENANT_EVENT_ADD(tenant_id, module, event, event_timestamp, user_ret, cost_us, #NAME1, value1, #NAME2, value2, \ + #NAME3, value3, #NAME4, value4, #NAME5, value5, #NAME6, value6); \ + return ;\ + } + +#define GetMacro(_1, _2, _3, _4, _5, _6, _7, _8, _9, NAME, ...) NAME +#define DEF_EVENT(...) \ + GetMacro(__VA_ARGS__, NineArguments, EightArguments, SevenArguments, SixArguments, FiveArguments, FourArguments, ThreeArguments, TwoArguments, OneArgument, ...)(__VA_ARGS__) + +#define TENANT_EVENT(tenant_id, MODULE, EVENT, event_timestamp, user_ret, cost_us, args...) \ + MODULE::MODULE##_##EVENT##_func(tenant_id, MODULE::MODULE##_STR, MODULE::EVENT##_STR, event_timestamp, user_ret, cost_us, args) + +#include "ob_tenant_event_def.h" +#undef DEF_MODULE +#undef DEF_EVENT +#undef DEF_EVENT_COMMON +#undef OneArguments +#undef TwoArguments +#undef ThreeArguments +#undef FourArguments +#undef FiveArguments +#undef SixArguments +#undef SevenArguments +#undef EightArguments +#undef NineArguments +#undef GetMacro +} // end namespace tenant_event +} // end namespace oceanbase +#endif /* _OB_TENANT_EVENT_DEF_H */ \ No newline at end of file diff --git a/src/rootserver/ob_tenant_event_history_table_operator.cpp b/src/rootserver/ob_tenant_event_history_table_operator.cpp new file mode 100644 index 0000000000..b33e52179b --- /dev/null +++ b/src/rootserver/ob_tenant_event_history_table_operator.cpp @@ -0,0 +1,47 @@ +/** + * Copyright (c) 2021 OceanBase + * OceanBase CE is licensed under Mulan PubL v2. + * You can use this software according to the terms and conditions of the Mulan PubL v2. + * You may obtain a copy of Mulan PubL v2 at: + * http://license.coscl.org.cn/MulanPubL-2.0 + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PubL v2 for more details. 
+ */ +#define USING_LOG_PREFIX RS + +#include "ob_tenant_event_history_table_operator.h" + +#include "lib/oblog/ob_log_module.h" // LOG_* +namespace oceanbase +{ +namespace rootserver +{ +using namespace common; +using namespace share; +int ObTenantEventHistoryTableOperator::init(common::ObMySQLProxy &proxy, + const common::ObAddr &self_addr) +{ + int ret = OB_SUCCESS; + if (OB_FAIL(ObEventHistoryTableOperator::init(proxy))) { + LOG_WARN("fail to init event history table operator", KR(ret)); + } else { + const bool is_rs_event = false; + const bool is_server_event = false; + set_addr(self_addr, is_rs_event, is_server_event); + set_event_table(share::OB_ALL_TENANT_EVENT_HISTORY_TNAME); + } + return ret; +} +ObTenantEventHistoryTableOperator &ObTenantEventHistoryTableOperator::get_instance() +{ + static ObTenantEventHistoryTableOperator instance; + return instance; +} +int ObTenantEventHistoryTableOperator::async_delete() +{ + return OB_NOT_SUPPORTED; +} +}//end namespace rootserver +}//end namespace oceanbase \ No newline at end of file diff --git a/src/rootserver/ob_tenant_event_history_table_operator.h b/src/rootserver/ob_tenant_event_history_table_operator.h new file mode 100644 index 0000000000..792ccd17d7 --- /dev/null +++ b/src/rootserver/ob_tenant_event_history_table_operator.h @@ -0,0 +1,36 @@ +/** + * Copyright (c) 2021 OceanBase + * OceanBase CE is licensed under Mulan PubL v2. + * You can use this software according to the terms and conditions of the Mulan PubL v2. + * You may obtain a copy of Mulan PubL v2 at: + * http://license.coscl.org.cn/MulanPubL-2.0 + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PubL v2 for more details. + */ +#ifndef OCEANBASE_ROOTSERVER_OB_TENANT_EVENT_HISTORY_TABLE_OPERATOR_H_ +#define OCEANBASE_ROOTSERVER_OB_TENANT_EVENT_HISTORY_TABLE_OPERATOR_H_ +#include "share/ob_event_history_table_operator.h" + +namespace oceanbase +{ +namespace rootserver +{ +class ObTenantEventHistoryTableOperator : public share::ObEventHistoryTableOperator +{ +public: + virtual ~ObTenantEventHistoryTableOperator() {} + int init(common::ObMySQLProxy &proxy, const common::ObAddr &self_addr); + virtual int async_delete() override; + static ObTenantEventHistoryTableOperator &get_instance(); +private: + ObTenantEventHistoryTableOperator() {} + DISALLOW_COPY_AND_ASSIGN(ObTenantEventHistoryTableOperator); +}; +} //end namespace rootserver +} //end namespace oceanbase +#define TENANT_EVENT_INSTANCE (::oceanbase::rootserver::ObTenantEventHistoryTableOperator::get_instance()) +#define TENANT_EVENT_ADD(args...) 
\ + TENANT_EVENT_INSTANCE.async_add_tenant_event(args) +#endif // OCEANBASE_ROOTSERVER_OB_TENANT_EVENT_HISTORY_TABLE_OPERATOR_H_ \ No newline at end of file diff --git a/src/rootserver/ob_tenant_info_loader.cpp b/src/rootserver/ob_tenant_info_loader.cpp index 631ced4716..9ed9ffd003 100644 --- a/src/rootserver/ob_tenant_info_loader.cpp +++ b/src/rootserver/ob_tenant_info_loader.cpp @@ -56,6 +56,7 @@ int ObTenantInfoLoader::init() sql_proxy_ = GCTX.sql_proxy_; tenant_id_ = MTL_ID(); tenant_info_cache_.reset(); + service_names_cache_.reset(); ATOMIC_STORE(&broadcast_times_, 0); ATOMIC_STORE(&rpc_update_times_, 0); ATOMIC_STORE(&sql_update_times_, 0); @@ -65,6 +66,8 @@ int ObTenantInfoLoader::init() } else if (OB_ISNULL(GCTX.sql_proxy_)) { ret = OB_ERR_UNEXPECTED; LOG_WARN("sql proxy is null", KR(ret)); + } else if (OB_FAIL(service_names_cache_.init(tenant_id_))) { + LOG_WARN("fail to init service_name_cache_", KR(ret), K(tenant_id_)); } else if (OB_FAIL(create(thread_cnt, "TenantInf"))) { LOG_WARN("failed to create tenant info loader thread", KR(ret), K(thread_cnt)); } @@ -83,6 +86,7 @@ void ObTenantInfoLoader::destroy() is_inited_ = false; tenant_id_ = OB_INVALID_TENANT_ID; tenant_info_cache_.reset(); + service_names_cache_.reset(); sql_proxy_ = NULL; ATOMIC_STORE(&broadcast_times_, 0); ATOMIC_STORE(&rpc_update_times_, 0); @@ -148,8 +152,8 @@ void ObTenantInfoLoader::run2() const int64_t refresh_time_interval_us = act_as_standby_() && is_sys_ls_leader ? ObTenantRoleTransitionConstants::STS_TENANT_INFO_REFRESH_TIME_US : ObTenantRoleTransitionConstants::DEFAULT_TENANT_INFO_REFRESH_TIME_US; - - if (need_refresh(refresh_time_interval_us) + bool need_refresh_tenant_info = need_refresh(refresh_time_interval_us); + if (need_refresh_tenant_info && OB_FAIL(tenant_info_cache_.refresh_tenant_info(tenant_id_, sql_proxy_, content_changed))) { LOG_WARN("failed to update tenant info", KR(ret), K_(tenant_id), KP(sql_proxy_)); } @@ -168,7 +172,21 @@ void ObTenantInfoLoader::run2() if (content_changed) { (void)dump_tenant_info_(sql_update_cost_time, is_sys_ls_leader, broadcast_cost_time, end_time_us, last_dump_time_us); } - const int64_t idle_time = max(10 * 1000, refresh_time_interval_us - cost_time_us); + + // Positioned last to reduce the impact on tenant_info_cache. 
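// Every path that installs a new service_name list ends up in
// ObAllServiceNamesCache::update_service_name() below, which only accepts a
// snapshot carrying a strictly newer epoch, so a slow periodic read can never
// overwrite a fresher list pushed by RPC. A minimal standalone sketch of that
// monotonic-epoch guard, with simplified types (illustration only):
#include <cstdint>
#include <mutex>
#include <string>
#include <vector>

class EpochGuardedCache {
public:
  // Returns true if the snapshot was installed, false if it was stale.
  bool update(int64_t epoch, const std::vector<std::string> &service_names) {
    std::lock_guard<std::mutex> guard(lock_);
    if (epoch <= epoch_) {
      return false;                    // stale snapshot: keep the current cache
    }
    epoch_ = epoch;
    service_names_ = service_names;    // newer snapshot wins
    return true;
  }
private:
  std::mutex lock_;
  int64_t epoch_ = 0;
  std::vector<std::string> service_names_;
};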
+ int tmp_ret = OB_SUCCESS; + const int64_t start_time_us_service_name = ObTimeUtility::current_time(); + bool need_refresh_service_name = service_names_cache_.need_refresh(); + if (need_refresh_service_name + && OB_TMP_FAIL(service_names_cache_.refresh_service_name())) { + LOG_WARN("failed to refresh service_names", KR(ret), KR(tmp_ret), K_(tenant_id), KP(sql_proxy_)); + } + const int64_t cost_time_us_service_name = ObTimeUtility::current_time() - start_time_us_service_name; + + LOG_TRACE("tenant info loader cost info", KR(ret), K(need_refresh_tenant_info), + "cost_time_us_tenant_info", cost_time_us, + K(need_refresh_service_name), K(cost_time_us_service_name)); + const int64_t idle_time = max(10 * 1000, refresh_time_interval_us - cost_time_us - cost_time_us_service_name); //At least sleep 10ms, allowing the thread to release the lock if (!stop_) { get_cond().wait_us(idle_time); @@ -434,7 +452,7 @@ int ObTenantInfoLoader::get_valid_sts_after(const int64_t specified_time_us, sha ret = OB_NEED_WAIT; LOG_TRACE("sts can not work for current tenant status", KR(ret), K(tenant_info)); } else { - standby_scn = tenant_info.get_standby_scn(); + standby_scn = tenant_info.get_readable_scn(); } const int64_t PRINT_INTERVAL = 3 * 1000 * 1000L; @@ -445,6 +463,22 @@ int ObTenantInfoLoader::get_valid_sts_after(const int64_t specified_time_us, sha return ret; } +int ObTenantInfoLoader::check_if_sts_is_ready(bool &is_ready) +{ + int ret = OB_SUCCESS; + is_ready = false; + if (is_user_tenant(tenant_id_)) { + // user tenant + share::ObAllTenantInfo tenant_info; + if (OB_FAIL(tenant_info_cache_.get_tenant_info(tenant_info))) { + LOG_WARN("failed to get tenant info", KR(ret)); + } else { + is_ready = tenant_info.is_sts_ready(); + } + } + return ret; +} + int ObTenantInfoLoader::get_readable_scn(share::SCN &readable_scn) { int ret = OB_SUCCESS; @@ -495,6 +529,25 @@ int ObTenantInfoLoader::check_is_primary_normal_status(bool &is_primary_normal_s return ret; } +int ObTenantInfoLoader::check_is_prepare_flashback_for_switch_to_primary_status(bool &is_prepare) +{ + int ret = OB_SUCCESS; + is_prepare = false; + + if (OB_SYS_TENANT_ID == MTL_ID() || is_meta_tenant(MTL_ID())) { + is_prepare = false; + } else { + // user tenant + share::ObAllTenantInfo tenant_info; + if (OB_FAIL(tenant_info_cache_.get_tenant_info(tenant_info))) { + LOG_WARN("failed to get tenant info", KR(ret)); + } else { + is_prepare = tenant_info.is_prepare_flashback_for_switch_to_primary_status(); + } + } + return ret; +} + int ObTenantInfoLoader::get_global_replayable_scn(share::SCN &replayable_scn) { int ret = OB_SUCCESS; @@ -616,6 +669,30 @@ int ObTenantInfoLoader::refresh_tenant_info() return ret; } +int ObTenantInfoLoader::refresh_service_name() +{ + int ret = OB_SUCCESS; + if (IS_NOT_INIT) { + ret = OB_NOT_INIT; + LOG_WARN("not init", KR(ret)); + } else if (OB_FAIL(service_names_cache_.refresh_service_name())) { + LOG_WARN("failed to refresh_service_name", KR(ret), K_(tenant_id)); + } + return ret; +} + +int ObTenantInfoLoader::update_service_name(const int64_t epoch, const common::ObIArray &service_name_list) +{ + int ret = OB_SUCCESS; + if (IS_NOT_INIT) { + ret = OB_NOT_INIT; + LOG_WARN("not init", KR(ret)); + } else if (OB_FAIL(service_names_cache_.update_service_name(epoch, service_name_list))) { + LOG_WARN("fail to update_service_name", KR(ret), K_(tenant_id), K(service_name_list)); + } + return ret; +} + int ObTenantInfoLoader::update_tenant_info_cache(const int64_t new_ora_rowscn, const ObAllTenantInfo &new_tenant_info, const uint64_t 
new_finish_data_version, @@ -909,6 +986,130 @@ int ObAllTenantInfoCache::get_tenant_info(share::ObAllTenantInfo &tenant_info) } return ret; } +int ObAllServiceNamesCache::init(const uint64_t tenant_id) +{ + int ret = OB_SUCCESS; + if (OB_UNLIKELY(!is_valid_tenant_id(tenant_id))) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid arg", KR(ret), K(tenant_id)); + } else { + epoch_ = 0; + tenant_id_ = tenant_id; + all_service_names_.reset(); + last_refresh_time_ = OB_INVALID_TIMESTAMP; + ATOMIC_SET(&is_service_name_enabled_, false); + } + return ret; +} +int ObAllServiceNamesCache::refresh_service_name() +{ + int ret = OB_SUCCESS; + ObArray all_service_names; + int64_t epoch = 0; + if (!is_user_tenant(tenant_id_)) { + // do nothing + } else { + if (!ATOMIC_LOAD(&is_service_name_enabled_)) { + if (OB_FAIL(ObServiceNameProxy::check_is_service_name_enabled(tenant_id_))) { + LOG_WARN("fail to check whether service_name is enabled", KR(ret), K(tenant_id_)); + } else { + ATOMIC_SET(&is_service_name_enabled_, true); + LOG_INFO("service_name is enabled now", KR(ret), K(is_service_name_enabled_)); + } + } + if (OB_SUCC(ret)) { + if (OB_FAIL(ObServiceNameProxy::select_all_service_names_with_epoch(tenant_id_, epoch, all_service_names))) { + LOG_WARN("fail to load", KR(ret), K(tenant_id_)); + } else if (OB_FAIL(update_service_name(epoch, all_service_names))) { + LOG_WARN("fail to update service_name", KR(ret), K(all_service_names)); + } + if (dump_service_names_interval_.reach()) { + LOG_INFO("refresh service_names", KR(ret), K(tenant_id_), K(epoch_), K(all_service_names_)); + } + } + } + return ret; +} +int ObAllServiceNamesCache::update_service_name(const int64_t epoch, const common::ObIArray &service_name_list) +{ + int ret = OB_SUCCESS; + SpinWLockGuard guard(lock_); + if (epoch <= epoch_) { + // do nothing + } else { + LOG_INFO("try to update service_name", KR(ret), "local epoch", epoch_, + "local cache", all_service_names_, "new epoch", epoch, "new cache", service_name_list); + epoch_ = epoch; + all_service_names_.reset(); + if (OB_FAIL(all_service_names_.assign(service_name_list))) { + LOG_WARN("fail to assign all_service_names_", KR(ret), K(service_name_list)); + } + last_refresh_time_ = ObTimeUtility::current_time(); + } + return ret; +} +bool ObAllServiceNamesCache::need_refresh() +{ + bool need = false; + const int64_t now = ObTimeUtility::current_time(); + if (now - last_refresh_time_ >= REFRESH_INTERVAL) { + need = true; + } + return need; +} +void ObAllServiceNamesCache::reset() +{ + SpinWLockGuard guard(lock_); + all_service_names_.reset(); + last_refresh_time_ = OB_INVALID_TIMESTAMP; + ATOMIC_SET(&is_service_name_enabled_, false); + tenant_id_ = OB_INVALID_TENANT_ID; + epoch_ = 0; +} +int ObAllServiceNamesCache::check_if_the_service_name_is_stopped(const ObServiceNameString &service_name_str) const +{ + int ret = OB_SUCCESS; + bool is_found = false; + SpinRLockGuard guard(lock_); + for (int64_t i = 0; OB_SUCC(ret) && !is_found && i < all_service_names_.size(); i++) { + if (all_service_names_.at(i).get_service_name_str().equal_to(service_name_str)) { + const ObServiceName & service_name = all_service_names_.at(i); + is_found = true; + if (service_name.is_stopped() || service_name.is_stopping()) { + ret = OB_SERVICE_STOPPED; + LOG_WARN("service_status is stopped", KR(ret), K(service_name), K(epoch_), K(all_service_names_)); + } + } + } + if (OB_SUCC(ret) && !is_found) { + ret = OB_SERVICE_NAME_NOT_FOUND; + LOG_WARN("service_name_str is not found", KR(ret), K(service_name_str), K(epoch_), 
K(all_service_names_)); + } + return ret; +} + +int ObAllServiceNamesCache::get_service_name( + const ObServiceNameID &service_name_id, + ObServiceName &service_name) const +{ + int ret = OB_SUCCESS; + bool is_found = false; + SpinRLockGuard guard(lock_); + for (int64_t i = 0; OB_SUCC(ret) && !is_found && i < all_service_names_.size(); ++i) { + const ObServiceName & tmp_service_name = all_service_names_.at(i); + if (service_name_id == tmp_service_name.get_service_name_id()) { + is_found = true; + if (OB_FAIL(service_name.assign(tmp_service_name))) { + LOG_WARN("fail to assign service_name", KR(ret), K(tmp_service_name)); + } + } + } + if (OB_SUCC(ret) && !is_found) { + ret = OB_SERVICE_NAME_NOT_FOUND; + LOG_WARN("fail to find service_name", KR(ret), K(service_name_id), K(all_service_names_)); + } + return ret; +} } } diff --git a/src/rootserver/ob_tenant_info_loader.h b/src/rootserver/ob_tenant_info_loader.h index a27bc94482..da75220f65 100644 --- a/src/rootserver/ob_tenant_info_loader.h +++ b/src/rootserver/ob_tenant_info_loader.h @@ -16,8 +16,9 @@ #include "lib/thread/ob_reentrant_thread.h"//ObRsReentrantThread #include "lib/utility/ob_print_utils.h" //TO_STRING_KV #include "share/ob_tenant_info_proxy.h"//ObAllTenantInfo +#include "share/ob_service_name_proxy.h"//ObServiceName #include "lib/lock/ob_spin_rwlock.h" //lock -#include "rootserver/ob_tenant_role_transition_service.h"//ObTenantRoleTransitionConstants +#include "rootserver/standby/ob_tenant_role_transition_service.h"//ObTenantRoleTransitionConstants namespace oceanbase { namespace common @@ -87,6 +88,38 @@ private: DISALLOW_COPY_AND_ASSIGN(ObAllTenantInfoCache); }; +class ObAllServiceNamesCache +{ +public: + ObAllServiceNamesCache() + : lock_(), + tenant_id_(OB_INVALID_TENANT_ID), + epoch_(0), + all_service_names_(), + dump_service_names_interval_(DUMP_SERVICE_NAMES_INTERVAL), + last_refresh_time_(OB_INVALID_TIMESTAMP), + is_service_name_enabled_(false) {} + ~ObAllServiceNamesCache() {} + int init(const uint64_t tenant_id); + int refresh_service_name(); + int update_service_name(const int64_t epoch, const common::ObIArray &service_name_list); + bool need_refresh(); + int check_if_the_service_name_is_stopped(const ObServiceNameString &service_name_str) const; + int get_service_name(const ObServiceNameID &service_name_id, ObServiceName &service_name) const; + void reset(); +private: + static constexpr int64_t DUMP_SERVICE_NAMES_INTERVAL = 5 * 1000L * 1000L; // 5s + static constexpr int64_t REFRESH_INTERVAL = 2 * 1000L * 1000L; // 2s + common::SpinRWLock lock_; + uint64_t tenant_id_; + int64_t epoch_; + ObArray all_service_names_; + common::ObTimeInterval dump_service_names_interval_; + int64_t last_refresh_time_; + bool is_service_name_enabled_; + DISALLOW_COPY_AND_ASSIGN(ObAllServiceNamesCache); +}; + /*description: * Periodically cache tenant info.*/ class ObTenantInfoLoader : public share::ObReentrantThread @@ -102,7 +135,8 @@ public: rpc_update_times_(0), sql_update_times_(0), last_rpc_update_time_us_(OB_INVALID_TIMESTAMP), - dump_tenant_info_cache_update_action_interval_(DUMP_TENANT_INFO_CACHE_UPDATE_ACTION_INTERVAL) {} + dump_tenant_info_cache_update_action_interval_(DUMP_TENANT_INFO_CACHE_UPDATE_ACTION_INTERVAL), + service_names_cache_() {} ~ObTenantInfoLoader() {} static int mtl_init(ObTenantInfoLoader *&ka); int init(); @@ -127,6 +161,7 @@ public: * 3. 
sts can not work for current tenant status */ int get_valid_sts_after(const int64_t specified_time_us, share::SCN &standby_scn); + int check_if_sts_is_ready(bool &is_ready); /** * @description: * get tenant standby scn. @@ -200,13 +235,35 @@ public: * @param[out] is_primary_normal_status */ int check_is_primary_normal_status(bool &is_primary_normal_status); + int check_is_prepare_flashback_for_switch_to_primary_status(bool &is_prepare); int refresh_tenant_info(); + int refresh_service_name(); + int update_service_name(const int64_t epoch, const common::ObIArray &service_name_list); int update_tenant_info_cache(const int64_t new_ora_rowscn, const ObAllTenantInfo &new_tenant_info, const uint64_t new_finish_data_version, const share::SCN &new_data_version_barrier_scn); bool need_refresh(const int64_t refresh_time_interval_us); int get_max_ls_id(uint64_t &tenant_id, ObLSID &max_ls_id); + /** + * @description: + * check if service_status of the given service_name is STOPPED or STOPPING + * @param[in] service_name_str service_name string + * @return return code + * OB_SERVICE_NAME_NOT_FOUND service_name is not found, cannot check its service_status + * OB_SERVICE_STOPPED service_status is STOPPED or STOPPING + * OB_SUCCESS service_name exists and its service_status is STARTED + * others + */ + int check_if_the_service_name_is_stopped(const ObServiceNameString &service_name_str) const + { + return service_names_cache_.check_if_the_service_name_is_stopped(service_name_str); + } + bool get_service_name(const ObServiceNameID &service_name_id, ObServiceName &service_name) const + { + return service_names_cache_.get_service_name(service_name_id, service_name); + } + protected: /** @@ -245,6 +302,7 @@ private: uint64_t sql_update_times_; int64_t last_rpc_update_time_us_; common::ObTimeInterval dump_tenant_info_cache_update_action_interval_; + ObAllServiceNamesCache service_names_cache_; private: DISALLOW_COPY_AND_ASSIGN(ObTenantInfoLoader); }; diff --git a/src/rootserver/ob_transfer_partition_command.cpp b/src/rootserver/ob_transfer_partition_command.cpp index 14b4c6e149..e0b1a58287 100644 --- a/src/rootserver/ob_transfer_partition_command.cpp +++ b/src/rootserver/ob_transfer_partition_command.cpp @@ -19,7 +19,7 @@ #include "share/balance/ob_balance_job_table_operator.h"//ObBalanceJobTableOperator #include "share/transfer/ob_transfer_task_operator.h"//ObTransferTask #include "share/ob_tenant_info_proxy.h" -#include "share/ob_primary_standby_service.h" +#include "rootserver/standby/ob_standby_service.h" #include "observer/omt/ob_tenant_config_mgr.h" // ObTenantConfigGuard #include "share/ls/ob_ls_i_life_manager.h"//START/END_TRANSACTION #include "storage/tablelock/ob_lock_utils.h"//table_lock @@ -258,7 +258,7 @@ int ObTransferPartitionCommand::check_tenant_status_(const uint64_t tenant_id) } else if (OB_UNLIKELY(!is_valid_tenant_id(tenant_id))) { ret = OB_INVALID_ARGUMENT; LOG_WARN("invalid argument", KR(ret), K(tenant_id)); - } else if (OB_FAIL(OB_PRIMARY_STANDBY_SERVICE.get_tenant_status(tenant_id, tenant_status))) { + } else if (OB_FAIL(OB_STANDBY_SERVICE.get_tenant_status(tenant_id, tenant_status))) { LOG_WARN("fail to get tenant status", KR(ret), K(tenant_id)); } else if (OB_UNLIKELY(!is_tenant_normal(tenant_status))) { ret = OB_OP_NOT_ALLOW; diff --git a/src/rootserver/ob_upgrade_executor.cpp b/src/rootserver/ob_upgrade_executor.cpp index 5b0b487b75..ed17489d58 100644 --- a/src/rootserver/ob_upgrade_executor.cpp +++ b/src/rootserver/ob_upgrade_executor.cpp @@ -18,7 +18,7 @@ #include 
"observer/ob_server_struct.h" #include "share/ob_global_stat_proxy.h" #include "share/ob_cluster_event_history_table_operator.h"//CLUSTER_EVENT_INSTANCE -#include "share/ob_primary_standby_service.h" // ObPrimaryStandbyService +#include "rootserver/standby/ob_standby_service.h" // ObStandbyService #include "share/ob_tenant_info_proxy.h" //ObAllTenantInfoProxy #include "observer/ob_service.h" @@ -650,7 +650,7 @@ int ObUpgradeExecutor::run_upgrade_begin_action_( LOG_WARN("fail to update target data version", KR(ret), K(tenant_id), "version", DVP(DATA_CURRENT_VERSION)); } else if (is_user_tenant(tenant_id) - && OB_FAIL(OB_PRIMARY_STANDBY_SERVICE.write_upgrade_barrier_log( + && OB_FAIL(OB_STANDBY_SERVICE.write_upgrade_barrier_log( trans, tenant_id, DATA_CURRENT_VERSION))) { LOG_WARN("fail to write_upgrade_barrier_log", KR(ret), K(tenant_id), "version", DVP(DATA_CURRENT_VERSION)); @@ -1125,7 +1125,7 @@ int ObUpgradeExecutor::update_final_current_data_version_(const uint64_t tenant_ if (OB_FAIL(end_proxy.update_current_data_version(version))) { LOG_WARN("fail to update current data version", KR(ret), K(tenant_id), KDV(version)); } else if (is_user_tenant(tenant_id) && - OB_FAIL(OB_PRIMARY_STANDBY_SERVICE.write_upgrade_data_version_barrier_log( + OB_FAIL(OB_STANDBY_SERVICE.write_upgrade_data_version_barrier_log( trans, tenant_id, version))) { LOG_WARN("fail to write_upgrade_data_version_barrier_log", KR(ret), K(tenant_id), KDV(version)); } diff --git a/src/rootserver/restore/ob_clone_scheduler.cpp b/src/rootserver/restore/ob_clone_scheduler.cpp index 0cb6daad33..cda05680d9 100644 --- a/src/rootserver/restore/ob_clone_scheduler.cpp +++ b/src/rootserver/restore/ob_clone_scheduler.cpp @@ -1409,9 +1409,9 @@ int ObCloneScheduler::check_sys_tenant_(const uint64_t tenant_id) } else if (OB_ISNULL(GCTX.schema_service_)) { ret = OB_ERR_UNEXPECTED; LOG_WARN("unexpected null schema service", KR(ret), KP(GCTX.schema_service_)); - } else if (GCTX.is_standby_cluster() || GCONF.in_upgrade_mode()) { + } else if (GCONF.in_upgrade_mode()) { ret = OB_OP_NOT_ALLOW; - LOG_WARN("clone tenant while in standby cluster or in upgrade mode is not allowed", KR(ret)); + LOG_WARN("clone tenant while in upgrade mode is not allowed", KR(ret)); } else if (OB_FAIL(GCTX.schema_service_->get_tenant_schema_guard( OB_SYS_TENANT_ID, schema_guard))) { LOG_WARN("fail to get schema guard", KR(ret)); diff --git a/src/rootserver/restore/ob_recover_table_job_scheduler.cpp b/src/rootserver/restore/ob_recover_table_job_scheduler.cpp index 986664079e..2ea8ce50a2 100644 --- a/src/rootserver/restore/ob_recover_table_job_scheduler.cpp +++ b/src/rootserver/restore/ob_recover_table_job_scheduler.cpp @@ -17,7 +17,7 @@ #include "rootserver/restore/ob_recover_table_initiator.h" #include "rootserver/restore/ob_restore_service.h" #include "share/backup/ob_backup_data_table_operator.h" -#include "share/ob_primary_standby_service.h" +#include "rootserver/standby/ob_standby_service.h" #include "share/location_cache/ob_location_service.h" #include "share/restore/ob_physical_restore_table_operator.h" #include "share/restore/ob_import_util.h" @@ -652,7 +652,7 @@ int ObRecoverTableJobScheduler::failover_to_primary_( MTL_SWITCH(OB_SYS_TENANT_ID) { if (OB_FAIL(switch_tenant_arg.init(aux_tenant_id, obrpc::ObSwitchTenantArg::OpType::FAILOVER_TO_PRIMARY, "", false))) { LOG_WARN("failed to init switch tenant arg", K(ret), K(aux_tenant_id)); - } else if (OB_FAIL(OB_PRIMARY_STANDBY_SERVICE.switch_tenant(switch_tenant_arg))) { + } else if 
(OB_FAIL(OB_STANDBY_SERVICE.switch_tenant(switch_tenant_arg))) { LOG_WARN("failed to switch_tenant", KR(ret), K(switch_tenant_arg)); } else { LOG_INFO("[RECOVER_TABLE]succeed to switch aux tenant role to primary", K(aux_tenant_id), K(job)); diff --git a/src/rootserver/restore/ob_restore_common_util.cpp b/src/rootserver/restore/ob_restore_common_util.cpp index afd482188f..947030003a 100644 --- a/src/rootserver/restore/ob_restore_common_util.cpp +++ b/src/rootserver/restore/ob_restore_common_util.cpp @@ -16,7 +16,7 @@ #include "share/ls/ob_ls_status_operator.h" //ObLSStatusOperator #include "share/ls/ob_ls_operator.h"//ObLSAttr #include "rootserver/ob_ls_service_helper.h" -#include "rootserver/ob_tenant_role_transition_service.h" +#include "rootserver/standby/ob_tenant_role_transition_service.h" #include "src/share/ob_schema_status_proxy.h" #include "src/share/ob_rpc_struct.h" #include "rootserver/ob_ddl_service.h" @@ -204,6 +204,9 @@ int ObRestoreCommonUtil::try_update_tenant_role(common::ObMySQLProxy *sql_proxy, ObAllTenantInfo all_tenant_info; int64_t new_switch_ts = 0; bool need_update = false; + ObTenantRoleTransitionService role_transition_service; + ObTenantRoleTransCostDetail cost_detail; + ObTenantRoleTransAllLSInfo all_ls; if (OB_UNLIKELY(!is_user_tenant(tenant_id) || OB_ISNULL(sql_proxy) @@ -224,16 +227,24 @@ int ObRestoreCommonUtil::try_update_tenant_role(common::ObMySQLProxy *sql_proxy, //update tenant role to standby tenant if (all_tenant_info.get_sync_scn() != restore_scn) { sync_satisfied = false; - LOG_WARN("tenant sync scn not equal to restore scn", KR(ret), - K(all_tenant_info), K(restore_scn)); + LOG_WARN("tenant sync scn not equal to restore scn", KR(ret), K(all_tenant_info), K(restore_scn)); } else if (OB_FAIL(ObAllTenantInfoProxy::update_tenant_role( tenant_id, sql_proxy, all_tenant_info.get_switchover_epoch(), share::STANDBY_TENANT_ROLE, all_tenant_info.get_switchover_status(), share::NORMAL_SWITCHOVER_STATUS, new_switch_ts))) { LOG_WARN("failed to update tenant role", KR(ret), K(tenant_id), K(all_tenant_info)); + } else if (OB_FAIL(all_ls.init())) { + LOG_WARN("fail to init all_ls", KR(ret)); + } else if (OB_FAIL(role_transition_service.init( + tenant_id, + ObSwitchTenantArg::OpType::INVALID, + false, /* is_verify */ + sql_proxy, + GCTX.srv_rpc_proxy_, + &cost_detail, + &all_ls))) { + LOG_WARN("fail to init role_transition_service", KR(ret), K(tenant_id), KP(sql_proxy), KP(GCTX.srv_rpc_proxy_)); } else { - ObTenantRoleTransitionService role_transition_service(tenant_id, sql_proxy, - GCTX.srv_rpc_proxy_, obrpc::ObSwitchTenantArg::OpType::INVALID); (void)role_transition_service.broadcast_tenant_info( ObTenantRoleTransitionConstants::RESTORE_TO_STANDBY_LOG_MOD_STR); } diff --git a/src/rootserver/restore/ob_restore_scheduler.cpp b/src/rootserver/restore/ob_restore_scheduler.cpp index ab1b3397b9..7dcbe73314 100644 --- a/src/rootserver/restore/ob_restore_scheduler.cpp +++ b/src/rootserver/restore/ob_restore_scheduler.cpp @@ -19,7 +19,7 @@ #include "rootserver/ob_unit_manager.h"//convert_pool_name_lis #include "rootserver/ob_ls_service_helper.h"//create_new_ls_in_trans #include "rootserver/ob_common_ls_service.h"//do_create_user_ls -#include "rootserver/ob_tenant_role_transition_service.h" +#include "rootserver/standby/ob_tenant_role_transition_service.h" #include "share/ob_schema_status_proxy.h" #include "share/schema/ob_schema_utils.h" #include "share/schema/ob_schema_mgr.h" @@ -34,7 +34,7 @@ #include "share/restore/ob_log_restore_source_mgr.h" #include 
"share/ls/ob_ls_recovery_stat_operator.h"//ObLSRecoveryStatOperator #include "share/ob_rpc_struct.h" -#include "share/ob_primary_standby_service.h" +#include "rootserver/standby/ob_standby_service.h" #include "logservice/palf/log_define.h"//scn #include "share/scn.h" #include "ob_restore_service.h" @@ -1281,7 +1281,7 @@ int ObRestoreScheduler::check_tenant_replay_to_consistent_scn(const uint64_t ten ret = OB_INVALID_ARGUMENT; LOG_WARN("unexpected recovery until scn", K(ret), K(tenant_info), K(scn)); } else { - is_replay_finish = (tenant_info.get_recovery_until_scn() <= tenant_info.get_standby_scn()); + is_replay_finish = (tenant_info.get_recovery_until_scn() <= tenant_info.get_readable_scn()); LOG_INFO("[RESTORE]tenant replay to consistent_scn", K(is_replay_finish)); } return ret; diff --git a/src/rootserver/ob_recovery_ls_service.cpp b/src/rootserver/standby/ob_recovery_ls_service.cpp similarity index 99% rename from src/rootserver/ob_recovery_ls_service.cpp rename to src/rootserver/standby/ob_recovery_ls_service.cpp index f7467b33f0..98782473ed 100755 --- a/src/rootserver/ob_recovery_ls_service.cpp +++ b/src/rootserver/standby/ob_recovery_ls_service.cpp @@ -40,7 +40,7 @@ #include "share/ob_errno.h" #include "share/ob_share_util.h" //ObShareUtil #include "share/schema/ob_multi_version_schema_service.h" //ObMultiSchemaService -#include "share/ob_primary_standby_service.h" // ObPrimaryStandbyService +#include "rootserver/standby/ob_standby_service.h" // ObStandbyService #include "share/ob_standby_upgrade.h" // ObStandbyUpgrade #include "share/ob_upgrade_utils.h" // ObUpgradeChecker #include "share/ob_global_stat_proxy.h" // ObGlobalStatProxy @@ -51,6 +51,7 @@ #include "share/ob_log_restore_proxy.h" // ObLogRestoreProxyUtil #include "share/ob_occam_time_guard.h"//ObTimeGuard #include "src/rootserver/ob_rs_event_history_table_operator.h" +#include "rootserver/tenant_snapshot/ob_tenant_snapshot_util.h" // ObTenantSnapshotUtil namespace oceanbase { @@ -1285,7 +1286,7 @@ int ObRecoveryLSService::do_ls_balance_task_() } else if (OB_FAIL(tenant_info_loader->get_tenant_info(tenant_info))) { LOG_WARN("get_tenant_info failed", K(ret)); } else if (OB_FAIL(ObBalanceTaskHelperTableOperator::load_tasks_order_by_scn( - tenant_id_, *proxy_, tenant_info.get_standby_scn(), + tenant_id_, *proxy_, tenant_info.get_readable_scn(), ls_balance_tasks))) { if (OB_ENTRY_NOT_EXIST == ret) { ret = OB_SUCCESS; @@ -1411,7 +1412,7 @@ int ObRecoveryLSService::check_transfer_begin_can_remove_( //check tenant_info status and check wait readable_scn is equal to sync_scn ret = OB_SUCCESS; transfer_scn = tenant_info.get_sync_scn(); - if (tenant_info.get_sync_scn() != tenant_info.get_standby_scn()) { + if (tenant_info.get_sync_scn() != tenant_info.get_readable_scn()) { can_remove = false; LOG_WARN("There are transfer tasks in progress. 
Must wait for replay to newest", KR(ret), K(tenant_id_), K(tenant_info), K(ls_balance_task)); diff --git a/src/rootserver/ob_recovery_ls_service.h b/src/rootserver/standby/ob_recovery_ls_service.h similarity index 99% rename from src/rootserver/ob_recovery_ls_service.h rename to src/rootserver/standby/ob_recovery_ls_service.h index dac7f3cb4b..657bdb5a3e 100755 --- a/src/rootserver/ob_recovery_ls_service.h +++ b/src/rootserver/standby/ob_recovery_ls_service.h @@ -17,7 +17,7 @@ #include "logservice/palf/lsn.h"//palf::LSN #include "logservice/palf/palf_iterator.h" //PalfBufferIterator #include "logservice/restoreservice/ob_log_restore_handler.h"//RestoreStatusInfo -#include "ob_primary_ls_service.h" //ObTenantThreadHelper +#include "rootserver/ob_primary_ls_service.h" //ObTenantThreadHelper #include "lib/lock/ob_spin_lock.h" //ObSpinLock #include "storage/tx/ob_multi_data_source.h" //ObTxBufferNode diff --git a/src/share/ob_primary_standby_service.cpp b/src/rootserver/standby/ob_standby_service.cpp similarity index 76% rename from src/share/ob_primary_standby_service.cpp rename to src/rootserver/standby/ob_standby_service.cpp index b4628c8282..7562847ae5 100644 --- a/src/share/ob_primary_standby_service.cpp +++ b/src/rootserver/standby/ob_standby_service.cpp @@ -12,12 +12,13 @@ #define USING_LOG_PREFIX STANDBY -#include "ob_primary_standby_service.h" // ObPrimaryStandbyService +#include "ob_standby_service.h" // ObStandbyService + #include "lib/oblog/ob_log_module.h" // LOG_* #include "lib/utility/ob_print_utils.h" // TO_STRING_KV #include "rootserver/ob_cluster_event.h" // CLUSTER_EVENT_ADD_CONTROL +#include "rootserver/ob_tenant_event_def.h" // TENANT_EVENT #include "rootserver/ob_rs_event_history_table_operator.h" // ROOTSERVICE_EVENT_ADD -#include "rootserver/ob_tenant_role_transition_service.h" // ObTenantRoleTransitionService #include "rootserver/ob_ls_service_helper.h"//ObTenantLSInfo #include "share/restore/ob_log_restore_source_mgr.h" // ObLogRestoreSourceMgr #include "share/ls/ob_ls_recovery_stat_operator.h"// ObLSRecoveryStatOperator @@ -42,11 +43,11 @@ using namespace obrpc; using namespace share; using namespace rootserver; using namespace storage; - +using namespace tenant_event; namespace standby { -int ObPrimaryStandbyService::init( +int ObStandbyService::init( ObMySQLProxy *sql_proxy, share::schema::ObMultiVersionSchemaService *schema_service) { @@ -63,20 +64,20 @@ int ObPrimaryStandbyService::init( return ret; } -void ObPrimaryStandbyService::destroy() +void ObStandbyService::destroy() { if (OB_UNLIKELY(!inited_)) { - LOG_INFO("ObPrimaryStandbyService has been destroyed", K_(inited)); + LOG_INFO("ObStandbyService has been destroyed", K_(inited)); } else { - LOG_INFO("ObPrimaryStandbyService begin to destroy", K_(inited)); + LOG_INFO("ObStandbyService begin to destroy", K_(inited)); sql_proxy_ = NULL; schema_service_ = NULL; inited_ = false; - LOG_INFO("ObPrimaryStandbyService destroyed", K_(inited)); + LOG_INFO("ObStandbyService destroyed", K_(inited)); } } -int ObPrimaryStandbyService::check_inner_stat_() +int ObStandbyService::check_inner_stat_() { int ret = OB_SUCCESS; if (OB_UNLIKELY(!inited_)) { @@ -88,117 +89,209 @@ int ObPrimaryStandbyService::check_inner_stat_() } return ret; } +#define PRINT_TENANT_INFO(tenant_info, tenant_info_buf) \ + do { \ + int64_t pos = 0; \ + size_t tenant_buf_size = sizeof(tenant_info_buf) / sizeof(tenant_info_buf[0]); \ + if ((tenant_info).is_valid()) { \ + (void)databuff_printf(tenant_info_buf, tenant_buf_size, pos, "%s", 
to_cstring((tenant_info))); \ + } else { \ + (void)databuff_printf(tenant_info_buf, tenant_buf_size, pos, "NULL"); \ + } \ + } while(0) -int ObPrimaryStandbyService::switch_tenant(const obrpc::ObSwitchTenantArg &arg) +void ObStandbyService::tenant_event_start_( + const uint64_t switch_tenant_id, const obrpc::ObSwitchTenantArg &arg, int ret, + int64_t begin_ts, const share::ObAllTenantInfo &tenant_info) +{ + char tenant_info_buf[1024] = ""; + PRINT_TENANT_INFO(tenant_info, tenant_info_buf); + switch (arg.get_op_type()) { + case ObSwitchTenantArg::SWITCH_TO_PRIMARY : + TENANT_EVENT(switch_tenant_id, TENANT_ROLE_CHANGE, SWITCHOVER_TO_PRIMARY_START, begin_ts, + ret, 0, ObHexEscapeSqlStr(arg.get_stmt_str()), ObHexEscapeSqlStr(tenant_info_buf)); + break; + case ObSwitchTenantArg::SWITCH_TO_STANDBY : + TENANT_EVENT(switch_tenant_id, TENANT_ROLE_CHANGE, SWITCHOVER_TO_STANDBY_START, begin_ts, + ret, 0, ObHexEscapeSqlStr(arg.get_stmt_str()), ObHexEscapeSqlStr(tenant_info_buf)); + break; + case ObSwitchTenantArg::FAILOVER_TO_PRIMARY : + TENANT_EVENT(switch_tenant_id, TENANT_ROLE_CHANGE, FAILOVER_TO_PRIMARY_START, begin_ts, + ret, 0, ObHexEscapeSqlStr(arg.get_stmt_str()), ObHexEscapeSqlStr(tenant_info_buf)); + break; + default :break; + } +} + +void ObStandbyService::tenant_event_end_( + const uint64_t switch_tenant_id, const obrpc::ObSwitchTenantArg &arg, + int ret, int64_t cost, int64_t end_ts, const share::SCN switch_scn, + ObTenantRoleTransCostDetail &cost_detail, ObTenantRoleTransAllLSInfo &all_ls) +{ + share::ObAllTenantInfo tenant_info; + if (!THIS_WORKER.is_timeout()) { + int tmp_ret = OB_SUCCESS; + if (OB_TMP_FAIL(ObAllTenantInfoProxy::load_tenant_info( + switch_tenant_id, + sql_proxy_, + false, + tenant_info))) { + LOG_WARN("failed to load tenant info", KR(ret), K(switch_tenant_id)); + } + } + char tenant_info_buf[1024] = ""; + PRINT_TENANT_INFO(tenant_info, tenant_info_buf); + switch (arg.get_op_type()) { + case ObSwitchTenantArg::SWITCH_TO_PRIMARY : + TENANT_EVENT(switch_tenant_id, TENANT_ROLE_CHANGE, SWITCHOVER_TO_PRIMARY_END, end_ts, + ret, cost, ObHexEscapeSqlStr(arg.get_stmt_str()), ObHexEscapeSqlStr(tenant_info_buf), switch_scn.get_val_for_inner_table_field(), cost_detail, all_ls); + break; + case ObSwitchTenantArg::SWITCH_TO_STANDBY : + TENANT_EVENT(switch_tenant_id, TENANT_ROLE_CHANGE, SWITCHOVER_TO_STANDBY_END, end_ts, + ret, cost, ObHexEscapeSqlStr(arg.get_stmt_str()), ObHexEscapeSqlStr(tenant_info_buf), switch_scn.get_val_for_inner_table_field(), cost_detail, all_ls); + break; + case ObSwitchTenantArg::FAILOVER_TO_PRIMARY : + TENANT_EVENT(switch_tenant_id, TENANT_ROLE_CHANGE, FAILOVER_TO_PRIMARY_END, end_ts, + ret, cost, ObHexEscapeSqlStr(arg.get_stmt_str()), ObHexEscapeSqlStr(tenant_info_buf), switch_scn.get_val_for_inner_table_field(), cost_detail, all_ls); + break; + default :break; + } +} + +int ObStandbyService::switch_tenant(const obrpc::ObSwitchTenantArg &arg) { int ret = OB_SUCCESS; - int64_t begin_time = ObTimeUtility::current_time(); + int64_t begin_ts = ObTimeUtility::current_time(); uint64_t switch_tenant_id = OB_INVALID_ID; - const char *alter_cluster_event = arg.get_alter_type_str(); - ObTenantStatus tenant_status = TENANT_STATUS_MAX; + bool is_verify = arg.get_is_verify(); uint64_t compat_version = 0; - CLUSTER_EVENT_ADD_CONTROL_START(ret, alter_cluster_event, "stmt_str", arg.get_stmt_str()); + ObAllTenantInfo tenant_info; + share::SCN switch_scn = SCN::min_scn(); + ObTenantRoleTransCostDetail cost_detail; + ObTenantRoleTransAllLSInfo all_ls; + 
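// Summary of the rewritten switch_tenant() that follows: the role transition is
// now bracketed by tenant event records. tenant_event_start_() is written before
// dispatching to switch_to_primary / switch_to_standby / failover_to_primary, and
// tenant_event_end_() afterwards with the total cost, switch_scn, cost_detail and
// the per-LS info collected in all_ls; both records are skipped when the request
// is verify-only (is_verify).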
cost_detail.set_start(begin_ts); if (OB_FAIL(check_inner_stat_())) { LOG_WARN("inner stat error", KR(ret), K_(inited)); - } else if (!arg.is_valid()) { + } else if (OB_UNLIKELY(!arg.is_valid())) { ret = OB_INVALID_ARGUMENT; LOG_WARN("invalid arg", K(arg), KR(ret)); } else if (OB_FAIL(get_target_tenant_id(arg.get_tenant_name(), arg.get_exec_tenant_id(), switch_tenant_id))) { LOG_WARN("failed to get_target_tenant_id", KR(ret), K(switch_tenant_id), K(arg)); } else if (OB_FAIL(GET_MIN_DATA_VERSION(switch_tenant_id, compat_version))) { - LOG_WARN("fail to get data version", KR(ret), K(switch_tenant_id)); - } else if (compat_version < DATA_VERSION_4_1_0_0) { + LOG_WARN("fail to get data version", K(ret), K(arg)); + } else if (OB_UNLIKELY(compat_version < DATA_VERSION_4_1_0_0)) { ret = OB_NOT_SUPPORTED; LOG_WARN("Tenant COMPATIBLE is below 4.1.0.0, switch tenant is not supported", KR(ret)); - LOG_USER_ERROR(OB_NOT_SUPPORTED, "Tenant COMPATIBLE is below 4.1.0.0, switch tenant is"); - } else if (OB_FAIL(get_tenant_status(switch_tenant_id, tenant_status))) { - LOG_WARN("failed to get tenant status", KR(ret), K(switch_tenant_id)); - } else if (is_tenant_normal(tenant_status)) { + TENANT_ROLE_TRANS_USER_ERR_WITH_SUFFIX(OB_NOT_SUPPORTED, "Tenant COMPATIBLE is below 4.1.0.0", arg.get_op_type()); + } else if (OB_UNLIKELY(is_verify && !(compat_version >= DATA_VERSION_4_3_3_0 + || (compat_version >= DATA_VERSION_4_2_2_0 && compat_version < DATA_VERSION_4_3_0_0) + || (compat_version >= MOCK_DATA_VERSION_4_2_1_8 && compat_version < DATA_VERSION_4_2_2_0)))) { + ret = common::OB_NOT_SUPPORTED; + LOG_WARN("only (version >= 4_2_1_8 and version < 4_2_2_0) " + "or version >= 4_2_2_0 and version < 4_3_0_0 " + "or version >= 4_3_3_0 support this operation", KR(ret), K(compat_version)); + } else if (OB_FAIL(check_if_tenant_status_is_normal_(switch_tenant_id, arg.get_op_type()))) { + LOG_WARN("fail to check if tenant status is normal", KR(ret), K(switch_tenant_id), K(arg)); + } else if (OB_FAIL(all_ls.init())) { + LOG_WARN("fail to init all_ls", KR(ret)); + } else if (OB_FAIL(ObAllTenantInfoProxy::load_tenant_info( + switch_tenant_id, + sql_proxy_, + false, + tenant_info))) { + LOG_WARN("failed to load tenant info", KR(ret), K(switch_tenant_id)); + } else { + if (!is_verify) { + (void) tenant_event_start_(switch_tenant_id, arg, ret, begin_ts, tenant_info); + } + switch (arg.get_op_type()) { case ObSwitchTenantArg::SWITCH_TO_PRIMARY : - if (OB_FAIL(switch_to_primary(switch_tenant_id, arg.get_op_type()))) { - LOG_WARN("failed to switch_to_primary", KR(ret), K(switch_tenant_id), K(arg), K(tenant_status)); + if (OB_FAIL(switch_to_primary(switch_tenant_id, arg.get_op_type(), is_verify, + switch_scn, cost_detail, all_ls))) { + LOG_WARN("failed to switch_to_primary", KR(ret), K(switch_tenant_id), K(arg)); } break; case ObSwitchTenantArg::SWITCH_TO_STANDBY : - if (OB_FAIL(switch_to_standby(switch_tenant_id, arg.get_op_type()))) { - LOG_WARN("failed to switch_to_standby", KR(ret), K(switch_tenant_id), K(arg), K(tenant_status)); + if (OB_FAIL(switch_to_standby(switch_tenant_id, arg.get_op_type(), is_verify, + tenant_info, switch_scn, cost_detail, all_ls))) { + LOG_WARN("failed to switch_to_standby", KR(ret), K(switch_tenant_id), K(arg)); } break; case ObSwitchTenantArg::FAILOVER_TO_PRIMARY : - if (OB_FAIL(failover_to_primary(switch_tenant_id, arg.get_op_type()))) { - LOG_WARN("failed to failover_to_primary", KR(ret), K(switch_tenant_id), K(arg), K(tenant_status)); + if (OB_FAIL(failover_to_primary(switch_tenant_id, 
arg.get_op_type(), is_verify, + tenant_info, switch_scn, cost_detail, all_ls))) { + LOG_WARN("failed to failover_to_primary", KR(ret), K(switch_tenant_id), K(arg)); } break; default : ret = OB_INVALID_ARGUMENT; - LOG_WARN("unkown op_type", K(arg)); + LOG_WARN("unkown op_type", KR(ret), K(arg)); + } + // reset return code to TIMEOUT, to prevent the error code which not user unfriendly + if (THIS_WORKER.is_timeout() && OB_ERR_EXCLUSIVE_LOCK_CONFLICT == ret) { + ret = OB_TIMEOUT; + } + int64_t end_ts = ObTimeUtility::current_time(); + int64_t cost = end_ts - begin_ts; + cost_detail.set_end(end_ts); + FLOG_INFO("switch tenant end", KR(ret), K(arg), K(cost), K(cost_detail), K(all_ls)); + if (!is_verify) { + (void) tenant_event_end_(switch_tenant_id, arg, ret, cost, end_ts, switch_scn, cost_detail, all_ls); } - } else { - ret = OB_OP_NOT_ALLOW; - LOG_WARN("tenant status is not normal, switch tenant is not allowed", KR(ret), K(switch_tenant_id), K(arg), K(tenant_status)); - LOG_USER_ERROR(OB_OP_NOT_ALLOW, "tenant status is not normal, switch tenant is"); } - - // reset return code to TIMEOUT, to prevent the error code which not user unfriendly - if (THIS_WORKER.is_timeout() && OB_ERR_EXCLUSIVE_LOCK_CONFLICT == ret) { - ret = OB_TIMEOUT; - } - - int64_t cost = ObTimeUtility::current_time() - begin_time; - CLUSTER_EVENT_ADD_CONTROL_FINISH(ret, alter_cluster_event, - K(cost), - "stmt_str", arg.get_stmt_str()); - return ret; } -int ObPrimaryStandbyService::failover_to_primary(const uint64_t tenant_id, - const obrpc::ObSwitchTenantArg::OpType &switch_optype) +int ObStandbyService::failover_to_primary( + const uint64_t tenant_id, + const obrpc::ObSwitchTenantArg::OpType &switch_optype, + const bool is_verify, + const share::ObAllTenantInfo &tenant_info, + share::SCN &switch_scn, + ObTenantRoleTransCostDetail &cost_detail, + ObTenantRoleTransAllLSInfo &all_ls) { int ret = OB_SUCCESS; - ObAllTenantInfo tenant_info; - ObTenantStatus tenant_status = TENANT_STATUS_MAX; + ObTenantRoleTransitionService role_transition_service; if (OB_FAIL(check_inner_stat_())) { LOG_WARN("inner stat error", KR(ret), K_(inited)); - } else if (OB_ISNULL(GCTX.srv_rpc_proxy_) || OB_ISNULL(schema_service_)) { + } else if (OB_ISNULL(GCTX.srv_rpc_proxy_)) { ret = OB_ERR_UNEXPECTED; - LOG_WARN("pointer is null", KR(ret), KP(GCTX.srv_rpc_proxy_), KP(schema_service_)); + LOG_WARN("pointer is null", KR(ret), KP(GCTX.srv_rpc_proxy_)); } else if (OB_UNLIKELY(obrpc::ObSwitchTenantArg::OpType::INVALID == switch_optype)) { ret = OB_INVALID_ARGUMENT; LOG_WARN("invalid switch_optype", KR(ret), K(switch_optype)); - } else if (!is_user_tenant(tenant_id)) { - ret = OB_INVALID_ARGUMENT; - LOG_WARN("only support switch user tenant", KR(ret), K(tenant_id)); - LOG_USER_ERROR(OB_INVALID_ARGUMENT, "tenant id, only support operating user tenant"); - } else if (OB_FAIL(ObAllTenantInfoProxy::load_tenant_info(tenant_id, sql_proxy_, - false, tenant_info))) { - LOG_WARN("failed to load tenant info", KR(ret), K(tenant_id)); + } else if (OB_FAIL(role_transition_service.init( + tenant_id, + switch_optype, + is_verify, + sql_proxy_, + GCTX.srv_rpc_proxy_, + &cost_detail, + &all_ls))) { + LOG_WARN("fail to init role_transition_service", KR(ret), K(tenant_id), K(switch_optype), + KP(sql_proxy_), KP(GCTX.srv_rpc_proxy_), K(cost_detail), K(all_ls)); } else if (tenant_info.is_primary() && tenant_info.is_normal_status()) { LOG_INFO("already is primary tenant, no need switch", K(tenant_info)); - } else if (OB_FAIL(get_tenant_status(tenant_id, tenant_status))) { - 
LOG_WARN("failed to get tenant status", KR(ret), K(tenant_id)); - } else if (is_tenant_normal(tenant_status)) { - ObTenantRoleTransitionService role_transition_service(tenant_id, sql_proxy_, GCTX.srv_rpc_proxy_, switch_optype); - if (tenant_info.get_restore_data_mode().is_remote_mode()) { - ret = OB_OP_NOT_ALLOW; - LOG_WARN("tenant restore data mode is remote, failover is not allowed", KR(ret), K(tenant_id), K(tenant_info)); - LOG_USER_ERROR(OB_OP_NOT_ALLOW, "Tenant restore data mode is remote. Operation is"); - } else if (OB_FAIL(role_transition_service.failover_to_primary())) { - LOG_WARN("failed to failover to primary", KR(ret), K(tenant_id)); - } - } else { + } else if (tenant_info.get_restore_data_mode().is_remote_mode()) { ret = OB_OP_NOT_ALLOW; - LOG_WARN("tenant status is not normal, failover is not allowed", KR(ret), K(tenant_id), K(tenant_status)); - LOG_USER_ERROR(OB_OP_NOT_ALLOW, "tenant status is not normal, failover is"); + LOG_WARN("tenant restore data mode is remote, failover is not allowed", KR(ret), K(tenant_id), K(tenant_info)); + LOG_USER_ERROR(OB_OP_NOT_ALLOW, "Tenant restore data mode is remote. Operation is"); + } else { + if (OB_FAIL(role_transition_service.failover_to_primary())) { + LOG_WARN("failed to failover to primary", KR(ret), K(tenant_id)); + } + switch_scn = tenant_info.get_sync_scn(); } return ret; } -int ObPrimaryStandbyService::get_target_tenant_id(const ObString &tenant_name, - const uint64_t exec_tenant_id, - uint64_t &switch_tenant_id) +int ObStandbyService::get_target_tenant_id( + const ObString &tenant_name, + const uint64_t exec_tenant_id, + uint64_t &switch_tenant_id) { int ret = OB_SUCCESS; switch_tenant_id = OB_INVALID_ID; @@ -229,7 +322,7 @@ int ObPrimaryStandbyService::get_target_tenant_id(const ObString &tenant_name, LOG_WARN("get_schema_guard failed", KR(ret)); } else if (OB_FAIL(guard.get_tenant_id(tenant_name, switch_tenant_id))) { LOG_WARN("get_tenant_id failed", KR(ret), K(tenant_name), K(exec_tenant_id)); - } else if (!is_user_tenant(switch_tenant_id)) { + } else if (OB_UNLIKELY(!is_valid_tenant_id(switch_tenant_id) || !is_user_tenant(switch_tenant_id))) { ret = OB_INVALID_ARGUMENT; LOG_WARN("only support switch user tenant", KR(ret), K(tenant_name), K(exec_tenant_id), K(switch_tenant_id)); LOG_USER_ERROR(OB_INVALID_ARGUMENT, "tenant name, only support operating user tenant"); @@ -240,7 +333,7 @@ int ObPrimaryStandbyService::get_target_tenant_id(const ObString &tenant_name, return ret; } -int ObPrimaryStandbyService::recover_tenant(const obrpc::ObRecoverTenantArg &arg) +int ObStandbyService::recover_tenant(const obrpc::ObRecoverTenantArg &arg) { int ret = OB_SUCCESS; int64_t begin_time = ObTimeUtility::current_time(); @@ -274,21 +367,19 @@ int ObPrimaryStandbyService::recover_tenant(const obrpc::ObRecoverTenantArg &arg return ret; } -int ObPrimaryStandbyService::get_tenant_status( +int ObStandbyService::get_tenant_status( const uint64_t tenant_id, ObTenantStatus &status) { int ret = OB_SUCCESS; status = TENANT_STATUS_MAX; - if (OB_FAIL(check_inner_stat_())) { - LOG_WARN("inner stat error", KR(ret), K_(inited)); - } else if (!is_user_tenant(tenant_id)) { + if (!is_user_tenant(tenant_id)) { ret = OB_INVALID_ARGUMENT; LOG_WARN("only support get user tenant status", KR(ret), K(tenant_id)); LOG_USER_ERROR(OB_INVALID_ARGUMENT, "tenant id, only support operating user tenant"); - } else if (OB_ISNULL(sql_proxy_)) { + } else if (OB_ISNULL(GCTX.sql_proxy_)) { ret = OB_ERR_UNEXPECTED; - LOG_WARN("pointer is null", KR(ret), KP(sql_proxy_)); + 
LOG_WARN("pointer is null", KR(ret), KP(GCTX.sql_proxy_)); } else { ObSqlString sql; SMART_VAR(ObISQLClient::ReadResult, result) { @@ -296,7 +387,7 @@ int ObPrimaryStandbyService::get_tenant_status( "SELECT status FROM %s WHERE tenant_id = %lu", OB_ALL_TENANT_TNAME, tenant_id))) { LOG_WARN("assign sql string failed", KR(ret), K(tenant_id)); - } else if (OB_FAIL(sql_proxy_->read(result, OB_SYS_TENANT_ID, sql.ptr()))) { + } else if (OB_FAIL(GCTX.sql_proxy_->read(result, OB_SYS_TENANT_ID, sql.ptr()))) { LOG_WARN("execute sql failed", KR(ret), K(tenant_id), K(sql)); } else if (OB_ISNULL(result.get_result())) { ret = OB_ERR_UNEXPECTED; @@ -328,7 +419,21 @@ int ObPrimaryStandbyService::get_tenant_status( return ret; } -int ObPrimaryStandbyService::do_recover_tenant( +int ObStandbyService::check_if_tenant_status_is_normal_(const uint64_t tenant_id, const RoleTransType op_type) +{ + int ret = OB_SUCCESS; + ObTenantStatus tenant_status = TENANT_STATUS_MAX; + if (OB_FAIL(get_tenant_status(tenant_id, tenant_status))) { + LOG_WARN("failed to get tenant status", KR(ret), K(tenant_id)); + } else if (OB_UNLIKELY(!is_tenant_normal(tenant_status))) { + ret = OB_OP_NOT_ALLOW; + LOG_WARN("tenant status is not normal", KR(ret), K(tenant_id), K(tenant_status)); + TENANT_ROLE_TRANS_USER_ERR_WITH_SUFFIX(OB_OP_NOT_ALLOW, "tenant status is not normal", op_type); + } + return ret; +} + +int ObStandbyService::do_recover_tenant( const uint64_t tenant_id, const share::ObTenantSwitchoverStatus &working_sw_status, const obrpc::ObRecoverTenantArg::RecoverType &recover_type, @@ -417,12 +522,17 @@ int ObPrimaryStandbyService::do_recover_tenant( return ret; } -int ObPrimaryStandbyService::switch_to_primary( +int ObStandbyService::switch_to_primary( const uint64_t tenant_id, - const obrpc::ObSwitchTenantArg::OpType &switch_optype) + const obrpc::ObSwitchTenantArg::OpType &switch_optype, + const bool is_verify, + share::SCN &switch_scn, + ObTenantRoleTransCostDetail &cost_detail, + ObTenantRoleTransAllLSInfo &all_ls) { int ret = OB_SUCCESS; int64_t begin_time = ObTimeUtility::current_time(); + ObTenantRoleTransitionService role_transition_service; ObAllTenantInfo tenant_info; if (OB_FAIL(check_inner_stat_())) { LOG_WARN("inner stat error", KR(ret), K_(inited)); @@ -432,29 +542,37 @@ int ObPrimaryStandbyService::switch_to_primary( } else if (OB_UNLIKELY(obrpc::ObSwitchTenantArg::OpType::INVALID == switch_optype)) { ret = OB_INVALID_ARGUMENT; LOG_WARN("invalid switch_optype", KR(ret), K(switch_optype)); - } else if (!is_user_tenant(tenant_id)) { - ret = OB_INVALID_ARGUMENT; - LOG_WARN("only support switch user tenant", KR(ret), K(tenant_id)); - LOG_USER_ERROR(OB_INVALID_ARGUMENT, "tenant id, only support operating user tenant"); + } else if (OB_FAIL(role_transition_service.init( + tenant_id, + switch_optype, + is_verify, + sql_proxy_, + GCTX.srv_rpc_proxy_, + &cost_detail, + &all_ls))) { + LOG_WARN("fail to init role_transition_service", KR(ret), K(tenant_id), K(switch_optype), + KP(sql_proxy_), KP(GCTX.srv_rpc_proxy_), K(cost_detail), K(all_ls)); } else { - ObTenantRoleTransitionService role_transition_service(tenant_id, sql_proxy_, GCTX.srv_rpc_proxy_, switch_optype); (void)role_transition_service.set_switchover_epoch(tenant_info.get_switchover_epoch()); if (OB_FAIL(role_transition_service.failover_to_primary())) { - LOG_WARN("failed to failover to primary", KR(ret), K(tenant_id)); + LOG_WARN("fail to failover to primary", KR(ret), K(tenant_id)); } + switch_scn = role_transition_service.get_so_scn(); } - return ret; } -int 
ObPrimaryStandbyService::switch_to_standby( +int ObStandbyService::switch_to_standby( const uint64_t tenant_id, - const obrpc::ObSwitchTenantArg::OpType &switch_optype) + const obrpc::ObSwitchTenantArg::OpType &switch_optype, + const bool is_verify, + share::ObAllTenantInfo &tenant_info, + share::SCN &switch_scn, + ObTenantRoleTransCostDetail &cost_detail, + ObTenantRoleTransAllLSInfo &all_ls) { int ret = OB_SUCCESS; - ObAllTenantInfo tenant_info; const int32_t group_id = share::OBCG_DBA_COMMAND; - if (OB_FAIL(check_inner_stat_())) { LOG_WARN("inner stat error", KR(ret), K_(inited)); } else if (OB_ISNULL(GCTX.srv_rpc_proxy_)) { @@ -463,12 +581,6 @@ int ObPrimaryStandbyService::switch_to_standby( } else if (OB_UNLIKELY(obrpc::ObSwitchTenantArg::OpType::INVALID == switch_optype)) { ret = OB_INVALID_ARGUMENT; LOG_WARN("invalid switch_optype", KR(ret), K(switch_optype)); - } else if (!is_user_tenant(tenant_id)) { - ret = OB_INVALID_ARGUMENT; - LOG_WARN("only support switch user tenant", KR(ret), K(tenant_id)); - LOG_USER_ERROR(OB_INVALID_ARGUMENT, "tenant id, only support operating user tenant"); - } else if (OB_FAIL(ObAllTenantInfoProxy::load_tenant_info(tenant_id, sql_proxy_, false, tenant_info))) { - LOG_WARN("failed to load tenant info", KR(ret), K(tenant_id)); } else if (tenant_info.is_standby() && tenant_info.is_normal_status()) { LOG_INFO("already is standby tenant, no need switch", K(tenant_id), K(tenant_info)); } else { @@ -479,6 +591,12 @@ int ObPrimaryStandbyService::switch_to_standby( ret = OB_OP_NOT_ALLOW; LOG_WARN("unexpected tenant role", KR(ret), K(tenant_info)); LOG_USER_ERROR(OB_OP_NOT_ALLOW, "tenant role is not PRIMARY, switchover to standby is"); + } else if (OB_UNLIKELY(!tenant_info.get_recovery_until_scn().is_max())) { + ret = OB_OP_NOT_ALLOW; + LOG_WARN("recovery_until_scn has been changed ", KR(ret), K(tenant_id), K(tenant_info)); + LOG_USER_ERROR(OB_OP_NOT_ALLOW, "recovery_until_scn has been changed, switchover to standby is"); + } else if (is_verify) { + // skip } else if (OB_FAIL(update_tenant_status_before_sw_to_standby_( tenant_info.get_switchover_status(), tenant_info.get_tenant_role(), @@ -490,7 +608,7 @@ int ObPrimaryStandbyService::switch_to_standby( } } case share::ObTenantSwitchoverStatus::PREPARE_SWITCHING_TO_STANDBY_STATUS: { - if (OB_FAIL(ret)) { + if (OB_FAIL(ret) || is_verify) { } else if (OB_FAIL(switch_to_standby_prepare_ls_status_(tenant_id, tenant_info.get_switchover_status(), tenant_info.get_switchover_epoch(), @@ -499,9 +617,19 @@ int ObPrimaryStandbyService::switch_to_standby( } } case share::ObTenantSwitchoverStatus::SWITCHING_TO_STANDBY_STATUS: { - if (OB_FAIL(ret)) { + ObTenantRoleTransitionService role_transition_service; + if (OB_FAIL(ret) || is_verify) { + } else if (OB_FAIL(role_transition_service.init( + tenant_id, + switch_optype, + is_verify, + sql_proxy_, + GCTX.srv_rpc_proxy_, + &cost_detail, + &all_ls))) { + LOG_WARN("fail to init role_transition_service", KR(ret), K(tenant_id), K(switch_optype), + KP(sql_proxy_), KP(GCTX.srv_rpc_proxy_), K(cost_detail), K(all_ls)); } else { - ObTenantRoleTransitionService role_transition_service(tenant_id, sql_proxy_, GCTX.srv_rpc_proxy_, switch_optype); uint64_t compat_version = 0; ObGlobalStatProxy global_proxy(*sql_proxy_, gen_meta_tenant_id(tenant_id)); (void)role_transition_service.set_switchover_epoch(tenant_info.get_switchover_epoch()); @@ -527,13 +655,14 @@ int ObPrimaryStandbyService::switch_to_standby( (void)role_transition_service.broadcast_tenant_info( 
ObTenantRoleTransitionConstants::SWITCH_TO_STANDBY_LOG_MOD_STR); } + switch_scn = role_transition_service.get_so_scn(); } break; } default: { ret = OB_OP_NOT_ALLOW; LOG_WARN("switchover status not match", KR(ret), K(tenant_info), K(tenant_id)); - LOG_USER_ERROR(OB_OP_NOT_ALLOW, "switchover status not match, switchover to standby"); + LOG_USER_ERROR(OB_OP_NOT_ALLOW, "switchover status not match, switchover to standby is"); break; } } @@ -542,7 +671,7 @@ int ObPrimaryStandbyService::switch_to_standby( return ret; } -int ObPrimaryStandbyService::update_tenant_status_before_sw_to_standby_( +int ObStandbyService::update_tenant_status_before_sw_to_standby_( const ObTenantSwitchoverStatus cur_switchover_status, const ObTenantRole cur_tenant_role, const int64_t cur_switchover_epoch, @@ -571,7 +700,7 @@ int ObPrimaryStandbyService::update_tenant_status_before_sw_to_standby_( } else if (OB_UNLIKELY(!tenant_info.get_recovery_until_scn().is_max())) { ret = OB_OP_NOT_ALLOW; LOG_WARN("recovery_until_scn has been changed ", KR(ret), K(tenant_id), K(tenant_info)); - LOG_USER_ERROR(OB_OP_NOT_ALLOW, "recovery_until_scn has been changed, switchover to standby"); + LOG_USER_ERROR(OB_OP_NOT_ALLOW, "recovery_until_scn has been changed, switchover to standby is"); } else if (cur_switchover_status != tenant_info.get_switchover_status()) { ret = OB_NEED_RETRY; LOG_WARN("tenant not expect switchover status", KR(ret), K(tenant_info), K(cur_switchover_status)); @@ -599,16 +728,15 @@ int ObPrimaryStandbyService::update_tenant_status_before_sw_to_standby_( ret = OB_SUCC(ret) ? temp_ret : ret; } } - CLUSTER_EVENT_ADD_LOG(ret, "update tenant before switchover to standby", - "tenant id", tenant_id, - "old switchover#", cur_switchover_epoch, - "new switchover#", tenant_info.get_switchover_epoch(), - K(cur_switchover_status), K(cur_tenant_role)); + "tenant id", tenant_id, + "old switchover#", cur_switchover_epoch, + "new switchover#", tenant_info.get_switchover_epoch(), + K(cur_switchover_status), K(cur_tenant_role)); return ret; } -int ObPrimaryStandbyService::switch_to_standby_prepare_ls_status_( +int ObStandbyService::switch_to_standby_prepare_ls_status_( const uint64_t tenant_id, const ObTenantSwitchoverStatus &status, const int64_t switchover_epoch, @@ -631,7 +759,7 @@ int ObPrimaryStandbyService::switch_to_standby_prepare_ls_status_( } else if (OB_UNLIKELY(!status.is_prepare_switching_to_standby_status())) { ret = OB_OP_NOT_ALLOW; LOG_WARN("switchover status not match, switchover to standby not allow", KR(ret), K(status)); - LOG_USER_ERROR(OB_OP_NOT_ALLOW, "switchover status not match, switchover to standby"); + LOG_USER_ERROR(OB_OP_NOT_ALLOW, "switchover status not match, switchover to standby is"); } else if (OB_FAIL(GCTX.schema_service_->get_tenant_schema_guard(OB_SYS_TENANT_ID, schema_guard))) { LOG_WARN("fail to get schema guard", KR(ret)); } else if (OB_FAIL(schema_guard.get_tenant_info(tenant_id, tenant_schema))) { @@ -668,7 +796,7 @@ int ObPrimaryStandbyService::switch_to_standby_prepare_ls_status_( return ret; } -int ObPrimaryStandbyService::write_upgrade_barrier_log( +int ObStandbyService::write_upgrade_barrier_log( ObMySQLTransaction &trans, const uint64_t tenant_id, const uint64_t data_version) @@ -683,7 +811,7 @@ int ObPrimaryStandbyService::write_upgrade_barrier_log( return ret; } -int ObPrimaryStandbyService::write_upgrade_data_version_barrier_log( +int ObStandbyService::write_upgrade_data_version_barrier_log( ObMySQLTransaction &trans, const uint64_t tenant_id, const uint64_t data_version) @@ -700,7 
+828,7 @@ int ObPrimaryStandbyService::write_upgrade_data_version_barrier_log( return ret; } -int ObPrimaryStandbyService::write_barrier_log_( +int ObStandbyService::write_barrier_log_( const transaction::ObTxDataSourceType type, ObMySQLTransaction &trans, const uint64_t tenant_id, @@ -744,7 +872,7 @@ int ObPrimaryStandbyService::write_barrier_log_( return ret; } -int ObPrimaryStandbyService::check_can_create_standby_tenant( +int ObStandbyService::check_can_create_standby_tenant( const common::ObString &log_restore_source, ObCompatibilityMode &compat_mode) { @@ -772,7 +900,7 @@ int ObPrimaryStandbyService::check_can_create_standby_tenant( return ret; } -int ObPrimaryStandbyService::wait_create_standby_tenant_end(const uint64_t tenant_id) +int ObStandbyService::wait_create_standby_tenant_end(const uint64_t tenant_id) { int ret = OB_SUCCESS; int64_t start_ts = ObTimeUtility::current_time(); @@ -860,7 +988,7 @@ int ObPrimaryStandbyService::wait_create_standby_tenant_end(const uint64_t tenan return ret; } -int ObPrimaryStandbyService::check_ls_restore_status_(const uint64_t tenant_id) +int ObStandbyService::check_ls_restore_status_(const uint64_t tenant_id) { int ret = OB_SUCCESS; ObMySQLProxy *sql_proxy = GCTX.sql_proxy_; diff --git a/src/share/ob_primary_standby_service.h b/src/rootserver/standby/ob_standby_service.h similarity index 69% rename from src/share/ob_primary_standby_service.h rename to src/rootserver/standby/ob_standby_service.h index 0eecf792b3..582998607e 100644 --- a/src/share/ob_primary_standby_service.h +++ b/src/rootserver/standby/ob_standby_service.h @@ -10,20 +10,46 @@ * See the Mulan PubL v2 for more details. */ -#ifndef OCEANBASE_STANDBY_OB_PRIMARY_STANDBY_SERVICE_H_ -#define OCEANBASE_STANDBY_OB_PRIMARY_STANDBY_SERVICE_H_ +#ifndef OCEANBASE_STANDBY_OB_STANDBY_SERVICE_H_ +#define OCEANBASE_STANDBY_OB_STANDBY_SERVICE_H_ #include "share/ob_rpc_struct.h" // ObAdminClusterArg #include "share/ob_rs_mgr.h" // ObRsMgr #include "lib/mysqlclient/ob_isql_client.h" // ObISQLClient #include "rootserver/ob_ddl_service.h" // ObDDLService #include "share/schema/ob_multi_version_schema_service.h" // ObMultiVersionSchemaService +#include "rootserver/standby/ob_tenant_role_transition_service.h" // ObTenantRoleTransitionService +// usage: TENANT_ROLE_TRANS_USER_ERR_WITH_SUFFIX(OB_OP_NOT_ALLOW, "tenant status is not normal") +// the output to user will be "tenant status is not normal, switchover to primary is not allowed" +#define TENANT_ROLE_TRANS_USER_ERR_WITH_SUFFIX(TRT_ERR_RET, TRT_ERR_MSG, TRT_OP) \ +({ \ + int tmp_ret = OB_SUCCESS; \ + ObSqlString err_msg; \ + if (OB_TMP_FAIL(err_msg.append_fmt(TRT_ERR_MSG))) { \ + LOG_WARN("fail to assign error message", KR(tmp_ret)); \ + } else { \ + if (obrpc::ObSwitchTenantArg::OpType::SWITCH_TO_PRIMARY == TRT_OP) { \ + tmp_ret = err_msg.append_fmt(", switchover to primary is"); \ + } else if (obrpc::ObSwitchTenantArg::OpType::SWITCH_TO_STANDBY == TRT_OP) { \ + tmp_ret = err_msg.append_fmt(", switchover to standby is"); \ + } else if (obrpc::ObSwitchTenantArg::OpType::FAILOVER_TO_PRIMARY == TRT_OP) { \ + tmp_ret = err_msg.append_fmt(", failover to primary is"); \ + } else { \ + tmp_ret = err_msg.append_fmt(", this operation is"); \ + } \ + if (OB_SUCCESS != tmp_ret) { \ + LOG_WARN("fail to assign error message", KR(tmp_ret)); \ + } else { \ + LOG_USER_ERROR(TRT_ERR_RET, err_msg.ptr()); \ + } \ + } \ +}) namespace oceanbase { using namespace share; - +using namespace rootserver; namespace share { namespace schema @@ -35,15 +61,15 @@ class 
ObMultiVersionSchemaService; namespace standby { -class ObPrimaryStandbyService +class ObStandbyService { public: - ObPrimaryStandbyService(): + ObStandbyService(): sql_proxy_(NULL), schema_service_(NULL), inited_(false) {} - virtual ~ObPrimaryStandbyService() {} - + virtual ~ObStandbyService() {} + typedef obrpc::ObSwitchTenantArg::OpType RoleTransType; int init(ObMySQLProxy *sql_proxy, share::schema::ObMultiVersionSchemaService *schema_service); void destroy(); @@ -122,7 +148,14 @@ private: * @param[in] arg tenant switch arguments * @return return code */ - int failover_to_primary(const uint64_t tenant_id, const obrpc::ObSwitchTenantArg::OpType &switch_optype); + int failover_to_primary( + const uint64_t tenant_id, + const obrpc::ObSwitchTenantArg::OpType &switch_optype, + const bool is_verify, + const share::ObAllTenantInfo &tenant_info, + share::SCN &switch_scn, + ObTenantRoleTransCostDetail &cost_detail, + ObTenantRoleTransAllLSInfo &all_ls); /** * @description: @@ -141,7 +174,13 @@ private: * @param[in] arg tenant switch arguments which include primary tenant switchover checkpoint * @return return code */ - int switch_to_primary(const uint64_t tenant_id, const obrpc::ObSwitchTenantArg::OpType &switch_optype); + int switch_to_primary( + const uint64_t tenant_id, + const obrpc::ObSwitchTenantArg::OpType &switch_optype, + const bool is_verify, + share::SCN &switch_scn, + ObTenantRoleTransCostDetail &cost_detail, + ObTenantRoleTransAllLSInfo &all_ls); /** * @description: @@ -149,7 +188,14 @@ private: * @param[in] tenant_id the primary tenant id to switch * @return return code */ - int switch_to_standby(const uint64_t tenant_id, const obrpc::ObSwitchTenantArg::OpType &switch_optype); + int switch_to_standby( + const uint64_t tenant_id, + const obrpc::ObSwitchTenantArg::OpType &switch_optype, + const bool is_verify, + share::ObAllTenantInfo &tenant_info, + share::SCN &switch_scn, + ObTenantRoleTransCostDetail &cost_detail, + ObTenantRoleTransAllLSInfo &all_ls); /** * @description: @@ -217,6 +263,12 @@ private: const uint64_t tenant_id, const uint64_t data_version); + int check_if_tenant_status_is_normal_(const uint64_t tenant_id, const RoleTransType op_type); + void tenant_event_start_(const uint64_t switch_tenant_id, const obrpc::ObSwitchTenantArg &arg, + int ret, int64_t begin_ts, const share::ObAllTenantInfo &tenant_info); + void tenant_event_end_(const uint64_t switch_tenant_id, const obrpc::ObSwitchTenantArg &arg, + int ret, int64_t cost, int64_t end_ts, const share::SCN switch_scn, + ObTenantRoleTransCostDetail &cost_detail, ObTenantRoleTransAllLSInfo &all_ls); private: const static int64_t SEC_UNIT = 1000L * 1000L; const static int64_t PRINT_INTERVAL = 10 * 1000 * 1000L; @@ -226,19 +278,19 @@ private: bool inited_; }; -class ObPrimaryStandbyServiceGetter +class ObStandbyServiceGetter { public: - static ObPrimaryStandbyService &get_instance() + static ObStandbyService &get_instance() { - static ObPrimaryStandbyService primary_standby_service; - return primary_standby_service; + static ObStandbyService standby_service; + return standby_service; } }; -#define OB_PRIMARY_STANDBY_SERVICE (oceanbase::standby::ObPrimaryStandbyServiceGetter::get_instance()) +#define OB_STANDBY_SERVICE (oceanbase::standby::ObStandbyServiceGetter::get_instance()) } // end namespace standby } // end namespace oceanbase -#endif // OCEANBASE_STANDBY_OB_PRIMARY_STANDBY_SERVICE_H_ +#endif // OCEANBASE_STANDBY_OB_STANDBY_SERVICE_H_ diff --git a/src/rootserver/ob_tenant_role_transition_service.cpp 
b/src/rootserver/standby/ob_tenant_role_transition_service.cpp similarity index 73% rename from src/rootserver/ob_tenant_role_transition_service.cpp rename to src/rootserver/standby/ob_tenant_role_transition_service.cpp index c02f5a7941..bbc1199636 100644 --- a/src/rootserver/ob_tenant_role_transition_service.cpp +++ b/src/rootserver/standby/ob_tenant_role_transition_service.cpp @@ -18,8 +18,10 @@ #include "rootserver/ob_rs_async_rpc_proxy.h"//ObChangeLSAccessModeProxy #include "lib/oblog/ob_log_module.h"// LOG_* #include "lib/utility/ob_print_utils.h"// TO_STRING_KV +#include "share/ls/ob_ls_operator.h" // ls_status_to_str #include "rootserver/ob_cluster_event.h"// CLUSTER_EVENT_ADD_CONTROL #include "rootserver/ob_rs_event_history_table_operator.h" // ROOTSERVICE_EVENT_ADD +#include "rootserver/ob_tenant_event_def.h" // TENANT_EVENT #include "rootserver/ob_ls_service_helper.h" // ObLSServiceHelper #include "rootserver/ob_empty_server_checker.h" // ObEmptyServerChecker #include "share/ob_rpc_struct.h"//ObLSAccessModeInfo @@ -28,20 +30,29 @@ #include "share/ob_global_stat_proxy.h"//ObGlobalStatProxy #include "share/ob_schema_status_proxy.h"//set_schema_status #include "storage/tx/ob_timestamp_service.h" // ObTimestampService -#include "share/ob_primary_standby_service.h" // ObPrimaryStandbyService +#include "rootserver/standby/ob_standby_service.h" // ObStandbyService #include "share/balance/ob_balance_task_helper_operator.h"//ObBalanceTaskHelper #include "lib/utility/ob_macro_utils.h" #include "lib/ob_errno.h" #include "share/oracle_errno.h"//oracle error code +#include "rootserver/ob_service_name_command.h" namespace oceanbase { using namespace share; using namespace palf; +using namespace common; +using namespace tenant_event; + namespace rootserver { - -#define TENANT_ROLE_TRANS_USER_ERROR \ +/* + The macro's usage scenario: It is used in the process of switchover to primary, + to translate the connection information and certain error checking messages + from the source primary tenant into USER ERROR, + facilitating the troubleshooting of cross-tenant connection issues. 
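+  Usage sketch (illustrative, mirroring the call sites later in this patch): invoke the macro right
+  after a failed query/check against the log restore source, e.g.
+      if (OB_FAIL(wait_tenant_sync_to_latest_until_timeout_(tenant_id_, tenant_info))) {
+        LOG_WARN("fail to execute wait_tenant_sync_to_latest_until_timeout_", KR(ret), K(tenant_info));
+        SOURCE_TENANT_CHECK_USER_ERROR_FOR_SWITCHOVER_TO_PRIMARY;
+      }
+  For a recognized source-side error (e.g. -ER_ACCESS_DENIED_ERROR), ret is rewritten to OB_OP_NOT_ALLOW
+  and the user-visible message reads like
+  "query primary failed(original error code: ...), switchover to primary is not allowed".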
+*/ +#define SOURCE_TENANT_CHECK_USER_ERROR_FOR_SWITCHOVER_TO_PRIMARY \ int tmp_ret = OB_SUCCESS; \ ObSqlString str; \ switch (ret) { \ @@ -57,53 +68,208 @@ namespace rootserver case -ER_ACCESS_DENIED_ERROR: \ case OB_ERR_NO_LOGIN_PRIVILEGE: \ case -OER_INTERNAL_ERROR_CODE: \ - if (OB_TMP_FAIL(str.assign_fmt("query primary failed(original error code: %d), switch to primary", ret))) { \ + if (OB_TMP_FAIL(str.assign_fmt("query primary failed(original error code: %d), switchover to primary is", ret))) { \ LOG_WARN("tenant role trans user error str assign failed"); \ } else { \ ret = OB_OP_NOT_ALLOW; \ LOG_USER_ERROR(OB_OP_NOT_ALLOW, str.ptr()); \ } \ - break; \ + break; \ + case -ER_ACCOUNT_HAS_BEEN_LOCKED: \ + ret = OB_OP_NOT_ALLOW; \ + LOG_USER_ERROR(OB_OP_NOT_ALLOW, "primary tenant's user account is locked, switchover to primary is"); \ + break; \ case OB_ERR_TENANT_IS_LOCKED: \ ret = OB_OP_NOT_ALLOW; \ - LOG_USER_ERROR(OB_OP_NOT_ALLOW, "primary tenant is locked, role transition"); \ - break; \ - case OB_SOURCE_TENANT_STATE_NOT_MATCH: \ - LOG_USER_ERROR(OB_SOURCE_TENANT_STATE_NOT_MATCH); \ + LOG_USER_ERROR(OB_OP_NOT_ALLOW, "primary tenant is locked, switchover to primary is"); \ break; \ case OB_SOURCE_LS_STATE_NOT_MATCH: \ LOG_USER_ERROR(OB_SOURCE_LS_STATE_NOT_MATCH); \ break; \ - default: \ - if (OB_TMP_FAIL(str.assign_fmt("wait tenant sync to latest failed(original error code: %d), switch to primary", ret))){ \ - LOG_WARN("tenant role trans user error str assign failed"); \ - } else { \ - ret = OB_OP_NOT_ALLOW; \ - LOG_USER_ERROR(OB_OP_NOT_ALLOW, str.ptr()); \ - } \ - } \ + } + const char* const ObTenantRoleTransitionConstants::SWITCH_TO_PRIMARY_LOG_MOD_STR = "SWITCH_TO_PRIMARY"; const char* const ObTenantRoleTransitionConstants::SWITCH_TO_STANDBY_LOG_MOD_STR = "SWITCH_TO_STANDBY"; const char* const ObTenantRoleTransitionConstants::RESTORE_TO_STANDBY_LOG_MOD_STR = "RESTORE_TO_STANDBY"; +///////////ObTenantRoleTransCostDetail///////////////// +const char* ObTenantRoleTransCostDetail::type_to_str(CostType type) const +{ + static const char *strs[] = { "WAIT_LOG_SYNC", "WAIT_BALANCE_TASK", "FLASHBACK_LOG", + "WAIT_LOG_END", "CHANGE_ACCESS_MODE" }; + STATIC_ASSERT(MAX_COST_TYPE == ARRAYSIZEOF(strs), "status string array size mismatch"); + const char* str = "UNKNOWN"; + if (type < 0 || type >= MAX_COST_TYPE) { + LOG_WARN_RET(OB_INVALID_ARGUMENT, "invalid type", K(type)); + } else { + str = strs[type]; + } + return str; +} +void ObTenantRoleTransCostDetail::add_cost(CostType type, int64_t cost) +{ + int ret = OB_SUCCESS; + if (OB_UNLIKELY(type < 0 || type >= MAX_COST_TYPE || cost < 0)) { + ret = OB_INVALID_ARGUMENT; + LOG_ERROR("invalid argument", KR(ret), K(type), K(cost)); + } else { + cost_type_[type] = cost; + } +} +int64_t ObTenantRoleTransCostDetail::to_string(char *buf, const int64_t buf_len) const +{ + int64_t pos = 0; + int64_t counted_cost = 0; + for (int i = 0 ; i < MAX_COST_TYPE; i++) { + if (cost_type_[i] > 0) { + counted_cost += cost_type_[i]; + CostType type = static_cast(i); + BUF_PRINTF("%s: %ld, ", type_to_str(type), cost_type_[i]); + } + } + BUF_PRINTF("OTHERS: %ld", end_ - start_ - counted_cost); + return pos; +} + +///////////ObTenantRoleTransAllLSInfo///////////////// +int ObTenantRoleTransAllLSInfo::init() +{ + int ret = OB_SUCCESS; + for (int64_t i = 0; OB_SUCC(ret) && i < ObLSStatus::OB_LS_MAX_STATUS; i++) { + all_ls_[i].reset(); + } + return ret; +} +int ObTenantRoleTransAllLSInfo::add_ls(const ObLSID &ls_id, const ObLSStatus status) +{ + int ret = OB_SUCCESS; + if 
(OB_UNLIKELY(!ls_id.is_valid() || status < 0 || status >= ObLSStatus::OB_LS_MAX_STATUS)) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), K(ls_id), K(status)); + } else if (OB_FAIL(all_ls_[status].push_back(ls_id))) { + LOG_WARN("fail to push back", KR(ret), K(ls_id)); + } + return ret; +} +int64_t ObTenantRoleTransAllLSInfo::to_string(char *buf, const int64_t buf_len) const +{ + int64_t pos = 0; + bool meet_first = false; + for (int64_t i = 0; i < ObLSStatus::OB_LS_MAX_STATUS; i++) { + ObLSStatus status = static_cast<ObLSStatus>(i); + if (all_ls_[i].count() > 0) { + if (!meet_first) { + BUF_PRINTF("%s: ", ls_status_to_str(status)); + meet_first = true; + } else { + BUF_PRINTF("; %s: ", ls_status_to_str(status)); + } + int64_t arr_end = all_ls_[i].count() - 1; + for (int64_t j=0; j < arr_end; j++) { + BUF_PRINTF("%ld, ", all_ls_[i].at(j).id()); + } + BUF_PRINTF("%ld", all_ls_[i].at(arr_end).id()); + } + } + if (!is_valid()) BUF_PRINTF("NULL"); + return pos; +} + +bool ObTenantRoleTransAllLSInfo::is_valid() const { + for (int64_t i = 0; i < ObLSStatus::OB_LS_MAX_STATUS; ++i) { + if (all_ls_[i].count() > 0) { + return true; + } + } + return false; +} + +////////////ObTenantRoleTransNonSyncInfo////////////// +int ObTenantRoleTransNonSyncInfo::init(const ObArray<obrpc::ObCheckpoint> &switchover_checkpoints) +{ + int ret = OB_SUCCESS; + not_sync_checkpoints_.reset(); + is_sync_ = true; + for (int64_t i = 0; OB_SUCC(ret) && i < switchover_checkpoints.count(); i++) { + const obrpc::ObCheckpoint &checkpoint = switchover_checkpoints.at(i); + if (!checkpoint.is_sync_to_latest()) { + is_sync_ = false; + LOG_WARN("ls not sync, keep waiting", KR(ret), K(checkpoint)); + if (OB_FAIL(not_sync_checkpoints_.push_back(checkpoint))) { + LOG_WARN("fail to push back", KR(ret), K(checkpoint), K(not_sync_checkpoints_)); + } + } + } + return ret; +} + +int64_t ObTenantRoleTransNonSyncInfo::to_string(char *buf, const int64_t buf_len) const +{ + int64_t pos = 0; + int64_t ls_num = not_sync_checkpoints_.count(); + BUF_PRINTF("NON_SYNC_LS_CNT: %ld; TOP_%ld: ", ls_num, MAX_PRINT_LS_NUM); + if(ls_num > 0) { + int64_t arr_end = MAX_PRINT_LS_NUM > ls_num ? 
ls_num - 1 : MAX_PRINT_LS_NUM - 1; + for (int64_t i = 0; i < arr_end; i++) { + const obrpc::ObCheckpoint &checkpoint = not_sync_checkpoints_.at(i); + BUF_PRINTO(checkpoint); + J_COMMA(); + } + J_OBJ(not_sync_checkpoints_.at(arr_end)); + } + return pos; +} ////////////ObTenantRoleTransitionService////////////// + +int ObTenantRoleTransitionService::init( + uint64_t tenant_id, + const obrpc::ObSwitchTenantArg::OpType &switch_optype, + const bool is_verify, + common::ObMySQLProxy *sql_proxy, + obrpc::ObSrvRpcProxy *rpc_proxy, + ObTenantRoleTransCostDetail *cost_detail, + ObTenantRoleTransAllLSInfo *all_ls_info) +{ + int ret = OB_SUCCESS; + if (OB_ISNULL(sql_proxy) + || OB_ISNULL(rpc_proxy) + || OB_ISNULL(cost_detail) + || OB_ISNULL(all_ls_info) + || OB_UNLIKELY(!is_valid_tenant_id(tenant_id))) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), KP(sql_proxy), KP(rpc_proxy), K(tenant_id), + K(switch_optype), KP(cost_detail), KP(all_ls_info)); + } else { + sql_proxy_= sql_proxy; + rpc_proxy_ = rpc_proxy; + tenant_id_ = tenant_id; + switch_optype_ = switch_optype; + switchover_epoch_ = OB_INVALID_VERSION; + so_scn_.set_min(); + cost_detail_ = cost_detail; + all_ls_info_ = all_ls_info; + has_restore_source_ = false; + is_verify_ = is_verify; + } + return ret; +} int ObTenantRoleTransitionService::check_inner_stat() { int ret = OB_SUCCESS; - if (OB_ISNULL(sql_proxy_) || OB_ISNULL(rpc_proxy_) || - OB_UNLIKELY(!is_user_tenant(tenant_id_))) { + if (OB_ISNULL(sql_proxy_) || OB_ISNULL(rpc_proxy_) || OB_ISNULL(cost_detail_) || OB_ISNULL(all_ls_info_) + || OB_UNLIKELY(!is_user_tenant(tenant_id_))) { ret = OB_ERR_UNEXPECTED; - LOG_WARN("error unexpected", KR(ret), K(tenant_id_), KP(sql_proxy_), KP(rpc_proxy_)); + LOG_WARN("error unexpected", KR(ret), K(tenant_id_), KP(sql_proxy_), KP(rpc_proxy_), + KP(cost_detail_), KP(all_ls_info_)); } return ret; -} - +} int ObTenantRoleTransitionService::failover_to_primary() { int ret = OB_SUCCESS; - LOG_INFO("[ROLE_TRANSITION] start to failover to primary", KR(ret), K(tenant_id_)); + LOG_INFO("[ROLE_TRANSITION] start to failover to primary", KR(ret), K(is_verify_), K(tenant_id_)); const int64_t start_service_time = ObTimeUtility::current_time(); ObAllTenantInfo tenant_info; if (OB_FAIL(check_inner_stat())) { @@ -118,35 +284,34 @@ int ObTenantRoleTransitionService::failover_to_primary() } else if (OB_UNLIKELY(!tenant_info.get_recovery_until_scn().is_valid_and_not_min())) { ret = OB_OP_NOT_ALLOW; LOG_WARN("invalid recovery_until_scn", KR(ret), K(tenant_info)); - LOG_USER_ERROR(OB_OP_NOT_ALLOW, "recovery_until_scn is invalid, switch to primary"); + TENANT_ROLE_TRANS_USER_ERR_WITH_SUFFIX(OB_OP_NOT_ALLOW, "recovery_until_scn is invalid", switch_optype_); } else if (FALSE_IT(switchover_epoch_ = tenant_info.get_switchover_epoch())) { - } else if ((tenant_info.is_normal_status())) { - //do failover to primary + } else if (tenant_info.is_normal_status()) { if (OB_FAIL(do_failover_to_primary_(tenant_info))) { - LOG_WARN("failed to do failover to primary", KR(ret), K(tenant_info)); + LOG_WARN("fail to do failover to primary", KR(ret), K(tenant_info)); } } else if (tenant_info.is_prepare_flashback_for_failover_to_primary_status() - || tenant_info.is_prepare_flashback_for_switch_to_primary_status()) { + || tenant_info.is_prepare_flashback_for_switch_to_primary_status()) { //prepare flashback - if (OB_FAIL(do_prepare_flashback_(tenant_info))) { - LOG_WARN("failed to prepare flashback", KR(ret), K(tenant_info)); + if (!is_verify_ && 
OB_FAIL(do_prepare_flashback_(tenant_info))) { + LOG_WARN("fail to prepare flashback", KR(ret), K(tenant_info)); } } else if (tenant_info.is_flashback_status()) { - if (OB_FAIL(do_flashback_())) { - LOG_WARN("failed to flashback", KR(ret), K(tenant_info)); + if (!is_verify_ && OB_FAIL(do_flashback_())) { + LOG_WARN("fail to flashback", KR(ret), K(tenant_info)); } } else if (tenant_info.is_switching_to_primary_status()) { - if (OB_FAIL(do_switch_access_mode_to_append(tenant_info, share::PRIMARY_TENANT_ROLE))) { - LOG_WARN("failed to switch access mode", KR(ret), K(tenant_info)); + if (!is_verify_ && OB_FAIL(do_switch_access_mode_to_append(tenant_info, share::PRIMARY_TENANT_ROLE))) { + LOG_WARN("fail to switch access mode", KR(ret), K(tenant_info)); } } else { ret = OB_OP_NOT_ALLOW; - LOG_WARN("switchover status not match", KR(ret), K(tenant_info), K_(tenant_id)); - LOG_USER_ERROR(OB_OP_NOT_ALLOW, "switchover status not match, switch to primary"); + LOG_WARN("switchover status not match", KR(ret), K(is_verify_), K(tenant_info), K_(tenant_id)); + TENANT_ROLE_TRANS_USER_ERR_WITH_SUFFIX(OB_OP_NOT_ALLOW, "switchover status not match", switch_optype_); } if (OB_FAIL(ret)) { - } else { + } else if (!is_verify_) { (void)broadcast_tenant_info(ObTenantRoleTransitionConstants::SWITCH_TO_PRIMARY_LOG_MOD_STR); ObBroadcastSchemaArg arg; arg.tenant_id_ = tenant_id_; @@ -159,15 +324,15 @@ int ObTenantRoleTransitionService::failover_to_primary() } const int64_t cost = ObTimeUtility::current_time() - start_service_time; - LOG_INFO("[ROLE_TRANSITION] finish failover to primary", KR(ret), K(tenant_info), K(cost)); + LOG_INFO("[ROLE_TRANSITION] finish failover to primary", KR(ret), K(tenant_info), K(is_verify_), K(cost)); return ret; } - ERRSIM_POINT_DEF(ERRSIM_TENANT_ROLE_TRANS_WAIT_SYNC_ERROR); int ObTenantRoleTransitionService::do_failover_to_primary_(const share::ObAllTenantInfo &tenant_info) { int ret = OB_SUCCESS; ObAllTenantInfo new_tenant_info; + (void) new_tenant_info.assign(tenant_info); if (OB_FAIL(check_inner_stat())) { LOG_WARN("error unexpected", KR(ret), K(tenant_id_), KP(sql_proxy_), KP(rpc_proxy_)); } else if (OB_UNLIKELY(obrpc::ObSwitchTenantArg::OpType::SWITCH_TO_PRIMARY != switch_optype_ @@ -176,32 +341,42 @@ int ObTenantRoleTransitionService::do_failover_to_primary_(const share::ObAllTen LOG_WARN("unexpected switch tenant action", KR(ret), K_(switch_optype), K(tenant_info), K_(tenant_id)); } else if (OB_UNLIKELY(!(tenant_info.is_normal_status()) - || tenant_info.is_primary() - || switchover_epoch_ != tenant_info.get_switchover_epoch())) { + || tenant_info.is_primary() + || switchover_epoch_ != tenant_info.get_switchover_epoch())) { ret = OB_INVALID_ARGUMENT; LOG_WARN("tenant switchover status not valid", KR(ret), K(tenant_info), K(switchover_epoch_)); } else if (obrpc::ObSwitchTenantArg::OpType::SWITCH_TO_PRIMARY == switch_optype_ - && OB_FAIL(wait_tenant_sync_to_latest_until_timeout_(tenant_id_, tenant_info))) { - LOG_WARN("fail to wait_tenant_sync_to_latest_until_timeout_", KR(ret), K_(tenant_id), K(tenant_info)); - TENANT_ROLE_TRANS_USER_ERROR; - } else if (OB_SUCC(ret) && ERRSIM_TENANT_ROLE_TRANS_WAIT_SYNC_ERROR) { - ret = ERRSIM_TENANT_ROLE_TRANS_WAIT_SYNC_ERROR; - TENANT_ROLE_TRANS_USER_ERROR; - LOG_WARN("errsim wait_tenant_sync_to_latest_until_timeout", K(ret)); + && OB_FAIL(wait_sys_ls_sync_to_latest_until_timeout_(tenant_id_, new_tenant_info))) { + LOG_WARN("fail to execute wait_sys_ls_sync_to_latest_until_timeout_", KR(ret), K_(tenant_id), K(new_tenant_info)); + 
SOURCE_TENANT_CHECK_USER_ERROR_FOR_SWITCHOVER_TO_PRIMARY; + } + /*The switchover to primary verify command ends here. + This command cannot update the switchover status nor execute the further logic. + We update the switchover status right after sys ls being synced, The reason is as follows: + The tenant fetches log with reference to tenant_sync_scn + 3s. + If two ls' sync_scn have an extremely large difference, + e.g. tenant_sync_scn = ls_1001 sync_scn + 3s << ls_1002 sync_scn, + there is a possibility that ls_1002's log cannot be fetched completely. + To ensure all ls' log are fetched completely, we update the switchover status as PREPARE_xxx. + Then the tenant fetching log will no longer utilize tenant_sync_scn + 3s as a reference point. + **/ + if (OB_FAIL(ret) || is_verify_) { + } else if (OB_FAIL(clear_service_name_())) { + LOG_WARN("fail to execute clear_service_name", KR(ret), K(tenant_id_)); } else if (OB_FAIL(ObAllTenantInfoProxy::update_tenant_role( - tenant_id_, sql_proxy_, tenant_info.get_switchover_epoch(), - share::STANDBY_TENANT_ROLE, tenant_info.get_switchover_status(), - obrpc::ObSwitchTenantArg::OpType::SWITCH_TO_PRIMARY == switch_optype_ - ? share::PREPARE_FLASHBACK_FOR_SWITCH_TO_PRIMARY_SWITCHOVER_STATUS - : share::PREPARE_FLASHBACK_FOR_FAILOVER_TO_PRIMARY_SWITCHOVER_STATUS, - switchover_epoch_))) { + tenant_id_, sql_proxy_, tenant_info.get_switchover_epoch(), + share::STANDBY_TENANT_ROLE, tenant_info.get_switchover_status(), + obrpc::ObSwitchTenantArg::OpType::SWITCH_TO_PRIMARY == switch_optype_ ? + share::PREPARE_FLASHBACK_FOR_SWITCH_TO_PRIMARY_SWITCHOVER_STATUS : + share::PREPARE_FLASHBACK_FOR_FAILOVER_TO_PRIMARY_SWITCHOVER_STATUS, + switchover_epoch_))) { LOG_WARN("failed to update tenant role", KR(ret), K(tenant_id_), K(tenant_info)); } else if (OB_FAIL(ObAllTenantInfoProxy::load_tenant_info(tenant_id_, sql_proxy_, - false, new_tenant_info))) { + false, new_tenant_info))) { LOG_WARN("failed to load tenant info", KR(ret), K(tenant_id_)); } else if (OB_UNLIKELY(new_tenant_info.get_switchover_epoch() != switchover_epoch_)) { ret = OB_NEED_RETRY; - LOG_WARN("switchover is concurrency", KR(ret), K(switchover_epoch_), K(new_tenant_info)); + LOG_WARN("switchover is concurrency", KR(ret), K(switchover_epoch_), K(new_tenant_info)); } else if (OB_FAIL(do_prepare_flashback_(new_tenant_info))) { LOG_WARN("failed to prepare flashback", KR(ret), K(new_tenant_info)); } @@ -221,8 +396,8 @@ int ObTenantRoleTransitionService::do_prepare_flashback_(share::ObAllTenantInfo } else if (OB_UNLIKELY(!(tenant_info.is_prepare_flashback_for_failover_to_primary_status() || tenant_info.is_prepare_flashback_for_switch_to_primary_status()))) { ret = OB_OP_NOT_ALLOW; - LOG_WARN("switchover status not match, switch tenant not allow", KR(ret), K(tenant_info)); - LOG_USER_ERROR(OB_OP_NOT_ALLOW, "switchover status not match, switchover tenant"); + LOG_WARN("switchover status not match, switch tenant is not allowed", KR(ret), K(tenant_info)); + TENANT_ROLE_TRANS_USER_ERR_WITH_SUFFIX(OB_OP_NOT_ALLOW, "switchover status not match", switch_optype_); } else if (OB_UNLIKELY(switchover_epoch_ != tenant_info.get_switchover_epoch())) { ret = OB_INVALID_ARGUMENT; LOG_WARN("tenant switchover status not valid", KR(ret), K(tenant_info), K(switchover_epoch_)); @@ -231,7 +406,11 @@ int ObTenantRoleTransitionService::do_prepare_flashback_(share::ObAllTenantInfo LOG_WARN("failed to do_prepare_flashback_for_switch_to_primary_", KR(ret), K(tenant_info)); } } else if 
(obrpc::ObSwitchTenantArg::OpType::FAILOVER_TO_PRIMARY == switch_optype_) { - if (OB_FAIL(do_prepare_flashback_for_failover_to_primary_(tenant_info))) { + if (OB_FAIL(double_check_service_name_(tenant_info))) { + // do double check here + // so_status is not normal, service name related commands is not allowed + LOG_WARN("fail to execute double_check_service_name_", KR(ret), K(tenant_info)); + } else if (OB_FAIL(do_prepare_flashback_for_failover_to_primary_(tenant_info))) { LOG_WARN("failed to do_prepare_flashback_for_failover_to_primary_", KR(ret), K(tenant_info)); } } else { @@ -248,14 +427,12 @@ int ObTenantRoleTransitionService::do_prepare_flashback_(share::ObAllTenantInfo } return ret; } - int ObTenantRoleTransitionService::do_prepare_flashback_for_switch_to_primary_( share::ObAllTenantInfo &tenant_info) { int ret = OB_SUCCESS; - + ObLSStatusOperator status_op; DEBUG_SYNC(PREPARE_FLASHBACK_FOR_SWITCH_TO_PRIMARY); - LOG_INFO("start to do_prepare_flashback_for_switch_to_primary_", KR(ret), K_(tenant_id)); if (OB_FAIL(check_inner_stat())) { @@ -263,11 +440,28 @@ int ObTenantRoleTransitionService::do_prepare_flashback_for_switch_to_primary_( } else if (OB_UNLIKELY(!tenant_info.is_prepare_flashback_for_switch_to_primary_status())) { ret = OB_OP_NOT_ALLOW; LOG_WARN("switchover status not match, switch to primary not allow", KR(ret), K(tenant_info)); - LOG_USER_ERROR(OB_OP_NOT_ALLOW, "switchover status not match, switchover to primary"); + TENANT_ROLE_TRANS_USER_ERR_WITH_SUFFIX(OB_OP_NOT_ALLOW, "switchover status not match", switch_optype_); + } else if (OB_UNLIKELY(obrpc::ObSwitchTenantArg::OpType::SWITCH_TO_PRIMARY != switch_optype_)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("operation type is not SWITCH_TO_PRIMARY", KR(ret), K(switch_optype_)); } else if (OB_UNLIKELY(switchover_epoch_ != tenant_info.get_switchover_epoch())) { ret = OB_INVALID_ARGUMENT; LOG_WARN("tenant switchover status not valid", KR(ret), K(tenant_info), K_(switchover_epoch)); - } else if (OB_FAIL(wait_ls_balance_task_finish_())) { + } else if (OB_FAIL(status_op.create_abort_ls_in_switch_tenant( + tenant_id_, tenant_info.get_switchover_status(), + tenant_info.get_switchover_epoch(), *sql_proxy_))) { + // SYS LS has been synced, all current CTREATING/CTREATED LS cannot become NORMAL + // These LS should become CREATE_ABORT, otherwise tenant cannot be synced + LOG_WARN("failed to create abort ls", KR(ret), K_(tenant_id), K(tenant_info)); + } else if (OB_FAIL(wait_tenant_sync_to_latest_until_timeout_(tenant_id_, tenant_info))) { + LOG_WARN("fail to execute wait_tenant_sync_to_latest_until_timeout_", KR(ret), K(tenant_info)); + SOURCE_TENANT_CHECK_USER_ERROR_FOR_SWITCHOVER_TO_PRIMARY; + } else if (OB_SUCC(ret) && ERRSIM_TENANT_ROLE_TRANS_WAIT_SYNC_ERROR) { + ret = ERRSIM_TENANT_ROLE_TRANS_WAIT_SYNC_ERROR; + SOURCE_TENANT_CHECK_USER_ERROR_FOR_SWITCHOVER_TO_PRIMARY; + LOG_WARN("errsim wait_tenant_sync_to_latest_until_timeout", K(ret)); + } + if (FAILEDx(wait_ls_balance_task_finish_())) { LOG_WARN("failed to wait ls balance task finish", KR(ret)); } else if (OB_FAIL(switchover_update_tenant_status(tenant_id_, true /* switch_to_primary */, @@ -278,7 +472,6 @@ int ObTenantRoleTransitionService::do_prepare_flashback_for_switch_to_primary_( tenant_info))) { LOG_WARN("failed to switchover_update_tenant_status", KR(ret), K_(tenant_id), K(tenant_info)); } - return ret; } @@ -292,11 +485,11 @@ int ObTenantRoleTransitionService::do_prepare_flashback_for_failover_to_primary_ } else if 
(OB_UNLIKELY(!tenant_info.is_prepare_flashback_for_failover_to_primary_status())) { ret = OB_OP_NOT_ALLOW; LOG_WARN("switchover status not match, failover to primary not allow", KR(ret), K(tenant_info)); - LOG_USER_ERROR(OB_OP_NOT_ALLOW, "switchover status not match, failover to primary"); + TENANT_ROLE_TRANS_USER_ERR_WITH_SUFFIX(OB_OP_NOT_ALLOW, "switchover status not match", switch_optype_); } else if (OB_UNLIKELY(switchover_epoch_ != tenant_info.get_switchover_epoch())) { ret = OB_INVALID_ARGUMENT; LOG_WARN("tenant switchover status not valid", KR(ret), K(tenant_info), K_(switchover_epoch)); - } else if (OB_FAIL(OB_PRIMARY_STANDBY_SERVICE.do_recover_tenant(tenant_id_, + } else if (OB_FAIL(OB_STANDBY_SERVICE.do_recover_tenant(tenant_id_, share::PREPARE_FLASHBACK_FOR_FAILOVER_TO_PRIMARY_SWITCHOVER_STATUS, obrpc::ObRecoverTenantArg::RecoverType::CANCEL, SCN::min_scn()))) { @@ -321,21 +514,58 @@ int ObTenantRoleTransitionService::do_prepare_flashback_for_failover_to_primary_ return ret; } -int ObTenantRoleTransitionService::do_switch_access_mode_to_flashback( - const share::ObAllTenantInfo &tenant_info) +int ObTenantRoleTransitionService::clear_service_name_() { int ret = OB_SUCCESS; - if (OB_FAIL(check_inner_stat())) { - LOG_WARN("error unexpected", KR(ret), K(tenant_id_), KP(sql_proxy_), KP(rpc_proxy_)); - } else if (OB_UNLIKELY(!tenant_info.is_flashback_status() - || switchover_epoch_ != tenant_info.get_switchover_epoch())) { - ret = OB_INVALID_ARGUMENT; - LOG_WARN("tenant switchover status not valid", KR(ret), K(tenant_info), K(switchover_epoch_)); - } else if (OB_FAIL(get_all_ls_status_and_change_access_mode_( - palf::AccessMode::FLASHBACK, - SCN::base_scn(), - SCN::min_scn()))) { - LOG_WARN("fail to execute get_all_ls_status_and_change_access_mode_", KR(ret), K(tenant_info)); + ObArray all_service_names; + int64_t epoch = 0; + if (obrpc::ObSwitchTenantArg::OpType::FAILOVER_TO_PRIMARY != switch_optype_) { + // do nothing + } else if (OB_FAIL(ObServiceNameProxy::check_is_service_name_enabled(tenant_id_))) { + if (OB_NOT_SUPPORTED == ret) { + ret = OB_SUCCESS; + } + LOG_WARN("service_name is not enabled, no need to execute clear_service_name", KR(ret), K(tenant_id_)); + } else if (OB_ISNULL(GCTX.sql_proxy_)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("GCTX.sql_proxy_ is null", KR(ret), KP(GCTX.sql_proxy_)); + } else if (OB_FAIL(ObServiceNameProxy::select_all_service_names_with_epoch(tenant_id_, epoch, all_service_names))) { + LOG_WARN("fail to execute select_all_service_names_with_epoch", KR(ret), K(tenant_id_)); + } else { + for (int64_t i = 0; OB_SUCC(ret) && i < all_service_names.count(); i++) { + const ObServiceName &service_name = all_service_names.at(i); + if (OB_FAIL(ObServiceNameCommand::stop_service(tenant_id_, service_name.get_service_name_str()))) { + LOG_WARN("fail to execute stop_service", KR(ret), K(tenant_id_), K(service_name)); + } else if (OB_FAIL(ObServiceNameCommand::delete_service(tenant_id_, service_name.get_service_name_str()))) { + LOG_WARN("fail to execute delete_service", KR(ret), K(tenant_id_), K(service_name)); + } + } + } + return ret; +} +int ObTenantRoleTransitionService::double_check_service_name_(const share::ObAllTenantInfo &tenant_info) +{ + int ret = OB_SUCCESS; + ObArray all_service_names; + int64_t service_num = 0; + if (obrpc::ObSwitchTenantArg::OpType::FAILOVER_TO_PRIMARY != switch_optype_) { + // do nothing + } else if (OB_FAIL(ObServiceNameProxy::check_is_service_name_enabled(tenant_id_))) { + if (OB_NOT_SUPPORTED == ret) { + ret = OB_SUCCESS; + } + 
LOG_WARN("service_name is not enabled, no need to execute double_check_service_name_", KR(ret), K(tenant_id_)); + } else if (OB_UNLIKELY(tenant_info.is_normal_status())) { + ret = OB_OP_NOT_ALLOW; + LOG_WARN("not allowed to do double check when switchover status is NORMAL", KR(ret), K(tenant_info)); + } else if (OB_ISNULL(GCTX.sql_proxy_)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("GCTX.sql_proxy_ is null", KR(ret), KP(GCTX.sql_proxy_)); + } else if (OB_FAIL(ObServiceNameProxy::get_tenant_service_name_num(*GCTX.sql_proxy_, tenant_id_, service_num))) { + LOG_WARN("fail to execute get_tenant_service_name_num", KR(ret), K(tenant_id_)); + } else if (OB_UNLIKELY(0 != service_num)) { + ret = OB_NEED_RETRY; + LOG_WARN("the tenant should have zero service_name", KR(ret), K(service_num)); } return ret; } @@ -347,7 +577,6 @@ int ObTenantRoleTransitionService::do_flashback_() logservice::ObLogService *log_service = NULL; ObLSStatusOperator status_op; ObAllTenantInfo tenant_info; - if (OB_FAIL(check_inner_stat())) { LOG_WARN("error unexpected", KR(ret), K(tenant_id_), KP(sql_proxy_), KP(rpc_proxy_)); } else if (OB_FAIL(ObAllTenantInfoProxy::load_tenant_info( @@ -358,9 +587,10 @@ int ObTenantRoleTransitionService::do_flashback_() tenant_info.get_switchover_epoch())) { ret = OB_INVALID_ARGUMENT; LOG_WARN("tenant switchover status not valid", KR(ret), K(tenant_info), K(switchover_epoch_)); - } else if (OB_FAIL(status_op.create_abort_ls_in_switch_tenant( - tenant_id_, tenant_info.get_switchover_status(), - tenant_info.get_switchover_epoch(), *sql_proxy_))) { + } else if (obrpc::ObSwitchTenantArg::OpType::FAILOVER_TO_PRIMARY == switch_optype_ + && OB_FAIL(status_op.create_abort_ls_in_switch_tenant( + tenant_id_, tenant_info.get_switchover_status(), + tenant_info.get_switchover_epoch(), *sql_proxy_))) { LOG_WARN("failed to create abort ls", KR(ret), K_(tenant_id), K(tenant_info)); } else if (OB_FAIL(ObShareUtil::set_default_timeout_ctx( ctx, GCONF.internal_sql_execute_timeout))) { @@ -368,15 +598,23 @@ int ObTenantRoleTransitionService::do_flashback_() } else if (OB_ISNULL(log_service = MTL(logservice::ObLogService *))) { ret = OB_ERR_UNEXPECTED; LOG_ERROR("failed to get MTL log_service", KR(ret), K(tenant_id_)); - } else if (OB_FAIL(log_service->flashback( - tenant_id_, tenant_info.get_sync_scn(), ctx.get_timeout()))) { - LOG_WARN("failed to flashback", KR(ret), K(tenant_id_), K(tenant_info)); + } else { + int64_t begin_time = ObTimeUtility::current_time(); + if (OB_FAIL(log_service->flashback(tenant_id_, tenant_info.get_sync_scn(), ctx.get_timeout()))) { + LOG_WARN("failed to flashback", KR(ret), K(tenant_id_), K(tenant_info)); + } + int64_t log_flashback = ObTimeUtility::current_time() - begin_time; + if (OB_LIKELY(NULL != cost_detail_)) { + (void) cost_detail_->add_cost(ObTenantRoleTransCostDetail::LOG_FLASHBACK, log_flashback); + } + } + + if (OB_FAIL(ret)) { } else { CLUSTER_EVENT_ADD_LOG(ret, "flashback end", - "tenant id", tenant_id_, - "switchover#", tenant_info.get_switchover_epoch(), - "flashback_scn#", tenant_info.get_sync_scn()); - + "tenant id", tenant_id_, + "switchover#", tenant_info.get_switchover_epoch(), + "flashback_scn#", tenant_info.get_sync_scn()); ObAllTenantInfo new_tenant_info; if (OB_FAIL(ObAllTenantInfoProxy::update_tenant_switchover_status( tenant_id_, sql_proxy_, tenant_info.get_switchover_epoch(), @@ -400,6 +638,7 @@ int ObTenantRoleTransitionService::do_switch_access_mode_to_append( const share::ObTenantRole &target_tenant_role) { int ret = OB_SUCCESS; + int64_t begin_time = 
ObTimeUtility::current_time(); palf::AccessMode access_mode = logservice::ObLogService::get_palf_access_mode(target_tenant_role); SCN ref_scn; if (OB_FAIL(check_inner_stat())) { @@ -430,13 +669,14 @@ int ObTenantRoleTransitionService::do_switch_access_mode_to_append( } else if (OB_UNLIKELY(tenant_info.get_switchover_status() != cur_tenant_info.get_switchover_status() || tenant_info.get_switchover_epoch() != cur_tenant_info.get_switchover_epoch())) { ret = OB_OP_NOT_ALLOW; - LOG_WARN("Tenant status changed by concurrent operation, switch to primary not allowed", - KR(ret), K(tenant_info), K(cur_tenant_info)); - LOG_USER_ERROR(OB_OP_NOT_ALLOW, "tenant status changed by concurrent operation, switch to primary"); + LOG_WARN("Tenant status changed by concurrent operation, switch to primary is not allowed", + KR(ret), K(tenant_info), K(cur_tenant_info)); + TENANT_ROLE_TRANS_USER_ERR_WITH_SUFFIX(OB_OP_NOT_ALLOW, + "tenant status changed by concurrent operation", switch_optype_); } else if (OB_FAIL(ObAllTenantInfoProxy::update_tenant_role_in_trans( - tenant_id_, trans, tenant_info.get_switchover_epoch(), - share::PRIMARY_TENANT_ROLE, tenant_info.get_switchover_status(), - share::NORMAL_SWITCHOVER_STATUS, switchover_epoch_))) { + tenant_id_, trans, tenant_info.get_switchover_epoch(), + share::PRIMARY_TENANT_ROLE, tenant_info.get_switchover_status(), + share::NORMAL_SWITCHOVER_STATUS, switchover_epoch_))) { LOG_WARN("failed to update tenant switchover status", KR(ret), K(tenant_id_), K(tenant_info), K(cur_tenant_info)); } else if (cur_tenant_info.get_recovery_until_scn().is_max()) { LOG_INFO("recovery_until_scn already is max_scn", KR(ret), K_(tenant_id), K(cur_tenant_info)); @@ -452,6 +692,10 @@ int ObTenantRoleTransitionService::do_switch_access_mode_to_append( } } } + if (OB_LIKELY(NULL != cost_detail_)) { + int64_t log_mode_change = ObTimeUtility::current_time() - begin_time; + (void) cost_detail_->add_cost(ObTenantRoleTransCostDetail::CHANGE_ACCESS_MODE, log_mode_change); + } return ret; } @@ -502,7 +746,8 @@ int ObTenantRoleTransitionService::get_tenant_ref_scn_(const share::SCN &sync_sc int ObTenantRoleTransitionService::wait_ls_balance_task_finish_() { int ret = OB_SUCCESS; - + int64_t begin_time = ObTimeUtility::current_time(); + uint64_t compat_version = 0; if (OB_FAIL(check_inner_stat())) { LOG_WARN("error unexpected", KR(ret), K(tenant_id_), KP(sql_proxy_), KP(rpc_proxy_)); } else { @@ -556,7 +801,7 @@ int ObTenantRoleTransitionService::wait_ls_balance_task_finish_() } else if (OB_FAIL(ObAllTenantInfoProxy::load_tenant_info( tenant_id_, sql_proxy_, false, cur_tenant_info))) { LOG_WARN("failed to load tenant info", KR(ret), K(tenant_id_)); - } else if (cur_tenant_info.get_sync_scn() == cur_tenant_info.get_standby_scn()) { + } else if (cur_tenant_info.get_sync_scn() == cur_tenant_info.get_readable_scn()) { is_finish = true; for (int64_t i = 0; OB_SUCC(ret) && i < balance_task_array.count() && !is_finish; ++i) { const ObBalanceTask &task = balance_task_array.at(i); @@ -593,6 +838,10 @@ int ObTenantRoleTransitionService::wait_ls_balance_task_finish_() } } } + if (OB_LIKELY(NULL != cost_detail_)) { + int64_t wait_balance_task = ObTimeUtility::current_time() - begin_time; + (void) cost_detail_->add_cost(ObTenantRoleTransCostDetail::WAIT_BALANCE_TASK, wait_balance_task); + } return ret; } @@ -638,6 +887,7 @@ int ObTenantRoleTransitionService::do_switch_access_mode_to_raw_rw( const share::ObAllTenantInfo &tenant_info) { int ret = OB_SUCCESS; + int64_t begin_time = ObTimeUtility::current_time(); 
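+  // begin_time is recorded so that, at the end of this function, the CHANGE_ACCESS_MODE cost can be
+  // derived as the total elapsed time minus get_wait_log_end(); the WAIT_LOG_END share collected in
+  // do_change_ls_access_mode_ is therefore not double-counted in cost_detail_.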
palf::AccessMode target_access_mode = logservice::ObLogService::get_palf_access_mode(STANDBY_TENANT_ROLE); ObLSStatusOperator status_op; share::ObLSStatusInfoArray sys_info_array; @@ -680,6 +930,12 @@ int ObTenantRoleTransitionService::do_switch_access_mode_to_raw_rw( LOG_WARN("fail to execute get_all_ls_status_and_change_access_mode_", KR(ret), K(tenant_info), K(target_access_mode), K(sys_ls_sync_scn)); } + + if (OB_LIKELY(NULL != cost_detail_)) { + int64_t log_mode_change = ObTimeUtility::current_time() - begin_time; + int64_t change_access_mode = log_mode_change - cost_detail_->get_wait_log_end(); + (void) cost_detail_->add_cost(ObTenantRoleTransCostDetail::CHANGE_ACCESS_MODE, change_access_mode); + } DEBUG_SYNC(AFTER_CHANGE_ACCESS_MODE); return ret; } @@ -690,6 +946,7 @@ int ObTenantRoleTransitionService::get_all_ls_status_and_change_access_mode_( const share::SCN &sys_ls_sync_scn) { int ret = OB_SUCCESS; + int tmp_ret = OB_SUCCESS; ObLSStatusOperator status_op; share::ObLSStatusInfoArray status_info_array; if (OB_FAIL(check_inner_stat())) { @@ -705,6 +962,9 @@ int ObTenantRoleTransitionService::get_all_ls_status_and_change_access_mode_( LOG_WARN("fail to execute change_ls_access_mode_", KR(ret), K(status_info_array), K(target_access_mode), K(ref_scn), K(sys_ls_sync_scn)); } + if (OB_TMP_FAIL(ls_status_stats_when_change_access_mode_(status_info_array))) { + LOG_WARN("fail to gather ls status", KR(ret), KR(tmp_ret), K(status_info_array)); + } return ret; } @@ -784,6 +1044,26 @@ int ObTenantRoleTransitionService::change_ls_access_mode_( return ret; } +int ObTenantRoleTransitionService::ls_status_stats_when_change_access_mode_( + const share::ObLSStatusInfoArray &status_info_array) +{ + int ret = OB_SUCCESS; + if (OB_FAIL(check_inner_stat())) { + LOG_WARN("error unexpected", KR(ret), K(tenant_id_), KP(sql_proxy_), KP(rpc_proxy_)); + } else { + for (int64_t i = 0; OB_SUCC(ret) && i < status_info_array.count(); ++i) { + const ObLSStatusInfo &status = status_info_array.at(i); + const ObLSID &ls_id = status.get_ls_id(); + const ObLSStatus &ls_status = status.get_status(); + if (OB_FAIL(all_ls_info_->add_ls(ls_id, ls_status))) { + LOG_WARN("fail to push back", KR(ret), K(ls_id), KPC(all_ls_info_)); + } + } + FLOG_INFO("gather ls_status_stats", KR(ret), K(all_ls_info_), K(status_info_array)); + } + return ret; +} + template int do_nonblock_renew(const ARRAY_L &array_l, const ARRAY_R &array_r, const uint64_t tenant_id) { @@ -970,9 +1250,10 @@ int ObTenantRoleTransitionService::do_change_ls_access_mode_( LOG_WARN("rpc count not equal to result count", KR(ret), K(rpc_count), K(return_code_array.count())); } else { + int64_t ls_wait_sync_scn_max = 0; for (int64_t i = 0; OB_SUCC(ret) && i < return_code_array.count(); ++i) { ret = return_code_array.at(i); - const auto *result = proxy.get_results().at(i); + const obrpc::ObChangeLSAccessModeRes *result = proxy.get_results().at(i); const obrpc::ObLSAccessModeInfo &info = ls_access_info.at(i); if (OB_FAIL(ret)) { LOG_WARN("send rpc is failed", KR(ret), K(i)); @@ -984,10 +1265,17 @@ int ObTenantRoleTransitionService::do_change_ls_access_mode_( } else if (OB_TMP_FAIL(success_ls_ids.push_back(result->get_ls_id()))) { LOG_WARN("fail to push back", KR(ret), KR(tmp_ret), K(success_ls_ids), K(result)); } - + if (OB_FAIL(ret)) { + } else { + int64_t wait_scn_t = result->get_wait_sync_scn_cost(); + ls_wait_sync_scn_max = + ls_wait_sync_scn_max > wait_scn_t ? 
ls_wait_sync_scn_max : wait_scn_t; + } LOG_INFO("[ROLE_TRANSITION] change ls access mode", KR(ret), K(info), KPC(result), K(proxy.get_dests())); }// end for - + if (OB_LIKELY(NULL != cost_detail_)) { + (void) cost_detail_->add_cost(ObTenantRoleTransCostDetail::WAIT_LOG_END, ls_wait_sync_scn_max); + } if (OB_FAIL(ret)) { if (OB_TMP_FAIL(do_nonblock_renew(ls_access_info, success_ls_ids, tenant_id_))) { LOG_WARN("failed to renew location", KR(ret), KR(tmp_ret), K(tenant_id_), K(ls_access_info), K(success_ls_ids)); @@ -1066,6 +1354,8 @@ int ObTenantRoleTransitionService::switchover_update_tenant_status( share::ObLSStatusInfoArray status_info_array; common::ObArray switchover_checkpoints; bool is_sync_to_latest = false; + SCN final_sync_scn; + final_sync_scn.set_min(); if (OB_UNLIKELY(!is_user_tenant(tenant_id) || !new_role.is_valid() @@ -1108,8 +1398,7 @@ int ObTenantRoleTransitionService::switchover_update_tenant_status( * Because replayable point is not support, set replayable_scn = sync_scn */ const SCN gts_upper_limit = transaction::ObTimestampService::get_sts_start_scn(max_sys_ls_sync_scn); - - const SCN final_sync_scn = MAX(max_checkpoint_scn, gts_upper_limit); + final_sync_scn = MAX(max_checkpoint_scn, gts_upper_limit); const SCN final_replayable_scn = final_sync_scn; SCN final_readable_scn = SCN::min_scn(); SCN final_recovery_until_scn = SCN::min_scn(); @@ -1131,7 +1420,7 @@ int ObTenantRoleTransitionService::switchover_update_tenant_status( if (switch_to_primary) { // switch_to_primary // Does not change STS - final_readable_scn = tmp_tenant_info.get_standby_scn(); + final_readable_scn = tmp_tenant_info.get_readable_scn(); // To prevent unexpected sync log, set recovery_until_scn = sync_scn final_recovery_until_scn = final_sync_scn; } else { @@ -1145,7 +1434,8 @@ int ObTenantRoleTransitionService::switchover_update_tenant_status( ret = OB_OP_NOT_ALLOW; LOG_WARN("recovery_until_scn is not max_scn ", KR(ret), K(tenant_id), K(final_recovery_until_scn), K(tmp_tenant_info)); - LOG_USER_ERROR(OB_OP_NOT_ALLOW, "recovery_until_scn is not max_scn, switchover to standby"); + TENANT_ROLE_TRANS_USER_ERR_WITH_SUFFIX(OB_OP_NOT_ALLOW, + "recovery_until_scn is not max_scn", switch_optype_); } } } @@ -1177,24 +1467,85 @@ int ObTenantRoleTransitionService::switchover_update_tenant_status( ret = OB_SUCC(ret) ? 
temp_ret : ret; } } - + if (OB_SUCC(ret)) { + so_scn_ = final_sync_scn; + } CLUSTER_EVENT_ADD_LOG(ret, "update tenant status", - "tenant id", tenant_id, - "old switchover#", old_switchover_epoch, - "new switchover#", new_tenant_info.get_switchover_epoch(), - K(new_role), K(old_status), K(new_status)); + "tenant id", tenant_id, + "old switchover#", old_switchover_epoch, + "new switchover#", new_tenant_info.get_switchover_epoch(), + K(new_role), K(old_status), K(new_status)); + return ret; +} + +int ObTenantRoleTransitionService::wait_sys_ls_sync_to_latest_until_timeout_( + const uint64_t tenant_id, + ObAllTenantInfo &tenant_info) +{ + int ret = OB_SUCCESS; + bool only_check_sys_ls = true; + if (OB_FAIL(check_inner_stat())) { + LOG_WARN("inner stat error", KR(ret)); + } else if (obrpc::ObSwitchTenantArg::OpType::SWITCH_TO_PRIMARY != switch_optype_) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("only switchover to primary can execute this logic", KR(ret), K(tenant_id), K(switch_optype_)); + } else if (OB_UNLIKELY(!tenant_info.is_normal_status())) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("tenant switchover status is not normal", KR(ret), K(tenant_id), K(tenant_info)); + } else if (OB_FAIL(check_restore_source_for_switchover_to_primary_(tenant_id_))) { + LOG_WARN("fail to check restore source", KR(ret), K_(tenant_id)); + } else if (!has_restore_source_) { + LOG_INFO("no restore source", K(tenant_id), K(tenant_info)); + } else if (OB_FAIL(check_sync_to_latest_do_while_(tenant_info, only_check_sys_ls))) { + LOG_WARN("fail to check whether sys ls is synced", KR(ret), K(tenant_info)); + } return ret; } int ObTenantRoleTransitionService::wait_tenant_sync_to_latest_until_timeout_( - const uint64_t tenant_id, - const ObAllTenantInfo &tenant_info) + const uint64_t tenant_id, + const ObAllTenantInfo &tenant_info) +{ + int ret = OB_SUCCESS; + bool only_check_sys_ls = false; + int64_t begin_time = ObTimeUtility::current_time(); + if (OB_FAIL(check_inner_stat())) { + LOG_WARN("inner stat error", KR(ret)); + } else if (obrpc::ObSwitchTenantArg::OpType::SWITCH_TO_PRIMARY != switch_optype_) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("only switchover to primary can execute this logic", KR(ret), K(tenant_id), K(switch_optype_)); + } else if (OB_UNLIKELY(!tenant_info.is_prepare_flashback_for_switch_to_primary_status())) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("tenant switchover status is not prepare", KR(ret), K(tenant_id), K(tenant_info)); + } else if (OB_FAIL(check_restore_source_for_switchover_to_primary_(tenant_id_))) { + LOG_WARN("fail to check restore source", KR(ret), K_(tenant_id)); + } else if (!has_restore_source_) { + LOG_INFO("no restore source", K(tenant_id), K(tenant_info)); + } else if (OB_FAIL(check_sync_to_latest_do_while_(tenant_info, only_check_sys_ls))) { + LOG_WARN("fail to check whether all ls are synced", KR(ret), K(tenant_id), K(tenant_info)); + } + int64_t wait_log_sync = ObTimeUtility::current_time() - begin_time; + LOG_INFO("wait tenant sync to latest", KR(ret), K(has_restore_source_), K(wait_log_sync)); + if (OB_LIKELY(NULL != cost_detail_)) { + (void) cost_detail_->add_cost(ObTenantRoleTransCostDetail::WAIT_LOG_SYNC, wait_log_sync); + } + CLUSTER_EVENT_ADD_LOG(ret, "wait sync to latest end", + "tenant id", tenant_id, + "switchover#", tenant_info.get_switchover_epoch(), + "finished", OB_SUCC(ret) ? 
"yes" : "no", + "cost sec", wait_log_sync / SEC_UNIT); + return ret; +} + +int ObTenantRoleTransitionService::check_restore_source_for_switchover_to_primary_(const uint64_t tenant_id) { int ret = OB_SUCCESS; ObLogRestoreSourceMgr restore_source_mgr; ObLogRestoreSourceItem item; - bool has_restore_source = true; - int64_t begin_time = ObTimeUtility::current_time(); + ObSqlString standby_source_value; + ObRestoreSourceServiceAttr service_attr; + has_restore_source_ = true; + ObLogRestoreSourceType restore_type; if (OB_FAIL(check_inner_stat())) { LOG_WARN("inner stat error", KR(ret)); } else if (OB_FAIL(restore_source_mgr.init(tenant_id, sql_proxy_))) { @@ -1205,56 +1556,103 @@ int ObTenantRoleTransitionService::wait_tenant_sync_to_latest_until_timeout_( // When restore_source fails, in order to proceed switchover. If no restore_source is set, // do not check sync with restore_source LOG_INFO("failed to get_source", KR(ret), K(tenant_id), K(tenant_id)); - has_restore_source = false; + has_restore_source_ = false; ret = OB_SUCCESS; } - } - - if (OB_FAIL(ret) || !has_restore_source) { + } else if (OB_UNLIKELY(!item.is_valid())) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("log restore source item is invalid"); + } else if (share::is_location_log_source_type(item.type_)) { + // archive mode, cannot check whether previous primary tenant becomes standby + } else if (OB_FAIL(standby_source_value.assign(item.value_))) { + LOG_WARN("fail to assign standby source value", K(item.value_)); + } else if (OB_FAIL(service_attr.parse_service_attr_from_str(standby_source_value))) { + LOG_WARN("fail to parse service attr", K(item), K(standby_source_value)); } else { - bool has_sync_to_latest = false; - while (!THIS_WORKER.is_timeout() && !logservice::ObLogRestoreHandler::need_fail_when_switch_to_primary(ret)) { - has_sync_to_latest = false; - if (OB_FAIL(check_sync_to_latest_(tenant_id, tenant_info, has_sync_to_latest))) { - LOG_WARN("fail to check_sync_to_latest_", KR(ret), K(tenant_id), - K(tenant_info), K(has_sync_to_latest)); - } else if (has_sync_to_latest) { - LOG_INFO("sync to latest", K(has_sync_to_latest), K(tenant_id)); - break; - } else { - LOG_WARN("not sync to latest, wait a while", K(tenant_id)); + // net service mode, check whether previous primary tenant becomes standby + share::ObTenantRole tenant_role; + share::schema::ObTenantStatus tenant_status; + ObTenantSwitchoverStatus switchover_status; + SMART_VAR(share::ObLogRestoreProxyUtil, proxy_util) { + if (OB_FAIL(proxy_util.init_with_service_attr(tenant_id, &service_attr))) { + LOG_WARN("fail to init proxy_util", KR(ret), K(service_attr)); + } else if (OB_FAIL(proxy_util.get_tenant_info(tenant_role, tenant_status, switchover_status))) { + LOG_WARN("fail to get tenant info", KR(ret), K(service_attr)); + } else if (OB_UNLIKELY(!tenant_role.is_standby())) { + ret = OB_OP_NOT_ALLOW; + LOG_WARN("tenant role not match", KR(ret), K(tenant_role), K(service_attr)); + LOG_USER_ERROR(OB_OP_NOT_ALLOW, "log restore source is primary, switchover to primary is"); + } else if (OB_UNLIKELY(share::schema::ObTenantStatus::TENANT_STATUS_NORMAL != tenant_status)) { + ret = OB_OP_NOT_ALLOW; + LOG_WARN("tenant status not match", KR(ret), K(tenant_status), K(service_attr)); + LOG_USER_ERROR(OB_OP_NOT_ALLOW, "log restore source is not in normal status, switchover to primary is"); + } else if (OB_UNLIKELY(!switchover_status.is_normal_status())) { + ret = OB_OP_NOT_ALLOW; + LOG_WARN("tenant switchover status not match", KR(ret), K(switchover_status), K(service_attr)); + 
LOG_USER_ERROR(OB_OP_NOT_ALLOW, "log restore source is not in normal switchover status, switchover to primary is"); } - usleep(10L * 1000L); } - if (logservice::ObLogRestoreHandler::need_fail_when_switch_to_primary(ret)) { - } else if (THIS_WORKER.is_timeout() || !has_sync_to_latest) { - // return NOT_ALLOW instead of timeout - ret = OB_OP_NOT_ALLOW; - LOG_WARN("has not sync to latest, can not swithover to primary", KR(ret)); - LOG_USER_ERROR(OB_OP_NOT_ALLOW, "not sync to latest, switchover to primary"); - } else if (OB_SUCC(ret)) { - LOG_INFO("finish check sync to latest", K(has_sync_to_latest)); - } - - int64_t cost = ObTimeUtility::current_time() - begin_time; - LOG_INFO("check sync to latest", KR(ret), K(cost), - "is_pass", has_sync_to_latest); - CLUSTER_EVENT_ADD_LOG(ret, "wait sync to latest end", - "tenant id", tenant_id, - "switchover#", tenant_info.get_switchover_epoch(), - "finished", OB_SUCC(ret) ? "yes" : "no", - "cost sec", cost / SEC_UNIT); } return ret; } -int ObTenantRoleTransitionService::check_sync_to_latest_(const uint64_t tenant_id, - const ObAllTenantInfo &tenant_info, - bool &has_sync_to_latest) +int ObTenantRoleTransitionService::check_sync_to_latest_do_while_( + const ObAllTenantInfo &tenant_info, + const bool only_check_sys_ls) { int ret = OB_SUCCESS; - int64_t begin_time = ObTimeUtility::current_time(); - has_sync_to_latest = false; + bool is_synced = false; + const uint64_t tenant_id = tenant_info.get_tenant_id(); + while (!THIS_WORKER.is_timeout() && !logservice::ObLogRestoreHandler::need_fail_when_switch_to_primary(ret)) { + bool is_all_ls_synced = false; + bool is_sys_ls_synced = false; + ret = OB_SUCCESS; + if (OB_FAIL(check_sync_to_latest_(tenant_id, only_check_sys_ls, tenant_info, is_sys_ls_synced, is_all_ls_synced))) { + LOG_WARN("fail to execute check_sync_to_latest_", KR(ret), K(tenant_id), K(only_check_sys_ls), K(tenant_info)); + } else { + is_synced = only_check_sys_ls ? 
is_sys_ls_synced : is_all_ls_synced; + if (is_synced) { + LOG_INFO("sync to latest", K(tenant_id), K(only_check_sys_ls), K(is_synced), + K(is_sys_ls_synced), K(is_all_ls_synced)); + break; + } else { + LOG_WARN("not sync to latest, wait a while", K(tenant_id), K(only_check_sys_ls)); + } + } + usleep(10L * 1000L); + } + if (logservice::ObLogRestoreHandler::need_fail_when_switch_to_primary(ret)) { + } else if (THIS_WORKER.is_timeout() || !is_synced) { + // return NOT_ALLOW instead of timeout + if (OB_SUCC(ret)) { + ret = OB_TIMEOUT; // to print in err_msg + } + ObSqlString err_msg; + int tmp_ret = OB_SUCCESS; + if (OB_TMP_FAIL(err_msg.assign_fmt("wait tenant sync to latest failed(original error code: %d), switchover to primary is", ret))) { + LOG_WARN("fail to assign error msg", KR(ret), KR(tmp_ret)); + } else { + // convert OB_TIMEOUT or other failure code to OB_OP_NOT_ALLOW + ret = OB_OP_NOT_ALLOW; + LOG_WARN("has not sync to latest, can not swithover to primary", KR(ret), K(only_check_sys_ls)); + LOG_USER_ERROR(OB_OP_NOT_ALLOW, err_msg.ptr()); + } + } + if (OB_SUCC(ret)) { + LOG_INFO("finish check sync to latest", K(only_check_sys_ls), K(is_synced)); + } + return ret; +} +int ObTenantRoleTransitionService::check_sync_to_latest_( + const uint64_t tenant_id, + const bool only_check_sys_ls, + const ObAllTenantInfo &tenant_info, + bool &is_sys_ls_synced, + bool &is_all_ls_synced) +{ + int ret = OB_SUCCESS; + int64_t begin_ts = ObTimeUtility::current_time(); + is_all_ls_synced = false; ObLSStatusOperator ls_status_op; share::ObLSStatusInfoArray all_ls_status_array; share::ObLSStatusInfoArray sys_ls_status_array; @@ -1262,11 +1660,11 @@ int ObTenantRoleTransitionService::check_sync_to_latest_(const uint64_t tenant_i ObLSRecoveryStatOperator ls_recovery_operator; ObLSRecoveryStat sys_ls_recovery_stat; SCN sys_ls_sync_scn = SCN::min_scn(); - bool sys_ls_sync_to_latest = false; + bool sys_ls_sync_has_all_log = false; share::ObLSStatusInfo ls_status; - + ObTenantRoleTransNonSyncInfo non_sync_info; LOG_INFO("start to check_sync_to_latest", KR(ret), K(tenant_id), K(tenant_info)); - + is_sys_ls_synced = false; if (!is_user_tenant(tenant_id)) { ret = OB_INVALID_ARGUMENT; LOG_WARN("invalid argument", KR(ret), K(tenant_id)); @@ -1279,13 +1677,20 @@ int ObTenantRoleTransitionService::check_sync_to_latest_(const uint64_t tenant_i tenant_id, true /*need_check_sync_to_latest*/, sys_ls_sync_scn, - sys_ls_sync_to_latest))) { - LOG_WARN("failed to get_sys_ls_sync_scn_", KR(ret), K(switchover_checkpoints)); - } else if (!(sys_ls_sync_scn.is_valid_and_not_min() && sys_ls_sync_to_latest - && sys_ls_recovery_stat.get_sync_scn() == sys_ls_sync_scn)) { - LOG_WARN("sys ls not sync, keep waiting", KR(ret), K(sys_ls_sync_scn), K(sys_ls_sync_to_latest), - K(sys_ls_recovery_stat), K(switchover_checkpoints)); - // SYS LS is sync, check other LS + sys_ls_sync_has_all_log))) { + LOG_WARN("failed to get_sys_ls_sync_scn_", KR(ret)); + } else { + is_sys_ls_synced = (sys_ls_sync_scn.is_valid_and_not_min() && sys_ls_sync_has_all_log + && sys_ls_recovery_stat.get_sync_scn() == sys_ls_sync_scn); + } + if (OB_FAIL(ret)) { + } else if (only_check_sys_ls) { + // do nothing + } else if (OB_UNLIKELY(!is_sys_ls_synced)) { + // we have checked sys ls when only_check_sys_ls is true, + // now it should be synced + ret = OB_ERR_UNEXPECTED; + LOG_WARN("sys ls not sync", KR(ret), K(only_check_sys_ls), K(is_sys_ls_synced), K(tenant_info)); } else if (OB_FAIL(ls_status_op.get_all_ls_status_by_order_for_switch_tenant(tenant_id, true/* 
ignore_need_create_abort */, all_ls_status_array, *sql_proxy_))) { LOG_WARN("failed to get_all_ls_status_by_order", KR(ret), K(tenant_id)); @@ -1297,28 +1702,28 @@ int ObTenantRoleTransitionService::check_sync_to_latest_(const uint64_t tenant_i LOG_WARN("unexpect checkpoints count", KR(ret), K(switchover_checkpoints), K(tenant_id), K(tenant_info), K(all_ls_status_array)); } else { - has_sync_to_latest = true; - for (int64_t i = 0; i < switchover_checkpoints.count() && OB_SUCC(ret) && has_sync_to_latest; i++) { - const auto &checkpoint = switchover_checkpoints.at(i); - if (checkpoint.is_sync_to_latest()) { - // LS is sync - } else { - has_sync_to_latest = false; - LOG_WARN("ls not sync, keep waiting", KR(ret), K(checkpoint)); - } - }//end for + lib::ob_sort(switchover_checkpoints.begin(), switchover_checkpoints.end()); + if (OB_FAIL(non_sync_info.init(switchover_checkpoints))) { + LOG_WARN("fail to init non_sync_info", KR(ret), K(switchover_checkpoints)); + } else { + is_all_ls_synced = non_sync_info.is_sync(); + } } - - LOG_INFO("check sync to latest", KR(ret)); - int64_t cost = ObTimeUtility::current_time() - begin_time; - if (REACH_TIME_INTERVAL(PRINT_INTERVAL) || has_sync_to_latest) { + int64_t cost = ObTimeUtility::current_time() - begin_ts; + LOG_INFO("check sync to latest", KR(ret), K(is_verify_), K(tenant_id), K(cost), K(only_check_sys_ls), + K(is_sys_ls_synced), K(is_all_ls_synced), K(non_sync_info)); + if (is_verify_ || only_check_sys_ls) { + } else if (REACH_TIME_INTERVAL(PRINT_INTERVAL) || is_all_ls_synced) { + TENANT_EVENT(tenant_id, TENANT_ROLE_CHANGE, WAIT_LOG_SYNC, begin_ts, + ret, cost, is_sys_ls_synced ? "YES" : "NO", + is_all_ls_synced ? "YES" : "NO", non_sync_info); CLUSTER_EVENT_ADD_LOG(ret, "wait tenant sync from latest", - "tenant id", tenant_id, - "is sync", has_sync_to_latest ? "yes" : "no", - "switchover#", tenant_info.get_switchover_epoch(), - "finished", OB_SUCC(ret) ? "yes" : "no", - "checkpoint", switchover_checkpoints, - "cost sec", cost / SEC_UNIT); + "tenant id", tenant_id, + "is sync", is_all_ls_synced ? "yes" : "no", + "switchover#", tenant_info.get_switchover_epoch(), + "finished", OB_SUCC(ret) ? 
"yes" : "no", + "checkpoint", switchover_checkpoints, + "cost sec", cost / SEC_UNIT); } return ret; } @@ -1603,25 +2008,27 @@ int ObTenantRoleTransitionService::check_tenant_server_online_() } else if (0 != temporary_offline_servers.count()) { ret = OB_OP_NOT_ALLOW; LOG_WARN("the tenant has units on temporary offline servers", KR(ret), K(tenant_id_), K(temporary_offline_servers)); - LOG_USER_ERROR(OB_OP_NOT_ALLOW, "the tenant has units on temporary offline servers, switch to primary"); + TENANT_ROLE_TRANS_USER_ERR_WITH_SUFFIX(OB_OP_NOT_ALLOW, + "the tenant has units on temporary offline servers", switch_optype_); } else if (0 != permanent_offline_servers.count()) { bool exists = false; if (OB_FAIL(ObEmptyServerChecker::check_if_tenant_ls_replicas_exist_in_servers( tenant_id_, permanent_offline_servers, exists))) { - LOG_WARN("fail to check if the tenant's ls_replicas exist in permanent_offline_servers", + LOG_WARN("fail to check if the tenant's LS replicas exist in permanent_offline_servers", KR(ret), K(tenant_id_), K(permanent_offline_servers)); if (OB_LEADER_NOT_EXIST == ret) { ret = OB_OP_NOT_ALLOW; - LOG_USER_ERROR(OB_OP_NOT_ALLOW, "the tenant has ls replicas without leader, switch to primary"); + TENANT_ROLE_TRANS_USER_ERR_WITH_SUFFIX(OB_OP_NOT_ALLOW, + "the tenant has LS replicas without leader", switch_optype_); } } else if (exists) { ret = OB_OP_NOT_ALLOW; - LOG_WARN("the tenant has ls replicas on at least one of the permanent offline servers", + LOG_WARN("the tenant has LS replicas on at least one of the permanent offline servers", KR(ret), K(tenant_id_), K(exists), K(permanent_offline_servers)); - LOG_USER_ERROR(OB_OP_NOT_ALLOW, - "the tenant has ls replicas on at least one of the permanent offline servers, switch to primary"); + TENANT_ROLE_TRANS_USER_ERR_WITH_SUFFIX(OB_OP_NOT_ALLOW, + "the tenant has LS replicas on at least one of the permanent offline servers", switch_optype_); } } return ret; diff --git a/src/rootserver/ob_tenant_role_transition_service.h b/src/rootserver/standby/ob_tenant_role_transition_service.h similarity index 71% rename from src/rootserver/ob_tenant_role_transition_service.h rename to src/rootserver/standby/ob_tenant_role_transition_service.h index 1a8662384f..d21f8ec1ab 100644 --- a/src/rootserver/ob_tenant_role_transition_service.h +++ b/src/rootserver/standby/ob_tenant_role_transition_service.h @@ -67,20 +67,83 @@ public: template int do_nonblock_renew(const ARRAY &array_l, const ARRAY &array_r, const uint64_t tenant_id); +struct ObTenantRoleTransCostDetail +{ +public: + enum CostType { + WAIT_LOG_SYNC = 0, + WAIT_BALANCE_TASK, + LOG_FLASHBACK, + WAIT_LOG_END, + CHANGE_ACCESS_MODE, + MAX_COST_TYPE + }; + const char* type_to_str(CostType type) const; +public: + ObTenantRoleTransCostDetail() : cost_type_{}, start_(0), end_(0) {} + ~ObTenantRoleTransCostDetail() {} + void set_start(int64_t start) { start_ = start; } + void add_cost(CostType type, int64_t cost); + void set_end(int64_t end) { end_ = end; } + int64_t get_wait_log_end () { return cost_type_[WAIT_LOG_END]; } + int64_t to_string (char *buf, const int64_t buf_len) const ; +private: + int64_t cost_type_[MAX_COST_TYPE]; + int64_t start_; + int64_t end_; +}; + +struct ObTenantRoleTransAllLSInfo +{ +public: + ObTenantRoleTransAllLSInfo() : all_ls_{} {} + ~ObTenantRoleTransAllLSInfo() {} + int init(); + int add_ls(const ObLSID &ls_id, const ObLSStatus status); + int64_t to_string (char *buf, const int64_t buf_len) const; + bool is_valid() const; +private: + ObArray 
all_ls_[ObLSStatus::OB_LS_MAX_STATUS]; +}; + +struct ObTenantRoleTransNonSyncInfo +{ +public: + ObTenantRoleTransNonSyncInfo() : is_sync_(true), not_sync_checkpoints_() {} + ~ObTenantRoleTransNonSyncInfo() {} + int init(const ObArray &switchover_checkpoints); + int64_t to_string (char *buf, const int64_t buf_len) const; + bool is_sync() const { return is_sync_; } +private: + static constexpr int64_t MAX_PRINT_LS_NUM = 5; + bool is_sync_; + ObArray not_sync_checkpoints_; +}; + /*description: * for primary to standby and standby to primary */ class ObTenantRoleTransitionService { public: - ObTenantRoleTransitionService(const uint64_t tenant_id, + ObTenantRoleTransitionService() + : tenant_id_(OB_INVALID_TENANT_ID), sql_proxy_(NULL), + rpc_proxy_(NULL), switchover_epoch_(OB_INVALID_VERSION), + switch_optype_(obrpc::ObSwitchTenantArg::OpType::INVALID), + so_scn_(), + cost_detail_(NULL), + all_ls_info_(NULL), + has_restore_source_(false), + is_verify_(false) {} + virtual ~ObTenantRoleTransitionService() {} + int init( + uint64_t tenant_id, + const obrpc::ObSwitchTenantArg::OpType &switch_optype, + const bool is_verify, common::ObMySQLProxy *sql_proxy, obrpc::ObSrvRpcProxy *rpc_proxy, - const obrpc::ObSwitchTenantArg::OpType &switch_optype) - : tenant_id_(tenant_id), sql_proxy_(sql_proxy), - rpc_proxy_(rpc_proxy), switchover_epoch_(OB_INVALID_VERSION), - switch_optype_(switch_optype) {} - virtual ~ObTenantRoleTransitionService() {} + ObTenantRoleTransCostDetail *cost_detail, + ObTenantRoleTransAllLSInfo *all_ls_info); int failover_to_primary(); int check_inner_stat(); int do_switch_access_mode_to_append(const share::ObAllTenantInfo &tenant_info, @@ -93,7 +156,6 @@ public: { switchover_epoch_ = switchover_epoch; } - /** * @description: * Update scn/tenant_role/switchover status when switchover is executed @@ -116,6 +178,7 @@ public: const int64_t old_switchover_epoch, ObAllTenantInfo &new_tenant_info); + int wait_sys_ls_sync_to_latest_until_timeout_(const uint64_t tenant_id, ObAllTenantInfo &tenant_info); /** * @description: * wait tenant sync to switchover checkpoint until timeout @@ -123,8 +186,8 @@ public: * @param[in] primary_checkpoints primary switchover checkpoint * @return return code */ - int wait_tenant_sync_to_latest_until_timeout_(const uint64_t tenant_id, - const ObAllTenantInfo &tenant_info); + int wait_tenant_sync_to_latest_until_timeout_(const uint64_t tenant_id, const ObAllTenantInfo &tenant_info); + int check_restore_source_for_switchover_to_primary_(const uint64_t tenant_id); /** * @description: @@ -155,6 +218,7 @@ public: const bool get_latest_scn, ObIArray &checkpoints ); + share::SCN get_so_scn() const { return so_scn_; } private: int do_failover_to_primary_(const share::ObAllTenantInfo &tenant_info); @@ -175,8 +239,6 @@ private: const palf::AccessMode target_access_mode, const share::SCN &ref_scn, const share::SCN &sys_ls_sync_scn); - int do_switch_access_mode_to_flashback( - const share::ObAllTenantInfo &tenant_info); /** * @description: @@ -204,21 +266,36 @@ private: const ObIArray &checkpoints, share::SCN &sys_ls_sync_scn, bool &is_sync_to_latest); + /** + * @description: + * wait tenant/sys ls sync to switchover checkpoint until timeout + * @param[in] tenant_id + * @param[in] only_check_sys_ls true: only wait sys ls sync; false: wait tenant sync + * @return return code + */ + int check_sync_to_latest_do_while_( + const ObAllTenantInfo &tenant_info, + const bool only_check_sys_ls); /** * @description: * when switch to primary, check all ls are sync to latest * @param[in] 
tenant_id the tenant id to check * @param[in] tenant_info - * @param[out] has_sync_to_latest whether sync to latest + * @param[out] is_all_ls_synced whether sync to latest * @return return code */ - int check_sync_to_latest_(const uint64_t tenant_id, - const ObAllTenantInfo &tenant_info, - bool &has_sync_to_latest); + int check_sync_to_latest_( + const uint64_t tenant_id, + const bool only_check_sys_ls, + const ObAllTenantInfo &tenant_info, + bool &is_sys_ls_synced, + bool &is_all_ls_synced); int do_prepare_flashback_for_switch_to_primary_(share::ObAllTenantInfo &tenant_info); int do_prepare_flashback_for_failover_to_primary_(share::ObAllTenantInfo &tenant_info); + int clear_service_name_(); + int double_check_service_name_(const share::ObAllTenantInfo &tenant_info); int check_and_update_sys_ls_recovery_stat_in_switchover_( const uint64_t tenant_id, const bool switch_to_primary, @@ -236,10 +313,11 @@ private: common::sqlclient::ObMySQLResult &res, ObArray &temporary_offline_servers, ObArray &permanent_offline_servers); + int ls_status_stats_when_change_access_mode_(const share::ObLSStatusInfoArray &status_info_array); private: const static int64_t SEC_UNIT = 1000L * 1000L; - const static int64_t PRINT_INTERVAL = 10 * 1000 * 1000L; + const static int64_t PRINT_INTERVAL = 1000L * 1000L; private: uint64_t tenant_id_; @@ -247,6 +325,11 @@ private: obrpc::ObSrvRpcProxy *rpc_proxy_; int64_t switchover_epoch_; obrpc::ObSwitchTenantArg::OpType switch_optype_; + share::SCN so_scn_; + ObTenantRoleTransCostDetail *cost_detail_; + ObTenantRoleTransAllLSInfo *all_ls_info_; + bool has_restore_source_; + bool is_verify_; }; } } diff --git a/src/share/CMakeLists.txt b/src/share/CMakeLists.txt index b83f6be795..eb2f4c2c1b 100644 --- a/src/share/CMakeLists.txt +++ b/src/share/CMakeLists.txt @@ -188,13 +188,13 @@ ob_set_subtarget(ob_share common ob_inner_kv_table_operator.cpp ob_inner_table_operator.cpp ob_standby_upgrade.cpp - ob_primary_standby_service.cpp ob_common_id.cpp ob_cluster_event_history_table_operator.cpp scn.cpp ob_throttling_utils.cpp ob_storage_ha_diagnose_struct.cpp ob_storage_ha_diagnose_operator.cpp + ob_service_name_proxy.cpp ob_compatibility_control.cpp ) diff --git a/src/share/inner_table/ob_inner_table_schema.12451_12500.cpp b/src/share/inner_table/ob_inner_table_schema.12451_12500.cpp index 72b383d926..2a17c036ad 100644 --- a/src/share/inner_table/ob_inner_table_schema.12451_12500.cpp +++ b/src/share/inner_table/ob_inner_table_schema.12451_12500.cpp @@ -3955,6 +3955,142 @@ int ObInnerTableSchema::all_virtual_user_proxy_role_info_history_schema(ObTableS return ret; } +int ObInnerTableSchema::all_virtual_service_schema(ObTableSchema &table_schema) +{ + int ret = OB_SUCCESS; + uint64_t column_id = OB_APP_MIN_COLUMN_ID - 1; + + //generated fields: + table_schema.set_tenant_id(OB_SYS_TENANT_ID); + table_schema.set_tablegroup_id(OB_INVALID_ID); + table_schema.set_database_id(OB_SYS_DATABASE_ID); + table_schema.set_table_id(OB_ALL_VIRTUAL_SERVICE_TID); + table_schema.set_rowkey_split_pos(0); + table_schema.set_is_use_bloomfilter(false); + table_schema.set_progressive_merge_num(0); + table_schema.set_rowkey_column_num(2); + table_schema.set_load_type(TABLE_LOAD_TYPE_IN_DISK); + table_schema.set_table_type(VIRTUAL_TABLE); + table_schema.set_index_type(INDEX_TYPE_IS_NOT); + table_schema.set_def_type(TABLE_DEF_TYPE_INTERNAL); + + if (OB_SUCC(ret)) { + if (OB_FAIL(table_schema.set_table_name(OB_ALL_VIRTUAL_SERVICE_TNAME))) { + LOG_ERROR("fail to set table_name", K(ret)); + } + } + + if 
(OB_SUCC(ret)) { + if (OB_FAIL(table_schema.set_compress_func_name(OB_DEFAULT_COMPRESS_FUNC_NAME))) { + LOG_ERROR("fail to set compress_func_name", K(ret)); + } + } + table_schema.set_part_level(PARTITION_LEVEL_ZERO); + table_schema.set_charset_type(ObCharset::get_default_charset()); + table_schema.set_collation_type(ObCharset::get_default_collation(ObCharset::get_default_charset())); + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("tenant_id", //column_name + ++column_id, //column_id + 1, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObIntType, //column_type + CS_TYPE_INVALID, //column_collation_type + sizeof(int64_t), //column_length + -1, //column_precision + -1, //column_scale + false, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("service_name_id", //column_name + ++column_id, //column_id + 2, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObIntType, //column_type + CS_TYPE_INVALID, //column_collation_type + sizeof(int64_t), //column_length + -1, //column_precision + -1, //column_scale + false, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA_TS("gmt_create", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObTimestampType, //column_type + CS_TYPE_INVALID, //column_collation_type + sizeof(ObPreciseDateTime), //column_length + -1, //column_precision + -1, //column_scale + false, //is_nullable + false, //is_autoincrement + false); //is_on_update_for_timestamp + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA_TS("gmt_modified", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObTimestampType, //column_type + CS_TYPE_INVALID, //column_collation_type + sizeof(ObPreciseDateTime), //column_length + -1, //column_precision + -1, //column_scale + false, //is_nullable + false, //is_autoincrement + false); //is_on_update_for_timestamp + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("service_name", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObVarcharType, //column_type + CS_TYPE_INVALID, //column_collation_type + OB_SERVICE_NAME_LENGTH, //column_length + -1, //column_precision + -1, //column_scale + false, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("service_status", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObVarcharType, //column_type + CS_TYPE_INVALID, //column_collation_type + 64, //column_length + -1, //column_precision + -1, //column_scale + false, //is_nullable + false); //is_autoincrement + } + table_schema.set_index_using_type(USING_BTREE); + table_schema.set_row_store_type(ENCODING_ROW_STORE); + table_schema.set_store_format(OB_STORE_FORMAT_DYNAMIC_MYSQL); + table_schema.set_progressive_merge_round(1); + table_schema.set_storage_format_version(3); + table_schema.set_tablet_id(0); + + table_schema.set_max_used_column_id(column_id); + return ret; +} + int ObInnerTableSchema::all_virtual_tenant_resource_limit_schema(ObTableSchema &table_schema) { int ret = OB_SUCCESS; diff --git a/src/share/inner_table/ob_inner_table_schema.15401_15450.cpp b/src/share/inner_table/ob_inner_table_schema.15401_15450.cpp index 818ee90214..75ad58195c 100644 --- a/src/share/inner_table/ob_inner_table_schema.15401_15450.cpp +++ b/src/share/inner_table/ob_inner_table_schema.15401_15450.cpp @@ -7920,6 +7920,140 @@ int 
ObInnerTableSchema::all_virtual_user_proxy_role_info_real_agent_ora_schema(O return ret; } +int ObInnerTableSchema::all_virtual_service_ora_schema(ObTableSchema &table_schema) +{ + int ret = OB_SUCCESS; + uint64_t column_id = OB_APP_MIN_COLUMN_ID - 1; + + //generated fields: + table_schema.set_tenant_id(OB_SYS_TENANT_ID); + table_schema.set_tablegroup_id(OB_INVALID_ID); + table_schema.set_database_id(OB_ORA_SYS_DATABASE_ID); + table_schema.set_table_id(OB_ALL_VIRTUAL_SERVICE_ORA_TID); + table_schema.set_rowkey_split_pos(0); + table_schema.set_is_use_bloomfilter(false); + table_schema.set_progressive_merge_num(0); + table_schema.set_rowkey_column_num(2); + table_schema.set_load_type(TABLE_LOAD_TYPE_IN_DISK); + table_schema.set_table_type(VIRTUAL_TABLE); + table_schema.set_index_type(INDEX_TYPE_IS_NOT); + table_schema.set_def_type(TABLE_DEF_TYPE_INTERNAL); + + if (OB_SUCC(ret)) { + if (OB_FAIL(table_schema.set_table_name(OB_ALL_VIRTUAL_SERVICE_ORA_TNAME))) { + LOG_ERROR("fail to set table_name", K(ret)); + } + } + + if (OB_SUCC(ret)) { + if (OB_FAIL(table_schema.set_compress_func_name(OB_DEFAULT_COMPRESS_FUNC_NAME))) { + LOG_ERROR("fail to set compress_func_name", K(ret)); + } + } + table_schema.set_part_level(PARTITION_LEVEL_ZERO); + table_schema.set_charset_type(ObCharset::get_default_charset()); + table_schema.set_collation_type(ObCollationType::CS_TYPE_UTF8MB4_BIN); + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("TENANT_ID", //column_name + ++column_id, //column_id + 1, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObNumberType, //column_type + CS_TYPE_INVALID, //column_collation_type + 38, //column_length + 38, //column_precision + 0, //column_scale + false, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("SERVICE_NAME_ID", //column_name + ++column_id, //column_id + 2, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObNumberType, //column_type + CS_TYPE_INVALID, //column_collation_type + 38, //column_length + 38, //column_precision + 0, //column_scale + false, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("GMT_CREATE", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObTimestampLTZType, //column_type + CS_TYPE_INVALID, //column_collation_type + 0, //column_length + -1, //column_precision + -1, //column_scale + false, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("GMT_MODIFIED", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObTimestampLTZType, //column_type + CS_TYPE_INVALID, //column_collation_type + 0, //column_length + -1, //column_precision + -1, //column_scale + false, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("SERVICE_NAME", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObVarcharType, //column_type + CS_TYPE_UTF8MB4_BIN, //column_collation_type + OB_SERVICE_NAME_LENGTH, //column_length + 2, //column_precision + -1, //column_scale + false, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("SERVICE_STATUS", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObVarcharType, //column_type + CS_TYPE_UTF8MB4_BIN, //column_collation_type + 64, //column_length + 2, //column_precision + -1, //column_scale + false, //is_nullable + false); //is_autoincrement + } + 
table_schema.set_index_using_type(USING_BTREE); + table_schema.set_row_store_type(ENCODING_ROW_STORE); + table_schema.set_store_format(OB_STORE_FORMAT_DYNAMIC_MYSQL); + table_schema.set_progressive_merge_round(1); + table_schema.set_storage_format_version(3); + table_schema.set_tablet_id(0); + + table_schema.set_max_used_column_id(column_id); + return ret; +} + } // end namespace share } // end namespace oceanbase diff --git a/src/share/inner_table/ob_inner_table_schema.21201_21250.cpp b/src/share/inner_table/ob_inner_table_schema.21201_21250.cpp index bd054eca2c..569d150a61 100644 --- a/src/share/inner_table/ob_inner_table_schema.21201_21250.cpp +++ b/src/share/inner_table/ob_inner_table_schema.21201_21250.cpp @@ -1060,7 +1060,7 @@ int ObInnerTableSchema::gv_ob_processlist_schema(ObTableSchema &table_schema) table_schema.set_collation_type(ObCharset::get_default_collation(ObCharset::get_default_charset())); if (OB_SUCC(ret)) { - if (OB_FAIL(table_schema.set_view_definition(R"__( SELECT SVR_IP, SVR_PORT, SQL_PORT, ID, USER, HOST, DB, TENANT, COMMAND, TIME, TOTAL_TIME, STATE, INFO, PROXY_SESSID, MASTER_SESSID, USER_CLIENT_IP, USER_HOST, RETRY_CNT, RETRY_INFO, SQL_ID, TRANS_ID, THREAD_ID, SSL_CIPHER, TRACE_ID, TRANS_STATE, ACTION, MODULE, CLIENT_INFO, LEVEL, SAMPLE_PERCENTAGE, RECORD_POLICY, LB_VID, LB_VIP, LB_VPORT, IN_BYTES, OUT_BYTES, USER_CLIENT_PORT, cast(total_cpu_time as SIGNED) as TOTAL_CPU_TIME, PROXY_USER, TOP_INFO FROM oceanbase.__all_virtual_processlist )__"))) { + if (OB_FAIL(table_schema.set_view_definition(R"__( SELECT SVR_IP, SVR_PORT, SQL_PORT, ID, USER, HOST, DB, TENANT, COMMAND, TIME, TOTAL_TIME, STATE, INFO, PROXY_SESSID, MASTER_SESSID, USER_CLIENT_IP, USER_HOST, RETRY_CNT, RETRY_INFO, SQL_ID, TRANS_ID, THREAD_ID, SSL_CIPHER, TRACE_ID, TRANS_STATE, ACTION, MODULE, CLIENT_INFO, LEVEL, SAMPLE_PERCENTAGE, RECORD_POLICY, LB_VID, LB_VIP, LB_VPORT, IN_BYTES, OUT_BYTES, USER_CLIENT_PORT, PROXY_USER, SERVICE_NAME, cast(total_cpu_time as SIGNED) as TOTAL_CPU_TIME, TOP_INFO FROM oceanbase.__all_virtual_processlist )__"))) { LOG_ERROR("fail to set view_definition", K(ret)); } } @@ -1110,7 +1110,7 @@ int ObInnerTableSchema::v_ob_processlist_schema(ObTableSchema &table_schema) table_schema.set_collation_type(ObCharset::get_default_collation(ObCharset::get_default_charset())); if (OB_SUCC(ret)) { - if (OB_FAIL(table_schema.set_view_definition(R"__( SELECT SVR_IP, SVR_PORT, SQL_PORT, ID, USER, HOST, DB, TENANT, COMMAND, TIME, TOTAL_TIME, STATE, INFO, PROXY_SESSID, MASTER_SESSID, USER_CLIENT_IP, USER_HOST, RETRY_CNT, RETRY_INFO, SQL_ID, TRANS_ID, THREAD_ID, SSL_CIPHER, TRACE_ID, TRANS_STATE, ACTION, MODULE, CLIENT_INFO, LEVEL, SAMPLE_PERCENTAGE, RECORD_POLICY, LB_VID, LB_VIP, LB_VPORT, IN_BYTES, OUT_BYTES, USER_CLIENT_PORT, cast(total_cpu_time as SIGNED) as TOTAL_CPU_TIME, PROXY_USER, TOP_INFO FROM oceanbase.GV$OB_PROCESSLIST WHERE SVR_IP = host_ip() AND SVR_PORT = rpc_port() )__"))) { + if (OB_FAIL(table_schema.set_view_definition(R"__( SELECT SVR_IP, SVR_PORT, SQL_PORT, ID, USER, HOST, DB, TENANT, COMMAND, TIME, TOTAL_TIME, STATE, INFO, PROXY_SESSID, MASTER_SESSID, USER_CLIENT_IP, USER_HOST, RETRY_CNT, RETRY_INFO, SQL_ID, TRANS_ID, THREAD_ID, SSL_CIPHER, TRACE_ID, TRANS_STATE, ACTION, MODULE, CLIENT_INFO, LEVEL, SAMPLE_PERCENTAGE, RECORD_POLICY, LB_VID, LB_VIP, LB_VPORT, IN_BYTES, OUT_BYTES, USER_CLIENT_PORT, PROXY_USER, SERVICE_NAME, cast(total_cpu_time as SIGNED) as TOTAL_CPU_TIME, TOP_INFO FROM oceanbase.GV$OB_PROCESSLIST WHERE SVR_IP = host_ip() AND SVR_PORT = rpc_port() )__"))) { 
LOG_ERROR("fail to set view_definition", K(ret)); } } diff --git a/src/share/inner_table/ob_inner_table_schema.21501_21550.cpp b/src/share/inner_table/ob_inner_table_schema.21501_21550.cpp index 625cddb4f3..edcf87ec1e 100644 --- a/src/share/inner_table/ob_inner_table_schema.21501_21550.cpp +++ b/src/share/inner_table/ob_inner_table_schema.21501_21550.cpp @@ -1875,6 +1875,106 @@ int ObInnerTableSchema::v_ob_compatibility_control_schema(ObTableSchema &table_s return ret; } +int ObInnerTableSchema::dba_ob_services_schema(ObTableSchema &table_schema) +{ + int ret = OB_SUCCESS; + uint64_t column_id = OB_APP_MIN_COLUMN_ID - 1; + + //generated fields: + table_schema.set_tenant_id(OB_SYS_TENANT_ID); + table_schema.set_tablegroup_id(OB_INVALID_ID); + table_schema.set_database_id(OB_SYS_DATABASE_ID); + table_schema.set_table_id(OB_DBA_OB_SERVICES_TID); + table_schema.set_rowkey_split_pos(0); + table_schema.set_is_use_bloomfilter(false); + table_schema.set_progressive_merge_num(0); + table_schema.set_rowkey_column_num(0); + table_schema.set_load_type(TABLE_LOAD_TYPE_IN_DISK); + table_schema.set_table_type(SYSTEM_VIEW); + table_schema.set_index_type(INDEX_TYPE_IS_NOT); + table_schema.set_def_type(TABLE_DEF_TYPE_INTERNAL); + + if (OB_SUCC(ret)) { + if (OB_FAIL(table_schema.set_table_name(OB_DBA_OB_SERVICES_TNAME))) { + LOG_ERROR("fail to set table_name", K(ret)); + } + } + + if (OB_SUCC(ret)) { + if (OB_FAIL(table_schema.set_compress_func_name(OB_DEFAULT_COMPRESS_FUNC_NAME))) { + LOG_ERROR("fail to set compress_func_name", K(ret)); + } + } + table_schema.set_part_level(PARTITION_LEVEL_ZERO); + table_schema.set_charset_type(ObCharset::get_default_charset()); + table_schema.set_collation_type(ObCharset::get_default_collation(ObCharset::get_default_charset())); + + if (OB_SUCC(ret)) { + if (OB_FAIL(table_schema.set_view_definition(R"__( SELECT gmt_create AS CREATE_TIME, gmt_modified AS MODIFIED_TIME, SERVICE_NAME_ID, SERVICE_NAME, SERVICE_STATUS FROM oceanbase.__all_virtual_service WHERE TENANT_ID=EFFECTIVE_TENANT_ID(); )__"))) { + LOG_ERROR("fail to set view_definition", K(ret)); + } + } + table_schema.set_index_using_type(USING_BTREE); + table_schema.set_row_store_type(ENCODING_ROW_STORE); + table_schema.set_store_format(OB_STORE_FORMAT_DYNAMIC_MYSQL); + table_schema.set_progressive_merge_round(1); + table_schema.set_storage_format_version(3); + table_schema.set_tablet_id(0); + + table_schema.set_max_used_column_id(column_id); + return ret; +} + +int ObInnerTableSchema::cdb_ob_services_schema(ObTableSchema &table_schema) +{ + int ret = OB_SUCCESS; + uint64_t column_id = OB_APP_MIN_COLUMN_ID - 1; + + //generated fields: + table_schema.set_tenant_id(OB_SYS_TENANT_ID); + table_schema.set_tablegroup_id(OB_INVALID_ID); + table_schema.set_database_id(OB_SYS_DATABASE_ID); + table_schema.set_table_id(OB_CDB_OB_SERVICES_TID); + table_schema.set_rowkey_split_pos(0); + table_schema.set_is_use_bloomfilter(false); + table_schema.set_progressive_merge_num(0); + table_schema.set_rowkey_column_num(0); + table_schema.set_load_type(TABLE_LOAD_TYPE_IN_DISK); + table_schema.set_table_type(SYSTEM_VIEW); + table_schema.set_index_type(INDEX_TYPE_IS_NOT); + table_schema.set_def_type(TABLE_DEF_TYPE_INTERNAL); + + if (OB_SUCC(ret)) { + if (OB_FAIL(table_schema.set_table_name(OB_CDB_OB_SERVICES_TNAME))) { + LOG_ERROR("fail to set table_name", K(ret)); + } + } + + if (OB_SUCC(ret)) { + if (OB_FAIL(table_schema.set_compress_func_name(OB_DEFAULT_COMPRESS_FUNC_NAME))) { + LOG_ERROR("fail to set compress_func_name", K(ret)); + } + } + 
table_schema.set_part_level(PARTITION_LEVEL_ZERO); + table_schema.set_charset_type(ObCharset::get_default_charset()); + table_schema.set_collation_type(ObCharset::get_default_collation(ObCharset::get_default_charset())); + + if (OB_SUCC(ret)) { + if (OB_FAIL(table_schema.set_view_definition(R"__( SELECT TENANT_ID, gmt_create AS `CREATE_TIME`, gmt_modified AS 'MODIFIED_TIME', SERVICE_NAME_ID, SERVICE_NAME, SERVICE_STATUS FROM oceanbase.__all_virtual_service )__"))) { + LOG_ERROR("fail to set view_definition", K(ret)); + } + } + table_schema.set_index_using_type(USING_BTREE); + table_schema.set_row_store_type(ENCODING_ROW_STORE); + table_schema.set_store_format(OB_STORE_FORMAT_DYNAMIC_MYSQL); + table_schema.set_progressive_merge_round(1); + table_schema.set_storage_format_version(3); + table_schema.set_tablet_id(0); + + table_schema.set_max_used_column_id(column_id); + return ret; +} + } // end namespace share } // end namespace oceanbase diff --git a/src/share/inner_table/ob_inner_table_schema.25301_25350.cpp b/src/share/inner_table/ob_inner_table_schema.25301_25350.cpp index ce518e91a6..eadddef4d7 100644 --- a/src/share/inner_table/ob_inner_table_schema.25301_25350.cpp +++ b/src/share/inner_table/ob_inner_table_schema.25301_25350.cpp @@ -125,6 +125,56 @@ int ObInnerTableSchema::proxy_users_schema(ObTableSchema &table_schema) return ret; } +int ObInnerTableSchema::dba_ob_services_ora_schema(ObTableSchema &table_schema) +{ + int ret = OB_SUCCESS; + uint64_t column_id = OB_APP_MIN_COLUMN_ID - 1; + + //generated fields: + table_schema.set_tenant_id(OB_SYS_TENANT_ID); + table_schema.set_tablegroup_id(OB_INVALID_ID); + table_schema.set_database_id(OB_ORA_SYS_DATABASE_ID); + table_schema.set_table_id(OB_DBA_OB_SERVICES_ORA_TID); + table_schema.set_rowkey_split_pos(0); + table_schema.set_is_use_bloomfilter(false); + table_schema.set_progressive_merge_num(0); + table_schema.set_rowkey_column_num(0); + table_schema.set_load_type(TABLE_LOAD_TYPE_IN_DISK); + table_schema.set_table_type(SYSTEM_VIEW); + table_schema.set_index_type(INDEX_TYPE_IS_NOT); + table_schema.set_def_type(TABLE_DEF_TYPE_INTERNAL); + + if (OB_SUCC(ret)) { + if (OB_FAIL(table_schema.set_table_name(OB_DBA_OB_SERVICES_ORA_TNAME))) { + LOG_ERROR("fail to set table_name", K(ret)); + } + } + + if (OB_SUCC(ret)) { + if (OB_FAIL(table_schema.set_compress_func_name(OB_DEFAULT_COMPRESS_FUNC_NAME))) { + LOG_ERROR("fail to set compress_func_name", K(ret)); + } + } + table_schema.set_part_level(PARTITION_LEVEL_ZERO); + table_schema.set_charset_type(ObCharset::get_default_charset()); + table_schema.set_collation_type(ObCharset::get_default_collation(ObCharset::get_default_charset())); + + if (OB_SUCC(ret)) { + if (OB_FAIL(table_schema.set_view_definition(R"__( SELECT gmt_create AS "CREATE_TIME", gmt_modified AS "MODIFIED_TIME", SERVICE_NAME_ID, SERVICE_NAME, SERVICE_STATUS FROM SYS.ALL_VIRTUAL_SERVICE WHERE TENANT_ID=EFFECTIVE_TENANT_ID(); )__"))) { + LOG_ERROR("fail to set view_definition", K(ret)); + } + } + table_schema.set_index_using_type(USING_BTREE); + table_schema.set_row_store_type(ENCODING_ROW_STORE); + table_schema.set_store_format(OB_STORE_FORMAT_DYNAMIC_MYSQL); + table_schema.set_progressive_merge_round(1); + table_schema.set_storage_format_version(3); + table_schema.set_tablet_id(0); + + table_schema.set_max_used_column_id(column_id); + return ret; +} + } // end namespace share } // end namespace oceanbase diff --git a/src/share/inner_table/ob_inner_table_schema.28101_28150.cpp 
b/src/share/inner_table/ob_inner_table_schema.28101_28150.cpp index fd2f226431..40a0107dd2 100644 --- a/src/share/inner_table/ob_inner_table_schema.28101_28150.cpp +++ b/src/share/inner_table/ob_inner_table_schema.28101_28150.cpp @@ -960,7 +960,7 @@ int ObInnerTableSchema::gv_ob_processlist_ora_schema(ObTableSchema &table_schema table_schema.set_collation_type(ObCharset::get_default_collation(ObCharset::get_default_charset())); if (OB_SUCC(ret)) { - if (OB_FAIL(table_schema.set_view_definition(R"__( SELECT SVR_IP, SVR_PORT, SQL_PORT, ID, "USER", HOST, DB, TENANT, COMMAND, TIME, TOTAL_TIME, STATE, INFO, PROXY_SESSID, MASTER_SESSID, USER_CLIENT_IP, USER_HOST, RETRY_CNT, RETRY_INFO, SQL_ID, TRANS_ID, THREAD_ID, SSL_CIPHER, TRACE_ID, TRANS_STATE, ACTION, MODULE, CLIENT_INFO, "LEVEL", SAMPLE_PERCENTAGE, RECORD_POLICY, LB_VID, LB_VIP, LB_VPORT, IN_BYTES, OUT_BYTES, USER_CLIENT_PORT, PROXY_USER, CAST(total_cpu_time AS INT) as TOTAL_CPU_TIME, TOP_INFO FROM SYS.ALL_VIRTUAL_PROCESSLIST )__"))) { + if (OB_FAIL(table_schema.set_view_definition(R"__( SELECT SVR_IP, SVR_PORT, SQL_PORT, ID, "USER", HOST, DB, TENANT, COMMAND, TIME, TOTAL_TIME, STATE, INFO, PROXY_SESSID, MASTER_SESSID, USER_CLIENT_IP, USER_HOST, RETRY_CNT, RETRY_INFO, SQL_ID, TRANS_ID, THREAD_ID, SSL_CIPHER, TRACE_ID, TRANS_STATE, ACTION, MODULE, CLIENT_INFO, "LEVEL", SAMPLE_PERCENTAGE, RECORD_POLICY, LB_VID, LB_VIP, LB_VPORT, IN_BYTES, OUT_BYTES, USER_CLIENT_PORT, PROXY_USER, SERVICE_NAME, CAST(total_cpu_time AS INT) as TOTAL_CPU_TIME, TOP_INFO FROM SYS.ALL_VIRTUAL_PROCESSLIST )__"))) { LOG_ERROR("fail to set view_definition", K(ret)); } } @@ -1010,7 +1010,7 @@ int ObInnerTableSchema::v_ob_processlist_ora_schema(ObTableSchema &table_schema) table_schema.set_collation_type(ObCharset::get_default_collation(ObCharset::get_default_charset())); if (OB_SUCC(ret)) { - if (OB_FAIL(table_schema.set_view_definition(R"__( SELECT SVR_IP, SVR_PORT, SQL_PORT, ID, "USER", HOST, DB, TENANT, COMMAND, TIME, TOTAL_TIME, STATE, INFO, PROXY_SESSID, MASTER_SESSID, USER_CLIENT_IP, USER_HOST, RETRY_CNT, RETRY_INFO, SQL_ID, TRANS_ID, THREAD_ID, SSL_CIPHER, TRACE_ID, TRANS_STATE, ACTION, MODULE, CLIENT_INFO, "LEVEL", SAMPLE_PERCENTAGE, RECORD_POLICY, LB_VID, LB_VIP, LB_VPORT, IN_BYTES, OUT_BYTES, USER_CLIENT_PORT, PROXY_USER, CAST(total_cpu_time AS INT) as TOTAL_CPU_TIME, TOP_INFO FROM SYS.GV$OB_PROCESSLIST WHERE SVR_IP = host_ip() AND SVR_PORT = rpc_port() )__"))) { + if (OB_FAIL(table_schema.set_view_definition(R"__( SELECT SVR_IP, SVR_PORT, SQL_PORT, ID, "USER", HOST, DB, TENANT, COMMAND, TIME, TOTAL_TIME, STATE, INFO, PROXY_SESSID, MASTER_SESSID, USER_CLIENT_IP, USER_HOST, RETRY_CNT, RETRY_INFO, SQL_ID, TRANS_ID, THREAD_ID, SSL_CIPHER, TRACE_ID, TRANS_STATE, ACTION, MODULE, CLIENT_INFO, "LEVEL", SAMPLE_PERCENTAGE, RECORD_POLICY, LB_VID, LB_VIP, LB_VPORT, IN_BYTES, OUT_BYTES, USER_CLIENT_PORT, PROXY_USER, SERVICE_NAME, CAST(total_cpu_time AS INT) as TOTAL_CPU_TIME, TOP_INFO FROM SYS.GV$OB_PROCESSLIST WHERE SVR_IP = host_ip() AND SVR_PORT = rpc_port() )__"))) { LOG_ERROR("fail to set view_definition", K(ret)); } } diff --git a/src/share/inner_table/ob_inner_table_schema.501_550.cpp b/src/share/inner_table/ob_inner_table_schema.501_550.cpp index 21590a6120..15edca185f 100644 --- a/src/share/inner_table/ob_inner_table_schema.501_550.cpp +++ b/src/share/inner_table/ob_inner_table_schema.501_550.cpp @@ -2233,6 +2233,158 @@ int ObInnerTableSchema::all_user_proxy_role_info_history_schema(ObTableSchema &t return ret; } +int 
ObInnerTableSchema::all_service_schema(ObTableSchema &table_schema) +{ + int ret = OB_SUCCESS; + uint64_t column_id = OB_APP_MIN_COLUMN_ID - 1; + + //generated fields: + table_schema.set_tenant_id(OB_SYS_TENANT_ID); + table_schema.set_tablegroup_id(OB_SYS_TABLEGROUP_ID); + table_schema.set_database_id(OB_SYS_DATABASE_ID); + table_schema.set_table_id(OB_ALL_SERVICE_TID); + table_schema.set_rowkey_split_pos(0); + table_schema.set_is_use_bloomfilter(false); + table_schema.set_progressive_merge_num(0); + table_schema.set_rowkey_column_num(2); + table_schema.set_load_type(TABLE_LOAD_TYPE_IN_DISK); + table_schema.set_table_type(SYSTEM_TABLE); + table_schema.set_index_type(INDEX_TYPE_IS_NOT); + table_schema.set_def_type(TABLE_DEF_TYPE_INTERNAL); + + if (OB_SUCC(ret)) { + if (OB_FAIL(table_schema.set_table_name(OB_ALL_SERVICE_TNAME))) { + LOG_ERROR("fail to set table_name", K(ret)); + } + } + + if (OB_SUCC(ret)) { + if (OB_FAIL(table_schema.set_compress_func_name(OB_DEFAULT_COMPRESS_FUNC_NAME))) { + LOG_ERROR("fail to set compress_func_name", K(ret)); + } + } + table_schema.set_part_level(PARTITION_LEVEL_ZERO); + table_schema.set_charset_type(ObCharset::get_default_charset()); + table_schema.set_collation_type(ObCharset::get_default_collation(ObCharset::get_default_charset())); + + if (OB_SUCC(ret)) { + ObObj gmt_create_default; + ObObj gmt_create_default_null; + + gmt_create_default.set_ext(ObActionFlag::OP_DEFAULT_NOW_FLAG); + gmt_create_default_null.set_null(); + ADD_COLUMN_SCHEMA_TS_T("gmt_create", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObTimestampType, //column_type + CS_TYPE_BINARY,//collation_type + 0, //column length + -1, //column_precision + 6, //column_scale + true,//is nullable + false, //is_autoincrement + false, //is_on_update_for_timestamp + gmt_create_default_null, + gmt_create_default) + } + + if (OB_SUCC(ret)) { + ObObj gmt_modified_default; + ObObj gmt_modified_default_null; + + gmt_modified_default.set_ext(ObActionFlag::OP_DEFAULT_NOW_FLAG); + gmt_modified_default_null.set_null(); + ADD_COLUMN_SCHEMA_TS_T("gmt_modified", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObTimestampType, //column_type + CS_TYPE_BINARY,//collation_type + 0, //column length + -1, //column_precision + 6, //column_scale + true,//is nullable + false, //is_autoincrement + true, //is_on_update_for_timestamp + gmt_modified_default_null, + gmt_modified_default) + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("tenant_id", //column_name + ++column_id, //column_id + 1, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObIntType, //column_type + CS_TYPE_INVALID, //column_collation_type + sizeof(int64_t), //column_length + -1, //column_precision + -1, //column_scale + false, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("service_name_id", //column_name + ++column_id, //column_id + 2, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObIntType, //column_type + CS_TYPE_INVALID, //column_collation_type + sizeof(int64_t), //column_length + -1, //column_precision + -1, //column_scale + false, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("service_name", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObVarcharType, //column_type + CS_TYPE_INVALID, //column_collation_type + OB_SERVICE_NAME_LENGTH, //column_length + -1, //column_precision + -1, //column_scale + false, 
//is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("service_status", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObVarcharType, //column_type + CS_TYPE_INVALID, //column_collation_type + 64, //column_length + -1, //column_precision + -1, //column_scale + false, //is_nullable + false); //is_autoincrement + } + table_schema.set_index_using_type(USING_BTREE); + table_schema.set_row_store_type(ENCODING_ROW_STORE); + table_schema.set_store_format(OB_STORE_FORMAT_DYNAMIC_MYSQL); + table_schema.set_progressive_merge_round(1); + table_schema.set_storage_format_version(3); + table_schema.set_tablet_id(OB_ALL_SERVICE_TID); + table_schema.set_aux_lob_meta_tid(OB_ALL_SERVICE_AUX_LOB_META_TID); + table_schema.set_aux_lob_piece_tid(OB_ALL_SERVICE_AUX_LOB_PIECE_TID); + + table_schema.set_max_used_column_id(column_id); + return ret; +} + int ObInnerTableSchema::all_mview_dep_schema(ObTableSchema &table_schema) { int ret = OB_SUCCESS; diff --git a/src/share/inner_table/ob_inner_table_schema.50501_50550.cpp b/src/share/inner_table/ob_inner_table_schema.50501_50550.cpp index d60eb1b9a6..a6abc694cf 100644 --- a/src/share/inner_table/ob_inner_table_schema.50501_50550.cpp +++ b/src/share/inner_table/ob_inner_table_schema.50501_50550.cpp @@ -1375,6 +1375,141 @@ int ObInnerTableSchema::all_user_proxy_role_info_history_aux_lob_meta_schema(ObT return ret; } +int ObInnerTableSchema::all_service_aux_lob_meta_schema(ObTableSchema &table_schema) +{ + int ret = OB_SUCCESS; + uint64_t column_id = OB_APP_MIN_COLUMN_ID - 1; + + //generated fields: + table_schema.set_tenant_id(OB_SYS_TENANT_ID); + table_schema.set_tablegroup_id(OB_SYS_TABLEGROUP_ID); + table_schema.set_database_id(OB_SYS_DATABASE_ID); + table_schema.set_table_id(OB_ALL_SERVICE_AUX_LOB_META_TID); + table_schema.set_rowkey_split_pos(0); + table_schema.set_is_use_bloomfilter(false); + table_schema.set_progressive_merge_num(0); + table_schema.set_rowkey_column_num(2); + table_schema.set_load_type(TABLE_LOAD_TYPE_IN_DISK); + table_schema.set_table_type(AUX_LOB_META); + table_schema.set_index_type(INDEX_TYPE_IS_NOT); + table_schema.set_def_type(TABLE_DEF_TYPE_INTERNAL); + + if (OB_SUCC(ret)) { + if (OB_FAIL(table_schema.set_table_name(OB_ALL_SERVICE_AUX_LOB_META_TNAME))) { + LOG_ERROR("fail to set table_name", K(ret)); + } + } + + if (OB_SUCC(ret)) { + if (OB_FAIL(table_schema.set_compress_func_name(OB_DEFAULT_COMPRESS_FUNC_NAME))) { + LOG_ERROR("fail to set compress_func_name", K(ret)); + } + } + table_schema.set_part_level(PARTITION_LEVEL_ZERO); + table_schema.set_charset_type(ObCharset::get_default_charset()); + table_schema.set_collation_type(ObCharset::get_default_collation(ObCharset::get_default_charset())); + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("lob_id", //column_name + ++column_id, //column_id + 1, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObVarcharType, //column_type + CS_TYPE_BINARY, //column_collation_type + 16, //column_length + -1, //column_precision + -1, //column_scale + false, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("seq_id", //column_name + ++column_id, //column_id + 2, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObVarcharType, //column_type + CS_TYPE_BINARY, //column_collation_type + 8192, //column_length + -1, //column_precision + -1, //column_scale + false, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("binary_len", //column_name + 
++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObUInt32Type, //column_type + CS_TYPE_INVALID, //column_collation_type + sizeof(uint32_t), //column_length + -1, //column_precision + -1, //column_scale + false, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("char_len", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObUInt32Type, //column_type + CS_TYPE_INVALID, //column_collation_type + sizeof(uint32_t), //column_length + -1, //column_precision + -1, //column_scale + false, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("piece_id", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObUInt64Type, //column_type + CS_TYPE_INVALID, //column_collation_type + sizeof(uint64_t), //column_length + -1, //column_precision + -1, //column_scale + false, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("lob_data", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObVarcharType, //column_type + CS_TYPE_BINARY, //column_collation_type + 262144, //column_length + -1, //column_precision + -1, //column_scale + false, //is_nullable + false); //is_autoincrement + } + table_schema.set_index_using_type(USING_BTREE); + table_schema.set_row_store_type(ENCODING_ROW_STORE); + table_schema.set_store_format(OB_STORE_FORMAT_DYNAMIC_MYSQL); + table_schema.set_progressive_merge_round(1); + table_schema.set_storage_format_version(3); + table_schema.set_tablet_id(OB_ALL_SERVICE_AUX_LOB_META_TID); + table_schema.set_data_table_id(OB_ALL_SERVICE_TID); + + table_schema.set_max_used_column_id(column_id); + return ret; +} + int ObInnerTableSchema::all_mview_dep_aux_lob_meta_schema(ObTableSchema &table_schema) { int ret = OB_SUCCESS; diff --git a/src/share/inner_table/ob_inner_table_schema.60501_60550.cpp b/src/share/inner_table/ob_inner_table_schema.60501_60550.cpp index 04d44f0e63..16db453cec 100644 --- a/src/share/inner_table/ob_inner_table_schema.60501_60550.cpp +++ b/src/share/inner_table/ob_inner_table_schema.60501_60550.cpp @@ -925,6 +925,96 @@ int ObInnerTableSchema::all_user_proxy_role_info_history_aux_lob_piece_schema(Ob return ret; } +int ObInnerTableSchema::all_service_aux_lob_piece_schema(ObTableSchema &table_schema) +{ + int ret = OB_SUCCESS; + uint64_t column_id = OB_APP_MIN_COLUMN_ID - 1; + + //generated fields: + table_schema.set_tenant_id(OB_SYS_TENANT_ID); + table_schema.set_tablegroup_id(OB_SYS_TABLEGROUP_ID); + table_schema.set_database_id(OB_SYS_DATABASE_ID); + table_schema.set_table_id(OB_ALL_SERVICE_AUX_LOB_PIECE_TID); + table_schema.set_rowkey_split_pos(0); + table_schema.set_is_use_bloomfilter(false); + table_schema.set_progressive_merge_num(0); + table_schema.set_rowkey_column_num(1); + table_schema.set_load_type(TABLE_LOAD_TYPE_IN_DISK); + table_schema.set_table_type(AUX_LOB_PIECE); + table_schema.set_index_type(INDEX_TYPE_IS_NOT); + table_schema.set_def_type(TABLE_DEF_TYPE_INTERNAL); + + if (OB_SUCC(ret)) { + if (OB_FAIL(table_schema.set_table_name(OB_ALL_SERVICE_AUX_LOB_PIECE_TNAME))) { + LOG_ERROR("fail to set table_name", K(ret)); + } + } + + if (OB_SUCC(ret)) { + if (OB_FAIL(table_schema.set_compress_func_name(OB_DEFAULT_COMPRESS_FUNC_NAME))) { + LOG_ERROR("fail to set compress_func_name", K(ret)); + } + } + table_schema.set_part_level(PARTITION_LEVEL_ZERO); + 
table_schema.set_charset_type(ObCharset::get_default_charset()); + table_schema.set_collation_type(ObCharset::get_default_collation(ObCharset::get_default_charset())); + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("piece_id", //column_name + ++column_id, //column_id + 1, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObUInt64Type, //column_type + CS_TYPE_INVALID, //column_collation_type + sizeof(uint64_t), //column_length + -1, //column_precision + -1, //column_scale + false, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("data_len", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObUInt32Type, //column_type + CS_TYPE_INVALID, //column_collation_type + sizeof(uint32_t), //column_length + -1, //column_precision + -1, //column_scale + false, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("lob_data", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObVarcharType, //column_type + CS_TYPE_BINARY, //column_collation_type + 32, //column_length + -1, //column_precision + -1, //column_scale + false, //is_nullable + false); //is_autoincrement + } + table_schema.set_index_using_type(USING_BTREE); + table_schema.set_row_store_type(ENCODING_ROW_STORE); + table_schema.set_store_format(OB_STORE_FORMAT_DYNAMIC_MYSQL); + table_schema.set_progressive_merge_round(1); + table_schema.set_storage_format_version(3); + table_schema.set_tablet_id(OB_ALL_SERVICE_AUX_LOB_PIECE_TID); + table_schema.set_data_table_id(OB_ALL_SERVICE_TID); + + table_schema.set_max_used_column_id(column_id); + return ret; +} + int ObInnerTableSchema::all_mview_dep_aux_lob_piece_schema(ObTableSchema &table_schema) { int ret = OB_SUCCESS; diff --git a/src/share/inner_table/ob_inner_table_schema.h b/src/share/inner_table/ob_inner_table_schema.h index 039d421b8b..ae3d0adc42 100644 --- a/src/share/inner_table/ob_inner_table_schema.h +++ b/src/share/inner_table/ob_inner_table_schema.h @@ -626,6 +626,7 @@ public: static int all_user_proxy_info_history_schema(share::schema::ObTableSchema &table_schema); static int all_user_proxy_role_info_schema(share::schema::ObTableSchema &table_schema); static int all_user_proxy_role_info_history_schema(share::schema::ObTableSchema &table_schema); + static int all_service_schema(share::schema::ObTableSchema &table_schema); static int all_mview_dep_schema(share::schema::ObTableSchema &table_schema); static int all_scheduler_job_run_detail_v2_schema(share::schema::ObTableSchema &table_schema); static int tenant_virtual_all_table_schema(share::schema::ObTableSchema &table_schema); @@ -1079,6 +1080,7 @@ public: static int all_virtual_user_proxy_info_history_schema(share::schema::ObTableSchema &table_schema); static int all_virtual_user_proxy_role_info_schema(share::schema::ObTableSchema &table_schema); static int all_virtual_user_proxy_role_info_history_schema(share::schema::ObTableSchema &table_schema); + static int all_virtual_service_schema(share::schema::ObTableSchema &table_schema); static int all_virtual_tenant_resource_limit_schema(share::schema::ObTableSchema &table_schema); static int all_virtual_tenant_resource_limit_detail_schema(share::schema::ObTableSchema &table_schema); static int all_virtual_nic_info_schema(share::schema::ObTableSchema &table_schema); @@ -1354,6 +1356,7 @@ public: static int all_virtual_tracepoint_info_ora_schema(share::schema::ObTableSchema &table_schema); static int 
all_virtual_user_proxy_info_real_agent_ora_schema(share::schema::ObTableSchema &table_schema); static int all_virtual_user_proxy_role_info_real_agent_ora_schema(share::schema::ObTableSchema &table_schema); + static int all_virtual_service_ora_schema(share::schema::ObTableSchema &table_schema); static int all_virtual_tenant_resource_limit_ora_schema(share::schema::ObTableSchema &table_schema); static int all_virtual_tenant_resource_limit_detail_ora_schema(share::schema::ObTableSchema &table_schema); static int all_virtual_nic_info_ora_schema(share::schema::ObTableSchema &table_schema); @@ -1778,6 +1781,8 @@ public: static int gv_ob_tracepoint_info_schema(share::schema::ObTableSchema &table_schema); static int v_ob_tracepoint_info_schema(share::schema::ObTableSchema &table_schema); static int v_ob_compatibility_control_schema(share::schema::ObTableSchema &table_schema); + static int dba_ob_services_schema(share::schema::ObTableSchema &table_schema); + static int cdb_ob_services_schema(share::schema::ObTableSchema &table_schema); static int gv_ob_tenant_resource_limit_schema(share::schema::ObTableSchema &table_schema); static int v_ob_tenant_resource_limit_schema(share::schema::ObTableSchema &table_schema); static int gv_ob_tenant_resource_limit_detail_schema(share::schema::ObTableSchema &table_schema); @@ -2086,6 +2091,7 @@ public: static int dba_mvref_stmt_stats_ora_schema(share::schema::ObTableSchema &table_schema); static int user_mvref_stmt_stats_ora_schema(share::schema::ObTableSchema &table_schema); static int proxy_users_schema(share::schema::ObTableSchema &table_schema); + static int dba_ob_services_ora_schema(share::schema::ObTableSchema &table_schema); static int gv_ob_sql_audit_ora_schema(share::schema::ObTableSchema &table_schema); static int v_ob_sql_audit_ora_schema(share::schema::ObTableSchema &table_schema); static int gv_instance_schema(share::schema::ObTableSchema &table_schema); @@ -2573,6 +2579,7 @@ public: static int all_user_proxy_info_history_aux_lob_meta_schema(share::schema::ObTableSchema &table_schema); static int all_user_proxy_role_info_aux_lob_meta_schema(share::schema::ObTableSchema &table_schema); static int all_user_proxy_role_info_history_aux_lob_meta_schema(share::schema::ObTableSchema &table_schema); + static int all_service_aux_lob_meta_schema(share::schema::ObTableSchema &table_schema); static int all_mview_dep_aux_lob_meta_schema(share::schema::ObTableSchema &table_schema); static int all_scheduler_job_run_detail_v2_aux_lob_meta_schema(share::schema::ObTableSchema &table_schema); static int all_table_aux_lob_piece_schema(share::schema::ObTableSchema &table_schema); @@ -2873,6 +2880,7 @@ public: static int all_user_proxy_info_history_aux_lob_piece_schema(share::schema::ObTableSchema &table_schema); static int all_user_proxy_role_info_aux_lob_piece_schema(share::schema::ObTableSchema &table_schema); static int all_user_proxy_role_info_history_aux_lob_piece_schema(share::schema::ObTableSchema &table_schema); + static int all_service_aux_lob_piece_schema(share::schema::ObTableSchema &table_schema); static int all_mview_dep_aux_lob_piece_schema(share::schema::ObTableSchema &table_schema); static int all_scheduler_job_run_detail_v2_aux_lob_piece_schema(share::schema::ObTableSchema &table_schema); static int all_virtual_ash_all_virtual_ash_i1_schema(share::schema::ObTableSchema &table_schema); @@ -3392,6 +3400,7 @@ const schema_create_func sys_table_schema_creators [] = { ObInnerTableSchema::all_user_proxy_info_history_schema, 
ObInnerTableSchema::all_user_proxy_role_info_schema, ObInnerTableSchema::all_user_proxy_role_info_history_schema, + ObInnerTableSchema::all_service_schema, ObInnerTableSchema::all_mview_dep_schema, ObInnerTableSchema::all_scheduler_job_run_detail_v2_schema, NULL,}; @@ -3848,6 +3857,7 @@ const schema_create_func virtual_table_schema_creators [] = { ObInnerTableSchema::all_virtual_user_proxy_info_history_schema, ObInnerTableSchema::all_virtual_user_proxy_role_info_schema, ObInnerTableSchema::all_virtual_user_proxy_role_info_history_schema, + ObInnerTableSchema::all_virtual_service_schema, ObInnerTableSchema::all_virtual_tenant_resource_limit_schema, ObInnerTableSchema::all_virtual_tenant_resource_limit_detail_schema, ObInnerTableSchema::all_virtual_nic_info_schema, @@ -4133,6 +4143,7 @@ const schema_create_func virtual_table_schema_creators [] = { ObInnerTableSchema::all_virtual_tracepoint_info_ora_schema, ObInnerTableSchema::all_virtual_user_proxy_info_real_agent_ora_schema, ObInnerTableSchema::all_virtual_user_proxy_role_info_real_agent_ora_schema, + ObInnerTableSchema::all_virtual_service_ora_schema, ObInnerTableSchema::all_virtual_tenant_resource_limit_ora_schema, ObInnerTableSchema::all_virtual_tenant_resource_limit_detail_ora_schema, ObInnerTableSchema::all_virtual_nic_info_ora_schema, @@ -4644,6 +4655,8 @@ const schema_create_func sys_view_schema_creators [] = { ObInnerTableSchema::gv_ob_tracepoint_info_schema, ObInnerTableSchema::v_ob_tracepoint_info_schema, ObInnerTableSchema::v_ob_compatibility_control_schema, + ObInnerTableSchema::dba_ob_services_schema, + ObInnerTableSchema::cdb_ob_services_schema, ObInnerTableSchema::gv_ob_tenant_resource_limit_schema, ObInnerTableSchema::v_ob_tenant_resource_limit_schema, ObInnerTableSchema::gv_ob_tenant_resource_limit_detail_schema, @@ -4952,6 +4965,7 @@ const schema_create_func sys_view_schema_creators [] = { ObInnerTableSchema::dba_mvref_stmt_stats_ora_schema, ObInnerTableSchema::user_mvref_stmt_stats_ora_schema, ObInnerTableSchema::proxy_users_schema, + ObInnerTableSchema::dba_ob_services_ora_schema, ObInnerTableSchema::gv_ob_sql_audit_ora_schema, ObInnerTableSchema::v_ob_sql_audit_ora_schema, ObInnerTableSchema::gv_instance_schema, @@ -5541,6 +5555,7 @@ const uint64_t tenant_space_tables [] = { OB_ALL_USER_PROXY_INFO_HISTORY_TID, OB_ALL_USER_PROXY_ROLE_INFO_TID, OB_ALL_USER_PROXY_ROLE_INFO_HISTORY_TID, + OB_ALL_SERVICE_TID, OB_ALL_MVIEW_DEP_TID, OB_ALL_SCHEDULER_JOB_RUN_DETAIL_V2_TID, OB_TENANT_VIRTUAL_ALL_TABLE_TID, @@ -5774,6 +5789,7 @@ const uint64_t tenant_space_tables [] = { OB_ALL_VIRTUAL_SESSION_PS_INFO_TID, OB_ALL_VIRTUAL_TRACEPOINT_INFO_TID, OB_ALL_VIRTUAL_COMPATIBILITY_CONTROL_TID, + OB_ALL_VIRTUAL_SERVICE_TID, OB_ALL_VIRTUAL_TENANT_RESOURCE_LIMIT_TID, OB_ALL_VIRTUAL_TENANT_RESOURCE_LIMIT_DETAIL_TID, OB_ALL_VIRTUAL_NIC_INFO_TID, @@ -6056,6 +6072,7 @@ const uint64_t tenant_space_tables [] = { OB_ALL_VIRTUAL_TRACEPOINT_INFO_ORA_TID, OB_ALL_VIRTUAL_USER_PROXY_INFO_REAL_AGENT_ORA_TID, OB_ALL_VIRTUAL_USER_PROXY_ROLE_INFO_REAL_AGENT_ORA_TID, + OB_ALL_VIRTUAL_SERVICE_ORA_TID, OB_ALL_VIRTUAL_TENANT_RESOURCE_LIMIT_ORA_TID, OB_ALL_VIRTUAL_TENANT_RESOURCE_LIMIT_DETAIL_ORA_TID, OB_ALL_VIRTUAL_NIC_INFO_ORA_TID, @@ -6360,6 +6377,7 @@ const uint64_t tenant_space_tables [] = { OB_GV_OB_TRACEPOINT_INFO_TID, OB_V_OB_TRACEPOINT_INFO_TID, OB_V_OB_COMPATIBILITY_CONTROL_TID, + OB_DBA_OB_SERVICES_TID, OB_GV_OB_TENANT_RESOURCE_LIMIT_TID, OB_V_OB_TENANT_RESOURCE_LIMIT_TID, OB_GV_OB_TENANT_RESOURCE_LIMIT_DETAIL_TID, @@ -6667,6 +6685,7 @@ const uint64_t 
tenant_space_tables [] = { OB_DBA_MVREF_STMT_STATS_ORA_TID, OB_USER_MVREF_STMT_STATS_ORA_TID, OB_PROXY_USERS_TID, + OB_DBA_OB_SERVICES_ORA_TID, OB_GV_OB_SQL_AUDIT_ORA_TID, OB_V_OB_SQL_AUDIT_ORA_TID, OB_GV_INSTANCE_TID, @@ -7309,6 +7328,7 @@ const uint64_t tenant_space_tables [] = { OB_ALL_USER_PROXY_INFO_HISTORY_AUX_LOB_META_TID, OB_ALL_USER_PROXY_ROLE_INFO_AUX_LOB_META_TID, OB_ALL_USER_PROXY_ROLE_INFO_HISTORY_AUX_LOB_META_TID, + OB_ALL_SERVICE_AUX_LOB_META_TID, OB_ALL_MVIEW_DEP_AUX_LOB_META_TID, OB_ALL_SCHEDULER_JOB_RUN_DETAIL_V2_AUX_LOB_META_TID, OB_ALL_TABLE_AUX_LOB_PIECE_TID, @@ -7585,6 +7605,7 @@ const uint64_t tenant_space_tables [] = { OB_ALL_USER_PROXY_INFO_HISTORY_AUX_LOB_PIECE_TID, OB_ALL_USER_PROXY_ROLE_INFO_AUX_LOB_PIECE_TID, OB_ALL_USER_PROXY_ROLE_INFO_HISTORY_AUX_LOB_PIECE_TID, + OB_ALL_SERVICE_AUX_LOB_PIECE_TID, OB_ALL_MVIEW_DEP_AUX_LOB_PIECE_TID, OB_ALL_SCHEDULER_JOB_RUN_DETAIL_V2_AUX_LOB_PIECE_TID, }; @@ -7733,6 +7754,7 @@ const uint64_t all_ora_mapping_virtual_table_org_tables [] = { OB_ALL_VIRTUAL_LS_REPLICA_TASK_HISTORY_TID, OB_ALL_VIRTUAL_SESSION_PS_INFO_TID, OB_ALL_VIRTUAL_TRACEPOINT_INFO_TID, + OB_ALL_VIRTUAL_SERVICE_TID, OB_ALL_VIRTUAL_TENANT_RESOURCE_LIMIT_TID, OB_ALL_VIRTUAL_TENANT_RESOURCE_LIMIT_DETAIL_TID, OB_ALL_VIRTUAL_NIC_INFO_TID, }; @@ -7881,6 +7903,7 @@ const uint64_t all_ora_mapping_virtual_tables [] = { OB_ALL_VIRTUAL_SQL_AUDIT_O , OB_ALL_VIRTUAL_LS_REPLICA_TASK_HISTORY_ORA_TID , OB_ALL_VIRTUAL_SESSION_PS_INFO_ORA_TID , OB_ALL_VIRTUAL_TRACEPOINT_INFO_ORA_TID +, OB_ALL_VIRTUAL_SERVICE_ORA_TID , OB_ALL_VIRTUAL_TENANT_RESOURCE_LIMIT_ORA_TID , OB_ALL_VIRTUAL_TENANT_RESOURCE_LIMIT_DETAIL_ORA_TID , OB_ALL_VIRTUAL_NIC_INFO_ORA_TID @@ -8173,6 +8196,7 @@ const char* const tenant_space_table_names [] = { OB_ALL_USER_PROXY_INFO_HISTORY_TNAME, OB_ALL_USER_PROXY_ROLE_INFO_TNAME, OB_ALL_USER_PROXY_ROLE_INFO_HISTORY_TNAME, + OB_ALL_SERVICE_TNAME, OB_ALL_MVIEW_DEP_TNAME, OB_ALL_SCHEDULER_JOB_RUN_DETAIL_V2_TNAME, OB_TENANT_VIRTUAL_ALL_TABLE_TNAME, @@ -8406,6 +8430,7 @@ const char* const tenant_space_table_names [] = { OB_ALL_VIRTUAL_SESSION_PS_INFO_TNAME, OB_ALL_VIRTUAL_TRACEPOINT_INFO_TNAME, OB_ALL_VIRTUAL_COMPATIBILITY_CONTROL_TNAME, + OB_ALL_VIRTUAL_SERVICE_TNAME, OB_ALL_VIRTUAL_TENANT_RESOURCE_LIMIT_TNAME, OB_ALL_VIRTUAL_TENANT_RESOURCE_LIMIT_DETAIL_TNAME, OB_ALL_VIRTUAL_NIC_INFO_TNAME, @@ -8688,6 +8713,7 @@ const char* const tenant_space_table_names [] = { OB_ALL_VIRTUAL_TRACEPOINT_INFO_ORA_TNAME, OB_ALL_VIRTUAL_USER_PROXY_INFO_REAL_AGENT_ORA_TNAME, OB_ALL_VIRTUAL_USER_PROXY_ROLE_INFO_REAL_AGENT_ORA_TNAME, + OB_ALL_VIRTUAL_SERVICE_ORA_TNAME, OB_ALL_VIRTUAL_TENANT_RESOURCE_LIMIT_ORA_TNAME, OB_ALL_VIRTUAL_TENANT_RESOURCE_LIMIT_DETAIL_ORA_TNAME, OB_ALL_VIRTUAL_NIC_INFO_ORA_TNAME, @@ -8992,6 +9018,7 @@ const char* const tenant_space_table_names [] = { OB_GV_OB_TRACEPOINT_INFO_TNAME, OB_V_OB_TRACEPOINT_INFO_TNAME, OB_V_OB_COMPATIBILITY_CONTROL_TNAME, + OB_DBA_OB_SERVICES_TNAME, OB_GV_OB_TENANT_RESOURCE_LIMIT_TNAME, OB_V_OB_TENANT_RESOURCE_LIMIT_TNAME, OB_GV_OB_TENANT_RESOURCE_LIMIT_DETAIL_TNAME, @@ -9299,6 +9326,7 @@ const char* const tenant_space_table_names [] = { OB_DBA_MVREF_STMT_STATS_ORA_TNAME, OB_USER_MVREF_STMT_STATS_ORA_TNAME, OB_PROXY_USERS_TNAME, + OB_DBA_OB_SERVICES_ORA_TNAME, OB_GV_OB_SQL_AUDIT_ORA_TNAME, OB_V_OB_SQL_AUDIT_ORA_TNAME, OB_GV_INSTANCE_TNAME, @@ -9941,6 +9969,7 @@ const char* const tenant_space_table_names [] = { OB_ALL_USER_PROXY_INFO_HISTORY_AUX_LOB_META_TNAME, OB_ALL_USER_PROXY_ROLE_INFO_AUX_LOB_META_TNAME, 
OB_ALL_USER_PROXY_ROLE_INFO_HISTORY_AUX_LOB_META_TNAME, + OB_ALL_SERVICE_AUX_LOB_META_TNAME, OB_ALL_MVIEW_DEP_AUX_LOB_META_TNAME, OB_ALL_SCHEDULER_JOB_RUN_DETAIL_V2_AUX_LOB_META_TNAME, OB_ALL_TABLE_AUX_LOB_PIECE_TNAME, @@ -10217,6 +10246,7 @@ const char* const tenant_space_table_names [] = { OB_ALL_USER_PROXY_INFO_HISTORY_AUX_LOB_PIECE_TNAME, OB_ALL_USER_PROXY_ROLE_INFO_AUX_LOB_PIECE_TNAME, OB_ALL_USER_PROXY_ROLE_INFO_HISTORY_AUX_LOB_PIECE_TNAME, + OB_ALL_SERVICE_AUX_LOB_PIECE_TNAME, OB_ALL_MVIEW_DEP_AUX_LOB_PIECE_TNAME, OB_ALL_SCHEDULER_JOB_RUN_DETAIL_V2_AUX_LOB_PIECE_TNAME, }; @@ -10581,6 +10611,7 @@ const uint64_t restrict_access_virtual_tables[] = { OB_ALL_VIRTUAL_TRACEPOINT_INFO_ORA_TID, OB_ALL_VIRTUAL_USER_PROXY_INFO_REAL_AGENT_ORA_TID, OB_ALL_VIRTUAL_USER_PROXY_ROLE_INFO_REAL_AGENT_ORA_TID, + OB_ALL_VIRTUAL_SERVICE_ORA_TID, OB_ALL_VIRTUAL_TENANT_RESOURCE_LIMIT_ORA_TID, OB_ALL_VIRTUAL_TENANT_RESOURCE_LIMIT_DETAIL_ORA_TID, OB_ALL_VIRTUAL_NIC_INFO_ORA_TID, @@ -13115,6 +13146,14 @@ LOBMapping const lob_aux_table_mappings [] = { ObInnerTableSchema::all_user_proxy_role_info_history_aux_lob_piece_schema }, + { + OB_ALL_SERVICE_TID, + OB_ALL_SERVICE_AUX_LOB_META_TID, + OB_ALL_SERVICE_AUX_LOB_PIECE_TID, + ObInnerTableSchema::all_service_aux_lob_meta_schema, + ObInnerTableSchema::all_service_aux_lob_piece_schema + }, + { OB_ALL_MVIEW_DEP_TID, OB_ALL_MVIEW_DEP_AUX_LOB_META_TID, @@ -13168,12 +13207,12 @@ static inline int get_sys_table_lob_aux_schema(const uint64_t tid, } const int64_t OB_CORE_TABLE_COUNT = 4; -const int64_t OB_SYS_TABLE_COUNT = 297; -const int64_t OB_VIRTUAL_TABLE_COUNT = 825; -const int64_t OB_SYS_VIEW_COUNT = 916; -const int64_t OB_SYS_TENANT_TABLE_COUNT = 2043; +const int64_t OB_SYS_TABLE_COUNT = 298; +const int64_t OB_VIRTUAL_TABLE_COUNT = 827; +const int64_t OB_SYS_VIEW_COUNT = 919; +const int64_t OB_SYS_TENANT_TABLE_COUNT = 2049; const int64_t OB_CORE_SCHEMA_VERSION = 1; -const int64_t OB_BOOTSTRAP_SCHEMA_VERSION = 2046; +const int64_t OB_BOOTSTRAP_SCHEMA_VERSION = 2052; } // end namespace share } // end namespace oceanbase diff --git a/src/share/inner_table/ob_inner_table_schema.lob.cpp b/src/share/inner_table/ob_inner_table_schema.lob.cpp index 9d142bcc0e..f6df163d24 100644 --- a/src/share/inner_table/ob_inner_table_schema.lob.cpp +++ b/src/share/inner_table/ob_inner_table_schema.lob.cpp @@ -21,7 +21,7 @@ inner_lob_map_t inner_lob_map; bool lob_mapping_init() { int ret = OB_SUCCESS; - if (OB_FAIL(inner_lob_map.create(300, ObModIds::OB_INNER_LOB_HASH_SET))) { + if (OB_FAIL(inner_lob_map.create(301, ObModIds::OB_INNER_LOB_HASH_SET))) { SERVER_LOG(WARN, "fail to create inner lob map", K(ret)); } else { for (int64_t i = 0; OB_SUCC(ret) && i < ARRAYSIZEOF(lob_aux_table_mappings); ++i) { diff --git a/src/share/inner_table/ob_inner_table_schema_constants.h b/src/share/inner_table/ob_inner_table_schema_constants.h index 26653165ed..0938f63d6f 100644 --- a/src/share/inner_table/ob_inner_table_schema_constants.h +++ b/src/share/inner_table/ob_inner_table_schema_constants.h @@ -326,6 +326,7 @@ const uint64_t OB_ALL_USER_PROXY_INFO_TID = 512; // "__all_user_proxy_info" const uint64_t OB_ALL_USER_PROXY_INFO_HISTORY_TID = 513; // "__all_user_proxy_info_history" const uint64_t OB_ALL_USER_PROXY_ROLE_INFO_TID = 514; // "__all_user_proxy_role_info" const uint64_t OB_ALL_USER_PROXY_ROLE_INFO_HISTORY_TID = 515; // "__all_user_proxy_role_info_history" +const uint64_t OB_ALL_SERVICE_TID = 516; // "__all_service" const uint64_t OB_ALL_MVIEW_DEP_TID = 518; // "__all_mview_dep" const uint64_t 
OB_ALL_SCHEDULER_JOB_RUN_DETAIL_V2_TID = 519; // "__all_scheduler_job_run_detail_v2" const uint64_t OB_TENANT_VIRTUAL_ALL_TABLE_TID = 10001; // "__tenant_virtual_all_table" @@ -779,6 +780,7 @@ const uint64_t OB_ALL_VIRTUAL_USER_PROXY_INFO_TID = 12474; // "__all_virtual_use const uint64_t OB_ALL_VIRTUAL_USER_PROXY_INFO_HISTORY_TID = 12475; // "__all_virtual_user_proxy_info_history" const uint64_t OB_ALL_VIRTUAL_USER_PROXY_ROLE_INFO_TID = 12476; // "__all_virtual_user_proxy_role_info" const uint64_t OB_ALL_VIRTUAL_USER_PROXY_ROLE_INFO_HISTORY_TID = 12477; // "__all_virtual_user_proxy_role_info_history" +const uint64_t OB_ALL_VIRTUAL_SERVICE_TID = 12480; // "__all_virtual_service" const uint64_t OB_ALL_VIRTUAL_TENANT_RESOURCE_LIMIT_TID = 12481; // "__all_virtual_tenant_resource_limit" const uint64_t OB_ALL_VIRTUAL_TENANT_RESOURCE_LIMIT_DETAIL_TID = 12482; // "__all_virtual_tenant_resource_limit_detail" const uint64_t OB_ALL_VIRTUAL_NIC_INFO_TID = 12487; // "__all_virtual_nic_info" @@ -1054,6 +1056,7 @@ const uint64_t OB_ALL_VIRTUAL_SESSION_PS_INFO_ORA_TID = 15444; // "ALL_VIRTUAL_S const uint64_t OB_ALL_VIRTUAL_TRACEPOINT_INFO_ORA_TID = 15445; // "ALL_VIRTUAL_TRACEPOINT_INFO_ORA" const uint64_t OB_ALL_VIRTUAL_USER_PROXY_INFO_REAL_AGENT_ORA_TID = 15446; // "ALL_VIRTUAL_USER_PROXY_INFO_REAL_AGENT_ORA" const uint64_t OB_ALL_VIRTUAL_USER_PROXY_ROLE_INFO_REAL_AGENT_ORA_TID = 15447; // "ALL_VIRTUAL_USER_PROXY_ROLE_INFO_REAL_AGENT_ORA" +const uint64_t OB_ALL_VIRTUAL_SERVICE_ORA_TID = 15449; // "ALL_VIRTUAL_SERVICE_ORA" const uint64_t OB_ALL_VIRTUAL_TENANT_RESOURCE_LIMIT_ORA_TID = 15450; // "ALL_VIRTUAL_TENANT_RESOURCE_LIMIT_ORA" const uint64_t OB_ALL_VIRTUAL_TENANT_RESOURCE_LIMIT_DETAIL_ORA_TID = 15451; // "ALL_VIRTUAL_TENANT_RESOURCE_LIMIT_DETAIL_ORA" const uint64_t OB_ALL_VIRTUAL_NIC_INFO_ORA_TID = 15456; // "ALL_VIRTUAL_NIC_INFO_ORA" @@ -1478,6 +1481,8 @@ const uint64_t OB_V_OB_SESSION_PS_INFO_TID = 21542; // "V$OB_SESSION_PS_INFO" const uint64_t OB_GV_OB_TRACEPOINT_INFO_TID = 21543; // "GV$OB_TRACEPOINT_INFO" const uint64_t OB_V_OB_TRACEPOINT_INFO_TID = 21544; // "V$OB_TRACEPOINT_INFO" const uint64_t OB_V_OB_COMPATIBILITY_CONTROL_TID = 21545; // "V$OB_COMPATIBILITY_CONTROL" +const uint64_t OB_DBA_OB_SERVICES_TID = 21548; // "DBA_OB_SERVICES" +const uint64_t OB_CDB_OB_SERVICES_TID = 21549; // "CDB_OB_SERVICES" const uint64_t OB_GV_OB_TENANT_RESOURCE_LIMIT_TID = 21550; // "GV$OB_TENANT_RESOURCE_LIMIT" const uint64_t OB_V_OB_TENANT_RESOURCE_LIMIT_TID = 21551; // "V$OB_TENANT_RESOURCE_LIMIT" const uint64_t OB_GV_OB_TENANT_RESOURCE_LIMIT_DETAIL_TID = 21552; // "GV$OB_TENANT_RESOURCE_LIMIT_DETAIL" @@ -1786,6 +1791,7 @@ const uint64_t OB_USER_MVREF_CHANGE_STATS_ORA_TID = 25298; // "USER_MVREF_CHANGE const uint64_t OB_DBA_MVREF_STMT_STATS_ORA_TID = 25299; // "DBA_MVREF_STMT_STATS_ORA" const uint64_t OB_USER_MVREF_STMT_STATS_ORA_TID = 25300; // "USER_MVREF_STMT_STATS_ORA" const uint64_t OB_PROXY_USERS_TID = 25301; // "PROXY_USERS" +const uint64_t OB_DBA_OB_SERVICES_ORA_TID = 25302; // "DBA_OB_SERVICES_ORA" const uint64_t OB_GV_OB_SQL_AUDIT_ORA_TID = 28002; // "GV$OB_SQL_AUDIT_ORA" const uint64_t OB_V_OB_SQL_AUDIT_ORA_TID = 28003; // "V$OB_SQL_AUDIT_ORA" const uint64_t OB_GV_INSTANCE_TID = 28004; // "GV$INSTANCE" @@ -2273,6 +2279,7 @@ const uint64_t OB_ALL_USER_PROXY_INFO_AUX_LOB_META_TID = 50512; // "__all_user_p const uint64_t OB_ALL_USER_PROXY_INFO_HISTORY_AUX_LOB_META_TID = 50513; // "__all_user_proxy_info_history_aux_lob_meta" const uint64_t OB_ALL_USER_PROXY_ROLE_INFO_AUX_LOB_META_TID = 50514; // 
"__all_user_proxy_role_info_aux_lob_meta" const uint64_t OB_ALL_USER_PROXY_ROLE_INFO_HISTORY_AUX_LOB_META_TID = 50515; // "__all_user_proxy_role_info_history_aux_lob_meta" +const uint64_t OB_ALL_SERVICE_AUX_LOB_META_TID = 50516; // "__all_service_aux_lob_meta" const uint64_t OB_ALL_MVIEW_DEP_AUX_LOB_META_TID = 50518; // "__all_mview_dep_aux_lob_meta" const uint64_t OB_ALL_SCHEDULER_JOB_RUN_DETAIL_V2_AUX_LOB_META_TID = 50519; // "__all_scheduler_job_run_detail_v2_aux_lob_meta" const uint64_t OB_ALL_TABLE_AUX_LOB_PIECE_TID = 60003; // "__all_table_aux_lob_piece" @@ -2573,6 +2580,7 @@ const uint64_t OB_ALL_USER_PROXY_INFO_AUX_LOB_PIECE_TID = 60512; // "__all_user_ const uint64_t OB_ALL_USER_PROXY_INFO_HISTORY_AUX_LOB_PIECE_TID = 60513; // "__all_user_proxy_info_history_aux_lob_piece" const uint64_t OB_ALL_USER_PROXY_ROLE_INFO_AUX_LOB_PIECE_TID = 60514; // "__all_user_proxy_role_info_aux_lob_piece" const uint64_t OB_ALL_USER_PROXY_ROLE_INFO_HISTORY_AUX_LOB_PIECE_TID = 60515; // "__all_user_proxy_role_info_history_aux_lob_piece" +const uint64_t OB_ALL_SERVICE_AUX_LOB_PIECE_TID = 60516; // "__all_service_aux_lob_piece" const uint64_t OB_ALL_MVIEW_DEP_AUX_LOB_PIECE_TID = 60518; // "__all_mview_dep_aux_lob_piece" const uint64_t OB_ALL_SCHEDULER_JOB_RUN_DETAIL_V2_AUX_LOB_PIECE_TID = 60519; // "__all_scheduler_job_run_detail_v2_aux_lob_piece" const uint64_t OB_ALL_VIRTUAL_PLAN_CACHE_STAT_ALL_VIRTUAL_PLAN_CACHE_STAT_I1_TID = 14999; // "__all_virtual_plan_cache_stat" @@ -3079,6 +3087,7 @@ const char *const OB_ALL_USER_PROXY_INFO_TNAME = "__all_user_proxy_info"; const char *const OB_ALL_USER_PROXY_INFO_HISTORY_TNAME = "__all_user_proxy_info_history"; const char *const OB_ALL_USER_PROXY_ROLE_INFO_TNAME = "__all_user_proxy_role_info"; const char *const OB_ALL_USER_PROXY_ROLE_INFO_HISTORY_TNAME = "__all_user_proxy_role_info_history"; +const char *const OB_ALL_SERVICE_TNAME = "__all_service"; const char *const OB_ALL_MVIEW_DEP_TNAME = "__all_mview_dep"; const char *const OB_ALL_SCHEDULER_JOB_RUN_DETAIL_V2_TNAME = "__all_scheduler_job_run_detail_v2"; const char *const OB_TENANT_VIRTUAL_ALL_TABLE_TNAME = "__tenant_virtual_all_table"; @@ -3532,6 +3541,7 @@ const char *const OB_ALL_VIRTUAL_USER_PROXY_INFO_TNAME = "__all_virtual_user_pro const char *const OB_ALL_VIRTUAL_USER_PROXY_INFO_HISTORY_TNAME = "__all_virtual_user_proxy_info_history"; const char *const OB_ALL_VIRTUAL_USER_PROXY_ROLE_INFO_TNAME = "__all_virtual_user_proxy_role_info"; const char *const OB_ALL_VIRTUAL_USER_PROXY_ROLE_INFO_HISTORY_TNAME = "__all_virtual_user_proxy_role_info_history"; +const char *const OB_ALL_VIRTUAL_SERVICE_TNAME = "__all_virtual_service"; const char *const OB_ALL_VIRTUAL_TENANT_RESOURCE_LIMIT_TNAME = "__all_virtual_tenant_resource_limit"; const char *const OB_ALL_VIRTUAL_TENANT_RESOURCE_LIMIT_DETAIL_TNAME = "__all_virtual_tenant_resource_limit_detail"; const char *const OB_ALL_VIRTUAL_NIC_INFO_TNAME = "__all_virtual_nic_info"; @@ -3807,6 +3817,7 @@ const char *const OB_ALL_VIRTUAL_SESSION_PS_INFO_ORA_TNAME = "ALL_VIRTUAL_SESSIO const char *const OB_ALL_VIRTUAL_TRACEPOINT_INFO_ORA_TNAME = "ALL_VIRTUAL_TRACEPOINT_INFO"; const char *const OB_ALL_VIRTUAL_USER_PROXY_INFO_REAL_AGENT_ORA_TNAME = "ALL_VIRTUAL_USER_PROXY_INFO_REAL_AGENT"; const char *const OB_ALL_VIRTUAL_USER_PROXY_ROLE_INFO_REAL_AGENT_ORA_TNAME = "ALL_VIRTUAL_USER_PROXY_ROLE_INFO_REAL_AGENT"; +const char *const OB_ALL_VIRTUAL_SERVICE_ORA_TNAME = "ALL_VIRTUAL_SERVICE"; const char *const OB_ALL_VIRTUAL_TENANT_RESOURCE_LIMIT_ORA_TNAME = 
"ALL_VIRTUAL_TENANT_RESOURCE_LIMIT"; const char *const OB_ALL_VIRTUAL_TENANT_RESOURCE_LIMIT_DETAIL_ORA_TNAME = "ALL_VIRTUAL_TENANT_RESOURCE_LIMIT_DETAIL"; const char *const OB_ALL_VIRTUAL_NIC_INFO_ORA_TNAME = "ALL_VIRTUAL_NIC_INFO"; @@ -4231,6 +4242,8 @@ const char *const OB_V_OB_SESSION_PS_INFO_TNAME = "V$OB_SESSION_PS_INFO"; const char *const OB_GV_OB_TRACEPOINT_INFO_TNAME = "GV$OB_TRACEPOINT_INFO"; const char *const OB_V_OB_TRACEPOINT_INFO_TNAME = "V$OB_TRACEPOINT_INFO"; const char *const OB_V_OB_COMPATIBILITY_CONTROL_TNAME = "V$OB_COMPATIBILITY_CONTROL"; +const char *const OB_DBA_OB_SERVICES_TNAME = "DBA_OB_SERVICES"; +const char *const OB_CDB_OB_SERVICES_TNAME = "CDB_OB_SERVICES"; const char *const OB_GV_OB_TENANT_RESOURCE_LIMIT_TNAME = "GV$OB_TENANT_RESOURCE_LIMIT"; const char *const OB_V_OB_TENANT_RESOURCE_LIMIT_TNAME = "V$OB_TENANT_RESOURCE_LIMIT"; const char *const OB_GV_OB_TENANT_RESOURCE_LIMIT_DETAIL_TNAME = "GV$OB_TENANT_RESOURCE_LIMIT_DETAIL"; @@ -4539,6 +4552,7 @@ const char *const OB_USER_MVREF_CHANGE_STATS_ORA_TNAME = "USER_MVREF_CHANGE_STAT const char *const OB_DBA_MVREF_STMT_STATS_ORA_TNAME = "DBA_MVREF_STMT_STATS"; const char *const OB_USER_MVREF_STMT_STATS_ORA_TNAME = "USER_MVREF_STMT_STATS"; const char *const OB_PROXY_USERS_TNAME = "PROXY_USERS"; +const char *const OB_DBA_OB_SERVICES_ORA_TNAME = "DBA_OB_SERVICES"; const char *const OB_GV_OB_SQL_AUDIT_ORA_TNAME = "GV$OB_SQL_AUDIT"; const char *const OB_V_OB_SQL_AUDIT_ORA_TNAME = "V$OB_SQL_AUDIT"; const char *const OB_GV_INSTANCE_TNAME = "GV$INSTANCE"; @@ -5026,6 +5040,7 @@ const char *const OB_ALL_USER_PROXY_INFO_AUX_LOB_META_TNAME = "__all_user_proxy_ const char *const OB_ALL_USER_PROXY_INFO_HISTORY_AUX_LOB_META_TNAME = "__all_user_proxy_info_history_aux_lob_meta"; const char *const OB_ALL_USER_PROXY_ROLE_INFO_AUX_LOB_META_TNAME = "__all_user_proxy_role_info_aux_lob_meta"; const char *const OB_ALL_USER_PROXY_ROLE_INFO_HISTORY_AUX_LOB_META_TNAME = "__all_user_proxy_role_info_history_aux_lob_meta"; +const char *const OB_ALL_SERVICE_AUX_LOB_META_TNAME = "__all_service_aux_lob_meta"; const char *const OB_ALL_MVIEW_DEP_AUX_LOB_META_TNAME = "__all_mview_dep_aux_lob_meta"; const char *const OB_ALL_SCHEDULER_JOB_RUN_DETAIL_V2_AUX_LOB_META_TNAME = "__all_scheduler_job_run_detail_v2_aux_lob_meta"; const char *const OB_ALL_TABLE_AUX_LOB_PIECE_TNAME = "__all_table_aux_lob_piece"; @@ -5326,6 +5341,7 @@ const char *const OB_ALL_USER_PROXY_INFO_AUX_LOB_PIECE_TNAME = "__all_user_proxy const char *const OB_ALL_USER_PROXY_INFO_HISTORY_AUX_LOB_PIECE_TNAME = "__all_user_proxy_info_history_aux_lob_piece"; const char *const OB_ALL_USER_PROXY_ROLE_INFO_AUX_LOB_PIECE_TNAME = "__all_user_proxy_role_info_aux_lob_piece"; const char *const OB_ALL_USER_PROXY_ROLE_INFO_HISTORY_AUX_LOB_PIECE_TNAME = "__all_user_proxy_role_info_history_aux_lob_piece"; +const char *const OB_ALL_SERVICE_AUX_LOB_PIECE_TNAME = "__all_service_aux_lob_piece"; const char *const OB_ALL_MVIEW_DEP_AUX_LOB_PIECE_TNAME = "__all_mview_dep_aux_lob_piece"; const char *const OB_ALL_SCHEDULER_JOB_RUN_DETAIL_V2_AUX_LOB_PIECE_TNAME = "__all_scheduler_job_run_detail_v2_aux_lob_piece"; const char *const OB_ALL_VIRTUAL_PLAN_CACHE_STAT_ALL_VIRTUAL_PLAN_CACHE_STAT_I1_TNAME = "__idx_11003_all_virtual_plan_cache_stat_i1"; diff --git a/src/share/inner_table/ob_inner_table_schema_def.py b/src/share/inner_table/ob_inner_table_schema_def.py index 4337c3efb9..af412a0a3b 100644 --- a/src/share/inner_table/ob_inner_table_schema_def.py +++ b/src/share/inner_table/ob_inner_table_schema_def.py @@ 
-7278,7 +7278,26 @@ def_table_schema(**gen_history_table_def(515, all_user_proxy_role_info_def))
 # 513 : __all_user_proxy_info_history
 # 514 : __all_user_proxy_role_info
 # 515 : __all_user_proxy_role_info_history
-# 516 : __all_service
+def_table_schema(
+  owner = 'linqiucen.lqc',
+  table_name = '__all_service',
+  table_id = '516',
+  table_type = 'SYSTEM_TABLE',
+  gm_columns = ['gmt_create', 'gmt_modified'],
+  rowkey_columns = [
+    ('tenant_id', 'int'),
+    ('service_name_id', 'int'),
+  ],
+  in_tenant_space = True,
+  is_cluster_private = True,
+  meta_record_in_sys = False,
+
+  normal_columns = [
+    ('service_name', 'varchar:OB_SERVICE_NAME_LENGTH'),
+    ('service_status', 'varchar:64', 'false'),
+  ],
+)
+
 # 517 : __all_storage_io_usage

 def_table_schema(
@@ -14559,7 +14578,11 @@ def_table_schema(**gen_iterate_virtual_table_def(
 # 12478: __all_virtual_tablet_reorganize_history
 # 12479: __all_virtual_res_mgr_directive
-# 12480: __all_virtual_service
+def_table_schema(**gen_iterate_private_virtual_table_def(
+  table_id = '12480',
+  table_name = '__all_virtual_service',
+  in_tenant_space = True,
+  keywords = all_def_keywords['__all_service']))

 def_table_schema(
   owner = 'yanyuan.cxf',
@@ -15131,7 +15154,7 @@ def_table_schema(**no_direct_access(gen_oracle_mapping_virtual_table_def('15445'
 def_table_schema(**no_direct_access(gen_oracle_mapping_real_virtual_table_def('15446', all_def_keywords['__all_user_proxy_info'])))
 def_table_schema(**no_direct_access(gen_oracle_mapping_real_virtual_table_def('15447', all_def_keywords['__all_user_proxy_role_info'])))
 # 15448: idx_user_proxy_info_proxy_user_id_real_agent
-# 15449: __all_virtual_service
+def_table_schema(**no_direct_access(gen_oracle_mapping_virtual_table_def('15449', all_def_keywords['__all_virtual_service'])))
 def_table_schema(**no_direct_access(gen_oracle_mapping_virtual_table_def('15450', all_def_keywords['__all_virtual_tenant_resource_limit'])))
 def_table_schema(**no_direct_access(gen_oracle_mapping_virtual_table_def('15451', all_def_keywords['__all_virtual_tenant_resource_limit_detail'])))
@@ -24049,8 +24072,9 @@ SELECT
       IN_BYTES,
       OUT_BYTES,
       USER_CLIENT_PORT,
-      cast(total_cpu_time as SIGNED) as TOTAL_CPU_TIME,
       PROXY_USER,
+      SERVICE_NAME,
+      cast(total_cpu_time as SIGNED) as TOTAL_CPU_TIME,
       TOP_INFO
   FROM oceanbase.__all_virtual_processlist
 """.replace("\n", " ")
@@ -24101,8 +24125,9 @@ def_table_schema(
       IN_BYTES,
       OUT_BYTES,
       USER_CLIENT_PORT,
-      cast(total_cpu_time as SIGNED) as TOTAL_CPU_TIME,
       PROXY_USER,
+      SERVICE_NAME,
+      cast(total_cpu_time as SIGNED) as TOTAL_CPU_TIME,
       TOP_INFO
   FROM oceanbase.GV$OB_PROCESSLIST
   WHERE SVR_IP = host_ip() AND SVR_PORT = rpc_port()
@@ -35008,8 +35033,49 @@ def_table_schema(
 )
 # 21546: DBA_OB_RSRC_DIRECTIVES
 # 21547: CDB_OB_RSRC_DIRECTIVES
-# 21548: DBA_OB_SERVICES
-# 21549: CDB_OB_SERVICES
+
+def_table_schema(
+  owner = 'linqiucen.lqc',
+  table_name = 'DBA_OB_SERVICES',
+  table_id = '21548',
+  table_type = 'SYSTEM_VIEW',
+  gm_columns = [],
+  rowkey_columns = [],
+  normal_columns = [],
+  in_tenant_space = True,
+  view_definition =
+  """
+  SELECT
+    gmt_create AS CREATE_TIME,
+    gmt_modified AS MODIFIED_TIME,
+    SERVICE_NAME_ID,
+    SERVICE_NAME,
+    SERVICE_STATUS
+  FROM oceanbase.__all_virtual_service
+  WHERE TENANT_ID=EFFECTIVE_TENANT_ID();
+  """.replace("\n", " ")
+)
+
+def_table_schema(
+  owner = 'linqiucen.lqc',
+  table_name = 'CDB_OB_SERVICES',
+  table_id = '21549',
+  table_type = 'SYSTEM_VIEW',
+  gm_columns = [],
+  rowkey_columns = [],
+  normal_columns = [],
+  view_definition =
+  """
+  SELECT
+    TENANT_ID,
+    gmt_create
AS `CREATE_TIME`, + gmt_modified AS 'MODIFIED_TIME', + SERVICE_NAME_ID, + SERVICE_NAME, + SERVICE_STATUS + FROM oceanbase.__all_virtual_service + """.replace("\n", " ") +) def_table_schema( owner = 'cxf262476', @@ -56291,7 +56357,6 @@ def_table_schema( ) # 25301: PROXY_USERS -# 25302: DBA_OB_SERVICES # 25303: DBA_OB_STORAGE_IO_USAGE def_table_schema( owner = 'mingye.swj', @@ -56335,8 +56400,29 @@ where U1.TENANT_ID = U2.TENANT_ID and V.CLIENT_USER_ID = P.CLIENT_USER_ID """.replace("\n", " ") ) - - +def_table_schema( + owner = 'linqiucen.lqc', + table_name = 'DBA_OB_SERVICES', + name_postfix = '_ORA', + database_id = 'OB_ORA_SYS_DATABASE_ID', + table_id = '25302', + table_type = 'SYSTEM_VIEW', + gm_columns = [], + rowkey_columns = [], + normal_columns = [], + in_tenant_space = True, + view_definition = + """ + SELECT + gmt_create AS "CREATE_TIME", + gmt_modified AS "MODIFIED_TIME", + SERVICE_NAME_ID, + SERVICE_NAME, + SERVICE_STATUS + FROM SYS.ALL_VIRTUAL_SERVICE + WHERE TENANT_ID=EFFECTIVE_TENANT_ID(); + """.replace("\n", " ") +) # # 余留位置(此行之前占位) # 本区域占位建议:采用真实视图名进行占位 @@ -60996,6 +61082,7 @@ SELECT OUT_BYTES, USER_CLIENT_PORT, PROXY_USER, + SERVICE_NAME, CAST(total_cpu_time AS INT) as TOTAL_CPU_TIME, TOP_INFO FROM SYS.ALL_VIRTUAL_PROCESSLIST @@ -61050,6 +61137,7 @@ def_table_schema( OUT_BYTES, USER_CLIENT_PORT, PROXY_USER, + SERVICE_NAME, CAST(total_cpu_time AS INT) as TOTAL_CPU_TIME, TOP_INFO FROM SYS.GV$OB_PROCESSLIST diff --git a/src/share/inner_table/ob_inner_table_schema_misc.ipp b/src/share/inner_table/ob_inner_table_schema_misc.ipp index 8145ce45e9..5e0fa3e0bc 100644 --- a/src/share/inner_table/ob_inner_table_schema_misc.ipp +++ b/src/share/inner_table/ob_inner_table_schema_misc.ipp @@ -486,6 +486,7 @@ case OB_ALL_VIRTUAL_RECOVER_TABLE_JOB_HISTORY_TID: case OB_ALL_VIRTUAL_RESTORE_JOB_TID: case OB_ALL_VIRTUAL_RESTORE_JOB_HISTORY_TID: case OB_ALL_VIRTUAL_RESTORE_PROGRESS_TID: +case OB_ALL_VIRTUAL_SERVICE_TID: case OB_ALL_VIRTUAL_TABLE_OPT_STAT_GATHER_HISTORY_TID: case OB_ALL_VIRTUAL_TABLET_META_TABLE_TID: case OB_ALL_VIRTUAL_TABLET_REPLICA_CHECKSUM_TID: @@ -1460,6 +1461,24 @@ case OB_ALL_VIRTUAL_ZONE_MERGE_INFO_TID: break; } + case OB_ALL_VIRTUAL_SERVICE_TID: { + ObIteratePrivateVirtualTable *iter = NULL; + const bool meta_record_in_sys = false; + if (OB_FAIL(NEW_VIRTUAL_TABLE(ObIteratePrivateVirtualTable, iter))) { + SERVER_LOG(WARN, "create iterate private virtual table iterator failed", KR(ret)); + } else if (OB_FAIL(iter->init(OB_ALL_SERVICE_TID, meta_record_in_sys, index_schema, params))) { + SERVER_LOG(WARN, "iterate private virtual table iter init failed", KR(ret)); + iter->~ObIteratePrivateVirtualTable(); + allocator.free(iter); + iter = NULL; + } else { + vt_iter = iter; + } + break; + } + END_CREATE_VT_ITER_SWITCH_LAMBDA + + BEGIN_CREATE_VT_ITER_SWITCH_LAMBDA case OB_ALL_VIRTUAL_TABLE_OPT_STAT_GATHER_HISTORY_TID: { ObIteratePrivateVirtualTable *iter = NULL; const bool meta_record_in_sys = false; @@ -1475,9 +1494,7 @@ case OB_ALL_VIRTUAL_ZONE_MERGE_INFO_TID: } break; } - END_CREATE_VT_ITER_SWITCH_LAMBDA - BEGIN_CREATE_VT_ITER_SWITCH_LAMBDA case OB_ALL_VIRTUAL_TABLET_META_TABLE_TID: { ObIteratePrivateVirtualTable *iter = NULL; const bool meta_record_in_sys = false; @@ -4849,6 +4866,9 @@ case OB_ALL_RESTORE_JOB_HISTORY_AUX_LOB_PIECE_TID: case OB_ALL_RESTORE_PROGRESS_TID: case OB_ALL_RESTORE_PROGRESS_AUX_LOB_META_TID: case OB_ALL_RESTORE_PROGRESS_AUX_LOB_PIECE_TID: +case OB_ALL_SERVICE_TID: +case OB_ALL_SERVICE_AUX_LOB_META_TID: +case OB_ALL_SERVICE_AUX_LOB_PIECE_TID: case 
OB_ALL_SERVICE_EPOCH_TID: case OB_ALL_SERVICE_EPOCH_AUX_LOB_META_TID: case OB_ALL_SERVICE_EPOCH_AUX_LOB_PIECE_TID: diff --git a/src/share/inner_table/table_id_to_name b/src/share/inner_table/table_id_to_name index ebfc2fc851..a84b7a15b0 100644 --- a/src/share/inner_table/table_id_to_name +++ b/src/share/inner_table/table_id_to_name @@ -355,6 +355,7 @@ # 514: __all_user_proxy_role_info # 515: __all_user_proxy_role_info_history # 515: __all_user_proxy_role_info # BASE_TABLE_NAME +# 516: __all_service # 518: __all_mview_dep # 519: __all_scheduler_job_run_detail_v2 # 10001: __tenant_virtual_all_table @@ -1121,6 +1122,8 @@ # 12477: __all_virtual_user_proxy_role_info_history # 12477: __all_user_proxy_role_info # BASE_TABLE_NAME # 12477: __all_user_proxy_role_info_history # BASE_TABLE_NAME1 +# 12480: __all_virtual_service +# 12480: __all_service # BASE_TABLE_NAME # 12481: __all_virtual_tenant_resource_limit # 12482: __all_virtual_tenant_resource_limit_detail # 12487: __all_virtual_nic_info @@ -1726,6 +1729,9 @@ # 15446: __all_user_proxy_info # BASE_TABLE_NAME # 15447: ALL_VIRTUAL_USER_PROXY_ROLE_INFO_REAL_AGENT # 15447: __all_user_proxy_role_info # BASE_TABLE_NAME +# 15449: ALL_VIRTUAL_SERVICE +# 15449: __all_service # BASE_TABLE_NAME +# 15449: __all_virtual_service # BASE_TABLE_NAME1 # 15450: ALL_VIRTUAL_TENANT_RESOURCE_LIMIT # 15450: __all_virtual_tenant_resource_limit # BASE_TABLE_NAME # 15451: ALL_VIRTUAL_TENANT_RESOURCE_LIMIT_DETAIL @@ -2155,6 +2161,8 @@ # 21543: GV$OB_TRACEPOINT_INFO # 21544: V$OB_TRACEPOINT_INFO # 21545: V$OB_COMPATIBILITY_CONTROL +# 21548: DBA_OB_SERVICES +# 21549: CDB_OB_SERVICES # 21550: GV$OB_TENANT_RESOURCE_LIMIT # 21551: V$OB_TENANT_RESOURCE_LIMIT # 21552: GV$OB_TENANT_RESOURCE_LIMIT_DETAIL @@ -2463,6 +2471,7 @@ # 25299: DBA_MVREF_STMT_STATS # 25300: USER_MVREF_STMT_STATS # 25301: PROXY_USERS +# 25302: DBA_OB_SERVICES # 28002: GV$OB_SQL_AUDIT # 28003: V$OB_SQL_AUDIT # 28004: GV$INSTANCE diff --git a/src/share/ob_autoincrement_service.cpp b/src/share/ob_autoincrement_service.cpp index a7e45969d3..dfd32c12ff 100644 --- a/src/share/ob_autoincrement_service.cpp +++ b/src/share/ob_autoincrement_service.cpp @@ -2017,9 +2017,6 @@ int ObAutoIncInnerTableProxy::next_autoinc_value(const AutoincKey &key, if (sql_len >= OB_MAX_SQL_LENGTH || sql_len <= 0) { ret = OB_SIZE_OVERFLOW; LOG_WARN("failed to format sql. 
size not enough", K(ret), K(sql_len)); - } else if (GCTX.is_standby_cluster() && OB_SYS_TENANT_ID != exec_tenant_id) { - ret = OB_OP_NOT_ALLOW; - LOG_WARN("can't write sys table now", K(ret), K(exec_tenant_id)); } else if (OB_FAIL(trans.write(exec_tenant_id, sql, affected_rows))) { LOG_WARN("failed to write data", K(ret)); } else if (affected_rows != 1) { @@ -2315,9 +2312,6 @@ int ObAutoIncInnerTableProxy::sync_autoinc_value(const AutoincKey &key, table_name, sync_value, new_seq_value, OB_INVALID_TENANT_ID, table_id, column_id, inner_autoinc_version))) { LOG_WARN("failed to assign sql", K(ret)); - } else if (GCTX.is_standby_cluster() && OB_SYS_TENANT_ID != exec_tenant_id) { - ret = OB_OP_NOT_ALLOW; - LOG_WARN("can't write sys table now", K(ret), K(exec_tenant_id)); } else if (OB_FAIL((trans.write(exec_tenant_id, sql.ptr(), affected_rows)))) { LOG_WARN("failed to execute", K(sql), K(ret)); } else if (!is_single_row(affected_rows)) { @@ -2458,9 +2452,6 @@ int ObAutoIncInnerTableProxy::read_and_push_inner_table(const AutoincKey &key, table_name, new_seq_value, OB_INVALID_TENANT_ID, table_id, column_id, inner_autoinc_version))) { LOG_WARN("failed to assign sql", K(ret)); - } else if (GCTX.is_standby_cluster() && OB_SYS_TENANT_ID != exec_tenant_id) { - ret = OB_OP_NOT_ALLOW; - LOG_WARN("can't write sys table now", K(ret), K(exec_tenant_id)); } else if (OB_FAIL((trans.write(exec_tenant_id, sql.ptr(), affected_rows)))) { LOG_WARN("failed to execute", K(sql), K(ret)); } else if (!is_single_row(affected_rows)) { diff --git a/src/share/ob_errno.cpp b/src/share/ob_errno.cpp index be507cc942..5445e63936 100644 --- a/src/share/ob_errno.cpp +++ b/src/share/ob_errno.cpp @@ -26997,13 +26997,13 @@ static const _error _error_OB_SOURCE_LS_STATE_NOT_MATCH = { .error_solution = "Contact OceanBase Support", .mysql_errno = -1, .sqlstate = "HY000", - .str_error = "log restore source ls state not match, switchover to primary not allowed", - .str_user_error = "log restore source ls state not match, switchover to primary not allowed", + .str_error = "log restore source LS state not match, switchover to primary is not allowed", + .str_user_error = "log restore source LS state not match, switchover to primary is not allowed", .oracle_errno = 600, - .oracle_str_error = "ORA-00600: internal error code, arguments: -9089, log restore source ls state not match, switchover to primary not allowed", - .oracle_str_user_error = "ORA-00600: internal error code, arguments: -9089, log restore source ls state not match, switchover to primary not allowed", - .ob_str_error = "OBE-00600: internal error code, arguments: -9089, log restore source ls state not match, switchover to primary not allowed", - .ob_str_user_error = "OBE-00600: internal error code, arguments: -9089, log restore source ls state not match, switchover to primary not allowed" + .oracle_str_error = "ORA-00600: internal error code, arguments: -9089, log restore source LS state not match, switchover to primary is not allowed", + .oracle_str_user_error = "ORA-00600: internal error code, arguments: -9089, log restore source LS state not match, switchover to primary is not allowed", + .ob_str_error = "OBE-00600: internal error code, arguments: -9089, log restore source LS state not match, switchover to primary is not allowed", + .ob_str_user_error = "OBE-00600: internal error code, arguments: -9089, log restore source LS state not match, switchover to primary is not allowed" }; static const _error _error_OB_ESI_SESSION_NOT_EXIST = { .error_name = 
"OB_ESI_SESSION_NOT_EXIST", diff --git a/src/share/ob_errno.def b/src/share/ob_errno.def index 9bd33f342e..2bbc83c3b4 100755 --- a/src/share/ob_errno.def +++ b/src/share/ob_errno.def @@ -2281,7 +2281,7 @@ DEFINE_ERROR(OB_BACKUP_MAJOR_NOT_COVER_MINOR, -9085, -1, "HY000", "backup major DEFINE_ERROR(OB_BACKUP_ADVANCE_CHECKPOINT_TIMEOUT, -9086, -1, "HY000", "backup advance checkpoint by flush timeout"); DEFINE_ERROR(OB_CLOG_RECYCLE_BEFORE_ARCHIVE, -9087, -1, "HY000", "observer clog is recycled before archive"); DEFINE_ERROR(OB_SOURCE_TENANT_STATE_NOT_MATCH, -9088, -1, "HY000", "log restore source tenant state not match, switchover to primary not allowed"); -DEFINE_ERROR(OB_SOURCE_LS_STATE_NOT_MATCH, -9089, -1, "HY000", "log restore source ls state not match, switchover to primary not allowed"); +DEFINE_ERROR(OB_SOURCE_LS_STATE_NOT_MATCH, -9089, -1, "HY000", "log restore source LS state not match, switchover to primary is not allowed"); DEFINE_ERROR(OB_ESI_SESSION_NOT_EXIST, -9090, -1, "HY000", "obesi process session not exist"); diff --git a/src/share/ob_errno.h b/src/share/ob_errno.h index 81c5c994a3..2bb0096566 100755 --- a/src/share/ob_errno.h +++ b/src/share/ob_errno.h @@ -3848,7 +3848,7 @@ constexpr int OB_ERR_INVALID_DATE_MSG_FMT_V2 = -4219; #define OB_BACKUP_ADVANCE_CHECKPOINT_TIMEOUT__USER_ERROR_MSG "backup advance checkpoint by flush timeout" #define OB_CLOG_RECYCLE_BEFORE_ARCHIVE__USER_ERROR_MSG "observer clog is recycled before archive" #define OB_SOURCE_TENANT_STATE_NOT_MATCH__USER_ERROR_MSG "log restore source tenant state not match, switchover to primary not allowed" -#define OB_SOURCE_LS_STATE_NOT_MATCH__USER_ERROR_MSG "log restore source ls state not match, switchover to primary not allowed" +#define OB_SOURCE_LS_STATE_NOT_MATCH__USER_ERROR_MSG "log restore source LS state not match, switchover to primary is not allowed" #define OB_ESI_SESSION_NOT_EXIST__USER_ERROR_MSG "obesi process session not exist" #define OB_ALREADY_IN_ARCHIVE_MODE__USER_ERROR_MSG "Already in ARCHIVELOG mode" #define OB_ALREADY_IN_NOARCHIVE_MODE__USER_ERROR_MSG "Already in NOARCHIVELOG mode" @@ -8102,8 +8102,8 @@ constexpr int OB_ERR_INVALID_DATE_MSG_FMT_V2 = -4219; #define OB_CLOG_RECYCLE_BEFORE_ARCHIVE__OBE_USER_ERROR_MSG "OBE-00600: internal error code, arguments: -9087, observer clog is recycled before archive" #define OB_SOURCE_TENANT_STATE_NOT_MATCH__ORA_USER_ERROR_MSG "ORA-00600: internal error code, arguments: -9088, log restore source tenant state not match, switchover to primary not allowed" #define OB_SOURCE_TENANT_STATE_NOT_MATCH__OBE_USER_ERROR_MSG "OBE-00600: internal error code, arguments: -9088, log restore source tenant state not match, switchover to primary not allowed" -#define OB_SOURCE_LS_STATE_NOT_MATCH__ORA_USER_ERROR_MSG "ORA-00600: internal error code, arguments: -9089, log restore source ls state not match, switchover to primary not allowed" -#define OB_SOURCE_LS_STATE_NOT_MATCH__OBE_USER_ERROR_MSG "OBE-00600: internal error code, arguments: -9089, log restore source ls state not match, switchover to primary not allowed" +#define OB_SOURCE_LS_STATE_NOT_MATCH__ORA_USER_ERROR_MSG "ORA-00600: internal error code, arguments: -9089, log restore source LS state not match, switchover to primary is not allowed" +#define OB_SOURCE_LS_STATE_NOT_MATCH__OBE_USER_ERROR_MSG "OBE-00600: internal error code, arguments: -9089, log restore source LS state not match, switchover to primary is not allowed" #define OB_ESI_SESSION_NOT_EXIST__ORA_USER_ERROR_MSG "ORA-00600: internal error code, arguments: 
-9090, obesi process session not exist" #define OB_ESI_SESSION_NOT_EXIST__OBE_USER_ERROR_MSG "OBE-00600: internal error code, arguments: -9090, obesi process session not exist" #define OB_ALREADY_IN_ARCHIVE_MODE__ORA_USER_ERROR_MSG "ORA-00600: internal error code, arguments: -9091, Already in ARCHIVELOG mode" diff --git a/src/share/ob_event_history_table_operator.cpp b/src/share/ob_event_history_table_operator.cpp index 179269af8b..b2e3f20e79 100644 --- a/src/share/ob_event_history_table_operator.cpp +++ b/src/share/ob_event_history_table_operator.cpp @@ -81,22 +81,24 @@ ObAsyncTask *ObEventTableClearTask::deep_copy(char *buf, const int64_t buf_size) //////////////////////////////////////////////////////////////// ObEventHistoryTableOperator::ObEventTableUpdateTask::ObEventTableUpdateTask( - ObEventHistoryTableOperator &table_operator, const bool is_delete, const int64_t create_time) + ObEventHistoryTableOperator &table_operator, const bool is_delete, + const int64_t create_time, const uint64_t exec_tenant_id) : IObDedupTask(T_RS_ET_UPDATE), table_operator_(table_operator), is_delete_(is_delete), - create_time_(create_time) + create_time_(create_time), exec_tenant_id_(exec_tenant_id) { } int ObEventHistoryTableOperator::ObEventTableUpdateTask::init(const char *ptr, - const int64_t buf_size) + const int64_t buf_size, const uint64_t exec_tenant_id) { int ret = OB_SUCCESS; - if (OB_ISNULL(ptr) || buf_size <= 0) { - LOG_WARN("invalid argument", KP(ptr), K(buf_size)); + if (OB_ISNULL(ptr) || OB_UNLIKELY(buf_size <= 0 || !is_valid_tenant_id(exec_tenant_id))) { + LOG_WARN("invalid argument", KP(ptr), K(buf_size), K(exec_tenant_id)); ret = OB_INVALID_ARGUMENT; } else { sql_.assign_ptr(ptr, static_cast(buf_size)); + exec_tenant_id_ = exec_tenant_id; } return ret; @@ -105,7 +107,7 @@ int ObEventHistoryTableOperator::ObEventTableUpdateTask::init(const char *ptr, bool ObEventHistoryTableOperator::ObEventTableUpdateTask::is_valid() const { - return table_operator_.is_inited() && !sql_.empty(); + return table_operator_.is_inited() && !sql_.empty() && is_valid_tenant_id(exec_tenant_id_); } int64_t ObEventHistoryTableOperator::ObEventTableUpdateTask::hash() const @@ -135,8 +137,9 @@ bool ObEventHistoryTableOperator::ObEventTableUpdateTask::operator==( is_equal = true; } else { is_equal = (&(this->table_operator_) == &(o.table_operator_)) - && this->sql_ == o.sql_ && this->is_delete_ == o.is_delete_; - //no need take care of create_time + && this->sql_ == o.sql_ && this->is_delete_ == o.is_delete_ + && this->exec_tenant_id_ == o.exec_tenant_id_; + //no need take care of create_time } } return is_equal; @@ -152,7 +155,7 @@ IObDedupTask *ObEventHistoryTableOperator::ObEventTableUpdateTask::deep_copy( LOG_WARN_RET(OB_INVALID_ARGUMENT, "invalid argument", "buf", reinterpret_cast(buf), K(buf_size), "need size", get_deep_copy_size()); } else { - task = new (buf) ObEventTableUpdateTask(table_operator_, is_delete_, create_time_); + task = new (buf) ObEventTableUpdateTask(table_operator_, is_delete_, create_time_, exec_tenant_id_); char *ptr = buf + sizeof(ObEventTableUpdateTask); MEMCPY(ptr, sql_.ptr(), sql_.length()); task->assign_ptr(ptr, sql_.length()); @@ -166,8 +169,8 @@ int ObEventHistoryTableOperator::ObEventTableUpdateTask::process() if (!this->is_valid()) { ret = OB_INNER_STAT_ERROR; LOG_WARN("invalid event task update task", "task", *this, K(ret)); - } else if (OB_FAIL(table_operator_.process_task(sql_, is_delete_, create_time_))) { - LOG_WARN("process_task failed", K_(sql), K_(is_delete), KR(ret), 
K(create_time_));
+  } else if (OB_FAIL(table_operator_.process_task(sql_, is_delete_, create_time_, exec_tenant_id_))) {
+    LOG_WARN("process_task failed", KR(ret), K_(sql), K_(is_delete), K(create_time_), K_(exec_tenant_id));
   }
   return ret;
 }
@@ -269,25 +272,27 @@ int ObEventHistoryTableOperator::default_async_delete()
   return ret;
 }

-int ObEventHistoryTableOperator::add_task(const ObSqlString &sql, const bool is_delete, const int64_t create_time)
+int ObEventHistoryTableOperator::add_task(const ObSqlString &sql, const bool is_delete,
+    const int64_t create_time, const uint64_t exec_tenant_id)
 {
   int ret = OB_SUCCESS;
   if (!inited_) {
     ret = OB_NOT_INIT;
-    LOG_WARN("not init", K(ret));
-  } else if (sql.empty()) {
+    LOG_WARN("not init", K(ret));
+  } else if (OB_UNLIKELY(sql.empty() || !is_valid_tenant_id(exec_tenant_id))) {
     ret = OB_INVALID_ARGUMENT;
-    LOG_WARN("sql is empty", K(sql), K(ret));
+    LOG_WARN("sql is empty", K(sql), K(exec_tenant_id), K(ret));
   } else if (stopped_) {
     ret = OB_CANCELED;
     LOG_WARN("observer is stopped, cancel task", K(sql), K(is_delete), K(ret));
   } else {
     int64_t new_create_time = OB_INVALID_TIMESTAMP == create_time ? ObTimeUtility::current_time() : create_time;
-    ObEventTableUpdateTask task(*this, is_delete, new_create_time);
-    if (OB_FAIL(task.init(sql.ptr(), sql.length() + 1))) { // extra byte for '\0'
+    ObEventTableUpdateTask task(*this, is_delete, new_create_time, exec_tenant_id);
+    if (OB_FAIL(task.init(sql.ptr(), sql.length() + 1, exec_tenant_id))) { // extra byte for '\0'
       LOG_WARN("task init error", K(ret));
-    } else if (OB_FAIL(event_queue_.add_task(task))) {
+    }
+    if (FAILEDx(event_queue_.add_task(task))) {
       if (OB_EAGAIN == ret) {
         ret = OB_ERR_UNEXPECTED;
         LOG_WARN("duplicated task is not expected to exist", K(task), K(ret));
@@ -298,11 +303,11 @@ int ObEventHistoryTableOperator::add_task(const ObSqlString &sql, const bool is_
       // do nothing
     }
   }
-
   return ret;
 }

-int ObEventHistoryTableOperator::process_task(const ObString &sql, const bool is_delete, const int64_t create_time)
+int ObEventHistoryTableOperator::process_task(const ObString &sql, const bool is_delete,
+    const int64_t create_time, const uint64_t exec_tenant_id)
 {
   int ret = OB_SUCCESS;
@@ -310,9 +315,9 @@ int ObEventHistoryTableOperator::process_task(const ObString &sql, const bool is
   if (!inited_) {
     ret = OB_NOT_INIT;
     LOG_WARN("not init", K(ret));
-  } else if (sql.empty()) {
+  } else if (OB_UNLIKELY(sql.empty() || !is_valid_tenant_id(exec_tenant_id))) {
     ret = OB_INVALID_ARGUMENT;
-    LOG_WARN("sql is empty", K(sql), K(ret));
+    LOG_WARN("sql is empty", K(sql), K(exec_tenant_id), K(ret));
   } else {
     if (stopped_) {
       ret = OB_CANCELED;
@@ -320,7 +325,7 @@ int ObEventHistoryTableOperator::process_task(const ObString &sql, const bool is
     } else {
       int64_t affected_rows = 0;
       if (!is_delete) {
-        if (OB_FAIL(proxy_->write(sql.ptr(), affected_rows))) {
+        if (OB_FAIL(proxy_->write(exec_tenant_id, sql.ptr(), affected_rows))) {
           LOG_WARN("execute sql failed", K(sql), K(ret));
         } else if (!is_single_row(affected_rows)) {
           ret = OB_ERR_UNEXPECTED;
diff --git a/src/share/ob_event_history_table_operator.h b/src/share/ob_event_history_table_operator.h
index 255c118954..5715bda868 100644
--- a/src/share/ob_event_history_table_operator.h
+++ b/src/share/ob_event_history_table_operator.h
@@ -71,26 +71,27 @@ public:
 {
 public:
   ObEventTableUpdateTask(ObEventHistoryTableOperator &table_operator, const bool is_delete,
-      const int64_t create_time);
+      const int64_t create_time, const uint64_t exec_tenant_id);
   virtual
~ObEventTableUpdateTask() {} - int init(const char *ptr, const int64_t buf_size); + int init(const char *ptr, const int64_t buf_size, const uint64_t exec_tenant_id = OB_SYS_TENANT_ID); bool is_valid() const; virtual int64_t hash() const; virtual bool operator==(const common::IObDedupTask &other) const; virtual int64_t get_deep_copy_size() const { return sizeof(*this) + sql_.length(); } virtual common::IObDedupTask *deep_copy(char *buf, const int64_t buf_size) const; virtual int64_t get_abs_expired_time() const { return 0; } + virtual uint64_t get_exec_tenant_id() const { return exec_tenant_id_; } virtual int process(); public: void assign_ptr(char *ptr, const int64_t buf_size) { sql_.assign_ptr(ptr, static_cast(buf_size));} - - TO_STRING_KV(K_(sql), K_(is_delete), K_(create_time)); + TO_STRING_KV(K_(sql), K_(is_delete), K_(create_time), K_(exec_tenant_id)); private: ObEventHistoryTableOperator &table_operator_; common::ObString sql_; bool is_delete_; int64_t create_time_; + uint64_t exec_tenant_id_; DISALLOW_COPY_AND_ASSIGN(ObEventTableUpdateTask); }; @@ -114,6 +115,10 @@ public: // number of others should not less than 0, or more than 13 // if number of others is not 13, should be even, every odd of them are name, every even of them are value template + int async_add_tenant_event(const uint64_t tenant_id, const char *module, const char *event, + const int64_t event_timestamp, const int user_ret, const int64_t cost_sec, Rest &&...others); + // number of others should not less than 0, or more than 13 + template int add_event_with_retry(const char *module, const char *event, Rest &&...others); virtual int async_delete() = 0; @@ -141,7 +146,7 @@ protected: void set_event_table(const char* tname) { event_table_name_ = tname; } const char *get_event_table() const { return event_table_name_; } int add_task(const common::ObSqlString &sql, const bool is_delete = false, - const int64_t create_time = OB_INVALID_TIMESTAMP); + const int64_t create_time = OB_INVALID_TIMESTAMP, const uint64_t exec_tenant_id = OB_SYS_TENANT_ID); int gen_event_ts(int64_t &event_ts); protected: static constexpr const char * names[7] = {"name1", "name2", "name3", "name4", "name5", "name6", "extra_info"}; // only valid in compile time @@ -153,7 +158,7 @@ protected: static const int64_t TASK_QUEUE_SIZE = 20 *1024; static const int64_t MAX_RETRY_COUNT = 12; - virtual int process_task(const common::ObString &sql, const bool is_delete, const int64_t create_time); + virtual int process_task(const common::ObString &sql, const bool is_delete, const int64_t create_time, const uint64_t exec_tenant_id); private: bool inited_; volatile bool stopped_; @@ -320,6 +325,80 @@ int ObEventHistoryTableOperator::sync_add_event(const char *module, const char * return ret; } +template +int ObEventHistoryTableOperator::async_add_tenant_event( + const uint64_t tenant_id, const char *module, const char *event, const int64_t event_timestamp, + const int user_ret, const int64_t cost_sec, Rest &&...others) +{ + static_assert(sizeof...(others) >= 0 && sizeof...(others) <= 13 && + (sizeof...(others) == 13 || (sizeof...(others) % 2 == 0)), + "max support 6 pair of name-value args and 1 extra info, if number of others is not 13, should be even"); + int ret = common::OB_SUCCESS; + int64_t affected_rows = 0; + common::ObSqlString sql; + share::ObDMLSqlSplicer dml; + char ip_buf[common::MAX_IP_ADDR_LENGTH]; + uint64_t compat_version = 0; + if (OB_FAIL(GET_MIN_DATA_VERSION(tenant_id, compat_version))) { + SHARE_LOG(WARN, "fail to get data version", KR(ret), 
K(tenant_id)); + } else if (OB_UNLIKELY(!(compat_version >= DATA_VERSION_4_3_3_0 + || (compat_version >= DATA_VERSION_4_2_2_0 && compat_version < DATA_VERSION_4_3_0_0) + || (compat_version >= MOCK_DATA_VERSION_4_2_1_8 && compat_version < DATA_VERSION_4_2_2_0)))) { + ret = common::OB_NOT_SUPPORTED; + SHARE_LOG(WARN, "only (version >= 4_2_1_8 and version < 4_2_2_0) " + "or version >= 4_2_2_0 and version < 4_3_0_0 " + "or version >= 4_3_3_0 support this operation", KR(ret), K(compat_version)); + } else if (OB_UNLIKELY(!inited_)) { + ret = common::OB_NOT_INIT; + SHARE_LOG(WARN, "not init", KR(ret)); + } else if (OB_ISNULL(module) || OB_ISNULL(event)) { + ret = common::OB_INVALID_ARGUMENT; + SHARE_LOG(WARN, "neither module or event can be NULL", KR(ret), KP(module), KP(event)); + } else if (OB_UNLIKELY(OB_INVALID_TENANT_ID == tenant_id)) { + ret = OB_INVALID_ARGUMENT; + SHARE_LOG(WARN, "tenant_id is invalid", KR(ret), K(tenant_id)); + } else if (OB_FAIL(dml.add_gmt_create(event_timestamp)) + || OB_FAIL(dml.add_column("module", module)) + || OB_FAIL(dml.add_column("event", event) + || OB_FAIL(dml.add_column("tenant_id", tenant_id)) + || OB_FAIL(dml.add_column("ret_code", user_ret)) + || OB_FAIL(dml.add_column("cost_time", cost_sec)))) { + SHARE_LOG(WARN, "add column failed", KR(ret), K(event_timestamp), "module", module, "event", event, K(tenant_id), KR(user_ret), K(cost_sec)); + } else if (OB_FAIL((add_event_helper_<0, false>(dml, std::forward(others)...)))) {// recursive call + } else if (common::OB_SUCCESS == ret && self_addr_.is_valid()) { + (void)self_addr_.ip_to_string(ip_buf, common::MAX_IP_ADDR_LENGTH); + if (OB_FAIL(dml.add_column("svr_ip", ip_buf)) + || OB_FAIL(dml.add_column("svr_port", self_addr_.get_port()))) { + SHARE_LOG(WARN, "add column failed", KR(ret), K(ip_buf), K(self_addr_.get_port())); + } + } + + if (OB_SUCC(ret)) { + const int64_t MAX_TRACE_ID_LENGTH = 64; + char trace_id_buf[MAX_TRACE_ID_LENGTH] = {0}; + ::oceanbase::common::ObCurTraceId::TraceId *trace_id = ObCurTraceId::get_trace_id(); + if (OB_NOT_NULL(trace_id)) { + if (FALSE_IT(trace_id->to_string(trace_id_buf, sizeof(trace_id_buf)))) { + } else if (OB_FAIL(dml.add_column("trace_id", trace_id_buf))) { + SHARE_LOG(WARN, "add trace_id column failed", KR(ret), K(trace_id_buf), KPC(trace_id)); + } + } + } + + if (OB_SUCC(ret)) { + uint64_t exec_tenant_id = gen_meta_tenant_id(tenant_id); + if (OB_FAIL(dml.splice_insert_sql(event_table_name_, sql))) { + SHARE_LOG(WARN, "splice_insert_sql failed", KR(ret), K(sql)); + } else if (OB_FAIL(add_task(sql, false, OB_INVALID_TIMESTAMP, exec_tenant_id))) { + SHARE_LOG(WARN, "add_task failed", K(sql), K(exec_tenant_id), KR(ret)); + } else { + ObTaskController::get().allow_next_syslog(); + SHARE_LOG(INFO, "event table async add event success", KR(ret), K_(event_table_name), K(sql), K(tenant_id), K(exec_tenant_id)); + } + } + return ret; +} + template int ObEventHistoryTableOperator::add_event_with_retry(const char *module, const char *event, Rest &&...others) { diff --git a/src/share/ob_log_restore_proxy.cpp b/src/share/ob_log_restore_proxy.cpp index 2788044649..61b99f3f65 100644 --- a/src/share/ob_log_restore_proxy.cpp +++ b/src/share/ob_log_restore_proxy.cpp @@ -31,6 +31,7 @@ #include "share/config/ob_server_config.h" #include "logservice/palf/palf_options.h" #include "share/oracle_errno.h" +#include "share/backup/ob_log_restore_struct.h" #include namespace oceanbase @@ -265,6 +266,32 @@ int ObLogRestoreProxyUtil::init(const uint64_t tenant_id, } return ret; } +int 
ObLogRestoreProxyUtil::init_with_service_attr( + const uint64_t tenant_id, + const ObRestoreSourceServiceAttr *service_attr) +{ + int ret = OB_SUCCESS; + if (OB_ISNULL(service_attr)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("service_attr is null", KR(ret), KP(service_attr)); + } else if (OB_UNLIKELY(!service_attr->is_valid() || !is_valid_tenant_id(tenant_id))) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("service attr or tenant id is invalid", KR(ret), KPC(service_attr), K(tenant_id)); + } else { + const char *db_name = service_attr->user_.mode_ == common::ObCompatibilityMode::MYSQL_MODE ? OB_SYS_DATABASE_NAME : OB_ORA_SYS_SCHEMA_NAME; + ObSqlString user; + char passwd[OB_MAX_PASSWORD_LENGTH + 1] = {0}; + if (OB_FAIL(service_attr->get_password(passwd, sizeof(passwd)))) { + LOG_WARN("fail to get password", KR(ret), K(service_attr)); + } else if (OB_FAIL(service_attr->get_user_str_(user))) { + LOG_WARN("fail to get user str", KR(ret), K(service_attr)); + } else if (OB_FAIL(init(tenant_id, service_attr->addr_, + user.ptr(), passwd, db_name))) { + LOG_WARN("fail to init proxy_util", KR(ret), KPC(service_attr)); + } + } + return ret; +} int ObLogRestoreProxyUtil::get_sql_proxy(common::ObMySQLProxy *&proxy) { @@ -640,11 +667,12 @@ bool ObLogRestoreProxyUtil::is_user_changed_(const char *user_name, const char * return changed; } -int ObLogRestoreProxyUtil::get_tenant_info(ObTenantRole &role, schema::ObTenantStatus &status) +int ObLogRestoreProxyUtil::get_tenant_info(ObTenantRole &role, schema::ObTenantStatus &status, ObTenantSwitchoverStatus &switchover_status) { int ret = OB_SUCCESS; const char *TENANT_ROLE = "TENANT_ROLE"; const char *TENANT_STATUS = "STATUS"; + const char *SWITCHOVER_STATUS = "SWITCHOVER_STATUS"; common::ObMySQLProxy *proxy = &sql_proxy_; if (OB_UNLIKELY(!inited_)) { ret = OB_NOT_INIT; @@ -653,8 +681,8 @@ int ObLogRestoreProxyUtil::get_tenant_info(ObTenantRole &role, schema::ObTenantS SMART_VAR(common::ObMySQLProxy::MySQLResult, res) { common::sqlclient::ObMySQLResult *result = NULL; common::ObSqlString sql; - const char *GET_TENANT_INFO_SQL = "SELECT %s, %s FROM %s"; - if (OB_FAIL(sql.append_fmt(GET_TENANT_INFO_SQL, TENANT_ROLE, TENANT_STATUS, OB_DBA_OB_TENANTS_TNAME))) { + const char *GET_TENANT_INFO_SQL = "SELECT %s, %s, %s FROM %s"; + if (OB_FAIL(sql.append_fmt(GET_TENANT_INFO_SQL, TENANT_ROLE, TENANT_STATUS, SWITCHOVER_STATUS, OB_DBA_OB_TENANTS_TNAME))) { LOG_WARN("append_fmt failed"); } else if (OB_FAIL(proxy->read(res, sql.ptr()))) { LOG_WARN("excute sql failed", K(sql)); @@ -664,13 +692,20 @@ int ObLogRestoreProxyUtil::get_tenant_info(ObTenantRole &role, schema::ObTenantS } else if (OB_FAIL(result->next())) { LOG_WARN("next failed"); } else { + ObString switchover_status_str; ObString status_str; ObString role_str; EXTRACT_VARCHAR_FIELD_MYSQL(*result, TENANT_ROLE, role_str); EXTRACT_VARCHAR_FIELD_MYSQL(*result, TENANT_STATUS, status_str); + EXTRACT_VARCHAR_FIELD_MYSQL(*result, SWITCHOVER_STATUS, switchover_status_str); + ObTenantSwitchoverStatus so_status(switchover_status_str); + switchover_status = so_status; if (OB_SUCC(ret)) { if (OB_FAIL(schema::get_tenant_status(status_str, status))) { LOG_WARN("get tenant status failed"); + } else if (OB_UNLIKELY(!switchover_status.is_valid())) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("invalid switchover status", KR(ret), K(switchover_status_str), K(so_status), K(switchover_status)); } else { role = ObTenantRole(role_str.ptr()); } diff --git a/src/share/ob_log_restore_proxy.h b/src/share/ob_log_restore_proxy.h index 
702c8418be..831c0e38ec 100644 --- a/src/share/ob_log_restore_proxy.h +++ b/src/share/ob_log_restore_proxy.h @@ -26,6 +26,7 @@ #include "share/ob_tenant_role.h" #include "share/ob_root_addr_agent.h"//ObRootAddr #include "share/schema/ob_schema_struct.h" +#include "share/ob_tenant_switchover_status.h" #include "logservice/palf/palf_options.h" #include @@ -33,6 +34,7 @@ namespace oceanbase { namespace share { +class ObRestoreSourceServiceAttr; class ObLogRestoreMySQLProvider : public common::sqlclient::ObMySQLServerProvider { public: @@ -112,6 +114,7 @@ public: const char *user_name, const char *user_password, const char *db_name); + int init_with_service_attr(const uint64_t tenant_id, const ObRestoreSourceServiceAttr *service_attr); // destroy proxy, close all connections void destroy(); @@ -145,7 +148,7 @@ public: int get_server_addr(const uint64_t tenant_id, common::ObIArray &addrs); int check_begin_lsn(const uint64_t tenant_id); // get log restore source tenant info, includes tenant role and tennat status - int get_tenant_info(ObTenantRole &role, schema::ObTenantStatus &status); + int get_tenant_info(ObTenantRole &role, schema::ObTenantStatus &status, ObTenantSwitchoverStatus &switchover_status); // get the access_mode and max_scn of the specific LS in log restore source tenant int get_max_log_info(const ObLSID &id, palf::AccessMode &mode, SCN &scn); // get ls from dba_ob_ls diff --git a/src/share/ob_max_id_fetcher.cpp b/src/share/ob_max_id_fetcher.cpp index 3247138e06..1fdd77ce6c 100755 --- a/src/share/ob_max_id_fetcher.cpp +++ b/src/share/ob_max_id_fetcher.cpp @@ -82,6 +82,7 @@ const char *ObMaxIdFetcher::max_id_name_info_[OB_MAX_ID_TYPE][2] = { { "ob_max_used_rls_policy_id", "max used ddl rls policy id"}, { "ob_max_used_rls_group_id", "max used ddl rls group id"}, { "ob_max_used_rls_context_id", "max used ddl rls context id"}, + { "ob_max_used_service_name_id", "max used service name id"} }; lib::ObMutex ObMaxIdFetcher::mutex_bucket_[MAX_TENANT_MUTEX_BUCKET_CNT]; @@ -123,6 +124,7 @@ int ObMaxIdFetcher::convert_id_type( case OB_MAX_USED_OBJECT_ID_TYPE: case OB_MAX_USED_LOCK_OWNER_ID_TYPE: case OB_MAX_USED_REWRITE_RULE_VERSION_TYPE: + case OB_MAX_USED_SERVICE_NAME_ID_TYPE: case OB_MAX_USED_TTL_TASK_ID_TYPE: { dst = src; break; @@ -314,6 +316,7 @@ int ObMaxIdFetcher::fetch_new_max_id(const uint64_t tenant_id, case OB_MAX_USED_LS_ID_TYPE: case OB_MAX_USED_LS_GROUP_ID_TYPE: case OB_MAX_USED_REWRITE_RULE_VERSION_TYPE: + case OB_MAX_USED_SERVICE_NAME_ID_TYPE: case OB_MAX_USED_TTL_TASK_ID_TYPE: { // won't check other id break; @@ -425,9 +428,6 @@ int ObMaxIdFetcher::update_max_id(ObISQLClient &sql_client, const uint64_t tenan || OB_INVALID_ID == max_id) { ret = OB_INVALID_ARGUMENT; LOG_WARN("invalid argument", K(ret), K(tenant_id), K(max_id_type), K(max_id)); - } else if (GCTX.is_standby_cluster() && OB_SYS_TENANT_ID != tenant_id) { - ret = OB_OP_NOT_ALLOW; - LOG_WARN("can't write sys table now", K(ret), K(tenant_id)); } else if (OB_ISNULL(id_name = get_max_id_name(max_id_type))) { ret = OB_ERR_UNEXPECTED; LOG_WARN("NULL name", K(ret)); @@ -528,9 +528,6 @@ int ObMaxIdFetcher::insert_initial_value(common::ObISQLClient &sql_client, uint6 if (OB_INVALID_ID == tenant_id || !valid_max_id_type(max_id_type) || UINT64_MAX == initial_value) { ret = OB_INVALID_ARGUMENT; LOG_WARN("invalid argument", K(ret), K(tenant_id), K(max_id_type), K(initial_value)); - } else if (GCTX.is_standby_cluster() && OB_SYS_TENANT_ID != tenant_id) { - ret = OB_OP_NOT_ALLOW; - LOG_WARN("can't write sys table now", K(ret), 
K(tenant_id)); } else if (OB_ISNULL(name) || OB_ISNULL(info)) { ret = OB_ERR_UNEXPECTED; LOG_WARN("NULL name or info", K(ret), KP(name), KP(info)); diff --git a/src/share/ob_rpc_struct.cpp b/src/share/ob_rpc_struct.cpp index 20a4a510ec..346451fdcf 100644 --- a/src/share/ob_rpc_struct.cpp +++ b/src/share/ob_rpc_struct.cpp @@ -11991,5 +11991,105 @@ int ObCheckServerMachineStatusResult::assign(const ObCheckServerMachineStatusRes } return ret; } +OB_SERIALIZE_MEMBER(ObRefreshServiceNameArg, tenant_id_, epoch_, from_server_, target_service_name_id_, + service_name_list_, service_op_, update_tenant_info_arg_); +int ObRefreshServiceNameArg::init( + const uint64_t tenant_id, + const uint64_t epoch, + const ObAddr &from_server, + const share::ObServiceNameID &target_service_name_id, + const common::ObIArray &service_name_list, + const share::ObServiceNameArg::ObServiceOp &service_op, + const share::ObAllTenantInfo &tenant_info, + const int64_t ora_rowscn) +{ + int ret = OB_SUCCESS; + if (OB_UNLIKELY(!is_valid_tenant_id(tenant_id) || 0 == epoch || INT64_MAX == epoch + || !from_server.is_valid() || !target_service_name_id.is_valid() || service_name_list.count() <= 0 + || !ObServiceNameArg::is_valid_service_op(service_op))) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid arg", KR(ret), K(tenant_id), K(epoch), K(from_server), K(target_service_name_id), + K(service_name_list), K(service_op)); + } else { + tenant_id_ = tenant_id; + epoch_ = epoch; + from_server_ = from_server; + target_service_name_id_ = target_service_name_id; + service_op_ = service_op; + for (int64_t i = 0; i < service_name_list.count(); ++i) { + const ObServiceName &service_name = service_name_list.at(i); + if (OB_UNLIKELY(!service_name.is_valid())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid service_name", KR(ret), K(service_name), K(i), K(service_name_list)); + } else if (OB_FAIL(service_name_list_.push_back(service_name))) { + LOG_WARN("fail to push back", KR(ret), K(service_name), K(service_name_list_)); + } + } + } + if (OB_SUCC(ret) && is_start_service()) { + + if (OB_FAIL(update_tenant_info_arg_.init(tenant_id, tenant_info, ora_rowscn, 0 /*finish_data_version*/, SCN::min_scn()))) { + LOG_WARN("fail to init update_tenant_info_arg_", KR(ret), K(tenant_id), K(tenant_info), K(ora_rowscn)); + } + } + return ret; +} +bool ObRefreshServiceNameArg::is_valid() const +{ + bool service_name_list_valid = service_name_list_.count() > 0; + for (int64_t i = 0; i < service_name_list_.count() && service_name_list_valid; ++i) { + if (OB_UNLIKELY(!service_name_list_.at(i).is_valid())) { + service_name_list_valid = false; + } + } + return is_valid_tenant_id(tenant_id_) + && 0 != epoch_ && INT64_MAX != epoch_ && from_server_.is_valid() + && target_service_name_id_.is_valid() && service_name_list_valid + && ObServiceNameArg::is_valid_service_op(service_op_) + && (!is_start_service() || update_tenant_info_arg_.is_valid()); +} +int ObRefreshServiceNameArg::assign(const ObRefreshServiceNameArg &other) +{ + int ret = OB_SUCCESS; + if (this != &other) { + if (OB_FAIL(service_name_list_.assign(other.service_name_list_))) { + LOG_WARN("fail to assign service_name_str_", KR(ret), K(other)); + } else if (OB_FAIL(update_tenant_info_arg_.assign(other.update_tenant_info_arg_))) { + LOG_WARN("fail to assign update_tenant_info_arg_", KR(ret), K(other)); + } else { + tenant_id_ = other.tenant_id_; + epoch_ = other.epoch_; + from_server_ = other.from_server_; + target_service_name_id_ = other.target_service_name_id_; + service_op_ = other.service_op_; + 
} + } + return ret; +} +OB_SERIALIZE_MEMBER(ObRefreshServiceNameRes, tenant_id_); +int ObRefreshServiceNameRes::init(const uint64_t tenant_id) +{ + int ret = OB_SUCCESS; + if (OB_UNLIKELY(!is_valid_tenant_id(tenant_id))) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid arg", KR(ret), K(tenant_id)); + } else { + tenant_id_ = tenant_id; + } + return ret; +} +bool ObRefreshServiceNameRes::is_valid() const +{ + return is_valid_tenant_id(tenant_id_); +} +int ObRefreshServiceNameRes::assign(const ObRefreshServiceNameRes &other) +{ + int ret = OB_SUCCESS; + if (this != &other) { + tenant_id_ = other.tenant_id_; + } + return ret; +} + }//end namespace obrpc }//end namespace oceanbase diff --git a/src/share/ob_rpc_struct.h b/src/share/ob_rpc_struct.h index a9ce8dee49..759c85e768 100644 --- a/src/share/ob_rpc_struct.h +++ b/src/share/ob_rpc_struct.h @@ -78,6 +78,7 @@ #include "share/tenant_snapshot/ob_tenant_snapshot_id.h" #include "share/location_cache/ob_location_update_task.h" #include "share/resource_limit_calculator/ob_resource_limit_calculator.h"//ObUserResourceCalculateArg +#include "share/ob_service_name_proxy.h" #include "share/ob_heartbeat_handler.h" #include "storage/tablelock/ob_table_lock_common.h" //ObTableLockPriority @@ -8125,6 +8126,9 @@ public: bool is_valid() const; bool operator==(const obrpc::ObCheckpoint &r) const; + bool operator < (const ObCheckpoint &that) { + return this->cur_sync_scn_ < that.cur_sync_scn_; + } share::ObLSID get_ls_id() const { return ls_id_; @@ -8144,7 +8148,8 @@ public: && cur_sync_scn_.is_valid_and_not_min() && cur_restore_source_max_scn_.is_valid_and_not_min()); } - TO_STRING_KV(K_(ls_id), K_(cur_sync_scn), K_(cur_restore_source_max_scn)); + TO_STRING_KV("ls_id", ls_id_.id(), "cur_sync_scn", cur_sync_scn_.get_val_for_inner_table_field(), + "cur_restore_source_max_scn", cur_restore_source_max_scn_.get_val_for_inner_table_field()); share::ObLSID ls_id_; share::SCN cur_sync_scn_; @@ -12121,6 +12126,70 @@ public: private: share::ObServerHealthStatus server_health_status_; }; + +struct ObRefreshServiceNameArg +{ + OB_UNIS_VERSION(1); +public: + ObRefreshServiceNameArg() + : tenant_id_(OB_INVALID_TENANT_ID), + epoch_(0), + from_server_(), + target_service_name_id_(), + service_name_list_(), + service_op_(share::ObServiceNameArg::INVALID_SERVICE_OP), + update_tenant_info_arg_() {} + ~ObRefreshServiceNameArg() {} + int init( + const uint64_t tenant_id, + const uint64_t epoch, + const ObAddr &from_server, + const share::ObServiceNameID &target_service_name_id, + const common::ObIArray &service_name_list, + const share::ObServiceNameArg::ObServiceOp &service_op, + const share::ObAllTenantInfo &tenant_info, + const int64_t ora_rowscn); + bool is_valid() const; + int assign(const ObRefreshServiceNameArg &other); + uint64_t get_tenant_id() const { return tenant_id_; } + uint64_t get_epoch() const { return epoch_; } + const ObAddr &get_from_server() const { return from_server_; } + const share::ObServiceNameID &get_target_service_name_id() const { return target_service_name_id_; } + const common::ObSArray &get_service_name_list() const { return service_name_list_; } + const ObUpdateTenantInfoCacheArg &get_update_tenant_info_arg() const { return update_tenant_info_arg_; } + bool is_start_service() const {return share::ObServiceNameArg::START_SERVICE == service_op_; } + bool is_stop_service() const {return share::ObServiceNameArg::STOP_SERVICE == service_op_; } + TO_STRING_KV(K_(tenant_id), K_(epoch), K_(from_server), + "target_service_name_id", 
target_service_name_id_.id(), K_(service_name_list), + "service_op_to_str", share::ObServiceNameArg::service_op_to_str(service_op_), K_(update_tenant_info_arg)); +private: + uint64_t tenant_id_; + uint64_t epoch_; // __all_service snapshot corresponding epoch + ObAddr from_server_; // the server which sends broadcasting request + share::ObServiceNameID target_service_name_id_; // indicate which service name to operate + common::ObSArray service_name_list_; // __all_service snapshot + share::ObServiceNameArg::ObServiceOp service_op_; // the operation which triggered broadcasting + ObUpdateTenantInfoCacheArg update_tenant_info_arg_; // only used if the op is START_SERVICE +private: + DISALLOW_COPY_AND_ASSIGN(ObRefreshServiceNameArg); +}; + +struct ObRefreshServiceNameRes +{ + OB_UNIS_VERSION(1); +public: + ObRefreshServiceNameRes() : tenant_id_(OB_INVALID_TENANT_ID) {} + ~ObRefreshServiceNameRes() {} + int init(const uint64_t tenant_id); + bool is_valid() const; + int assign(const ObRefreshServiceNameRes &other); + uint64_t get_tenant_id() const { return tenant_id_; } + TO_STRING_KV(K_(tenant_id)); +private: + uint64_t tenant_id_; +private: + DISALLOW_COPY_AND_ASSIGN(ObRefreshServiceNameRes); +}; }//end namespace obrpc }//end namespace oceanbase #endif diff --git a/src/share/ob_service_epoch_proxy.cpp b/src/share/ob_service_epoch_proxy.cpp index 04267c635c..59e713624c 100644 --- a/src/share/ob_service_epoch_proxy.cpp +++ b/src/share/ob_service_epoch_proxy.cpp @@ -34,7 +34,8 @@ int ObServiceEpochProxy::init_service_epoch( const int64_t freeze_service_epoch, const int64_t arbitration_service_epoch, const int64_t server_zone_op_service_epoch, - const int64_t heartbeat_service_epoch) + const int64_t heartbeat_service_epoch, + const int64_t service_name_epoch) { int ret = OB_SUCCESS; if (is_user_tenant(tenant_id)) { @@ -94,6 +95,12 @@ int ObServiceEpochProxy::init_service_epoch( ARBITRATION_SERVICE_EPOCH, arbitration_service_epoch))) { LOG_WARN("fail to init arb service epoch", KR(ret), K(user_tenant_id), K(arbitration_service_epoch)); + } else if (OB_FAIL(ObServiceEpochProxy::insert_service_epoch( + sql_proxy, + user_tenant_id, + SERVICE_NAME_EPOCH, + service_name_epoch))) { + LOG_WARN("fail to init service name epoch", KR(ret), K(user_tenant_id), K(service_name_epoch)); } else {} } else {} return ret; diff --git a/src/share/ob_service_epoch_proxy.h b/src/share/ob_service_epoch_proxy.h index 9ebd83bce9..273f6d935a 100644 --- a/src/share/ob_service_epoch_proxy.h +++ b/src/share/ob_service_epoch_proxy.h @@ -51,7 +51,8 @@ public: const int64_t freeze_service_epoch, const int64_t arbitration_service_epoch, const int64_t server_zone_op_service_epoch, - const int64_t heartbeat_service_epoch); + const int64_t heartbeat_service_epoch, + const int64_t service_name_epoch); static int insert_service_epoch(common::ObISQLClient &sql_proxy, const int64_t tenant_id, @@ -94,6 +95,7 @@ public: constexpr static const char * const ARBITRATION_SERVICE_EPOCH = "arbitration_service_epoch"; constexpr static const char * const SERVER_ZONE_OP_SERVICE_EPOCH = "server_zone_op_service_epoch"; constexpr static const char * const HEARTBEAT_SERVICE_EPOCH = "heartbeat_service_epoch"; + constexpr static const char * const SERVICE_NAME_EPOCH = "service_name_epoch"; private: static int inner_get_service_epoch_(common::ObISQLClient &sql_proxy, diff --git a/src/share/ob_service_name_proxy.cpp b/src/share/ob_service_name_proxy.cpp new file mode 100644 index 0000000000..e6e0368d40 --- /dev/null +++ 
b/src/share/ob_service_name_proxy.cpp @@ -0,0 +1,630 @@ +/** + * Copyright (c) 2021 OceanBase + * OceanBase CE is licensed under Mulan PubL v2. + * You can use this software according to the terms and conditions of the Mulan PubL v2. + * You may obtain a copy of Mulan PubL v2 at: + * http://license.coscl.org.cn/MulanPubL-2.0 + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PubL v2 for more details. + */ +#define USING_LOG_PREFIX SHARE + +#include "ob_service_name_proxy.h" + +#include "lib/mysqlclient/ob_mysql_result.h" +#include "lib/mysqlclient/ob_mysql_proxy.h" +#include "lib/mysqlclient/ob_mysql_transaction.h" +#include "lib/string/ob_sql_string.h" +#include "observer/ob_server_struct.h" +#include "common/ob_timeout_ctx.h" +#include "share/ob_share_util.h" +#include "share/inner_table/ob_inner_table_schema.h" +#include "share/config/ob_server_config.h" +#include "share/ob_tenant_info_proxy.h" +#include "rootserver/ob_tenant_event_def.h" +#include "share/ob_max_id_fetcher.h" +#include "share/ob_service_epoch_proxy.h" + +using namespace oceanbase::tenant_event; +using namespace oceanbase::common::sqlclient; +namespace oceanbase +{ +namespace share +{ +static const char *SERVICE_STATUS_STR[] = { + "INVALID SERVICE STATUS", + "STARTED", + "STOPPING", + "STOPPED", +}; +OB_SERIALIZE_MEMBER(ObServiceNameID,id_); +OB_SERIALIZE_MEMBER(ObServiceNameString,str_); +int ObServiceNameString::init(const ObString &str) +{ + int ret = OB_SUCCESS; + if (OB_FAIL(check_service_name(str))) { + LOG_WARN("fail to execute check_service_name", KR(ret), K(str)); + } else if (OB_FAIL(str_.assign(str))) { + LOG_WARN("fail to assign str_", KR(ret), K(str)); + } + return ret; +} +int ObServiceNameString::assign(const ObServiceNameString &other) +{ + int ret = OB_SUCCESS; + if (this != &other) { + if (OB_FAIL(str_.assign(other.str_))) { + LOG_WARN("fail to assign str_", KR(ret), K(other)); + } + } + return ret; +} +bool ObServiceNameString::equal_to(const ObServiceNameString &service_name_string) const +{ + return 0 == str_.str().case_compare(service_name_string.str_.str()); +} +int ObServiceNameString::check_service_name(const ObString &service_name_str) +{ + int ret = OB_SUCCESS; + if (OB_UNLIKELY(service_name_str.empty() || service_name_str.length() > OB_SERVICE_NAME_LENGTH)) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid service_name", KR(ret), K(service_name_str), K(service_name_str.length())); + LOG_USER_ERROR(OB_INVALID_ARGUMENT, "service_name, whose length should be 1 - 64"); + } else if (!isalpha(service_name_str[0])) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid service_name", KR(ret), K(service_name_str)); + LOG_USER_ERROR(OB_INVALID_ARGUMENT, "service_name, which should be start with letter"); + } else { + for (int i = 1; i < service_name_str.length() && OB_SUCC(ret); i++) + { + const char cur_char = service_name_str[i]; + if (!isalpha(cur_char) && !isdigit(cur_char) && cur_char != '_' ) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid service_name", KR(ret), K(service_name_str)); + LOG_USER_ERROR(OB_INVALID_ARGUMENT, "service_name, which can only include letter, digit and underscore"); + } + } + } + return ret; +} + +static const char *SERVICE_OP_STR[] = { + "INVALID SERVICE OPERATION", + "CREATE SERVICE", + "DELETE SERVICE", + "START SERVICE", + "STOP SERVICE", +}; +const char 
*ObServiceNameArg::service_op_to_str(const ObServiceNameArg::ObServiceOp &service_op) +{ + STATIC_ASSERT(ARRAYSIZEOF(SERVICE_OP_STR) == MAX_SERVICE_OP, "array size mismatch"); + ObServiceOp returned_service_op = INVALID_SERVICE_OP; + if (OB_UNLIKELY(service_op >= MAX_SERVICE_OP + || service_op < INVALID_SERVICE_OP)) { + LOG_ERROR_RET(OB_ERR_UNEXPECTED, "fatal error, unknown service op", K(service_op)); + } else { + returned_service_op = service_op; + } + return SERVICE_OP_STR[returned_service_op]; +} +int ObServiceNameArg::init(const ObServiceOp op, const uint64_t target_tenant_id, const ObString &service_name_str) +{ + int ret = OB_SUCCESS; + if (OB_UNLIKELY(!is_valid_service_op(op) + || !is_valid_tenant_id(target_tenant_id) + || service_name_str.empty())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("fail to init", KR(ret), K(op), K(target_tenant_id), K(service_name_str)); + } else if (OB_FAIL(service_name_str_.init(service_name_str))) { + LOG_WARN("fail to init service_name_str_", KR(ret), K(service_name_str)); + } else { + op_ = op; + target_tenant_id_ = target_tenant_id; + } + return ret; +} +bool ObServiceNameArg::is_valid() const +{ + return is_valid_service_op(op_) && is_valid_tenant_id(target_tenant_id_) && service_name_str_.is_valid(); +} +bool ObServiceNameArg::is_valid_service_op(ObServiceOp op) +{ + return op > INVALID_SERVICE_OP && op < MAX_SERVICE_OP; +} +int ObServiceNameArg::assign(const ObServiceNameArg &other) +{ + int ret = OB_SUCCESS; + if (this != &other) { + if (OB_FAIL(service_name_str_.assign(other.service_name_str_))) { + LOG_WARN("fail to assign service_name_str_", KR(ret), K(other)); + } else { + op_ = other.op_; + target_tenant_id_ = other.target_tenant_id_; + } + } + return ret; +} +void ObServiceNameArg::reset() +{ + op_ = INVALID_SERVICE_OP; + target_tenant_id_ = OB_INVALID_TENANT_ID; + service_name_str_.reset(); +} +const char *ObServiceName::service_status_to_str(const ObServiceName::ObServiceStatus &service_status) +{ + STATIC_ASSERT(ARRAYSIZEOF(SERVICE_STATUS_STR) == MAX_SERVICE_STATUS, "array size mismatch"); + ObServiceStatus returned_service_status = INVALID_SERVICE_STATUS; + if (OB_UNLIKELY(service_status >= MAX_SERVICE_STATUS + || service_status < INVALID_SERVICE_STATUS)) { + LOG_ERROR_RET(OB_ERR_UNEXPECTED, "fatal error, unknown service status", K(service_status)); + } else { + returned_service_status = service_status; + } + return SERVICE_STATUS_STR[returned_service_status]; +} +ObServiceName::ObServiceStatus ObServiceName::str_to_service_status(const ObString &service_status_str) +{ + ObServiceStatus service_status = INVALID_SERVICE_STATUS; + bool is_found = false; + for (int i = INVALID_SERVICE_STATUS; i < MAX_SERVICE_STATUS && !is_found; i++) + { + if (0 == service_status_str.case_compare(SERVICE_STATUS_STR[i])) { + service_status = static_cast(i); + is_found = true; + } + } + return service_status; +} +bool ObServiceName::is_valid_service_status(const ObServiceName::ObServiceStatus &service_status) +{ + return service_status > INVALID_SERVICE_STATUS && service_status < MAX_SERVICE_STATUS; +} +OB_SERIALIZE_MEMBER(ObServiceName, tenant_id_, service_name_id_, service_name_str_, service_status_); +int ObServiceName::init( + const uint64_t tenant_id, + const uint64_t service_name_id, + const ObString &service_name_str, + const ObString &service_status_str) +{ + int ret = OB_SUCCESS; + ObServiceStatus service_status = str_to_service_status(service_status_str); + if (OB_UNLIKELY(!is_valid_tenant_id(tenant_id) + || 
!ObServiceNameID::is_valid_service_name_id(service_name_id) + || !is_valid_service_status(service_status) + || service_name_str.empty())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid arg", KR(ret), K(tenant_id), K(service_name_id), K(service_name_str), + K(service_status_str), K(service_status)); + } else if (OB_FAIL(service_name_str_.init(service_name_str))) { + LOG_WARN("fail to init service_name_str_", KR(ret), K(service_name_str)); + } else { + tenant_id_ = tenant_id; + service_name_id_ = service_name_id; + service_status_ = service_status; + } + return ret; +} +bool ObServiceName::is_valid() const +{ + return is_valid_tenant_id(tenant_id_) && service_name_id_.is_valid() + && !service_name_str_.is_empty() && is_valid_service_status(service_status_); +} +int ObServiceName::assign(const ObServiceName &other) +{ + int ret = OB_SUCCESS; + if (this != &other) { + if (OB_FAIL(service_name_str_.assign(other.service_name_str_))) { + LOG_WARN("fail to assign service_name_str_", KR(ret), K(other)); + } else { + service_name_id_ = other.service_name_id_; + service_status_ = other.service_status_; + tenant_id_ = other.tenant_id_; + } + } + return ret; +} +void ObServiceName::reset() +{ + tenant_id_ = OB_INVALID_TENANT_ID; + service_name_id_.reset(); + service_name_str_.reset(); + service_status_ = INVALID_SERVICE_STATUS; +} +int ObServiceNameProxy::select_all_service_names_with_epoch( + const int64_t tenant_id, + int64_t &epoch, + ObIArray &all_service_names) +{ + int ret = OB_SUCCESS; + const bool EXTRACT_EPOCH = true; + ObSqlString sql; + if (OB_UNLIKELY(!is_valid_tenant_id(tenant_id))) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid tenant_id", KR(ret), K(tenant_id)); + } else if (OB_ISNULL(GCTX.sql_proxy_)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("GCTX.sql_proxy_ is null", KR(ret), KP(GCTX.sql_proxy_)); + } else if (OB_FAIL(sql.assign_fmt("SELECT s.*, e.value as epoch FROM %s AS s " + "JOIN %s AS e ON s.tenant_id = e.tenant_id WHERE s.tenant_id = %lu and e.name='%s' ORDER BY s.gmt_create", + OB_ALL_SERVICE_TNAME, OB_ALL_SERVICE_EPOCH_TNAME, tenant_id, ObServiceEpochProxy::SERVICE_NAME_EPOCH))) { + // join the two tables to avoid add a row lock on __all_service_epoch + // otherwise there might be conflicts and too many retries in tenant_info_loader thread + // when the number of observers is large + LOG_WARN("sql assign_fmt failed", KR(ret), K(sql)); + } else if (OB_FAIL(select_service_name_sql_helper_(*GCTX.sql_proxy_, tenant_id, EXTRACT_EPOCH, + sql, epoch, all_service_names))) { + LOG_WARN("fail to execute select_service_name_sql_helper_", KR(ret), K(tenant_id), K(sql)); + } + return ret; +} +int ObServiceNameProxy::select_all_service_names_( + common::ObISQLClient &sql_proxy, + const int64_t tenant_id, + ObIArray &all_service_names) +{ + int ret = OB_SUCCESS; + ObSqlString sql; + const bool NOT_EXTRACT_EPOCH = false; + int64_t unused_epoch = 0; + if (OB_UNLIKELY(!is_valid_tenant_id(tenant_id))) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid tenant_id", KR(ret), K(tenant_id)); + } else if (OB_FAIL(sql.assign_fmt("SELECT * FROM %s WHERE TENANT_ID = %lu ORDER BY gmt_create", + OB_ALL_SERVICE_TNAME, tenant_id))) { + LOG_WARN("sql assign_fmt failed", KR(ret), K(sql)); + } else if (OB_FAIL(select_service_name_sql_helper_(sql_proxy, tenant_id, NOT_EXTRACT_EPOCH, + sql, unused_epoch, all_service_names))) { + LOG_WARN("fail to execute select_service_name_sql_helper_", KR(ret), K(tenant_id), K(sql)); + } + return ret; +} + +int ObServiceNameProxy::select_service_name_sql_helper_( + 
common::ObISQLClient &sql_proxy, + const int64_t tenant_id, + const bool extract_epoch, + ObSqlString &sql, + int64_t &epoch, + ObIArray &all_service_names) +{ + int ret = OB_SUCCESS; + ObTimeoutCtx ctx; + const int64_t QUERY_TIMEOUT = 5 * GCONF.rpc_timeout; // 10s + all_service_names.reset(); + if (OB_FAIL(ObShareUtil::set_default_timeout_ctx(ctx, QUERY_TIMEOUT))) { + LOG_WARN("fail to get timeout ctx", KR(ret), K(ctx), K(QUERY_TIMEOUT)); + } else { + SMART_VAR(ObMySQLProxy::MySQLResult, res) { + ObMySQLResult *result = NULL; + if (OB_FAIL(sql_proxy.read(res, gen_meta_tenant_id(tenant_id), sql.ptr()))) { + LOG_WARN("execute sql failed", KR(ret), K(sql)); + } else if (NULL == (result = res.get_result())) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("execute sql failed", K(sql), KR(ret)); + } else { + ObServiceName service_name; + while (OB_SUCC(ret)) { + service_name.reset(); + if (OB_FAIL(result->next())) { + if (OB_ITER_END != ret) { + LOG_WARN("result next failed", KR(ret)); + } else { + ret = OB_SUCCESS; + break; + } + } else if (OB_FAIL(build_service_name_(*result, service_name))) { + LOG_WARN("fail to build server status", KR(ret)); + } else if (OB_FAIL(all_service_names.push_back(service_name))) { + LOG_WARN("fail to build service_name", KR(ret)); + } else if (extract_epoch) { + // epoch can only be extracted when __all_service_epoch table is joined + EXTRACT_INT_FIELD_MYSQL(*result, "epoch", epoch, int64_t); + } + } + } + } + } + return ret; +} +int ObServiceNameProxy::select_service_name( + common::ObISQLClient &sql_proxy, + const uint64_t tenant_id, + const ObServiceNameString &service_name_str, + ObServiceName &service_name) +{ + int ret = OB_SUCCESS; + ObSqlString sql; + ObTimeoutCtx ctx; + const int64_t QUERY_TIMEOUT = 5 * GCONF.rpc_timeout; // 10s + service_name.reset(); + if (OB_UNLIKELY(!is_valid_tenant_id(tenant_id))) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid tenant_id", KR(ret), K(tenant_id)); + } else if (OB_FAIL(ObShareUtil::set_default_timeout_ctx(ctx, QUERY_TIMEOUT))) { + LOG_WARN("fail to get timeout ctx", KR(ret), K(ctx), K(QUERY_TIMEOUT)); + } else if (OB_FAIL(sql.assign_fmt("SELECT * FROM %s WHERE TENANT_ID = %lu AND SERVICE_NAME = '%s'", + OB_ALL_SERVICE_TNAME, tenant_id, service_name_str.ptr()))) { + LOG_WARN("sql assign_fmt failed", KR(ret), K(sql), K(tenant_id), K(service_name_str)); + } else { + SMART_VAR(ObMySQLProxy::MySQLResult, res) { + ObMySQLResult *result = NULL; + if (OB_FAIL(sql_proxy.read(res, gen_meta_tenant_id(tenant_id), sql.ptr()))) { + LOG_WARN("execute sql failed", K(sql), KR(ret)); + } else if (NULL == (result = res.get_result())) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("execute sql failed", K(sql), KR(ret)); + } else if (OB_FAIL(result->next())) { + if (OB_ITER_END == ret) { + ret = OB_SERVICE_NAME_NOT_FOUND; + } + LOG_WARN("fail to get next result", KR(ret)); + } else if (OB_FAIL(build_service_name_(*result, service_name))) { + LOG_WARN("fail to build server status", KR(ret)); + } else { + int tmp_ret = OB_SUCCESS; + if (OB_ITER_END != (tmp_ret = result->next())) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("get more row than one", KR(ret), KR(tmp_ret), K(sql)); + } + } + } + } + if (OB_SUCC(ret) && OB_UNLIKELY(!service_name.is_valid())) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("select invalid service_name", KR(ret), K(tenant_id), K(service_name_str), K(service_name)); + } + return ret; +} + +int ObServiceNameProxy::insert_service_name( + const uint64_t tenant_id, + const ObServiceNameString &service_name_str, + int64_t &epoch, + ObArray 
&all_service_names) +{ + // insert when so_status is normal + int ret = OB_SUCCESS; + int64_t service_name_num = INT64_MAX; + ObMySQLTransaction trans; + ObSqlString sql; + const char * service_status_str = ObServiceName::service_status_to_str(ObServiceName::STARTED); + if (OB_UNLIKELY(!is_valid_tenant_id(tenant_id) || !service_name_str.is_valid())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid arg", KR(ret), K(tenant_id), K(service_name_str)); + } else if (OB_ISNULL(GCTX.sql_proxy_)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("GCTX.sql_proxy_ is null", KR(ret), KP(GCTX.sql_proxy_)); + } else if (OB_FAIL(trans_start_and_precheck_(trans, tenant_id, epoch))) { + LOG_WARN("fail to execute trans_start_and_precheck_", KR(ret), K(tenant_id)); + } else if (OB_FAIL(get_tenant_service_name_num(trans, tenant_id, service_name_num))) { + LOG_WARN("fail to get the tenant's service_name_num", KR(ret), K(tenant_id)); + } else if (SERVICE_NAME_MAX_NUM <= service_name_num) { + ret = OB_OP_NOT_ALLOW; + LOG_WARN("The number of service_name for the tenant exceeds the limit", KR(ret), K(service_name_num)); + LOG_USER_ERROR(OB_OP_NOT_ALLOW, "The number of service_name for the tenant exceeds the limit, service name related command is"); + } else { + ObMaxIdFetcher id_fetcher(*GCTX.sql_proxy_); + uint64_t new_service_name_id = OB_INVALID_ID; + uint64_t meta_tenant_id = gen_meta_tenant_id(tenant_id); + if (OB_FAIL(id_fetcher.fetch_new_max_id( + meta_tenant_id, + OB_MAX_USED_SERVICE_NAME_ID_TYPE, + new_service_name_id, + 0 /* initial */))) { + LOG_WARN("fail to fetch new service_name id", KR(ret), K(tenant_id), K(meta_tenant_id)); + } else if (OB_FAIL(sql.assign_fmt("INSERT INTO %s " + "(tenant_id, service_name_id, service_name, service_status) value (%lu, %lu, '%s', '%s')", + OB_ALL_SERVICE_TNAME, tenant_id, new_service_name_id, service_name_str.ptr(), service_status_str))) { + LOG_WARN("fail to insert service_name", KR(ret), K(tenant_id), K(new_service_name_id), + K(service_name_str), K(service_status_str)); + } + } + (void) write_and_end_trans_(ret, trans, tenant_id, sql, all_service_names); + return ret; +} + +int ObServiceNameProxy::update_service_status( + const ObServiceName &service_name, + const ObServiceName::ObServiceStatus &new_status, + int64_t &epoch, + ObArray &all_service_names) +{ + // update when so_status is normal + int ret = OB_SUCCESS; + ObMySQLTransaction trans; + ObSqlString sql; + const char * old_service_status_str = ObServiceName::service_status_to_str(service_name.get_service_status()); + const char * new_service_status_str = ObServiceName::service_status_to_str(new_status); + const uint64_t service_name_id = service_name.get_service_name_id().id(); + const uint64_t tenant_id = service_name.get_tenant_id(); + if (OB_UNLIKELY(!service_name.is_valid())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid arg", KR(ret), K(service_name)); + } else if (service_name.get_service_status() == new_status) { + ret = OB_OP_NOT_ALLOW; + LOG_WARN("the new service status is the same with the old one", KR(ret), K(service_name), K(new_status)); + } else if (OB_FAIL(trans_start_and_precheck_(trans, tenant_id, epoch))) { + LOG_WARN("fail to execute trans_start_and_precheck_", KR(ret), K(tenant_id)); + } else if (OB_FAIL(sql.assign_fmt("UPDATE %s SET service_status = '%s' " + "WHERE tenant_id = %lu AND service_name_id = '%lu' AND service_status = '%s'", + OB_ALL_SERVICE_TNAME, new_service_status_str, tenant_id, service_name_id, old_service_status_str))) { + LOG_WARN("fail to insert service_name", KR(ret), 
K(tenant_id), K(service_name), K(new_service_status_str)); + } + (void) write_and_end_trans_(ret, trans, tenant_id, sql, all_service_names); + return ret; +} + +int ObServiceNameProxy::delete_service_name(const ObServiceName &service_name) +{ + int ret = OB_SUCCESS; + ObMySQLTransaction trans; + ObSqlString sql; + int64_t epoch = 0; + ObArray all_service_names; + const char * stopped_status_str = ObServiceName::service_status_to_str(ObServiceName::STOPPED); + const uint64_t tenant_id = service_name.get_tenant_id(); + const uint64_t service_name_id = service_name.get_service_name_id().id(); + if (OB_UNLIKELY(!service_name.is_valid())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid service_name", KR(ret), K(service_name)); + } else if (OB_FAIL(trans_start_and_precheck_(trans, tenant_id, epoch))) { + LOG_WARN("fail to execute trans_start_and_precheck_", KR(ret), K(tenant_id)); + } else if (OB_FAIL(sql.assign_fmt("DELETE FROM %s WHERE tenant_id = %lu AND service_name_id = '%lu' " + "AND service_status = '%s'", + OB_ALL_SERVICE_TNAME, service_name.get_tenant_id(), service_name_id, stopped_status_str))) { + LOG_WARN("fail to insert service_name", KR(ret), K(service_name)); + } + (void) write_and_end_trans_(ret, trans, tenant_id, sql, all_service_names); + return ret; +} + +int ObServiceNameProxy::check_is_service_name_enabled(const uint64_t tenant_id) +{ + int ret = OB_SUCCESS; + const uint64_t meta_tenant_id = gen_meta_tenant_id(tenant_id); + uint64_t tenant_data_version = 0; + uint64_t meta_tenant_data_version = 0; + if (OB_UNLIKELY(!is_valid_tenant_id(tenant_id) || !is_user_tenant(tenant_id))) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("not user tenant", KR(ret), K(tenant_id)); + } else if (OB_FAIL(GET_MIN_DATA_VERSION(tenant_id, tenant_data_version))) { + LOG_WARN("fail to get the tenant's min data version", KR(ret), K(tenant_id)); + } else if (!((tenant_data_version >= MOCK_DATA_VERSION_4_2_4_0 && tenant_data_version < DATA_VERSION_4_3_0_0) + || tenant_data_version >= DATA_VERSION_4_3_3_0)) { + ret = OB_NOT_SUPPORTED; + LOG_WARN("tenant_data_version should be [4.2.4.0, 4.3.0.0) or [4.3.3.0, +infinity)", KR(ret), K(tenant_data_version)); + } else if (OB_FAIL(GET_MIN_DATA_VERSION(meta_tenant_id, meta_tenant_data_version))) { + LOG_WARN("fail to get the meta tenant's min data version", KR(ret), K(meta_tenant_id)); + } else if (!((meta_tenant_data_version >= MOCK_DATA_VERSION_4_2_4_0 && meta_tenant_data_version < DATA_VERSION_4_3_0_0) + || meta_tenant_data_version >= DATA_VERSION_4_3_3_0)) { + ret = OB_NOT_SUPPORTED; + LOG_WARN("meta_tenant_data_version should be [4.2.4.0, 4.3.0.0) or [4.3.3.0, +infinity)", KR(ret), K(meta_tenant_data_version)); + } + return ret; +} + +int ObServiceNameProxy::trans_start_and_precheck_( + ObMySQLTransaction &trans, + const uint64_t tenant_id, + int64_t &epoch) +{ + int ret = OB_SUCCESS; + ObAllTenantInfo tenant_info; + const uint64_t exec_tenant_id = gen_meta_tenant_id(tenant_id); + int64_t affected_rows = 0; + if (OB_ISNULL(GCTX.sql_proxy_)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("GCTX.sql_proxy_ is null", KR(ret), KP(GCTX.sql_proxy_)); + } else if (OB_FAIL(trans.start(GCTX.sql_proxy_, exec_tenant_id))) { + LOG_WARN("fail to start trans", KR(ret)); + } else if (OB_FAIL(ObAllTenantInfoProxy::load_tenant_info(tenant_id, &trans, true /* for_update */, tenant_info))) { + LOG_WARN("fail to load tenant info", KR(ret), K(tenant_id)); + } else if (OB_UNLIKELY(!tenant_info.is_normal_status())) { + ret = OB_OP_NOT_ALLOW; + LOG_WARN("the tenant's switchover status is 
not normal", KR(ret), K(tenant_info)); + LOG_USER_ERROR(OB_OP_NOT_ALLOW, "The tenant's switchover status is not normal, service name related command is"); + } else if (OB_FAIL(ObServiceEpochProxy::get_service_epoch(trans, tenant_id, ObServiceEpochProxy::SERVICE_NAME_EPOCH, epoch))) { + LOG_WARN("fail to get service epoch", KR(ret), K(tenant_id)); + } else if (FALSE_IT(epoch += 1)) { + } else if (OB_FAIL(ObServiceEpochProxy::update_service_epoch(trans, tenant_id, + ObServiceEpochProxy::SERVICE_NAME_EPOCH, epoch, affected_rows))) { + LOG_WARN("fail to get service epoch", KR(ret), K(tenant_id)); + } else if (1 != affected_rows) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("affected_rows should be one", KR(ret), K(affected_rows), K(tenant_id)); + } + return ret; +} + +void ObServiceNameProxy::write_and_end_trans_( + int &ret, + ObMySQLTransaction &trans, + const uint64_t tenant_id, + const ObSqlString &sql, + ObArray &all_service_names) +{ + if (OB_SUCC(ret)) { + int64_t affected_rows = 0; + const uint64_t exec_tenant_id = gen_meta_tenant_id(tenant_id); + if (OB_FAIL(trans.write(exec_tenant_id, sql.ptr(), affected_rows))) { + LOG_WARN("failed to execute sql", KR(ret), K(exec_tenant_id), K(sql)); + } else if (1 != affected_rows) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("affected_rows should be one", KR(ret), K(affected_rows)); + } else if (OB_FAIL(select_all_service_names_(trans, tenant_id, all_service_names))) { + LOG_WARN("fail to execute select_all_service_names_", KR(ret), K(tenant_id)); + } + } + if (OB_UNLIKELY(!trans.is_started())) { + LOG_WARN("the transaction is not started"); + } else { + int tmp_ret = OB_SUCCESS; + if (OB_TMP_FAIL(trans.end(OB_SUCC(ret)))) { + LOG_WARN("fail to commit the transaction", KR(ret), KR(tmp_ret), K(tenant_id)); + ret = OB_SUCC(ret) ? 
tmp_ret : ret; + } + } +} + +int ObServiceNameProxy::build_service_name_( + const common::sqlclient::ObMySQLResult &res, + ObServiceName &service_name) +{ + int ret = OB_SUCCESS; + uint64_t tenant_id = OB_INVALID_TENANT_ID; + uint64_t service_name_id = ObServiceNameID::INVALID_SERVICE_NAME_ID; + ObString service_name_str; + ObString service_status_str; + service_name.reset(); + EXTRACT_VARCHAR_FIELD_MYSQL(res, "service_name", service_name_str); + EXTRACT_VARCHAR_FIELD_MYSQL(res, "service_status", service_status_str); + EXTRACT_INT_FIELD_MYSQL(res, "tenant_id", tenant_id, uint64_t); + EXTRACT_INT_FIELD_MYSQL(res, "service_name_id", service_name_id, uint64_t); + if (FAILEDx(service_name.init(tenant_id, service_name_id, service_name_str, service_status_str))) { + LOG_WARN("fail to init service_name", KR(ret), K(tenant_id), K(service_name_id), K(service_name_str), + K(service_status_str)); + } else if (OB_UNLIKELY(!service_name.is_valid())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("build invalid service_name", KR(ret), K(service_name)); + } + return ret; +} + +int ObServiceNameProxy::get_tenant_service_name_num( + common::ObISQLClient &sql_proxy, + const uint64_t tenant_id, + int64_t &service_name_num) +{ + int ret = OB_SUCCESS; + ObSqlString sql; + service_name_num = INT64_MAX; + const uint64_t exec_tenant_id = gen_meta_tenant_id(tenant_id); + if (OB_UNLIKELY(!is_valid_tenant_id(tenant_id))) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid tenant_id", KR(ret), K(tenant_id)); + } else if (OB_FAIL(sql.assign_fmt("select count(*) as count from %s where tenant_id = %lu", OB_ALL_SERVICE_TNAME, tenant_id))) { + LOG_WARN("fail to assign sql", KR(ret), K(tenant_id)); + } else { + HEAP_VAR(ObMySQLProxy::MySQLResult, res) { + common::sqlclient::ObMySQLResult *result = NULL; + if (OB_FAIL(sql_proxy.read(res, exec_tenant_id, sql.ptr()))) { + LOG_WARN("failed to read", KR(ret), K(tenant_id), K(exec_tenant_id), K(sql)); + } else if (OB_ISNULL(result = res.get_result())) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("failed to get sql result", KR(ret)); + } else if (OB_FAIL(result->next())) { + LOG_WARN("fail to get next", KR(ret), K(sql)); + } else { + EXTRACT_INT_FIELD_MYSQL(*result, "count", service_name_num, int64_t); + if (OB_FAIL(ret)) { + LOG_WARN("fail to extract count", KR(ret), K(tenant_id), K(exec_tenant_id), K(sql)); + } + } + } + } + return ret; +} +} +} \ No newline at end of file diff --git a/src/share/ob_service_name_proxy.h b/src/share/ob_service_name_proxy.h new file mode 100644 index 0000000000..c068f255fb --- /dev/null +++ b/src/share/ob_service_name_proxy.h @@ -0,0 +1,222 @@ +/** + * Copyright (c) 2021 OceanBase + * OceanBase CE is licensed under Mulan PubL v2. + * You can use this software according to the terms and conditions of the Mulan PubL v2. + * You may obtain a copy of Mulan PubL v2 at: + * http://license.coscl.org.cn/MulanPubL-2.0 + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PubL v2 for more details. 
+ */ + +#ifndef OCEANBASE_SHARE_OB_SERVICE_NAME_PROXY_H_ +#define OCEANBASE_SHARE_OB_SERVICE_NAME_PROXY_H_ + +#include "lib/ob_define.h" +#include "share/ob_define.h" + +namespace oceanbase +{ +namespace common +{ +class ObMySQLProxy; +class ObMySQLTransaction; +class ObISQLClient; +namespace sqlclient +{ +class ObMySQLResult; +} +} +namespace share +{ +class ObServiceNameID final +{ + OB_UNIS_VERSION(1); +public: + static const uint64_t INVALID_SERVICE_NAME_ID = 0; + static bool is_valid_service_name_id(const uint64_t id) { return INVALID_SERVICE_NAME_ID != id && OB_INVALID_ID != id; } + + explicit ObServiceNameID(const uint64_t id = INVALID_SERVICE_NAME_ID) : id_(id) {} + ObServiceNameID(const ObServiceNameID &other) : id_(other.id_) {} + ~ObServiceNameID() { reset(); } + + uint64_t id() const { return id_; } + void reset() { id_ = INVALID_SERVICE_NAME_ID; } + // assignment + ObServiceNameID &operator=(const uint64_t id) { id_ = id; return *this; } + ObServiceNameID &operator=(const ObServiceNameID &other) { id_ = other.id_; return *this; } + bool operator==(const ObServiceNameID &other) const { return id_ == other.id_; } + bool operator!=(const ObServiceNameID &other) const { return id_ != other.id_; } + + bool is_valid() const { return is_valid_service_name_id(id_); } + TO_STRING_KV(K_(id)); +private: + uint64_t id_; +}; +class ObServiceNameString final +{ + OB_UNIS_VERSION(1); +public: + ObServiceNameString() : str_() {} + ~ObServiceNameString() {} + int init(const ObString &str); + bool equal_to(const ObServiceNameString &service_name_string) const; + bool is_valid() const { return OB_SUCCESS == check_service_name(str_.str()); } + int assign(const ObServiceNameString &other); + bool is_empty() const { return str_.is_empty(); } + void reset() { return str_.reset(); } + static int check_service_name(const ObString &service_name_str); + const char *ptr() const { return str_.ptr(); } + TO_STRING_KV(K_(str)); +private: + ObFixedLengthString str_; +}; +struct ObServiceNameArg +{ +public: + enum ObServiceOp { + INVALID_SERVICE_OP = 0, + CREATE_SERVICE, + DELETE_SERVICE, + START_SERVICE, + STOP_SERVICE, + MAX_SERVICE_OP + }; + static const char *service_op_to_str(const ObServiceOp &service_op); + ObServiceNameArg() + : op_(INVALID_SERVICE_OP), + target_tenant_id_(OB_INVALID_TENANT_ID), + service_name_str_() {}; + ~ObServiceNameArg() {}; + int init(const ObServiceOp op, const uint64_t target_tenant_id, const ObString &service_name_str); + bool is_valid() const; + static bool is_valid_service_op(ObServiceOp op); + int assign(const ObServiceNameArg &other); + void reset(); + bool is_create_service() const {return CREATE_SERVICE == op_; } + bool is_delete_service() const {return DELETE_SERVICE == op_; } + bool is_start_service() const {return START_SERVICE == op_; } + bool is_stop_service() const {return STOP_SERVICE == op_; } + const share::ObServiceNameString& get_service_name_str() const { return service_name_str_; } + uint64_t get_target_tenant_id() const { return target_tenant_id_; } + const ObServiceOp &get_service_op() const { return op_; } + TO_STRING_KV(K_(op), "service_op_to_str", service_op_to_str(op_), + K_(target_tenant_id), K_(service_name_str)); +private: + ObServiceOp op_; + uint64_t target_tenant_id_; + share::ObServiceNameString service_name_str_; +}; +struct ObServiceName +{ + OB_UNIS_VERSION(1); +public: + enum ObServiceStatus + { + INVALID_SERVICE_STATUS = 0, + STARTED, + STOPPING, + STOPPED, + MAX_SERVICE_STATUS + }; + static const char *service_status_to_str(const 
ObServiceStatus &service_status); + static ObServiceStatus str_to_service_status(const ObString &service_status_str); + static bool is_valid_service_status(const ObServiceStatus &service_status); + + ObServiceName() + : tenant_id_(OB_INVALID_TENANT_ID), + service_name_id_(), + service_name_str_(), + service_status_(INVALID_SERVICE_STATUS) {} + ~ObServiceName() {} + int init( + const uint64_t tenant_id, + const uint64_t service_name_id, + const ObString &service_name_str, + const ObString &service_status); + bool is_valid() const; + int assign(const ObServiceName &other); + void reset(); + bool is_started() const { return STARTED == service_status_; } + bool is_stopping() const { return STOPPING == service_status_; } + bool is_stopped() const {return STOPPED == service_status_; } + uint64_t get_tenant_id() const { return tenant_id_; } + const ObServiceNameID &get_service_name_id() const { return service_name_id_; } + const ObServiceNameString &get_service_name_str() const { return service_name_str_; } + const ObServiceStatus &get_service_status() const { return service_status_; } + TO_STRING_KV(K_(tenant_id), "service_name_id", service_name_id_.id(), K_(service_name_str), K_(service_status), + "service_status_str", service_status_to_str(service_status_)); +private: + uint64_t tenant_id_; + ObServiceNameID service_name_id_; + ObServiceNameString service_name_str_; + ObServiceStatus service_status_; +}; + +class ObServiceNameProxy +{ +public: + static constexpr int64_t SERVICE_NAME_MAX_NUM = 1; + static int check_is_service_name_enabled(const uint64_t tenant_id); + static int select_all_service_names_with_epoch( + const int64_t tenant_id, + int64_t &epoch, + ObIArray &all_service_names); + static int select_service_name( + common::ObISQLClient &sql_proxy, + const uint64_t tenant_id, + const ObServiceNameString &service_name_str, + ObServiceName &service_name); + static int insert_service_name( + const uint64_t tenant_id, + const ObServiceNameString &service_name_str, + int64_t &epoch, + ObArray &all_service_names); + static int update_service_status( + const ObServiceName &service_name, + const ObServiceName::ObServiceStatus &new_status, + int64_t &epoch, + ObArray &all_service_names); + static int delete_service_name(const ObServiceName &service_name); + static int get_tenant_service_name_num( + common::ObISQLClient &sql_proxy, + const uint64_t tenant_id, + int64_t &service_name_num); + + ObServiceNameProxy() {} + virtual ~ObServiceNameProxy() {} +private: + static int select_all_service_names_( + common::ObISQLClient &sql_proxy, + const int64_t tenant_id, + ObIArray &all_service_names); + static int select_service_name_sql_helper_( + common::ObISQLClient &sql_proxy, + const int64_t tenant_id, + const bool extract_epoch, + ObSqlString &sql, + int64_t &epoch, + ObIArray &all_service_names); + static int trans_start_and_precheck_( + ObMySQLTransaction &trans, + const uint64_t tenant_id, + int64_t &epoch); + static void write_and_end_trans_( + int &ret, + ObMySQLTransaction &trans, + const uint64_t tenant_id, + const ObSqlString &sql, + ObArray &all_service_names); + static int build_service_name_( + const common::sqlclient::ObMySQLResult &res, + ObServiceName &service_name); + static int get_tenant_service_name_num_( + common::ObISQLClient &sql_proxy, + const uint64_t tenant_id, + int64_t &service_name_num); + DISALLOW_COPY_AND_ASSIGN(ObServiceNameProxy); +}; +} // end namespace share +} // end namespace oceanbase +#endif // OCEANBASE_SHARE_OB_SERVICE_NAME_PROXY_H_ \ No newline at end of file 
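Note: the service-name validity rules enforced by ObServiceNameString::check_service_name() in the new proxy above (1-64 characters, a leading letter, then only letters, digits and underscores, with the limit coming from OB_SERVICE_NAME_LENGTH) can be exercised outside the OceanBase tree with a small standalone sketch. The helper below is hypothetical and only mirrors the checks added in this patch; it is not part of the patch itself.

#include <cctype>
#include <cstddef>
#include <string>

// Hypothetical standalone mirror of ObServiceNameString::check_service_name():
// a service name must be 1-64 bytes long, start with a letter, and contain
// only letters, digits and underscores.
static bool is_valid_service_name(const std::string &name)
{
  constexpr std::size_t kMaxLen = 64;  // assumed to match OB_SERVICE_NAME_LENGTH
  if (name.empty() || name.size() > kMaxLen) {
    return false;
  }
  if (!std::isalpha(static_cast<unsigned char>(name[0]))) {
    return false;
  }
  for (std::size_t i = 1; i < name.size(); ++i) {
    const unsigned char c = static_cast<unsigned char>(name[i]);
    if (!std::isalpha(c) && !std::isdigit(c) && c != '_') {
      return false;
    }
  }
  return true;
}

// Example: is_valid_service_name("svc_01") returns true, while "1svc" (leading
// digit) and "svc-01" (hyphen) are rejected, matching the LOG_USER_ERROR cases
// in the patch.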
diff --git a/src/share/ob_share_util.cpp b/src/share/ob_share_util.cpp index 42e1c387a8..2ba4ba0f23 100644 --- a/src/share/ob_share_util.cpp +++ b/src/share/ob_share_util.cpp @@ -528,6 +528,108 @@ int ObShareUtil::check_compat_version_for_clone_tenant_with_tenant_role( return ret; } +int ObShareUtil::mtl_get_tenant_role(const uint64_t tenant_id, ObTenantRole::Role &tenant_role) +{ + int ret = OB_SUCCESS; + tenant_role = ObTenantRole::INVALID_TENANT; + if (OB_UNLIKELY(!is_valid_tenant_id(tenant_id))) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid tenant_id", KR(ret), K(tenant_id)); + } else if (is_sys_tenant(tenant_id) || is_meta_tenant(tenant_id)) { + tenant_role = ObTenantRole::PRIMARY_TENANT; + } else { + MTL_SWITCH(tenant_id) { + tenant_role = MTL_GET_TENANT_ROLE_CACHE(); + } + } + if (OB_SUCC(ret) && OB_UNLIKELY(is_invalid_tenant(tenant_role))) { + ret = OB_NEED_WAIT; + LOG_WARN("tenant role is not ready, need wait", KR(ret), K(tenant_id), K(tenant_role)); + } + return ret; +} + +int ObShareUtil::mtl_check_if_tenant_role_is_primary(const uint64_t tenant_id, bool &is_primary) +{ + int ret = OB_SUCCESS; + is_primary = false; + ObTenantRole::Role tenant_role; + if (OB_FAIL(mtl_get_tenant_role(tenant_id, tenant_role))) { + LOG_WARN("fail to execute mtl_get_tenant_role", KR(ret), K(tenant_id)); + } else if (is_primary_tenant(tenant_role)) { + is_primary = true; + } + return ret; +} + +int ObShareUtil::mtl_check_if_tenant_role_is_standby(const uint64_t tenant_id, bool &is_standby) +{ + int ret = OB_SUCCESS; + is_standby = false; + ObTenantRole::Role tenant_role; + if (OB_FAIL(mtl_get_tenant_role(tenant_id, tenant_role))) { + LOG_WARN("fail to execute mtl_get_tenant_role", KR(ret), K(tenant_id)); + } else if (is_standby_tenant(tenant_role)) { + is_standby = true; + } + return ret; +} +int ObShareUtil::table_get_tenant_role(const uint64_t tenant_id, ObTenantRole &tenant_role) +{ + int ret = OB_SUCCESS; + tenant_role.reset(); + if (OB_UNLIKELY(!is_valid_tenant_id(tenant_id))) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid tenant_id", KR(ret), K(tenant_id)); + } else if (is_sys_tenant(tenant_id) || is_meta_tenant(tenant_id)) { + tenant_role = ObTenantRole::PRIMARY_TENANT; + } else if (OB_ISNULL(GCTX.sql_proxy_)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("GCTX.sql_proxy_ is null", KR(ret), KP(GCTX.sql_proxy_)); + } else if (OB_FAIL(ObAllTenantInfoProxy::get_tenant_role(GCTX.sql_proxy_, tenant_id, tenant_role))) { + LOG_WARN("fail to get tenant role", KR(ret), KP(GCTX.sql_proxy_), K(tenant_id)); + } else if (tenant_role.is_invalid()) { + ret = OB_NEED_WAIT; + LOG_WARN("tenant role is not ready, need wait", KR(ret), K(tenant_role)); + } + return ret; +} +int ObShareUtil::table_check_if_tenant_role_is_primary(const uint64_t tenant_id, bool &is_primary) +{ + int ret = OB_SUCCESS; + share::ObTenantRole tenant_role; + is_primary = false; + if (OB_FAIL(table_get_tenant_role(tenant_id, tenant_role))) { + LOG_WARN("fail to execute table_get_tenant_role", KR(ret), K(tenant_id)); + } else if (tenant_role.is_primary()) { + is_primary = true; + } + return ret; +} +int ObShareUtil::table_check_if_tenant_role_is_standby(const uint64_t tenant_id, bool &is_standby) +{ + int ret = OB_SUCCESS; + share::ObTenantRole tenant_role; + is_standby = false; + if (OB_FAIL(table_get_tenant_role(tenant_id, tenant_role))) { + LOG_WARN("fail to execute table_get_tenant_role", KR(ret), K(tenant_id)); + } else if (tenant_role.is_standby()) { + is_standby = true; + } + return ret; +} +int 
ObShareUtil::table_check_if_tenant_role_is_restore(const uint64_t tenant_id, bool &is_restore) +{ + int ret = OB_SUCCESS; + share::ObTenantRole tenant_role; + is_restore = false; + if (OB_FAIL(table_get_tenant_role(tenant_id, tenant_role))) { + LOG_WARN("fail to execute table_get_tenant_role", KR(ret), K(tenant_id)); + } else if (tenant_role.is_restore()) { + is_restore = true; + } + return ret; +} const char *ObShareUtil::replica_type_to_string(const ObReplicaType type) { const char *str = NULL; diff --git a/src/share/ob_share_util.h b/src/share/ob_share_util.h index 9f3ac170c7..893d8fd6a1 100644 --- a/src/share/ob_share_util.h +++ b/src/share/ob_share_util.h @@ -14,6 +14,7 @@ #define OCEANBASE_SHARE_OB_SHARE_UTIL_H_ #include "share/ob_define.h" #include "share/scn.h" +#include "share/ob_tenant_role.h" namespace oceanbase { namespace common @@ -147,6 +148,13 @@ public: SCN &ora_rowscn); static bool is_tenant_enable_rebalance(const uint64_t tenant_id); static bool is_tenant_enable_transfer(const uint64_t tenant_id); + static int mtl_get_tenant_role(const uint64_t tenant_id, ObTenantRole::Role &tenant_role); + static int mtl_check_if_tenant_role_is_primary(const uint64_t tenant_id, bool &is_primary); + static int mtl_check_if_tenant_role_is_standby(const uint64_t tenant_id, bool &is_standby); + static int table_get_tenant_role(const uint64_t tenant_id, ObTenantRole &tenant_role); + static int table_check_if_tenant_role_is_primary(const uint64_t tenant_id, bool &is_primary); + static int table_check_if_tenant_role_is_standby(const uint64_t tenant_id, bool &is_standby); + static int table_check_if_tenant_role_is_restore(const uint64_t tenant_id, bool &is_restore); static const char *replica_type_to_string(const ObReplicaType type); static ObReplicaType string_to_replica_type(const char *str); static ObReplicaType string_to_replica_type(const ObString &str); diff --git a/src/share/ob_srv_rpc_proxy.h b/src/share/ob_srv_rpc_proxy.h index 6880a2c86c..006a98f10d 100644 --- a/src/share/ob_srv_rpc_proxy.h +++ b/src/share/ob_srv_rpc_proxy.h @@ -265,6 +265,7 @@ public: RPC_S(PR5 cancel_gather_stats, OB_CANCEL_GATHER_STATS, (ObCancelGatherStatsArg)); RPC_S(PR5 force_set_tenant_log_disk, OB_LOG_FORCE_SET_TENANT_LOG_DISK, (obrpc::ObForceSetTenantLogDiskArg)); RPC_S(PR5 dump_server_usage, OB_FORCE_DUMP_SERVER_USAGE, (obrpc::ObDumpServerUsageRequest), obrpc::ObDumpServerUsageResult); + RPC_AP(PR5 refresh_service_name, OB_REFRESH_SERVICE_NAME, (obrpc::ObRefreshServiceNameArg), obrpc::ObRefreshServiceNameRes); RPC_AP(PR5 get_tenant_logical_resource, OB_CAL_STANDBY_TENANT_PHY_RESOURCE, (obrpc::ObGetTenantResArg), obrpc::ObTenantLogicalRes); RPC_S(PR5 phy_res_calculate_by_unit, OB_CAL_UNIT_PHY_RESOURCE, (obrpc::Int64), share::ObMinPhyResourceResult); RPC_S(PR5 rpc_reverse_keepalive, OB_RPC_REVERSE_KEEPALIVE, (obrpc::ObRpcReverseKeepaliveArg), obrpc::ObRpcReverseKeepaliveResp); diff --git a/src/share/ob_tenant_info_proxy.cpp b/src/share/ob_tenant_info_proxy.cpp index dce276cc9d..e6eeea6b4d 100755 --- a/src/share/ob_tenant_info_proxy.cpp +++ b/src/share/ob_tenant_info_proxy.cpp @@ -60,9 +60,9 @@ SCN gen_new_replayable_scn(const SCN &cur_replayable_scn, const SCN &desired_rep return MIN(MAX(cur_replayable_scn, desired_replayable_scn), new_sync_scn); } -SCN gen_new_standby_scn(const SCN &cur_standby_scn, const SCN &desired_standby_scn, const SCN &new_replayable_scn) +SCN gen_new_readable_scn(const SCN &cur_readable_scn, const SCN &desired_readable_scn, const SCN &new_replayable_scn) { - return 
MIN(MAX(cur_standby_scn, desired_standby_scn), new_replayable_scn); + return MIN(MAX(cur_readable_scn, desired_readable_scn), new_replayable_scn); } ////////////ObAllTenantInfo DEFINE_TO_YSON_KV(ObAllTenantInfo, @@ -70,7 +70,7 @@ DEFINE_TO_YSON_KV(ObAllTenantInfo, OB_ID(switchover_epoch), switchover_epoch_, OB_ID(sync_scn), sync_scn_, OB_ID(replayable_scn), replayable_scn_, - OB_ID(standby_scn), standby_scn_, + OB_ID(standby_scn), readable_scn_, OB_ID(recovery_until_scn), recovery_until_scn_, OB_ID(tenant_role), tenant_role_, OB_ID(switchover_status), switchover_status_); @@ -78,16 +78,16 @@ DEFINE_TO_YSON_KV(ObAllTenantInfo, bool ObAllTenantInfo::is_valid() const { return OB_INVALID_TENANT_ID != tenant_id_ - && 0 <= switchover_epoch_ - && sync_scn_.is_valid_and_not_min() - && replayable_scn_.is_valid_and_not_min() - && standby_scn_.is_valid_and_not_min() - && recovery_until_scn_.is_valid_and_not_min() - && tenant_role_.is_valid() - && switchover_status_.is_valid() - && log_mode_.is_valid() - && is_valid_tenant_scn(sync_scn_, replayable_scn_, standby_scn_, recovery_until_scn_) - && restore_data_mode_.is_valid(); + && 0 <= switchover_epoch_ + && sync_scn_.is_valid_and_not_min() + && replayable_scn_.is_valid_and_not_min() + && readable_scn_.is_valid_and_not_min() + && recovery_until_scn_.is_valid_and_not_min() + && tenant_role_.is_valid() + && switchover_status_.is_valid() + && log_mode_.is_valid() + && is_valid_tenant_scn(sync_scn_, replayable_scn_, readable_scn_, recovery_until_scn_) + && restore_data_mode_.is_valid(); } int ObAllTenantInfo::init( @@ -97,7 +97,7 @@ int ObAllTenantInfo::init( int64_t switchover_epoch, const SCN &sync_scn, const SCN &replayable_scn, - const SCN &standby_scn, + const SCN &readable_scn, const SCN &recovery_until_scn, const ObArchiveMode &log_mode, const share::ObLSID &max_ls_id, @@ -110,15 +110,15 @@ int ObAllTenantInfo::init( || 0 > switchover_epoch || !sync_scn.is_valid_and_not_min() || !replayable_scn.is_valid_and_not_min() - || !standby_scn.is_valid_and_not_min() + || !readable_scn.is_valid_and_not_min() || !recovery_until_scn.is_valid_and_not_min() || !log_mode.is_valid() - || !is_valid_tenant_scn(sync_scn, replayable_scn, standby_scn, recovery_until_scn) + || !is_valid_tenant_scn(sync_scn, replayable_scn, readable_scn, recovery_until_scn) || !max_ls_id.is_valid() || !restore_data_mode.is_valid())) { ret = OB_INVALID_ARGUMENT; LOG_WARN("invalid argument", KR(ret), K(tenant_id), K(tenant_role), K(switchover_status), - K(switchover_epoch), K(sync_scn), K(replayable_scn), K(standby_scn), K(recovery_until_scn), + K(switchover_epoch), K(sync_scn), K(replayable_scn), K(readable_scn), K(recovery_until_scn), K(log_mode), K(max_ls_id), K(restore_data_mode)); } else { tenant_id_ = tenant_id; @@ -127,7 +127,7 @@ int ObAllTenantInfo::init( switchover_epoch_ = switchover_epoch; sync_scn_ = sync_scn; replayable_scn_ = replayable_scn; - standby_scn_ = standby_scn; + readable_scn_ = readable_scn; recovery_until_scn_ = recovery_until_scn; log_mode_ = log_mode; max_ls_id_ = max_ls_id; @@ -146,7 +146,7 @@ void ObAllTenantInfo::assign(const ObAllTenantInfo &other) switchover_epoch_ = other.switchover_epoch_; sync_scn_ = other.sync_scn_; replayable_scn_ = other.replayable_scn_; - standby_scn_ = other.standby_scn_; + readable_scn_ = other.readable_scn_; recovery_until_scn_ = other.recovery_until_scn_; log_mode_ = other.log_mode_; max_ls_id_ = other.max_ls_id_; @@ -163,7 +163,7 @@ void ObAllTenantInfo::reset() switchover_epoch_ = OB_INVALID_VERSION; sync_scn_.set_min(); 
replayable_scn_.set_min(); - standby_scn_.set_min() ; + readable_scn_.set_min() ; recovery_until_scn_.set_min(); log_mode_.reset(); max_ls_id_.reset(); @@ -177,7 +177,7 @@ void ObAllTenantInfo::reset() OB_SERIALIZE_MEMBER(ObAllTenantInfo, tenant_id_, tenant_role_, switchover_status_, switchover_epoch_, sync_scn_, replayable_scn_, - standby_scn_, // FARM COMPAT WHITELIST + readable_scn_, // FARM COMPAT WHITELIST recovery_until_scn_, log_mode_, max_ls_id_, restore_data_mode_); @@ -231,7 +231,7 @@ int ObAllTenantInfoProxy::init_tenant_info( tenant_info.get_switchover_epoch(), tenant_info.get_sync_scn().get_val_for_inner_table_field(), tenant_info.get_replayable_scn().get_val_for_inner_table_field(), - tenant_info.get_standby_scn().get_val_for_inner_table_field(), + tenant_info.get_readable_scn().get_val_for_inner_table_field(), tenant_info.get_recovery_until_scn().get_val_for_inner_table_field(), tenant_info.get_log_mode().to_str(), tenant_info.get_max_ls_id().id()))) { @@ -475,7 +475,7 @@ int ObAllTenantInfoProxy::update_tenant_recovery_status_in_trans( } else { SCN new_sync_scn = gen_new_sync_scn(old_tenant_info.get_sync_scn(), sync_scn, old_tenant_info.get_recovery_until_scn()); SCN new_replayable_scn = gen_new_replayable_scn(old_tenant_info.get_replayable_scn(), replay_scn, new_sync_scn); - SCN new_readable_scn = gen_new_standby_scn(old_tenant_info.get_standby_scn(), readable_scn, new_replayable_scn); + SCN new_readable_scn = gen_new_readable_scn(old_tenant_info.get_readable_scn(), readable_scn, new_replayable_scn); omt::ObTenantConfigGuard tenant_config(TENANT_CONF(tenant_id)); if (OB_UNLIKELY(!tenant_config.is_valid())) { @@ -495,14 +495,14 @@ int ObAllTenantInfoProxy::update_tenant_recovery_status_in_trans( && new_readable_scn_plus_gap.is_valid() && new_replayable_scn > new_readable_scn_plus_gap && new_readable_scn_plus_gap >= old_tenant_info.get_replayable_scn() - && old_tenant_info.get_standby_scn() > SCN::base_scn()) { + && old_tenant_info.get_readable_scn() > SCN::base_scn()) { // condition: !old_tenant_info.get_max_ls_id().is_sys_ls() // If max_ls_id is sys ls, this logic is not needed. 
// The goal of this logic is to minimize the difference of readable_scn among multiple ls - // condition: old_tenant_info.get_standby_scn() > SCN::base_scn() + // condition: old_tenant_info.get_readable_scn() > SCN::base_scn() // This condition is for restore tenant - // sys ls's readable_scn/standby_scn starts from base_scn + // sys ls's readable_scn starts from base_scn // replayable_scn cannot start from base_scn, it's too slow when we restore tenant // At the beginning time, replayable_scn should be sync_scn new_replayable_scn = new_readable_scn_plus_gap; @@ -511,7 +511,7 @@ int ObAllTenantInfoProxy::update_tenant_recovery_status_in_trans( if (old_tenant_info.get_sync_scn() == new_sync_scn && old_tenant_info.get_replayable_scn() == new_replayable_scn - && old_tenant_info.get_standby_scn() == new_readable_scn) { + && old_tenant_info.get_readable_scn() == new_readable_scn) { LOG_DEBUG("no need update", K(old_tenant_info), K(new_sync_scn), K(new_replayable_scn), K(new_readable_scn)); } else if (OB_FAIL(sql.assign_fmt( "update %s set sync_scn = %ld, replayable_scn = %ld, " @@ -746,7 +746,6 @@ int ObAllTenantInfoProxy::update_tenant_role_in_trans( int64_t cost = ObTimeUtility::current_time() - begin_time; ROOTSERVICE_EVENT_ADD("tenant_info", "update_tenant_role", K(ret), K(tenant_id), K(new_status), K(new_switchover_ts), K(old_status), K(cost)); - return ret; } diff --git a/src/share/ob_tenant_info_proxy.h b/src/share/ob_tenant_info_proxy.h index fa841287e6..d0f60658b2 100755 --- a/src/share/ob_tenant_info_proxy.h +++ b/src/share/ob_tenant_info_proxy.h @@ -44,7 +44,7 @@ namespace share bool is_valid_tenant_scn( const share::SCN &sync_scn, const share::SCN &replayable_scn, - const share::SCN &standby_scn, + const share::SCN &readable_scn, const share::SCN &recovery_until_scn); SCN gen_new_sync_scn(const share::SCN &cur_sync_scn, const share::SCN &desired_sync_scn, const share::SCN &cur_recovery_until_scn); @@ -63,7 +63,7 @@ public: * @param[in] switchover_epoch * @param[in] sync_scn * @param[in] replayable_scn - * @param[in] standby_scn + * @param[in] readable_scn * @param[in] recovery_until_scn * @param[in] log_mode * @param[in] restore_data_mode @@ -74,7 +74,7 @@ public: const int64_t switchover_epoch = 0, const SCN &sync_scn = SCN::base_scn(), const SCN &replayable_scn = SCN::base_scn(), - const SCN &standby_scn = SCN::base_scn(), + const SCN &readable_scn = SCN::base_scn(), const SCN &recovery_until_scn = SCN::base_scn(), const ObArchiveMode &log_mode = NOARCHIVE_MODE, const share::ObLSID &max_ls_id = share::SYS_LS, @@ -116,9 +116,13 @@ IS_TENANT_STATUS(prepare_switching_to_standby) IS_TENANT_STATUS(prepare_flashback_for_switch_to_primary) #undef IS_TENANT_STATUS - TO_STRING_KV(K_(tenant_id), K_(tenant_role), K_(switchover_status), - K_(switchover_epoch), K_(sync_scn), K_(replayable_scn), - K_(standby_scn), K_(recovery_until_scn), K_(log_mode), K_(max_ls_id), K_(restore_data_mode)); + TO_STRING_KV(K_(tenant_id), "tenant_role", tenant_role_.to_str(), + "switchover_status", switchover_status_.to_str(), + K_(switchover_epoch), "sync_scn", sync_scn_.get_val_for_inner_table_field(), + "replayable_scn", replayable_scn_.get_val_for_inner_table_field(), + "readable_scn", readable_scn_.get_val_for_inner_table_field(), + "recovery_until_scn", recovery_until_scn_.get_val_for_inner_table_field(), + "log_mode", log_mode_.to_str(), "max_ls_id", max_ls_id_.id(), K_(restore_data_mode)); DECLARE_TO_YSON_KV; // Getter&Setter @@ -140,7 +144,7 @@ public:\ Property_declare_var(int64_t, switchover_epoch) 
Property_declare_var(share::SCN, sync_scn) Property_declare_var(share::SCN, replayable_scn) - Property_declare_var(share::SCN, standby_scn) + Property_declare_var(share::SCN, readable_scn) Property_declare_var(share::SCN, recovery_until_scn) Property_declare_var(ObArchiveMode, log_mode) Property_declare_var(share::ObLSID, max_ls_id) diff --git a/src/share/ob_tenant_role.h b/src/share/ob_tenant_role.h index 693cc87fdc..1d148dfd64 100644 --- a/src/share/ob_tenant_role.h +++ b/src/share/ob_tenant_role.h @@ -55,12 +55,13 @@ public: bool operator != (const ObTenantRole &other) const { return value_ != other.value_; } // ObTenantRole attribute interface + bool is_invalid() const { return INVALID_TENANT == value_; } bool is_primary() const { return PRIMARY_TENANT == value_; } bool is_standby() const { return STANDBY_TENANT == value_; } bool is_restore() const { return RESTORE_TENANT == value_; } bool is_clone() const { return CLONE_TENANT == value_; } - TO_STRING_KV(K_(value)); + TO_STRING_KV("tenant_role", to_str(), K_(value)); DECLARE_TO_YSON_KV; private: Role value_; diff --git a/src/share/ob_tenant_switchover_status.h b/src/share/ob_tenant_switchover_status.h index cd6f6620ce..13c0084f8b 100644 --- a/src/share/ob_tenant_switchover_status.h +++ b/src/share/ob_tenant_switchover_status.h @@ -74,7 +74,7 @@ IS_TENANT_STATUS(SWITCHING_TO_STANDBY_STATUS, switching_to_standby) IS_TENANT_STATUS(PREPARE_FLASHBACK_FOR_SWITCH_TO_PRIMARY_STATUS, prepare_flashback_for_switch_to_primary) #undef IS_TENANT_STATUS - TO_STRING_KV(K_(value)); + TO_STRING_KV("switchover_status", to_str(), K_(value)); DECLARE_TO_YSON_KV; private: ObTenantSwitchoverStatus::Status value_; diff --git a/src/share/ob_upgrade_utils.cpp b/src/share/ob_upgrade_utils.cpp index 5daa37d983..9a1ec2d554 100755 --- a/src/share/ob_upgrade_utils.cpp +++ b/src/share/ob_upgrade_utils.cpp @@ -812,10 +812,6 @@ int ObBaseUpgradeProcessor::check_inner_stat() const ret = OB_ERR_UNEXPECTED; LOG_WARN("invalid processor status", KR(ret), K_(data_version), K_(tenant_id), K_(mode)); - } else if (GCTX.is_standby_cluster() && OB_SYS_TENANT_ID != tenant_id_) { - ret = OB_NOT_SUPPORTED; - LOG_WARN("run upgrade job for non-sys tenant in standby cluster is not supported", - KR(ret), K_(tenant_id)); } else if (OB_ISNULL(check_stop_provider_)) { ret = OB_ERR_UNEXPECTED; LOG_WARN("check_stop_provider is null", KR(ret)); @@ -1462,6 +1458,8 @@ int ObUpgradeFor4330Processor::post_upgrade() LOG_WARN("fail to check inner stat", KR(ret)); } else if (OB_FAIL(post_upgrade_for_external_table_flag())) { LOG_WARN("fail to alter log external table flag", KR(ret)); + } else if (OB_FAIL(post_upgrade_for_service_name())) { + LOG_WARN("post upgrade for service name failed", KR(ret)); } else if (OB_FAIL(post_upgrade_for_optimizer_stats())) { LOG_WARN("fail to upgrade optimizer stats", KR(ret)); } @@ -1482,6 +1480,28 @@ int ObUpgradeFor4330Processor::post_upgrade_for_external_table_flag() return ret; } +int ObUpgradeFor4330Processor::post_upgrade_for_service_name() +{ + int ret = OB_SUCCESS; + int64_t affected_rows = 0; + if (OB_ISNULL(sql_proxy_)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("error unexpected", KR(ret), KP(sql_proxy_)); + } else if (!is_meta_tenant(tenant_id_)) { + LOG_INFO("not meta tenant, skip", K(tenant_id_)); + } else { + ObSqlString sql; + uint64_t user_tenant_id = gen_user_tenant_id(tenant_id_); + if (OB_FAIL(sql.assign_fmt("INSERT IGNORE INTO %s (tenant_id, name, value) VALUES (%lu, '%s', 0)", + OB_ALL_SERVICE_EPOCH_TNAME, user_tenant_id, 
ObServiceEpochProxy::SERVICE_NAME_EPOCH))) { + LOG_WARN("fail to assign sql assign", KR(ret)); + } else if (OB_FAIL(sql_proxy_->write(tenant_id_, sql.ptr(), affected_rows))) { + LOG_WARN("fail to execute sql", KR(ret), K(sql)); + } else {} + } + FLOG_INFO("insert service name epoch", KR(ret), K(tenant_id_), K(affected_rows)); + return ret; +} int ObUpgradeFor4330Processor::post_upgrade_for_optimizer_stats() { int ret = OB_SUCCESS; @@ -1519,6 +1539,5 @@ int ObUpgradeFor4330Processor::post_upgrade_for_optimizer_stats() /* =========== 4330 upgrade processor end ============= */ -/* =========== special upgrade processor end ============= */ } // end share } // end oceanbase diff --git a/src/share/ob_upgrade_utils.h b/src/share/ob_upgrade_utils.h index 2a2fcd52a1..35f6c767c4 100755 --- a/src/share/ob_upgrade_utils.h +++ b/src/share/ob_upgrade_utils.h @@ -270,6 +270,7 @@ public: virtual int post_upgrade() override; private: int post_upgrade_for_external_table_flag(); + int post_upgrade_for_service_name(); int post_upgrade_for_optimizer_stats(); }; /* =========== special upgrade processor end ============= */ diff --git a/src/share/schema/ob_dependency_info.cpp b/src/share/schema/ob_dependency_info.cpp index 7c3f1e529e..10ce4ff2d3 100644 --- a/src/share/schema/ob_dependency_info.cpp +++ b/src/share/schema/ob_dependency_info.cpp @@ -1287,7 +1287,6 @@ int ObReferenceObjTable::fill_rowkey_pairs( int ObReferenceObjTable::batch_execute_insert_or_update_obj_dependency( const uint64_t tenant_id, - const bool is_standby, const int64_t new_schema_version, const ObReferenceObjTable::DependencyObjKeyItemPairs &dep_objs, ObMySQLTransaction &trans, @@ -1299,8 +1298,6 @@ int ObReferenceObjTable::batch_execute_insert_or_update_obj_dependency( if (OB_INVALID_ID == tenant_id) { ret = OB_ERR_UNEXPECTED; LOG_WARN("invalid argument", K(ret), K(tenant_id)); - } else if (is_standby) { - // do nothing } else { ObSqlString sql; ObDMLSqlSplicer dml; @@ -1345,7 +1342,6 @@ int ObReferenceObjTable::batch_execute_insert_or_update_obj_dependency( int ObReferenceObjTable::batch_execute_delete_obj_dependency( const uint64_t tenant_id, - const bool is_standby, const ObReferenceObjTable::DependencyObjKeyItemPairs &dep_objs, ObMySQLTransaction &trans) { @@ -1354,8 +1350,6 @@ int ObReferenceObjTable::batch_execute_delete_obj_dependency( if (OB_INVALID_ID == tenant_id) { ret = OB_ERR_UNEXPECTED; LOG_WARN("invalid argument", K(ret), K(tenant_id)); - } else if (is_standby) { - // do nothing } else { share::ObDMLSqlSplicer dml; ObSqlString sql; @@ -1510,7 +1504,11 @@ int ObReferenceObjTable::process_reference_obj_table(const uint64_t tenant_id, sql::ObMaintainDepInfoTaskQueue &task_queue) { int ret = OB_SUCCESS; - if (!is_inited() || GCTX.is_standby_cluster()) { + share::ObTenantRole::Role tenant_role; + bool is_standby = false; + if (OB_FAIL(ObShareUtil::mtl_check_if_tenant_role_is_standby(tenant_id, is_standby))) { + LOG_WARN("fail to execute mtl_check_if_tenant_role_is_standby", KR(ret), K(tenant_id)); + } else if (OB_UNLIKELY(!is_inited() || is_standby)) { if (OB_INVALID_ID != dep_obj_id) { OZ (task_queue.erase_view_id_from_set(dep_obj_id)); } diff --git a/src/share/schema/ob_dependency_info.h b/src/share/schema/ob_dependency_info.h index 348ac73e38..56bff18a19 100644 --- a/src/share/schema/ob_dependency_info.h +++ b/src/share/schema/ob_dependency_info.h @@ -467,7 +467,6 @@ public: inline const RefObjVersionMap &get_ref_obj_table() const { return ref_obj_version_table_; } static int batch_execute_insert_or_update_obj_dependency( 
const uint64_t tenant_id, - const bool is_standby, const int64_t new_schema_version, const ObReferenceObjTable::DependencyObjKeyItemPairs &dep_objs, ObMySQLTransaction &trans, @@ -475,7 +474,6 @@ public: rootserver::ObDDLOperator &ddl_operator); static int batch_execute_delete_obj_dependency( const uint64_t tenant_id, - const bool is_standby, const ObReferenceObjTable::DependencyObjKeyItemPairs &dep_objs, ObMySQLTransaction &trans); static int update_max_dependency_version( diff --git a/src/share/schema/ob_multi_version_schema_service.cpp b/src/share/schema/ob_multi_version_schema_service.cpp index ec18ed5867..1e6a387f69 100644 --- a/src/share/schema/ob_multi_version_schema_service.cpp +++ b/src/share/schema/ob_multi_version_schema_service.cpp @@ -968,7 +968,7 @@ int ObMultiVersionSchemaService::get_cluster_schema_guard( // new schema refresh if (OB_FAIL(guard.fast_reset())) { LOG_WARN("fail to reset guard", K(ret)); - } else if (OB_FAIL(guard.init(GCTX.is_standby_cluster()))) { + } else if (OB_FAIL(guard.init())) { LOG_WARN("fail to init guard", K(ret)); } else { ObSEArray tenant_ids; @@ -1036,7 +1036,7 @@ int ObMultiVersionSchemaService::get_cluster_schema_guard( } else { // switchover/failover not clear schema_status, Cannot trust schema_status content unconditionally // bugfix: - if (guard.is_standby_cluster() || (*tenant)->is_restore()) { + if ((*tenant)->is_restore()) { if (OB_FAIL(get_schema_status(schema_status_array, tenant_id, schema_status))) { LOG_WARN("fail to get schema status", K(ret), KPC(*tenant)); } @@ -1093,7 +1093,7 @@ int ObMultiVersionSchemaService::get_tenant_schema_guard( LOG_WARN("invalid tenant_id", K(ret), K(tenant_id)); } else if (OB_FAIL(guard.fast_reset())) { LOG_WARN("fail to reset schema guard", K(ret)); - } else if (OB_FAIL(guard.init(GCTX.is_standby_cluster()))) { + } else if (OB_FAIL(guard.init())) { LOG_WARN("fail to init guard", K(ret)); } sys_schema_status.tenant_id_ = OB_SYS_TENANT_ID; @@ -1150,8 +1150,7 @@ int ObMultiVersionSchemaService::get_tenant_schema_guard( // Avoid circular dependencies } else if (ObSchemaService::g_liboblog_mode_) { tenant_schema_status.tenant_id_ = tenant_id; - } else if (!guard.is_standby_cluster() - && OB_FAIL(check_tenant_is_restore(&guard, tenant_id, guard.restore_tenant_exist_))) { + } else if (OB_FAIL(check_tenant_is_restore(&guard, tenant_id, guard.restore_tenant_exist_))) { LOG_WARN("fail to check restore tenant exist", K(ret), K(tenant_id)); } else if (guard.use_schema_status()) { ObSchemaStatusProxy *schema_status_proxy = GCTX.schema_status_proxy_; @@ -1482,10 +1481,9 @@ int ObMultiVersionSchemaService::retry_get_schema_guard( || is_meta_tenant(tenant_id) || ObSchemaService::g_liboblog_mode_) { // skip - } else if (!schema_guard.is_standby_cluster() - && OB_FAIL(check_tenant_is_restore(&schema_guard, tenant_id, is_restore))) { + } else if (OB_FAIL(check_tenant_is_restore(&schema_guard, tenant_id, is_restore))) { LOG_WARN("fail to check restore tenant exist", K(ret), K(tenant_id)); - } else if (schema_guard.is_standby_cluster() || is_restore) { + } else if (is_restore) { ObSchemaStatusProxy *schema_status_proxy = GCTX.schema_status_proxy_; if (OB_ISNULL(schema_status_proxy)) { ret = OB_ERR_UNEXPECTED; @@ -2392,7 +2390,6 @@ int ObMultiVersionSchemaService::refresh_and_add_schema(const ObIArray FLOG_INFO("[REFRESH_SCHEMA] start to refresh and add schema", K(tenant_ids)); const int64_t start = ObTimeUtility::current_time(); int ret = OB_SUCCESS; - bool is_standby_cluster = GCTX.is_standby_cluster(); if 
(!check_inner_stat()) { ret = OB_INNER_STAT_ERROR; LOG_WARN("inner stat error", K(ret)); @@ -2410,7 +2407,7 @@ int ObMultiVersionSchemaService::refresh_and_add_schema(const ObIArray LOG_WARN("fail to check restore tenant exist", K(ret), K(tmp_ret), K(tenant_ids)); restore_tenant_exist = true; } - if (is_standby_cluster || restore_tenant_exist) { + if (restore_tenant_exist) { if (OB_ISNULL(schema_status_proxy)) { ret = OB_ERR_UNEXPECTED; LOG_WARN("schema_status_proxy is null", K(ret)); @@ -2723,7 +2720,6 @@ int ObMultiVersionSchemaService::refresh_tenant_schema( const int64_t start = ObTimeUtility::current_time(); int ret = OB_SUCCESS; bool refresh_full_schema = false; - bool is_standby_cluster = GCTX.is_standby_cluster(); bool is_restore = false; if (!check_inner_stat()) { ret = OB_INNER_STAT_ERROR; @@ -2743,7 +2739,7 @@ int ObMultiVersionSchemaService::refresh_tenant_schema( ObISQLClient &sql_client = *sql_proxy_; // read refresh_schema_status from inner table - if ((!is_standby_cluster && !is_restore) + if (!is_restore || is_sys_tenant(tenant_id) || is_meta_tenant(tenant_id)) { // 1. System tenants strengthen the consistency of reading and refresh schema diff --git a/src/share/schema/ob_schema_cache.cpp b/src/share/schema/ob_schema_cache.cpp index 91903752b2..c2f50c7175 100644 --- a/src/share/schema/ob_schema_cache.cpp +++ b/src/share/schema/ob_schema_cache.cpp @@ -933,7 +933,6 @@ int ObSchemaFetcher::fetch_schema(ObSchemaType schema_type, LOG_WARN("inner stat error", K(ret)); } else { do { - observer::ObUseWeakGuard use_weak_guard; if (INT64_MAX == schema_version) { // skip inspection while fetch latest schema } else if (OB_FAIL(schema_service_->can_read_schema_version(schema_status, schema_version))) { diff --git a/src/share/schema/ob_schema_getter_guard.cpp b/src/share/schema/ob_schema_getter_guard.cpp index 3658de6659..fa08fc026d 100644 --- a/src/share/schema/ob_schema_getter_guard.cpp +++ b/src/share/schema/ob_schema_getter_guard.cpp @@ -96,7 +96,6 @@ ObSchemaGetterGuard::ObSchemaGetterGuard() schema_objs_(OB_MALLOC_NORMAL_BLOCK_SIZE, ModulePageAllocator(local_allocator_)), mod_(ObSchemaMgrItem::MOD_STACK), schema_guard_type_(INVALID_SCHEMA_GUARD_TYPE), - is_standby_cluster_(false), restore_tenant_exist_(false), is_inited_(false), pin_cache_size_(0) @@ -112,7 +111,6 @@ ObSchemaGetterGuard::ObSchemaGetterGuard(const ObSchemaMgrItem::Mod mod) schema_objs_(OB_MALLOC_NORMAL_BLOCK_SIZE, ModulePageAllocator(local_allocator_)), mod_(mod), schema_guard_type_(INVALID_SCHEMA_GUARD_TYPE), - is_standby_cluster_(false), restore_tenant_exist_(false), is_inited_(false), pin_cache_size_(0) @@ -128,15 +126,13 @@ ObSchemaGetterGuard::~ObSchemaGetterGuard() } } -int ObSchemaGetterGuard::init( - const bool is_standby_cluster) +int ObSchemaGetterGuard::init() { int ret = OB_SUCCESS; if (is_inited_) { ret = OB_INIT_TWICE; LOG_WARN("init twice", KR(ret)); } else { - is_standby_cluster_ = is_standby_cluster; pin_cache_size_ = 0; is_inited_ = true; } @@ -149,7 +145,6 @@ int ObSchemaGetterGuard::reset() schema_service_ = NULL; schema_objs_.reset(); - is_standby_cluster_ = false; restore_tenant_exist_ = false; if (pin_cache_size_ >= FULL_SCHEMA_MEM_THREHOLD) { FLOG_WARN("hold too much full schema memory", K(tenant_id_), K(pin_cache_size_), K(lbt())); @@ -9890,18 +9885,13 @@ bool ObSchemaGetterGuard::ignore_tenant_not_exist_error( const uint64_t tenant_id) { bool bret = false; - if (is_standby_cluster()) { - // ingore error while standby cluster create tenant. 
+ // ignore error when tenant is in physical restore. + bool is_restore = false; + int tmp_ret = check_tenant_is_restore(tenant_id, is_restore); + if (OB_SUCCESS != tmp_ret) { + LOG_WARN_RET(tmp_ret, "fail to check tenant is restore", K(bret), K(tmp_ret), K(tenant_id)); + } else if (is_restore) { bret = true; - } else { - // ignore error when tenant is in physical restore. - bool is_restore = false; - int tmp_ret = check_tenant_is_restore(tenant_id, is_restore); - if (OB_SUCCESS != tmp_ret) { - LOG_WARN_RET(tmp_ret, "fail to check tenant is restore", K(bret), K(tmp_ret), K(tenant_id)); - } else if (is_restore) { - bret = true; - } } return bret; } diff --git a/src/share/schema/ob_schema_getter_guard.h b/src/share/schema/ob_schema_getter_guard.h index 7ff14e6c46..220bbda01a 100644 --- a/src/share/schema/ob_schema_getter_guard.h +++ b/src/share/schema/ob_schema_getter_guard.h @@ -1046,9 +1046,8 @@ public: SchemaGuardType get_schema_guard_type() const { return schema_guard_type_; } - bool is_standby_cluster() { return is_standby_cluster_; } bool restore_tenant_exist() { return restore_tenant_exist_; } - bool use_schema_status() { return is_standby_cluster() || restore_tenant_exist(); } + bool use_schema_status() { return restore_tenant_exist(); } int check_formal_guard() const; int is_lazy_mode(const uint64_t tenant_id, bool &is_lazy) const; @@ -1179,7 +1178,7 @@ private: const T *&schema, common::ObKVCacheHandle &handle); - int init(const bool is_standby_cluster); + int init(); int fast_reset() { return is_inited_? reset(): common::OB_SUCCESS; } @@ -1232,7 +1231,6 @@ private: ObSchemaMgrItem::Mod mod_; SchemaGuardType schema_guard_type_; - bool is_standby_cluster_; bool restore_tenant_exist_; bool is_inited_; int64_t pin_cache_size_; diff --git a/src/share/schema/ob_schema_struct.cpp b/src/share/schema/ob_schema_struct.cpp index 582d212c8c..32a10fe683 100644 --- a/src/share/schema/ob_schema_struct.cpp +++ b/src/share/schema/ob_schema_struct.cpp @@ -2622,30 +2622,15 @@ int ObDatabaseSchema::get_primary_zone_inherit( ObPrimaryZone &primary_zone) const { int ret = OB_SUCCESS; - bool use_tenant_primary_zone = GCTX.is_standby_cluster() && OB_SYS_TENANT_ID != tenant_id_; primary_zone.reset(); - if (!use_tenant_primary_zone) { - const share::schema::ObSimpleTenantSchema *simple_tenant = nullptr; - if (OB_FAIL(schema_guard.get_tenant_info(tenant_id_, simple_tenant))) { - LOG_WARN("fail to get tenant info", K(ret), K_(tenant_id)); - } else if (OB_UNLIKELY(nullptr == simple_tenant)) { - ret = OB_TENANT_NOT_EXIST; - LOG_WARN("tenant schema ptr is null", K(ret), KPC(simple_tenant)); - } else { - use_tenant_primary_zone = simple_tenant->is_restore(); - } - } - if (OB_FAIL(ret)) { - } else { - const ObTenantSchema *tenant_schema = NULL; - if (OB_FAIL(schema_guard.get_tenant_info(tenant_id_, tenant_schema))) { - LOG_WARN("fail to get tenant schema", K(ret), K(database_id_), K(tenant_id_)); - } else if (OB_UNLIKELY(NULL == tenant_schema)) { - ret = OB_ERR_UNEXPECTED; - LOG_WARN("tenant schema null", K(ret), K(database_id_), K(tenant_id_), KP(tenant_schema)); - } else if (OB_FAIL(tenant_schema->get_primary_zone_inherit(schema_guard, primary_zone))) { - LOG_WARN("fail to get primary zone array", K(ret), K(database_id_), K(tenant_id_)); - } + const ObTenantSchema *tenant_schema = NULL; + if (OB_FAIL(schema_guard.get_tenant_info(tenant_id_, tenant_schema))) { + LOG_WARN("fail to get tenant schema", K(ret), K(database_id_), K(tenant_id_)); + } else if (OB_UNLIKELY(NULL == tenant_schema)) { + ret = 
OB_ERR_UNEXPECTED; + LOG_WARN("tenant schema null", K(ret), K(database_id_), K(tenant_id_), KP(tenant_schema)); + } else if (OB_FAIL(tenant_schema->get_primary_zone_inherit(schema_guard, primary_zone))) { + LOG_WARN("fail to get primary zone array", K(ret), K(database_id_), K(tenant_id_)); } return ret; } @@ -4757,30 +4742,15 @@ int ObTablegroupSchema::get_zone_replica_attr_array_inherit( ZoneLocalityIArray &locality) const { int ret = OB_SUCCESS; - bool use_tenant_locality = GCTX.is_standby_cluster() && OB_SYS_TENANT_ID != tenant_id_; locality.reset(); - if (!use_tenant_locality) { - const share::schema::ObSimpleTenantSchema *simple_tenant = nullptr; - if (OB_FAIL(schema_guard.get_tenant_info(tenant_id_, simple_tenant))) { - LOG_WARN("fail to get tenant info", K(ret), K_(tenant_id)); - } else if (OB_UNLIKELY(nullptr == simple_tenant)) { - ret = OB_TENANT_NOT_EXIST; - LOG_WARN("tenant schema ptr is null", K(ret), KPC(simple_tenant)); - } else { - use_tenant_locality = simple_tenant->is_restore(); - } - } - if (OB_FAIL(ret)) { - } else { - const ObTenantSchema *tenant_schema = NULL; - if (OB_FAIL(schema_guard.get_tenant_info(get_tenant_id(), tenant_schema))) { - LOG_WARN("fail to get tenant schema", K(ret), K(tablegroup_id_), K(tenant_id_)); - } else if (OB_UNLIKELY(NULL == tenant_schema)) { - ret = OB_ERR_UNEXPECTED; - LOG_WARN("tenant schema null", K(ret), K(tablegroup_id_), K(tenant_id_), KP(tenant_schema)); - } else if (OB_FAIL(tenant_schema->get_zone_replica_attr_array_inherit(schema_guard, locality))) { - LOG_WARN("fail to get zone replica num array", K(ret), K(tablegroup_id_), K(tenant_id_)); - } + const ObTenantSchema *tenant_schema = NULL; + if (OB_FAIL(schema_guard.get_tenant_info(get_tenant_id(), tenant_schema))) { + LOG_WARN("fail to get tenant schema", K(ret), K(tablegroup_id_), K(tenant_id_)); + } else if (OB_UNLIKELY(NULL == tenant_schema)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("tenant schema null", K(ret), K(tablegroup_id_), K(tenant_id_), KP(tenant_schema)); + } else if (OB_FAIL(tenant_schema->get_zone_replica_attr_array_inherit(schema_guard, locality))) { + LOG_WARN("fail to get zone replica num array", K(ret), K(tablegroup_id_), K(tenant_id_)); } return ret; } @@ -4790,33 +4760,17 @@ int ObTablegroupSchema::get_locality_str_inherit( const common::ObString *&locality_str) const { int ret = OB_SUCCESS; - bool use_tenant_locality = OB_SYS_TENANT_ID != tenant_id_ && GCTX.is_standby_cluster(); locality_str = nullptr; - if (!use_tenant_locality) { - const share::schema::ObSimpleTenantSchema *simple_tenant = nullptr; - if (OB_FAIL(guard.get_tenant_info(tenant_id_, simple_tenant))) { - LOG_WARN("fail to get tenant info", K(ret), K_(tenant_id)); - } else if (OB_UNLIKELY(nullptr == simple_tenant)) { - ret = OB_TENANT_NOT_EXIST; - LOG_WARN("tenant schema ptr is null", K(ret), KPC(simple_tenant)); - } else { - use_tenant_locality = simple_tenant->is_restore(); - } - } - if (OB_FAIL(ret)) { - } else if (use_tenant_locality - || nullptr == locality_str - || locality_str->empty()) { - const ObSimpleTenantSchema *tenant_schema = nullptr; - if (OB_FAIL(guard.get_tenant_info(get_tenant_id(), tenant_schema))) { - LOG_WARN("fail to get tenant schema", K(ret), "tenant_id", get_tenant_id()); - } else if (OB_UNLIKELY(nullptr == tenant_schema)) { - ret = OB_ERR_UNEXPECTED; - LOG_WARN("fail to get tenant schema", K(ret), "tenant_id", get_tenant_id()); - } else { - locality_str = &tenant_schema->get_locality_str(); - } + const ObSimpleTenantSchema *tenant_schema = nullptr; + if 
(OB_FAIL(guard.get_tenant_info(get_tenant_id(), tenant_schema))) { + LOG_WARN("fail to get tenant schema", K(ret), "tenant_id", get_tenant_id()); + } else if (OB_UNLIKELY(nullptr == tenant_schema)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("fail to get tenant schema", K(ret), "tenant_id", get_tenant_id()); + } else { + locality_str = &tenant_schema->get_locality_str(); } + if (OB_SUCC(ret)) { if (OB_UNLIKELY(nullptr == locality_str || locality_str->empty())) { ret = OB_ERR_UNEXPECTED; @@ -4831,30 +4785,15 @@ int ObTablegroupSchema::get_primary_zone_inherit( ObPrimaryZone &primary_zone) const { int ret = OB_SUCCESS; - bool use_tenant_primary_zone = GCTX.is_standby_cluster() && OB_SYS_TENANT_ID != tenant_id_; primary_zone.reset(); - if (!use_tenant_primary_zone) { - const share::schema::ObSimpleTenantSchema *simple_tenant = nullptr; - if (OB_FAIL(schema_guard.get_tenant_info(tenant_id_, simple_tenant))) { - LOG_WARN("fail to get tenant info", K(ret), K_(tenant_id)); - } else if (OB_UNLIKELY(nullptr == simple_tenant)) { - ret = OB_TENANT_NOT_EXIST; - LOG_WARN("tenant schema ptr is null", K(ret), KPC(simple_tenant)); - } else { - use_tenant_primary_zone = simple_tenant->is_restore(); - } - } - if (OB_FAIL(ret)) { - } else { - const ObTenantSchema *tenant_schema = NULL; - if (OB_FAIL(schema_guard.get_tenant_info(get_tenant_id(), tenant_schema))) { - LOG_WARN("fail to get tenant schema", K(ret), K(tablegroup_id_), K(tenant_id_)); - } else if (OB_UNLIKELY(NULL == tenant_schema)) { - ret = OB_ERR_UNEXPECTED; - LOG_WARN("tenant schema null", K(ret), K(tablegroup_id_), K(tenant_id_), KP(tenant_schema)); - } else if (OB_FAIL(tenant_schema->get_primary_zone_inherit(schema_guard, primary_zone))) { - LOG_WARN("fail to get primary zone array", K(ret), K(tablegroup_id_), K(tenant_id_)); - } + const ObTenantSchema *tenant_schema = NULL; + if (OB_FAIL(schema_guard.get_tenant_info(get_tenant_id(), tenant_schema))) { + LOG_WARN("fail to get tenant schema", K(ret), K(tablegroup_id_), K(tenant_id_)); + } else if (OB_UNLIKELY(NULL == tenant_schema)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("tenant schema null", K(ret), K(tablegroup_id_), K(tenant_id_), KP(tenant_schema)); + } else if (OB_FAIL(tenant_schema->get_primary_zone_inherit(schema_guard, primary_zone))) { + LOG_WARN("fail to get primary zone array", K(ret), K(tablegroup_id_), K(tenant_id_)); } return ret; } diff --git a/src/share/schema/ob_server_schema_service.cpp b/src/share/schema/ob_server_schema_service.cpp index 72ad8ce87b..7d70c52560 100644 --- a/src/share/schema/ob_server_schema_service.cpp +++ b/src/share/schema/ob_server_schema_service.cpp @@ -4328,14 +4328,8 @@ int ObServerSchemaService::apply_##SCHEMA##_schema_to_cache( \ } else { \ FOREACH_CNT_X(schema_key, schema_keys, OB_SUCC(ret)) { \ if (OB_FAIL(mgr.del_##SCHEMA(schema_key->get_##SCHEMA##_key()))) { \ - if (GCTX.is_standby_cluster() \ - && OB_SYS_TENANT_ID == tenant_id \ - && OB_ENTRY_NOT_EXIST == ret) { \ - ret = OB_SUCCESS; \ - } else { \ - LOG_WARN("del "#SCHEMA" failed", K(ret), \ - #SCHEMA"_key", schema_key->get_##SCHEMA##_key()); \ - } \ + LOG_WARN("del "#SCHEMA" failed", K(ret), \ + #SCHEMA"_key", schema_key->get_##SCHEMA##_key()); \ } \ } \ ALLOW_NEXT_LOG(); \ diff --git a/src/share/schema/ob_table_schema.cpp b/src/share/schema/ob_table_schema.cpp index e43315e715..cbb8eb68c0 100644 --- a/src/share/schema/ob_table_schema.cpp +++ b/src/share/schema/ob_table_schema.cpp @@ -417,33 +417,17 @@ int ObSimpleTableSchemaV2::get_zone_replica_attr_array_inherit( { int ret = OB_SUCCESS; const 
uint64_t tenant_id = get_tenant_id(); - bool use_tenant_locality = !is_sys_tenant(tenant_id) && GCTX.is_standby_cluster(); locality.reuse(); - if (!has_partition()) { - // No partition, no concept of locality - } else if (!use_tenant_locality) { - const share::schema::ObSimpleTenantSchema *simple_tenant = nullptr; - if (OB_FAIL(schema_guard.get_tenant_info(tenant_id, simple_tenant))) { - LOG_WARN("fail to get tenant info", K(ret), K(tenant_id)); - } else if (OB_UNLIKELY(nullptr == simple_tenant)) { - ret = OB_TENANT_NOT_EXIST; - LOG_WARN("tenant schema ptr is null", K(ret), KPC(simple_tenant)); - } else { - use_tenant_locality = simple_tenant->is_restore(); - } - } - if (OB_FAIL(ret)) { - } else { - // Locality is not set when creating table, take tenant's fill - const ObTenantSchema *tenant_schema = NULL; - if (OB_FAIL(schema_guard.get_tenant_info(tenant_id, tenant_schema))) { - LOG_WARN("fail to get tenant schema", K(ret), K(table_id_), K(tenant_id)); - } else if (OB_UNLIKELY(NULL == tenant_schema)) { - ret = OB_ERR_UNEXPECTED; - LOG_WARN("tenant schema null", K(ret), K(table_id_), K(tenant_id), KP(tenant_schema)); - } else if (OB_FAIL(tenant_schema->get_zone_replica_attr_array_inherit(schema_guard, locality))) { - LOG_WARN("fail to get zone replica num array", K(ret), K(table_id_), K(tenant_id)); - } + + // Locality is not set when creating table, take tenant's fill + const ObTenantSchema *tenant_schema = NULL; + if (OB_FAIL(schema_guard.get_tenant_info(tenant_id, tenant_schema))) { + LOG_WARN("fail to get tenant schema", K(ret), K(table_id_), K(tenant_id)); + } else if (OB_UNLIKELY(NULL == tenant_schema)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("tenant schema null", K(ret), K(table_id_), K(tenant_id), KP(tenant_schema)); + } else if (OB_FAIL(tenant_schema->get_zone_replica_attr_array_inherit(schema_guard, locality))) { + LOG_WARN("fail to get zone replica num array", K(ret), K(table_id_), K(tenant_id)); } return ret; } @@ -454,32 +438,15 @@ int ObSimpleTableSchemaV2::get_primary_zone_inherit( { int ret = OB_SUCCESS; const uint64_t tenant_id = get_tenant_id(); - const uint64_t tablegroup_id = get_tablegroup_id(); - const uint64_t database_id = get_database_id(); - bool use_tenant_primary_zone = !is_sys_tenant(tenant_id) && GCTX.is_standby_cluster(); primary_zone.reset(); - if (!use_tenant_primary_zone) { - const share::schema::ObSimpleTenantSchema *simple_tenant = nullptr; - if (OB_FAIL(schema_guard.get_tenant_info(tenant_id, simple_tenant))) { - LOG_WARN("fail to get tenant info", K(ret), K(tenant_id)); - } else if (OB_UNLIKELY(nullptr == simple_tenant)) { - ret = OB_TENANT_NOT_EXIST; - LOG_WARN("tenant schema ptr is null", K(ret), KPC(simple_tenant)); - } else { - use_tenant_primary_zone = simple_tenant->is_restore(); - } - } - if (OB_FAIL(ret)) { - } else if (use_tenant_primary_zone) { - const ObTenantSchema *tenant_schema = NULL; - if (OB_FAIL(schema_guard.get_tenant_info(tenant_id, tenant_schema))) { - LOG_WARN("fail to get tenant schema", K(ret), K(database_id), K(tenant_id)); - } else if (OB_UNLIKELY(NULL == tenant_schema)) { - ret = OB_ERR_UNEXPECTED; - LOG_WARN("tenant schema null", K(ret), K(database_id), K(tenant_id), KP(tenant_schema)); - } else if (OB_FAIL(tenant_schema->get_primary_zone_inherit(schema_guard, primary_zone))) { - LOG_WARN("fail to get primary zone array", K(ret), K(database_id), K(tenant_id)); - } + const ObTenantSchema *tenant_schema = NULL; + if (OB_FAIL(schema_guard.get_tenant_info(tenant_id, tenant_schema))) { + LOG_WARN("fail to get tenant schema", 
K(ret), K(tenant_id)); + } else if (OB_UNLIKELY(NULL == tenant_schema)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("tenant schema null", K(ret), K(tenant_id), KP(tenant_schema)); + } else if (OB_FAIL(tenant_schema->get_primary_zone_inherit(schema_guard, primary_zone))) { + LOG_WARN("fail to get primary zone array", K(ret), K(tenant_id)); } return ret; } @@ -1035,24 +1002,8 @@ int ObSimpleTableSchemaV2::get_locality_str_inherit( { int ret = OB_SUCCESS; const uint64_t tenant_id = get_tenant_id(); - bool use_tenant_locality = !is_sys_tenant(tenant_id) && GCTX.is_standby_cluster(); locality_str = NULL; - if (OB_INVALID_ID == get_table_id()) { - ret = OB_INVALID_ARGUMENT; - LOG_WARN("invalid table_id", K(ret), K_(table_id)); - } else if (!use_tenant_locality) { - const share::schema::ObSimpleTenantSchema *simple_tenant = nullptr; - if (OB_FAIL(guard.get_tenant_info(tenant_id, simple_tenant))) { - LOG_WARN("fail to get tenant info", K(ret), K(tenant_id)); - } else if (OB_UNLIKELY(nullptr == simple_tenant)) { - ret = OB_TENANT_NOT_EXIST; - LOG_WARN("tenant schema ptr is null", K(ret), KPC(simple_tenant)); - } else { - use_tenant_locality = simple_tenant->is_restore(); - } - } if (OB_FAIL(ret)) { - } else if (!OB_ISNULL(locality_str) && !locality_str->empty()) { } else { const ObSimpleTenantSchema *tenant = NULL; if (OB_FAIL(guard.get_tenant_info(get_tenant_id(), tenant))) { diff --git a/src/share/sequence/ob_sequence_dml_proxy.cpp b/src/share/sequence/ob_sequence_dml_proxy.cpp index a0fe3f33b8..76d21af9eb 100644 --- a/src/share/sequence/ob_sequence_dml_proxy.cpp +++ b/src/share/sequence/ob_sequence_dml_proxy.cpp @@ -286,9 +286,6 @@ int ObSequenceDMLProxy::next_batch( "WHERE SEQUENCE_ID = %lu", tname, next_value.format(), sequence_id))) { LOG_WARN("format update sql fail", K(ret)); - } else if (GCTX.is_standby_cluster() && OB_SYS_TENANT_ID != tenant_id) { - ret = OB_OP_NOT_ALLOW; - LOG_WARN("can't write sys table now", K(ret), K(tenant_id)); } else if (OB_FAIL(trans.write(tenant_id, sql.ptr(), affected_rows))) { LOG_WARN("fail to execute sql", K(sql), K(ret)); } else { diff --git a/src/share/stat/ob_dbms_stats_utils.cpp b/src/share/stat/ob_dbms_stats_utils.cpp index 38003395fb..25763824c5 100644 --- a/src/share/stat/ob_dbms_stats_utils.cpp +++ b/src/share/stat/ob_dbms_stats_utils.cpp @@ -149,19 +149,15 @@ int ObDbmsStatsUtils::cast_number_to_double(const number::ObNumber &src_val, dou return ret; } -// gather statistic related inner table should not read or write during tenant restore or on -// standby cluster. 
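Note on the recurring substitution in this patch: the cluster-level GCTX.is_standby_cluster() test, usually paired with a per-tenant restore lookup through the schema service, is replaced by a single per-tenant role check. The sketch below mirrors the call sites added in this diff; the helper name stats_write_allowed_sketch and the header path are illustrative assumptions, while ObShareUtil::mtl_check_if_tenant_role_is_primary is used exactly with the (tenant_id, bool &) signature shown above.

    #include "share/ob_share_util.h"  // ObShareUtil (header path assumed)

    // Minimal sketch of the per-tenant gate used throughout this patch.
    static int stats_write_allowed_sketch(const uint64_t tenant_id, bool &is_valid)
    {
      int ret = OB_SUCCESS;
      bool is_primary = true;
      is_valid = true;
      if (OB_FAIL(ObShareUtil::mtl_check_if_tenant_role_is_primary(tenant_id, is_primary))) {
        LOG_WARN("fail to execute mtl_check_if_tenant_role_is_primary", KR(ret), K(tenant_id));
      } else if (!is_primary) {
        // restore and standby tenants are both non-primary, so one role check
        // covers the two cases the removed code tested separately
        is_valid = false;
      }
      return ret;
    }
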
+// gather statistic related inner table should not read or write restore or standby tenant int ObDbmsStatsUtils::check_table_read_write_valid(const uint64_t tenant_id, bool &is_valid) { int ret = OB_SUCCESS; is_valid = true; - bool in_restore = false; - if (OB_ISNULL(GCTX.schema_service_)) { - ret = OB_ERR_UNEXPECTED; - LOG_WARN("get unexpected null", K(ret)); - } else if (OB_FAIL(GCTX.schema_service_->check_tenant_is_restore(NULL, tenant_id, in_restore))) { - LOG_WARN("failed to check tenant is restore", K(ret)); - } else if (OB_UNLIKELY(in_restore) || GCTX.is_standby_cluster()) { + bool is_primary = true; + if (OB_FAIL(ObShareUtil::mtl_check_if_tenant_role_is_primary(tenant_id, is_primary))) { + LOG_WARN("fail to execute mtl_check_if_tenant_role_is_primary", KR(ret), K(tenant_id)); + } else if (OB_UNLIKELY(!is_primary)) { is_valid = false; } return ret; diff --git a/src/share/stat/ob_opt_stat_monitor_manager.cpp b/src/share/stat/ob_opt_stat_monitor_manager.cpp index 466aafd356..02f5b53397 100644 --- a/src/share/stat/ob_opt_stat_monitor_manager.cpp +++ b/src/share/stat/ob_opt_stat_monitor_manager.cpp @@ -103,15 +103,12 @@ void ObOptStatMonitorFlushAllTask::runTimerTask() int ret = OB_SUCCESS; if (OB_NOT_NULL(optstat_monitor_mgr_) && optstat_monitor_mgr_->inited_) { LOG_INFO("run opt stat monitor flush all task", K(optstat_monitor_mgr_->tenant_id_)); - share::schema::ObMultiVersionSchemaService &schema_service = share::schema::ObMultiVersionSchemaService::get_instance(); - share::schema::ObSchemaGetterGuard schema_guard; - bool in_restore = false; + uint64_t tenant_id = optstat_monitor_mgr_->tenant_id_; + bool is_primary = true; THIS_WORKER.set_timeout_ts(FLUSH_INTERVAL / 2 + ObTimeUtility::current_time()); - if (OB_FAIL(schema_service.get_tenant_schema_guard(optstat_monitor_mgr_->tenant_id_, schema_guard))) { - LOG_WARN("failed to get schema guard", K(ret)); - } else if (OB_FAIL(schema_guard.check_tenant_is_restore(optstat_monitor_mgr_->tenant_id_, in_restore))) { - LOG_WARN("failed to check tenant is restore", K(ret)); - } else if (in_restore || GCTX.is_standby_cluster()) { + if (OB_FAIL(ObShareUtil::mtl_check_if_tenant_role_is_primary(tenant_id, is_primary))) { + LOG_WARN("fail to execute mtl_check_if_tenant_role_is_primary", KR(ret), K(tenant_id)); + } else if (!is_primary) { // do nothing } else if (OB_FAIL(optstat_monitor_mgr_->update_column_usage_info(false))) { LOG_WARN("failed to update column usage info", K(ret)); @@ -126,15 +123,12 @@ void ObOptStatMonitorCheckTask::runTimerTask() int ret = OB_SUCCESS; if (OB_NOT_NULL(optstat_monitor_mgr_) && optstat_monitor_mgr_->inited_) { LOG_INFO("run opt stat monitor check task", K(optstat_monitor_mgr_->tenant_id_)); - share::schema::ObMultiVersionSchemaService &schema_service = share::schema::ObMultiVersionSchemaService::get_instance(); - share::schema::ObSchemaGetterGuard schema_guard; - bool in_restore = false; + uint64_t tenant_id = optstat_monitor_mgr_->tenant_id_; + bool is_primary = true; THIS_WORKER.set_timeout_ts(CHECK_INTERVAL + ObTimeUtility::current_time()); - if (OB_FAIL(schema_service.get_tenant_schema_guard(optstat_monitor_mgr_->tenant_id_, schema_guard))) { - LOG_WARN("failed to get schema guard", K(ret)); - } else if (OB_FAIL(schema_guard.check_tenant_is_restore(optstat_monitor_mgr_->tenant_id_, in_restore))) { - LOG_WARN("failed to check tenant is restore", K(ret)); - } else if (in_restore || GCTX.is_standby_cluster()) { + if (OB_FAIL(ObShareUtil::mtl_check_if_tenant_role_is_primary(tenant_id, is_primary))) { + 
LOG_WARN("fail to execute mtl_check_if_tenant_role_is_primary", KR(ret), K(tenant_id)); + } else if (!is_primary) { // do nothing } else if (OB_FAIL(optstat_monitor_mgr_->update_column_usage_info(true))) { LOG_WARN("failed to update column usage info", K(ret)); @@ -236,35 +230,31 @@ int ObOptStatMonitorManager::flush_database_monitoring_info(sql::ObExecContext & int ObOptStatMonitorManager::update_local_cache(common::ObIArray &args) { int ret = OB_SUCCESS; - if (GCTX.is_standby_cluster()) { - // standby cluster can't write __all_column_usage, so do not need to update local update - } else { - SpinRLockGuard guard(lock_); - for (int64_t i = 0; OB_SUCC(ret) && i < args.count(); ++i) { - ColumnUsageArg &arg = args.at(i); - StatKey col_key(arg.table_id_, arg.column_id_); - int64_t flags = 0; - if (OB_FAIL(column_usage_map_.get_refactored(col_key, flags))) { - if (OB_LIKELY(ret == OB_HASH_NOT_EXIST)) { - if (OB_FAIL(column_usage_map_.set_refactored(col_key, arg.flags_))) { - // other thread set the refactor, try update again - if (OB_FAIL(column_usage_map_.get_refactored(col_key, flags))) { - LOG_WARN("failed to get refactored", K(ret)); - } else if ((~flags) & arg.flags_) { - UpdateValueAtomicOp atomic_op(arg.flags_); - if (OB_FAIL(column_usage_map_.atomic_refactored(col_key, atomic_op))) { - LOG_WARN("failed to atomic refactored", K(ret)); - } + SpinRLockGuard guard(lock_); + for (int64_t i = 0; OB_SUCC(ret) && i < args.count(); ++i) { + ColumnUsageArg &arg = args.at(i); + StatKey col_key(arg.table_id_, arg.column_id_); + int64_t flags = 0; + if (OB_FAIL(column_usage_map_.get_refactored(col_key, flags))) { + if (OB_LIKELY(ret == OB_HASH_NOT_EXIST)) { + if (OB_FAIL(column_usage_map_.set_refactored(col_key, arg.flags_))) { + // other thread set the refactor, try update again + if (OB_FAIL(column_usage_map_.get_refactored(col_key, flags))) { + LOG_WARN("failed to get refactored", K(ret)); + } else if ((~flags) & arg.flags_) { + UpdateValueAtomicOp atomic_op(arg.flags_); + if (OB_FAIL(column_usage_map_.atomic_refactored(col_key, atomic_op))) { + LOG_WARN("failed to atomic refactored", K(ret)); } } - } else { - LOG_WARN("failed to get refactored", K(ret)); - } - } else if ((~flags) & arg.flags_) { - UpdateValueAtomicOp atomic_op(arg.flags_); - if (OB_FAIL(column_usage_map_.atomic_refactored(col_key, atomic_op))) { - LOG_WARN("failed to atomic refactored", K(ret)); } + } else { + LOG_WARN("failed to get refactored", K(ret)); + } + } else if ((~flags) & arg.flags_) { + UpdateValueAtomicOp atomic_op(arg.flags_); + if (OB_FAIL(column_usage_map_.atomic_refactored(col_key, atomic_op))) { + LOG_WARN("failed to atomic refactored", K(ret)); } } } @@ -274,34 +264,30 @@ int ObOptStatMonitorManager::update_local_cache(common::ObIArray int ObOptStatMonitorManager::update_local_cache(ObOptDmlStat &dml_stat) { int ret = OB_SUCCESS; - if (GCTX.is_standby_cluster()) { - // standby cluster can't write __all_monitor_modified, so do not need to update local update - } else { - SpinRLockGuard guard(lock_); - StatKey key(dml_stat.table_id_, dml_stat.tablet_id_); - ObOptDmlStat tmp_dml_stat; - if (OB_FAIL(dml_stat_map_.get_refactored(key, tmp_dml_stat))) { - if (OB_LIKELY(ret == OB_HASH_NOT_EXIST)) { - if (OB_FAIL(dml_stat_map_.set_refactored(key, dml_stat))) { - // other thread set the refactor, try update again - if (OB_FAIL(dml_stat_map_.get_refactored(key, tmp_dml_stat))) { - LOG_WARN("failed to get refactored", K(ret)); - } else { - UpdateValueAtomicOp atomic_op(dml_stat); - if 
(OB_FAIL(dml_stat_map_.atomic_refactored(key, atomic_op))) { - LOG_WARN("failed to atomic refactored", K(ret)); - } + SpinRLockGuard guard(lock_); + StatKey key(dml_stat.table_id_, dml_stat.tablet_id_); + ObOptDmlStat tmp_dml_stat; + if (OB_FAIL(dml_stat_map_.get_refactored(key, tmp_dml_stat))) { + if (OB_LIKELY(ret == OB_HASH_NOT_EXIST)) { + if (OB_FAIL(dml_stat_map_.set_refactored(key, dml_stat))) { + // other thread set the refactor, try update again + if (OB_FAIL(dml_stat_map_.get_refactored(key, tmp_dml_stat))) { + LOG_WARN("failed to get refactored", K(ret)); + } else { + UpdateValueAtomicOp atomic_op(dml_stat); + if (OB_FAIL(dml_stat_map_.atomic_refactored(key, atomic_op))) { + LOG_WARN("failed to atomic refactored", K(ret)); } } - } else { - LOG_WARN("failed to get refactored", K(ret)); } } else { - UpdateValueAtomicOp atomic_op(dml_stat); - if (OB_FAIL(dml_stat_map_.atomic_refactored(key, atomic_op))) { - LOG_WARN("failed to atomic refactored", K(ret)); - } else {/*do nothing*/} + LOG_WARN("failed to get refactored", K(ret)); } + } else { + UpdateValueAtomicOp atomic_op(dml_stat); + if (OB_FAIL(dml_stat_map_.atomic_refactored(key, atomic_op))) { + LOG_WARN("failed to atomic refactored", K(ret)); + } else {/*do nothing*/} } return ret; } @@ -578,13 +564,10 @@ int ObOptStatMonitorManager::check_table_writeable(bool &is_writeable) { int ret = OB_SUCCESS; is_writeable = true; - bool in_restore = false; - if (OB_ISNULL(GCTX.schema_service_)) { - ret = OB_ERR_UNEXPECTED; - LOG_WARN("get unexpected null", K(ret)); - } else if (OB_FAIL(GCTX.schema_service_->check_tenant_is_restore(NULL, tenant_id_, in_restore))) { - LOG_WARN("failed to check tenant is restore", K(ret)); - } else if (OB_UNLIKELY(in_restore) || GCTX.is_standby_cluster()) { + bool is_primary = true; + if (OB_FAIL(ObShareUtil::mtl_check_if_tenant_role_is_primary(tenant_id_, is_primary))) { + LOG_WARN("fail to execute mtl_check_if_tenant_role_is_primary", KR(ret), K(tenant_id_)); + } else if (OB_UNLIKELY(!is_primary)) { is_writeable = false; } return ret; diff --git a/src/share/table/ob_ttl_util.cpp b/src/share/table/ob_ttl_util.cpp index 73725d32c6..68bdbe888a 100644 --- a/src/share/table/ob_ttl_util.cpp +++ b/src/share/table/ob_ttl_util.cpp @@ -18,6 +18,7 @@ #include "share/ob_server_status.h" #include "share/schema/ob_schema_utils.h" #include "rootserver/ob_root_service.h" +#include "rootserver/ob_tenant_info_loader.h" #include "observer/omt/ob_tenant_timezone_mgr.h" #include "share/schema/ob_multi_version_schema_service.h" #include "lib/stat/ob_diagnose_info.h" @@ -546,7 +547,11 @@ bool ObTTLUtil::check_can_do_work() { int ret = OB_SUCCESS; int64_t tenant_id = MTL_ID(); uint64_t tenant_data_version = 0;; - if (GCTX.is_standby_cluster()) { + bool is_primary = true; + if (OB_FAIL(ObShareUtil::mtl_check_if_tenant_role_is_primary(tenant_id, is_primary))) { + bret = false; + LOG_WARN("fail to execute mtl_check_if_tenant_role_is_primary", KR(ret), K(tenant_id)); + } else if (!is_primary) { bret = false; } else if (OB_FAIL(GET_MIN_DATA_VERSION(tenant_id, tenant_data_version))) { bret = false; diff --git a/src/sql/engine/cmd/ob_alter_system_executor.cpp b/src/sql/engine/cmd/ob_alter_system_executor.cpp index 0f27eea70c..2e7088f74b 100644 --- a/src/sql/engine/cmd/ob_alter_system_executor.cpp +++ b/src/sql/engine/cmd/ob_alter_system_executor.cpp @@ -41,7 +41,7 @@ #include "share/scheduler/ob_dag_warning_history_mgr.h" #include "observer/omt/ob_tenant.h" //ObTenant #include "rootserver/freeze/ob_major_freeze_helper.h" 
//ObMajorFreezeHelper -#include "share/ob_primary_standby_service.h" // ObPrimaryStandbyService +#include "rootserver/standby/ob_standby_service.h" // ObStandbyService #include "rpc/obmysql/ob_sql_sock_session.h" #include "sql/plan_cache/ob_plan_cache.h" #include "pl/pl_cache/ob_pl_cache_mgr.h" @@ -50,6 +50,8 @@ #include "share/table/ob_ttl_util.h" #include "rootserver/restore/ob_tenant_clone_util.h" +#include "rootserver/ob_service_name_command.h" +#include "rootserver/ob_tenant_event_def.h" namespace oceanbase { using namespace common; @@ -57,7 +59,7 @@ using namespace obrpc; using namespace share; using namespace omt; using namespace obmysql; - +using namespace tenant_event; namespace sql { int ObFreezeExecutor::execute(ObExecContext &ctx, ObFreezeStmt &stmt) @@ -2146,6 +2148,7 @@ int ObSwitchTenantExecutor::execute(ObExecContext &ctx, ObSwitchTenantStmt &stmt } else { ObSwitchTenantArg &arg = stmt.get_arg(); arg.set_stmt_str(first_stmt); + ObSQLSessionInfo *session_info = ctx.get_my_session(); //left 200ms to return result const int64_t remain_timeout_interval_us = THIS_WORKER.get_timeout_remain(); @@ -2157,9 +2160,15 @@ int ObSwitchTenantExecutor::execute(ObExecContext &ctx, ObSwitchTenantStmt &stmt // TODO support specify ALL if (OB_FAIL(ret)) { - } else if (arg.get_is_verify()) { - //do nothing - } else if (OB_FAIL(OB_PRIMARY_STANDBY_SERVICE.switch_tenant(arg))) { + } else if (OB_ISNULL(session_info)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("session_info is null", KR(ret), KP(session_info)); + } else if (OB_UNLIKELY(!session_info->get_service_name().is_empty())) { + ret = OB_OP_NOT_ALLOW; + LOG_WARN("switching tenant role cannot be executed in the session which is created via service_name", + KR(ret), K(session_info->get_service_name())); + LOG_USER_ERROR(OB_OP_NOT_ALLOW, "This session is created via service_name, switching tenant is"); + } else if (OB_FAIL(OB_STANDBY_SERVICE.switch_tenant(arg))) { LOG_WARN("failed to switch_tenant", KR(ret), K(arg)); } @@ -2183,7 +2192,7 @@ int ObRecoverTenantExecutor::execute(ObExecContext &ctx, ObRecoverTenantStmt &st // TODO support specify ALL and tenant list if (OB_FAIL(ret)) { - } else if (OB_FAIL(OB_PRIMARY_STANDBY_SERVICE.recover_tenant(arg))) { + } else if (OB_FAIL(OB_STANDBY_SERVICE.recover_tenant(arg))) { LOG_WARN("failed to recover_tenant", KR(ret), K(arg)); } } @@ -2871,5 +2880,49 @@ int ObTransferPartitionExecutor::execute(ObExecContext& ctx, ObTransferPartition } return ret; } +int ObServiceNameExecutor::execute(ObExecContext& ctx, ObServiceNameStmt& stmt) +{ + int ret = OB_SUCCESS; + const ObServiceNameArg &arg = stmt.get_arg(); + const ObServiceNameString &service_name_str = arg.get_service_name_str(); + const ObServiceNameArg::ObServiceOp &service_op = arg.get_service_op(); + const uint64_t tenant_id = arg.get_target_tenant_id(); + ObSQLSessionInfo *session_info = ctx.get_my_session(); + if (OB_UNLIKELY(!arg.is_valid())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid arg", KR(ret), K(arg)); + } else if (OB_FAIL(ObServiceNameProxy::check_is_service_name_enabled(tenant_id))) { + LOG_WARN("fail to execute check_is_service_name_enabled", KR(ret), K(tenant_id)); + LOG_USER_ERROR(OB_NOT_SUPPORTED, "The tenant's or meta tenant's data_version is smaller than 4_2_4_0, service name related command is"); + } else if (OB_ISNULL(session_info)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("session_info is null", KR(ret), KP(session_info)); + } else if (OB_UNLIKELY(!session_info->get_service_name().is_empty())) { + ret = OB_OP_NOT_ALLOW; + 
LOG_WARN("service_name related commands cannot be executed in the session which is created via service_name", + KR(ret), K(session_info->get_service_name())); + LOG_USER_ERROR(OB_OP_NOT_ALLOW, "This session is created via service_name, service name related command is"); + } else if (arg.is_create_service()) { + if (OB_FAIL(ObServiceNameCommand::create_service(tenant_id, service_name_str))) { + LOG_WARN("fail to create service", KR(ret), K(tenant_id), K(service_name_str)); + } + } else if (arg.is_delete_service()) { + if (OB_FAIL(ObServiceNameCommand::delete_service(tenant_id, service_name_str))) { + LOG_WARN("fail to delete service", KR(ret), K(tenant_id), K(service_name_str)); + } + } else if (arg.is_start_service()) { + if (OB_FAIL(ObServiceNameCommand::start_service(tenant_id, service_name_str))) { + LOG_WARN("fail to start service", KR(ret), K(tenant_id), K(service_name_str)); + } + } else if (arg.is_stop_service()) { + if (OB_FAIL(ObServiceNameCommand::stop_service(tenant_id, service_name_str))) { + LOG_WARN("fail to stop service", KR(ret), K(tenant_id), K(service_name_str)); + } + } else { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("unknown service operation", KR(ret), K(arg)); + } + return ret; +} } // end namespace sql } // end namespace oceanbase diff --git a/src/sql/engine/cmd/ob_alter_system_executor.h b/src/sql/engine/cmd/ob_alter_system_executor.h index 08cfdd67f9..b1bec96169 100644 --- a/src/sql/engine/cmd/ob_alter_system_executor.h +++ b/src/sql/engine/cmd/ob_alter_system_executor.h @@ -142,8 +142,9 @@ DEF_SIMPLE_EXECUTOR(ObResetConfig); DEF_SIMPLE_EXECUTOR(ObCancelClone); -DEF_SIMPLE_EXECUTOR(ObTransferPartition); +DEF_SIMPLE_EXECUTOR(ObTransferPartition); +DEF_SIMPLE_EXECUTOR(ObServiceName); class ObCancelTaskExecutor { public: diff --git a/src/sql/engine/cmd/ob_analyze_executor.cpp b/src/sql/engine/cmd/ob_analyze_executor.cpp index 3786086e06..083cdd5a67 100644 --- a/src/sql/engine/cmd/ob_analyze_executor.cpp +++ b/src/sql/engine/cmd/ob_analyze_executor.cpp @@ -45,20 +45,21 @@ int ObAnalyzeExecutor::execute(ObExecContext &ctx, ObAnalyzeStmt &stmt) { int ret = OB_SUCCESS; ObSEArray params; - share::schema::ObSchemaGetterGuard *schema_guard = ctx.get_virtual_table_ctx().schema_guard_; ObSQLSessionInfo *session = ctx.get_my_session(); - bool in_restore = false; - if (OB_ISNULL(schema_guard) || OB_ISNULL(session)) { + if (OB_ISNULL(session)) { ret = OB_ERR_UNEXPECTED; - LOG_WARN("get unexpected null", K(ret), K(schema_guard), K(session)); - } else if (OB_FAIL(schema_guard->check_tenant_is_restore(session->get_effective_tenant_id(), - in_restore))) { - LOG_WARN("failed to check tenant is restore", K(ret)); - } else if (OB_UNLIKELY(in_restore) || - GCTX.is_standby_cluster()) { - ret = OB_NOT_SUPPORTED; - LOG_USER_ERROR(OB_NOT_SUPPORTED, "analyze table during restore or standby cluster"); - } else if (OB_FAIL(ObDbmsStatsUtils::implicit_commit_before_gather_stats(ctx))) { + LOG_WARN("get unexpected null", K(ret), K(session)); + } else { + uint64_t tenant_id = session->get_effective_tenant_id(); + bool is_primary = true; + if (OB_FAIL(ObShareUtil::mtl_check_if_tenant_role_is_primary(tenant_id, is_primary))) { + LOG_WARN("fail to execute mtl_check_if_tenant_role_is_primary", KR(ret), K(tenant_id)); + } else if (OB_UNLIKELY(!is_primary)) { + ret = OB_NOT_SUPPORTED; + LOG_USER_ERROR(OB_NOT_SUPPORTED, "analyze table during non-primary tenant"); + } + } + if (FAILEDx(ObDbmsStatsUtils::implicit_commit_before_gather_stats(ctx))) { LOG_WARN("failed to implicit commit before gather stats", 
K(ret)); } else if (OB_FAIL(ObDbmsStatsUtils::cancel_async_gather_stats(ctx))) { LOG_WARN("failed to cancel async gather stats", K(ret)); diff --git a/src/sql/engine/cmd/ob_ddl_executor_util.cpp b/src/sql/engine/cmd/ob_ddl_executor_util.cpp index 63032f5ba1..1764849695 100644 --- a/src/sql/engine/cmd/ob_ddl_executor_util.cpp +++ b/src/sql/engine/cmd/ob_ddl_executor_util.cpp @@ -32,15 +32,19 @@ namespace sql int ObDDLExecutorUtil::handle_session_exception(ObSQLSessionInfo &session) { int ret = OB_SUCCESS; + const uint64_t tenant_id = session.get_effective_tenant_id(); + bool is_standby = false; if (OB_UNLIKELY(session.is_query_killed())) { ret = OB_ERR_QUERY_INTERRUPTED; LOG_WARN("query is killed", K(ret)); } else if (OB_UNLIKELY(session.is_zombie())) { ret = OB_SESSION_KILLED; LOG_WARN("session is killed", K(ret)); - } else if (GCTX.is_standby_cluster()) { + } else if (OB_FAIL(ObShareUtil::mtl_check_if_tenant_role_is_standby(tenant_id, is_standby))) { + LOG_WARN("fail to execute mtl_check_if_tenant_role_is_standby", KR(ret), K(tenant_id)); + } else if (is_standby) { ret = OB_SESSION_KILLED; - LOG_INFO("cluster switchoverd, kill session", KR(ret)); + LOG_WARN("session is killed", KR(ret)); } return ret; } diff --git a/src/sql/engine/cmd/ob_index_executor.cpp b/src/sql/engine/cmd/ob_index_executor.cpp index a95dc40ac2..94ff6e0280 100644 --- a/src/sql/engine/cmd/ob_index_executor.cpp +++ b/src/sql/engine/cmd/ob_index_executor.cpp @@ -234,17 +234,18 @@ int ObCreateIndexExecutor::sync_check_index_status(sql::ObSQLSessionInfo &my_ses LOG_WARN("failed to handle_session_exception", KR(ret)); } } - //处理主备库切换的场景,生效过程中发生切换的话,直接返回用户session_killed; //后续有备库来处理该索引; if (OB_FAIL(ret)) { } else if (OB_SYS_TENANT_ID == tenant_id) { //no need to process sys tenant - } else if (OB_FAIL(handle_switchover())) { - if (OB_SESSION_KILLED != ret) { - LOG_WARN("fail to handle switchover status", KR(ret)); - } else { - LOG_WARN("fail to add index while swithover", KR(ret)); + } else { + bool is_standby = false; + if (OB_FAIL(ObShareUtil::table_check_if_tenant_role_is_standby(tenant_id, is_standby))) { + LOG_WARN("fail to execute table_check_if_tenant_role_is_standby", KR(ret), K(tenant_id)); + } else if (is_standby) { + ret = OB_SESSION_KILLED; + LOG_WARN("create index while switchoverd, kill session", KR(ret)); } } @@ -267,16 +268,6 @@ int ObCreateIndexExecutor::handle_session_exception(ObSQLSessionInfo &session) return session.check_session_status(); } -int ObCreateIndexExecutor::handle_switchover() -{ - int ret = OB_SUCCESS; - if (GCTX.is_standby_cluster()) { - ret = OB_SESSION_KILLED; - LOG_INFO("create index while switchoverd, kill session", KR(ret)); - } - return ret; -} - ObDropIndexExecutor::ObDropIndexExecutor() { } diff --git a/src/sql/engine/cmd/ob_index_executor.h b/src/sql/engine/cmd/ob_index_executor.h index 6c19c46ff0..d9c35c7744 100644 --- a/src/sql/engine/cmd/ob_index_executor.h +++ b/src/sql/engine/cmd/ob_index_executor.h @@ -48,7 +48,6 @@ private: common::ObIAllocator &allocator, bool is_update_global_indexes = false); int handle_session_exception(ObSQLSessionInfo &session); - int handle_switchover(); }; class ObDropIndexStmt; diff --git a/src/sql/engine/cmd/ob_tenant_executor.cpp b/src/sql/engine/cmd/ob_tenant_executor.cpp index af884fed12..1dd9d4c558 100644 --- a/src/sql/engine/cmd/ob_tenant_executor.cpp +++ b/src/sql/engine/cmd/ob_tenant_executor.cpp @@ -24,7 +24,7 @@ #include "share/ls/ob_ls_operator.h" #include "share/ob_leader_election_waiter.h" #include "share/ls/ob_ls_status_operator.h" 
//ObLSStatusInfo, ObLSStatusOperator -#include "share/ob_primary_standby_service.h" // ObPrimaryStandbyService +#include "rootserver/standby/ob_standby_service.h" // ObStandbyService #include "sql/session/ob_sql_session_info.h" #include "sql/resolver/ddl/ob_create_tenant_stmt.h" #include "sql/resolver/ddl/ob_drop_tenant_stmt.h" @@ -250,7 +250,7 @@ int ObCreateStandbyTenantExecutor::execute(ObExecContext &ctx, ObCreateTenantStm } else if (OB_ISNULL(common_rpc_proxy = task_exec_ctx->get_common_rpc())) { ret = OB_NOT_INIT; LOG_WARN("get common rpc proxy failed"); - } else if (OB_FAIL(OB_PRIMARY_STANDBY_SERVICE.check_can_create_standby_tenant( + } else if (OB_FAIL(OB_STANDBY_SERVICE.check_can_create_standby_tenant( create_tenant_arg.log_restore_source_, compat_mode))) { LOG_WARN("check can create standby_tenant failed", KR(ret), K(create_tenant_arg)); } else { @@ -264,7 +264,7 @@ int ObCreateStandbyTenantExecutor::execute(ObExecContext &ctx, ObCreateTenantStm ret = OB_ERR_UNEXPECTED; LOG_WARN("if_not_exist not set and tenant_id invalid tenant_id", KR(ret), K(create_tenant_arg), K(tenant_id)); } else if (OB_INVALID_ID != tenant_id) { - if (OB_FAIL(OB_PRIMARY_STANDBY_SERVICE.wait_create_standby_tenant_end(tenant_id))) { + if (OB_FAIL(OB_STANDBY_SERVICE.wait_create_standby_tenant_end(tenant_id))) { LOG_WARN("failed to wait user create end", KR(ret), K(tenant_id)); } } diff --git a/src/sql/executor/ob_cmd_executor.cpp b/src/sql/executor/ob_cmd_executor.cpp index 317a6a9e56..293e34af99 100644 --- a/src/sql/executor/ob_cmd_executor.cpp +++ b/src/sql/executor/ob_cmd_executor.cpp @@ -1056,6 +1056,10 @@ int ObCmdExecutor::execute(ObExecContext &ctx, ObICmd &cmd) DEFINE_EXECUTE_CMD(ObCloneTenantStmt, ObCloneTenantExecutor); break; } + case stmt::T_SERVICE_NAME: { + DEFINE_EXECUTE_CMD(ObServiceNameStmt, ObServiceNameExecutor); + break; + } case stmt::T_ALTER_SYSTEM_RESET_PARAMETER: { DEFINE_EXECUTE_CMD(ObResetConfigStmt, ObResetConfigExecutor); break; diff --git a/src/sql/parser/sql_parser_mysql_mode.y b/src/sql/parser/sql_parser_mysql_mode.y index 62cac381fd..fc476fe402 100644 --- a/src/sql/parser/sql_parser_mysql_mode.y +++ b/src/sql/parser/sql_parser_mysql_mode.y @@ -549,6 +549,7 @@ END_P SET_VAR DELIMITER %type transfer_partition_stmt transfer_partition_clause part_info cancel_transfer_partition_clause %type geometry_collection %type mock_stmt +%type service_name_stmt service_op %type ttl_definition ttl_expr ttl_unit %type id_dot_id id_dot_id_dot_id %type opt_table_list opt_repair_mode opt_repair_option_list repair_option repair_option_list opt_checksum_option @@ -727,6 +728,7 @@ stmt: | clone_tenant_stmt { $$ = $1; check_question_mark($$, result); } | transfer_partition_stmt { $$ = $1; check_question_mark($$, result); } | mock_stmt {$$ = $1; check_question_mark($$, result);} + | service_name_stmt { $$ = $1; check_question_mark($$, result); } ; /***************************************************************************** @@ -21158,11 +21160,47 @@ DAY dup_expr_string($$, result, @1.first_column, @1.last_column); } ; +/*=========================================================== + * + * 租户 SERVICE_NAME 管理 + * + *===========================================================*/ +service_name_stmt: +alter_with_opt_hint SYSTEM service_op SERVICE relation_name opt_tenant_name +{ + (void)($1); + malloc_non_terminal_node($$, result->malloc_pool_, T_SERVICE_NAME, 3, + $3, /* service operation */ + $5, /* service name */ + $6); /* tenant name */ +} +; +service_op : +CREATE +{ + malloc_terminal_node($$, 
result->malloc_pool_, T_INT); + $$->value_ = 1; +} +| DELETE +{ + malloc_terminal_node($$, result->malloc_pool_, T_INT); + $$->value_ = 2; +} +| START +{ + malloc_terminal_node($$, result->malloc_pool_, T_INT); + $$->value_ = 3; +} +| STOP +{ + malloc_terminal_node($$, result->malloc_pool_, T_INT); + $$->value_ = 4; +} +; /*=========================================================== * * JSON TABLE -* *============================================================*/ json_table_expr: diff --git a/src/sql/privilege_check/ob_privilege_check.cpp b/src/sql/privilege_check/ob_privilege_check.cpp index 62d7fe6f30..1284a6d518 100644 --- a/src/sql/privilege_check/ob_privilege_check.cpp +++ b/src/sql/privilege_check/ob_privilege_check.cpp @@ -2555,6 +2555,7 @@ int get_sys_tenant_alter_system_priv( stmt::T_TABLE_TTL != basic_stmt->get_stmt_type() && stmt::T_ALTER_SYSTEM_RESET_PARAMETER != basic_stmt->get_stmt_type() && stmt::T_TRANSFER_PARTITION != basic_stmt->get_stmt_type() && + stmt::T_SERVICE_NAME != basic_stmt->get_stmt_type() && stmt::T_ALTER_LS_REPLICA != basic_stmt->get_stmt_type()) { ret = OB_ERR_NO_PRIVILEGE; LOG_WARN("Only sys tenant can do this operation", diff --git a/src/sql/resolver/cmd/ob_alter_system_resolver.cpp b/src/sql/resolver/cmd/ob_alter_system_resolver.cpp index 503ca673ca..ed6b45b182 100644 --- a/src/sql/resolver/cmd/ob_alter_system_resolver.cpp +++ b/src/sql/resolver/cmd/ob_alter_system_resolver.cpp @@ -2072,7 +2072,10 @@ static int set_reset_check_param_valid_oracle_mode(uint64_t tenant_id , ret = OB_OBJECT_NAME_NOT_EXIST; LOG_WARN("fail to get keystore schema", K(ret)); } else if (0 != keystore_schema->get_master_key_id()) { - if (!GCTX.is_standby_cluster()) { + bool is_standby = false; + if (OB_FAIL(ObShareUtil::table_check_if_tenant_role_is_standby(tenant_id, is_standby))) { + LOG_WARN("fail to execute table_check_if_tenant_role_is_standby", KR(ret), K(tenant_id)); + } else if (!is_standby) { ret = OB_NOT_SUPPORTED; LOG_WARN("alter tde method is not support", K(ret)); LOG_USER_ERROR(OB_NOT_SUPPORTED, "alter tde method with master key exists"); @@ -2470,7 +2473,10 @@ int ObSetConfigResolver::check_param_valid(int64_t tenant_id , ret = OB_OBJECT_NAME_NOT_EXIST; LOG_WARN("fail to get keystore schema", K(ret)); } else if (0 != keystore_schema->get_master_key_id()) { - if (!GCTX.is_standby_cluster()) { + bool is_standby = false; + if (OB_FAIL(ObShareUtil::table_check_if_tenant_role_is_standby(tenant_id, is_standby))) { + LOG_WARN("fail to execute table_check_if_tenant_role_is_standby", KR(ret), K(tenant_id)); + } else if (!is_standby) { ret = OB_NOT_SUPPORTED; LOG_WARN("alter tde method is not support", K(ret)); LOG_USER_ERROR(OB_NOT_SUPPORTED, "alter tde method with master key exists"); @@ -7045,5 +7051,68 @@ int ret = OB_SUCCESS; } return ret; } + +int ObServiceNameResolver::resolve(const ParseNode &parse_tree) +{ + int ret = OB_SUCCESS; + ObServiceNameStmt *stmt = create_stmt(); + uint64_t target_tenant_id = OB_INVALID_TENANT_ID; + ObString service_name_str; + if (OB_UNLIKELY(T_SERVICE_NAME != parse_tree.type_)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("invalid parse node, type is not T_SERVICE_NAME", KR(ret), "type", + get_type_name(parse_tree.type_)); + } else if (OB_ISNULL(stmt)) { + ret = OB_ALLOCATE_MEMORY_FAILED; + LOG_WARN("create stmt fail", KR(ret)); + } else if (3 != parse_tree.num_child_ + || OB_ISNULL(parse_tree.children_[0]) + || OB_ISNULL(parse_tree.children_[1]) + || OB_ISNULL(session_info_)) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid parse tree or 
session info", KR(ret), "num_child", parse_tree.num_child_, + KP(parse_tree.children_[0]), KP(parse_tree.children_[1]), KP(session_info_)); + } else if (OB_FAIL(get_and_verify_tenant_name( + parse_tree.children_[2], + session_info_->get_effective_tenant_id(), + target_tenant_id, + "Service name related command"))) { + LOG_WARN("fail to execute get_and_verify_tenant_name", KR(ret), + K(session_info_->get_effective_tenant_id()), KP(parse_tree.children_[1])); + } else if (OB_UNLIKELY(T_INT != parse_tree.children_[0]->type_)) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid parse node, service_op is not T_INT", K(parse_tree.children_[0]->type_)); + } else if (OB_FAIL(Util::resolve_relation_name(parse_tree.children_[1], service_name_str))) { + LOG_WARN("fail to resolve service_name_str", KR(ret)); + } else { + ObServiceNameArg::ObServiceOp service_op = + static_cast(parse_tree.children_[0]->value_); + if (OB_FAIL(stmt->get_arg().init(service_op, target_tenant_id, service_name_str))) { + LOG_WARN("fail to init ObServiceNameArg", KR(ret), K(service_op), K(target_tenant_id), K(service_name_str)); + } + } + if (OB_SUCC(ret)) { + stmt_ = stmt; + } + if (OB_SUCC(ret) && ObSchemaChecker::is_ora_priv_check()) { + if (OB_ISNULL(schema_checker_)) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", K(ret)); + } else if (OB_FAIL(schema_checker_->check_ora_ddl_priv( + session_info_->get_effective_tenant_id(), + session_info_->get_priv_user_id(), + ObString(""), + // why use T_ALTER_SYSTEM_SET_PARAMETER? + // because T_ALTER_SYSTEM_SET_PARAMETER has following traits: + // T_ALTER_SYSTEM_SET_PARAMETER can allow dba to do an operation + // and prohibit other user to do this operation + // so we reuse this. + stmt::T_ALTER_SYSTEM_SET_PARAMETER, + session_info_->get_enable_role_array()))) { + LOG_WARN("failed to check privilege", K(session_info_->get_effective_tenant_id()), K(session_info_->get_user_id())); + } + } + return ret; +} } // end namespace sql } // end namespace oceanbase diff --git a/src/sql/resolver/cmd/ob_alter_system_resolver.h b/src/sql/resolver/cmd/ob_alter_system_resolver.h index 839a2b62dc..796cf9b81a 100644 --- a/src/sql/resolver/cmd/ob_alter_system_resolver.h +++ b/src/sql/resolver/cmd/ob_alter_system_resolver.h @@ -185,8 +185,7 @@ DEF_SIMPLE_CMD_RESOLVER(ObBackupSetDecryptionResolver); DEF_SIMPLE_CMD_RESOLVER(ObAddRestoreSourceResolver); DEF_SIMPLE_CMD_RESOLVER(ObClearRestoreSourceResolver); DEF_SIMPLE_CMD_RESOLVER(ObCheckpointSlogResolver); - - +DEF_SIMPLE_CMD_RESOLVER(ObServiceNameResolver); int resolve_restore_until(const ParseNode &time_node, const ObSQLSessionInfo *session_info, share::SCN &recovery_until_scn, diff --git a/src/sql/resolver/cmd/ob_alter_system_stmt.h b/src/sql/resolver/cmd/ob_alter_system_stmt.h index dd0a4add55..b927359e1e 100644 --- a/src/sql/resolver/cmd/ob_alter_system_stmt.h +++ b/src/sql/resolver/cmd/ob_alter_system_stmt.h @@ -18,6 +18,7 @@ #include "share/scheduler/ob_sys_task_stat.h" #include "share/backup/ob_backup_clean_struct.h" #include "rootserver/ob_transfer_partition_command.h" +#include "share/ob_service_name_proxy.h" namespace oceanbase { @@ -1390,6 +1391,16 @@ private: rootserver::ObTransferPartitionArg arg_; }; +class ObServiceNameStmt : public ObSystemCmdStmt +{ +public: + ObServiceNameStmt() : ObSystemCmdStmt(stmt::T_SERVICE_NAME), arg_() {} + virtual ~ObServiceNameStmt() {} + share::ObServiceNameArg &get_arg() { return arg_; } +private: + share::ObServiceNameArg arg_; +}; + } // end namespace sql } // end namespace oceanbase diff --git 
a/src/sql/resolver/ob_resolver.cpp b/src/sql/resolver/ob_resolver.cpp index 7eb90e7419..8239f1b076 100644 --- a/src/sql/resolver/ob_resolver.cpp +++ b/src/sql/resolver/ob_resolver.cpp @@ -1237,6 +1237,10 @@ int ObResolver::resolve(IsPrepared if_prepared, const ParseNode &parse_tree, ObS REGISTER_STMT_RESOLVER(TransferPartition); break; } + case T_SERVICE_NAME: { + REGISTER_STMT_RESOLVER(ServiceName); + break; + } case T_REPAIR_TABLE: { REGISTER_STMT_RESOLVER(Mock); break; diff --git a/src/sql/session/ob_sql_session_info.cpp b/src/sql/session/ob_sql_session_info.cpp index 3289708c1e..a46d1d3ad6 100644 --- a/src/sql/session/ob_sql_session_info.cpp +++ b/src/sql/session/ob_sql_session_info.cpp @@ -59,6 +59,7 @@ #include "ob_sess_info_verify.h" #include "share/schema/ob_schema_utils.h" #include "share/config/ob_config_helper.h" +#include "rootserver/ob_tenant_info_loader.h" using namespace oceanbase::sql; using namespace oceanbase::common; @@ -207,7 +208,9 @@ ObSQLSessionInfo::ObSQLSessionInfo(const uint64_t tenant_id) : current_dblink_sequence_id_(0), client_non_standard_(false), is_session_sync_support_(false), - job_info_(nullptr) + job_info_(nullptr), + failover_mode_(false), + service_name_() { MEMSET(tenant_buff_, 0, sizeof(share::ObTenantSpaceFetcher)); MEMSET(vip_buf_, 0, sizeof(vip_buf_)); @@ -405,6 +408,8 @@ void ObSQLSessionInfo::reset(bool skip_sys_var) need_send_feedback_proxy_info_ = false; is_lock_session_ = false; job_info_ = nullptr; + failover_mode_ = false; + service_name_.reset(); } void ObSQLSessionInfo::clean_status() @@ -986,32 +991,6 @@ int ObSQLSessionInfo::drop_temp_tables(const bool is_disconn, return ret; } -//清理oracle临时表中数据来源和当前session id相同, 但属于被重用的旧的session数据 -int ObSQLSessionInfo::drop_reused_oracle_temp_tables() -{ - int ret = OB_SUCCESS; - //obrpc::ObCommonRpcProxy *common_rpc_proxy = NULL; - if (false == get_is_deserialized() - && !is_inner() - && !GCTX.is_standby_cluster()) { - obrpc::ObDropTableArg drop_table_arg; - drop_table_arg.if_exist_ = true; - drop_table_arg.to_recyclebin_ = false; - drop_table_arg.table_type_ = share::schema::TMP_TABLE_ORA_SESS; - drop_table_arg.session_id_ = get_sessid_for_table(); - drop_table_arg.tenant_id_ = get_effective_tenant_id(); - drop_table_arg.sess_create_time_ = get_sess_create_time(); - //common_rpc_proxy = GCTX.rs_rpc_proxy_; - if (OB_FAIL(delete_from_oracle_temp_tables(drop_table_arg))) { - //if (OB_FAIL(common_rpc_proxy->drop_table(drop_table_arg))) { - LOG_WARN("failed to drop reused temporary table", K(drop_table_arg), K(ret)); - } else { - LOG_DEBUG("succeed to delete old rows for oracle temporary table", K(drop_table_arg)); - } - } - return ret; -} - //proxy方式下session创建、断开和后台定时task检查: //如果距离上次更新此session->last_refresh_temp_table_time_ 超过1hr //则更新session创建的临时表最后活动时间SESSION_ACTIVE_TIME @@ -4721,3 +4700,51 @@ int ObSQLSessionInfo::get_dblink_sequence_schema(int64_t sequence_id, const ObSe LOG_TRACE("get dblink sequence schema", K(sequence_id), K(dblink_sequence_schemas_)); return ret; } + +int ObSQLSessionInfo::set_service_name(const ObString& service_name) +{ + int ret = OB_SUCCESS; + if (OB_FAIL(service_name_.init(service_name))) { + LOG_WARN("fail to init service_name", KR(ret), K(service_name)); + } + return ret; +} +int ObSQLSessionInfo::check_service_name_and_failover_mode(const uint64_t tenant_id) const +{ + // if failover_mode is on, and the session is created via service_name + // the tenant should be primary + // service name must exist and service status must be started + // if service_name is not empty, 
the version must be >= 4240 + int ret = OB_SUCCESS; + bool is_sts_ready = false; + if (service_name_.is_empty()) { + // do nothing + } else if (OB_UNLIKELY(!is_valid_tenant_id(tenant_id))) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), K(tenant_id)); + } else { + MTL_SWITCH(tenant_id) { + rootserver::ObTenantInfoLoader *tenant_info_loader = MTL(rootserver::ObTenantInfoLoader*); + if (OB_ISNULL(tenant_info_loader)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("tenant_info_loader is null", KR(ret), KP(tenant_info_loader)); + } else if (OB_FAIL(tenant_info_loader->check_if_sts_is_ready(is_sts_ready))) { + LOG_WARN("fail to execute check_if_sts_is_ready", KR(ret)); + } else if (failover_mode_ && is_sts_ready) { + // 'sts_ready' indicates that the 'access_mode' is 'RAW_WRITE' + // The reason for using 'sts_ready' is that we believe all connections intending to reach the + // primary tenant should be accepted before the 'access_mode' switches to 'RAW_WRITE'. + ret = OB_NOT_PRIMARY_TENANT; + LOG_WARN("the tenant is not primary, the request is not allowed", KR(ret), K(is_sts_ready)); + } else if (OB_FAIL(tenant_info_loader->check_if_the_service_name_is_stopped(service_name_))) { + LOG_WARN("fail to execute check_if_the_service_name_is_stopped", KR(ret), K(service_name_)); + } + } + } + return ret; +} +int ObSQLSessionInfo::check_service_name_and_failover_mode() const +{ + uint64_t tenant_id = get_effective_tenant_id(); + return check_service_name_and_failover_mode(tenant_id); +} diff --git a/src/sql/session/ob_sql_session_info.h b/src/sql/session/ob_sql_session_info.h index 2ec2938970..5f89af9b3b 100644 --- a/src/sql/session/ob_sql_session_info.h +++ b/src/sql/session/ob_sql_session_info.h @@ -46,6 +46,7 @@ #include "sql/ob_optimizer_trace_impl.h" #include "sql/monitor/flt/ob_flt_span_mgr.h" #include "storage/tx/ob_tx_free_route.h" +#include "share/ob_service_name_proxy.h" #include "observer/dbms_scheduler/ob_dbms_sched_job_utils.h" namespace oceanbase @@ -84,6 +85,7 @@ namespace share { struct ObSequenceValue; } +using share::ObServiceNameString; using common::ObPsStmtId; namespace sql { @@ -880,7 +882,6 @@ public: const bool is_xa_trans = false, const bool is_reset_connection = false); void refresh_temp_tables_sess_active_time(); //更新临时表的sess active time - int drop_reused_oracle_temp_tables(); int delete_from_oracle_temp_tables(const obrpc::ObDropTableArg &const_drop_table_arg); //To generate an unique key for Oracle Global Temporary Table @@ -1451,6 +1452,13 @@ public: bool is_lock_session() const { return is_lock_session_; } int64_t get_plsql_exec_time(); void update_pure_sql_exec_time(int64_t elapsed_time); + const ObServiceNameString& get_service_name() const { return service_name_; } + bool get_failover_mode() const { return failover_mode_; } + void set_failover_mode(const bool failover_mode) { failover_mode_ = failover_mode; } + void reset_service_name() { service_name_.reset(); } + int set_service_name(const ObString& service_name); + int check_service_name_and_failover_mode() const; + int check_service_name_and_failover_mode(const uint64_t tenant_id) const; public: bool has_tx_level_temp_table() const { return tx_desc_ && tx_desc_->with_temporary_table(); } void set_affected_rows_is_changed(int64_t affected_rows); @@ -1720,8 +1728,11 @@ private: bool is_session_sync_support_; // session_sync_support flag. share::schema::ObUserLoginInfo login_info_; dbms_scheduler::ObDBMSSchedJobInfo *job_info_; // dbms_scheduler related. 
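For the new session state (failover_mode_ and service_name_), the statement-level rule added in this patch is that service-name management commands and tenant role switches are rejected when the current session was itself established via a service name; the grammar added above routes ALTER SYSTEM CREATE / DELETE / START / STOP SERVICE <name> (service_op values 1-4, with an optional tenant clause) to ObServiceNameExecutor. The sketch below factors out that guard; the helper name and the factoring are illustrative, and only session calls visible in this diff (get_service_name(), is_empty()) are used.

    // Hypothetical guard mirroring the checks added in ObSwitchTenantExecutor
    // and ObServiceNameExecutor; op_desc is the text passed to LOG_USER_ERROR,
    // e.g. "This session is created via service_name, switching tenant is".
    static int refuse_in_service_name_session_sketch(ObSQLSessionInfo &session,
                                                     const char *op_desc)
    {
      int ret = OB_SUCCESS;
      if (OB_UNLIKELY(!session.get_service_name().is_empty())) {
        ret = OB_OP_NOT_ALLOW;
        LOG_WARN("command cannot be executed in a session created via service_name",
                 KR(ret), K(session.get_service_name()));
        LOG_USER_ERROR(OB_OP_NOT_ALLOW, op_desc);
      }
      return ret;
    }
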
+ bool failover_mode_; + ObServiceNameString service_name_; }; + inline bool ObSQLSessionInfo::is_terminate(int &ret) const { bool bret = false; diff --git a/src/storage/concurrency_control/ob_multi_version_garbage_collector.cpp b/src/storage/concurrency_control/ob_multi_version_garbage_collector.cpp index 786e61a677..6fb5e7b54d 100644 --- a/src/storage/concurrency_control/ob_multi_version_garbage_collector.cpp +++ b/src/storage/concurrency_control/ob_multi_version_garbage_collector.cpp @@ -331,18 +331,19 @@ int ObMultiVersionGarbageCollector::study() timeguard.click("study_min_unallocated_GTS"); if (OB_SUCC(ret)) { - if (!GCTX.is_standby_cluster() && // standby cluster does not support WRS - OB_FAIL(study_min_unallocated_WRS(min_unallocated_WRS))) { - MVCC_LOG(WARN, "study min unallocated GTS failed", K(ret)); - } else if (!min_unallocated_WRS.is_valid() - || min_unallocated_WRS.is_min() - || min_unallocated_WRS.is_max()) { + bool is_primary = true; + const uint64_t tenant_id = MTL_ID(); + if (OB_FAIL(ObShareUtil::mtl_check_if_tenant_role_is_primary(tenant_id, is_primary))) { + MVCC_LOG(WARN, "fail to execute mtl_check_if_tenant_role_is_primary", KR(ret), K(tenant_id)); + } else if (is_primary && OB_FAIL(study_min_unallocated_WRS(min_unallocated_WRS))) { + MVCC_LOG(WARN, "study min unallocated GTS failed", K(ret), K(is_primary)); + } else if (!min_unallocated_WRS.is_valid() || min_unallocated_WRS.is_min()) { ret = OB_ERR_UNEXPECTED; MVCC_LOG(ERROR, "wrong min unallocated WRS", - K(ret), K(min_unallocated_WRS), KPC(this)); + K(ret), K(min_unallocated_WRS), KPC(this), K(is_primary)); } else { MVCC_LOG(INFO, "study min unallocated wrs succeed", - K(ret), K(min_unallocated_WRS), KPC(this)); + K(ret), K(min_unallocated_WRS), KPC(this), K(is_primary)); } } diff --git a/src/storage/ls/ob_ls.cpp b/src/storage/ls/ob_ls.cpp index 05f9460c2d..9fddf641fe 100755 --- a/src/storage/ls/ob_ls.cpp +++ b/src/storage/ls/ob_ls.cpp @@ -38,7 +38,7 @@ #include "rootserver/ob_create_standby_from_net_actor.h" #include "rootserver/ob_heartbeat_service.h" #include "rootserver/ob_primary_ls_service.h" -#include "rootserver/ob_recovery_ls_service.h" +#include "rootserver/standby/ob_recovery_ls_service.h" #include "rootserver/ob_tenant_transfer_service.h" // ObTenantTransferService #include "rootserver/ob_tenant_balance_service.h" #include "rootserver/restore/ob_restore_service.h" diff --git a/src/storage/memtable/ob_memtable.cpp b/src/storage/memtable/ob_memtable.cpp index 649282dcfe..a11fd6fd7a 100644 --- a/src/storage/memtable/ob_memtable.cpp +++ b/src/storage/memtable/ob_memtable.cpp @@ -1236,10 +1236,6 @@ int ObMemtable::replay_row(ObStoreCtx &ctx, TRANS_LOG(WARN, "get next row error", K(ret)); } } else if (FALSE_IT(timeguard.click("mutator_row copy"))) { - } else if (OB_FAIL(check_standby_cluster_schema_condition_(ctx, table_id, table_version))) { - TRANS_LOG(WARN, "failed to check standby_cluster_schema_condition", K(ret), K(table_id), - K(table_version)); - } else if (FALSE_IT(timeguard.click("check_standby_cluster_schema_condition"))) { } else if (OB_UNLIKELY(dml_flag == blocksstable::ObDmlFlag::DF_NOT_EXIST)) { ret = OB_ERR_UNEXPECTED; TRANS_LOG(ERROR, "Unexpected not exist trans node", K(ret), K(dml_flag), K(rowkey)); @@ -2441,44 +2437,6 @@ void ObMemtable::set_minor_merged() minor_merged_time_ = ObTimeUtility::current_time(); } -int ObMemtable::check_standby_cluster_schema_condition_(ObStoreCtx &ctx, - const int64_t table_id, - const int64_t table_version) -{ - int ret = OB_SUCCESS; -#ifdef ERRSIM - ret = 
OB_E(EventTable::EN_CHECK_STANDBY_CLUSTER_SCHEMA_CONDITION) OB_SUCCESS; - if (OB_FAIL(ret) && !common::is_inner_table(table_id)) { - TRANS_LOG(WARN, "ERRSIM, replay row failed", K(ret)); - return ret; - } -#endif - if (GCTX.is_standby_cluster()) { - //only stand_by cluster need to be check - uint64_t tenant_id = MTL_ID(); - if (OB_UNLIKELY(!is_valid_tenant_id(tenant_id))) { - ret = OB_ERR_UNEXPECTED; - TRANS_LOG(ERROR, "invalid tenant_id", K(ret), K(tenant_id), K(table_id), K(table_version)); - } else if (OB_SYS_TENANT_ID == tenant_id) { - //sys tenant do not need check - } else { - int64_t tenant_schema_version = 0; - if (OB_FAIL(GSCHEMASERVICE.get_tenant_refreshed_schema_version(tenant_id, tenant_schema_version))) { - TRANS_LOG(WARN, "get_tenant_schema_version failed", K(ret), K(tenant_id), - K(table_id), K(tenant_id), K(table_version)); - if (OB_ENTRY_NOT_EXIST == ret) { - // tenant schema hasn't been flushed in the case of restart, rewrite OB_ENTRY_NOT_EXIST - ret = OB_TRANS_WAIT_SCHEMA_REFRESH; - } - } else if (table_version > tenant_schema_version) { - // replay is not allowed when data's table version is greater than tenant's schema version - //remove by msy164651, in 4.0 no need to check schema version - } else {/*do nothing*/} - } - } - return ret; -} - int64_t ObMemtable::get_upper_trans_version() const { return INT64_MAX; diff --git a/src/storage/memtable/ob_memtable.h b/src/storage/memtable/ob_memtable.h index 5a02ae1a92..dd05d99220 100644 --- a/src/storage/memtable/ob_memtable.h +++ b/src/storage/memtable/ob_memtable.h @@ -541,9 +541,6 @@ private: void set_begin(ObMvccAccessCtx &ctx); void set_end(ObMvccAccessCtx &ctx, int ret); - int check_standby_cluster_schema_condition_(storage::ObStoreCtx &ctx, - const int64_t table_id, - const int64_t table_version); int set_( const storage::ObTableIterParam ¶m, const common::ObIArray &columns, diff --git a/src/storage/tablelock/ob_table_lock_service.cpp b/src/storage/tablelock/ob_table_lock_service.cpp index bdd5c44aa4..05ba584beb 100644 --- a/src/storage/tablelock/ob_table_lock_service.cpp +++ b/src/storage/tablelock/ob_table_lock_service.cpp @@ -2566,20 +2566,14 @@ int ObTableLockService::check_op_allowed_(const uint64_t table_id, // all the tmp table is a normal table now, deal it as a normal user table // table lock not support virtual table/sys table(not in white list) etc. 
diff --git a/src/storage/tablelock/ob_table_lock_service.cpp b/src/storage/tablelock/ob_table_lock_service.cpp
index bdd5c44aa4..05ba584beb 100644
--- a/src/storage/tablelock/ob_table_lock_service.cpp
+++ b/src/storage/tablelock/ob_table_lock_service.cpp
@@ -2566,20 +2566,14 @@ int ObTableLockService::check_op_allowed_(const uint64_t table_id,
 
     // all the tmp table is a normal table now, deal it as a normal user table
     // table lock not support virtual table/sys table(not in white list) etc.
     is_allowed = false;
-  } else if (GCTX.is_standby_cluster() && OB_SYS_TENANT_ID != tenant_id) {
-    is_allowed = false;
-  } else if (!GCTX.is_standby_cluster()) {
-    bool is_restore = false;
-    ObMultiVersionSchemaService *schema_service = MTL(ObTenantSchemaService*)->get_schema_service();
-    if (OB_FAIL(schema_service->check_tenant_is_restore(NULL,
-                                                        tenant_id,
-                                                        is_restore))) {
-      LOG_WARN("failed to check tenant restore", K(ret), K(table_id));
-    } else if (is_restore) {
+  } else {
+    bool is_primary = true;
+    if (OB_FAIL(ObShareUtil::mtl_check_if_tenant_role_is_primary(tenant_id, is_primary))) {
+      LOG_WARN("fail to execute mtl_check_if_tenant_role_is_primary", KR(ret), K(tenant_id));
+    } else if (!is_primary) {
       is_allowed = false;
     }
   }
-
   return ret;
 }
diff --git a/tools/deploy/mysql_test/r/mysql/information_schema.result b/tools/deploy/mysql_test/r/mysql/information_schema.result
index 41bfa5e168..5af54a3524 100644
--- a/tools/deploy/mysql_test/r/mysql/information_schema.result
+++ b/tools/deploy/mysql_test/r/mysql/information_schema.result
@@ -331,6 +331,7 @@ select * from information_schema.tables where table_schema in ('oceanbase', 'mys
 | def | oceanbase | DBA_OB_RESTORE_PROGRESS | SYSTEM VIEW | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | |
 | def | oceanbase | DBA_OB_RSRC_IO_DIRECTIVES | SYSTEM VIEW | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | |
 | def | oceanbase | DBA_OB_SEQUENCE_OBJECTS | SYSTEM VIEW | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | |
+| def | oceanbase | DBA_OB_SERVICES | SYSTEM VIEW | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | |
 | def | oceanbase | DBA_OB_SYS_VARIABLES | SYSTEM VIEW | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | |
 | def | oceanbase | DBA_OB_TABLEGROUPS | SYSTEM VIEW | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | |
 | def | oceanbase | DBA_OB_TABLEGROUP_PARTITIONS | SYSTEM VIEW | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | |
@@ -848,6 +849,7 @@ select * from information_schema.tables where table_schema in ('oceanbase', 'mys
 | def | oceanbase | __all_virtual_server_compaction_event_history | SYSTEM TABLE | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | |
 | def | oceanbase | __all_virtual_server_compaction_progress | SYSTEM TABLE | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | |
 | def | oceanbase | __all_virtual_server_schema_info | SYSTEM TABLE | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | |
+| def | oceanbase | __all_virtual_service | SYSTEM TABLE | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | |
 | def | oceanbase | __all_virtual_session_event | SYSTEM TABLE | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | |
 | def | oceanbase | __all_virtual_session_info | SYSTEM TABLE | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | |
 | def | oceanbase | __all_virtual_session_ps_info | SYSTEM TABLE | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | |
@@ -1841,6 +1843,7 @@ select * from information_schema.tables where table_schema in ('oceanbase', 'mys
 | def | oceanbase | DBA_OB_RESTORE_PROGRESS | SYSTEM VIEW | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | |
 | def | oceanbase | DBA_OB_RSRC_IO_DIRECTIVES | SYSTEM VIEW | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | |
 | def | oceanbase | DBA_OB_SEQUENCE_OBJECTS | SYSTEM VIEW | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | |
+| def | oceanbase | DBA_OB_SERVICES | SYSTEM VIEW | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | |
 | def | oceanbase | DBA_OB_SYS_VARIABLES | SYSTEM VIEW | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | |
 | def | oceanbase | DBA_OB_TABLEGROUPS | SYSTEM VIEW | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | |
 | def | oceanbase | DBA_OB_TABLEGROUP_PARTITIONS | SYSTEM VIEW | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | |
@@ -2358,6 +2361,7 @@ select * from information_schema.tables where table_schema in ('oceanbase', 'mys
 | def | oceanbase | __all_virtual_server_compaction_event_history | SYSTEM TABLE | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | |
 | def | oceanbase | __all_virtual_server_compaction_progress | SYSTEM TABLE | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | |
 | def | oceanbase | __all_virtual_server_schema_info | SYSTEM TABLE | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | |
+| def | oceanbase | __all_virtual_service | SYSTEM TABLE | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | |
 | def | oceanbase | __all_virtual_session_event | SYSTEM TABLE | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | |
 | def | oceanbase | __all_virtual_session_info | SYSTEM TABLE | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | |
 | def | oceanbase | __all_virtual_session_ps_info | SYSTEM TABLE | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | |
@@ -3122,6 +3126,8 @@ select * from information_schema.statistics where table_schema in ('oceanbase',
 | def | oceanbase | __all_server_event_history | 0 | oceanbase | PRIMARY | 3 | svr_port | A | NULL | NULL | NULL | | BTREE | VALID | | YES | NULL |
 | def | oceanbase | __all_server_event_history | 1 | oceanbase | idx_server_event | 1 | event | A | NULL | NULL | NULL | | BTREE | VALID | | YES | NULL |
 | def | oceanbase | __all_server_event_history | 1 | oceanbase | idx_server_module | 1 | module | A | NULL | NULL | NULL | | BTREE | VALID | | YES | NULL |
+| def | oceanbase | __all_service | 0 | oceanbase | PRIMARY | 1 | tenant_id | A | NULL | NULL | NULL | | BTREE | VALID | | YES | NULL |
+| def | oceanbase | __all_service | 0 | oceanbase | PRIMARY | 2 | service_name_id | A | NULL | NULL | NULL | | BTREE | VALID | | YES | NULL |
 | def | oceanbase | __all_service_epoch | 0 | oceanbase | PRIMARY | 1 | tenant_id | A | NULL | NULL | NULL | | BTREE | VALID | | YES | NULL |
 | def | oceanbase | __all_service_epoch | 0 | oceanbase | PRIMARY | 2 | name | A | NULL | NULL | NULL | | BTREE | VALID | | YES | NULL |
 | def | oceanbase | __all_space_usage | 0 | oceanbase | PRIMARY | 1 | tenant_id | A | NULL | NULL | NULL | | BTREE | VALID | | YES | NULL |
diff --git a/tools/deploy/mysql_test/test_suite/inner_table/r/mysql/desc_sys_views_in_mysql.result b/tools/deploy/mysql_test/test_suite/inner_table/r/mysql/desc_sys_views_in_mysql.result
index b8d7f829de..3ca3c318e8 100644
--- a/tools/deploy/mysql_test/test_suite/inner_table/r/mysql/desc_sys_views_in_mysql.result
+++ b/tools/deploy/mysql_test/test_suite/inner_table/r/mysql/desc_sys_views_in_mysql.result
@@ -2657,8 +2657,9 @@ LB_VPORT bigint(20) YES NULL
 IN_BYTES bigint(20) NO NULL
 OUT_BYTES bigint(20) NO NULL
 USER_CLIENT_PORT bigint(20) NO
-TOTAL_CPU_TIME bigint(21) NO NULL
 PROXY_USER varchar(128) YES NULL
+SERVICE_NAME varchar(64) YES NULL
+TOTAL_CPU_TIME bigint(21) NO NULL
 TOP_INFO varchar(262143) YES NULL
 select /*+QUERY_TIMEOUT(60000000)*/ count(*) as cnt from (select * from oceanbase.GV$OB_PROCESSLIST limit 1);
 cnt
@@ -2702,8 +2703,9 @@ LB_VPORT bigint(20) NO
 IN_BYTES bigint(20) NO
 OUT_BYTES bigint(20) NO
 USER_CLIENT_PORT bigint(20) NO
-TOTAL_CPU_TIME bigint(21) NO
 PROXY_USER varchar(128) NO
+SERVICE_NAME varchar(64) NO
+TOTAL_CPU_TIME bigint(21) NO
 TOP_INFO varchar(262143) NO
 select /*+QUERY_TIMEOUT(60000000)*/ count(*) as cnt from (select * from oceanbase.V$OB_PROCESSLIST limit 1);
 cnt
@@ -6284,6 +6286,16 @@ ENABLE_VERSIONS longtext NO NULL
 select /*+QUERY_TIMEOUT(60000000)*/ count(*) as cnt from (select * from oceanbase.V$OB_COMPATIBILITY_CONTROL limit 1);
 cnt
 1
+desc oceanbase.DBA_OB_SERVICES;
+Field Type Null Key Default Extra
+CREATE_TIME timestamp(6) NO NULL
+MODIFIED_TIME timestamp(6) NO NULL
+SERVICE_NAME_ID bigint(20) NO NULL
+SERVICE_NAME varchar(64) NO NULL
+SERVICE_STATUS varchar(64) NO NULL
+select /*+QUERY_TIMEOUT(60000000)*/ count(*) as cnt from (select * from oceanbase.DBA_OB_SERVICES limit 1);
+cnt
+1
 desc oceanbase.GV$OB_TENANT_RESOURCE_LIMIT;
 Field Type Null Key Default Extra
 SVR_IP varchar(46) NO NULL
diff --git a/tools/deploy/mysql_test/test_suite/inner_table/r/mysql/desc_sys_views_in_sys.result b/tools/deploy/mysql_test/test_suite/inner_table/r/mysql/desc_sys_views_in_sys.result
index c6f848b4d0..320c60d4f0 100644
--- a/tools/deploy/mysql_test/test_suite/inner_table/r/mysql/desc_sys_views_in_sys.result
+++ b/tools/deploy/mysql_test/test_suite/inner_table/r/mysql/desc_sys_views_in_sys.result
@@ -3935,8 +3935,9 @@ LB_VPORT bigint(20) YES NULL
 IN_BYTES bigint(20) NO NULL
 OUT_BYTES bigint(20) NO NULL
 USER_CLIENT_PORT bigint(20) NO
-TOTAL_CPU_TIME bigint(21) NO NULL
 PROXY_USER varchar(128) YES NULL
+SERVICE_NAME varchar(64) YES NULL
+TOTAL_CPU_TIME bigint(21) NO NULL
 TOP_INFO varchar(262143) YES NULL
 select /*+QUERY_TIMEOUT(60000000)*/ count(*) as cnt from (select * from oceanbase.GV$OB_PROCESSLIST limit 1);
 cnt
@@ -3980,8 +3981,9 @@ LB_VPORT bigint(20) NO
 IN_BYTES bigint(20) NO
 OUT_BYTES bigint(20) NO
 USER_CLIENT_PORT bigint(20) NO
-TOTAL_CPU_TIME bigint(21) NO
 PROXY_USER varchar(128) NO
+SERVICE_NAME varchar(64) NO
+TOTAL_CPU_TIME bigint(21) NO
 TOP_INFO varchar(262143) NO
 select /*+QUERY_TIMEOUT(60000000)*/ count(*) as cnt from (select * from oceanbase.V$OB_PROCESSLIST limit 1);
 cnt
@@ -9080,6 +9082,27 @@ ENABLE_VERSIONS longtext NO NULL
 select /*+QUERY_TIMEOUT(60000000)*/ count(*) as cnt from (select * from oceanbase.V$OB_COMPATIBILITY_CONTROL limit 1);
 cnt
 1
+desc oceanbase.DBA_OB_SERVICES;
+Field Type Null Key Default Extra
+CREATE_TIME timestamp(6) NO NULL
+MODIFIED_TIME timestamp(6) NO NULL
+SERVICE_NAME_ID bigint(20) NO NULL
+SERVICE_NAME varchar(64) NO NULL
+SERVICE_STATUS varchar(64) NO NULL
+select /*+QUERY_TIMEOUT(60000000)*/ count(*) as cnt from (select * from oceanbase.DBA_OB_SERVICES limit 1);
+cnt
+1
+desc oceanbase.CDB_OB_SERVICES;
+Field Type Null Key Default Extra
+TENANT_ID bigint(20) NO NULL
+CREATE_TIME timestamp(6) NO NULL
+MODIFIED_TIME timestamp(6) NO NULL
+SERVICE_NAME_ID bigint(20) NO NULL
+SERVICE_NAME varchar(64) NO NULL
+SERVICE_STATUS varchar(64) NO NULL
+select /*+QUERY_TIMEOUT(60000000)*/ count(*) as cnt from (select * from oceanbase.CDB_OB_SERVICES limit 1);
+cnt
+1
 desc oceanbase.GV$OB_TENANT_RESOURCE_LIMIT;
 Field Type Null Key Default Extra
 SVR_IP varchar(46) NO NULL
diff --git a/tools/deploy/mysql_test/test_suite/inner_table/r/mysql/desc_virtual_table_in_mysql.result b/tools/deploy/mysql_test/test_suite/inner_table/r/mysql/desc_virtual_table_in_mysql.result
index dce795d9e0..354f437c95 100644
--- a/tools/deploy/mysql_test/test_suite/inner_table/r/mysql/desc_virtual_table_in_mysql.result
+++ b/tools/deploy/mysql_test/test_suite/inner_table/r/mysql/desc_virtual_table_in_mysql.result
@@ -4984,6 +4984,17 @@ enable_versions longtext NO NULL
 select /*+QUERY_TIMEOUT(60000000)*/ IF(count(*) >= 0, 1, 0) from oceanbase.__all_virtual_compatibility_control;
 IF(count(*) >= 0, 1, 0)
 1
+desc oceanbase.__all_virtual_service;
+Field Type Null Key Default Extra
+tenant_id bigint(20) NO PRI NULL
+service_name_id bigint(20) NO PRI NULL
+gmt_create timestamp(6) NO NULL
+gmt_modified timestamp(6) NO NULL
+service_name varchar(64) NO NULL
+service_status varchar(64) NO NULL
+select /*+QUERY_TIMEOUT(60000000)*/ IF(count(*) >= 0, 1, 0) from oceanbase.__all_virtual_service;
+IF(count(*) >= 0, 1, 0)
+1
 desc oceanbase.__all_virtual_tenant_resource_limit;
 Field Type Null Key Default Extra
 svr_ip varchar(46) NO NULL
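Review note before the next result file: the new DBA_OB_SERVICES / CDB_OB_SERVICES views and the __all_virtual_service table expose SERVICE_NAME_ID, SERVICE_NAME and SERVICE_STATUS per tenant, as the expected results above show. A rough sketch of reading the view through the usual inner-SQL client follows; the column names come from this patch, while the function, its parameters, and the ObSqlString / ObMySQLProxy read idiom are assumptions for illustration, not code from the patch:

    // Sketch only: list the services of one tenant via DBA_OB_SERVICES.
    int list_services(common::ObMySQLProxy &sql_proxy, const uint64_t tenant_id)
    {
      int ret = OB_SUCCESS;
      common::ObSqlString sql;
      SMART_VAR(ObMySQLProxy::MySQLResult, res) {
        common::sqlclient::ObMySQLResult *result = nullptr;
        if (OB_FAIL(sql.assign_fmt("SELECT SERVICE_NAME_ID, SERVICE_NAME, SERVICE_STATUS"
                                   " FROM oceanbase.DBA_OB_SERVICES"))) {
          LOG_WARN("fail to assign sql", KR(ret));
        } else if (OB_FAIL(sql_proxy.read(res, tenant_id, sql.ptr()))) {
          LOG_WARN("fail to read DBA_OB_SERVICES", KR(ret), K(sql));
        } else if (OB_ISNULL(result = res.get_result())) {
          ret = OB_ERR_UNEXPECTED;
          LOG_WARN("query result is null", KR(ret), K(sql));
        } else {
          while (OB_SUCC(ret) && OB_SUCC(result->next())) {
            int64_t service_name_id = 0;
            ObString service_name;
            ObString service_status;
            if (OB_FAIL(result->get_int("SERVICE_NAME_ID", service_name_id))) {
              LOG_WARN("fail to get SERVICE_NAME_ID", KR(ret));
            } else if (OB_FAIL(result->get_varchar("SERVICE_NAME", service_name))) {
              LOG_WARN("fail to get SERVICE_NAME", KR(ret));
            } else if (OB_FAIL(result->get_varchar("SERVICE_STATUS", service_status))) {
              LOG_WARN("fail to get SERVICE_STATUS", KR(ret));
            } else {
              LOG_INFO("service", K(service_name_id), K(service_name), K(service_status));
            }
          }
          if (OB_ITER_END == ret) {
            ret = OB_SUCCESS;  // reached the end of the result set
          }
        }
      }
      return ret;
    }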
diff --git a/tools/deploy/mysql_test/test_suite/inner_table/r/mysql/desc_virtual_table_in_sys.result b/tools/deploy/mysql_test/test_suite/inner_table/r/mysql/desc_virtual_table_in_sys.result
index b65613a551..3b381fcf20 100644
--- a/tools/deploy/mysql_test/test_suite/inner_table/r/mysql/desc_virtual_table_in_sys.result
+++ b/tools/deploy/mysql_test/test_suite/inner_table/r/mysql/desc_virtual_table_in_sys.result
@@ -9616,6 +9616,17 @@ is_deleted bigint(20) NO NULL
 select /*+QUERY_TIMEOUT(60000000)*/ IF(count(*) >= 0, 1, 0) from oceanbase.__all_virtual_user_proxy_role_info_history;
 IF(count(*) >= 0, 1, 0)
 1
+desc oceanbase.__all_virtual_service;
+Field Type Null Key Default Extra
+tenant_id bigint(20) NO PRI NULL
+service_name_id bigint(20) NO PRI NULL
+gmt_create timestamp(6) NO NULL
+gmt_modified timestamp(6) NO NULL
+service_name varchar(64) NO NULL
+service_status varchar(64) NO NULL
+select /*+QUERY_TIMEOUT(60000000)*/ IF(count(*) >= 0, 1, 0) from oceanbase.__all_virtual_service;
+IF(count(*) >= 0, 1, 0)
+1
 desc oceanbase.__all_virtual_tenant_resource_limit;
 Field Type Null Key Default Extra
 svr_ip varchar(46) NO NULL
diff --git a/tools/deploy/mysql_test/test_suite/inner_table/r/mysql/inner_table_overall.result b/tools/deploy/mysql_test/test_suite/inner_table/r/mysql/inner_table_overall.result
index be1f568b12..57c7f8ef66 100644
--- a/tools/deploy/mysql_test/test_suite/inner_table/r/mysql/inner_table_overall.result
+++ b/tools/deploy/mysql_test/test_suite/inner_table/r/mysql/inner_table_overall.result
@@ -298,6 +298,7 @@ select 0xffffffffff & table_id, table_name, table_type, database_id, part_num fr
 513 __all_user_proxy_info_history 0 201001 1
 514 __all_user_proxy_role_info 0 201001 1
 515 __all_user_proxy_role_info_history 0 201001 1
+516 __all_service 0 201001 1
 518 __all_mview_dep 0 201001 1
 519 __all_scheduler_job_run_detail_v2 0 201001 1
 10001 __tenant_virtual_all_table 2 201001 1
@@ -751,6 +752,7 @@ select 0xffffffffff & table_id, table_name, table_type, database_id, part_num fr
 12475 __all_virtual_user_proxy_info_history 2 201001 1
 12476 __all_virtual_user_proxy_role_info 2 201001 1
 12477 __all_virtual_user_proxy_role_info_history 2 201001 1
+12480 __all_virtual_service 2 201001 1
 12481 __all_virtual_tenant_resource_limit 2 201001 1
 12482 __all_virtual_tenant_resource_limit_detail 2 201001 1
 12487 __all_virtual_nic_info 2 201001 1
@@ -1175,6 +1177,8 @@ select 0xffffffffff & table_id, table_name, table_type, database_id, part_num fr
 21543 GV$OB_TRACEPOINT_INFO 1 201001 1
 21544 V$OB_TRACEPOINT_INFO 1 201001 1
 21545 V$OB_COMPATIBILITY_CONTROL 1 201001 1
+21548 DBA_OB_SERVICES 1 201001 1
+21549 CDB_OB_SERVICES 1 201001 1
 21550 GV$OB_TENANT_RESOURCE_LIMIT 1 201001 1
 21551 V$OB_TENANT_RESOURCE_LIMIT 1 201001 1
 21552 GV$OB_TENANT_RESOURCE_LIMIT_DETAIL 1 201001 1
diff --git a/unittest/share/schema/mock_schema_service.h b/unittest/share/schema/mock_schema_service.h
index 9a157ca4c3..d9de903b36 100644
--- a/unittest/share/schema/mock_schema_service.h
+++ b/unittest/share/schema/mock_schema_service.h
@@ -110,10 +110,9 @@ public:
     int ret = OB_SUCCESS;
     UNUSED(force_fallback);
 
-    bool is_standby_cluster = false;
     if (OB_FAIL(guard.reset())) {
      SHARE_SCHEMA_LOG(WARN, "fail to reset guard", K(ret));
-    } else if (OB_FAIL(guard.init(is_standby_cluster))) {
+    } else if (OB_FAIL(guard.init())) {
      SHARE_SCHEMA_LOG(WARN, "fail to init guard", K(ret));
     } else {
      ObArray tenant_schemas;