From 267e78078763ea33acfb179249079ff3bcb5ccb3 Mon Sep 17 00:00:00 2001
From: zhangzhenyuyu
Date: Mon, 12 Aug 2024 10:05:52 +0000
Subject: [PATCH] [FEAT MERGE] Log stream replica operation and maintenance
 commands
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 src/observer/ob_rpc_processor_simple.cpp | 25 +
 src/observer/ob_rpc_processor_simple.h | 1 +
 src/observer/ob_srv_xlator_rootserver.cpp | 1 +
 src/observer/ob_srv_xlator_storage.cpp | 1 +
 src/rootserver/CMakeLists.txt | 1 +
 src/rootserver/ob_admin_drtask_util.cpp | 2 +-
 src/rootserver/ob_disaster_recovery_info.cpp | 64 +-
 src/rootserver/ob_disaster_recovery_info.h | 16 +-
 src/rootserver/ob_disaster_recovery_task.cpp | 690 +++++++---
 src/rootserver/ob_disaster_recovery_task.h | 109 +-
 .../ob_disaster_recovery_task_mgr.cpp | 131 +-
 .../ob_disaster_recovery_task_mgr.h | 11 +-
 ..._disaster_recovery_task_table_operator.cpp | 273 ++++
 ...ob_disaster_recovery_task_table_operator.h | 87 ++
 ...b_disaster_recovery_task_table_updater.cpp | 29 +-
 .../ob_disaster_recovery_worker.cpp | 1152 ++++++++++++++++-
 src/rootserver/ob_disaster_recovery_worker.h | 133 ++
 src/rootserver/ob_root_service.cpp | 89 ++
 src/rootserver/ob_root_service.h | 2 +
 src/rootserver/ob_rs_rpc_processor.h | 1 +
 src/rootserver/ob_system_admin_util.cpp | 70 +
 src/rootserver/ob_system_admin_util.h | 11 +
 .../ob_inner_table_schema.12451_12500.cpp | 468 +++++++
 .../ob_inner_table_schema.15401_15450.cpp | 434 +++++++
 .../ob_inner_table_schema.21301_21350.cpp | 4 +-
 .../ob_inner_table_schema.21501_21550.cpp | 100 ++
 .../ob_inner_table_schema.25201_25250.cpp | 2 +-
 .../ob_inner_table_schema.25251_25300.cpp | 50 +
 .../ob_inner_table_schema.501_550.cpp | 484 +++++++
 .../ob_inner_table_schema.50501_50550.cpp | 135 ++
 .../ob_inner_table_schema.60501_60550.cpp | 90 ++
 src/share/inner_table/ob_inner_table_schema.h | 49 +-
 .../inner_table/ob_inner_table_schema.lob.cpp | 2 +-
 .../ob_inner_table_schema_constants.h | 16 +
 .../inner_table/ob_inner_table_schema_def.py | 253 +++-
 .../ob_inner_table_schema_misc.ipp | 24 +-
 src/share/inner_table/table_id_to_name | 9 +
 src/share/ob_common_rpc_proxy.h | 1 +
 src/share/ob_debug_sync_point.h | 5 +
 src/share/ob_rpc_struct.cpp | 293 +++++
 src/share/ob_rpc_struct.h | 191 +++
 src/share/ob_srv_rpc_proxy.h | 1 +
 src/share/unit/ob_unit_info.cpp | 1 +
 .../engine/cmd/ob_alter_system_executor.cpp | 20 +
 src/sql/engine/cmd/ob_alter_system_executor.h | 2 +
 src/sql/executor/ob_cmd_executor.cpp | 4 +
 .../parser/non_reserved_keywords_mysql_mode.c | 3 +
 src/sql/parser/sql_parser_mysql_mode.y | 82 +-
 .../privilege_check/ob_privilege_check.cpp | 3 +-
 .../resolver/cmd/ob_alter_system_resolver.cpp | 532 ++++++++
 .../resolver/cmd/ob_alter_system_resolver.h | 23 +
 src/sql/resolver/cmd/ob_alter_system_stmt.h | 12 +
 src/sql/resolver/ob_resolver.cpp | 24 +
 src/sql/resolver/ob_stmt_type.h | 2 +-
 .../ob_ls_complete_migration.cpp | 16 +-
 .../high_availability/ob_ls_migration.cpp | 57 +-
 .../ob_ls_migration_handler.cpp | 213 ++-
 .../ob_ls_migration_handler.h | 6 +
 .../ob_ls_prepare_migration.cpp | 26 +-
 .../high_availability/ob_ls_restore.cpp | 19 +-
 .../high_availability/ob_storage_ha_dag.cpp | 38 +
 .../high_availability/ob_storage_ha_dag.h | 8 +-
 .../ob_storage_ha_tablet_builder.cpp | 52 +-
 .../ob_storage_ha_tablet_builder.h | 6 +-
 .../ob_tablet_group_restore.cpp | 17 +-
 .../r/mysql/information_schema.result | 8 +
 .../r/mysql/desc_sys_views_in_mysql.result | 32 +
.../r/mysql/desc_sys_views_in_sys.result | 65 + .../mysql/desc_virtual_table_in_mysql.result | 31 + .../r/mysql/desc_virtual_table_in_sys.result | 31 + .../r/mysql/inner_table_overall.result | 4 + unittest/sql/parser/print_parser_tree.result | 199 +++ unittest/sql/parser/test_parser.result | 862 ++++++++++++ unittest/sql/parser/test_parser.test | 25 + 74 files changed, 7531 insertions(+), 402 deletions(-) create mode 100644 src/rootserver/ob_disaster_recovery_task_table_operator.cpp create mode 100644 src/rootserver/ob_disaster_recovery_task_table_operator.h diff --git a/src/observer/ob_rpc_processor_simple.cpp b/src/observer/ob_rpc_processor_simple.cpp index 22cf0d85d..ab56b6d19 100644 --- a/src/observer/ob_rpc_processor_simple.cpp +++ b/src/observer/ob_rpc_processor_simple.cpp @@ -141,6 +141,31 @@ int ObRpcCheckBackupSchuedulerWorkingP::process() return ret; } +int ObRpcLSCancelReplicaP::process() +{ + int ret = OB_SUCCESS; + bool is_exist = false; + uint64_t tenant_id = arg_.get_tenant_id(); + MAKE_TENANT_SWITCH_SCOPE_GUARD(guard); + if (tenant_id != MTL_ID()) { + if (OB_FAIL(guard.switch_to(tenant_id))) { + LOG_WARN("failed to switch to tenant", K(ret), K(tenant_id)); + } + } + if (OB_SUCC(ret)) { + if (OB_FAIL(ObStorageHACancelDagNetUtils::cancel_task(arg_.get_ls_id(), arg_.get_task_id()))) { + LOG_WARN("failed to cancel task", K(ret), K(arg_)); + } + } + if (OB_FAIL(ret)) { + SERVER_EVENT_ADD("storage_ha", "cancel storage ha task failed", + "tenant_id", tenant_id, + "ls_id", arg_.get_ls_id().id(), + "task_id", arg_.get_task_id(), + "result", ret); + } + return ret; +} int ObRpcLSMigrateReplicaP::process() { diff --git a/src/observer/ob_rpc_processor_simple.h b/src/observer/ob_rpc_processor_simple.h index 0317bdb7a..7424f13f6 100644 --- a/src/observer/ob_rpc_processor_simple.h +++ b/src/observer/ob_rpc_processor_simple.h @@ -164,6 +164,7 @@ OB_DEFINE_PROCESSOR_S(Srv, OB_DELETE_BACKUP_LS_TASK_RES, ObRpcBackupCleanLSResP) OB_DEFINE_PROCESSOR_S(Srv, OB_BACKUP_LS_DATA_RES, ObRpcBackupLSDataResP); OB_DEFINE_PROCESSOR_S(Srv, OB_NOTIFY_ARCHIVE, ObRpcNotifyArchiveP); +OB_DEFINE_PROCESSOR_S(Srv, OB_LS_CANCEL_REPLICA_TASK, ObRpcLSCancelReplicaP); OB_DEFINE_PROCESSOR_S(Srv, OB_LS_MIGRATE_REPLICA, ObRpcLSMigrateReplicaP); OB_DEFINE_PROCESSOR_S(Srv, OB_LS_ADD_REPLICA, ObRpcLSAddReplicaP); OB_DEFINE_PROCESSOR_S(Srv, OB_LS_TYPE_TRANSFORM, ObRpcLSTypeTransformP); diff --git a/src/observer/ob_srv_xlator_rootserver.cpp b/src/observer/ob_srv_xlator_rootserver.cpp index 8a1fe117e..e21e510e8 100644 --- a/src/observer/ob_srv_xlator_rootserver.cpp +++ b/src/observer/ob_srv_xlator_rootserver.cpp @@ -230,6 +230,7 @@ void oceanbase::observer::init_srv_xlator_for_rootserver(ObSrvRpcXlator *xlator) RPC_PROCESSOR(rootserver::ObRpcAdminReloadZoneP, *gctx_.root_service_); RPC_PROCESSOR(rootserver::ObRpcAdminClearMergeErrorP, *gctx_.root_service_); RPC_PROCESSOR(rootserver::ObRpcAdminMigrateUnitP, *gctx_.root_service_); + RPC_PROCESSOR(rootserver::ObRpcAdminAlterLSReplicaP, *gctx_.root_service_); RPC_PROCESSOR(rootserver::ObRpcAdminUpgradeVirtualSchemaP, *gctx_.root_service_); RPC_PROCESSOR(rootserver::ObRpcRunJobP, *gctx_.root_service_); RPC_PROCESSOR(rootserver::ObRpcAdminRefreshIOCalibrationP, *gctx_.root_service_); diff --git a/src/observer/ob_srv_xlator_storage.cpp b/src/observer/ob_srv_xlator_storage.cpp index cef197777..a975ed8f7 100644 --- a/src/observer/ob_srv_xlator_storage.cpp +++ b/src/observer/ob_srv_xlator_storage.cpp @@ -108,6 +108,7 @@ void 
oceanbase::observer::init_srv_xlator_for_storage(ObSrvRpcXlator *xlator) { RPC_PROCESSOR(ObRpcRemoteWriteDDLRedoLogP, gctx_); RPC_PROCESSOR(ObRpcRemoteWriteDDLCommitLogP, gctx_); RPC_PROCESSOR(ObRpcRemoteWriteDDLIncCommitLogP, gctx_); + RPC_PROCESSOR(ObRpcLSCancelReplicaP, gctx_); RPC_PROCESSOR(ObRpcLSMigrateReplicaP, gctx_); RPC_PROCESSOR(ObRpcLSAddReplicaP, gctx_); RPC_PROCESSOR(ObRpcLSTypeTransformP, gctx_); diff --git a/src/rootserver/CMakeLists.txt b/src/rootserver/CMakeLists.txt index 60e0cac88..4aa3609ae 100644 --- a/src/rootserver/CMakeLists.txt +++ b/src/rootserver/CMakeLists.txt @@ -27,6 +27,7 @@ ob_set_subtarget(ob_rootserver common ob_bootstrap.cpp ob_admin_drtask_util.cpp ob_disaster_recovery_task_table_updater.cpp + ob_disaster_recovery_task_table_operator.cpp ob_balance_group_ls_stat_operator.cpp ob_disaster_recovery_info.cpp ob_disaster_recovery_worker.cpp diff --git a/src/rootserver/ob_admin_drtask_util.cpp b/src/rootserver/ob_admin_drtask_util.cpp index ac1e6e292..fdea3695b 100644 --- a/src/rootserver/ob_admin_drtask_util.cpp +++ b/src/rootserver/ob_admin_drtask_util.cpp @@ -517,7 +517,7 @@ int ObAdminDRTaskUtil::execute_remove_nonpaxos_task_( } if (OB_SUCC(ret)) { // rpc is send, log task start, task finish will be recorded later - ROOTSERVICE_EVENT_ADD("disaster_recovery", drtasklog::START_REMOVE_LS_PAXOS_REPLICA_STR, + ROOTSERVICE_EVENT_ADD("disaster_recovery", drtasklog::START_REMOVE_LS_NON_PAXOS_REPLICA_STR, "tenant_id", remove_non_paxos_arg.tenant_id_, "ls_id", remove_non_paxos_arg.ls_id_.id(), "task_id", ObCurTraceId::get_trace_id_str(), diff --git a/src/rootserver/ob_disaster_recovery_info.cpp b/src/rootserver/ob_disaster_recovery_info.cpp index faefa1b71..2fa660c24 100644 --- a/src/rootserver/ob_disaster_recovery_info.cpp +++ b/src/rootserver/ob_disaster_recovery_info.cpp @@ -547,7 +547,7 @@ int DRLSInfo::get_leader( int DRLSInfo::get_leader_and_member_list( common::ObAddr &leader_addr, common::ObMemberList &member_list, - GlobalLearnerList &learner_list) + GlobalLearnerList &learner_list) const { int ret = OB_SUCCESS; const ObLSReplica *leader_replica = nullptr; @@ -606,4 +606,66 @@ int DRLSInfo::get_default_data_source( leader_replica->get_memstore_percent()); } return ret; +} + +int DRLSInfo::get_member_by_server( + const common::ObAddr& server_addr, + ObMember &member) const +{ + int ret = OB_SUCCESS; + member.reset(); + common::ObAddr leader_addr; // not used + GlobalLearnerList learner_list; + common::ObMemberList member_list; + if (OB_UNLIKELY(!inited_)) { + ret = OB_NOT_INIT; + LOG_WARN("not init", KR(ret)); + } else if (OB_UNLIKELY(!server_addr.is_valid())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), K(server_addr)); + } else if (OB_FAIL(get_leader_and_member_list(leader_addr, member_list, learner_list))) { + LOG_WARN("fail to get leader and member list", KR(ret), K(server_addr)); + } else if (member_list.contains(server_addr)) { + if (OB_FAIL(member_list.get_member_by_addr(server_addr, member))) { + LOG_WARN("fail to get member by addr", KR(ret), K(server_addr), K(member_list)); + } + } else if (learner_list.contains(server_addr)) { + if (OB_FAIL(learner_list.get_learner_by_addr(server_addr, member))) { + LOG_WARN("fail to get member by addr", KR(ret), K(server_addr), K(learner_list)); + } + } else { + ret = OB_ENTRY_NOT_EXIST; + LOG_WARN("fail to find server in leader member list and learner list", + KR(ret), K(server_addr), K(learner_list), K(member_list)); + } + return ret; +} + +int 
DRLSInfo::check_replica_exist_and_get_ls_replica( + const common::ObAddr& server_addr, + share::ObLSReplica& ls_replica) const +{ + int ret = OB_SUCCESS; + const share::ObLSReplica *ls_replica_ptr = nullptr; + ls_replica.reset(); + if (OB_UNLIKELY(!inited_)) { + ret = OB_NOT_INIT; + LOG_WARN("not init", KR(ret)); + } else if (OB_UNLIKELY(!server_addr.is_valid())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), K(server_addr)); + } else if (OB_FAIL(inner_ls_info_.find(server_addr, ls_replica_ptr))) { + if (OB_ENTRY_NOT_EXIST != ret) { + LOG_WARN("fail to find replica by server", KR(ret), K(server_addr), K(inner_ls_info_)); + } else { + LOG_INFO("dose not have replica", KR(ret), K(server_addr), K(inner_ls_info_)); + ret = OB_SUCCESS; + } + } else if (OB_ISNULL(ls_replica_ptr)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("ls_replica_ptr is null", KR(ret), K(server_addr), KP(ls_replica_ptr), K(inner_ls_info_)); + } else if (OB_FAIL(ls_replica.assign(*ls_replica_ptr))) { + LOG_WARN("ls_replica assign failed", KR(ret), K(server_addr), KP(ls_replica_ptr)); + } + return ret; } \ No newline at end of file diff --git a/src/rootserver/ob_disaster_recovery_info.h b/src/rootserver/ob_disaster_recovery_info.h index 7340f4cca..9d3cfaba1 100644 --- a/src/rootserver/ob_disaster_recovery_info.h +++ b/src/rootserver/ob_disaster_recovery_info.h @@ -204,7 +204,7 @@ public: int get_leader_and_member_list( common::ObAddr &leader_addr, common::ObMemberList &member_list, - GlobalLearnerList &learner_list); + GlobalLearnerList &learner_list) const; // get data_source from leader replcia // @param [out] data_source, leader replica @@ -212,6 +212,20 @@ public: int get_default_data_source( ObReplicaMember &data_source, int64_t &data_size) const; + + // get member by server address in leader's learner list and member list + // @param [in] server_addr, which server the member in + // @param [out] member, target member + int get_member_by_server( + const common::ObAddr& server_addr, + ObMember &member) const; + + // check and get if there is a replica on the target server + // @param [in] server_addr, which server the replica in + // @param [out] ls_replica, target replic + int check_replica_exist_and_get_ls_replica( + const common::ObAddr& server_addr, + share::ObLSReplica& ls_replica) const; private: int construct_filtered_ls_info_to_use_( const share::ObLSInfo &input_ls_info, diff --git a/src/rootserver/ob_disaster_recovery_task.cpp b/src/rootserver/ob_disaster_recovery_task.cpp index b39b58e25..1ba8c83b3 100644 --- a/src/rootserver/ob_disaster_recovery_task.cpp +++ b/src/rootserver/ob_disaster_recovery_task.cpp @@ -43,6 +43,96 @@ using namespace share::schema; namespace rootserver { +OB_SERIALIZE_MEMBER( + ObDRLSReplicaTaskStatus, + status_); + +static const char* dr_ls_replica_task_status_strs[] = { + "INPROGRESS", + "COMPLETED", + "FAILED", + "CANCELED", +}; + +const char* ObDRLSReplicaTaskStatus::get_status_str() const { + STATIC_ASSERT(ARRAYSIZEOF(dr_ls_replica_task_status_strs) == (int64_t)MAX_STATUS, + "dr_ls_replica_task_status_strs string array size mismatch enum DRLSReplicaTaskStatus count"); + const char *str = NULL; + if (status_ >= INPROGRESS && status_ < MAX_STATUS) { + str = dr_ls_replica_task_status_strs[status_]; + } else { + LOG_WARN_RET(OB_ERR_UNEXPECTED, "invalid DRLSReplicaTaskStatus", K_(status)); + } + return str; +} + +int64_t ObDRLSReplicaTaskStatus::to_string(char *buf, const int64_t buf_len) const +{ + int64_t pos = 0; + J_OBJ_START(); + J_KV(K_(status), "status", 
get_status_str()); + J_OBJ_END(); + return pos; +} + +void ObDRLSReplicaTaskStatus::assign(const ObDRLSReplicaTaskStatus &other) +{ + if (this != &other) { + status_ = other.status_; + } +} + +int ObDRLSReplicaTaskStatus::parse_from_string(const ObString &status) +{ + int ret = OB_SUCCESS; + bool found = false; + STATIC_ASSERT(ARRAYSIZEOF(dr_ls_replica_task_status_strs) == (int64_t)MAX_STATUS, + "dr_ls_replica_task_status_strs string array size mismatch enum DRLSReplicaTaskStatus count"); + for (int64_t i = 0; i < ARRAYSIZEOF(dr_ls_replica_task_status_strs) && !found; i++) { + if (0 == status.case_compare(dr_ls_replica_task_status_strs[i])) { + status_ = static_cast(i); + found = true; + break; + } + } + if (!found) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("fail to parse status from string", KR(ret), K(status), K_(status)); + } + return ret; +} + +int build_execute_result( + const int ret_code, + const ObDRTaskRetComment &ret_comment, + const int64_t start_time, + ObSqlString &execute_result) +{ + int ret = OB_SUCCESS; + const int64_t now = ObTimeUtility::current_time(); + const int64_t elapsed = now - start_time; + execute_result.reset(); + if (OB_UNLIKELY(ObDRTaskRetComment::MAX == ret_comment || start_time <= 0)) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), K(ret_comment), K(start_time)); + } else if (OB_FAIL(execute_result.append_fmt( + "ret:%d, %s; elapsed:%ld;", ret_code, common::ob_error_name(ret_code), elapsed))) { + LOG_WARN("fail to append to execute_result", KR(ret), K(ret_code), K(elapsed)); + } else if (OB_SUCCESS != ret_code + && OB_FAIL(execute_result.append_fmt(" comment:%s;", + ob_disaster_recovery_task_ret_comment_strs(ret_comment)))) { + LOG_WARN("fail to append ret comment to execute result", KR(ret), K(ret_comment)); + } + return ret; +} + +bool is_manual_dr_task_data_version_match(uint64_t tenant_data_version) +{ + return ((tenant_data_version >= DATA_VERSION_4_3_3_0) + || (tenant_data_version >= MOCK_DATA_VERSION_4_2_3_0 && tenant_data_version < DATA_VERSION_4_3_0_0) + || (tenant_data_version >= MOCK_DATA_VERSION_4_2_1_8 && tenant_data_version < DATA_VERSION_4_2_2_0)); +} + int ObDstReplica::assign( const uint64_t unit_id, const uint64_t unit_group_id, @@ -275,34 +365,82 @@ uint64_t ObDRTaskKey::inner_hash() const return hash_val; } +int ObDRTask::fill_dml_splicer( + share::ObDMLSqlSplicer &dml_splicer) const +{ + int ret = OB_SUCCESS; + if (OB_UNLIKELY(!is_valid())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid task", KR(ret)); + } else if (OB_FAIL(dml_splicer.add_pk_column("tenant_id", tenant_id_))) { + LOG_WARN("add column failed", KR(ret), K(tenant_id_)); + } else if (OB_FAIL(dml_splicer.add_pk_column("ls_id", ls_id_.id()))) { + LOG_WARN("add column failed", KR(ret), K(ls_id_)); + } else if (OB_FAIL(dml_splicer.add_pk_column("task_id", task_id_))) { + LOG_WARN("add column failed", KR(ret), K(task_id_)); + } else if (OB_FAIL(dml_splicer.add_column("task_status", ObDRLSReplicaTaskStatus(ObDRLSReplicaTaskStatus::INPROGRESS).get_status_str()))) { + // it will only be called when the task is started + LOG_WARN("add column failed", KR(ret)); + } else if (OB_FAIL(dml_splicer.add_column("priority", static_cast(priority_)))) { + LOG_WARN("add column failed", KR(ret), K(priority_)); + } else if (OB_FAIL(dml_splicer.add_time_column("generate_time", generate_time_))) { + LOG_WARN("add column failed", KR(ret), K(generate_time_)); + } else if (OB_FAIL(dml_splicer.add_time_column("schedule_time", schedule_time_))) { + LOG_WARN("add column 
failed", KR(ret), K(schedule_time_)); + } else if (OB_FAIL(dml_splicer.add_column("comment", comment_.ptr()))) { + LOG_WARN("add column failed", KR(ret), K(comment_)); + } + return ret; +} + +int ObDRTask::fill_dml_splicer_for_new_column( + share::ObDMLSqlSplicer &dml_splicer, + const common::ObAddr &force_data_src) const +{ + // force_data_src may be invalid + int ret = OB_SUCCESS; + uint64_t tenant_data_version = 0; + char force_data_source_ip[OB_MAX_SERVER_ADDR_SIZE] = ""; + if (OB_UNLIKELY(!is_valid())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid task", KR(ret)); + } else if (ObDRTaskType::MAX_TYPE == get_disaster_recovery_task_type()) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("unexpected task type", KR(ret)); + } else if (OB_FAIL(GET_MIN_DATA_VERSION(gen_meta_tenant_id(tenant_id_), tenant_data_version))) { + LOG_WARN("fail to get min data version", KR(ret), K(tenant_id_)); + } else if (!is_manual_dr_task_data_version_match(tenant_data_version) + && (is_manual_task() || force_data_src.is_valid())) { + ret = OB_NOT_SUPPORTED; + LOG_WARN("manual operation is not suppported when tenant's data version is not match", + KR(ret), K(tenant_data_version), K(is_manual_task()), K(force_data_src)); + } else if (is_manual_dr_task_data_version_match(tenant_data_version)) { + if (!is_manual_task() && force_data_src.is_valid()) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("task invoke and data_source is not match", KR(ret), K(is_manual_task()), K(force_data_src)); + } else if (OB_FAIL(dml_splicer.add_column("is_manual", is_manual_task()))) { + LOG_WARN("add column failed", KR(ret), K(is_manual_task())); + } else if (ObDRTaskType::LS_ADD_REPLICA == get_disaster_recovery_task_type() + || ObDRTaskType::LS_MIGRATE_REPLICA == get_disaster_recovery_task_type()) { + if (false == force_data_src.ip_to_string(force_data_source_ip, sizeof(force_data_source_ip))) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("convert data_src_server ip to string failed", KR(ret), K(force_data_src)); + } else if (OB_FAIL(dml_splicer.add_column("data_source_svr_ip", force_data_source_ip))) { + LOG_WARN("add column failed", KR(ret), K(force_data_source_ip)); + } else if (OB_FAIL(dml_splicer.add_column("data_source_svr_port", force_data_src.get_port()))) { + LOG_WARN("add column failed", KR(ret), K(force_data_src)); + } + } + } + return ret; +} + bool ObDRTask::is_already_timeout() const { int64_t now = ObTimeUtility::current_time(); return schedule_time_ + GCONF.balancer_task_timeout < now; } -int ObDRTask::build_execute_result( - const int ret_code, - const ObDRTaskRetComment &ret_comment, - ObSqlString &execute_result) const -{ - int ret = OB_SUCCESS; - const int64_t now = ObTimeUtility::current_time(); - const int64_t elapsed = (get_execute_time() > 0) - ? 
(now - get_execute_time()) - : (now - get_schedule_time()); - execute_result.reset(); - if (OB_FAIL(execute_result.append_fmt( - "ret:%d, %s; elapsed:%ld;", ret_code, common::ob_error_name(ret_code), elapsed))) { - LOG_WARN("fail to append to execute_result", KR(ret), K(ret_code), K(elapsed)); - } else if (OB_SUCCESS != ret_code - && OB_FAIL(execute_result.append_fmt(" ret_comment:%s;", - ob_disaster_recovery_task_ret_comment_strs(ret_comment)))) { - LOG_WARN("fail to append ret comment to execute result", KR(ret), K(ret_comment)); - } - return ret; -} - int ObDRTask::set_task_key( const ObDRTaskKey &task_key) { @@ -485,7 +623,9 @@ int ObMigrateLSReplicaTask::log_execute_result( { int ret = OB_SUCCESS; ObSqlString execute_result; - if (OB_FAIL(build_execute_result(ret_code, ret_comment, execute_result))) { + int64_t start_time = execute_time_ > 0 ? execute_time_ : schedule_time_; + // when rs change leader, execute_time_ is 0, only schedule_time_ is valid + if (OB_FAIL(build_execute_result(ret_code, ret_comment, start_time, execute_result))) { LOG_WARN("fail to build execute result", KR(ret), K(ret_code), K(ret_comment)); } else { ROOTSERVICE_EVENT_ADD("disaster_recovery", get_log_finish_str(), @@ -562,8 +702,6 @@ int ObMigrateLSReplicaTask::fill_dml_splicer( char dest_ip[OB_MAX_SERVER_ADDR_SIZE] = ""; char task_id[OB_TRACE_STAT_BUFFER_SIZE] = ""; char task_type[MAX_DISASTER_RECOVERY_TASK_TYPE_LENGTH] = "MIGRATE REPLICA"; - int64_t transmit_data_size = 0; - if (OB_UNLIKELY(!is_valid())) { ret = OB_INVALID_ARGUMENT; LOG_WARN("invalid task", KR(ret)); @@ -573,30 +711,22 @@ int ObMigrateLSReplicaTask::fill_dml_splicer( } else if (false == get_dst_server().ip_to_string(dest_ip, sizeof(dest_ip))) { ret = OB_INVALID_ARGUMENT; LOG_WARN("convert dest_server ip to string failed", KR(ret), "dest_server", get_dst_server()); - } else if (OB_FAIL(get_execute_transmit_size(transmit_data_size))) { - LOG_WARN("fail to get transmit_data_size", KR(ret), K(transmit_data_size)); - } else { - if (OB_FAIL(dml_splicer.add_pk_column("tenant_id", tenant_id_)) - || OB_FAIL(dml_splicer.add_pk_column("ls_id", ls_id_.id())) - || OB_FAIL(dml_splicer.add_pk_column("task_type", task_type)) - || OB_FAIL(dml_splicer.add_pk_column("task_id", task_id_)) - || OB_FAIL(dml_splicer.add_column("task_status", TASK_STATUS)) - || OB_FAIL(dml_splicer.add_column("priority", static_cast(ObDRTaskPriority::HIGH_PRI))) - || OB_FAIL(dml_splicer.add_column("target_replica_svr_ip", dest_ip)) - || OB_FAIL(dml_splicer.add_column("target_replica_svr_port", get_dst_server().get_port())) - || OB_FAIL(dml_splicer.add_column("target_paxos_replica_number", get_paxos_replica_number())) - || OB_FAIL(dml_splicer.add_column("target_replica_type", ob_replica_type_strs(get_dst_replica().get_member().get_replica_type()))) - || OB_FAIL(dml_splicer.add_column("source_replica_svr_ip", src_ip)) - || OB_FAIL(dml_splicer.add_column("source_replica_svr_port", get_src_member().get_server().get_port())) - || OB_FAIL(dml_splicer.add_column("source_paxos_replica_number", get_paxos_replica_number())) - || OB_FAIL(dml_splicer.add_column("source_replica_type", ob_replica_type_strs(get_src_member().get_replica_type()))) - || OB_FAIL(dml_splicer.add_column("task_exec_svr_ip", dest_ip)) - || OB_FAIL(dml_splicer.add_column("task_exec_svr_port", get_dst_server().get_port())) - || OB_FAIL(dml_splicer.add_time_column("generate_time", generate_time_)) - || OB_FAIL(dml_splicer.add_time_column("schedule_time", schedule_time_)) - || OB_FAIL(dml_splicer.add_column("comment", 
comment_.ptr()))) { - LOG_WARN("add column failed", KR(ret)); - } + } else if (OB_FAIL(ObDRTask::fill_dml_splicer(dml_splicer))) { + LOG_WARN("ObDRTask fill dml splicer failed", KR(ret)); + } else if (OB_FAIL(dml_splicer.add_pk_column("task_type", task_type)) + || OB_FAIL(dml_splicer.add_column("target_replica_svr_ip", dest_ip)) + || OB_FAIL(dml_splicer.add_column("target_replica_svr_port", get_dst_server().get_port())) + || OB_FAIL(dml_splicer.add_column("target_paxos_replica_number", get_paxos_replica_number())) + || OB_FAIL(dml_splicer.add_column("target_replica_type", ob_replica_type_strs(get_dst_replica().get_member().get_replica_type()))) + || OB_FAIL(dml_splicer.add_column("source_replica_svr_ip", src_ip)) + || OB_FAIL(dml_splicer.add_column("source_replica_svr_port", get_src_member().get_server().get_port())) + || OB_FAIL(dml_splicer.add_column("source_paxos_replica_number", get_paxos_replica_number())) + || OB_FAIL(dml_splicer.add_column("source_replica_type", ob_replica_type_strs(get_src_member().get_replica_type()))) + || OB_FAIL(dml_splicer.add_column("task_exec_svr_ip", dest_ip)) + || OB_FAIL(dml_splicer.add_column("task_exec_svr_port", get_dst_server().get_port()))) { + LOG_WARN("add column failed", KR(ret)); + } else if (OB_FAIL(fill_dml_splicer_for_new_column(dml_splicer, get_force_data_src_member().get_server()))) { + LOG_WARN("fill dml_splicer for new column failed", KR(ret)); } return ret; } @@ -704,6 +834,7 @@ int ObMigrateLSReplicaTask::clone( } else { my_task->set_src_member(get_src_member()); my_task->set_data_src_member(get_data_src_member()); + my_task->set_force_data_src_member(get_force_data_src_member()); my_task->set_paxos_replica_number(get_paxos_replica_number()); output_task = my_task; } @@ -775,6 +906,52 @@ int ObMigrateLSReplicaTask::build( return ret; } +int ObMigrateLSReplicaTask::simple_build( + const uint64_t tenant_id, + const share::ObLSID &ls_id, + const share::ObTaskId &task_id, + const ObDstReplica &dst_replica, + const common::ObReplicaMember &src_member, + const common::ObReplicaMember &data_src_member, + const common::ObReplicaMember &force_data_src_member, + const int64_t paxos_replica_number) +{ + int ret = OB_SUCCESS; + ObDRTaskKey task_key; + if (OB_UNLIKELY(!is_valid_tenant_id(tenant_id) + || !ls_id.is_valid_with_tenant(tenant_id) + || !task_id.is_valid() + || !dst_replica.is_valid() + || !src_member.is_valid() + || paxos_replica_number <= 0)) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), K(tenant_id), K(ls_id), K(task_id), K(dst_replica), + K(src_member), K(paxos_replica_number)); + } else if (OB_FAIL(task_key.init(tenant_id, ls_id.id(), 0, 0, + ObDRTaskKeyType::FORMAL_DR_KEY))) { + LOG_WARN("fail to init task key", KR(ret), K(tenant_id), K(ls_id)); + } else if (OB_FAIL(ObDRTask::build( + task_key, + tenant_id, + ls_id, + task_id, + 0,/*schedule_time_us*/ 0,/*generate_time_us*/ + GCONF.cluster_id, 0,/*transmit_data_size*/ + obrpc::ObAdminClearDRTaskArg::TaskType::MANUAL, + ObDRTaskPriority::HIGH_PRI, + ObString(drtask::ALTER_SYSTEM_COMMAND_MIGRATE_REPLICA)))) { + LOG_WARN("fail to build ObDRTask", KR(ret), K(task_key), K(tenant_id), K(ls_id), K(task_id)); + } else if (OB_FAIL(dst_replica_.assign(dst_replica))) { + LOG_WARN("fail to assign dst replica", KR(ret), K(dst_replica)); + } else { + src_member_ = src_member; + data_src_member_ = data_src_member; + force_data_src_member_ = force_data_src_member; + paxos_replica_number_ = paxos_replica_number; + } + return ret; +} + int 
ObMigrateLSReplicaTask::build_task_from_sql_result( const sqlclient::ObMySQLResult &res) { @@ -792,6 +969,9 @@ int ObMigrateLSReplicaTask::build_task_from_sql_result( int64_t schedule_time_us = 0; int64_t generate_time_us = 0; common::ObString comment; + int64_t data_source_port = 0; + common::ObString data_source_ip; + bool is_manual = false; //STEP1_0: read certain members from sql result EXTRACT_INT_FIELD_MYSQL(res, "tenant_id", tenant_id, uint64_t); { @@ -811,6 +991,11 @@ int ObMigrateLSReplicaTask::build_task_from_sql_result( (void)GET_COL_IGNORE_NULL(res.get_int, "target_replica_svr_port", dest_port); (void)GET_COL_IGNORE_NULL(res.get_int, "source_paxos_replica_number", src_paxos_replica_number); (void)GET_COL_IGNORE_NULL(res.get_varchar, "comment", comment); + EXTRACT_INT_FIELD_MYSQL_WITH_DEFAULT_VALUE(res, "data_source_svr_port", data_source_port, + int64_t, true/*skip null error*/, true/*skip column error*/, 0); + EXTRACT_VARCHAR_FIELD_MYSQL_WITH_DEFAULT_VALUE(res, "data_source_svr_ip", data_source_ip, + true/*skip null error*/, true/*skip column error*/, "0.0.0.0"); + EXTRACT_BOOL_FIELD_MYSQL_SKIP_RET(res, "is_manual", is_manual); //STEP2_0: make necessary members to build a task ObDRTaskKey task_key; common::ObAddr src_server; @@ -821,6 +1006,7 @@ int ObMigrateLSReplicaTask::build_task_from_sql_result( share::ObTaskId task_id_to_set; ObSqlString comment_to_set; ObSqlString task_id_sqlstring_format; + common::ObAddr force_data_source; if (OB_FAIL(ret)) { } else if (OB_FAIL(comment_to_set.assign(comment))) { @@ -842,6 +1028,9 @@ int ObMigrateLSReplicaTask::build_task_from_sql_result( } else if (false == dest_server.set_ip_addr(dest_ip, static_cast(dest_port))) { ret = OB_ERR_UNEXPECTED; LOG_WARN("invalid server address", K(dest_ip), K(dest_port)); + } else if (false == force_data_source.set_ip_addr(data_source_ip, static_cast(data_source_port))) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("invalid server address", K(data_source_ip), K(data_source_port)); } else if (OB_FAIL(dst_replica.assign( 0/*unit id*/, 0/*unit group id*/, @@ -869,13 +1058,13 @@ int ObMigrateLSReplicaTask::build_task_from_sql_result( generate_time_us, //(in used) GCONF.cluster_id, //(not used)cluster_id transmit_data_size, //(not used) - obrpc::ObAdminClearDRTaskArg::TaskType::AUTO,//(not used)invoked_source + is_manual ? obrpc::ObAdminClearDRTaskArg::TaskType::MANUAL : obrpc::ObAdminClearDRTaskArg::TaskType::AUTO,//(not used)invoked_source priority_to_set, //(not used) comment_to_set.ptr(), //comment dst_replica, //(in used)dest_server ObReplicaMember(src_server, 0), //(in used)src_server ObReplicaMember(src_server, 0), //(not used)data_src_member - ObReplicaMember(src_server, 0), //(not used)data_src_member + ObReplicaMember(force_data_source, 0), //(not used)force_data_source src_paxos_replica_number))) { //(not used) LOG_WARN("fail to build a ObMigrateLSReplicaTask", KR(ret)); } else { @@ -939,7 +1128,8 @@ int ObAddLSReplicaTask::log_execute_result( { int ret = OB_SUCCESS; ObSqlString execute_result; - if (OB_FAIL(build_execute_result(ret_code, ret_comment, execute_result))) { + int64_t start_time = execute_time_ > 0 ? 
execute_time_ : schedule_time_; + if (OB_FAIL(build_execute_result(ret_code, ret_comment, start_time, execute_result))) { LOG_WARN("fail to build execute result", KR(ret), K(ret_code), K(ret_comment)); } else { ROOTSERVICE_EVENT_ADD("disaster_recovery", get_log_finish_str(), @@ -1019,8 +1209,6 @@ int ObAddLSReplicaTask::fill_dml_splicer( char dest_ip[OB_MAX_SERVER_ADDR_SIZE] = ""; char task_id[OB_TRACE_STAT_BUFFER_SIZE] = ""; char task_type[MAX_DISASTER_RECOVERY_TASK_TYPE_LENGTH] = "ADD REPLICA"; - int64_t transmit_data_size = 0; - if (OB_UNLIKELY(!is_valid())) { ret = OB_INVALID_ARGUMENT; LOG_WARN("invalid task", KR(ret)); @@ -1030,30 +1218,22 @@ int ObAddLSReplicaTask::fill_dml_splicer( } else if (false == get_dst_server().ip_to_string(dest_ip, sizeof(dest_ip))) { ret = OB_INVALID_ARGUMENT; LOG_WARN("convert dest_server ip to string failed", KR(ret), "dest_server", get_dst_server()); - } else if (OB_FAIL(get_execute_transmit_size(transmit_data_size))) { - LOG_WARN("fail to get transmit_data_size", KR(ret), K(transmit_data_size)); - } else { - if (OB_FAIL(dml_splicer.add_pk_column("tenant_id", tenant_id_)) - || OB_FAIL(dml_splicer.add_pk_column("ls_id", ls_id_.id())) - || OB_FAIL(dml_splicer.add_pk_column("task_type", task_type)) - || OB_FAIL(dml_splicer.add_pk_column("task_id", task_id_)) - || OB_FAIL(dml_splicer.add_column("task_status", TASK_STATUS)) - || OB_FAIL(dml_splicer.add_column("priority", static_cast(ObDRTaskPriority::HIGH_PRI))) - || OB_FAIL(dml_splicer.add_column("target_replica_svr_ip", dest_ip)) - || OB_FAIL(dml_splicer.add_column("target_replica_svr_port", get_dst_server().get_port())) - || OB_FAIL(dml_splicer.add_column("target_paxos_replica_number", get_paxos_replica_number())) - || OB_FAIL(dml_splicer.add_column("target_replica_type", ob_replica_type_strs(get_dst_replica().get_member().get_replica_type()))) - || OB_FAIL(dml_splicer.add_column("source_replica_svr_ip", src_ip)) - || OB_FAIL(dml_splicer.add_column("source_replica_svr_port", get_data_src_member().get_server().get_port())) - || OB_FAIL(dml_splicer.add_column("source_paxos_replica_number", get_orig_paxos_replica_number())) - || OB_FAIL(dml_splicer.add_column("source_replica_type", ob_replica_type_strs(get_dst_replica().get_member().get_replica_type()))) - || OB_FAIL(dml_splicer.add_column("task_exec_svr_ip", dest_ip)) - || OB_FAIL(dml_splicer.add_column("task_exec_svr_port", get_dst_server().get_port())) - || OB_FAIL(dml_splicer.add_time_column("generate_time", generate_time_)) - || OB_FAIL(dml_splicer.add_time_column("schedule_time", schedule_time_)) - || OB_FAIL(dml_splicer.add_column("comment", comment_.ptr()))) { - LOG_WARN("add column failed", KR(ret)); - } + } else if (OB_FAIL(ObDRTask::fill_dml_splicer(dml_splicer))) { + LOG_WARN("ObDRTask fill dml splicer failed", KR(ret)); + } else if (OB_FAIL(dml_splicer.add_pk_column("task_type", task_type)) + || OB_FAIL(dml_splicer.add_column("target_replica_svr_ip", dest_ip)) + || OB_FAIL(dml_splicer.add_column("target_replica_svr_port", get_dst_server().get_port())) + || OB_FAIL(dml_splicer.add_column("target_paxos_replica_number", get_paxos_replica_number())) + || OB_FAIL(dml_splicer.add_column("target_replica_type", ob_replica_type_strs(get_dst_replica().get_member().get_replica_type()))) + || OB_FAIL(dml_splicer.add_column("source_replica_svr_ip", src_ip)) + || OB_FAIL(dml_splicer.add_column("source_replica_svr_port", get_data_src_member().get_server().get_port())) + || OB_FAIL(dml_splicer.add_column("source_paxos_replica_number", 
get_orig_paxos_replica_number())) + || OB_FAIL(dml_splicer.add_column("source_replica_type", ob_replica_type_strs(get_dst_replica().get_member().get_replica_type()))) + || OB_FAIL(dml_splicer.add_column("task_exec_svr_ip", dest_ip)) + || OB_FAIL(dml_splicer.add_column("task_exec_svr_port", get_dst_server().get_port()))) { + LOG_WARN("add column failed", KR(ret)); + } else if (OB_FAIL(fill_dml_splicer_for_new_column(dml_splicer, get_force_data_src_member().get_server()))) { + LOG_WARN("fill dml_splicer for new column failed", KR(ret)); } return ret; } @@ -1125,7 +1305,8 @@ int ObAddLSReplicaTask::check_paxos_member( LOG_WARN("get invalid replica", K(ret), K(ls_info)); } else if (r->get_server() == dst_replica_.get_server()) { // already check in check online - } else if (r->get_zone() == dst_zone + } else if ((!is_manual_task() && r->get_zone() == dst_zone) + // manual operation allowed mutiple replica in same zone && r->is_in_service() && ObReplicaTypeCheck::is_paxos_replica_V2(r->get_replica_type()) && ObReplicaTypeCheck::is_paxos_replica_V2(dst_replica_.get_replica_type())) { @@ -1165,6 +1346,7 @@ int ObAddLSReplicaTask::clone( LOG_WARN("fail to set dst replica", KR(ret)); } else { my_task->set_data_src_member(get_data_src_member()); + my_task->set_force_data_src_member(get_force_data_src_member()); my_task->set_orig_paxos_replica_number(get_orig_paxos_replica_number()); my_task->set_paxos_replica_number(get_paxos_replica_number()); output_task = my_task; @@ -1235,6 +1417,53 @@ int ObAddLSReplicaTask::build( return ret; } +int ObAddLSReplicaTask::simple_build( + const uint64_t tenant_id, + const share::ObLSID &ls_id, + const share::ObTaskId &task_id, + const ObDstReplica &dst_replica, + const common::ObReplicaMember &data_src_member, + const common::ObReplicaMember &force_data_src_member, + const int64_t orig_paxos_replica_number, + const int64_t paxos_replica_number) +{ + int ret = OB_SUCCESS; + ObDRTaskKey task_key; + if (OB_UNLIKELY(!is_valid_tenant_id(tenant_id) + || !ls_id.is_valid_with_tenant(tenant_id) + || !task_id.is_valid() + || !dst_replica.is_valid() + || paxos_replica_number <= 0 + || orig_paxos_replica_number <= 0)) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), K(tenant_id), K(ls_id), K(task_id), K(dst_replica), + K(orig_paxos_replica_number), K(paxos_replica_number)); + } else if (OB_FAIL(task_key.init(tenant_id, ls_id.id(), 0, 0, + ObDRTaskKeyType::FORMAL_DR_KEY))) { + LOG_WARN("fail to init task key", KR(ret), K(tenant_id), K(ls_id)); + } else if (OB_FAIL(ObDRTask::build( + task_key, + tenant_id, + ls_id, + task_id, + 0,/*schedule_time_us*/ 0,/*generate_time_us*/ + GCONF.cluster_id, 0,/*transmit_data_size*/ + obrpc::ObAdminClearDRTaskArg::TaskType::MANUAL, + ObDRTaskPriority::HIGH_PRI, + ObString(drtask::ALTER_SYSTEM_COMMAND_ADD_REPLICA)))) { + LOG_WARN("fail to build ObDRTask", KR(ret), K(task_key), K(tenant_id), K(ls_id), K(task_id)); + } else if (OB_FAIL(dst_replica_.assign(dst_replica))) { + LOG_WARN("fail to assign dst replica", KR(ret), K(dst_replica)); + } else { + data_src_member_ = data_src_member; + force_data_src_member_ = force_data_src_member; + orig_paxos_replica_number_ = orig_paxos_replica_number; + paxos_replica_number_ = paxos_replica_number; + } + return ret; +} + + int ObAddLSReplicaTask::build_task_from_sql_result( const sqlclient::ObMySQLResult &res) { @@ -1253,6 +1482,9 @@ int ObAddLSReplicaTask::build_task_from_sql_result( int64_t schedule_time_us = 0; int64_t generate_time_us = 0; common::ObString comment; + int64_t 
data_source_port = 0; + common::ObString data_source_ip; + bool is_manual = false; //STEP1_0: read certain members from sql result EXTRACT_INT_FIELD_MYSQL(res, "tenant_id", tenant_id, uint64_t); { @@ -1273,6 +1505,11 @@ int ObAddLSReplicaTask::build_task_from_sql_result( (void)GET_COL_IGNORE_NULL(res.get_int, "source_paxos_replica_number", src_paxos_replica_number); (void)GET_COL_IGNORE_NULL(res.get_int, "target_paxos_replica_number", dest_paxos_replica_number); (void)GET_COL_IGNORE_NULL(res.get_varchar, "comment", comment); + EXTRACT_INT_FIELD_MYSQL_WITH_DEFAULT_VALUE(res, "data_source_svr_port", data_source_port, + int64_t, true/*skip null error*/, true/*skip column error*/, 0); + EXTRACT_VARCHAR_FIELD_MYSQL_WITH_DEFAULT_VALUE(res, "data_source_svr_ip", data_source_ip, + true/*skip null error*/, true/*skip column error*/, "0.0.0.0"); + EXTRACT_BOOL_FIELD_MYSQL_SKIP_RET(res, "is_manual", is_manual); //STEP2_0: make necessary members to build a task ObDRTaskKey task_key; common::ObAddr src_server; @@ -1283,6 +1520,7 @@ int ObAddLSReplicaTask::build_task_from_sql_result( share::ObTaskId task_id_to_set; ObSqlString comment_to_set; ObSqlString task_id_sqlstring_format; + common::ObAddr force_data_source; if (OB_FAIL(ret)) { } else if (OB_FAIL(comment_to_set.assign(comment))) { @@ -1304,6 +1542,9 @@ int ObAddLSReplicaTask::build_task_from_sql_result( } else if (false == dest_server.set_ip_addr(dest_ip, static_cast(dest_port))) { ret = OB_ERR_UNEXPECTED; LOG_WARN("invalid server address", K(dest_ip), K(dest_port)); + } else if (false == force_data_source.set_ip_addr(data_source_ip, static_cast(data_source_port))) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("invalid server address", K(data_source_ip), K(data_source_port)); } else if (OB_FAIL(dst_replica.assign( 0/*unit id*/, 0/*unit group id*/, @@ -1331,12 +1572,12 @@ int ObAddLSReplicaTask::build_task_from_sql_result( generate_time_us, GCONF.cluster_id, //(not used)cluster_id transmit_data_size, //(not used) - obrpc::ObAdminClearDRTaskArg::TaskType::AUTO,//(not used)invoked_source + is_manual ? obrpc::ObAdminClearDRTaskArg::TaskType::MANUAL : obrpc::ObAdminClearDRTaskArg::TaskType::AUTO,//(not used)invoked_source priority_to_set, //(not used) comment_to_set.ptr(), //comments dst_replica, //(in used)dest_server ObReplicaMember(src_server, 0), //(in used)src_server - ObReplicaMember(src_server, 0), //(in used)src_server + ObReplicaMember(force_data_source, 0), //(in used)force_data_source src_paxos_replica_number, //(in used) dest_paxos_replica_number))) { //(in used) LOG_WARN("fail to build a ObAddLSReplicaTask", KR(ret)); @@ -1417,7 +1658,8 @@ int ObLSTypeTransformTask::log_execute_result( { int ret = OB_SUCCESS; ObSqlString execute_result; - if (OB_FAIL(build_execute_result(ret_code, ret_comment, execute_result))) { + int64_t start_time = execute_time_ > 0 ? 
execute_time_ : schedule_time_; + if (OB_FAIL(build_execute_result(ret_code, ret_comment, start_time, execute_result))) { LOG_WARN("fail to build execute result", KR(ret), K(ret_code), K(ret_comment)); } else { ROOTSERVICE_EVENT_ADD("disaster_recovery", get_log_finish_str(), @@ -1494,8 +1736,6 @@ int ObLSTypeTransformTask::fill_dml_splicer( char target_ip[OB_MAX_SERVER_ADDR_SIZE] = ""; char task_id[OB_TRACE_STAT_BUFFER_SIZE] = ""; char task_type[MAX_DISASTER_RECOVERY_TASK_TYPE_LENGTH] = "TYPE TRANSFORM"; - int64_t transmit_data_size = 0; - if (OB_UNLIKELY(!is_valid())) { ret = OB_INVALID_ARGUMENT; LOG_WARN("invalid task", KR(ret)); @@ -1505,30 +1745,22 @@ int ObLSTypeTransformTask::fill_dml_splicer( } else if (false == get_dst_server().ip_to_string(dest_ip, sizeof(dest_ip))) { ret = OB_INVALID_ARGUMENT; LOG_WARN("convert dest_server ip to string failed", KR(ret), "dest_server", get_dst_server()); - } else if (OB_FAIL(get_execute_transmit_size(transmit_data_size))) { - LOG_WARN("fail to get transmit_data_size", KR(ret), K(transmit_data_size)); - } else { - if (OB_FAIL(dml_splicer.add_pk_column("tenant_id", tenant_id_)) - || OB_FAIL(dml_splicer.add_pk_column("ls_id", ls_id_.id())) - || OB_FAIL(dml_splicer.add_pk_column("task_type", task_type)) - || OB_FAIL(dml_splicer.add_pk_column("task_id", task_id_)) - || OB_FAIL(dml_splicer.add_column("task_status", TASK_STATUS)) - || OB_FAIL(dml_splicer.add_column("priority", static_cast(ObDRTaskPriority::HIGH_PRI))) - || OB_FAIL(dml_splicer.add_column("target_replica_svr_ip", dest_ip)) - || OB_FAIL(dml_splicer.add_column("target_replica_svr_port", get_dst_server().get_port())) - || OB_FAIL(dml_splicer.add_column("target_paxos_replica_number", get_paxos_replica_number())) - || OB_FAIL(dml_splicer.add_column("target_replica_type", ob_replica_type_strs(get_dst_replica().get_member().get_replica_type()))) - || OB_FAIL(dml_splicer.add_column("source_replica_svr_ip", src_ip)) - || OB_FAIL(dml_splicer.add_column("source_replica_svr_port", get_src_member().get_server().get_port())) - || OB_FAIL(dml_splicer.add_column("source_paxos_replica_number", get_orig_paxos_replica_number())) - || OB_FAIL(dml_splicer.add_column("source_replica_type", ob_replica_type_strs(get_src_member().get_replica_type()))) - || OB_FAIL(dml_splicer.add_column("task_exec_svr_ip", dest_ip)) - || OB_FAIL(dml_splicer.add_column("task_exec_svr_port", get_dst_server().get_port())) - || OB_FAIL(dml_splicer.add_time_column("generate_time", generate_time_)) - || OB_FAIL(dml_splicer.add_time_column("schedule_time", schedule_time_)) - || OB_FAIL(dml_splicer.add_column("comment", comment_.ptr()))) { - LOG_WARN("add column failed", KR(ret)); - } + } else if (OB_FAIL(ObDRTask::fill_dml_splicer(dml_splicer))) { + LOG_WARN("ObDRTask fill dml splicer failed", KR(ret)); + } else if (OB_FAIL(dml_splicer.add_pk_column("task_type", task_type)) + || OB_FAIL(dml_splicer.add_column("target_replica_svr_ip", dest_ip)) + || OB_FAIL(dml_splicer.add_column("target_replica_svr_port", get_dst_server().get_port())) + || OB_FAIL(dml_splicer.add_column("target_paxos_replica_number", get_paxos_replica_number())) + || OB_FAIL(dml_splicer.add_column("target_replica_type", ob_replica_type_strs(get_dst_replica().get_member().get_replica_type()))) + || OB_FAIL(dml_splicer.add_column("source_replica_svr_ip", src_ip)) + || OB_FAIL(dml_splicer.add_column("source_replica_svr_port", get_src_member().get_server().get_port())) + || OB_FAIL(dml_splicer.add_column("source_paxos_replica_number", get_orig_paxos_replica_number())) + || 
OB_FAIL(dml_splicer.add_column("source_replica_type", ob_replica_type_strs(get_src_member().get_replica_type()))) + || OB_FAIL(dml_splicer.add_column("task_exec_svr_ip", dest_ip)) + || OB_FAIL(dml_splicer.add_column("task_exec_svr_port", get_dst_server().get_port()))) { + LOG_WARN("add column failed", KR(ret)); + } else if (OB_FAIL(fill_dml_splicer_for_new_column(dml_splicer, common::ObAddr()))) { + LOG_WARN("fill dml_splicer for new column failed", KR(ret)); } return ret; } @@ -1691,6 +1923,53 @@ int ObLSTypeTransformTask::build( return ret; } +int ObLSTypeTransformTask::simple_build( + const uint64_t tenant_id, + const share::ObLSID &ls_id, + const share::ObTaskId &task_id, + const ObDstReplica &dst_replica, + const common::ObReplicaMember &src_member, + const common::ObReplicaMember &data_src_member, + const int64_t orig_paxos_replica_number, + const int64_t paxos_replica_number) +{ + int ret = OB_SUCCESS; + ObDRTaskKey task_key; + if (OB_UNLIKELY(!is_valid_tenant_id(tenant_id) + || !ls_id.is_valid_with_tenant(tenant_id) + || !task_id.is_valid() + || !dst_replica.is_valid() + || !src_member.is_valid() + || paxos_replica_number <= 0 + || orig_paxos_replica_number <= 0)) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), K(tenant_id), K(ls_id), K(task_id), K(dst_replica), K(src_member), + K(orig_paxos_replica_number), K(paxos_replica_number)); + } else if (OB_FAIL(task_key.init(tenant_id, ls_id.id(), 0, 0, + ObDRTaskKeyType::FORMAL_DR_KEY))) { + LOG_WARN("fail to init task key", KR(ret), K(tenant_id), K(ls_id)); + } else if (OB_FAIL(ObDRTask::build( + task_key, + tenant_id, + ls_id, + task_id, + 0,/*schedule_time_us*/ 0,/*generate_time_us*/ + GCONF.cluster_id, 0,/*transmit_data_size*/ + obrpc::ObAdminClearDRTaskArg::TaskType::MANUAL, + ObDRTaskPriority::HIGH_PRI, + ObString(drtask::ALTER_SYSTEM_COMMAND_MODIFY_REPLICA_TYPE)))) { + LOG_WARN("fail to build ObDRTask", KR(ret), K(task_key), K(tenant_id), K(ls_id), K(task_id)); + } else if (OB_FAIL(dst_replica_.assign(dst_replica))) { + LOG_WARN("fail to assign dst replica", KR(ret), K(dst_replica)); + } else { + src_member_ = src_member; + data_src_member_ = data_src_member; + orig_paxos_replica_number_ = orig_paxos_replica_number; + paxos_replica_number_ = paxos_replica_number; + } + return ret; +} + int ObLSTypeTransformTask::build_task_from_sql_result( const sqlclient::ObMySQLResult &res) { @@ -1711,6 +1990,7 @@ int ObLSTypeTransformTask::build_task_from_sql_result( int64_t schedule_time_us = 0; int64_t generate_time_us = 0; common::ObString comment; + bool is_manual = false; //STEP1_0: read certain members from sql result EXTRACT_INT_FIELD_MYSQL(res, "tenant_id", tenant_id, uint64_t); { @@ -1733,6 +2013,7 @@ int ObLSTypeTransformTask::build_task_from_sql_result( (void)GET_COL_IGNORE_NULL(res.get_varchar, "source_replica_type", src_type); (void)GET_COL_IGNORE_NULL(res.get_varchar, "target_replica_type", dest_type); (void)GET_COL_IGNORE_NULL(res.get_varchar, "comment", comment); + EXTRACT_BOOL_FIELD_MYSQL_SKIP_RET(res, "is_manual", is_manual); //STEP2_0: make necessary members to build a task ObDRTaskKey task_key; common::ObAddr src_server; @@ -1811,7 +2092,7 @@ int ObLSTypeTransformTask::build_task_from_sql_result( generate_time_us, GCONF.cluster_id, //(not used)cluster_id transmit_data_size, //(not used) - obrpc::ObAdminClearDRTaskArg::TaskType::AUTO,//(not used)invoked_source + is_manual ? 
obrpc::ObAdminClearDRTaskArg::TaskType::MANUAL : obrpc::ObAdminClearDRTaskArg::TaskType::AUTO,//(not used)invoked_source priority_to_set, //(not used) comment_to_set.ptr(), //comment dst_replica, //(in used)dest_server @@ -1873,7 +2154,8 @@ int ObRemoveLSReplicaTask::log_execute_result( { int ret = OB_SUCCESS; ObSqlString execute_result; - if (OB_FAIL(build_execute_result(ret_code, ret_comment, execute_result))) { + int64_t start_time = execute_time_ > 0 ? execute_time_ : schedule_time_; + if (OB_FAIL(build_execute_result(ret_code, ret_comment, start_time, execute_result))) { LOG_WARN("fail to build execute result", KR(ret), K(ret_code), K(ret_comment)); } else { ROOTSERVICE_EVENT_ADD("disaster_recovery", get_log_finish_str(), @@ -1952,7 +2234,6 @@ int ObRemoveLSReplicaTask::fill_dml_splicer( char dest_ip[OB_MAX_SERVER_ADDR_SIZE] = ""; char target_ip[OB_MAX_SERVER_ADDR_SIZE] = ""; char task_id[OB_TRACE_STAT_BUFFER_SIZE] = ""; - int64_t transmit_data_size = 0; const char *task_type_to_set = ob_disaster_recovery_task_type_strs(get_disaster_recovery_task_type()); if (OB_UNLIKELY(!is_valid())) { @@ -1964,30 +2245,22 @@ int ObRemoveLSReplicaTask::fill_dml_splicer( } else if (false == get_remove_server().get_server().ip_to_string(target_ip, sizeof(target_ip))) { ret = OB_INVALID_ARGUMENT; LOG_WARN("convert target_server ip to string failed", KR(ret), "target_server", get_remove_server().get_server()); - } else if (OB_FAIL(get_execute_transmit_size(transmit_data_size))) { - LOG_WARN("fail to get transmit_data_size", KR(ret), K(transmit_data_size)); - } else { - if (OB_FAIL(dml_splicer.add_pk_column("tenant_id", tenant_id_)) - || OB_FAIL(dml_splicer.add_pk_column("ls_id", ls_id_.id())) - || OB_FAIL(dml_splicer.add_pk_column("task_type", task_type_to_set)) - || OB_FAIL(dml_splicer.add_pk_column("task_id", task_id_)) - || OB_FAIL(dml_splicer.add_column("task_status", TASK_STATUS)) - || OB_FAIL(dml_splicer.add_column("priority", static_cast(ObDRTaskPriority::HIGH_PRI))) - || OB_FAIL(dml_splicer.add_column("target_replica_svr_ip", target_ip)) - || OB_FAIL(dml_splicer.add_column("target_replica_svr_port", get_remove_server().get_server().get_port())) - || OB_FAIL(dml_splicer.add_column("target_paxos_replica_number", get_paxos_replica_number())) - || OB_FAIL(dml_splicer.add_column("target_replica_type", ob_replica_type_strs(get_remove_server().get_replica_type()))) - || OB_FAIL(dml_splicer.add_column("source_replica_svr_ip", src_ip)) - || OB_FAIL(dml_splicer.add_column("source_replica_svr_port", 0)) - || OB_FAIL(dml_splicer.add_column("source_paxos_replica_number", get_orig_paxos_replica_number())) - || OB_FAIL(dml_splicer.add_column("source_replica_type", "")) - || OB_FAIL(dml_splicer.add_column("task_exec_svr_ip", dest_ip)) - || OB_FAIL(dml_splicer.add_column("task_exec_svr_port", get_leader().get_port())) - || OB_FAIL(dml_splicer.add_time_column("generate_time", generate_time_)) - || OB_FAIL(dml_splicer.add_time_column("schedule_time", schedule_time_)) - || OB_FAIL(dml_splicer.add_column("comment", comment_.ptr()))) { - LOG_WARN("add column failed", KR(ret)); - } + } else if (OB_FAIL(ObDRTask::fill_dml_splicer(dml_splicer))) { + LOG_WARN("ObDRTask fill dml splicer failed", KR(ret)); + } else if (OB_FAIL(dml_splicer.add_pk_column("task_type", task_type_to_set)) + || OB_FAIL(dml_splicer.add_column("target_replica_svr_ip", target_ip)) + || OB_FAIL(dml_splicer.add_column("target_replica_svr_port", get_remove_server().get_server().get_port())) + || 
OB_FAIL(dml_splicer.add_column("target_paxos_replica_number", get_paxos_replica_number())) + || OB_FAIL(dml_splicer.add_column("target_replica_type", ob_replica_type_strs(get_remove_server().get_replica_type()))) + || OB_FAIL(dml_splicer.add_column("source_replica_svr_ip", src_ip)) + || OB_FAIL(dml_splicer.add_column("source_replica_svr_port", 0)) + || OB_FAIL(dml_splicer.add_column("source_paxos_replica_number", get_orig_paxos_replica_number())) + || OB_FAIL(dml_splicer.add_column("source_replica_type", "")) + || OB_FAIL(dml_splicer.add_column("task_exec_svr_ip", dest_ip)) + || OB_FAIL(dml_splicer.add_column("task_exec_svr_port", get_leader().get_port()))) { + LOG_WARN("add column failed", KR(ret)); + } else if (OB_FAIL(fill_dml_splicer_for_new_column(dml_splicer, common::ObAddr()))) { + LOG_WARN("fill dml_splicer for new column failed", KR(ret)); } return ret; } @@ -2082,6 +2355,54 @@ int ObRemoveLSReplicaTask::build( return ret; } +int ObRemoveLSReplicaTask::simple_build( + const uint64_t tenant_id, + const share::ObLSID &ls_id, + const share::ObTaskId &task_id, + const common::ObAddr &leader, + const common::ObReplicaMember &remove_server, + const int64_t orig_paxos_replica_number, + const int64_t paxos_replica_number, + const ObReplicaType &replica_type) +{ + int ret = OB_SUCCESS; + ObDRTaskKey task_key; + if (OB_UNLIKELY(!is_valid_tenant_id(tenant_id) + || !ls_id.is_valid_with_tenant(tenant_id) + || !task_id.is_valid() + || !leader.is_valid() + || !remove_server.is_valid() + || orig_paxos_replica_number <= 0 + || paxos_replica_number <= 0 + || REPLICA_TYPE_MAX == replica_type)) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), K(tenant_id), K(ls_id), K(task_id), + K(leader), K(remove_server), K(orig_paxos_replica_number), + K(paxos_replica_number), K(replica_type)); + } else if (OB_FAIL(task_key.init(tenant_id, ls_id.id(), 0, 0, + ObDRTaskKeyType::FORMAL_DR_KEY))) { + LOG_WARN("fail to init task key", KR(ret), K(tenant_id), K(ls_id)); + } else if (OB_FAIL(ObDRTask::build( + task_key, + tenant_id, + ls_id, + task_id, + 0,/*schedule_time_us*/ 0,/*generate_time_us*/ + GCONF.cluster_id, 0,/*transmit_data_size*/ + obrpc::ObAdminClearDRTaskArg::TaskType::MANUAL, + ObDRTaskPriority::HIGH_PRI, + ObString(drtask::ALTER_SYSTEM_COMMAND_REMOVE_REPLICA)))) { + LOG_WARN("fail to build ObDRTask", KR(ret), K(task_key), K(tenant_id), K(ls_id), K(task_id)); + } else { + leader_ = leader; + remove_server_ = remove_server; + orig_paxos_replica_number_ = orig_paxos_replica_number; + paxos_replica_number_ = paxos_replica_number; + replica_type_ = replica_type; + } + return ret; +} + int ObRemoveLSReplicaTask::build_task_from_sql_result( const sqlclient::ObMySQLResult &res) { @@ -2102,6 +2423,7 @@ int ObRemoveLSReplicaTask::build_task_from_sql_result( int64_t generate_time_us = 0; common::ObString comment; ObReplicaType replica_type = REPLICA_TYPE_MAX; + bool is_manual = false; //STEP1_0: read certain members from sql result EXTRACT_INT_FIELD_MYSQL(res, "tenant_id", tenant_id, uint64_t); { @@ -2123,6 +2445,7 @@ int ObRemoveLSReplicaTask::build_task_from_sql_result( (void)GET_COL_IGNORE_NULL(res.get_int, "source_paxos_replica_number", src_paxos_replica_number); (void)GET_COL_IGNORE_NULL(res.get_int, "target_paxos_replica_number", dest_paxos_replica_number); (void)GET_COL_IGNORE_NULL(res.get_varchar, "comment", comment); + EXTRACT_BOOL_FIELD_MYSQL_SKIP_RET(res, "is_manual", is_manual); //STEP2_0: make necessary members to build a task ObDRTaskKey task_key; common::ObAddr dest_server; @@ 
-2181,7 +2504,7 @@ int ObRemoveLSReplicaTask::build_task_from_sql_result( generate_time_us, GCONF.cluster_id, //(not used)cluster_id transmit_data_size, //(not used) - obrpc::ObAdminClearDRTaskArg::TaskType::AUTO,//(not used)invoked_source + is_manual ? obrpc::ObAdminClearDRTaskArg::TaskType::MANUAL : obrpc::ObAdminClearDRTaskArg::TaskType::AUTO,//(not used)invoked_source priority_to_set, //(not used) comment_to_set.ptr(), //comment dest_server, //(in used)leader @@ -2247,7 +2570,8 @@ int ObLSModifyPaxosReplicaNumberTask::log_execute_result( { int ret = OB_SUCCESS; ObSqlString execute_result; - if (OB_FAIL(build_execute_result(ret_code, ret_comment, execute_result))) { + int64_t start_time = execute_time_ > 0 ? execute_time_ : schedule_time_; + if (OB_FAIL(build_execute_result(ret_code, ret_comment, start_time, execute_result))) { LOG_WARN("fail to build execute result", KR(ret), K(ret_code), K(ret_comment)); } else { ROOTSERVICE_EVENT_ADD("disaster_recovery", get_log_finish_str(), @@ -2306,38 +2630,28 @@ int ObLSModifyPaxosReplicaNumberTask::fill_dml_splicer( char target_ip[OB_MAX_SERVER_ADDR_SIZE] = ""; char task_id[OB_TRACE_STAT_BUFFER_SIZE] = ""; char task_type[MAX_DISASTER_RECOVERY_TASK_TYPE_LENGTH] = "MODIFY PAXOS REPLICA NUMBER"; - int64_t transmit_data_size = 0; - if (OB_UNLIKELY(!is_valid())) { ret = OB_INVALID_ARGUMENT; LOG_WARN("invalid task", KR(ret)); } else if (false == get_dst_server().ip_to_string(dest_ip, sizeof(dest_ip))) { ret = OB_INVALID_ARGUMENT; LOG_WARN("convert dest_server ip to string failed", KR(ret), "dest_server", get_dst_server()); - } else if (OB_FAIL(get_execute_transmit_size(transmit_data_size))) { - LOG_WARN("fail to get transmit_data_size", KR(ret), K(transmit_data_size)); - } else { - if (OB_FAIL(dml_splicer.add_pk_column("tenant_id", tenant_id_)) - || OB_FAIL(dml_splicer.add_pk_column("ls_id", ls_id_.id())) - || OB_FAIL(dml_splicer.add_pk_column("task_type", task_type)) - || OB_FAIL(dml_splicer.add_pk_column("task_id", task_id_)) - || OB_FAIL(dml_splicer.add_column("task_status", TASK_STATUS)) - || OB_FAIL(dml_splicer.add_column("priority", static_cast(ObDRTaskPriority::HIGH_PRI))) - || OB_FAIL(dml_splicer.add_column("target_replica_svr_ip", dest_ip)) - || OB_FAIL(dml_splicer.add_column("target_replica_svr_port", get_dst_server().get_port())) - || OB_FAIL(dml_splicer.add_column("target_paxos_replica_number", get_paxos_replica_number())) - || OB_FAIL(dml_splicer.add_column("target_replica_type", "")) - || OB_FAIL(dml_splicer.add_column("source_replica_svr_ip", src_ip)) - || OB_FAIL(dml_splicer.add_column("source_replica_svr_port", 0)) - || OB_FAIL(dml_splicer.add_column("source_paxos_replica_number", get_orig_paxos_replica_number())) - || OB_FAIL(dml_splicer.add_column("source_replica_type", "")) - || OB_FAIL(dml_splicer.add_column("task_exec_svr_ip", dest_ip)) - || OB_FAIL(dml_splicer.add_column("task_exec_svr_port", get_dst_server().get_port())) - || OB_FAIL(dml_splicer.add_time_column("generate_time", generate_time_)) - || OB_FAIL(dml_splicer.add_time_column("schedule_time", schedule_time_)) - || OB_FAIL(dml_splicer.add_column("comment", comment_.ptr()))) { - LOG_WARN("add column failed", KR(ret)); - } + } else if (OB_FAIL(ObDRTask::fill_dml_splicer(dml_splicer))) { + LOG_WARN("ObDRTask fill dml splicer failed", KR(ret)); + } else if (OB_FAIL(dml_splicer.add_pk_column("task_type", task_type)) + || OB_FAIL(dml_splicer.add_column("target_replica_svr_ip", dest_ip)) + || OB_FAIL(dml_splicer.add_column("target_replica_svr_port", 
get_dst_server().get_port())) + || OB_FAIL(dml_splicer.add_column("target_paxos_replica_number", get_paxos_replica_number())) + || OB_FAIL(dml_splicer.add_column("target_replica_type", "")) + || OB_FAIL(dml_splicer.add_column("source_replica_svr_ip", src_ip)) + || OB_FAIL(dml_splicer.add_column("source_replica_svr_port", 0)) + || OB_FAIL(dml_splicer.add_column("source_paxos_replica_number", get_orig_paxos_replica_number())) + || OB_FAIL(dml_splicer.add_column("source_replica_type", "")) + || OB_FAIL(dml_splicer.add_column("task_exec_svr_ip", dest_ip)) + || OB_FAIL(dml_splicer.add_column("task_exec_svr_port", get_dst_server().get_port()))) { + LOG_WARN("add column failed", KR(ret)); + } else if (OB_FAIL(fill_dml_splicer_for_new_column(dml_splicer, common::ObAddr()))) { + LOG_WARN("fill dml_splicer for new column failed", KR(ret)); } return ret; } @@ -2429,6 +2743,52 @@ int ObLSModifyPaxosReplicaNumberTask::build( return ret; } +int ObLSModifyPaxosReplicaNumberTask::simple_build( + const uint64_t tenant_id, + const share::ObLSID &ls_id, + const share::ObTaskId &task_id, + const common::ObAddr &dst_server, + const int64_t orig_paxos_replica_number, + const int64_t paxos_replica_number, + const common::ObMemberList &member_list) +{ + int ret = OB_SUCCESS; + ObDRTaskKey task_key; + if (OB_UNLIKELY(!is_valid_tenant_id(tenant_id) + || !ls_id.is_valid_with_tenant(tenant_id) + || !task_id.is_valid() + || !dst_server.is_valid() + || orig_paxos_replica_number <= 0 + || paxos_replica_number <= 0 + || !member_list.is_valid())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), K(tenant_id), K(ls_id), K(task_id), + K(dst_server), K(orig_paxos_replica_number), + K(paxos_replica_number), K(member_list)); + } else if (OB_FAIL(task_key.init(tenant_id, ls_id.id(), 0, 0, + ObDRTaskKeyType::FORMAL_DR_KEY))) { + LOG_WARN("fail to init task key", KR(ret), K(tenant_id), K(ls_id)); + } else if (OB_FAIL(ObDRTask::build( + task_key, + tenant_id, + ls_id, + task_id, + 0,/*schedule_time_us*/ 0,/*generate_time_us*/ + GCONF.cluster_id, 0,/*transmit_data_size*/ + obrpc::ObAdminClearDRTaskArg::TaskType::MANUAL, + ObDRTaskPriority::HIGH_PRI, + ObString(drtask::ALTER_SYSTEM_COMMAND_MODIFY_PAXOS_REPLICA_NUM)))) { + LOG_WARN("fail to build ObDRTask", KR(ret), + K(task_key), K(tenant_id), K(ls_id), K(task_id)); + } else { + orig_paxos_replica_number_ = orig_paxos_replica_number; + paxos_replica_number_ = paxos_replica_number; + server_ = dst_server; + member_list_ = member_list; + } + return ret; +} + int ObLSModifyPaxosReplicaNumberTask::build_task_from_sql_result( const sqlclient::ObMySQLResult &res) { @@ -2445,6 +2805,7 @@ int ObLSModifyPaxosReplicaNumberTask::build_task_from_sql_result( int64_t schedule_time_us = 0; int64_t generate_time_us = 0; common::ObString comment; + bool is_manual = false; //STEP1_0: read certain members from sql result EXTRACT_INT_FIELD_MYSQL(res, "tenant_id", tenant_id, uint64_t); { @@ -2463,6 +2824,7 @@ int ObLSModifyPaxosReplicaNumberTask::build_task_from_sql_result( (void)GET_COL_IGNORE_NULL(res.get_int, "source_paxos_replica_number", src_paxos_replica_number); (void)GET_COL_IGNORE_NULL(res.get_int, "target_paxos_replica_number", dest_paxos_replica_number); (void)GET_COL_IGNORE_NULL(res.get_varchar, "comment", comment); + EXTRACT_BOOL_FIELD_MYSQL_SKIP_RET(res, "is_manual", is_manual); //STEP2_0: make necessary members to build a task ObDRTaskKey task_key; common::ObAddr dest_server; @@ -2514,7 +2876,7 @@ int ObLSModifyPaxosReplicaNumberTask::build_task_from_sql_result( 
generate_time_us, GCONF.cluster_id, //(not used)cluster_id transmit_data_size, //(not used) - obrpc::ObAdminClearDRTaskArg::TaskType::AUTO,//(not used)invoked_source + is_manual ? obrpc::ObAdminClearDRTaskArg::TaskType::MANUAL : obrpc::ObAdminClearDRTaskArg::TaskType::AUTO,//(not used)invoked_source priority_to_set, //(not used) comment_to_set.ptr(), //comment dest_server, //(in used)leader diff --git a/src/rootserver/ob_disaster_recovery_task.h b/src/rootserver/ob_disaster_recovery_task.h index 796bb80f4..cdee1242a 100644 --- a/src/rootserver/ob_disaster_recovery_task.h +++ b/src/rootserver/ob_disaster_recovery_task.h @@ -52,6 +52,11 @@ namespace drtask const static char * const CANCEL_MIGRATE_UNIT_WITH_NON_PAXOS_REPLICA = "cancel migrate unit remove non-paxos replica"; const static char * const MIGRATE_REPLICA_DUE_TO_UNIT_GROUP_NOT_MATCH = "migrate replica due to unit group not match"; const static char * const MIGRATE_REPLICA_DUE_TO_UNIT_NOT_MATCH = "migrate replica due to unit not match"; + const static char * const ALTER_SYSTEM_COMMAND_ADD_REPLICA = "add replica by manual"; + const static char * const ALTER_SYSTEM_COMMAND_REMOVE_REPLICA = "remove replica by manual"; + const static char * const ALTER_SYSTEM_COMMAND_MODIFY_REPLICA_TYPE = "modify replica type by manual"; + const static char * const ALTER_SYSTEM_COMMAND_MIGRATE_REPLICA = "migrate replica by manual"; + const static char * const ALTER_SYSTEM_COMMAND_MODIFY_PAXOS_REPLICA_NUM = "modify paxos_replica_num by manual"; }; namespace drtasklog @@ -70,6 +75,36 @@ namespace drtasklog const static char * const FINISH_MODIFY_PAXOS_REPLICA_NUMBER_STR = "finish_modify_paxos_replica_number"; } +class ObDRLSReplicaTaskStatus +{ + OB_UNIS_VERSION(1); +public: + enum DRLSReplicaTaskStatus + { + INPROGRESS = 0, + COMPLETED, + FAILED, + CANCELED, + MAX_STATUS, + }; +public: + ObDRLSReplicaTaskStatus() : status_(MAX_STATUS) {} + ObDRLSReplicaTaskStatus(DRLSReplicaTaskStatus status) : status_(status) {} + + ObDRLSReplicaTaskStatus &operator=(const DRLSReplicaTaskStatus status) { status_ = status; return *this; } + ObDRLSReplicaTaskStatus &operator=(const ObDRLSReplicaTaskStatus &other) { status_ = other.status_; return *this; } + void reset() { status_ = MAX_STATUS; } + void assign(const ObDRLSReplicaTaskStatus &other); + bool is_valid() const { return MAX_STATUS != status_; } + const DRLSReplicaTaskStatus &get_status() const { return status_; } + int parse_from_string(const ObString &status); + int64_t to_string(char *buf, const int64_t buf_len) const; + const char* get_status_str() const; + +private: + DRLSReplicaTaskStatus status_; +}; + enum class ObDRTaskType : int64_t; enum class ObDRTaskPriority : int64_t; @@ -94,6 +129,12 @@ const char *ob_disaster_recovery_task_type_strs(const rootserver::ObDRTaskType t const char *ob_disaster_recovery_task_priority_strs(const rootserver::ObDRTaskPriority task_priority); const char* ob_disaster_recovery_task_ret_comment_strs(const rootserver::ObDRTaskRetComment ret_comment); const char *ob_replica_type_strs(const ObReplicaType type); +bool is_manual_dr_task_data_version_match(uint64_t tenant_data_version); +int build_execute_result( + const int ret_code, + const ObDRTaskRetComment &ret_comment, + const int64_t start_time, + ObSqlString &execute_result); class ObDstReplica { @@ -207,8 +248,6 @@ enum class ObDRTaskPriority : int64_t class ObDRTask : public common::ObDLinkBase { -public: - const char *const TASK_STATUS = "INPROGRESS"; public: ObDRTask() : task_key_(), tenant_id_(common::OB_INVALID_ID), @@ 
-247,10 +286,6 @@ public: const ObDRTaskPriority priority, const ObString &comment); - int build_execute_result( - const int ret_code, - const ObDRTaskRetComment &ret_comment, - ObSqlString &execute_result) const; public: virtual const common::ObAddr &get_dst_server() const = 0; @@ -285,8 +320,11 @@ public: ObDRTaskRetComment &ret_comment) const = 0; virtual int fill_dml_splicer( - share::ObDMLSqlSplicer &dml_splicer) const = 0; + share::ObDMLSqlSplicer &dml_splicer) const; + int fill_dml_splicer_for_new_column( + share::ObDMLSqlSplicer &dml_splicer, + const common::ObAddr &force_data_src) const; // to string virtual TO_STRING_KV(K_(task_key), K_(tenant_id), @@ -413,6 +451,17 @@ public: const int64_t paxos_replica_number ); + // only use some necessary information build a ObMigrateLSReplicaTask + // Specifically, this method is only used when manually executing operation and maintenance commands + int simple_build( + const uint64_t tenant_id, + const share::ObLSID &ls_id, + const share::ObTaskId &task_id, + const ObDstReplica &dst_replica, + const common::ObReplicaMember &src_member, + const common::ObReplicaMember &data_src_member, + const common::ObReplicaMember &force_data_src_member, + const int64_t paxos_replica_number); // build a ObMigrateLSReplicaTask from sql result read from inner table // @param [in] res, sql result read from inner table int build_task_from_sql_result(const sqlclient::ObMySQLResult &res); @@ -528,7 +577,18 @@ public: const common::ObReplicaMember &force_data_src_member, const int64_t orig_paxos_replica_number, const int64_t paxos_replica_number); - + + // only use some necessary information build a ObAddLSReplicaTask + // Specifically, this method is only used when manually executing operation and maintenance commands + int simple_build( + const uint64_t tenant_id, + const share::ObLSID &ls_id, + const share::ObTaskId &task_id, + const ObDstReplica &dst_replica, + const common::ObReplicaMember &data_src_member, + const common::ObReplicaMember &force_data_src_member, + const int64_t orig_paxos_replica_number, + const int64_t paxos_replica_number); // build a ObAddLSReplicaTask from sql result read from inner table // @param [in] res, sql result read from inner table int build_task_from_sql_result(const sqlclient::ObMySQLResult &res); @@ -644,6 +704,17 @@ public: const int64_t orig_paxos_replica_number, const int64_t paxos_replica_number); + // only use some necessary information build a ObLSTypeTransformTask + // Specifically, this method is only used when manually executing operation and maintenance commands + int simple_build( + const uint64_t tenant_id, + const share::ObLSID &ls_id, + const share::ObTaskId &task_id, + const ObDstReplica &dst_replica, + const common::ObReplicaMember &src_member, + const common::ObReplicaMember &data_src_member, + const int64_t orig_paxos_replica_number, + const int64_t paxos_replica_number); // build a ObLSTypeTransformTask from sql result read from inner table // @param [in] res, sql result read from inner table int build_task_from_sql_result(const sqlclient::ObMySQLResult &res); @@ -760,6 +831,18 @@ public: const int64_t paxos_replica_number, const ObReplicaType &replica_type); + // only use some necessary information build a ObRemoveLSReplicaTask + // Specifically, this method is only used when manually executing operation and maintenance commands + int simple_build( + const uint64_t tenant_id, + const share::ObLSID &ls_id, + const share::ObTaskId &task_id, + const common::ObAddr &leader, + const common::ObReplicaMember 
&remove_server, + const int64_t orig_paxos_replica_number, + const int64_t paxos_replica_number, + const ObReplicaType &replica_type); + // build a ObRemoveLSReplicaTask from sql result read from inner table // @param [in] res, sql result read from inner table int build_task_from_sql_result(const sqlclient::ObMySQLResult &res); @@ -874,6 +957,16 @@ public: const int64_t paxos_replica_number, const common::ObMemberList &member_list); + // only use some necessary information build a ObLSModifyPaxosReplicaNumberTask + // Specifically, this method is only used when manually executing operation and maintenance commands + int simple_build( + const uint64_t tenant_id, + const share::ObLSID &ls_id, + const share::ObTaskId &task_id, + const common::ObAddr &dst_server, + const int64_t orig_paxos_replica_number, + const int64_t paxos_replica_number, + const common::ObMemberList &member_list); // build a ObLSModifyPaxosReplicaNumberTask from sql result read from inner table // @param [in] res, sql result read from inner table int build_task_from_sql_result(const sqlclient::ObMySQLResult &res); diff --git a/src/rootserver/ob_disaster_recovery_task_mgr.cpp b/src/rootserver/ob_disaster_recovery_task_mgr.cpp index df8628988..f4aa22cf4 100644 --- a/src/rootserver/ob_disaster_recovery_task_mgr.cpp +++ b/src/rootserver/ob_disaster_recovery_task_mgr.cpp @@ -14,28 +14,29 @@ #include "ob_disaster_recovery_task_mgr.h" -#include "lib/lock/ob_mutex.h" -#include "lib/stat/ob_diagnose_info.h" -#include "lib/profile/ob_trace_id.h" #include "lib/alloc/ob_malloc_allocator.h" -#include "share/ob_debug_sync.h" -#include "share/ob_srv_rpc_proxy.h" -#include "share/config/ob_server_config.h" -#include "ob_disaster_recovery_task_executor.h" -#include "rootserver/ob_root_balancer.h" -#include "ob_rs_event_history_table_operator.h" -#include "share/ob_rpc_struct.h" -#include "observer/ob_server_struct.h" -#include "sql/executor/ob_executor_rpc_proxy.h" -#include "rootserver/ob_disaster_recovery_task.h" // for ObDRTaskType -#include "share/ob_share_util.h" // for ObShareUtil +#include "lib/lock/ob_mutex.h" #include "lib/lock/ob_tc_rwlock.h" // for common::RWLock +#include "lib/profile/ob_trace_id.h" +#include "lib/stat/ob_diagnose_info.h" +#include "observer/ob_server_struct.h" +#include "ob_disaster_recovery_task_executor.h" +#include "ob_disaster_recovery_task_table_operator.h" +#include "ob_rs_event_history_table_operator.h" +#include "rootserver/ob_disaster_recovery_task.h" // for ObDRTaskType #include "rootserver/ob_disaster_recovery_task.h" #include "rootserver/tenant_snapshot/ob_tenant_snapshot_util.h" // for ObTenantSnapshotUtil -#include "share/inner_table/ob_inner_table_schema_constants.h" -#include "share/ob_all_server_tracer.h" #include "storage/tablelock/ob_lock_inner_connection_util.h" // for ObInnerConnectionLockUtil #include "observer/ob_inner_sql_connection.h" +#include "rootserver/ob_root_balancer.h" +#include "share/config/ob_server_config.h" +#include "share/inner_table/ob_inner_table_schema_constants.h" +#include "share/ob_all_server_tracer.h" +#include "share/ob_debug_sync.h" +#include "share/ob_rpc_struct.h" +#include "share/ob_share_util.h" // for ObShareUtil +#include "share/ob_srv_rpc_proxy.h" +#include "sql/executor/ob_executor_rpc_proxy.h" namespace oceanbase { @@ -220,7 +221,7 @@ int ObDRTaskQueue::push_task_in_wait_list( } int ObDRTaskQueue::push_task_in_schedule_list( - ObDRTask &task) + const ObDRTask &task) { // STEP 1: push task into schedule list // STEP 2: push task into task_map @@ -815,6 
+816,55 @@ int ObDRTaskMgr::check_task_exist( return ret; } +int ObDRTaskMgr::add_task_in_queue_and_execute(const ObDRTask &task) +{ + int ret = OB_SUCCESS; + int tmp_ret = OB_SUCCESS; + if (OB_FAIL(check_inner_stat_())) { + LOG_WARN("fail to check inner stat", KR(ret), K_(inited), K_(loaded), K_(stopped)); + } else if (OB_UNLIKELY(!task.is_valid())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid dr task", KR(ret), K(task)); + } else { + ObThreadCondGuard guard(cond_); + bool task_exist = false; + bool sibling_in_schedule = false; + ObDRTaskQueue &queue = task.is_high_priority_task() ? high_task_queue_ : low_task_queue_; + ObDRTaskQueue &sibling_queue = task.is_high_priority_task() ? low_task_queue_ : high_task_queue_; + if (OB_UNLIKELY(queue.task_cnt() >= TASK_QUEUE_LIMIT)) { + ret = OB_SIZE_OVERFLOW; + LOG_WARN("disaster recovery task queue is full", KR(ret), "task_cnt", queue.task_cnt()); + } else if (OB_FAIL(queue.check_task_exist(task.get_task_key(), task_exist))) { + LOG_WARN("fail to check task in scheduling", KR(ret), K(task)); + } else if (task_exist) { + ret = OB_ENTRY_EXIST; + LOG_WARN("ls disaster recovery task has existed in queue", KR(ret), K(task), K(task_exist)); + } else if (OB_FAIL(sibling_queue.check_task_in_scheduling(task.get_task_key(), sibling_in_schedule))) { + LOG_WARN("fail to check task in scheduling", KR(ret), K(task)); + } else if (sibling_in_schedule) { + ret = OB_ENTRY_EXIST; + LOG_WARN("ls disaster recovery task has existed in sibling_queue", + KR(ret), K(task), K(sibling_in_schedule)); + } else if (OB_FAIL(queue.push_task_in_schedule_list(task))) { + LOG_WARN("fail to add task to schedule list", KR(ret), K(task)); + } else if (OB_FAIL(set_sibling_in_schedule(task, true/*in_schedule*/))) { + //after successfully adding the scheduling queue, + //need mark in both queues that the task has been scheduled in the queue. 
+ //if there is a task in the waiting queue of another queue, need update its mark + LOG_WARN("set sibling in schedule failed", KR(ret), K(task)); + } else { + if (OB_SUCCESS != (tmp_ret = task.log_execute_start())) { + LOG_WARN("fail to log task start", KR(tmp_ret), K(task)); + } + if (OB_FAIL(execute_manual_task_(task))) { + //must under cond + LOG_WARN("fail to execute manual task", KR(ret), K(task)); + } + } + } + return ret; +} + int ObDRTaskMgr::add_task( const ObDRTask &task) { @@ -1207,9 +1257,7 @@ int ObDRTaskMgr::persist_task_info_( { int ret = OB_SUCCESS; ret_comment = ObDRTaskRetComment::MAX; - share::ObDMLSqlSplicer dml; - ObSqlString sql; - int64_t affected_rows = 0; + ObLSReplicaTaskTableOperator task_table_operator; const uint64_t sql_tenant_id = gen_meta_tenant_id(task.get_tenant_id()); ObMySQLTransaction trans; const int64_t timeout = GCONF.internal_sql_execute_timeout; @@ -1223,10 +1271,6 @@ int ObDRTaskMgr::persist_task_info_( LOG_WARN("invalid argument", KR(ret)); } else if (OB_FAIL(trans.start(sql_proxy_, sql_tenant_id))) { LOG_WARN("failed to start trans", KR(ret), K(sql_tenant_id)); - } else if (OB_FAIL(task.fill_dml_splicer(dml))) { - LOG_WARN("fill dml splicer failed", KR(ret)); - } else if (OB_FAIL(dml.splice_insert_sql(share::OB_ALL_LS_REPLICA_TASK_TNAME, sql))) { - LOG_WARN("fail to splice batch insert update sql", KR(ret), K(sql)); } else if (OB_ISNULL(conn = static_cast(trans.get_connection()))) { ret = OB_ERR_UNEXPECTED; LOG_WARN("conn_ is NULL", KR(ret)); @@ -1239,8 +1283,8 @@ int ObDRTaskMgr::persist_task_info_( } else if (OB_FAIL(ObTenantSnapshotUtil::check_tenant_not_in_cloning_procedure(task.get_tenant_id(), case_to_check))) { LOG_WARN("fail to check whether tenant is in cloning procedure", KR(ret)); ret_comment = CANNOT_PERSIST_TASK_DUE_TO_CLONE_CONFLICT; - } else if (OB_FAIL(trans.write(sql_tenant_id, sql.ptr(), affected_rows))) { - LOG_WARN("execute sql failed", KR(ret), "tenant_id",task.get_tenant_id(), K(sql_tenant_id), K(sql)); + } else if (OB_FAIL(task_table_operator.insert_task(trans, task))) { + LOG_WARN("task_table_operator insert_task failed", KR(ret), K(task)); } if (trans.is_started()) { int tmp_ret = OB_SUCCESS; @@ -1448,6 +1492,41 @@ int ObDRTaskMgr::pop_task( return ret; } +int ObDRTaskMgr::execute_manual_task_( + const ObDRTask &task) +{ + int ret = OB_SUCCESS; + FLOG_INFO("execute manual disaster recovery task", K(task)); + int dummy_ret = OB_SUCCESS; + ObDRTaskRetComment ret_comment = ObDRTaskRetComment::MAX; + DEBUG_SYNC(BEFORE_ADD_MANUAL_REPLICA_TASK_IN_INNER_TABLE); + if (OB_FAIL(check_inner_stat_())) { + LOG_WARN("fail to check inner stat", KR(ret), K_(inited), K_(stopped), K_(loaded)); + } else if (OB_UNLIKELY(!task.is_valid())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), K(task)); + } else if (OB_ISNULL(task_executor_)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("task_executor_ is nullptr", KR(ret), K(task)); + } else if (OB_FAIL(persist_task_info_(task, ret_comment))) { + LOG_WARN("fail to persist task info into table", KR(ret), K(task)); + } else if (OB_FAIL(task_executor_->execute(task, dummy_ret, ret_comment))) { + LOG_WARN("fail to execute disaster recovery task", KR(ret), K(task)); + } + if (OB_FAIL(ret)) { + (void)log_task_result(task, ret, ret_comment); + const bool data_in_limit = (OB_REACH_SERVER_DATA_COPY_IN_CONCURRENCY_LIMIT == ret); + if (OB_SUCCESS != async_add_cleaning_task_to_updater( + task.get_task_id(), + task.get_task_key(), + ret, false/*need_record_event*/, ret_comment, + 
!data_in_limit)) { + LOG_WARN("fail to do execute over", KR(ret), K(task)); + } + } + return ret; +} + int ObDRTaskMgr::execute_task( const ObDRTask &task) { diff --git a/src/rootserver/ob_disaster_recovery_task_mgr.h b/src/rootserver/ob_disaster_recovery_task_mgr.h index 33b57ce95..26514f347 100644 --- a/src/rootserver/ob_disaster_recovery_task_mgr.h +++ b/src/rootserver/ob_disaster_recovery_task_mgr.h @@ -87,7 +87,7 @@ public: // push a task into this queue's schedule_list // @param [in] task, the task to push in int push_task_in_schedule_list( - ObDRTask &task); + const ObDRTask &task); // pop a task and move it from wait_list to schedule_list // @param [out] task, the task to pop @@ -261,6 +261,10 @@ public: const ObDRTaskPriority priority, bool &task_exist); + // add task in schedule list and execute task + // @param [in] task, target task + virtual int add_task_in_queue_and_execute( + const ObDRTask &task); // add a task into queue // @param [in] task, the task to push in virtual int add_task( @@ -399,6 +403,11 @@ private: int execute_task( const ObDRTask &task); + // try to persist and execute a manual task + // @param [in] task, the task to execute + int execute_manual_task_( + const ObDRTask &task); + // set sibling in schedule // @param [in] task, which task to deal with // @param [in] in_schedule, whether in schedule diff --git a/src/rootserver/ob_disaster_recovery_task_table_operator.cpp b/src/rootserver/ob_disaster_recovery_task_table_operator.cpp new file mode 100644 index 000000000..40d467e5b --- /dev/null +++ b/src/rootserver/ob_disaster_recovery_task_table_operator.cpp @@ -0,0 +1,273 @@ +/** + * Copyright (c) 2021 OceanBase + * OceanBase CE is licensed under Mulan PubL v2. + * You can use this software according to the terms and conditions of the Mulan PubL v2. + * You may obtain a copy of Mulan PubL v2 at: + * http://license.coscl.org.cn/MulanPubL-2.0 + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PubL v2 for more details. 
+ */ +#define USING_LOG_PREFIX RS +#include "ob_disaster_recovery_task_table_operator.h" +#include "lib/container/ob_se_array.h" // ObSEArray +#include "lib/mysqlclient/ob_isql_client.h" // for ObISQLClient +#include "lib/oblog/ob_log_module.h" // for LOG_WARN +#include "lib/string/ob_sql_string.h" // for ObSqlString +#include "share/inner_table/ob_inner_table_schema_constants.h" // for xxx_TNAME + +namespace oceanbase +{ +namespace rootserver +{ + +int ObLSReplicaTaskTableOperator::delete_task( + common::ObMySQLTransaction &trans, + const uint64_t tenant_id, + const share::ObLSID& ls_id, + const ObDRTaskType& task_type, + const share::ObTaskId& task_id, + int64_t &affected_rows) +{ + int ret = OB_SUCCESS; + affected_rows = 0; + ObSqlString sql; + char task_id_to_set[OB_TRACE_STAT_BUFFER_SIZE] = ""; + const uint64_t sql_tenant_id = gen_meta_tenant_id(tenant_id); + if (OB_UNLIKELY(OB_INVALID_TENANT_ID == tenant_id + || !ls_id.is_valid() + || ObDRTaskType::MAX_TYPE == task_type + || task_id.is_invalid())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), K(tenant_id), K(ls_id), K(task_type), K(task_id)); + } else if (false == task_id.to_string(task_id_to_set, sizeof(task_id_to_set))) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("convert task id to string failed", KR(ret), K(task_id)); + } else if (OB_FAIL(sql.assign_fmt("DELETE FROM %s WHERE tenant_id = %lu AND ls_id = %lu " + "AND task_type = '%s' AND task_id = '%s'", + share::OB_ALL_LS_REPLICA_TASK_TNAME, + tenant_id, + ls_id.id(), + ob_disaster_recovery_task_type_strs(task_type), + task_id_to_set))) { + LOG_WARN("assign sql string failed", KR(ret), K(tenant_id), + K(ls_id), K(task_type), K(task_id_to_set)); + } else if (OB_FAIL(trans.write(sql_tenant_id, sql.ptr(), affected_rows))) { + LOG_WARN("execute sql failed", KR(ret), K(sql), K(sql_tenant_id)); + } + return ret; +} + +int ObLSReplicaTaskTableOperator::insert_task( + common::ObISQLClient &sql_proxy, + const ObDRTask &task) +{ + int ret = OB_SUCCESS; + share::ObDMLSqlSplicer dml; + ObSqlString sql; + int64_t affected_rows = 0; + const uint64_t sql_tenant_id = gen_meta_tenant_id(task.get_tenant_id()); + if (OB_UNLIKELY(!task.is_valid())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), K(task)); + } else if (OB_FAIL(task.fill_dml_splicer(dml))) { + LOG_WARN("fill dml splicer failed", KR(ret), K(task)); + } else if (OB_FAIL(dml.splice_insert_sql(share::OB_ALL_LS_REPLICA_TASK_TNAME, sql))) { + LOG_WARN("fail to splice insert update sql", KR(ret), K(task)); + } else if (OB_FAIL(sql_proxy.write(sql_tenant_id, sql.ptr(), affected_rows))) { + LOG_WARN("execute sql failed", KR(ret), K(task.get_tenant_id()), K(sql_tenant_id), K(sql)); + } else if (!is_single_row(affected_rows)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("insert is not single row", KR(ret), K(affected_rows)); + } + return ret; +} + +int ObLSReplicaTaskTableOperator::finish_task( + common::ObMySQLTransaction& trans, + const ObDRTaskTableUpdateTask& task) +{ + int ret = OB_SUCCESS; + uint64_t tenant_data_version = 0; + int64_t insert_rows = 0; + int64_t delete_rows = 0; + if (OB_UNLIKELY(!task.is_valid())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), K(task)); + } else if (OB_FAIL(GET_MIN_DATA_VERSION(gen_meta_tenant_id(task.get_tenant_id()), tenant_data_version))) { + LOG_WARN("fail to get min data version", KR(ret), K(task)); + } else if (is_manual_dr_task_data_version_match(tenant_data_version)) { + char task_id_to_set[OB_TRACE_STAT_BUFFER_SIZE] = ""; + int64_t 
schedule_time = 0; + ObSqlString execute_result; + ObSqlString condition_sql; + ObSqlString sql; + uint64_t sql_tenant_id = gen_meta_tenant_id(task.get_tenant_id()); + const char* table_column = "tenant_id, ls_id, task_type, task_id, priority, target_replica_svr_ip, target_replica_svr_port, target_paxos_replica_number," + "target_replica_type, source_replica_svr_ip, source_replica_svr_port, source_paxos_replica_number, source_replica_type, task_exec_svr_ip, task_exec_svr_port," + "generate_time, schedule_time, comment, data_source_svr_ip, data_source_svr_port, is_manual"; + // task_status is not in table_column; it is set explicitly in the insert below + ObDRLSReplicaTaskStatus task_status(ObDRLSReplicaTaskStatus::COMPLETED); + if (OB_CANCELED == task.get_ret_code()) { + task_status = ObDRLSReplicaTaskStatus::CANCELED; + } else if (OB_SUCCESS != task.get_ret_code()) { + task_status = ObDRLSReplicaTaskStatus::FAILED; + } + if (false == task.get_task_id().to_string(task_id_to_set, sizeof(task_id_to_set))) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("convert task id to string failed", KR(ret), K(task)); + } else if (OB_FAIL(get_task_schedule_time_(trans, task, schedule_time))) { + if (OB_ENTRY_NOT_EXIST == ret) { + ret = OB_SUCCESS; + LOG_INFO("task count is 0", KR(ret), K(task)); + } else { + LOG_WARN("failed to get task schedule_time", KR(ret), K(task)); + } + } else if (OB_FAIL(build_execute_result(task.get_ret_code(), + task.get_ret_comment(), + schedule_time, + execute_result))) { + LOG_WARN("build_execute_result failed", KR(ret), K(task), K(schedule_time)); + } else if (OB_FAIL(condition_sql.assign_fmt("tenant_id = %lu AND ls_id = %lu AND task_type = '%s' AND task_id = '%s'", + task.get_tenant_id(), task.get_ls_id().id(), + ob_disaster_recovery_task_type_strs(task.get_task_type()), task_id_to_set))) { + LOG_WARN("failed to append sql", KR(ret), K(task), K(task_id_to_set)); + } else if (OB_FAIL(sql.assign_fmt("insert into %s (%s, task_status, execute_result, finish_time) " + " select %s, '%s', '%s', now() from %s where %s", + share::OB_ALL_LS_REPLICA_TASK_HISTORY_TNAME, table_column, table_column, + task_status.get_status_str(), execute_result.ptr(), + share::OB_ALL_LS_REPLICA_TASK_TNAME, condition_sql.ptr()))) { + LOG_WARN("failed to assign sql", KR(ret), K(task_status), K(execute_result), K(condition_sql)); + } else if (OB_FAIL(trans.write(sql_tenant_id, sql.ptr(), insert_rows))) { + LOG_WARN("execute sql failed", KR(ret), K(sql_tenant_id), K(sql)); + } else if (insert_rows != 1) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("sql insert error", KR(ret), K(insert_rows), K(task)); + } + } + if (FAILEDx(delete_task(trans, task.get_tenant_id(), task.get_ls_id(), task.get_task_type(), + task.get_task_id(), delete_rows))) { + LOG_WARN("delete_task failed", KR(ret), K(task)); + } else if (!is_single_row(delete_rows)) { + // ignore the affected-row check in case the task does not exist + LOG_INFO("expected deleted single row", K(delete_rows), K(task)); + }// during the upgrade process, it is possible that insert_rows is 0 and delete_rows is 1.
+ return ret; +} + +int ObLSReplicaTaskTableOperator::get_task_schedule_time_( + common::ObMySQLTransaction& trans, + const ObDRTaskTableUpdateTask &task, + int64_t &schedule_time) +{ + int ret = OB_SUCCESS; + schedule_time = 0; + uint64_t sql_tenant_id = gen_meta_tenant_id(task.get_tenant_id()); + char task_id_to_set[OB_TRACE_STAT_BUFFER_SIZE] = ""; + if (OB_UNLIKELY(!task.is_valid())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), K(task)); + } else if (false == task.get_task_id().to_string(task_id_to_set, sizeof(task_id_to_set))) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("convert task id to string failed", KR(ret), K(task)); + } else { + ObSqlString sql; + SMART_VAR(ObISQLClient::ReadResult, res) { + sqlclient::ObMySQLResult* result = nullptr; + if (OB_FAIL(sql.assign_fmt("SELECT time_to_usec(schedule_time) AS schedule_time FROM %s WHERE " + "tenant_id = %lu AND ls_id = %lu AND task_type = '%s' AND task_id = '%s'", + share::OB_ALL_LS_REPLICA_TASK_TNAME, + task.get_tenant_id(), + task.get_ls_id().id(), + ob_disaster_recovery_task_type_strs(task.get_task_type()), + task_id_to_set))) { + LOG_WARN("fail to assign sql", KR(ret), K(task_id_to_set), K(task)); + } else if (OB_FAIL(trans.read(res, sql_tenant_id, sql.ptr()))) { + LOG_WARN("execute sql failed", KR(ret), K(sql_tenant_id), K(sql)); + } else if (OB_ISNULL(result = res.get_result())) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("get mysql result failed", KR(ret), K(sql)); + } else if (OB_FAIL(result->next())) { + if (OB_ITER_END == ret) { + ret = OB_ENTRY_NOT_EXIST; + } + LOG_WARN("fail to get next result", KR(ret), K(sql)); + } else { + EXTRACT_INT_FIELD_MYSQL(*result, "schedule_time", schedule_time, int64_t); + int tmp_ret = OB_SUCCESS; + if (OB_SUCC(ret) && (OB_ITER_END != (tmp_ret = result->next()))) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("get more row than one", KR(ret), KR(tmp_ret), K(sql)); + } + } + } + } + return ret; +} + +int ObLSReplicaTaskTableOperator::get_task_info_for_cancel( + common::ObISQLClient &sql_proxy, + const uint64_t tenant_id, + const share::ObTaskId &task_id, + common::ObAddr &task_execute_server, + share::ObLSID &ls_id) +{ + int ret = OB_SUCCESS; + task_execute_server.reset(); + ls_id.reset(); + int64_t ls_id_res = share::ObLSID::INVALID_LS_ID; + common::ObString server_ip; + int64_t server_port = OB_INVALID_INDEX; + char task_id_to_set[OB_TRACE_STAT_BUFFER_SIZE] = ""; + if (OB_UNLIKELY(OB_INVALID_TENANT_ID == tenant_id || !task_id.is_valid())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), K(tenant_id), K(task_id)); + } else if (false == task_id.to_string(task_id_to_set, sizeof(task_id_to_set))) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("convert task id to string failed", KR(ret), K(task_id)); + } else { + uint64_t sql_tenant_id = gen_meta_tenant_id(tenant_id); + ObSqlString sql; + SMART_VAR(ObISQLClient::ReadResult, res) { + sqlclient::ObMySQLResult* result = nullptr; + if (OB_FAIL(sql.assign_fmt("SELECT ls_id, task_exec_svr_ip, task_exec_svr_port FROM %s WHERE " + "tenant_id = %lu AND task_id = '%s'", share::OB_ALL_LS_REPLICA_TASK_TNAME, tenant_id, task_id_to_set))) { + LOG_WARN("fail to assign sql", KR(ret), K(task_id_to_set), K(tenant_id)); + } else if (OB_FAIL(sql_proxy.read(res, sql_tenant_id, sql.ptr()))) { + LOG_WARN("execute sql failed", KR(ret), K(sql_tenant_id), K(sql)); + } else if (OB_ISNULL(result = res.get_result())) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("get mysql result failed", KR(ret), K(sql)); + } else if (OB_FAIL(result->next())) { + if 
(OB_ITER_END == ret) { + ret = OB_ENTRY_NOT_EXIST; + } + LOG_WARN("fail to get next result", KR(ret), K(sql)); + } else { + EXTRACT_INT_FIELD_MYSQL(*result, "ls_id", ls_id_res, int64_t); + (void)GET_COL_IGNORE_NULL(result->get_varchar, "task_exec_svr_ip", server_ip); + (void)GET_COL_IGNORE_NULL(result->get_int, "task_exec_svr_port", server_port); + if (OB_FAIL(ret)) { + } else if (false == task_execute_server.set_ip_addr(server_ip, static_cast(server_port))) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("invalid server address", K(server_ip), K(server_port)); + } else if (OB_UNLIKELY(!task_execute_server.is_valid())) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("invalid task_execute_server", KR(ret), K(task_execute_server)); + } else { + ls_id = ls_id_res; + } + int tmp_ret = OB_SUCCESS; + if (OB_SUCC(ret) && (OB_ITER_END != (tmp_ret = result->next()))) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("get more row than one", KR(ret), KR(tmp_ret), K(sql)); + } + } + } + } + return ret; +} + +} // end namespace rootserver +} // end namespace oceanbase \ No newline at end of file diff --git a/src/rootserver/ob_disaster_recovery_task_table_operator.h b/src/rootserver/ob_disaster_recovery_task_table_operator.h new file mode 100644 index 000000000..888d5e826 --- /dev/null +++ b/src/rootserver/ob_disaster_recovery_task_table_operator.h @@ -0,0 +1,87 @@ +/** + * Copyright (c) 2021 OceanBase + * OceanBase CE is licensed under Mulan PubL v2. + * You can use this software according to the terms and conditions of the Mulan PubL v2. + * You may obtain a copy of Mulan PubL v2 at: + * http://license.coscl.org.cn/MulanPubL-2.0 + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PubL v2 for more details. + */ +#ifndef OCEANBASE_ROOTSERVER_OB_DISASTER_RECOVERY_TASK_TABLE_OPERATOR_H_ +#define OCEANBASE_ROOTSERVER_OB_DISASTER_RECOVERY_TASK_TABLE_OPERATOR_H_ +#include "lib/mysqlclient/ob_mysql_proxy.h" +#include "lib/ob_define.h" // for ObTaskId ObReplicaType .. 
+#include "lib/string/ob_string.h" // for ObString +#include "ob_disaster_recovery_task.h" // for ObDRTaskPriority +#include "ob_disaster_recovery_task_table_updater.h" // for ObDRTaskTableUpdateTask +#include "share/ob_ls_id.h" // for ObLSID + +namespace oceanbase +{ +namespace rootserver +{ + +class ObLSReplicaTaskTableOperator +{ + OB_UNIS_VERSION(1); +public: + ObLSReplicaTaskTableOperator() {} + virtual ~ObLSReplicaTaskTableOperator() {} + + // delete the task identified by tenant_id, ls_id, task_type and task_id from __all_ls_replica_task + // @params[in] trans, trans proxy to use + // @params[in] tenant_id, which tenant's task to delete + // @params[in] ls_id, which log stream's task to delete + // @params[in] task_type, type of target task + // @params[in] task_id, task_id of target task + // @params[out] affected_rows, number of deleted rows + int delete_task( + common::ObMySQLTransaction &trans, + const uint64_t tenant_id, + const share::ObLSID& ls_id, + const ObDRTaskType& task_type, + const share::ObTaskId& task_id, + int64_t &affected_rows); + + // insert task into __all_ls_replica_task + // @params[in] sql_proxy, sql client to use + // @params[in] task, target task to insert + int insert_task( + common::ObISQLClient &sql_proxy, + const ObDRTask &task); + + // read task info and delete task from __all_ls_replica_task, insert task into __all_ls_replica_task_history + // @params[in] trans, trans proxy to use + // @params[in] task, contains the info of the task to finish + int finish_task( + common::ObMySQLTransaction& trans, + const ObDRTaskTableUpdateTask &task); + + // read task info for canceling a replica task + // @params[in] sql_proxy, sql client to use + // @params[in] tenant_id, which tenant's task to get + // @params[in] task_id, task_id of the task to get + // @params[out] task_execute_server, which server the task executes on + // @params[out] ls_id, which ls this task belongs to + int get_task_info_for_cancel( + common::ObISQLClient &sql_proxy, + const uint64_t tenant_id, + const share::ObTaskId &task_id, + common::ObAddr &task_execute_server, + share::ObLSID &ls_id); + +private: + + int get_task_schedule_time_( + common::ObMySQLTransaction& trans, + const ObDRTaskTableUpdateTask &task, + int64_t &schedule_time); + +private: + DISALLOW_COPY_AND_ASSIGN(ObLSReplicaTaskTableOperator); +}; + +} // end namespace rootserver +} // end namespace oceanbase +#endif // OCEANBASE_ROOTSERVER_OB_DISASTER_RECOVERY_TASK_TABLE_OPERATOR_H_ diff --git a/src/rootserver/ob_disaster_recovery_task_table_updater.cpp b/src/rootserver/ob_disaster_recovery_task_table_updater.cpp index 32fd9cd9c..029564a04 100644 --- a/src/rootserver/ob_disaster_recovery_task_table_updater.cpp +++ b/src/rootserver/ob_disaster_recovery_task_table_updater.cpp @@ -16,6 +16,7 @@ #include "share/ob_define.h" #include "share/inner_table/ob_inner_table_schema_constants.h" // for OB_ALL_LS_REPLICA_TASK_TNAME +#include "ob_disaster_recovery_task_table_operator.h" //for ObLSReplicaTaskTableOperator #include "rootserver/ob_disaster_recovery_task_mgr.h" // for ObDRTaskMgr #include "share/schema/ob_multi_version_schema_service.h" // for GSCHEMASERVICE @@ -271,12 +272,9 @@ int ObDRTaskTableUpdater::process_task_( int ret = OB_SUCCESS; DEBUG_SYNC(BEFORE_DELETE_DRTASK_FROM_INNER_TABLE); common::ObMySQLTransaction trans; - int64_t affected_rows = 0; const uint64_t sql_tenant_id = gen_meta_tenant_id(task.get_tenant_id()); - char task_id_to_set[OB_TRACE_STAT_BUFFER_SIZE] = ""; - ObSqlString sql; bool has_dropped = false; - + ObLSReplicaTaskTableOperator task_operator; if (OB_FAIL(check_inner_stat_())) {
LOG_WARN("fail to check inner stat", KR(ret), K_(inited), K_(stopped)); } else if (OB_ISNULL(sql_proxy_) || OB_ISNULL(task_mgr_)) { @@ -285,9 +283,6 @@ int ObDRTaskTableUpdater::process_task_( } else if (!task.is_valid()) { ret = OB_INVALID_ARGUMENT; LOG_WARN("invalid argument", KR(ret), K(task)); - } else if (false == task.get_task_id().to_string(task_id_to_set, sizeof(task_id_to_set))) { - ret = OB_INVALID_ARGUMENT; - LOG_WARN("convert task id to string failed", KR(ret), "task_id", task.get_task_id()); } else if (OB_UNLIKELY(!is_valid_tenant_id(task.get_tenant_id()))) { ret = OB_INVALID_ARGUMENT; LOG_WARN("invalid argument", KR(ret), "tenant_id", task.get_tenant_id()); @@ -298,22 +293,8 @@ int ObDRTaskTableUpdater::process_task_( if (has_dropped) { } else if (OB_FAIL(trans.start(sql_proxy_, sql_tenant_id))) { LOG_WARN("start transaction failed", KR(ret), K(sql_tenant_id)); - } else { - if (OB_FAIL(sql.assign_fmt("DELETE FROM %s WHERE tenant_id = %lu AND ls_id = %lu " - "AND task_type = '%s' AND task_id = '%s'", - share::OB_ALL_LS_REPLICA_TASK_TNAME, - task.get_tenant_id(), - task.get_ls_id().id(), - ob_disaster_recovery_task_type_strs(task.get_task_type()), - task_id_to_set))) { - LOG_WARN("assign sql string failed", KR(ret), K(task)); - } else if (OB_FAIL(sql_proxy_->write(sql_tenant_id, sql.ptr(), affected_rows))) { - LOG_WARN("execute sql failed", KR(ret), "sql", sql.ptr(), K(task), K(sql_tenant_id)); - } else if (!is_single_row(affected_rows)) { - // ignore affected row check for task not exist - LOG_INFO("expected deleted single row", - K(affected_rows), K(sql), K(task), K(sql_tenant_id)); - } + } else if (OB_FAIL(task_operator.finish_task(trans, task))) { + LOG_WARN("task_operator get_task failed", KR(ret), K(task)); } if (FAILEDx(task_mgr_->do_cleaning( task.get_task_id(), @@ -325,7 +306,7 @@ int ObDRTaskTableUpdater::process_task_( LOG_WARN("fail to clean task info inside memory", KR(ret), K(task)); } else { LOG_INFO("success to delete row from ls disaster task table and do cleaning", - K(affected_rows), K(sql), K(task), K(sql_tenant_id)); + K(task), K(sql_tenant_id)); } if (trans.is_started()) { int trans_ret = trans.end(OB_SUCCESS == ret); diff --git a/src/rootserver/ob_disaster_recovery_worker.cpp b/src/rootserver/ob_disaster_recovery_worker.cpp index b5313b246..678796cc0 100755 --- a/src/rootserver/ob_disaster_recovery_worker.cpp +++ b/src/rootserver/ob_disaster_recovery_worker.cpp @@ -29,6 +29,7 @@ #include "storage/ls/ob_ls.h" #include "storage/tx_storage/ob_ls_handle.h" #include "observer/ob_server_struct.h" +#include "ob_disaster_recovery_task_table_operator.h" #include "rootserver/ob_disaster_recovery_task.h" #include "rootserver/ob_disaster_recovery_info.h" #include "rootserver/ob_disaster_recovery_task_mgr.h" @@ -2162,6 +2163,989 @@ int ObDRWorker::try_ls_disaster_recovery( return ret; } +int ObDRWorker::do_add_ls_replica_task( + const obrpc::ObAdminAlterLSReplicaArg &arg) +{ + int ret = OB_SUCCESS; + if (OB_UNLIKELY(!inited_)) { + ret = OB_NOT_INIT; + LOG_WARN("DRWorker not init", KR(ret), K(arg)); + } else if (OB_UNLIKELY(!arg.is_valid())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), K(arg)); + } else { + DRLSInfo dr_ls_info(gen_user_tenant_id(arg.get_tenant_id()), + zone_mgr_, schema_service_); + ObAddLSReplicaTask add_replica_task; + if (OB_FAIL(check_and_init_info_for_alter_ls_(arg, dr_ls_info))) { + LOG_WARN("fail to check and init info for alter ls", KR(ret), K(arg), K(dr_ls_info)); + } else if (OB_FAIL(build_add_replica_task_(arg, 
dr_ls_info, add_replica_task))) { + LOG_WARN("fail to build add replica task parameters", KR(ret), K(arg), K(dr_ls_info)); + } else if (OB_FAIL(add_task_in_queue_and_execute_(add_replica_task))) { + LOG_WARN("failed to add task in schedule list", KR(ret), K(add_replica_task)); + } + } + FLOG_INFO("ObDRWorker do add ls replica task", KR(ret), K(arg)); + return ret; +} + +int ObDRWorker::do_remove_ls_replica_task( + const obrpc::ObAdminAlterLSReplicaArg &arg) +{ + int ret = OB_SUCCESS; + if (OB_UNLIKELY(!inited_)) { + ret = OB_NOT_INIT; + LOG_WARN("DRWorker not init", KR(ret), K(arg)); + } else if (OB_UNLIKELY(!arg.is_valid())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), K(arg)); + } else { + DRLSInfo dr_ls_info(gen_user_tenant_id(arg.get_tenant_id()), + zone_mgr_, schema_service_); + ObRemoveLSReplicaTask remove_replica_task; + if (OB_FAIL(check_and_init_info_for_alter_ls_(arg, dr_ls_info))) { + LOG_WARN("fail to check and init info for alter ls", KR(ret), K(arg), K(dr_ls_info)); + } else if (OB_FAIL(build_remove_replica_task_(arg, dr_ls_info, remove_replica_task))) { + LOG_WARN("fail to build remove replica task parameters", KR(ret), K(arg), K(dr_ls_info)); + } else if (OB_FAIL(add_task_in_queue_and_execute_(remove_replica_task))) { + LOG_WARN("failed to add task in schedule list", KR(ret), K(remove_replica_task)); + } + } + FLOG_INFO("ObDRWorker do remove ls replica task", KR(ret), K(arg)); + return ret; +} + +int ObDRWorker::do_modify_ls_replica_type_task( + const obrpc::ObAdminAlterLSReplicaArg &arg) +{ + int ret = OB_SUCCESS; + if (OB_UNLIKELY(!inited_)) { + ret = OB_NOT_INIT; + LOG_WARN("DRWorker not init", KR(ret), K(arg)); + } else if (OB_UNLIKELY(!arg.is_valid())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), K(arg)); + } else { + DRLSInfo dr_ls_info(gen_user_tenant_id(arg.get_tenant_id()), + zone_mgr_, schema_service_); + ObLSTypeTransformTask modify_replica_type_task; + if (OB_FAIL(check_and_init_info_for_alter_ls_(arg, dr_ls_info))) { + LOG_WARN("fail to check and init info for alter ls", KR(ret), K(arg), K(dr_ls_info)); + } else if (OB_FAIL(build_modify_replica_type_task_(arg, dr_ls_info, modify_replica_type_task))) { + LOG_WARN("fail to build modify replica task parameters", KR(ret), K(arg), K(dr_ls_info)); + } else if (OB_FAIL(add_task_in_queue_and_execute_(modify_replica_type_task))) { + LOG_WARN("failed to add task in schedule list", KR(ret), K(modify_replica_type_task)); + } + } + FLOG_INFO("ObDRWorker do modify ls replica task", KR(ret), K(arg)); + return ret; +} + +int ObDRWorker::do_migrate_ls_replica_task( + const obrpc::ObAdminAlterLSReplicaArg &arg) +{ + int ret = OB_SUCCESS; + if (OB_UNLIKELY(!inited_)) { + ret = OB_NOT_INIT; + LOG_WARN("DRWorker not init", KR(ret), K(arg)); + } else if (OB_UNLIKELY(!arg.is_valid())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), K(arg)); + } else { + DRLSInfo dr_ls_info(gen_user_tenant_id(arg.get_tenant_id()), + zone_mgr_, schema_service_); + ObMigrateLSReplicaTask migrate_replica_task; + if (OB_FAIL(check_and_init_info_for_alter_ls_(arg, dr_ls_info))) { + LOG_WARN("fail to check and init info for alter ls", KR(ret), K(arg), K(dr_ls_info)); + } else if (OB_FAIL(build_migrate_replica_task_(arg, dr_ls_info, migrate_replica_task))) { + LOG_WARN("fail to build migrate replica task", KR(ret), K(arg), K(dr_ls_info)); + } else if (OB_FAIL(add_task_in_queue_and_execute_(migrate_replica_task))) { + LOG_WARN("failed to add task in schedule list", KR(ret), 
K(migrate_replica_task)); + } + } + FLOG_INFO("ObDRWorker do migrate ls replica task", KR(ret), K(arg)); + return ret; +} + +int ObDRWorker::do_modify_ls_paxos_replica_num_task( + const obrpc::ObAdminAlterLSReplicaArg &arg) +{ + int ret = OB_SUCCESS; + if (OB_UNLIKELY(!inited_)) { + ret = OB_NOT_INIT; + LOG_WARN("DRWorker not init", KR(ret), K(arg)); + } else if (OB_UNLIKELY(!arg.is_valid())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), K(arg)); + } else { + DRLSInfo dr_ls_info(gen_user_tenant_id(arg.get_tenant_id()), + zone_mgr_, schema_service_); + ObLSModifyPaxosReplicaNumberTask modify_paxos_replica_number_task; + if (OB_FAIL(check_and_init_info_for_alter_ls_(arg, dr_ls_info))) { + LOG_WARN("fail to check and init info for alter ls", KR(ret), K(arg), K(dr_ls_info)); + } else if (OB_FAIL(build_modify_paxos_replica_num_task_(arg, dr_ls_info, modify_paxos_replica_number_task))) { + LOG_WARN("fail to build modify paxos_replica_num task parameters", KR(ret), K(arg), K(dr_ls_info)); + } else if (OB_FAIL(add_task_in_queue_and_execute_(modify_paxos_replica_number_task))) { + LOG_WARN("failed to add task in schedule list", KR(ret), K(modify_paxos_replica_number_task)); + } + } + FLOG_INFO("ObDRWorker do modify ls paxos_replica_num task", KR(ret), K(arg)); + return ret; +} + +int ObDRWorker::do_cancel_ls_replica_task( + const obrpc::ObAdminAlterLSReplicaArg &arg) +{ + int ret = OB_SUCCESS; + common::ObAddr task_execute_server; + share::ObLSID ls_id; + share::ObTaskId task_id; + ObLSCancelReplicaTaskArg rpc_arg; + ObLSReplicaTaskTableOperator task_table_operator; + if (OB_UNLIKELY(!inited_)) { + ret = OB_NOT_INIT; + LOG_WARN("DRWorker not init", KR(ret), K(arg)); + } else if (OB_UNLIKELY(!arg.is_valid())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), K(arg)); + } else if (OB_ISNULL(rpc_proxy_) || OB_ISNULL(sql_proxy_)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("some ptr is null", KR(ret), KP(rpc_proxy_), KP(sql_proxy_)); + } else if (OB_FAIL(task_id.set(arg.get_task_id().ptr()))) { + LOG_WARN("fail to set task_id", KR(ret), K(arg)); + } else if (OB_FAIL(task_table_operator.get_task_info_for_cancel( + *sql_proxy_, arg.get_tenant_id(), task_id, task_execute_server, ls_id))) { + if (OB_ENTRY_NOT_EXIST == ret) { + LOG_USER_ERROR(OB_ENTRY_NOT_EXIST, "Task not exist"); + } + LOG_WARN("get task info failed", KR(ret), K(task_id), K(arg)); + } else if (OB_FAIL(rpc_arg.init(task_id, ls_id, arg.get_tenant_id()))) { + LOG_WARN("fail to init arg", KR(ret), K(task_id), K(ls_id), K(arg)); + } else if (OB_FAIL(rpc_proxy_->to(task_execute_server).by(arg.get_tenant_id()).timeout(GCONF.rpc_timeout) + .ls_cancel_replica_task(rpc_arg))) { + if (OB_ENTRY_NOT_EXIST == ret) { + LOG_USER_ERROR(OB_ENTRY_NOT_EXIST, "Task not exist"); + } + LOG_WARN("fail to execute cancel", + KR(ret), K(arg), K(rpc_arg), K(task_execute_server), K(ls_id)); + } + FLOG_INFO("ObDRWorker do cancel ls replica task over", KR(ret), K(arg)); + return ret; +} + +int ObDRWorker::add_task_in_queue_and_execute_(const ObDRTask &task) +{ + int ret = OB_SUCCESS; + FLOG_INFO("add task in schedule list and execute", K(task)); + if (OB_UNLIKELY(!inited_)) { + ret = OB_NOT_INIT; + LOG_WARN("DRWorker not init", KR(ret)); + } else if (OB_UNLIKELY(!task.is_valid())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), K(task)); + } else if (OB_ISNULL(disaster_recovery_task_mgr_)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("disaster_recovery_task_mgr_ null", KR(ret), KP(disaster_recovery_task_mgr_)); 
+ } else if (OB_FAIL(disaster_recovery_task_mgr_->add_task_in_queue_and_execute(task))) { + if (OB_ENTRY_EXIST == ret) { + LOG_USER_ERROR(OB_ENTRY_EXIST, "LS has task executing, current operation is not allowed"); + LOG_WARN("task already exists in queue", KR(ret), K(task)); + } else { + LOG_WARN("push task into schedule list failed, unknown error", KR(ret), K(task)); + } + } + LOG_INFO("task has been pushed into queue and executed", KR(ret), K(task)); + return ret; +} + +int ObDRWorker::check_and_init_info_for_alter_ls_( + const obrpc::ObAdminAlterLSReplicaArg& arg, + DRLSInfo& dr_ls_info) +{ + int ret = OB_SUCCESS; + share::ObLSInfo ls_info; + share::ObLSStatusInfo ls_status_info; + const share::ObLSReplica *leader_replica = nullptr; + if (OB_UNLIKELY(!inited_)) { + ret = OB_NOT_INIT; + LOG_WARN("DRWorker not init", KR(ret)); + } else if (OB_UNLIKELY(!arg.is_valid())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), K(arg)); + } else if (OB_FAIL(check_ls_exist_and_get_ls_info_(arg.get_ls_id(), + arg.get_tenant_id(), ls_info, ls_status_info))) { + LOG_WARN("fail to check tenant ls", KR(ret), K(arg)); + } else if (OB_FAIL(ls_info.find_leader(leader_replica))) { + LOG_WARN("fail to find leader", KR(ret), K(ls_info), K(arg)); + if (OB_ENTRY_NOT_EXIST == ret) { + ret = OB_LEADER_NOT_EXIST; + LOG_USER_ERROR(OB_LEADER_NOT_EXIST); + } + } else if (OB_FAIL(dr_ls_info.init())) { + LOG_WARN("fail to init dr log stream info", KR(ret), K(dr_ls_info)); + } else if (OB_FAIL(dr_ls_info.build_disaster_ls_info( + ls_info, ls_status_info, false/*filter_readonly_replicas_with_flag*/))) { + LOG_WARN("fail to generate dr log stream info", KR(ret), K(ls_info), K(ls_status_info)); + } + return ret; +} + +int ObDRWorker::check_ls_exist_and_get_ls_info_( + const share::ObLSID& ls_id, + const int64_t tenant_id, + share::ObLSInfo& ls_info, + share::ObLSStatusInfo& ls_status_info) +{ + int ret = OB_SUCCESS; + share::ObLSStatusOperator ls_status_operator; + ls_info.reset(); + ls_status_info.reset(); + if (OB_UNLIKELY(!inited_)) { + ret = OB_NOT_INIT; + LOG_WARN("DRWorker not init", KR(ret)); + } else if (OB_UNLIKELY(!is_valid_tenant_id(tenant_id)) || OB_UNLIKELY(!ls_id.is_valid())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), K(tenant_id), K(ls_id)); + } else if (!ls_id.is_valid_with_tenant(tenant_id)) { + ret = OB_ENTRY_NOT_EXIST; + LOG_USER_ERROR(OB_ENTRY_NOT_EXIST, "LS does not exist"); + LOG_WARN("check ls_id is_valid_with_tenant failed", KR(ret), K(tenant_id), K(ls_id)); + } else if (OB_ISNULL(lst_operator_)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("lst_operator_ is null", KR(ret), KP(lst_operator_)); + } else if (OB_FAIL(lst_operator_->get(GCONF.cluster_id, tenant_id, ls_id, + share::ObLSTable::COMPOSITE_MODE, ls_info))) { + LOG_WARN("get ls info failed", KR(ret), K(tenant_id), K(ls_id)); + } else if (ls_info.get_replicas().count() == 0) { + ret = OB_ENTRY_NOT_EXIST; + LOG_USER_ERROR(OB_ENTRY_NOT_EXIST, "LS does not exist"); + LOG_WARN("ls_info.get_replicas().count() == 0", KR(ret), K(tenant_id), K(ls_id), K(ls_info)); + } else if (OB_ISNULL(sql_proxy_)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("sql_proxy_ is null", KR(ret), KP(sql_proxy_)); + } else if (OB_FAIL(ls_status_operator.get_ls_status_info( + tenant_id, ls_id, ls_status_info, *sql_proxy_))) { + if (OB_ENTRY_NOT_EXIST == ret) { + LOG_USER_ERROR(OB_ENTRY_NOT_EXIST, "LS does not exist"); + } + LOG_WARN("fail to get all ls status", KR(ret), K(tenant_id), K(ls_id), K(ls_info), KP(sql_proxy_)); + } else if
(ls_status_info.ls_is_creating() || ls_status_info.ls_is_create_abort()) { + ret = OB_OP_NOT_ALLOW; + LOG_USER_ERROR(OB_OP_NOT_ALLOW, "LS is in CREATING or CREATE_ABORT status, current operation is"); + LOG_WARN("LS is creating, current operation is", KR(ret), K(tenant_id), K(ls_id), K(ls_info)); + } + return ret; +} + +int ObDRWorker::check_unit_exist_and_get_unit_( + const common::ObAddr &task_execute_server, + const uint64_t tenant_id, + const bool is_migrate_source_valid, + share::ObUnit& unit) +{ + int ret = OB_SUCCESS; + unit.reset(); + ObUnitTableOperator unit_operator; + common::ObArray unit_info_array; + if (OB_UNLIKELY(!inited_)) { + ret = OB_NOT_INIT; + LOG_WARN("DRWorker not init", KR(ret)); + } else if (OB_UNLIKELY(!task_execute_server.is_valid() + || OB_INVALID_TENANT_ID == tenant_id)) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), K(task_execute_server), K(tenant_id)); + } else if (OB_ISNULL(GCTX.sql_proxy_)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("sql_proxy_ is null", KR(ret), KP(GCTX.sql_proxy_)); + } else if (OB_FAIL(unit_operator.init(*GCTX.sql_proxy_))) { + LOG_WARN("unit operator init failed", KR(ret), KP(GCTX.sql_proxy_)); + } else if (OB_FAIL(unit_operator.get_units_by_tenant( + gen_user_tenant_id(tenant_id), unit_info_array))) { + LOG_WARN("fail to get unit info array", KR(ret), K(tenant_id)); + } else { + bool found = false; + for (int64_t i = 0; OB_SUCC(ret) && !found && i < unit_info_array.count(); ++i) { + if ((unit_info_array.at(i).server_ == task_execute_server) + || (is_migrate_source_valid && unit_info_array.at(i).migrate_from_server_ == task_execute_server)) { + if (OB_FAIL(unit.assign(unit_info_array.at(i)))) { + LOG_WARN("fail to assign unit", KR(ret), K(unit_info_array.at(i))); + } else { + found = true; + } + } + } + if (OB_SUCC(ret) && !found) { + ret = OB_ENTRY_NOT_EXIST; + LOG_USER_ERROR(OB_ENTRY_NOT_EXIST, "Tenant has no unit on the server"); + LOG_WARN("this tenant has no unit on the server", + KR(ret), K(tenant_id), K(task_execute_server), K(found)); + } + } + return ret; +} + +int ObDRWorker::check_task_execute_server_status_( + const common::ObAddr &task_execute_server, + const bool need_check_can_migrate_in) +{ + int ret = OB_SUCCESS; + ObServerInfoInTable server_info; + if (OB_UNLIKELY(!inited_)) { + ret = OB_NOT_INIT; + LOG_WARN("DRWorker not init", KR(ret)); + } else if (OB_UNLIKELY(!task_execute_server.is_valid())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), K(task_execute_server)); + } else { + char err_msg[OB_MAX_ERROR_MSG_LEN] = {0}; + char addr_str_buf[128] = {0}; + if (OB_FAIL(task_execute_server.ip_port_to_string(addr_str_buf, 128))) { + LOG_WARN("fail to get server addr string", KR(ret), K(task_execute_server)); + } else if (OB_FAIL(SVR_TRACER.get_server_info(task_execute_server, server_info))) { + LOG_WARN("fail to check server active", KR(ret), K(task_execute_server)); + } else if (!server_info.is_alive()) { + snprintf(err_msg, sizeof(err_msg), + "The task needs to be executed on %s which status is not alive and the current operation is", addr_str_buf); + ret = OB_OP_NOT_ALLOW; + LOG_USER_ERROR(OB_OP_NOT_ALLOW, err_msg); + LOG_WARN("server is not active", KR(ret), K(task_execute_server), K(server_info)); + } else if (need_check_can_migrate_in && !server_info.can_migrate_in()) { + snprintf(err_msg, sizeof(err_msg), + "The task needs to be executed on %s which can not migrate in and the current operation is", addr_str_buf); + ret = OB_OP_NOT_ALLOW; + LOG_USER_ERROR(OB_OP_NOT_ALLOW, 
err_msg); + LOG_WARN("server can not migrate in now", KR(ret), K(task_execute_server), K(server_info)); + } + } + return ret; +} + +int ObDRWorker::get_replica_type_by_leader_( + const common::ObAddr& server_addr, + const DRLSInfo &dr_ls_info, + common::ObReplicaType& replica_type) +{ + // the replica type seen on a non-leader replica may not be right. when removing or modifying a replica, + // a wrong replica type may result in a fatal error, so get it from the leader + int ret = OB_SUCCESS; + replica_type = REPLICA_TYPE_MAX; + common::ObAddr leader_addr; // not used + GlobalLearnerList learner_list; + common::ObMemberList member_list; + if (OB_UNLIKELY(!inited_)) { + ret = OB_NOT_INIT; + LOG_WARN("DRWorker not init", KR(ret)); + } else if (OB_UNLIKELY(!server_addr.is_valid())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), K(server_addr)); + } else if (OB_UNLIKELY(0 >= dr_ls_info.get_member_list_cnt())) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("leader member list has no member", KR(ret), K(dr_ls_info)); + } else if (OB_FAIL(dr_ls_info.get_leader_and_member_list(leader_addr, member_list, learner_list))) { + LOG_WARN("fail to get leader and member list", KR(ret), K(server_addr), K(dr_ls_info)); + } else if (member_list.contains(server_addr)) { + replica_type = REPLICA_TYPE_FULL; + } else if (learner_list.contains(server_addr)) { + replica_type = REPLICA_TYPE_READONLY; + } else { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("fail to find server in leader member list and learner list", + KR(ret), K(server_addr), K(dr_ls_info), K(learner_list), K(member_list)); + } + return ret; +} + +int ObDRWorker::build_add_replica_task_( + const obrpc::ObAdminAlterLSReplicaArg &arg, + const DRLSInfo &dr_ls_info, + ObAddLSReplicaTask &add_replica_task) +{ + int ret = OB_SUCCESS; + share::ObUnit unit; + share::ObLSReplica ls_replica; + if (OB_UNLIKELY(!inited_)) { + ret = OB_NOT_INIT; + LOG_WARN("DRWorker not init", KR(ret)); + } else if (OB_UNLIKELY(!arg.is_valid())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), K(arg)); + } else if (OB_FAIL(dr_ls_info.check_replica_exist_and_get_ls_replica(arg.get_server_addr(), ls_replica))) { + LOG_WARN("fail to check and get replica by server", KR(ret), K(arg)); + } else if (ls_replica.is_valid()) { + ret = OB_ENTRY_EXIST; + LOG_USER_ERROR(OB_ENTRY_EXIST, "Target server already has a replica"); + LOG_WARN("server already has a replica of this type, cannot add more", + KR(ret), K(arg), K(dr_ls_info), K(ls_replica)); + } else if (OB_FAIL(check_unit_exist_and_get_unit_( + arg.get_server_addr(), arg.get_tenant_id(), false/*is_migrate_source_valid*/, unit))) { + LOG_WARN("fail to check unit exist and get unit", KR(ret), K(arg)); + } else if (OB_FAIL(check_task_execute_server_status_(arg.get_server_addr(), true/*need_check_can_migrate_in*/))) { + LOG_WARN("fail to check server status", KR(ret), K(arg), K(dr_ls_info)); + } else if (REPLICA_TYPE_FULL == arg.get_replica_type() + && share::ObLSReplica::DEFAULT_REPLICA_COUNT == dr_ls_info.get_member_list_cnt()) { + ret = OB_OP_NOT_ALLOW; + LOG_USER_ERROR(OB_OP_NOT_ALLOW, "Member list count has reached the limit, alter ls replica is"); + LOG_WARN("number of F replica has reached the limit", KR(ret), K(arg), K(dr_ls_info)); + } else { + share::ObTaskId task_id; + ObDstReplica dst_replica; + ObReplicaMember data_source; + int64_t data_size = 0; + ObReplicaMember force_data_source; + int64_t new_paxos_replica_number = 0; + ObReplicaMember dst_member(arg.get_server_addr(), + ObTimeUtility::current_time(),
arg.get_replica_type()); + if (FALSE_IT(task_id.init(self_addr_))) { + } else if (OB_FAIL(dst_replica.assign(unit.unit_id_, unit.unit_group_id_, unit.zone_, dst_member))) { + LOG_WARN("fail to assign dst replica", KR(ret), K(unit), K(dst_member)); + } else if (OB_FAIL(dr_ls_info.get_default_data_source(data_source, data_size))) { + LOG_WARN("fail to get data_size", KR(ret), K(dr_ls_info)); + } else if (OB_FAIL(check_data_source_available_and_init_(arg, arg.get_replica_type(), dr_ls_info, force_data_source))) { + LOG_WARN("fail to check and get data source", KR(ret), K(arg), K(dr_ls_info)); + } else if (OB_FAIL(check_and_generate_new_paxos_replica_num_( + arg, arg.get_replica_type(), dr_ls_info, new_paxos_replica_number))) { + LOG_WARN("fail to check and generate new paxos replica num", KR(ret), K(arg), K(dr_ls_info)); + } else if (OB_FAIL(add_replica_task.simple_build( + arg.get_tenant_id(), + arg.get_ls_id(), + task_id, + dst_replica, + data_source, + force_data_source, + dr_ls_info.get_paxos_replica_number(), + new_paxos_replica_number))) { + LOG_WARN("fail to build add replica task", KR(ret), K(arg), K(task_id), + K(dst_replica), K(data_source), K(force_data_source), K(dr_ls_info), K(new_paxos_replica_number)); + } + } + return ret; +} + +int ObDRWorker::build_remove_replica_task_( + const obrpc::ObAdminAlterLSReplicaArg &arg, + DRLSInfo &dr_ls_info, + ObRemoveLSReplicaTask &remove_replica_task) +{ + int ret = OB_SUCCESS; + common::ObReplicaType replica_type = REPLICA_TYPE_MAX; + common::ObAddr leader_addr; + ObMember member_to_remove; + if (OB_UNLIKELY(!inited_)) { + ret = OB_NOT_INIT; + LOG_WARN("DRWorker not init", KR(ret)); + } else if (OB_UNLIKELY(!arg.is_valid())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), K(arg)); + } else if (OB_FAIL(dr_ls_info.get_member_by_server(arg.get_server_addr(), member_to_remove))) { + if (OB_ENTRY_NOT_EXIST == ret) { + LOG_USER_ERROR(OB_ENTRY_NOT_EXIST, "Target server does not have a replica of this LS"); + } + LOG_WARN("server does not have a replica of this log stream", KR(ret), K(arg), K(dr_ls_info)); + } else if (OB_FAIL(dr_ls_info.get_leader(leader_addr))) { + LOG_WARN("fail to get leader", KR(ret), K(dr_ls_info)); + } else if (OB_FAIL(check_task_execute_server_status_(leader_addr, false/*need_check_can_migrate_in*/))) { + LOG_WARN("fail to check server status", KR(ret), K(leader_addr), K(dr_ls_info)); + } else if (OB_FAIL(get_replica_type_by_leader_(arg.get_server_addr(), dr_ls_info, replica_type))) { + LOG_WARN("fail to get_replica_type_by_leader", KR(ret), K(dr_ls_info), K(arg)); + } else { + share::ObTaskId task_id; + int64_t new_paxos_replica_number = 0; + bool has_leader = false; + ObReplicaMember remove_member(member_to_remove); + if (FALSE_IT(task_id.init(self_addr_))) { + } else if (OB_FAIL(remove_member.set_replica_type(replica_type))) { + LOG_WARN("fail to set replica type", KR(ret), K(replica_type), K(remove_member)); + } else if (OB_FAIL(check_and_generate_new_paxos_replica_num_( + arg, replica_type, dr_ls_info, new_paxos_replica_number))) { + LOG_WARN("fail to check and generate new paxos replica num", KR(ret), K(arg), K(replica_type), K(dr_ls_info)); + } else if (REPLICA_TYPE_FULL == replica_type + && OB_FAIL(check_majority_for_remove_(arg.get_server_addr(), dr_ls_info, new_paxos_replica_number))) { + LOG_WARN("check provided paxos_replica_num failed.", KR(ret), + K(arg), K(dr_ls_info), K(new_paxos_replica_number)); + } else if (OB_FAIL(remove_replica_task.simple_build( + arg.get_tenant_id(), + 
arg.get_ls_id(), + task_id, + leader_addr, + remove_member, + dr_ls_info.get_paxos_replica_number(), + new_paxos_replica_number, + replica_type))) { + LOG_WARN("fail to build task", KR(ret), K(arg), K(task_id), K(leader_addr), K(remove_member), + K(dr_ls_info), K(new_paxos_replica_number), K(replica_type)); + } + } + return ret; +} + +int ObDRWorker::build_modify_replica_type_task_( + const obrpc::ObAdminAlterLSReplicaArg &arg, + DRLSInfo &dr_ls_info, + ObLSTypeTransformTask &modify_replica_task) +{ + int ret = OB_SUCCESS; + share::ObUnit unit; + share::ObLSReplica ls_replica; + common::ObReplicaType replica_type = REPLICA_TYPE_MAX; + if (OB_UNLIKELY(!inited_)) { + ret = OB_NOT_INIT; + LOG_WARN("DRWorker not init", KR(ret)); + } else if (OB_UNLIKELY(!arg.is_valid())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), K(arg)); + } else if (OB_FAIL(dr_ls_info.check_replica_exist_and_get_ls_replica(arg.get_server_addr(), ls_replica))) { + LOG_WARN("fail to check and get replica by server", KR(ret), K(arg)); + } else if (!ls_replica.is_valid() || (ls_replica.is_valid() && !ls_replica.is_in_service())) { + ret = OB_ENTRY_NOT_EXIST; + LOG_USER_ERROR(OB_ENTRY_NOT_EXIST, "Target server does not have a replica of this LS"); + LOG_WARN("server does not have a replica of this log stream", + KR(ret), K(arg), K(dr_ls_info), K(ls_replica)); + } else if (OB_FAIL(check_unit_exist_and_get_unit_( + arg.get_server_addr(), arg.get_tenant_id(), true/*is_migrate_source_valid*/, unit))) { + LOG_WARN("fail to check unit exist and get unit", KR(ret), K(arg)); + } else if (OB_FAIL(check_task_execute_server_status_(arg.get_server_addr(), false/*need_check_can_migrate_in*/))) { + LOG_WARN("fail to check server status", KR(ret), K(arg), K(dr_ls_info)); + } else if (OB_FAIL(get_replica_type_by_leader_(arg.get_server_addr(), dr_ls_info, replica_type))) { + LOG_WARN("fail to get_replica_type_by_leader", KR(ret), K(dr_ls_info), K(arg)); + } else if (arg.get_replica_type() == replica_type) { + ret = OB_ENTRY_EXIST; + LOG_USER_ERROR(OB_ENTRY_EXIST, "Current replica type is same as the target type, no need to modify"); + LOG_WARN("replica type is the same as the target type, no need to modify type", + KR(ret), K(arg), K(replica_type)); + } else if (REPLICA_TYPE_FULL == arg.get_replica_type() + && share::ObLSReplica::DEFAULT_REPLICA_COUNT == dr_ls_info.get_member_list_cnt()) { + ret = OB_OP_NOT_ALLOW; + LOG_USER_ERROR(OB_OP_NOT_ALLOW, "Member list count has reached the limit, alter ls replica is"); + LOG_WARN("number of F replica has reached the limit", KR(ret), K(replica_type), K(dr_ls_info)); + } else { + bool has_leader = false; + int64_t new_paxos_replica_number = 0; + share::ObTaskId task_id; + ObDstReplica dst_replica; + int64_t data_size = 0; + ObReplicaMember data_source; + ObReplicaMember src_member(ls_replica.get_server(), + ls_replica.get_member_time_us(), + replica_type, + ls_replica.get_memstore_percent()); + ObReplicaMember dst_member(ls_replica.get_server(), + ObTimeUtility::current_time(), + arg.get_replica_type(), + ls_replica.get_memstore_percent()); + if (FALSE_IT(task_id.init(self_addr_))) { + } else if (OB_FAIL(dst_replica.assign(unit.unit_id_, unit.unit_group_id_, + unit.zone_, dst_member))) { + LOG_WARN("fail to assign dst replica", KR(ret), K(unit), K(dst_member)); + } else if (OB_FAIL(check_and_generate_new_paxos_replica_num_( + arg, arg.get_replica_type(), dr_ls_info, new_paxos_replica_number))) { + LOG_WARN("fail to check and generate new paxos replica num", KR(ret), K(arg), 
K(dr_ls_info)); + } else if (REPLICA_TYPE_READONLY == arg.get_replica_type() + && OB_FAIL(check_majority_for_remove_(arg.get_server_addr(), dr_ls_info, new_paxos_replica_number))) { + LOG_WARN("check provided paxos_replica_num failed.", KR(ret), + K(arg), K(dr_ls_info), K(new_paxos_replica_number)); + } else if (OB_FAIL(dr_ls_info.get_default_data_source(data_source, data_size))) { + LOG_WARN("fail to get data_size", KR(ret), K(dr_ls_info)); + } else if (OB_FAIL(modify_replica_task.simple_build( + arg.get_tenant_id(), + arg.get_ls_id(), + task_id, + dst_replica, + src_member, + data_source, + dr_ls_info.get_paxos_replica_number(), + new_paxos_replica_number))) { + LOG_WARN("fail to build type transform task", KR(ret), K(arg), K(task_id), K(dst_replica), + K(src_member), K(data_source), K(dr_ls_info), K(new_paxos_replica_number)); + } + } + return ret; +} + +int ObDRWorker::build_migrate_replica_task_( + const obrpc::ObAdminAlterLSReplicaArg &arg, + const DRLSInfo &dr_ls_info, + ObMigrateLSReplicaTask &migrate_replica_task) +{ + int ret = OB_SUCCESS; + share::ObUnit destination_unit; + common::ObReplicaType replica_type = REPLICA_TYPE_MAX; + share::ObLSReplica desti_ls_replica; + share::ObLSReplica source_ls_replica; + if (OB_UNLIKELY(!inited_)) { + ret = OB_NOT_INIT; + LOG_WARN("DRWorker not init", KR(ret)); + } else if (OB_UNLIKELY(!arg.is_valid())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), K(arg)); + } else if (OB_FAIL(dr_ls_info.check_replica_exist_and_get_ls_replica( + arg.get_server_addr(), source_ls_replica))) { + LOG_WARN("fail to check and get replica by server", KR(ret), K(arg)); + } else if (!source_ls_replica.is_valid() || (source_ls_replica.is_valid() && !source_ls_replica.is_in_service())) { + ret = OB_ENTRY_NOT_EXIST; + LOG_USER_ERROR(OB_ENTRY_NOT_EXIST, "Source server does not have a replica of this LS"); + LOG_WARN("source server does not have a replica of this LS", + KR(ret), K(arg), K(dr_ls_info), K(source_ls_replica)); + } else if (OB_FAIL(dr_ls_info.check_replica_exist_and_get_ls_replica( + arg.get_destination_addr(), desti_ls_replica))) { + LOG_WARN("fail to check and get replica by server", KR(ret), K(arg)); + } else if (desti_ls_replica.is_valid()) { + ret = OB_ENTRY_EXIST; + LOG_USER_ERROR(OB_ENTRY_EXIST, "The destination server already has a replica"); + LOG_WARN("target server already has a replica, no need migrate", KR(ret), K(arg), K(dr_ls_info)); + } else if (OB_FAIL(check_unit_exist_and_get_unit_( + arg.get_destination_addr(), arg.get_tenant_id(), false/*is_migrate_source_valid*/, destination_unit))) { + LOG_WARN("fail to check unit exist and get unit", KR(ret), K(arg)); + } else if (OB_FAIL(check_task_execute_server_status_(arg.get_destination_addr(), true/*need_check_can_migrate_in*/))) { + LOG_WARN("fail to check server status", KR(ret), K(arg), K(dr_ls_info)); + } else if (OB_FAIL(get_replica_type_by_leader_(arg.get_server_addr(), dr_ls_info, replica_type))) { + LOG_WARN("fail to get_replica_type_by_leader", KR(ret), K(dr_ls_info), K(arg)); + } else if (destination_unit.zone_ != source_ls_replica.get_zone()) { + ret = OB_OP_NOT_ALLOW; + LOG_USER_ERROR(OB_OP_NOT_ALLOW, "Migrate replica can only be in the same zone, current operation"); + LOG_WARN("migration replica can only be in the same zone", KR(ret), + K(destination_unit), K(source_ls_replica.get_zone())); + } else if (REPLICA_TYPE_FULL == replica_type + && share::ObLSReplica::DEFAULT_REPLICA_COUNT == dr_ls_info.get_member_list_cnt()) { + ret = OB_OP_NOT_ALLOW; + 
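+ // Note: this is the same DEFAULT_REPLICA_COUNT guard used in build_add_replica_task_ and build_modify_replica_type_task_ above, so no alter ls replica path can push the number of F replicas in the member list past the limit.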
LOG_USER_ERROR(OB_OP_NOT_ALLOW, "Member list count has reached the limit, alter ls replica is"); + LOG_WARN("number of F replica has reached the limit", KR(ret), K(replica_type), K(dr_ls_info)); + } else { + share::ObTaskId task_id; + ObDstReplica dst_replica; + ObReplicaMember data_source; + int64_t data_size = 0; + ObReplicaMember force_data_source; + ObReplicaMember src_member(source_ls_replica.get_server(), + source_ls_replica.get_member_time_us(), + replica_type, + source_ls_replica.get_memstore_percent()); + ObReplicaMember dst_member(arg.get_destination_addr(), + ObTimeUtility::current_time(), + replica_type, + source_ls_replica.get_memstore_percent()); + if (FALSE_IT(task_id.init(self_addr_))) { + } else if (OB_FAIL(dst_replica.assign(destination_unit.unit_id_, destination_unit.unit_group_id_, + source_ls_replica.get_zone(), dst_member))) { + LOG_WARN("fail to assign dst replica", + KR(ret), K(destination_unit), K(dst_member), K(source_ls_replica)); + } else if (OB_FAIL(check_data_source_available_and_init_(arg, replica_type, dr_ls_info, force_data_source))) { + LOG_WARN("fail to check and get data source", KR(ret), K(arg), K(replica_type), K(dr_ls_info)); + } else if (OB_FAIL(dr_ls_info.get_default_data_source(data_source, data_size))) { + LOG_WARN("fail to get data_size", KR(ret), K(dr_ls_info)); + } else if (OB_FAIL(migrate_replica_task.simple_build( + arg.get_tenant_id(), + arg.get_ls_id(), + task_id, + dst_replica, + src_member, + data_source, + force_data_source, + dr_ls_info.get_paxos_replica_number()))) { + LOG_WARN("fail to build migrate task", KR(ret), K(arg), K(task_id), K(dst_replica), + K(src_member), K(data_source), K(force_data_source), K(dr_ls_info)); + } + } + return ret; +} + +int ObDRWorker::build_modify_paxos_replica_num_task_( + const obrpc::ObAdminAlterLSReplicaArg &arg, + DRLSInfo &dr_ls_info, + ObLSModifyPaxosReplicaNumberTask &modify_paxos_replica_number_task) +{ + int ret = OB_SUCCESS; + share::ObTaskId task_id; + common::ObAddr leader_addr; + GlobalLearnerList learner_list; + common::ObMemberList member_list; + if (OB_UNLIKELY(!inited_)) { + ret = OB_NOT_INIT; + LOG_WARN("DRWorker not init", KR(ret)); + } else if (OB_UNLIKELY(!arg.is_valid())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), K(arg)); + } else if (FALSE_IT(task_id.init(self_addr_))) { + } else if (OB_FAIL(dr_ls_info.get_leader_and_member_list(leader_addr, member_list, learner_list))) { + LOG_WARN("fail to get leader and member list", KR(ret), K(arg), K(dr_ls_info)); + } else if (dr_ls_info.get_paxos_replica_number() <= arg.get_paxos_replica_num()) { + ret = OB_INVALID_ARGUMENT; + LOG_USER_ERROR(OB_INVALID_ARGUMENT, "paxos_replica_num which should be less than current paxos_replica_num"); + LOG_WARN("paxos_replica_num invalid", KR(ret), K(arg), K(dr_ls_info)); + } else if (member_list.get_member_number() < majority(arg.get_paxos_replica_num())) { + ret = OB_INVALID_ARGUMENT; + LOG_USER_ERROR(OB_INVALID_ARGUMENT, "paxos_replica_num which should be satisfy majority"); + LOG_WARN("number of replicas and paxos_replica_num do not satisfy majority", + KR(ret), K(arg), K(dr_ls_info), K(member_list)); + } else if (member_list.get_member_number() > arg.get_paxos_replica_num()) { + ret = OB_INVALID_ARGUMENT; + LOG_USER_ERROR(OB_INVALID_ARGUMENT, "paxos_replica_num which should be greater or equal with member list count"); + LOG_WARN("member_list.get_member_number() > arg.get_paxos_replica_num()", + KR(ret), K(arg), K(dr_ls_info), K(member_list)); + } else if 
(OB_FAIL(check_task_execute_server_status_(leader_addr, false/*need_check_can_migrate_in*/))) { + LOG_WARN("fail to check server status", KR(ret), K(arg), K(dr_ls_info)); + } else if (OB_FAIL(modify_paxos_replica_number_task.simple_build( + arg.get_tenant_id(), + arg.get_ls_id(), + task_id, + leader_addr, + dr_ls_info.get_paxos_replica_number(), + arg.get_paxos_replica_num(), + member_list))) { + LOG_WARN("fail to build a modify paxos replica number task", + KR(ret), K(arg), K(task_id), K(leader_addr), K(dr_ls_info), K(member_list)); + } + return ret; +} + +int ObDRWorker::check_data_source_available_and_init_( + const obrpc::ObAdminAlterLSReplicaArg &arg, + const common::ObReplicaType &replica_type, + const DRLSInfo &dr_ls_info, + ObReplicaMember &data_source) +{ + /* + check if data_source is available, check the following conditions: + 1. replica exist and in service + 2. ls replica not restore failed + 3. F replica can be used as the data source of R replica and F replica, + R replica can only used as the data source of R replica. + 4. server status is alive and not stopped + */ + int ret = OB_SUCCESS; + data_source.reset(); + share::ObLSReplica ls_replica; + ObServerInfoInTable server_info; + common::ObReplicaType provide_replica_type = REPLICA_TYPE_MAX; + if (OB_UNLIKELY(!inited_)) { + ret = OB_NOT_INIT; + LOG_WARN("DRWorker not init", KR(ret)); + } else if (!arg.get_data_source().is_valid()) { + // passed + LOG_INFO("data_source is not valid", KR(ret), K(arg)); + } else if (OB_UNLIKELY(!arg.is_valid() + || OB_UNLIKELY(replica_type != REPLICA_TYPE_FULL && replica_type != REPLICA_TYPE_READONLY))) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), K(arg), K(replica_type)); + } else { + ObDataSourceCandidateChecker type_checker(replica_type); + if (OB_FAIL(dr_ls_info.check_replica_exist_and_get_ls_replica(arg.get_data_source(), ls_replica))) { + LOG_WARN("fail to get ls replica", KR(ret), K(dr_ls_info)); + } else if (!ls_replica.is_valid() || (ls_replica.is_valid() && !ls_replica.is_in_service())) { + ret = OB_OP_NOT_ALLOW; + LOG_USER_ERROR(OB_OP_NOT_ALLOW, "The data source server has no replica, which is"); + LOG_WARN("source server has no replica", KR(ret), K(arg), K(ls_replica)); + } else if (OB_FAIL(get_replica_type_by_leader_(arg.get_data_source(), dr_ls_info, provide_replica_type))) { + LOG_WARN("get replica type by leader error", KR(ret), K(arg), K(dr_ls_info)); + } else if (ls_replica.get_restore_status().is_failed()) { + ret = OB_OP_NOT_ALLOW; + LOG_USER_ERROR(OB_OP_NOT_ALLOW, "Data source replica restore or clone failed, which is"); + LOG_WARN("ls replica restore failed", KR(ret), K(arg), K(ls_replica)); + } else if (!type_checker.is_candidate(provide_replica_type)) { + ret = OB_OP_NOT_ALLOW; + LOG_USER_ERROR(OB_OP_NOT_ALLOW, "R replica is not supported as the source of F replica, which is"); + LOG_WARN("type_checker failed", KR(ret), K(arg), K(provide_replica_type)); + } else if (OB_FAIL(SVR_TRACER.get_server_info(arg.get_data_source(), server_info))) { + LOG_WARN("fail to get server info", KR(ret), K(arg)); + } else if (!server_info.is_alive()) { + ret = OB_OP_NOT_ALLOW; + LOG_USER_ERROR(OB_OP_NOT_ALLOW, "The data source server is not alive, which is"); + LOG_WARN("data source server is not alive", KR(ret), K(arg), K(server_info)); + } else if (server_info.is_stopped()) { + ret = OB_OP_NOT_ALLOW; + LOG_USER_ERROR(OB_OP_NOT_ALLOW, "The data source server is stopped, which is"); + LOG_WARN("data source server is stopped", KR(ret), K(arg), K(server_info)); + 
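+ // When the final branch below is reached, the data source has passed all of the checks above: it exists and is in service, restore/clone did not fail, its type is compatible (an F replica can feed either an F or an R replica, an R replica only another R), and its server is alive and not stopped.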
} else { + data_source = ObReplicaMember(ls_replica.get_server(), + ls_replica.get_member_time_us(), + provide_replica_type, // use the replica type confirmed by the leader + ls_replica.get_memstore_percent()); + } + } + return ret; +} + +int ObDRWorker::check_for_alter_full_replica_( + const int64_t member_list_count, + const int64_t new_p) +{ + int ret = OB_SUCCESS; + if (OB_UNLIKELY(!inited_)) { + ret = OB_NOT_INIT; + LOG_WARN("DRWorker not init", KR(ret)); + } else if (OB_UNLIKELY(member_list_count <= 0 || new_p <= 0)) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), K(member_list_count), K(new_p)); + } else if (member_list_count < majority(new_p)) { + ret = OB_INVALID_ARGUMENT; + LOG_USER_ERROR(OB_INVALID_ARGUMENT, "paxos_replica_num which does not satisfy majority"); + LOG_WARN("paxos_replica_num is wrong", KR(ret), K(member_list_count), K(new_p)); + } else if (member_list_count > new_p) { + ret = OB_INVALID_ARGUMENT; + LOG_USER_ERROR(OB_INVALID_ARGUMENT, "paxos_replica_num which should be greater or equal with member list count"); + LOG_WARN("paxos_replica_num is wrong", KR(ret), K(member_list_count), K(new_p)); + } + return ret; +} + +int ObDRWorker::check_and_generate_new_paxos_replica_num_( + const obrpc::ObAdminAlterLSReplicaArg &arg, + const common::ObReplicaType &replica_type, + const DRLSInfo &dr_ls_info, + int64_t &new_p) +{ + /* + If the user provides paxos_replica_num, use the user-provided paxos_replica_num, + otherwise keep paxos_replica_num unchanged. + The change range of paxos_replica_num is limited to 1. + If the change is too large, an error will be reported to the user. + Different member_change_types are generated based on the task type and the replica type being operated on. + Special: modify R->F is equivalent to MEMBER_CHANGE_ADD, modify F->R is equivalent to MEMBER_CHANGE_SUB + */ + int ret = OB_SUCCESS; + int64_t curr_p = dr_ls_info.get_paxos_replica_number(); + int64_t provided_p = arg.get_paxos_replica_num(); + int64_t member_list_count = dr_ls_info.get_member_list_cnt(); + new_p = provided_p > 0 ?
provided_p : curr_p; + obrpc::ObAlterLSReplicaTaskType task_type = arg.get_alter_task_type(); + MemberChangeType member_change_type = MEMBER_CHANGE_NOP; + if (OB_UNLIKELY(!inited_)) { + ret = OB_NOT_INIT; + LOG_WARN("DRWorker not init", KR(ret)); + } else if (OB_UNLIKELY(!arg.is_valid() + || (REPLICA_TYPE_FULL != replica_type && REPLICA_TYPE_READONLY != replica_type))) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), K(arg), K(replica_type)); + } else if (std::abs(new_p - curr_p) > 1) { + ret = OB_INVALID_ARGUMENT; + LOG_USER_ERROR(OB_INVALID_ARGUMENT, "paxos_replica_num which change cannot be greater than 1"); + LOG_WARN("paxos_replica_num is wrong", KR(ret), K(arg), K(new_p), K(curr_p)); + } else if (task_type.is_add_task()) { + if (REPLICA_TYPE_FULL == replica_type) { + member_change_type = MEMBER_CHANGE_ADD; + } else if (REPLICA_TYPE_READONLY == replica_type) { + member_change_type = MEMBER_CHANGE_NOP; + } + } else if (task_type.is_remove_task()) { + if (REPLICA_TYPE_FULL == replica_type) { + member_change_type = MEMBER_CHANGE_SUB; + } else if (REPLICA_TYPE_READONLY == replica_type) { + member_change_type = MEMBER_CHANGE_NOP; + } + } else if (task_type.is_modify_replica_task()) { + if (REPLICA_TYPE_FULL == replica_type) { + member_change_type = MEMBER_CHANGE_ADD; + } else if (REPLICA_TYPE_READONLY == replica_type) { + member_change_type = MEMBER_CHANGE_SUB; + } + } else { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("task type unexpected", KR(ret), K(arg), K(dr_ls_info), K(replica_type)); + } + if (OB_FAIL(ret)) { + } else if ((MEMBER_CHANGE_ADD == member_change_type)) { + if (OB_FAIL(check_for_alter_full_replica_(member_list_count + 1, new_p))) { + LOG_WARN("check failed", KR(ret), K(arg), K(replica_type), K(dr_ls_info)); + } + } else if ((MEMBER_CHANGE_SUB == member_change_type)) { + if (OB_FAIL(check_for_alter_full_replica_(member_list_count - 1, new_p))) { + LOG_WARN("check failed", KR(ret), K(arg), K(replica_type), K(dr_ls_info)); + } + } else if ((MEMBER_CHANGE_NOP == member_change_type)) { + if (new_p != curr_p) { + ret = OB_INVALID_ARGUMENT; + LOG_USER_ERROR(OB_INVALID_ARGUMENT, "paxos_replica_num which should remain unchanged"); + LOG_WARN("paxos_replica_num is wrong", KR(ret), K(arg), K(curr_p), K(new_p)); + } + } else { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("member change type unexpected", KR(ret), K(arg), K(dr_ls_info), K(member_change_type)); + } + LOG_INFO("check and generate new paxos_replica_num over", + KR(ret), K(arg), K(replica_type), K(new_p), K(curr_p), K(member_list_count)); + return ret; +} + +int ObDRWorker::check_majority_for_remove_( + const common::ObAddr& server_addr, + const DRLSInfo &dr_ls_info, + const int64_t new_p) +{ + int ret = OB_SUCCESS; + int64_t inactive_count = 0; + int64_t arb_replica_number = 0; + ObLSID ls_id; + uint64_t tenant_id = OB_INVALID_TENANT_ID; + if (OB_UNLIKELY(!inited_)) { + ret = OB_NOT_INIT; + LOG_WARN("DRWorker not init", KR(ret)); + } else if (OB_UNLIKELY(!server_addr.is_valid()) || OB_UNLIKELY(new_p <= 0)) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), K(server_addr), K(new_p)); + } else if (OB_FAIL(dr_ls_info.get_ls_id(tenant_id, ls_id))) { + LOG_WARN("fail to get tenant and ls id", KR(ret), K(dr_ls_info)); + } else if (OB_FAIL(ObShareUtil::generate_arb_replica_num( + tenant_id, ls_id, arb_replica_number))) { + LOG_WARN("fail to generate arb replica number", KR(ret), K(tenant_id), K(ls_id)); + } else if (OB_FAIL(check_other_inactive_server_count_( + server_addr, dr_ls_info, 
inactive_count))) { + LOG_WARN("fail to check other permanent offline server", + KR(ret), K(server_addr), K(dr_ls_info)); + } else if ((0 == dr_ls_info.get_member_list_cnt() - 1 - inactive_count) // no member + || (dr_ls_info.get_member_list_cnt() - 1 - inactive_count + arb_replica_number < majority(new_p))) { + ret = OB_OP_NOT_ALLOW; + LOG_USER_ERROR(OB_OP_NOT_ALLOW, "Current operation may result in no leader, which is"); + LOG_WARN("not satisfy majority", KR(ret), K(new_p), K(arb_replica_number), K(inactive_count)); + } + LOG_INFO("check majority for remove over", KR(ret), K(server_addr), K(new_p), K(arb_replica_number)); + return ret; +} + +int ObDRWorker::check_other_inactive_server_count_( + const common::ObAddr& desti_server_addr, + const DRLSInfo &dr_ls_info, + int64_t& other_inactive_server_count) +{ + /* + When removing replicas, we need to check the majority and take into account replicas + that are inactive but still in the member list. Example: current paxos_replica_num = 3, + member_list_count = 3 (a b c), a is down but not permanently offline yet. If a remove operation + now removes another normal replica b and sets paxos_replica_num = 2, + only one available replica remains at the storage layer, which may result in no leader. + */ + int ret = OB_SUCCESS; + bool active = false; + common::ObAddr leader_addr; // not used + GlobalLearnerList learner_list; // not used + common::ObMemberList member_list; + other_inactive_server_count = 0; + if (OB_UNLIKELY(!inited_)) { + ret = OB_NOT_INIT; + LOG_WARN("DRWorker not init", KR(ret)); + } else if (OB_UNLIKELY(!desti_server_addr.is_valid())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), K(desti_server_addr)); + } else if (OB_FAIL(dr_ls_info.get_leader_and_member_list(leader_addr, member_list, learner_list))) { + LOG_WARN("fail to get leader and member list", KR(ret), K(desti_server_addr), K(dr_ls_info)); + } else { + for (int64_t index = 0; OB_SUCC(ret) && index < member_list.get_member_number(); ++index) { + ObMember member; + active = false; + if (OB_FAIL(member_list.get_member_by_index(index, member))) { + LOG_WARN("fail to get member", KR(ret), K(index), K(dr_ls_info)); + } else if (OB_FAIL(SVR_TRACER.check_server_active(member.get_server(), active))) { + LOG_WARN("fail to check server permanent offline", KR(ret), K(member.get_server())); + } else if (!active && desti_server_addr != member.get_server()) { + other_inactive_server_count = other_inactive_server_count + 1; + // other_inactive_server_count does not include desti_server_addr + } + } + } + LOG_INFO("check other_inactive_server_count over", KR(ret), K(dr_ls_info), + K(desti_server_addr), K(other_inactive_server_count)); + return ret; +} + int ObDRWorker::generate_task_key( const DRLSInfo &dr_ls_info, ObDRTaskKey &task_key) const @@ -2205,7 +3189,6 @@ int ObDRWorker::check_has_leader_while_remove_replica( LOG_WARN("fail to get replica cnt", KR(ret), K(dr_ls_info)); } else { int64_t full_replica_count = 0; - int64_t paxos_replica_num = 0; int64_t arb_replica_num = 0; uint64_t tenant_id = OB_INVALID_TENANT_ID; ObLSID ls_id; @@ -2238,9 +3221,6 @@ int ObDRWorker::check_has_leader_while_remove_replica( if (server_stat_info->get_server() == server) { replica_type = ls_replica->get_replica_type(); } - if (ObReplicaTypeCheck::is_paxos_replica_V2(ls_replica->get_replica_type())) { - ++paxos_replica_num; - } if (REPLICA_TYPE_FULL == ls_replica->get_replica_type()) { ++full_replica_count; } @@ -2259,14 +3239,40 @@ int
ObDRWorker::check_has_leader_while_remove_replica( has_leader = false; } else if (!ObReplicaTypeCheck::is_paxos_replica_V2(replica_type)) { has_leader = true; + } else if (1 == dr_ls_info.get_paxos_replica_number()) { + // member_list count should always be no greater than paxos_replica_number + // so member_list should have only one member (when 1 == paxos_replica_number) + // we can not remove the only member + has_leader = false; + } else if (2 == dr_ls_info.get_paxos_replica_number()) { + // member_list count should always be no greater than paxos_replica_number + // although member_list count can be less than paxos_replica_number + // member_list count can not be 1, because: + // 1. 2F can not reduce to 1F with paxos_replica_number = 2 + // 2. 2F1A: after 1F is permanently offline, paxos_replica_number will change from 2 to 1 + // so member_list should have 2 members (when 2 == paxos_replica_number) + if (2 >= dr_ls_info.get_schema_replica_cnt()) { + // if 2 == schema_replica_count, it means locality has 2F, we specially support 2F to 1F without locality changes + // if 2 > schema_replica_count, it means locality has 1F, but migration has led to 2F, so we have to remove 1F + has_leader = true; + } else { + // if 2 != schema_replica_count, it means locality is changing, so we can not reduce 2F to 1F + // Consider this case: + // tenant's initial locality is 3F(z1,z2,z3) + // member_list is reduced to 2F(z1,z2) and paxos_replica_number is reduced to 2 by using the "alter system remove replica" command + // tenant locality is changing from 3F(z1,z2,z3) to 5F(z1,z2,z3,z4,z5), current member_list is (z1,z2) and paxos_replica_number = 2 + // before the z3 replica is added, the server in z2 goes permanently offline; we do not want to remove the z2 replica in this case + has_leader = false; + } } else { - has_leader = true; - if (REPLICA_TYPE_FULL == replica_type) { - has_leader = full_replica_count >= 2; - } - if (has_leader) { - has_leader = (paxos_replica_num - 1 + arb_replica_num) >= majority(dr_ls_info.get_schema_replica_cnt()); - } + // we do not reduce member_list count below the majority of the schema replica count + // consider this case: + // tenant's locality is changing from 3F to 4F + // before the replica in z4 is added, the replica in z3 goes permanently offline + // if we remove the replica in z3 first, member_list will have only 2 members, and + // the 4F schema requires a majority of 3 members, which cannot be satisfied + // So we prohibit removing the replica in this case + has_leader = (full_replica_count - 1 + arb_replica_num) >= majority(dr_ls_info.get_schema_replica_cnt()); } } return ret; } @@ -2527,10 +3533,6 @@ int ObDRWorker::do_single_replica_permanent_offline_( ObDRTaskKey task_key; bool can_generate = false; ObReplicaMember remove_member(member_to_remove); - //ObReplicaMember remove_member(member_to_remove.get_server(), - // member_to_remove.get_timestamp(), - // replica_type, - // memstore_percent); ObDRTaskType task_type = ObReplicaTypeCheck::is_paxos_replica_V2(replica_type) ?
ObDRTaskType::LS_REMOVE_PAXOS_REPLICA : ObDRTaskType::LS_REMOVE_NON_PAXOS_REPLICA; @@ -2543,7 +3545,9 @@ int ObDRWorker::do_single_replica_permanent_offline_( old_paxos_replica_number, leader_addr, replica_type))) { - LOG_WARN("fail to construct extra infos to build remove replica task"); + LOG_WARN("fail to construct extra infos to build remove replica task", + KR(ret), K(dr_ls_info), K(task_id), K(new_paxos_replica_number), + K(old_paxos_replica_number), K(leader_addr), K(replica_type)); } else if (only_for_display) { // only for display, no need to execute this task ObLSReplicaTaskDisplayInfo display_info; @@ -4314,13 +5318,6 @@ int ObDRWorker::check_need_generate_migrate_to_unit_task( KP(server_stat_info), KP(unit_stat_info), KP(unit_in_group_stat_info)); - } else if (REPLICA_STATUS_NORMAL == ls_replica->get_replica_status() - && unit_in_group_stat_info->is_in_pool() - && server_stat_info->get_server() != unit_in_group_stat_info->get_unit().server_ - && unit_in_group_stat_info->get_server_stat()->is_alive() - && !unit_in_group_stat_info->get_server_stat()->is_block()) { - need_generate = true; - is_unit_in_group_related = true; } else if (REPLICA_STATUS_NORMAL == ls_replica->get_replica_status() && unit_stat_info->is_in_pool() && server_stat_info->get_server() != unit_stat_info->get_unit().server_ @@ -4328,6 +5325,13 @@ int ObDRWorker::check_need_generate_migrate_to_unit_task( && !unit_stat_info->get_server_stat()->is_block()) { need_generate = true; is_unit_in_group_related = false; + } else if (REPLICA_STATUS_NORMAL == ls_replica->get_replica_status() + && unit_in_group_stat_info->is_in_pool() + && server_stat_info->get_server() != unit_in_group_stat_info->get_unit().server_ + && unit_in_group_stat_info->get_server_stat()->is_alive() + && !unit_in_group_stat_info->get_server_stat()->is_block()) { + need_generate = true; + is_unit_in_group_related = true; } return ret; } @@ -4621,32 +5625,34 @@ int ObDRWorker::generate_disaster_recovery_paxos_replica_number( || curr_paxos_replica_number <= 0 || locality_paxos_replica_number <= 0)) { ret = OB_INVALID_ARGUMENT; - LOG_WARN("invalid argument", KR(ret), - K(member_list_cnt), - K(curr_paxos_replica_number), + LOG_WARN("invalid argument", KR(ret), K(member_list_cnt), K(curr_paxos_replica_number), K(locality_paxos_replica_number)); } else if (MEMBER_CHANGE_ADD == member_change_type) { + // 1. 
ADD MEMBER_LIST operation + // When current paxos_replica_number >= locality paxos_replica_number + // we do not change paxos_replica_number and ensure that paxos_replica_number no less than new member_list count + // When current paxos_replica_number < locality paxos_replica_number + // we try to increase paxos_replica_number towards locality and ensure that majority is satisfied const int64_t member_list_cnt_after = member_list_cnt + 1; - if (curr_paxos_replica_number == locality_paxos_replica_number) { - if (locality_paxos_replica_number >= member_list_cnt_after) { - new_paxos_replica_number = curr_paxos_replica_number; - found = true; - } else if (locality_paxos_replica_number + 1 == member_list_cnt_after) { - new_paxos_replica_number = curr_paxos_replica_number + 1; - found = true; - } - } else if (curr_paxos_replica_number > locality_paxos_replica_number) { + if (curr_paxos_replica_number >= locality_paxos_replica_number) { if (curr_paxos_replica_number >= member_list_cnt_after) { new_paxos_replica_number = curr_paxos_replica_number; found = true; - } else {} // new member cnt greater than paxos_replica_number, not good - } else { // curr_paxos_replica_number < locality_paxos_replica_number + } + } else { if (majority(curr_paxos_replica_number + 1) <= member_list_cnt_after) { new_paxos_replica_number = curr_paxos_replica_number + 1; found = true; - } else {} // majority not satisfied + } } } else if (MEMBER_CHANGE_NOP == member_change_type) { + // 2. MEMBER_LIST not changed operation + // When current paxos_replica_number == locality paxos_replica_number + // we do not change paxos_replica_number + // When current paxos_replica_number > locality paxos_replica_number + // we try to reduce paxos_replica_number towards locality + // When current paxos_replica_number < locality paxos_replica_number + // we try to increase paxos_replica_number towards locality if (curr_paxos_replica_number == locality_paxos_replica_number) { new_paxos_replica_number = curr_paxos_replica_number; found = true; @@ -4655,7 +5661,7 @@ int ObDRWorker::generate_disaster_recovery_paxos_replica_number( new_paxos_replica_number = curr_paxos_replica_number - 1; found = true; } - } else { // curr_paxos_replica_number < locality_paxos_replica_number + } else { if (member_list_cnt > majority(curr_paxos_replica_number + 1)) { new_paxos_replica_number = curr_paxos_replica_number + 1; found = true; @@ -4664,31 +5670,53 @@ int ObDRWorker::generate_disaster_recovery_paxos_replica_number( } else if (MEMBER_CHANGE_SUB == member_change_type) { int64_t member_list_cnt_after = 0; int64_t arb_replica_number = 0; - if (OB_FAIL(dr_ls_info.get_ls_id(tenant_id, ls_id))) { - LOG_WARN("fail to get tenant and ls id", KR(ret), K(dr_ls_info)); - } else if (OB_FAIL(ObShareUtil::generate_arb_replica_num( - tenant_id, - ls_id, - arb_replica_number))) { - LOG_WARN("fail to generate arb replica number", KR(ret), K(tenant_id), K(ls_id)); - } - member_list_cnt_after = member_list_cnt - 1 + arb_replica_number; - if (OB_FAIL(ret)) { - } else if (curr_paxos_replica_number == locality_paxos_replica_number) { - if (majority(curr_paxos_replica_number) <= member_list_cnt_after) { - new_paxos_replica_number = curr_paxos_replica_number; - found = true; - } else {} // majority not satisfied - } else if (curr_paxos_replica_number > locality_paxos_replica_number) { - if (majority(curr_paxos_replica_number - 1) <= member_list_cnt_after) { + // 3. 
REMOVE MEMBER_LIST operation + // When current paxos_replica_number == locality paxos_replica_number + // we do not change paxos_replica_number and ensure majority is satisfied + // When current paxos_replica_number > locality paxos_replica_number + // we try to reduce paxos_replica_number towards locality and ensure majority is satisfied + // When current paxos_replica_number < locality paxos_replica_number + // we do not change paxos_replica_number and ensure majority is satisfied + // SPECIALLY, we support reducing 2F to 1F automatically + if (2 == curr_paxos_replica_number) { + if (2 == member_list_cnt) { + // Specially, we support 2F to 1F to solve these cases: + // CASE 1: + // tenant's locality is 2F1A, the F-replica in zone1 migrates and the source replica remains in the member_list, + // member_list like (F-z1, F-z1, F-z2) and paxos_replica_number = 3 + // then the F-replica in zone2 goes permanently offline, + // member_list like (F-z1, F-z1) and paxos_replica_number = 2 + // In this case, we have to support removing one F in zone1 and later adding one F in zone2 + // CASE 2: + // tenant's locality is 1F, the F-replica in zone1 migrates and the source replica remains in the member_list, + // member_list like (F-z1, F-z1) and paxos_replica_number = 2 + // In this case, we have to support removing one F in zone1 + new_paxos_replica_number = curr_paxos_replica_number - 1; + found = true; + } else { + // do nothing + // When current paxos_replica_number = 2 + // member_list_cnt can not be less than 2, because it would lead to no leader + // member_list_cnt can not be larger than 2, because member_list_cnt should be <= paxos_replica_number + // we do not raise an error and leave found = false + } + } else if (OB_FAIL(dr_ls_info.get_ls_id(tenant_id, ls_id))) { + LOG_WARN("fail to get tenant and ls id", KR(ret), K(dr_ls_info)); + } else if (OB_FAIL(ObShareUtil::generate_arb_replica_num(tenant_id, ls_id, arb_replica_number))) { + LOG_WARN("fail to generate arb replica number", KR(ret), K(tenant_id), K(ls_id)); + } else { + member_list_cnt_after = member_list_cnt - 1 + arb_replica_number; + if (curr_paxos_replica_number <= locality_paxos_replica_number) { + if (majority(curr_paxos_replica_number) <= member_list_cnt_after) { + new_paxos_replica_number = curr_paxos_replica_number; + found = true; + } + } else if (curr_paxos_replica_number > locality_paxos_replica_number) { + if (majority(curr_paxos_replica_number - 1) <= member_list_cnt_after) { + new_paxos_replica_number = curr_paxos_replica_number - 1; + found = true; + } + } } } else { ret = OB_INVALID_ARGUMENT; diff --git a/src/rootserver/ob_disaster_recovery_worker.h b/src/rootserver/ob_disaster_recovery_worker.h index 397b27bee..ec162fd32 100755 --- a/src/rootserver/ob_disaster_recovery_worker.h +++ b/src/rootserver/ob_disaster_recovery_worker.h @@ -119,6 +119,12 @@ public: const uint64_t tenant_id, const bool only_for_display, int64_t &acc_dr_task); + int do_add_ls_replica_task(const obrpc::ObAdminAlterLSReplicaArg &arg); + int do_remove_ls_replica_task(const obrpc::ObAdminAlterLSReplicaArg &arg); + int do_migrate_ls_replica_task(const obrpc::ObAdminAlterLSReplicaArg &arg); + int do_modify_ls_replica_type_task(const obrpc::ObAdminAlterLSReplicaArg &arg); + int
do_modify_ls_paxos_replica_num_task(const obrpc::ObAdminAlterLSReplicaArg &arg); + int do_cancel_ls_replica_task(const obrpc::ObAdminAlterLSReplicaArg &arg); + static int check_tenant_locality_match( const uint64_t tenant_id, ObZoneManager &zone_mgr, @@ -129,6 +135,133 @@ public: common::ObSArray &task_plan); private: + + // add the task to the queue in the task mgr and execute it + // @param [in] task, the task to execute + int add_task_in_queue_and_execute_(const ObDRTask &task); + // check that the ls exists and init dr_ls_info + // @param [in] arg, task info + // @param [out] dr_ls_info, target dr_ls_info to init + int check_and_init_info_for_alter_ls_( + const obrpc::ObAdminAlterLSReplicaArg& arg, + DRLSInfo& dr_ls_info); + // check that the ls exists and get ls_info and ls_status_info + // @param [in] ls_id, which ls to check + // @param [in] tenant_id, the tenant the ls belongs to + // @param [out] ls_info, target ls_info + // @param [out] ls_status_info, target ls_status_info + int check_ls_exist_and_get_ls_info_( + const share::ObLSID& ls_id, + const int64_t tenant_id, + share::ObLSInfo& ls_info, + share::ObLSStatusInfo& ls_status_info); + // check unit exist and get unit + // @param [in] task_execute_server, the server the unit is expected to be on + // @param [in] tenant_id, the tenant the unit belongs to + // @param [in] is_migrate_source_valid, whether a unit whose migration source is the server also matches + // @param [out] unit, target unit + int check_unit_exist_and_get_unit_( + const common::ObAddr &task_execute_server, + const uint64_t tenant_id, + const bool is_migrate_source_valid, + share::ObUnit& unit); + // check task execute server status + // @param [in] task_execute_server, the server the task executes on + // @param [in] need_check_can_migrate_in, if need to check can_migrate_in flag + int check_task_execute_server_status_( + const common::ObAddr &task_execute_server, + const bool need_check_can_migrate_in); + // get replica type by leader + // @param [in] server_addr, the server of the target replica + // @param [in] dr_ls_info, dr_ls_info + // @param [out] replica_type, the replica type of the replica on server_addr + int get_replica_type_by_leader_( + const common::ObAddr& server_addr, + const DRLSInfo &dr_ls_info, + common::ObReplicaType& replica_type); + // build an add replica task by task info + // @param [in] arg, the task info + // @param [in] dr_ls_info, dr_ls_info + // @param [out] add_replica_task, target task + int build_add_replica_task_( + const obrpc::ObAdminAlterLSReplicaArg &arg, + const DRLSInfo &dr_ls_info, + ObAddLSReplicaTask &add_replica_task); + // build a remove replica task by task info + // @param [in] arg, the task info + // @param [in] dr_ls_info, dr_ls_info + // @param [out] remove_replica_task, target task + int build_remove_replica_task_( + const obrpc::ObAdminAlterLSReplicaArg &arg, + DRLSInfo &dr_ls_info, + ObRemoveLSReplicaTask &remove_replica_task); + // build a modify replica task by task info + // @param [in] arg, the task info + // @param [in] dr_ls_info, dr_ls_info + // @param [out] modify_replica_task, target task + int build_modify_replica_type_task_( + const obrpc::ObAdminAlterLSReplicaArg &arg, + DRLSInfo &dr_ls_info, + ObLSTypeTransformTask &modify_replica_task); + // build a migrate replica task by task info + // @param [in] arg, the task info + // @param [in] dr_ls_info, dr_ls_info + // @param [out] migrate_replica_task, target task + int build_migrate_replica_task_( + const obrpc::ObAdminAlterLSReplicaArg &arg, + const DRLSInfo &dr_ls_info, + ObMigrateLSReplicaTask
&migrate_replica_task); + // build a modify paxos_replica_num task by task info + // @param [in] arg, the task info + // @param [in] dr_ls_info, dr_ls_info + // @param [out] modify_paxos_replica_number_task, target task + int build_modify_paxos_replica_num_task_( + const obrpc::ObAdminAlterLSReplicaArg &arg, + DRLSInfo &dr_ls_info, + ObLSModifyPaxosReplicaNumberTask &modify_paxos_replica_number_task); + // check if the provided data source is available and init data_source + // @param [in] arg, the task info + // @param [in] replica_type, source replica type + // @param [in] dr_ls_info, dr_ls_info + // @param [out] data_source, target data_source + int check_data_source_available_and_init_( + const obrpc::ObAdminAlterLSReplicaArg &arg, + const common::ObReplicaType &replica_type, + const DRLSInfo &dr_ls_info, + ObReplicaMember &data_source); + // if provided paxos_replica_num is valid, check it. otherwise generate new paxos_replica_num + // @param [in] arg, the task info + // @param [in] replica_type, target replica's type + // @param [in] dr_ls_info, dr_ls_info + // @param [out] new_p, new paxos_replica_num + int check_and_generate_new_paxos_replica_num_( + const obrpc::ObAdminAlterLSReplicaArg &arg, + const common::ObReplicaType &replica_type, + const DRLSInfo &dr_ls_info, + int64_t &new_p); + // check whether the values of member list count and new paxos_replica_num are legal when altering a full replica + // @param [in] member_list_count, count of leader member list after alter full replica + // @param [in] new_p, new paxos_replica_num + int check_for_alter_full_replica_( + const int64_t member_list_count, + const int64_t new_p); + // check if majority is satisfied when removing a replica + // @param [in] server_addr, the server of the replica to remove + // @param [in] dr_ls_info, dr_ls_info + // @param [in] new_p, new paxos_replica_num + int check_majority_for_remove_( + const common::ObAddr& server_addr, + const DRLSInfo &dr_ls_info, + const int64_t new_p); + // check the count of inactive servers excluding desti_server_addr + // @param [in] desti_server_addr, the server excluded from the count + // @param [in] dr_ls_info, dr_ls_info + // @param [out] other_inactive_server_count, the number of inactive servers excluding desti_server_addr + int check_other_inactive_server_count_( + const common::ObAddr& desti_server_addr, + const DRLSInfo &dr_ls_info, + int64_t& other_inactive_server_count); + struct TaskCountStatistic { public: diff --git a/src/rootserver/ob_root_service.cpp b/src/rootserver/ob_root_service.cpp index 78ef4739a..80b342bb5 100755 --- a/src/rootserver/ob_root_service.cpp +++ b/src/rootserver/ob_root_service.cpp @@ -9342,6 +9342,95 @@ int ObRootService::admin_migrate_unit(const obrpc::ObAdminMigrateUnitArg &arg) return ret; } +#define ADD_EVENT_FOR_ALTER_LS_REPLICA \ + "admin_alter_ls_replica", arg.get_alter_task_type().get_type_str(), \ + "ret_code", ret_val, \ + "tenant_id", arg.get_tenant_id() \ + +int ObRootService::add_rs_event_for_alter_ls_replica_( + const obrpc::ObAdminAlterLSReplicaArg &arg, + const int ret_val) +{ + int ret = OB_SUCCESS; + if (OB_UNLIKELY(!inited_)) { + ret = OB_NOT_INIT; + LOG_WARN("not init", KR(ret)); + } else if (OB_UNLIKELY(!arg.is_valid())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid arg", KR(ret), K(arg)); + } else { + char extra_info[MSG_SIZE] = {0}; + char addr_str_buf[OB_SERVER_ADDR_STR_LEN]; + if (OB_FAIL(arg.get_data_source().ip_port_to_string(addr_str_buf, OB_SERVER_ADDR_STR_LEN))) { + LOG_WARN("data source to string failed", KR(ret), K(arg)); + } else { +
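+ // For illustration, with hypothetical values the resulting extra_info reads: "data_source: 10.10.10.1:2882, paxos_replica_num: 3"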
snprintf(extra_info, sizeof(extra_info), + "data_source: %s, paxos_replica_num: %ld", addr_str_buf, arg.get_paxos_replica_num()); + } + if (OB_FAIL(ret)) { + } else if (arg.get_alter_task_type().is_add_task() + || arg.get_alter_task_type().is_modify_replica_task() + || arg.get_alter_task_type().is_remove_task()) { + ROOTSERVICE_EVENT_ADD(ADD_EVENT_FOR_ALTER_LS_REPLICA, + "ls_id", arg.get_ls_id().id(), + "target_replica", arg.get_server_addr(), + "replica_type", replica_type_to_str(arg.get_replica_type()), + "", NULL, + extra_info); + } else if (arg.get_alter_task_type().is_migrate_task()) { + ROOTSERVICE_EVENT_ADD(ADD_EVENT_FOR_ALTER_LS_REPLICA, + "ls_id", arg.get_ls_id().id(), + "source_replica", arg.get_server_addr(), + "target_replica", arg.get_destination_addr(), + "", NULL, + extra_info); + } else if (arg.get_alter_task_type().is_modify_paxos_replica_num_task()) { + ROOTSERVICE_EVENT_ADD(ADD_EVENT_FOR_ALTER_LS_REPLICA, + "ls_id", arg.get_ls_id().id(), + "paxos_replica_num", arg.get_paxos_replica_num()); + } else if (arg.get_alter_task_type().is_cancel_task()) { + ROOTSERVICE_EVENT_ADD(ADD_EVENT_FOR_ALTER_LS_REPLICA, + "task_id", arg.get_task_id()); + } else { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), K(arg)); + } + } + return ret; +} + +int ObRootService::admin_alter_ls_replica(const obrpc::ObAdminAlterLSReplicaArg &arg) +{ + int ret = OB_SUCCESS; + FLOG_INFO("receive alter ls replica request", K(arg)); + int64_t start_time = ObTimeUtility::current_time(); + if (OB_UNLIKELY(!inited_)) { + ret = OB_NOT_INIT; + LOG_WARN("not init", KR(ret)); + } else if (OB_UNLIKELY(!arg.is_valid())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid arg", KR(ret), K(arg)); + } else { + ObSystemAdminCtx ctx; + if (OB_FAIL(init_sys_admin_ctx(ctx))) { + LOG_WARN("init_sys_admin_ctx failed", KR(ret)); + } else { + ObAdminAlterLSReplica admin_util(ctx); + if (OB_FAIL(admin_util.execute(arg))) { + LOG_WARN("execute alter ls replica failed", KR(ret), K(arg)); + } + } + } + int64_t cost_time = ObTimeUtility::current_time() - start_time; + FLOG_INFO("alter ls replica over", KR(ret), K(arg), K(cost_time)); + int tmp_ret = OB_SUCCESS; + if (OB_SUCCESS != (tmp_ret = add_rs_event_for_alter_ls_replica_(arg, ret))) { + // ignore + LOG_WARN("add rs event for alter ls replica failed", KR(ret), KR(tmp_ret), K(arg)); + } + return ret; +} + int ObRootService::admin_upgrade_virtual_schema() { int ret = OB_SUCCESS; diff --git a/src/rootserver/ob_root_service.h b/src/rootserver/ob_root_service.h index 5586f9851..4bcfdd8cf 100644 --- a/src/rootserver/ob_root_service.h +++ b/src/rootserver/ob_root_service.h @@ -740,6 +740,7 @@ public: int admin_switch_replica_role(const obrpc::ObAdminSwitchReplicaRoleArg &arg); int admin_switch_rs_role(const obrpc::ObAdminSwitchRSRoleArg &arg); int admin_drop_replica(const obrpc::ObAdminDropReplicaArg &arg); + int admin_alter_ls_replica(const obrpc::ObAdminAlterLSReplicaArg &arg); int admin_change_replica(const obrpc::ObAdminChangeReplicaArg &arg); int admin_migrate_replica(const obrpc::ObAdminMigrateReplicaArg &arg); int admin_report_replica(const obrpc::ObAdminReportReplicaArg &arg); @@ -961,6 +962,7 @@ private: int check_mds_memory_limit_(obrpc::ObAdminSetConfigItem &item); int check_freeze_trigger_percentage_(obrpc::ObAdminSetConfigItem &item); int check_write_throttle_trigger_percentage(obrpc::ObAdminSetConfigItem &item); + int add_rs_event_for_alter_ls_replica_(const obrpc::ObAdminAlterLSReplicaArg &arg, const int ret_val); int 
check_data_disk_write_limit_(obrpc::ObAdminSetConfigItem &item); int check_data_disk_usage_limit_(obrpc::ObAdminSetConfigItem &item); private: diff --git a/src/rootserver/ob_rs_rpc_processor.h b/src/rootserver/ob_rs_rpc_processor.h index 1ad5a48ca..1d68251cd 100644 --- a/src/rootserver/ob_rs_rpc_processor.h +++ b/src/rootserver/ob_rs_rpc_processor.h @@ -479,6 +479,7 @@ DEFINE_RS_RPC_PROCESSOR(obrpc::OB_ADMIN_RELOAD_SERVER, ObRpcAdminReloadServerP, DEFINE_RS_RPC_PROCESSOR(obrpc::OB_ADMIN_RELOAD_ZONE, ObRpcAdminReloadZoneP, admin_reload_zone()); DEFINE_RS_RPC_PROCESSOR(obrpc::OB_ADMIN_CLEAR_MERGE_ERROR, ObRpcAdminClearMergeErrorP, admin_clear_merge_error(arg_)); DEFINE_RS_RPC_PROCESSOR(obrpc::OB_ADMIN_MIGRATE_UNIT, ObRpcAdminMigrateUnitP, admin_migrate_unit(arg_)); +DEFINE_RS_RPC_PROCESSOR(obrpc::OB_ADMIN_ALTER_LS_REPLICA, ObRpcAdminAlterLSReplicaP, admin_alter_ls_replica(arg_)); #ifdef OB_BUILD_ARBITRATION DEFINE_RS_RPC_PROCESSOR(obrpc::OB_ADMIN_ADD_ARBITRATION_SERVICE, ObRpcAdminAddArbitrationServiceP, admin_add_arbitration_service(arg_)); DEFINE_RS_RPC_PROCESSOR(obrpc::OB_ADMIN_REMOVE_ARBITRATION_SERVICE, ObRpcAdminRemoveArbitrationServiceP, admin_remove_arbitration_service(arg_)); diff --git a/src/rootserver/ob_system_admin_util.cpp b/src/rootserver/ob_system_admin_util.cpp index 7311c5bc1..0f69e0b5a 100644 --- a/src/rootserver/ob_system_admin_util.cpp +++ b/src/rootserver/ob_system_admin_util.cpp @@ -1355,6 +1355,76 @@ int ObAdminMigrateUnit::execute(const ObAdminMigrateUnitArg &arg) return ret; } +int ObAdminAlterLSReplica::execute(const obrpc::ObAdminAlterLSReplicaArg &arg) +{ + FLOG_INFO("execute alter ls replica request", K(arg)); + int ret = OB_SUCCESS; + int64_t start_time = ObTimeUtility::current_time(); + if (OB_UNLIKELY(!ctx_.is_inited())) { + ret = OB_NOT_INIT; + LOG_WARN("not init", KR(ret)); + } else if (OB_ISNULL(ctx_.root_balancer_)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("root_balancer_ is null", KR(ret), K(arg), KP(ctx_.root_balancer_)); + } else if (OB_UNLIKELY(!arg.is_valid())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid arg", KR(ret), K(arg)); + } else { + switch (arg.get_alter_task_type().get_type()) { + case ObAlterLSReplicaTaskType::AddLSReplicaTask: { + if (OB_FAIL(ctx_.root_balancer_->get_disaster_recovery_worker() + .do_add_ls_replica_task(arg))) { + LOG_WARN("add ls replica task failed", KR(ret), K(arg)); + } + break; + } + case ObAlterLSReplicaTaskType::RemoveLSReplicaTask: { + if (OB_FAIL(ctx_.root_balancer_->get_disaster_recovery_worker() + .do_remove_ls_replica_task(arg))) { + LOG_WARN("remove ls replica task failed", KR(ret), K(arg)); + } + break; + } + case ObAlterLSReplicaTaskType::MigrateLSReplicaTask: { + if (OB_FAIL(ctx_.root_balancer_->get_disaster_recovery_worker() + .do_migrate_ls_replica_task(arg))) { + LOG_WARN("migrate ls replica task failed", KR(ret), K(arg)); + } + break; + } + case ObAlterLSReplicaTaskType::ModifyLSReplicaTypeTask: { + if (OB_FAIL(ctx_.root_balancer_->get_disaster_recovery_worker() + .do_modify_ls_replica_type_task(arg))) { + LOG_WARN("modify ls replica task failed", KR(ret), K(arg)); + } + break; + } + case ObAlterLSReplicaTaskType::ModifyLSPaxosReplicaNumTask: { + if (OB_FAIL(ctx_.root_balancer_->get_disaster_recovery_worker() + .do_modify_ls_paxos_replica_num_task(arg))) { + LOG_WARN("modify ls paxos_replica_num task failed", KR(ret), K(arg)); + } + break; + } + case ObAlterLSReplicaTaskType::CancelLSReplicaTask: { + if (OB_FAIL(ctx_.root_balancer_->get_disaster_recovery_worker() + .do_cancel_ls_replica_task(arg))) 
{ + LOG_WARN("cancel ls replica task failed", KR(ret), K(arg)); + } + break; + } + default: { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("task type unexpected", KR(ret), K(arg)); + break; + } + } + } + int64_t cost_time = ObTimeUtility::current_time() - start_time; + FLOG_INFO("execute alter ls replica request over", KR(ret), K(arg), K(cost_time)); + return ret; +} + int ObAdminUpgradeVirtualSchema::execute() { int ret = OB_SUCCESS; diff --git a/src/rootserver/ob_system_admin_util.h b/src/rootserver/ob_system_admin_util.h index 898e681d6..97646d074 100644 --- a/src/rootserver/ob_system_admin_util.h +++ b/src/rootserver/ob_system_admin_util.h @@ -371,6 +371,17 @@ private: DISALLOW_COPY_AND_ASSIGN(ObAdminMigrateUnit); }; +class ObAdminAlterLSReplica : public ObSystemAdminUtil +{ +public: + explicit ObAdminAlterLSReplica(const ObSystemAdminCtx &ctx) : ObSystemAdminUtil(ctx) {} + virtual ~ObAdminAlterLSReplica() {} + + int execute(const obrpc::ObAdminAlterLSReplicaArg &arg); +private: + DISALLOW_COPY_AND_ASSIGN(ObAdminAlterLSReplica); +}; + class ObAdminUpgradeVirtualSchema : public ObSystemAdminUtil { public: diff --git a/src/share/inner_table/ob_inner_table_schema.12451_12500.cpp b/src/share/inner_table/ob_inner_table_schema.12451_12500.cpp index f60b9e34d..72b383d92 100644 --- a/src/share/inner_table/ob_inner_table_schema.12451_12500.cpp +++ b/src/share/inner_table/ob_inner_table_schema.12451_12500.cpp @@ -2295,6 +2295,474 @@ int ObInnerTableSchema::enabled_roles_schema(ObTableSchema &table_schema) return ret; } +int ObInnerTableSchema::all_virtual_ls_replica_task_history_schema(ObTableSchema &table_schema) +{ + int ret = OB_SUCCESS; + uint64_t column_id = OB_APP_MIN_COLUMN_ID - 1; + + //generated fields: + table_schema.set_tenant_id(OB_SYS_TENANT_ID); + table_schema.set_tablegroup_id(OB_INVALID_ID); + table_schema.set_database_id(OB_SYS_DATABASE_ID); + table_schema.set_table_id(OB_ALL_VIRTUAL_LS_REPLICA_TASK_HISTORY_TID); + table_schema.set_rowkey_split_pos(0); + table_schema.set_is_use_bloomfilter(false); + table_schema.set_progressive_merge_num(0); + table_schema.set_rowkey_column_num(4); + table_schema.set_load_type(TABLE_LOAD_TYPE_IN_DISK); + table_schema.set_table_type(VIRTUAL_TABLE); + table_schema.set_index_type(INDEX_TYPE_IS_NOT); + table_schema.set_def_type(TABLE_DEF_TYPE_INTERNAL); + + if (OB_SUCC(ret)) { + if (OB_FAIL(table_schema.set_table_name(OB_ALL_VIRTUAL_LS_REPLICA_TASK_HISTORY_TNAME))) { + LOG_ERROR("fail to set table_name", K(ret)); + } + } + + if (OB_SUCC(ret)) { + if (OB_FAIL(table_schema.set_compress_func_name(OB_DEFAULT_COMPRESS_FUNC_NAME))) { + LOG_ERROR("fail to set compress_func_name", K(ret)); + } + } + table_schema.set_part_level(PARTITION_LEVEL_ZERO); + table_schema.set_charset_type(ObCharset::get_default_charset()); + table_schema.set_collation_type(ObCharset::get_default_collation(ObCharset::get_default_charset())); + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("tenant_id", //column_name + ++column_id, //column_id + 1, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObIntType, //column_type + CS_TYPE_INVALID, //column_collation_type + sizeof(int64_t), //column_length + -1, //column_precision + -1, //column_scale + false, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("ls_id", //column_name + ++column_id, //column_id + 2, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObIntType, //column_type + CS_TYPE_INVALID, //column_collation_type + sizeof(int64_t), //column_length + -1, //column_precision + -1, //column_scale + 
false, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("task_type", //column_name + ++column_id, //column_id + 3, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObVarcharType, //column_type + CS_TYPE_INVALID, //column_collation_type + MAX_DISASTER_RECOVERY_TASK_TYPE_LENGTH, //column_length + -1, //column_precision + -1, //column_scale + false, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("task_id", //column_name + ++column_id, //column_id + 4, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObVarcharType, //column_type + CS_TYPE_INVALID, //column_collation_type + OB_TRACE_STAT_BUFFER_SIZE, //column_length + -1, //column_precision + -1, //column_scale + false, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA_TS("gmt_create", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObTimestampType, //column_type + CS_TYPE_INVALID, //column_collation_type + sizeof(ObPreciseDateTime), //column_length + -1, //column_precision + -1, //column_scale + false, //is_nullable + false, //is_autoincrement + false); //is_on_update_for_timestamp + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA_TS("gmt_modified", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObTimestampType, //column_type + CS_TYPE_INVALID, //column_collation_type + sizeof(ObPreciseDateTime), //column_length + -1, //column_precision + -1, //column_scale + false, //is_nullable + false, //is_autoincrement + false); //is_on_update_for_timestamp + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("task_status", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObVarcharType, //column_type + CS_TYPE_INVALID, //column_collation_type + MAX_COLUMN_COMMENT_LENGTH, //column_length + -1, //column_precision + -1, //column_scale + true, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ObObj priority_default; + priority_default.set_int(1); + ADD_COLUMN_SCHEMA_T("priority", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObIntType, //column_type + CS_TYPE_INVALID, //column_collation_type + sizeof(int64_t), //column_length + -1, //column_precision + -1, //column_scale + false, //is_nullable + false, //is_autoincrement + priority_default, + priority_default); //default_value + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("target_replica_svr_ip", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObVarcharType, //column_type + CS_TYPE_INVALID, //column_collation_type + MAX_IP_ADDR_LENGTH, //column_length + -1, //column_precision + -1, //column_scale + true, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("target_replica_svr_port", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObIntType, //column_type + CS_TYPE_INVALID, //column_collation_type + sizeof(int64_t), //column_length + -1, //column_precision + -1, //column_scale + true, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("target_paxos_replica_number", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObIntType, //column_type + CS_TYPE_INVALID, //column_collation_type + sizeof(int64_t), //column_length + -1, //column_precision + -1, //column_scale 
+ true, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("target_replica_type", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObVarcharType, //column_type + CS_TYPE_INVALID, //column_collation_type + MAX_REPLICA_TYPE_LENGTH, //column_length + -1, //column_precision + -1, //column_scale + true, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("source_replica_svr_ip", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObVarcharType, //column_type + CS_TYPE_INVALID, //column_collation_type + MAX_IP_ADDR_LENGTH, //column_length + -1, //column_precision + -1, //column_scale + true, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("source_replica_svr_port", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObIntType, //column_type + CS_TYPE_INVALID, //column_collation_type + sizeof(int64_t), //column_length + -1, //column_precision + -1, //column_scale + true, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("source_paxos_replica_number", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObIntType, //column_type + CS_TYPE_INVALID, //column_collation_type + sizeof(int64_t), //column_length + -1, //column_precision + -1, //column_scale + true, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("source_replica_type", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObVarcharType, //column_type + CS_TYPE_INVALID, //column_collation_type + MAX_REPLICA_TYPE_LENGTH, //column_length + -1, //column_precision + -1, //column_scale + true, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("data_source_svr_ip", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObVarcharType, //column_type + CS_TYPE_INVALID, //column_collation_type + MAX_IP_ADDR_LENGTH, //column_length + -1, //column_precision + -1, //column_scale + true, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("data_source_svr_port", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObIntType, //column_type + CS_TYPE_INVALID, //column_collation_type + sizeof(int64_t), //column_length + -1, //column_precision + -1, //column_scale + true, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ObObj is_manual_default; + is_manual_default.set_tinyint(0); + ADD_COLUMN_SCHEMA_T("is_manual", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObTinyIntType, //column_type + CS_TYPE_INVALID, //column_collation_type + 1, //column_length + -1, //column_precision + -1, //column_scale + true, //is_nullable + false, //is_autoincrement + is_manual_default, + is_manual_default); //default_value + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("task_exec_svr_ip", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObVarcharType, //column_type + CS_TYPE_INVALID, //column_collation_type + MAX_IP_ADDR_LENGTH, //column_length + -1, //column_precision + -1, //column_scale + true, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + 
ADD_COLUMN_SCHEMA("task_exec_svr_port", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObIntType, //column_type + CS_TYPE_INVALID, //column_collation_type + sizeof(int64_t), //column_length + -1, //column_precision + -1, //column_scale + true, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ObObj gmt_default; + ObObj gmt_default_null; + + gmt_default.set_ext(ObActionFlag::OP_DEFAULT_NOW_FLAG); + gmt_default_null.set_null(); + ADD_COLUMN_SCHEMA_TS_T("generate_time", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObTimestampType, //column_type + CS_TYPE_INVALID, //column_collation_type + 0, //column_length + -1, //column_precision + 6, //column_scale + false, //is_nullable + false, //is_autoincrement + false, //is_on_update_for_timestamp + gmt_default_null, + gmt_default) + } + + if (OB_SUCC(ret)) { + ObObj gmt_default; + ObObj gmt_default_null; + + gmt_default.set_ext(ObActionFlag::OP_DEFAULT_NOW_FLAG); + gmt_default_null.set_null(); + ADD_COLUMN_SCHEMA_TS_T("schedule_time", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObTimestampType, //column_type + CS_TYPE_INVALID, //column_collation_type + 0, //column_length + -1, //column_precision + 6, //column_scale + false, //is_nullable + false, //is_autoincrement + false, //is_on_update_for_timestamp + gmt_default_null, + gmt_default) + } + + if (OB_SUCC(ret)) { + ObObj gmt_default; + ObObj gmt_default_null; + + gmt_default.set_ext(ObActionFlag::OP_DEFAULT_NOW_FLAG); + gmt_default_null.set_null(); + ADD_COLUMN_SCHEMA_TS_T("finish_time", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObTimestampType, //column_type + CS_TYPE_INVALID, //column_collation_type + 0, //column_length + -1, //column_precision + 6, //column_scale + false, //is_nullable + false, //is_autoincrement + false, //is_on_update_for_timestamp + gmt_default_null, + gmt_default) + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("execute_result", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObVarcharType, //column_type + CS_TYPE_INVALID, //column_collation_type + MAX_COLUMN_COMMENT_LENGTH, //column_length + -1, //column_precision + -1, //column_scale + true, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("comment", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObVarcharType, //column_type + CS_TYPE_INVALID, //column_collation_type + MAX_COLUMN_COMMENT_LENGTH, //column_length + -1, //column_precision + -1, //column_scale + true, //is_nullable + false); //is_autoincrement + } + table_schema.set_index_using_type(USING_BTREE); + table_schema.set_row_store_type(ENCODING_ROW_STORE); + table_schema.set_store_format(OB_STORE_FORMAT_DYNAMIC_MYSQL); + table_schema.set_progressive_merge_round(1); + table_schema.set_storage_format_version(3); + table_schema.set_tablet_id(0); + + table_schema.set_max_used_column_id(column_id); + return ret; +} + int ObInnerTableSchema::all_virtual_session_ps_info_schema(ObTableSchema &table_schema) { int ret = OB_SUCCESS; diff --git a/src/share/inner_table/ob_inner_table_schema.15401_15450.cpp b/src/share/inner_table/ob_inner_table_schema.15401_15450.cpp index 5e3e6d80d..818ee9021 100644 --- a/src/share/inner_table/ob_inner_table_schema.15401_15450.cpp +++ 
b/src/share/inner_table/ob_inner_table_schema.15401_15450.cpp @@ -6780,6 +6780,440 @@ int ObInnerTableSchema::all_virtual_index_usage_info_real_agent_ora_schema(ObTab return ret; } +int ObInnerTableSchema::all_virtual_ls_replica_task_history_ora_schema(ObTableSchema &table_schema) +{ + int ret = OB_SUCCESS; + uint64_t column_id = OB_APP_MIN_COLUMN_ID - 1; + + //generated fields: + table_schema.set_tenant_id(OB_SYS_TENANT_ID); + table_schema.set_tablegroup_id(OB_INVALID_ID); + table_schema.set_database_id(OB_ORA_SYS_DATABASE_ID); + table_schema.set_table_id(OB_ALL_VIRTUAL_LS_REPLICA_TASK_HISTORY_ORA_TID); + table_schema.set_rowkey_split_pos(0); + table_schema.set_is_use_bloomfilter(false); + table_schema.set_progressive_merge_num(0); + table_schema.set_rowkey_column_num(4); + table_schema.set_load_type(TABLE_LOAD_TYPE_IN_DISK); + table_schema.set_table_type(VIRTUAL_TABLE); + table_schema.set_index_type(INDEX_TYPE_IS_NOT); + table_schema.set_def_type(TABLE_DEF_TYPE_INTERNAL); + + if (OB_SUCC(ret)) { + if (OB_FAIL(table_schema.set_table_name(OB_ALL_VIRTUAL_LS_REPLICA_TASK_HISTORY_ORA_TNAME))) { + LOG_ERROR("fail to set table_name", K(ret)); + } + } + + if (OB_SUCC(ret)) { + if (OB_FAIL(table_schema.set_compress_func_name(OB_DEFAULT_COMPRESS_FUNC_NAME))) { + LOG_ERROR("fail to set compress_func_name", K(ret)); + } + } + table_schema.set_part_level(PARTITION_LEVEL_ZERO); + table_schema.set_charset_type(ObCharset::get_default_charset()); + table_schema.set_collation_type(ObCollationType::CS_TYPE_UTF8MB4_BIN); + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("TENANT_ID", //column_name + ++column_id, //column_id + 1, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObNumberType, //column_type + CS_TYPE_INVALID, //column_collation_type + 38, //column_length + 38, //column_precision + 0, //column_scale + false, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("LS_ID", //column_name + ++column_id, //column_id + 2, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObNumberType, //column_type + CS_TYPE_INVALID, //column_collation_type + 38, //column_length + 38, //column_precision + 0, //column_scale + false, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("TASK_TYPE", //column_name + ++column_id, //column_id + 3, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObVarcharType, //column_type + CS_TYPE_UTF8MB4_BIN, //column_collation_type + MAX_DISASTER_RECOVERY_TASK_TYPE_LENGTH, //column_length + 2, //column_precision + -1, //column_scale + false, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("TASK_ID", //column_name + ++column_id, //column_id + 4, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObVarcharType, //column_type + CS_TYPE_UTF8MB4_BIN, //column_collation_type + OB_TRACE_STAT_BUFFER_SIZE, //column_length + 2, //column_precision + -1, //column_scale + false, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("GMT_CREATE", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObTimestampLTZType, //column_type + CS_TYPE_INVALID, //column_collation_type + 0, //column_length + -1, //column_precision + -1, //column_scale + false, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("GMT_MODIFIED", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObTimestampLTZType, //column_type + CS_TYPE_INVALID, 
//column_collation_type + 0, //column_length + -1, //column_precision + -1, //column_scale + false, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("TASK_STATUS", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObVarcharType, //column_type + CS_TYPE_UTF8MB4_BIN, //column_collation_type + MAX_COLUMN_COMMENT_LENGTH, //column_length + 2, //column_precision + -1, //column_scale + true, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("PRIORITY", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObNumberType, //column_type + CS_TYPE_INVALID, //column_collation_type + 38, //column_length + 38, //column_precision + 0, //column_scale + false, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("TARGET_REPLICA_SVR_IP", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObVarcharType, //column_type + CS_TYPE_UTF8MB4_BIN, //column_collation_type + MAX_IP_ADDR_LENGTH, //column_length + 2, //column_precision + -1, //column_scale + true, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("TARGET_REPLICA_SVR_PORT", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObNumberType, //column_type + CS_TYPE_INVALID, //column_collation_type + 38, //column_length + 38, //column_precision + 0, //column_scale + true, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("TARGET_PAXOS_REPLICA_NUMBER", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObNumberType, //column_type + CS_TYPE_INVALID, //column_collation_type + 38, //column_length + 38, //column_precision + 0, //column_scale + true, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("TARGET_REPLICA_TYPE", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObVarcharType, //column_type + CS_TYPE_UTF8MB4_BIN, //column_collation_type + MAX_REPLICA_TYPE_LENGTH, //column_length + 2, //column_precision + -1, //column_scale + true, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("SOURCE_REPLICA_SVR_IP", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObVarcharType, //column_type + CS_TYPE_UTF8MB4_BIN, //column_collation_type + MAX_IP_ADDR_LENGTH, //column_length + 2, //column_precision + -1, //column_scale + true, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("SOURCE_REPLICA_SVR_PORT", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObNumberType, //column_type + CS_TYPE_INVALID, //column_collation_type + 38, //column_length + 38, //column_precision + 0, //column_scale + true, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("SOURCE_PAXOS_REPLICA_NUMBER", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObNumberType, //column_type + CS_TYPE_INVALID, //column_collation_type + 38, //column_length + 38, //column_precision + 0, //column_scale + true, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("SOURCE_REPLICA_TYPE", //column_name + ++column_id, 
//column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObVarcharType, //column_type + CS_TYPE_UTF8MB4_BIN, //column_collation_type + MAX_REPLICA_TYPE_LENGTH, //column_length + 2, //column_precision + -1, //column_scale + true, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("DATA_SOURCE_SVR_IP", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObVarcharType, //column_type + CS_TYPE_UTF8MB4_BIN, //column_collation_type + MAX_IP_ADDR_LENGTH, //column_length + 2, //column_precision + -1, //column_scale + true, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("DATA_SOURCE_SVR_PORT", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObNumberType, //column_type + CS_TYPE_INVALID, //column_collation_type + 38, //column_length + 38, //column_precision + 0, //column_scale + true, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("IS_MANUAL", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObNumberType, //column_type + CS_TYPE_INVALID, //column_collation_type + 38, //column_length + 38, //column_precision + 0, //column_scale + true, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("TASK_EXEC_SVR_IP", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObVarcharType, //column_type + CS_TYPE_UTF8MB4_BIN, //column_collation_type + MAX_IP_ADDR_LENGTH, //column_length + 2, //column_precision + -1, //column_scale + true, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("TASK_EXEC_SVR_PORT", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObNumberType, //column_type + CS_TYPE_INVALID, //column_collation_type + 38, //column_length + 38, //column_precision + 0, //column_scale + true, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("GENERATE_TIME", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObTimestampLTZType, //column_type + CS_TYPE_INVALID, //column_collation_type + 0, //column_length + -1, //column_precision + -1, //column_scale + false, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("SCHEDULE_TIME", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObTimestampLTZType, //column_type + CS_TYPE_INVALID, //column_collation_type + 0, //column_length + -1, //column_precision + -1, //column_scale + false, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("FINISH_TIME", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObTimestampLTZType, //column_type + CS_TYPE_INVALID, //column_collation_type + 0, //column_length + -1, //column_precision + -1, //column_scale + false, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("EXECUTE_RESULT", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObVarcharType, //column_type + CS_TYPE_UTF8MB4_BIN, //column_collation_type + MAX_COLUMN_COMMENT_LENGTH, //column_length + 2, //column_precision + -1, //column_scale + true, //is_nullable + false); //is_autoincrement + } + + if 
(OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("COMMENT", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObVarcharType, //column_type + CS_TYPE_UTF8MB4_BIN, //column_collation_type + MAX_COLUMN_COMMENT_LENGTH, //column_length + 2, //column_precision + -1, //column_scale + true, //is_nullable + false); //is_autoincrement + } + table_schema.set_index_using_type(USING_BTREE); + table_schema.set_row_store_type(ENCODING_ROW_STORE); + table_schema.set_store_format(OB_STORE_FORMAT_DYNAMIC_MYSQL); + table_schema.set_progressive_merge_round(1); + table_schema.set_storage_format_version(3); + table_schema.set_tablet_id(0); + + table_schema.set_max_used_column_id(column_id); + return ret; +} + int ObInnerTableSchema::all_virtual_session_ps_info_ora_schema(ObTableSchema &table_schema) { int ret = OB_SUCCESS; diff --git a/src/share/inner_table/ob_inner_table_schema.21301_21350.cpp b/src/share/inner_table/ob_inner_table_schema.21301_21350.cpp index 7dd2d5b88..50e2a1891 100644 --- a/src/share/inner_table/ob_inner_table_schema.21301_21350.cpp +++ b/src/share/inner_table/ob_inner_table_schema.21301_21350.cpp @@ -1217,7 +1217,7 @@ int ObInnerTableSchema::dba_ob_ls_replica_tasks_schema(ObTableSchema &table_sche table_schema.set_collation_type(ObCharset::get_default_collation(ObCharset::get_default_charset())); if (OB_SUCC(ret)) { - if (OB_FAIL(table_schema.set_view_definition(R"__( ( SELECT LS_ID, TASK_TYPE, TASK_ID, TASK_STATUS, CAST(CASE PRIORITY WHEN 0 THEN 'HIGH' WHEN 1 THEN 'LOW' ELSE NULL END AS CHAR(5)) AS PRIORITY, TARGET_REPLICA_SVR_IP, TARGET_REPLICA_SVR_PORT, TARGET_PAXOS_REPLICA_NUMBER, TARGET_REPLICA_TYPE, (CASE SOURCE_REPLICA_SVR_IP WHEN "" THEN NULL ELSE SOURCE_REPLICA_SVR_IP END) AS SOURCE_REPLICA_SVR_IP, SOURCE_REPLICA_SVR_PORT, SOURCE_PAXOS_REPLICA_NUMBER, (CASE SOURCE_REPLICA_TYPE WHEN "" THEN NULL ELSE SOURCE_REPLICA_TYPE END) AS SOURCE_REPLICA_TYPE, TASK_EXEC_SVR_IP, TASK_EXEC_SVR_PORT, CAST(GMT_CREATE AS DATETIME) AS CREATE_TIME, CAST(SCHEDULE_TIME AS DATETIME) AS START_TIME, CAST(GMT_MODIFIED AS DATETIME) AS MODIFY_TIME, COMMENT FROM OCEANBASE.__ALL_VIRTUAL_LS_REPLICA_TASK WHERE TENANT_ID = EFFECTIVE_TENANT_ID() ) )__"))) { + if (OB_FAIL(table_schema.set_view_definition(R"__( ( SELECT LS_ID, TASK_TYPE, TASK_ID, TASK_STATUS, CAST(CASE PRIORITY WHEN 0 THEN 'HIGH' WHEN 1 THEN 'LOW' ELSE NULL END AS CHAR(5)) AS PRIORITY, TARGET_REPLICA_SVR_IP, TARGET_REPLICA_SVR_PORT, TARGET_PAXOS_REPLICA_NUMBER, TARGET_REPLICA_TYPE, (CASE SOURCE_REPLICA_SVR_IP WHEN "" THEN NULL ELSE SOURCE_REPLICA_SVR_IP END) AS SOURCE_REPLICA_SVR_IP, SOURCE_REPLICA_SVR_PORT, SOURCE_PAXOS_REPLICA_NUMBER, (CASE SOURCE_REPLICA_TYPE WHEN "" THEN NULL ELSE SOURCE_REPLICA_TYPE END) AS SOURCE_REPLICA_TYPE, (CASE DATA_SOURCE_SVR_IP WHEN "" THEN NULL ELSE DATA_SOURCE_SVR_IP END) AS DATA_SOURCE_SVR_IP, DATA_SOURCE_SVR_PORT, CAST(CASE IS_MANUAL WHEN 0 THEN 'FALSE' WHEN 1 THEN 'TRUE' ELSE NULL END AS CHAR(6)) AS IS_MANUAL, TASK_EXEC_SVR_IP, TASK_EXEC_SVR_PORT, CAST(GMT_CREATE AS DATETIME) AS CREATE_TIME, CAST(SCHEDULE_TIME AS DATETIME) AS START_TIME, CAST(GMT_MODIFIED AS DATETIME) AS MODIFY_TIME, COMMENT FROM OCEANBASE.__ALL_VIRTUAL_LS_REPLICA_TASK WHERE TENANT_ID = EFFECTIVE_TENANT_ID() ) )__"))) { LOG_ERROR("fail to set view_definition", K(ret)); } } @@ -1267,7 +1267,7 @@ int ObInnerTableSchema::cdb_ob_ls_replica_tasks_schema(ObTableSchema &table_sche table_schema.set_collation_type(ObCharset::get_default_collation(ObCharset::get_default_charset())); if (OB_SUCC(ret)) { - if 
(OB_FAIL(table_schema.set_view_definition(R"__( ( SELECT TENANT_ID, LS_ID, TASK_TYPE, TASK_ID, TASK_STATUS, CAST(CASE PRIORITY WHEN 0 THEN 'HIGH' WHEN 1 THEN 'LOW' ELSE NULL END AS CHAR(5)) AS PRIORITY, TARGET_REPLICA_SVR_IP, TARGET_REPLICA_SVR_PORT, TARGET_PAXOS_REPLICA_NUMBER, TARGET_REPLICA_TYPE, (CASE SOURCE_REPLICA_SVR_IP WHEN "" THEN NULL ELSE SOURCE_REPLICA_SVR_IP END) AS SOURCE_REPLICA_SVR_IP, SOURCE_REPLICA_SVR_PORT, SOURCE_PAXOS_REPLICA_NUMBER, (CASE SOURCE_REPLICA_TYPE WHEN "" THEN NULL ELSE SOURCE_REPLICA_TYPE END) AS SOURCE_REPLICA_TYPE, TASK_EXEC_SVR_IP, TASK_EXEC_SVR_PORT, CAST(GMT_CREATE AS DATETIME) AS CREATE_TIME, CAST(SCHEDULE_TIME AS DATETIME) AS START_TIME, CAST(GMT_MODIFIED AS DATETIME) AS MODIFY_TIME, COMMENT FROM OCEANBASE.__ALL_VIRTUAL_LS_REPLICA_TASK ) )__"))) { + if (OB_FAIL(table_schema.set_view_definition(R"__( ( SELECT TENANT_ID, LS_ID, TASK_TYPE, TASK_ID, TASK_STATUS, CAST(CASE PRIORITY WHEN 0 THEN 'HIGH' WHEN 1 THEN 'LOW' ELSE NULL END AS CHAR(5)) AS PRIORITY, TARGET_REPLICA_SVR_IP, TARGET_REPLICA_SVR_PORT, TARGET_PAXOS_REPLICA_NUMBER, TARGET_REPLICA_TYPE, (CASE SOURCE_REPLICA_SVR_IP WHEN "" THEN NULL ELSE SOURCE_REPLICA_SVR_IP END) AS SOURCE_REPLICA_SVR_IP, SOURCE_REPLICA_SVR_PORT, SOURCE_PAXOS_REPLICA_NUMBER, (CASE SOURCE_REPLICA_TYPE WHEN "" THEN NULL ELSE SOURCE_REPLICA_TYPE END) AS SOURCE_REPLICA_TYPE, (CASE DATA_SOURCE_SVR_IP WHEN "" THEN NULL ELSE DATA_SOURCE_SVR_IP END) AS DATA_SOURCE_SVR_IP, DATA_SOURCE_SVR_PORT, CAST(CASE IS_MANUAL WHEN 0 THEN 'FALSE' WHEN 1 THEN 'TRUE' ELSE NULL END AS CHAR(6)) AS IS_MANUAL, TASK_EXEC_SVR_IP, TASK_EXEC_SVR_PORT, CAST(GMT_CREATE AS DATETIME) AS CREATE_TIME, CAST(SCHEDULE_TIME AS DATETIME) AS START_TIME, CAST(GMT_MODIFIED AS DATETIME) AS MODIFY_TIME, COMMENT FROM OCEANBASE.__ALL_VIRTUAL_LS_REPLICA_TASK ) )__"))) { LOG_ERROR("fail to set view_definition", K(ret)); } } diff --git a/src/share/inner_table/ob_inner_table_schema.21501_21550.cpp b/src/share/inner_table/ob_inner_table_schema.21501_21550.cpp index c259b3fb0..625cddb4f 100644 --- a/src/share/inner_table/ob_inner_table_schema.21501_21550.cpp +++ b/src/share/inner_table/ob_inner_table_schema.21501_21550.cpp @@ -725,6 +725,106 @@ int ObInnerTableSchema::dba_ob_clone_history_schema(ObTableSchema &table_schema) return ret; } +int ObInnerTableSchema::dba_ob_ls_replica_task_history_schema(ObTableSchema &table_schema) +{ + int ret = OB_SUCCESS; + uint64_t column_id = OB_APP_MIN_COLUMN_ID - 1; + + //generated fields: + table_schema.set_tenant_id(OB_SYS_TENANT_ID); + table_schema.set_tablegroup_id(OB_INVALID_ID); + table_schema.set_database_id(OB_SYS_DATABASE_ID); + table_schema.set_table_id(OB_DBA_OB_LS_REPLICA_TASK_HISTORY_TID); + table_schema.set_rowkey_split_pos(0); + table_schema.set_is_use_bloomfilter(false); + table_schema.set_progressive_merge_num(0); + table_schema.set_rowkey_column_num(0); + table_schema.set_load_type(TABLE_LOAD_TYPE_IN_DISK); + table_schema.set_table_type(SYSTEM_VIEW); + table_schema.set_index_type(INDEX_TYPE_IS_NOT); + table_schema.set_def_type(TABLE_DEF_TYPE_INTERNAL); + + if (OB_SUCC(ret)) { + if (OB_FAIL(table_schema.set_table_name(OB_DBA_OB_LS_REPLICA_TASK_HISTORY_TNAME))) { + LOG_ERROR("fail to set table_name", K(ret)); + } + } + + if (OB_SUCC(ret)) { + if (OB_FAIL(table_schema.set_compress_func_name(OB_DEFAULT_COMPRESS_FUNC_NAME))) { + LOG_ERROR("fail to set compress_func_name", K(ret)); + } + } + table_schema.set_part_level(PARTITION_LEVEL_ZERO); + table_schema.set_charset_type(ObCharset::get_default_charset()); + 
table_schema.set_collation_type(ObCharset::get_default_collation(ObCharset::get_default_charset())); + + if (OB_SUCC(ret)) { + if (OB_FAIL(table_schema.set_view_definition(R"__( ( SELECT LS_ID, TASK_TYPE, TASK_ID, TASK_STATUS, CAST(CASE PRIORITY WHEN 0 THEN 'HIGH' WHEN 1 THEN 'LOW' ELSE NULL END AS CHAR(5)) AS PRIORITY, TARGET_REPLICA_SVR_IP, TARGET_REPLICA_SVR_PORT, TARGET_PAXOS_REPLICA_NUMBER, TARGET_REPLICA_TYPE, (CASE SOURCE_REPLICA_SVR_IP WHEN "" THEN NULL ELSE SOURCE_REPLICA_SVR_IP END) AS SOURCE_REPLICA_SVR_IP, SOURCE_REPLICA_SVR_PORT, SOURCE_PAXOS_REPLICA_NUMBER, (CASE SOURCE_REPLICA_TYPE WHEN "" THEN NULL ELSE SOURCE_REPLICA_TYPE END) AS SOURCE_REPLICA_TYPE, (CASE DATA_SOURCE_SVR_IP WHEN "" THEN NULL ELSE DATA_SOURCE_SVR_IP END) AS DATA_SOURCE_SVR_IP, DATA_SOURCE_SVR_PORT, CAST(CASE IS_MANUAL WHEN 0 THEN 'FALSE' WHEN 1 THEN 'TRUE' ELSE NULL END AS CHAR(6)) AS IS_MANUAL, TASK_EXEC_SVR_IP, TASK_EXEC_SVR_PORT, CAST(GMT_CREATE AS DATETIME) AS CREATE_TIME, CAST(SCHEDULE_TIME AS DATETIME) AS START_TIME, CAST(GMT_MODIFIED AS DATETIME) AS MODIFY_TIME, CAST(FINISH_TIME AS DATETIME) AS FINISH_TIME, (CASE EXECUTE_RESULT WHEN "" THEN NULL ELSE EXECUTE_RESULT END) AS EXECUTE_RESULT, COMMENT FROM OCEANBASE.__ALL_VIRTUAL_LS_REPLICA_TASK_HISTORY WHERE TENANT_ID = EFFECTIVE_TENANT_ID() ) )__"))) { + LOG_ERROR("fail to set view_definition", K(ret)); + } + } + table_schema.set_index_using_type(USING_BTREE); + table_schema.set_row_store_type(ENCODING_ROW_STORE); + table_schema.set_store_format(OB_STORE_FORMAT_DYNAMIC_MYSQL); + table_schema.set_progressive_merge_round(1); + table_schema.set_storage_format_version(3); + table_schema.set_tablet_id(0); + + table_schema.set_max_used_column_id(column_id); + return ret; +} + +int ObInnerTableSchema::cdb_ob_ls_replica_task_history_schema(ObTableSchema &table_schema) +{ + int ret = OB_SUCCESS; + uint64_t column_id = OB_APP_MIN_COLUMN_ID - 1; + + //generated fields: + table_schema.set_tenant_id(OB_SYS_TENANT_ID); + table_schema.set_tablegroup_id(OB_INVALID_ID); + table_schema.set_database_id(OB_SYS_DATABASE_ID); + table_schema.set_table_id(OB_CDB_OB_LS_REPLICA_TASK_HISTORY_TID); + table_schema.set_rowkey_split_pos(0); + table_schema.set_is_use_bloomfilter(false); + table_schema.set_progressive_merge_num(0); + table_schema.set_rowkey_column_num(0); + table_schema.set_load_type(TABLE_LOAD_TYPE_IN_DISK); + table_schema.set_table_type(SYSTEM_VIEW); + table_schema.set_index_type(INDEX_TYPE_IS_NOT); + table_schema.set_def_type(TABLE_DEF_TYPE_INTERNAL); + + if (OB_SUCC(ret)) { + if (OB_FAIL(table_schema.set_table_name(OB_CDB_OB_LS_REPLICA_TASK_HISTORY_TNAME))) { + LOG_ERROR("fail to set table_name", K(ret)); + } + } + + if (OB_SUCC(ret)) { + if (OB_FAIL(table_schema.set_compress_func_name(OB_DEFAULT_COMPRESS_FUNC_NAME))) { + LOG_ERROR("fail to set compress_func_name", K(ret)); + } + } + table_schema.set_part_level(PARTITION_LEVEL_ZERO); + table_schema.set_charset_type(ObCharset::get_default_charset()); + table_schema.set_collation_type(ObCharset::get_default_collation(ObCharset::get_default_charset())); + + if (OB_SUCC(ret)) { + if (OB_FAIL(table_schema.set_view_definition(R"__( ( SELECT TENANT_ID, LS_ID, TASK_TYPE, TASK_ID, TASK_STATUS, CAST(CASE PRIORITY WHEN 0 THEN 'HIGH' WHEN 1 THEN 'LOW' ELSE NULL END AS CHAR(5)) AS PRIORITY, TARGET_REPLICA_SVR_IP, TARGET_REPLICA_SVR_PORT, TARGET_PAXOS_REPLICA_NUMBER, TARGET_REPLICA_TYPE, (CASE SOURCE_REPLICA_SVR_IP WHEN "" THEN NULL ELSE SOURCE_REPLICA_SVR_IP END) AS SOURCE_REPLICA_SVR_IP, SOURCE_REPLICA_SVR_PORT, 
SOURCE_PAXOS_REPLICA_NUMBER, (CASE SOURCE_REPLICA_TYPE WHEN "" THEN NULL ELSE SOURCE_REPLICA_TYPE END) AS SOURCE_REPLICA_TYPE, (CASE DATA_SOURCE_SVR_IP WHEN "" THEN NULL ELSE DATA_SOURCE_SVR_IP END) AS DATA_SOURCE_SVR_IP, DATA_SOURCE_SVR_PORT, CAST(CASE IS_MANUAL WHEN 0 THEN 'FALSE' WHEN 1 THEN 'TRUE' ELSE NULL END AS CHAR(6)) AS IS_MANUAL, TASK_EXEC_SVR_IP, TASK_EXEC_SVR_PORT, CAST(GMT_CREATE AS DATETIME) AS CREATE_TIME, CAST(SCHEDULE_TIME AS DATETIME) AS START_TIME, CAST(GMT_MODIFIED AS DATETIME) AS MODIFY_TIME, CAST(FINISH_TIME AS DATETIME) AS FINISH_TIME, (CASE EXECUTE_RESULT WHEN "" THEN NULL ELSE EXECUTE_RESULT END) AS EXECUTE_RESULT, COMMENT FROM OCEANBASE.__ALL_VIRTUAL_LS_REPLICA_TASK_HISTORY ) )__"))) { + LOG_ERROR("fail to set view_definition", K(ret)); + } + } + table_schema.set_index_using_type(USING_BTREE); + table_schema.set_row_store_type(ENCODING_ROW_STORE); + table_schema.set_store_format(OB_STORE_FORMAT_DYNAMIC_MYSQL); + table_schema.set_progressive_merge_round(1); + table_schema.set_storage_format_version(3); + table_schema.set_tablet_id(0); + + table_schema.set_max_used_column_id(column_id); + return ret; +} + int ObInnerTableSchema::cdb_mview_logs_schema(ObTableSchema &table_schema) { int ret = OB_SUCCESS; diff --git a/src/share/inner_table/ob_inner_table_schema.25201_25250.cpp b/src/share/inner_table/ob_inner_table_schema.25201_25250.cpp index 618a57f6d..e08bda956 100644 --- a/src/share/inner_table/ob_inner_table_schema.25201_25250.cpp +++ b/src/share/inner_table/ob_inner_table_schema.25201_25250.cpp @@ -110,7 +110,7 @@ int ObInnerTableSchema::dba_ob_ls_replica_tasks_ora_schema(ObTableSchema &table_ table_schema.set_collation_type(ObCharset::get_default_collation(ObCharset::get_default_charset())); if (OB_SUCC(ret)) { - if (OB_FAIL(table_schema.set_view_definition(R"__( ( SELECT LS_ID, TASK_TYPE, TASK_ID, TASK_STATUS, CAST(CASE PRIORITY WHEN 1 THEN 'HIGH' WHEN 2 THEN 'LOW' ELSE NULL END AS CHAR(5)) AS PRIORITY, TARGET_REPLICA_SVR_IP, TARGET_REPLICA_SVR_PORT, TARGET_PAXOS_REPLICA_NUMBER, TARGET_REPLICA_TYPE, CASE SOURCE_REPLICA_SVR_IP WHEN '' THEN NULL ELSE SOURCE_REPLICA_SVR_IP END AS SOURCE_REPLICA_SVR_IP, SOURCE_REPLICA_SVR_PORT, SOURCE_PAXOS_REPLICA_NUMBER, CASE SOURCE_REPLICA_TYPE WHEN '' THEN NULL ELSE SOURCE_REPLICA_TYPE END AS SOURCE_REPLICA_TYPE, TASK_EXEC_SVR_IP, TASK_EXEC_SVR_PORT, CAST(GMT_CREATE AS TIMESTAMP(6)) AS CREATE_TIME, CAST(SCHEDULE_TIME AS TIMESTAMP(6)) AS START_TIME, CAST(GMT_MODIFIED AS TIMESTAMP(6)) AS MODIFY_TIME, "COMMENT" FROM SYS.ALL_VIRTUAL_LS_REPLICA_TASK WHERE TENANT_ID = EFFECTIVE_TENANT_ID() ) )__"))) { + if (OB_FAIL(table_schema.set_view_definition(R"__( ( SELECT LS_ID, TASK_TYPE, TASK_ID, TASK_STATUS, CAST(CASE PRIORITY WHEN 0 THEN 'HIGH' WHEN 1 THEN 'LOW' ELSE NULL END AS CHAR(5)) AS PRIORITY, TARGET_REPLICA_SVR_IP, TARGET_REPLICA_SVR_PORT, TARGET_PAXOS_REPLICA_NUMBER, TARGET_REPLICA_TYPE, CASE SOURCE_REPLICA_SVR_IP WHEN '' THEN NULL ELSE SOURCE_REPLICA_SVR_IP END AS SOURCE_REPLICA_SVR_IP, SOURCE_REPLICA_SVR_PORT, SOURCE_PAXOS_REPLICA_NUMBER, CASE SOURCE_REPLICA_TYPE WHEN '' THEN NULL ELSE SOURCE_REPLICA_TYPE END AS SOURCE_REPLICA_TYPE, CASE DATA_SOURCE_SVR_IP WHEN '' THEN NULL ELSE DATA_SOURCE_SVR_IP END AS DATA_SOURCE_SVR_IP, DATA_SOURCE_SVR_PORT, CAST(CASE IS_MANUAL WHEN 0 THEN 'FALSE' WHEN 1 THEN 'TRUE' ELSE NULL END AS CHAR(6)) AS IS_MANUAL, TASK_EXEC_SVR_IP, TASK_EXEC_SVR_PORT, CAST(GMT_CREATE AS TIMESTAMP(6)) AS CREATE_TIME, CAST(SCHEDULE_TIME AS TIMESTAMP(6)) AS START_TIME, CAST(GMT_MODIFIED AS TIMESTAMP(6)) AS MODIFY_TIME, 
"COMMENT" FROM SYS.ALL_VIRTUAL_LS_REPLICA_TASK WHERE TENANT_ID = EFFECTIVE_TENANT_ID() ) )__"))) { LOG_ERROR("fail to set view_definition", K(ret)); } } diff --git a/src/share/inner_table/ob_inner_table_schema.25251_25300.cpp b/src/share/inner_table/ob_inner_table_schema.25251_25300.cpp index de5dce0dd..809593437 100644 --- a/src/share/inner_table/ob_inner_table_schema.25251_25300.cpp +++ b/src/share/inner_table/ob_inner_table_schema.25251_25300.cpp @@ -775,6 +775,56 @@ int ObInnerTableSchema::user_users_schema(ObTableSchema &table_schema) return ret; } +int ObInnerTableSchema::dba_ob_ls_replica_task_history_ora_schema(ObTableSchema &table_schema) +{ + int ret = OB_SUCCESS; + uint64_t column_id = OB_APP_MIN_COLUMN_ID - 1; + + //generated fields: + table_schema.set_tenant_id(OB_SYS_TENANT_ID); + table_schema.set_tablegroup_id(OB_INVALID_ID); + table_schema.set_database_id(OB_ORA_SYS_DATABASE_ID); + table_schema.set_table_id(OB_DBA_OB_LS_REPLICA_TASK_HISTORY_ORA_TID); + table_schema.set_rowkey_split_pos(0); + table_schema.set_is_use_bloomfilter(false); + table_schema.set_progressive_merge_num(0); + table_schema.set_rowkey_column_num(0); + table_schema.set_load_type(TABLE_LOAD_TYPE_IN_DISK); + table_schema.set_table_type(SYSTEM_VIEW); + table_schema.set_index_type(INDEX_TYPE_IS_NOT); + table_schema.set_def_type(TABLE_DEF_TYPE_INTERNAL); + + if (OB_SUCC(ret)) { + if (OB_FAIL(table_schema.set_table_name(OB_DBA_OB_LS_REPLICA_TASK_HISTORY_ORA_TNAME))) { + LOG_ERROR("fail to set table_name", K(ret)); + } + } + + if (OB_SUCC(ret)) { + if (OB_FAIL(table_schema.set_compress_func_name(OB_DEFAULT_COMPRESS_FUNC_NAME))) { + LOG_ERROR("fail to set compress_func_name", K(ret)); + } + } + table_schema.set_part_level(PARTITION_LEVEL_ZERO); + table_schema.set_charset_type(ObCharset::get_default_charset()); + table_schema.set_collation_type(ObCharset::get_default_collation(ObCharset::get_default_charset())); + + if (OB_SUCC(ret)) { + if (OB_FAIL(table_schema.set_view_definition(R"__( ( SELECT LS_ID, TASK_TYPE, TASK_ID, TASK_STATUS, CAST(CASE PRIORITY WHEN 0 THEN 'HIGH' WHEN 1 THEN 'LOW' ELSE NULL END AS CHAR(5)) AS PRIORITY, TARGET_REPLICA_SVR_IP, TARGET_REPLICA_SVR_PORT, TARGET_PAXOS_REPLICA_NUMBER, TARGET_REPLICA_TYPE, CASE SOURCE_REPLICA_SVR_IP WHEN '' THEN NULL ELSE SOURCE_REPLICA_SVR_IP END AS SOURCE_REPLICA_SVR_IP, SOURCE_REPLICA_SVR_PORT, SOURCE_PAXOS_REPLICA_NUMBER, CASE SOURCE_REPLICA_TYPE WHEN '' THEN NULL ELSE SOURCE_REPLICA_TYPE END AS SOURCE_REPLICA_TYPE, CASE DATA_SOURCE_SVR_IP WHEN '' THEN NULL ELSE DATA_SOURCE_SVR_IP END AS DATA_SOURCE_SVR_IP, DATA_SOURCE_SVR_PORT, CAST(CASE IS_MANUAL WHEN 0 THEN 'FALSE' WHEN 1 THEN 'TRUE' ELSE NULL END AS CHAR(6)) AS IS_MANUAL, TASK_EXEC_SVR_IP, TASK_EXEC_SVR_PORT, CAST(GMT_CREATE AS TIMESTAMP(6)) AS CREATE_TIME, CAST(SCHEDULE_TIME AS TIMESTAMP(6)) AS START_TIME, CAST(GMT_MODIFIED AS TIMESTAMP(6)) AS MODIFY_TIME, CAST(FINISH_TIME AS TIMESTAMP(6)) AS FINISH_TIME, CASE EXECUTE_RESULT WHEN '' THEN NULL ELSE EXECUTE_RESULT END AS EXECUTE_RESULT, "COMMENT" FROM SYS.ALL_VIRTUAL_LS_REPLICA_TASK_HISTORY WHERE TENANT_ID = EFFECTIVE_TENANT_ID() ) )__"))) { + LOG_ERROR("fail to set view_definition", K(ret)); + } + } + table_schema.set_index_using_type(USING_BTREE); + table_schema.set_row_store_type(ENCODING_ROW_STORE); + table_schema.set_store_format(OB_STORE_FORMAT_DYNAMIC_MYSQL); + table_schema.set_progressive_merge_round(1); + table_schema.set_storage_format_version(3); + table_schema.set_tablet_id(0); + + table_schema.set_max_used_column_id(column_id); + return ret; 
+} + int ObInnerTableSchema::dba_mview_logs_ora_schema(ObTableSchema &table_schema) { int ret = OB_SUCCESS; diff --git a/src/share/inner_table/ob_inner_table_schema.501_550.cpp b/src/share/inner_table/ob_inner_table_schema.501_550.cpp index 0ccf20f71..21590a612 100644 --- a/src/share/inner_table/ob_inner_table_schema.501_550.cpp +++ b/src/share/inner_table/ob_inner_table_schema.501_550.cpp @@ -1035,6 +1035,490 @@ int ObInnerTableSchema::all_tenant_snapshot_ls_replica_history_schema(ObTableSch return ret; } +int ObInnerTableSchema::all_ls_replica_task_history_schema(ObTableSchema &table_schema) +{ + int ret = OB_SUCCESS; + uint64_t column_id = OB_APP_MIN_COLUMN_ID - 1; + + //generated fields: + table_schema.set_tenant_id(OB_SYS_TENANT_ID); + table_schema.set_tablegroup_id(OB_SYS_TABLEGROUP_ID); + table_schema.set_database_id(OB_SYS_DATABASE_ID); + table_schema.set_table_id(OB_ALL_LS_REPLICA_TASK_HISTORY_TID); + table_schema.set_rowkey_split_pos(0); + table_schema.set_is_use_bloomfilter(false); + table_schema.set_progressive_merge_num(0); + table_schema.set_rowkey_column_num(4); + table_schema.set_load_type(TABLE_LOAD_TYPE_IN_DISK); + table_schema.set_table_type(SYSTEM_TABLE); + table_schema.set_index_type(INDEX_TYPE_IS_NOT); + table_schema.set_def_type(TABLE_DEF_TYPE_INTERNAL); + + if (OB_SUCC(ret)) { + if (OB_FAIL(table_schema.set_table_name(OB_ALL_LS_REPLICA_TASK_HISTORY_TNAME))) { + LOG_ERROR("fail to set table_name", K(ret)); + } + } + + if (OB_SUCC(ret)) { + if (OB_FAIL(table_schema.set_compress_func_name(OB_DEFAULT_COMPRESS_FUNC_NAME))) { + LOG_ERROR("fail to set compress_func_name", K(ret)); + } + } + table_schema.set_part_level(PARTITION_LEVEL_ZERO); + table_schema.set_charset_type(ObCharset::get_default_charset()); + table_schema.set_collation_type(ObCharset::get_default_collation(ObCharset::get_default_charset())); + + if (OB_SUCC(ret)) { + ObObj gmt_create_default; + ObObj gmt_create_default_null; + + gmt_create_default.set_ext(ObActionFlag::OP_DEFAULT_NOW_FLAG); + gmt_create_default_null.set_null(); + ADD_COLUMN_SCHEMA_TS_T("gmt_create", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObTimestampType, //column_type + CS_TYPE_BINARY,//collation_type + 0, //column length + -1, //column_precision + 6, //column_scale + true,//is nullable + false, //is_autoincrement + false, //is_on_update_for_timestamp + gmt_create_default_null, + gmt_create_default) + } + + if (OB_SUCC(ret)) { + ObObj gmt_modified_default; + ObObj gmt_modified_default_null; + + gmt_modified_default.set_ext(ObActionFlag::OP_DEFAULT_NOW_FLAG); + gmt_modified_default_null.set_null(); + ADD_COLUMN_SCHEMA_TS_T("gmt_modified", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObTimestampType, //column_type + CS_TYPE_BINARY,//collation_type + 0, //column length + -1, //column_precision + 6, //column_scale + true,//is nullable + false, //is_autoincrement + true, //is_on_update_for_timestamp + gmt_modified_default_null, + gmt_modified_default) + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("tenant_id", //column_name + ++column_id, //column_id + 1, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObIntType, //column_type + CS_TYPE_INVALID, //column_collation_type + sizeof(int64_t), //column_length + -1, //column_precision + -1, //column_scale + false, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("ls_id", //column_name + ++column_id, //column_id + 2, //rowkey_id + 0, //index_id + 0, 
//part_key_pos + ObIntType, //column_type + CS_TYPE_INVALID, //column_collation_type + sizeof(int64_t), //column_length + -1, //column_precision + -1, //column_scale + false, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("task_type", //column_name + ++column_id, //column_id + 3, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObVarcharType, //column_type + CS_TYPE_INVALID, //column_collation_type + MAX_DISASTER_RECOVERY_TASK_TYPE_LENGTH, //column_length + -1, //column_precision + -1, //column_scale + false, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("task_id", //column_name + ++column_id, //column_id + 4, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObVarcharType, //column_type + CS_TYPE_INVALID, //column_collation_type + OB_TRACE_STAT_BUFFER_SIZE, //column_length + -1, //column_precision + -1, //column_scale + false, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("task_status", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObVarcharType, //column_type + CS_TYPE_INVALID, //column_collation_type + MAX_COLUMN_COMMENT_LENGTH, //column_length + -1, //column_precision + -1, //column_scale + true, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ObObj priority_default; + priority_default.set_int(1); + ADD_COLUMN_SCHEMA_T("priority", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObIntType, //column_type + CS_TYPE_INVALID, //column_collation_type + sizeof(int64_t), //column_length + -1, //column_precision + -1, //column_scale + false, //is_nullable + false, //is_autoincrement + priority_default, + priority_default); //default_value + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("target_replica_svr_ip", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObVarcharType, //column_type + CS_TYPE_INVALID, //column_collation_type + MAX_IP_ADDR_LENGTH, //column_length + -1, //column_precision + -1, //column_scale + true, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("target_replica_svr_port", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObIntType, //column_type + CS_TYPE_INVALID, //column_collation_type + sizeof(int64_t), //column_length + -1, //column_precision + -1, //column_scale + true, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("target_paxos_replica_number", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObIntType, //column_type + CS_TYPE_INVALID, //column_collation_type + sizeof(int64_t), //column_length + -1, //column_precision + -1, //column_scale + true, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("target_replica_type", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObVarcharType, //column_type + CS_TYPE_INVALID, //column_collation_type + MAX_REPLICA_TYPE_LENGTH, //column_length + -1, //column_precision + -1, //column_scale + true, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("source_replica_svr_ip", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObVarcharType, //column_type + CS_TYPE_INVALID, 
//column_collation_type + MAX_IP_ADDR_LENGTH, //column_length + -1, //column_precision + -1, //column_scale + true, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("source_replica_svr_port", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObIntType, //column_type + CS_TYPE_INVALID, //column_collation_type + sizeof(int64_t), //column_length + -1, //column_precision + -1, //column_scale + true, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("source_paxos_replica_number", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObIntType, //column_type + CS_TYPE_INVALID, //column_collation_type + sizeof(int64_t), //column_length + -1, //column_precision + -1, //column_scale + true, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("source_replica_type", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObVarcharType, //column_type + CS_TYPE_INVALID, //column_collation_type + MAX_REPLICA_TYPE_LENGTH, //column_length + -1, //column_precision + -1, //column_scale + true, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("data_source_svr_ip", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObVarcharType, //column_type + CS_TYPE_INVALID, //column_collation_type + MAX_IP_ADDR_LENGTH, //column_length + -1, //column_precision + -1, //column_scale + true, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("data_source_svr_port", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObIntType, //column_type + CS_TYPE_INVALID, //column_collation_type + sizeof(int64_t), //column_length + -1, //column_precision + -1, //column_scale + true, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ObObj is_manual_default; + is_manual_default.set_tinyint(0); + ADD_COLUMN_SCHEMA_T("is_manual", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObTinyIntType, //column_type + CS_TYPE_INVALID, //column_collation_type + 1, //column_length + -1, //column_precision + -1, //column_scale + true, //is_nullable + false, //is_autoincrement + is_manual_default, + is_manual_default); //default_value + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("task_exec_svr_ip", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObVarcharType, //column_type + CS_TYPE_INVALID, //column_collation_type + MAX_IP_ADDR_LENGTH, //column_length + -1, //column_precision + -1, //column_scale + true, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("task_exec_svr_port", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObIntType, //column_type + CS_TYPE_INVALID, //column_collation_type + sizeof(int64_t), //column_length + -1, //column_precision + -1, //column_scale + true, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ObObj gmt_default; + ObObj gmt_default_null; + + gmt_default.set_ext(ObActionFlag::OP_DEFAULT_NOW_FLAG); + gmt_default_null.set_null(); + ADD_COLUMN_SCHEMA_TS_T("generate_time", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObTimestampType, 
//column_type + CS_TYPE_INVALID, //column_collation_type + 0, //column_length + -1, //column_precision + 6, //column_scale + false, //is_nullable + false, //is_autoincrement + false, //is_on_update_for_timestamp + gmt_default_null, + gmt_default) + } + + if (OB_SUCC(ret)) { + ObObj gmt_default; + ObObj gmt_default_null; + + gmt_default.set_ext(ObActionFlag::OP_DEFAULT_NOW_FLAG); + gmt_default_null.set_null(); + ADD_COLUMN_SCHEMA_TS_T("schedule_time", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObTimestampType, //column_type + CS_TYPE_INVALID, //column_collation_type + 0, //column_length + -1, //column_precision + 6, //column_scale + false, //is_nullable + false, //is_autoincrement + false, //is_on_update_for_timestamp + gmt_default_null, + gmt_default) + } + + if (OB_SUCC(ret)) { + ObObj gmt_default; + ObObj gmt_default_null; + + gmt_default.set_ext(ObActionFlag::OP_DEFAULT_NOW_FLAG); + gmt_default_null.set_null(); + ADD_COLUMN_SCHEMA_TS_T("finish_time", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObTimestampType, //column_type + CS_TYPE_INVALID, //column_collation_type + 0, //column_length + -1, //column_precision + 6, //column_scale + false, //is_nullable + false, //is_autoincrement + false, //is_on_update_for_timestamp + gmt_default_null, + gmt_default) + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("execute_result", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObVarcharType, //column_type + CS_TYPE_INVALID, //column_collation_type + MAX_COLUMN_COMMENT_LENGTH, //column_length + -1, //column_precision + -1, //column_scale + true, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("comment", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObVarcharType, //column_type + CS_TYPE_INVALID, //column_collation_type + MAX_COLUMN_COMMENT_LENGTH, //column_length + -1, //column_precision + -1, //column_scale + true, //is_nullable + false); //is_autoincrement + } + table_schema.set_index_using_type(USING_BTREE); + table_schema.set_row_store_type(ENCODING_ROW_STORE); + table_schema.set_store_format(OB_STORE_FORMAT_DYNAMIC_MYSQL); + table_schema.set_progressive_merge_round(1); + table_schema.set_storage_format_version(3); + table_schema.set_tablet_id(OB_ALL_LS_REPLICA_TASK_HISTORY_TID); + table_schema.set_aux_lob_meta_tid(OB_ALL_LS_REPLICA_TASK_HISTORY_AUX_LOB_META_TID); + table_schema.set_aux_lob_piece_tid(OB_ALL_LS_REPLICA_TASK_HISTORY_AUX_LOB_PIECE_TID); + + table_schema.set_max_used_column_id(column_id); + return ret; +} + int ObInnerTableSchema::all_user_proxy_info_schema(ObTableSchema &table_schema) { int ret = OB_SUCCESS; diff --git a/src/share/inner_table/ob_inner_table_schema.50501_50550.cpp b/src/share/inner_table/ob_inner_table_schema.50501_50550.cpp index 416ada242..d60eb1b9a 100644 --- a/src/share/inner_table/ob_inner_table_schema.50501_50550.cpp +++ b/src/share/inner_table/ob_inner_table_schema.50501_50550.cpp @@ -700,6 +700,141 @@ int ObInnerTableSchema::all_tenant_snapshot_ls_replica_history_aux_lob_meta_sche return ret; } +int ObInnerTableSchema::all_ls_replica_task_history_aux_lob_meta_schema(ObTableSchema &table_schema) +{ + int ret = OB_SUCCESS; + uint64_t column_id = OB_APP_MIN_COLUMN_ID - 1; + + //generated fields: + table_schema.set_tenant_id(OB_SYS_TENANT_ID); + table_schema.set_tablegroup_id(OB_SYS_TABLEGROUP_ID); + 
table_schema.set_database_id(OB_SYS_DATABASE_ID); + table_schema.set_table_id(OB_ALL_LS_REPLICA_TASK_HISTORY_AUX_LOB_META_TID); + table_schema.set_rowkey_split_pos(0); + table_schema.set_is_use_bloomfilter(false); + table_schema.set_progressive_merge_num(0); + table_schema.set_rowkey_column_num(2); + table_schema.set_load_type(TABLE_LOAD_TYPE_IN_DISK); + table_schema.set_table_type(AUX_LOB_META); + table_schema.set_index_type(INDEX_TYPE_IS_NOT); + table_schema.set_def_type(TABLE_DEF_TYPE_INTERNAL); + + if (OB_SUCC(ret)) { + if (OB_FAIL(table_schema.set_table_name(OB_ALL_LS_REPLICA_TASK_HISTORY_AUX_LOB_META_TNAME))) { + LOG_ERROR("fail to set table_name", K(ret)); + } + } + + if (OB_SUCC(ret)) { + if (OB_FAIL(table_schema.set_compress_func_name(OB_DEFAULT_COMPRESS_FUNC_NAME))) { + LOG_ERROR("fail to set compress_func_name", K(ret)); + } + } + table_schema.set_part_level(PARTITION_LEVEL_ZERO); + table_schema.set_charset_type(ObCharset::get_default_charset()); + table_schema.set_collation_type(ObCharset::get_default_collation(ObCharset::get_default_charset())); + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("lob_id", //column_name + ++column_id, //column_id + 1, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObVarcharType, //column_type + CS_TYPE_BINARY, //column_collation_type + 16, //column_length + -1, //column_precision + -1, //column_scale + false, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("seq_id", //column_name + ++column_id, //column_id + 2, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObVarcharType, //column_type + CS_TYPE_BINARY, //column_collation_type + 8192, //column_length + -1, //column_precision + -1, //column_scale + false, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("binary_len", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObUInt32Type, //column_type + CS_TYPE_INVALID, //column_collation_type + sizeof(uint32_t), //column_length + -1, //column_precision + -1, //column_scale + false, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("char_len", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObUInt32Type, //column_type + CS_TYPE_INVALID, //column_collation_type + sizeof(uint32_t), //column_length + -1, //column_precision + -1, //column_scale + false, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("piece_id", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObUInt64Type, //column_type + CS_TYPE_INVALID, //column_collation_type + sizeof(uint64_t), //column_length + -1, //column_precision + -1, //column_scale + false, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("lob_data", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObVarcharType, //column_type + CS_TYPE_BINARY, //column_collation_type + 262144, //column_length + -1, //column_precision + -1, //column_scale + false, //is_nullable + false); //is_autoincrement + } + table_schema.set_index_using_type(USING_BTREE); + table_schema.set_row_store_type(ENCODING_ROW_STORE); + table_schema.set_store_format(OB_STORE_FORMAT_DYNAMIC_MYSQL); + table_schema.set_progressive_merge_round(1); + table_schema.set_storage_format_version(3); + table_schema.set_tablet_id(OB_ALL_LS_REPLICA_TASK_HISTORY_AUX_LOB_META_TID); + 
table_schema.set_data_table_id(OB_ALL_LS_REPLICA_TASK_HISTORY_TID); + + table_schema.set_max_used_column_id(column_id); + return ret; +} + int ObInnerTableSchema::all_user_proxy_info_aux_lob_meta_schema(ObTableSchema &table_schema) { int ret = OB_SUCCESS; diff --git a/src/share/inner_table/ob_inner_table_schema.60501_60550.cpp b/src/share/inner_table/ob_inner_table_schema.60501_60550.cpp index 5e2488a05..04d44f0e6 100644 --- a/src/share/inner_table/ob_inner_table_schema.60501_60550.cpp +++ b/src/share/inner_table/ob_inner_table_schema.60501_60550.cpp @@ -475,6 +475,96 @@ int ObInnerTableSchema::all_tenant_snapshot_ls_replica_history_aux_lob_piece_sch return ret; } +int ObInnerTableSchema::all_ls_replica_task_history_aux_lob_piece_schema(ObTableSchema &table_schema) +{ + int ret = OB_SUCCESS; + uint64_t column_id = OB_APP_MIN_COLUMN_ID - 1; + + //generated fields: + table_schema.set_tenant_id(OB_SYS_TENANT_ID); + table_schema.set_tablegroup_id(OB_SYS_TABLEGROUP_ID); + table_schema.set_database_id(OB_SYS_DATABASE_ID); + table_schema.set_table_id(OB_ALL_LS_REPLICA_TASK_HISTORY_AUX_LOB_PIECE_TID); + table_schema.set_rowkey_split_pos(0); + table_schema.set_is_use_bloomfilter(false); + table_schema.set_progressive_merge_num(0); + table_schema.set_rowkey_column_num(1); + table_schema.set_load_type(TABLE_LOAD_TYPE_IN_DISK); + table_schema.set_table_type(AUX_LOB_PIECE); + table_schema.set_index_type(INDEX_TYPE_IS_NOT); + table_schema.set_def_type(TABLE_DEF_TYPE_INTERNAL); + + if (OB_SUCC(ret)) { + if (OB_FAIL(table_schema.set_table_name(OB_ALL_LS_REPLICA_TASK_HISTORY_AUX_LOB_PIECE_TNAME))) { + LOG_ERROR("fail to set table_name", K(ret)); + } + } + + if (OB_SUCC(ret)) { + if (OB_FAIL(table_schema.set_compress_func_name(OB_DEFAULT_COMPRESS_FUNC_NAME))) { + LOG_ERROR("fail to set compress_func_name", K(ret)); + } + } + table_schema.set_part_level(PARTITION_LEVEL_ZERO); + table_schema.set_charset_type(ObCharset::get_default_charset()); + table_schema.set_collation_type(ObCharset::get_default_collation(ObCharset::get_default_charset())); + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("piece_id", //column_name + ++column_id, //column_id + 1, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObUInt64Type, //column_type + CS_TYPE_INVALID, //column_collation_type + sizeof(uint64_t), //column_length + -1, //column_precision + -1, //column_scale + false, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("data_len", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObUInt32Type, //column_type + CS_TYPE_INVALID, //column_collation_type + sizeof(uint32_t), //column_length + -1, //column_precision + -1, //column_scale + false, //is_nullable + false); //is_autoincrement + } + + if (OB_SUCC(ret)) { + ADD_COLUMN_SCHEMA("lob_data", //column_name + ++column_id, //column_id + 0, //rowkey_id + 0, //index_id + 0, //part_key_pos + ObVarcharType, //column_type + CS_TYPE_BINARY, //column_collation_type + 32, //column_length + -1, //column_precision + -1, //column_scale + false, //is_nullable + false); //is_autoincrement + } + table_schema.set_index_using_type(USING_BTREE); + table_schema.set_row_store_type(ENCODING_ROW_STORE); + table_schema.set_store_format(OB_STORE_FORMAT_DYNAMIC_MYSQL); + table_schema.set_progressive_merge_round(1); + table_schema.set_storage_format_version(3); + table_schema.set_tablet_id(OB_ALL_LS_REPLICA_TASK_HISTORY_AUX_LOB_PIECE_TID); + table_schema.set_data_table_id(OB_ALL_LS_REPLICA_TASK_HISTORY_TID); + + 
table_schema.set_max_used_column_id(column_id); + return ret; +} + int ObInnerTableSchema::all_user_proxy_info_aux_lob_piece_schema(ObTableSchema &table_schema) { int ret = OB_SUCCESS; diff --git a/src/share/inner_table/ob_inner_table_schema.h b/src/share/inner_table/ob_inner_table_schema.h index 2b112a4bc..16492e107 100644 --- a/src/share/inner_table/ob_inner_table_schema.h +++ b/src/share/inner_table/ob_inner_table_schema.h @@ -621,6 +621,7 @@ public: static int all_column_privilege_schema(share::schema::ObTableSchema &table_schema); static int all_column_privilege_history_schema(share::schema::ObTableSchema &table_schema); static int all_tenant_snapshot_ls_replica_history_schema(share::schema::ObTableSchema &table_schema); + static int all_ls_replica_task_history_schema(share::schema::ObTableSchema &table_schema); static int all_user_proxy_info_schema(share::schema::ObTableSchema &table_schema); static int all_user_proxy_info_history_schema(share::schema::ObTableSchema &table_schema); static int all_user_proxy_role_info_schema(share::schema::ObTableSchema &table_schema); @@ -1070,6 +1071,7 @@ public: static int all_virtual_column_privilege_history_schema(share::schema::ObTableSchema &table_schema); static int all_virtual_tenant_snapshot_ls_replica_history_schema(share::schema::ObTableSchema &table_schema); static int enabled_roles_schema(share::schema::ObTableSchema &table_schema); + static int all_virtual_ls_replica_task_history_schema(share::schema::ObTableSchema &table_schema); static int all_virtual_session_ps_info_schema(share::schema::ObTableSchema &table_schema); static int all_virtual_tracepoint_info_schema(share::schema::ObTableSchema &table_schema); static int all_virtual_compatibility_control_schema(share::schema::ObTableSchema &table_schema); @@ -1347,6 +1349,7 @@ public: static int all_virtual_transfer_partition_task_history_real_agent_ora_schema(share::schema::ObTableSchema &table_schema); static int all_virtual_ls_snapshot_ora_schema(share::schema::ObTableSchema &table_schema); static int all_virtual_index_usage_info_real_agent_ora_schema(share::schema::ObTableSchema &table_schema); + static int all_virtual_ls_replica_task_history_ora_schema(share::schema::ObTableSchema &table_schema); static int all_virtual_session_ps_info_ora_schema(share::schema::ObTableSchema &table_schema); static int all_virtual_tracepoint_info_ora_schema(share::schema::ObTableSchema &table_schema); static int all_virtual_user_proxy_info_real_agent_ora_schema(share::schema::ObTableSchema &table_schema); @@ -1752,6 +1755,8 @@ public: static int gv_ob_ls_snapshots_schema(share::schema::ObTableSchema &table_schema); static int v_ob_ls_snapshots_schema(share::schema::ObTableSchema &table_schema); static int dba_ob_clone_history_schema(share::schema::ObTableSchema &table_schema); + static int dba_ob_ls_replica_task_history_schema(share::schema::ObTableSchema &table_schema); + static int cdb_ob_ls_replica_task_history_schema(share::schema::ObTableSchema &table_schema); static int cdb_mview_logs_schema(share::schema::ObTableSchema &table_schema); static int dba_mview_logs_schema(share::schema::ObTableSchema &table_schema); static int cdb_mviews_schema(share::schema::ObTableSchema &table_schema); @@ -2060,6 +2065,7 @@ public: static int dba_ob_transfer_partition_tasks_ora_schema(share::schema::ObTableSchema &table_schema); static int dba_ob_transfer_partition_task_history_ora_schema(share::schema::ObTableSchema &table_schema); static int user_users_schema(share::schema::ObTableSchema &table_schema); + 
static int dba_ob_ls_replica_task_history_ora_schema(share::schema::ObTableSchema &table_schema); static int dba_mview_logs_ora_schema(share::schema::ObTableSchema &table_schema); static int all_mview_logs_ora_schema(share::schema::ObTableSchema &table_schema); static int user_mview_logs_ora_schema(share::schema::ObTableSchema &table_schema); @@ -2561,6 +2567,7 @@ public: static int all_column_privilege_aux_lob_meta_schema(share::schema::ObTableSchema &table_schema); static int all_column_privilege_history_aux_lob_meta_schema(share::schema::ObTableSchema &table_schema); static int all_tenant_snapshot_ls_replica_history_aux_lob_meta_schema(share::schema::ObTableSchema &table_schema); + static int all_ls_replica_task_history_aux_lob_meta_schema(share::schema::ObTableSchema &table_schema); static int all_user_proxy_info_aux_lob_meta_schema(share::schema::ObTableSchema &table_schema); static int all_user_proxy_info_history_aux_lob_meta_schema(share::schema::ObTableSchema &table_schema); static int all_user_proxy_role_info_aux_lob_meta_schema(share::schema::ObTableSchema &table_schema); @@ -2860,6 +2867,7 @@ public: static int all_column_privilege_aux_lob_piece_schema(share::schema::ObTableSchema &table_schema); static int all_column_privilege_history_aux_lob_piece_schema(share::schema::ObTableSchema &table_schema); static int all_tenant_snapshot_ls_replica_history_aux_lob_piece_schema(share::schema::ObTableSchema &table_schema); + static int all_ls_replica_task_history_aux_lob_piece_schema(share::schema::ObTableSchema &table_schema); static int all_user_proxy_info_aux_lob_piece_schema(share::schema::ObTableSchema &table_schema); static int all_user_proxy_info_history_aux_lob_piece_schema(share::schema::ObTableSchema &table_schema); static int all_user_proxy_role_info_aux_lob_piece_schema(share::schema::ObTableSchema &table_schema); @@ -3378,6 +3386,7 @@ const schema_create_func sys_table_schema_creators [] = { ObInnerTableSchema::all_column_privilege_schema, ObInnerTableSchema::all_column_privilege_history_schema, ObInnerTableSchema::all_tenant_snapshot_ls_replica_history_schema, + ObInnerTableSchema::all_ls_replica_task_history_schema, ObInnerTableSchema::all_user_proxy_info_schema, ObInnerTableSchema::all_user_proxy_info_history_schema, ObInnerTableSchema::all_user_proxy_role_info_schema, @@ -3830,6 +3839,7 @@ const schema_create_func virtual_table_schema_creators [] = { ObInnerTableSchema::all_virtual_column_privilege_history_schema, ObInnerTableSchema::all_virtual_tenant_snapshot_ls_replica_history_schema, ObInnerTableSchema::enabled_roles_schema, + ObInnerTableSchema::all_virtual_ls_replica_task_history_schema, ObInnerTableSchema::all_virtual_session_ps_info_schema, ObInnerTableSchema::all_virtual_tracepoint_info_schema, ObInnerTableSchema::all_virtual_compatibility_control_schema, @@ -4117,6 +4127,7 @@ const schema_create_func virtual_table_schema_creators [] = { ObInnerTableSchema::all_virtual_transfer_partition_task_history_real_agent_ora_schema, ObInnerTableSchema::all_virtual_ls_snapshot_ora_schema, ObInnerTableSchema::all_virtual_index_usage_info_real_agent_ora_schema, + ObInnerTableSchema::all_virtual_ls_replica_task_history_ora_schema, ObInnerTableSchema::all_virtual_session_ps_info_ora_schema, ObInnerTableSchema::all_virtual_tracepoint_info_ora_schema, ObInnerTableSchema::all_virtual_user_proxy_info_real_agent_ora_schema, @@ -4609,6 +4620,8 @@ const schema_create_func sys_view_schema_creators [] = { ObInnerTableSchema::gv_ob_ls_snapshots_schema, 
ObInnerTableSchema::v_ob_ls_snapshots_schema, ObInnerTableSchema::dba_ob_clone_history_schema, + ObInnerTableSchema::dba_ob_ls_replica_task_history_schema, + ObInnerTableSchema::cdb_ob_ls_replica_task_history_schema, ObInnerTableSchema::cdb_mview_logs_schema, ObInnerTableSchema::dba_mview_logs_schema, ObInnerTableSchema::cdb_mviews_schema, @@ -4917,6 +4930,7 @@ const schema_create_func sys_view_schema_creators [] = { ObInnerTableSchema::dba_ob_transfer_partition_tasks_ora_schema, ObInnerTableSchema::dba_ob_transfer_partition_task_history_ora_schema, ObInnerTableSchema::user_users_schema, + ObInnerTableSchema::dba_ob_ls_replica_task_history_ora_schema, ObInnerTableSchema::dba_mview_logs_ora_schema, ObInnerTableSchema::all_mview_logs_ora_schema, ObInnerTableSchema::user_mview_logs_ora_schema, @@ -5520,6 +5534,7 @@ const uint64_t tenant_space_tables [] = { OB_ALL_COLUMN_PRIVILEGE_TID, OB_ALL_COLUMN_PRIVILEGE_HISTORY_TID, OB_ALL_TENANT_SNAPSHOT_LS_REPLICA_HISTORY_TID, + OB_ALL_LS_REPLICA_TASK_HISTORY_TID, OB_ALL_USER_PROXY_INFO_TID, OB_ALL_USER_PROXY_INFO_HISTORY_TID, OB_ALL_USER_PROXY_ROLE_INFO_TID, @@ -5753,6 +5768,7 @@ const uint64_t tenant_space_tables [] = { OB_ALL_VIRTUAL_LS_SNAPSHOT_TID, OB_ALL_VIRTUAL_TENANT_SNAPSHOT_LS_REPLICA_HISTORY_TID, OB_ENABLED_ROLES_TID, + OB_ALL_VIRTUAL_LS_REPLICA_TASK_HISTORY_TID, OB_ALL_VIRTUAL_SESSION_PS_INFO_TID, OB_ALL_VIRTUAL_TRACEPOINT_INFO_TID, OB_ALL_VIRTUAL_COMPATIBILITY_CONTROL_TID, @@ -6033,6 +6049,7 @@ const uint64_t tenant_space_tables [] = { OB_ALL_VIRTUAL_TRANSFER_PARTITION_TASK_HISTORY_REAL_AGENT_ORA_TID, OB_ALL_VIRTUAL_LS_SNAPSHOT_ORA_TID, OB_ALL_VIRTUAL_INDEX_USAGE_INFO_REAL_AGENT_ORA_TID, + OB_ALL_VIRTUAL_LS_REPLICA_TASK_HISTORY_ORA_TID, OB_ALL_VIRTUAL_SESSION_PS_INFO_ORA_TID, OB_ALL_VIRTUAL_TRACEPOINT_INFO_ORA_TID, OB_ALL_VIRTUAL_USER_PROXY_INFO_REAL_AGENT_ORA_TID, @@ -6327,6 +6344,7 @@ const uint64_t tenant_space_tables [] = { OB_COLUMNS_PRIV_TID, OB_GV_OB_LS_SNAPSHOTS_TID, OB_V_OB_LS_SNAPSHOTS_TID, + OB_DBA_OB_LS_REPLICA_TASK_HISTORY_TID, OB_DBA_MVIEW_LOGS_TID, OB_DBA_MVIEWS_TID, OB_DBA_MVREF_STATS_SYS_DEFAULTS_TID, @@ -6626,6 +6644,7 @@ const uint64_t tenant_space_tables [] = { OB_DBA_OB_TRANSFER_PARTITION_TASKS_ORA_TID, OB_DBA_OB_TRANSFER_PARTITION_TASK_HISTORY_ORA_TID, OB_USER_USERS_TID, + OB_DBA_OB_LS_REPLICA_TASK_HISTORY_ORA_TID, OB_DBA_MVIEW_LOGS_ORA_TID, OB_ALL_MVIEW_LOGS_ORA_TID, OB_USER_MVIEW_LOGS_ORA_TID, @@ -7282,6 +7301,7 @@ const uint64_t tenant_space_tables [] = { OB_ALL_COLUMN_PRIVILEGE_AUX_LOB_META_TID, OB_ALL_COLUMN_PRIVILEGE_HISTORY_AUX_LOB_META_TID, OB_ALL_TENANT_SNAPSHOT_LS_REPLICA_HISTORY_AUX_LOB_META_TID, + OB_ALL_LS_REPLICA_TASK_HISTORY_AUX_LOB_META_TID, OB_ALL_USER_PROXY_INFO_AUX_LOB_META_TID, OB_ALL_USER_PROXY_INFO_HISTORY_AUX_LOB_META_TID, OB_ALL_USER_PROXY_ROLE_INFO_AUX_LOB_META_TID, @@ -7557,6 +7577,7 @@ const uint64_t tenant_space_tables [] = { OB_ALL_COLUMN_PRIVILEGE_AUX_LOB_PIECE_TID, OB_ALL_COLUMN_PRIVILEGE_HISTORY_AUX_LOB_PIECE_TID, OB_ALL_TENANT_SNAPSHOT_LS_REPLICA_HISTORY_AUX_LOB_PIECE_TID, + OB_ALL_LS_REPLICA_TASK_HISTORY_AUX_LOB_PIECE_TID, OB_ALL_USER_PROXY_INFO_AUX_LOB_PIECE_TID, OB_ALL_USER_PROXY_INFO_HISTORY_AUX_LOB_PIECE_TID, OB_ALL_USER_PROXY_ROLE_INFO_AUX_LOB_PIECE_TID, @@ -7706,6 +7727,7 @@ const uint64_t all_ora_mapping_virtual_table_org_tables [] = { OB_ALL_VIRTUAL_CGROUP_CONFIG_TID, OB_ALL_VIRTUAL_SYS_VARIABLE_DEFAULT_VALUE_TID, OB_ALL_VIRTUAL_LS_SNAPSHOT_TID, + OB_ALL_VIRTUAL_LS_REPLICA_TASK_HISTORY_TID, OB_ALL_VIRTUAL_SESSION_PS_INFO_TID, OB_ALL_VIRTUAL_TRACEPOINT_INFO_TID, 
OB_ALL_VIRTUAL_TENANT_RESOURCE_LIMIT_TID, @@ -7853,6 +7875,7 @@ const uint64_t all_ora_mapping_virtual_tables [] = { OB_ALL_VIRTUAL_SQL_AUDIT_O , OB_ALL_VIRTUAL_CGROUP_CONFIG_ORA_TID , OB_ALL_VIRTUAL_SYS_VARIABLE_DEFAULT_VALUE_ORA_TID , OB_ALL_VIRTUAL_LS_SNAPSHOT_ORA_TID +, OB_ALL_VIRTUAL_LS_REPLICA_TASK_HISTORY_ORA_TID , OB_ALL_VIRTUAL_SESSION_PS_INFO_ORA_TID , OB_ALL_VIRTUAL_TRACEPOINT_INFO_ORA_TID , OB_ALL_VIRTUAL_TENANT_RESOURCE_LIMIT_ORA_TID @@ -8142,6 +8165,7 @@ const char* const tenant_space_table_names [] = { OB_ALL_COLUMN_PRIVILEGE_TNAME, OB_ALL_COLUMN_PRIVILEGE_HISTORY_TNAME, OB_ALL_TENANT_SNAPSHOT_LS_REPLICA_HISTORY_TNAME, + OB_ALL_LS_REPLICA_TASK_HISTORY_TNAME, OB_ALL_USER_PROXY_INFO_TNAME, OB_ALL_USER_PROXY_INFO_HISTORY_TNAME, OB_ALL_USER_PROXY_ROLE_INFO_TNAME, @@ -8375,6 +8399,7 @@ const char* const tenant_space_table_names [] = { OB_ALL_VIRTUAL_LS_SNAPSHOT_TNAME, OB_ALL_VIRTUAL_TENANT_SNAPSHOT_LS_REPLICA_HISTORY_TNAME, OB_ENABLED_ROLES_TNAME, + OB_ALL_VIRTUAL_LS_REPLICA_TASK_HISTORY_TNAME, OB_ALL_VIRTUAL_SESSION_PS_INFO_TNAME, OB_ALL_VIRTUAL_TRACEPOINT_INFO_TNAME, OB_ALL_VIRTUAL_COMPATIBILITY_CONTROL_TNAME, @@ -8655,6 +8680,7 @@ const char* const tenant_space_table_names [] = { OB_ALL_VIRTUAL_TRANSFER_PARTITION_TASK_HISTORY_REAL_AGENT_ORA_TNAME, OB_ALL_VIRTUAL_LS_SNAPSHOT_ORA_TNAME, OB_ALL_VIRTUAL_INDEX_USAGE_INFO_REAL_AGENT_ORA_TNAME, + OB_ALL_VIRTUAL_LS_REPLICA_TASK_HISTORY_ORA_TNAME, OB_ALL_VIRTUAL_SESSION_PS_INFO_ORA_TNAME, OB_ALL_VIRTUAL_TRACEPOINT_INFO_ORA_TNAME, OB_ALL_VIRTUAL_USER_PROXY_INFO_REAL_AGENT_ORA_TNAME, @@ -8949,6 +8975,7 @@ const char* const tenant_space_table_names [] = { OB_COLUMNS_PRIV_TNAME, OB_GV_OB_LS_SNAPSHOTS_TNAME, OB_V_OB_LS_SNAPSHOTS_TNAME, + OB_DBA_OB_LS_REPLICA_TASK_HISTORY_TNAME, OB_DBA_MVIEW_LOGS_TNAME, OB_DBA_MVIEWS_TNAME, OB_DBA_MVREF_STATS_SYS_DEFAULTS_TNAME, @@ -9248,6 +9275,7 @@ const char* const tenant_space_table_names [] = { OB_DBA_OB_TRANSFER_PARTITION_TASKS_ORA_TNAME, OB_DBA_OB_TRANSFER_PARTITION_TASK_HISTORY_ORA_TNAME, OB_USER_USERS_TNAME, + OB_DBA_OB_LS_REPLICA_TASK_HISTORY_ORA_TNAME, OB_DBA_MVIEW_LOGS_ORA_TNAME, OB_ALL_MVIEW_LOGS_ORA_TNAME, OB_USER_MVIEW_LOGS_ORA_TNAME, @@ -9904,6 +9932,7 @@ const char* const tenant_space_table_names [] = { OB_ALL_COLUMN_PRIVILEGE_AUX_LOB_META_TNAME, OB_ALL_COLUMN_PRIVILEGE_HISTORY_AUX_LOB_META_TNAME, OB_ALL_TENANT_SNAPSHOT_LS_REPLICA_HISTORY_AUX_LOB_META_TNAME, + OB_ALL_LS_REPLICA_TASK_HISTORY_AUX_LOB_META_TNAME, OB_ALL_USER_PROXY_INFO_AUX_LOB_META_TNAME, OB_ALL_USER_PROXY_INFO_HISTORY_AUX_LOB_META_TNAME, OB_ALL_USER_PROXY_ROLE_INFO_AUX_LOB_META_TNAME, @@ -10179,6 +10208,7 @@ const char* const tenant_space_table_names [] = { OB_ALL_COLUMN_PRIVILEGE_AUX_LOB_PIECE_TNAME, OB_ALL_COLUMN_PRIVILEGE_HISTORY_AUX_LOB_PIECE_TNAME, OB_ALL_TENANT_SNAPSHOT_LS_REPLICA_HISTORY_AUX_LOB_PIECE_TNAME, + OB_ALL_LS_REPLICA_TASK_HISTORY_AUX_LOB_PIECE_TNAME, OB_ALL_USER_PROXY_INFO_AUX_LOB_PIECE_TNAME, OB_ALL_USER_PROXY_INFO_HISTORY_AUX_LOB_PIECE_TNAME, OB_ALL_USER_PROXY_ROLE_INFO_AUX_LOB_PIECE_TNAME, @@ -10542,6 +10572,7 @@ const uint64_t restrict_access_virtual_tables[] = { OB_ALL_VIRTUAL_TRANSFER_PARTITION_TASK_HISTORY_REAL_AGENT_ORA_TID, OB_ALL_VIRTUAL_LS_SNAPSHOT_ORA_TID, OB_ALL_VIRTUAL_INDEX_USAGE_INFO_REAL_AGENT_ORA_TID, + OB_ALL_VIRTUAL_LS_REPLICA_TASK_HISTORY_ORA_TID, OB_ALL_VIRTUAL_SESSION_PS_INFO_ORA_TID, OB_ALL_VIRTUAL_TRACEPOINT_INFO_ORA_TID, OB_ALL_VIRTUAL_USER_PROXY_INFO_REAL_AGENT_ORA_TID, @@ -13040,6 +13071,14 @@ LOBMapping const lob_aux_table_mappings [] = { 
ObInnerTableSchema::all_tenant_snapshot_ls_replica_history_aux_lob_piece_schema }, + { + OB_ALL_LS_REPLICA_TASK_HISTORY_TID, + OB_ALL_LS_REPLICA_TASK_HISTORY_AUX_LOB_META_TID, + OB_ALL_LS_REPLICA_TASK_HISTORY_AUX_LOB_PIECE_TID, + ObInnerTableSchema::all_ls_replica_task_history_aux_lob_meta_schema, + ObInnerTableSchema::all_ls_replica_task_history_aux_lob_piece_schema + }, + { OB_ALL_USER_PROXY_INFO_TID, OB_ALL_USER_PROXY_INFO_AUX_LOB_META_TID, @@ -13125,12 +13164,12 @@ static inline int get_sys_table_lob_aux_schema(const uint64_t tid, } const int64_t OB_CORE_TABLE_COUNT = 4; -const int64_t OB_SYS_TABLE_COUNT = 296; -const int64_t OB_VIRTUAL_TABLE_COUNT = 823; -const int64_t OB_SYS_VIEW_COUNT = 912; -const int64_t OB_SYS_TENANT_TABLE_COUNT = 2036; +const int64_t OB_SYS_TABLE_COUNT = 297; +const int64_t OB_VIRTUAL_TABLE_COUNT = 825; +const int64_t OB_SYS_VIEW_COUNT = 915; +const int64_t OB_SYS_TENANT_TABLE_COUNT = 2042; const int64_t OB_CORE_SCHEMA_VERSION = 1; -const int64_t OB_BOOTSTRAP_SCHEMA_VERSION = 2039; +const int64_t OB_BOOTSTRAP_SCHEMA_VERSION = 2045; } // end namespace share } // end namespace oceanbase diff --git a/src/share/inner_table/ob_inner_table_schema.lob.cpp b/src/share/inner_table/ob_inner_table_schema.lob.cpp index 95a758380..9d142bcc0 100644 --- a/src/share/inner_table/ob_inner_table_schema.lob.cpp +++ b/src/share/inner_table/ob_inner_table_schema.lob.cpp @@ -21,7 +21,7 @@ inner_lob_map_t inner_lob_map; bool lob_mapping_init() { int ret = OB_SUCCESS; - if (OB_FAIL(inner_lob_map.create(299, ObModIds::OB_INNER_LOB_HASH_SET))) { + if (OB_FAIL(inner_lob_map.create(300, ObModIds::OB_INNER_LOB_HASH_SET))) { SERVER_LOG(WARN, "fail to create inner lob map", K(ret)); } else { for (int64_t i = 0; OB_SUCC(ret) && i < ARRAYSIZEOF(lob_aux_table_mappings); ++i) { diff --git a/src/share/inner_table/ob_inner_table_schema_constants.h b/src/share/inner_table/ob_inner_table_schema_constants.h index 41cbb5947..06953678d 100644 --- a/src/share/inner_table/ob_inner_table_schema_constants.h +++ b/src/share/inner_table/ob_inner_table_schema_constants.h @@ -321,6 +321,7 @@ const uint64_t OB_ALL_TRUSTED_ROOT_CERTIFICATE_TID = 502; // "__all_trusted_root const uint64_t OB_ALL_COLUMN_PRIVILEGE_TID = 505; // "__all_column_privilege" const uint64_t OB_ALL_COLUMN_PRIVILEGE_HISTORY_TID = 506; // "__all_column_privilege_history" const uint64_t OB_ALL_TENANT_SNAPSHOT_LS_REPLICA_HISTORY_TID = 507; // "__all_tenant_snapshot_ls_replica_history" +const uint64_t OB_ALL_LS_REPLICA_TASK_HISTORY_TID = 508; // "__all_ls_replica_task_history" const uint64_t OB_ALL_USER_PROXY_INFO_TID = 512; // "__all_user_proxy_info" const uint64_t OB_ALL_USER_PROXY_INFO_HISTORY_TID = 513; // "__all_user_proxy_info_history" const uint64_t OB_ALL_USER_PROXY_ROLE_INFO_TID = 514; // "__all_user_proxy_role_info" @@ -770,6 +771,7 @@ const uint64_t OB_ALL_VIRTUAL_COLUMN_PRIVILEGE_TID = 12462; // "__all_virtual_co const uint64_t OB_ALL_VIRTUAL_COLUMN_PRIVILEGE_HISTORY_TID = 12463; // "__all_virtual_column_privilege_history" const uint64_t OB_ALL_VIRTUAL_TENANT_SNAPSHOT_LS_REPLICA_HISTORY_TID = 12464; // "__all_virtual_tenant_snapshot_ls_replica_history" const uint64_t OB_ENABLED_ROLES_TID = 12466; // "ENABLED_ROLES" +const uint64_t OB_ALL_VIRTUAL_LS_REPLICA_TASK_HISTORY_TID = 12467; // "__all_virtual_ls_replica_task_history" const uint64_t OB_ALL_VIRTUAL_SESSION_PS_INFO_TID = 12468; // "__all_virtual_session_ps_info" const uint64_t OB_ALL_VIRTUAL_TRACEPOINT_INFO_TID = 12469; // "__all_virtual_tracepoint_info" const uint64_t 
OB_ALL_VIRTUAL_COMPATIBILITY_CONTROL_TID = 12473; // "__all_virtual_compatibility_control" @@ -1047,6 +1049,7 @@ const uint64_t OB_ALL_VIRTUAL_TRANSFER_PARTITION_TASK_REAL_AGENT_ORA_TID = 15430 const uint64_t OB_ALL_VIRTUAL_TRANSFER_PARTITION_TASK_HISTORY_REAL_AGENT_ORA_TID = 15431; // "ALL_VIRTUAL_TRANSFER_PARTITION_TASK_HISTORY_REAL_AGENT_ORA" const uint64_t OB_ALL_VIRTUAL_LS_SNAPSHOT_ORA_TID = 15439; // "ALL_VIRTUAL_LS_SNAPSHOT_ORA" const uint64_t OB_ALL_VIRTUAL_INDEX_USAGE_INFO_REAL_AGENT_ORA_TID = 15440; // "ALL_VIRTUAL_INDEX_USAGE_INFO_REAL_AGENT_ORA" +const uint64_t OB_ALL_VIRTUAL_LS_REPLICA_TASK_HISTORY_ORA_TID = 15443; // "ALL_VIRTUAL_LS_REPLICA_TASK_HISTORY_ORA" const uint64_t OB_ALL_VIRTUAL_SESSION_PS_INFO_ORA_TID = 15444; // "ALL_VIRTUAL_SESSION_PS_INFO_ORA" const uint64_t OB_ALL_VIRTUAL_TRACEPOINT_INFO_ORA_TID = 15445; // "ALL_VIRTUAL_TRACEPOINT_INFO_ORA" const uint64_t OB_ALL_VIRTUAL_USER_PROXY_INFO_REAL_AGENT_ORA_TID = 15446; // "ALL_VIRTUAL_USER_PROXY_INFO_REAL_AGENT_ORA" @@ -1452,6 +1455,8 @@ const uint64_t OB_COLUMNS_PRIV_TID = 21516; // "columns_priv" const uint64_t OB_GV_OB_LS_SNAPSHOTS_TID = 21517; // "GV$OB_LS_SNAPSHOTS" const uint64_t OB_V_OB_LS_SNAPSHOTS_TID = 21518; // "V$OB_LS_SNAPSHOTS" const uint64_t OB_DBA_OB_CLONE_HISTORY_TID = 21519; // "DBA_OB_CLONE_HISTORY" +const uint64_t OB_DBA_OB_LS_REPLICA_TASK_HISTORY_TID = 21523; // "DBA_OB_LS_REPLICA_TASK_HISTORY" +const uint64_t OB_CDB_OB_LS_REPLICA_TASK_HISTORY_TID = 21524; // "CDB_OB_LS_REPLICA_TASK_HISTORY" const uint64_t OB_CDB_MVIEW_LOGS_TID = 21525; // "CDB_MVIEW_LOGS" const uint64_t OB_DBA_MVIEW_LOGS_TID = 21526; // "DBA_MVIEW_LOGS" const uint64_t OB_CDB_MVIEWS_TID = 21527; // "CDB_MVIEWS" @@ -1760,6 +1765,7 @@ const uint64_t OB_DBA_OB_IMPORT_TABLE_TASK_HISTORY_ORA_TID = 25267; // "DBA_OB_I const uint64_t OB_DBA_OB_TRANSFER_PARTITION_TASKS_ORA_TID = 25275; // "DBA_OB_TRANSFER_PARTITION_TASKS_ORA" const uint64_t OB_DBA_OB_TRANSFER_PARTITION_TASK_HISTORY_ORA_TID = 25276; // "DBA_OB_TRANSFER_PARTITION_TASK_HISTORY_ORA" const uint64_t OB_USER_USERS_TID = 25278; // "USER_USERS" +const uint64_t OB_DBA_OB_LS_REPLICA_TASK_HISTORY_ORA_TID = 25279; // "DBA_OB_LS_REPLICA_TASK_HISTORY_ORA" const uint64_t OB_DBA_MVIEW_LOGS_ORA_TID = 25283; // "DBA_MVIEW_LOGS_ORA" const uint64_t OB_ALL_MVIEW_LOGS_ORA_TID = 25284; // "ALL_MVIEW_LOGS_ORA" const uint64_t OB_USER_MVIEW_LOGS_ORA_TID = 25285; // "USER_MVIEW_LOGS_ORA" @@ -2261,6 +2267,7 @@ const uint64_t OB_ALL_TRUSTED_ROOT_CERTIFICATE_AUX_LOB_META_TID = 50502; // "__a const uint64_t OB_ALL_COLUMN_PRIVILEGE_AUX_LOB_META_TID = 50505; // "__all_column_privilege_aux_lob_meta" const uint64_t OB_ALL_COLUMN_PRIVILEGE_HISTORY_AUX_LOB_META_TID = 50506; // "__all_column_privilege_history_aux_lob_meta" const uint64_t OB_ALL_TENANT_SNAPSHOT_LS_REPLICA_HISTORY_AUX_LOB_META_TID = 50507; // "__all_tenant_snapshot_ls_replica_history_aux_lob_meta" +const uint64_t OB_ALL_LS_REPLICA_TASK_HISTORY_AUX_LOB_META_TID = 50508; // "__all_ls_replica_task_history_aux_lob_meta" const uint64_t OB_ALL_USER_PROXY_INFO_AUX_LOB_META_TID = 50512; // "__all_user_proxy_info_aux_lob_meta" const uint64_t OB_ALL_USER_PROXY_INFO_HISTORY_AUX_LOB_META_TID = 50513; // "__all_user_proxy_info_history_aux_lob_meta" const uint64_t OB_ALL_USER_PROXY_ROLE_INFO_AUX_LOB_META_TID = 50514; // "__all_user_proxy_role_info_aux_lob_meta" @@ -2560,6 +2567,7 @@ const uint64_t OB_ALL_TRUSTED_ROOT_CERTIFICATE_AUX_LOB_PIECE_TID = 60502; // "__ const uint64_t OB_ALL_COLUMN_PRIVILEGE_AUX_LOB_PIECE_TID = 60505; // 
"__all_column_privilege_aux_lob_piece" const uint64_t OB_ALL_COLUMN_PRIVILEGE_HISTORY_AUX_LOB_PIECE_TID = 60506; // "__all_column_privilege_history_aux_lob_piece" const uint64_t OB_ALL_TENANT_SNAPSHOT_LS_REPLICA_HISTORY_AUX_LOB_PIECE_TID = 60507; // "__all_tenant_snapshot_ls_replica_history_aux_lob_piece" +const uint64_t OB_ALL_LS_REPLICA_TASK_HISTORY_AUX_LOB_PIECE_TID = 60508; // "__all_ls_replica_task_history_aux_lob_piece" const uint64_t OB_ALL_USER_PROXY_INFO_AUX_LOB_PIECE_TID = 60512; // "__all_user_proxy_info_aux_lob_piece" const uint64_t OB_ALL_USER_PROXY_INFO_HISTORY_AUX_LOB_PIECE_TID = 60513; // "__all_user_proxy_info_history_aux_lob_piece" const uint64_t OB_ALL_USER_PROXY_ROLE_INFO_AUX_LOB_PIECE_TID = 60514; // "__all_user_proxy_role_info_aux_lob_piece" @@ -3065,6 +3073,7 @@ const char *const OB_ALL_TRUSTED_ROOT_CERTIFICATE_TNAME = "__all_trusted_root_ce const char *const OB_ALL_COLUMN_PRIVILEGE_TNAME = "__all_column_privilege"; const char *const OB_ALL_COLUMN_PRIVILEGE_HISTORY_TNAME = "__all_column_privilege_history"; const char *const OB_ALL_TENANT_SNAPSHOT_LS_REPLICA_HISTORY_TNAME = "__all_tenant_snapshot_ls_replica_history"; +const char *const OB_ALL_LS_REPLICA_TASK_HISTORY_TNAME = "__all_ls_replica_task_history"; const char *const OB_ALL_USER_PROXY_INFO_TNAME = "__all_user_proxy_info"; const char *const OB_ALL_USER_PROXY_INFO_HISTORY_TNAME = "__all_user_proxy_info_history"; const char *const OB_ALL_USER_PROXY_ROLE_INFO_TNAME = "__all_user_proxy_role_info"; @@ -3514,6 +3523,7 @@ const char *const OB_ALL_VIRTUAL_COLUMN_PRIVILEGE_TNAME = "__all_virtual_column_ const char *const OB_ALL_VIRTUAL_COLUMN_PRIVILEGE_HISTORY_TNAME = "__all_virtual_column_privilege_history"; const char *const OB_ALL_VIRTUAL_TENANT_SNAPSHOT_LS_REPLICA_HISTORY_TNAME = "__all_virtual_tenant_snapshot_ls_replica_history"; const char *const OB_ENABLED_ROLES_TNAME = "ENABLED_ROLES"; +const char *const OB_ALL_VIRTUAL_LS_REPLICA_TASK_HISTORY_TNAME = "__all_virtual_ls_replica_task_history"; const char *const OB_ALL_VIRTUAL_SESSION_PS_INFO_TNAME = "__all_virtual_session_ps_info"; const char *const OB_ALL_VIRTUAL_TRACEPOINT_INFO_TNAME = "__all_virtual_tracepoint_info"; const char *const OB_ALL_VIRTUAL_COMPATIBILITY_CONTROL_TNAME = "__all_virtual_compatibility_control"; @@ -3791,6 +3801,7 @@ const char *const OB_ALL_VIRTUAL_TRANSFER_PARTITION_TASK_REAL_AGENT_ORA_TNAME = const char *const OB_ALL_VIRTUAL_TRANSFER_PARTITION_TASK_HISTORY_REAL_AGENT_ORA_TNAME = "ALL_VIRTUAL_TRANSFER_PARTITION_TASK_HISTORY_REAL_AGENT"; const char *const OB_ALL_VIRTUAL_LS_SNAPSHOT_ORA_TNAME = "ALL_VIRTUAL_LS_SNAPSHOT"; const char *const OB_ALL_VIRTUAL_INDEX_USAGE_INFO_REAL_AGENT_ORA_TNAME = "ALL_VIRTUAL_INDEX_USAGE_INFO_REAL_AGENT"; +const char *const OB_ALL_VIRTUAL_LS_REPLICA_TASK_HISTORY_ORA_TNAME = "ALL_VIRTUAL_LS_REPLICA_TASK_HISTORY"; const char *const OB_ALL_VIRTUAL_SESSION_PS_INFO_ORA_TNAME = "ALL_VIRTUAL_SESSION_PS_INFO"; const char *const OB_ALL_VIRTUAL_TRACEPOINT_INFO_ORA_TNAME = "ALL_VIRTUAL_TRACEPOINT_INFO"; const char *const OB_ALL_VIRTUAL_USER_PROXY_INFO_REAL_AGENT_ORA_TNAME = "ALL_VIRTUAL_USER_PROXY_INFO_REAL_AGENT"; @@ -4196,6 +4207,8 @@ const char *const OB_COLUMNS_PRIV_TNAME = "columns_priv"; const char *const OB_GV_OB_LS_SNAPSHOTS_TNAME = "GV$OB_LS_SNAPSHOTS"; const char *const OB_V_OB_LS_SNAPSHOTS_TNAME = "V$OB_LS_SNAPSHOTS"; const char *const OB_DBA_OB_CLONE_HISTORY_TNAME = "DBA_OB_CLONE_HISTORY"; +const char *const OB_DBA_OB_LS_REPLICA_TASK_HISTORY_TNAME = "DBA_OB_LS_REPLICA_TASK_HISTORY"; +const char *const 
OB_CDB_OB_LS_REPLICA_TASK_HISTORY_TNAME = "CDB_OB_LS_REPLICA_TASK_HISTORY"; const char *const OB_CDB_MVIEW_LOGS_TNAME = "CDB_MVIEW_LOGS"; const char *const OB_DBA_MVIEW_LOGS_TNAME = "DBA_MVIEW_LOGS"; const char *const OB_CDB_MVIEWS_TNAME = "CDB_MVIEWS"; @@ -4504,6 +4517,7 @@ const char *const OB_DBA_OB_IMPORT_TABLE_TASK_HISTORY_ORA_TNAME = "DBA_OB_IMPORT const char *const OB_DBA_OB_TRANSFER_PARTITION_TASKS_ORA_TNAME = "DBA_OB_TRANSFER_PARTITION_TASKS"; const char *const OB_DBA_OB_TRANSFER_PARTITION_TASK_HISTORY_ORA_TNAME = "DBA_OB_TRANSFER_PARTITION_TASK_HISTORY"; const char *const OB_USER_USERS_TNAME = "USER_USERS"; +const char *const OB_DBA_OB_LS_REPLICA_TASK_HISTORY_ORA_TNAME = "DBA_OB_LS_REPLICA_TASK_HISTORY"; const char *const OB_DBA_MVIEW_LOGS_ORA_TNAME = "DBA_MVIEW_LOGS"; const char *const OB_ALL_MVIEW_LOGS_ORA_TNAME = "ALL_MVIEW_LOGS"; const char *const OB_USER_MVIEW_LOGS_ORA_TNAME = "USER_MVIEW_LOGS"; @@ -5005,6 +5019,7 @@ const char *const OB_ALL_TRUSTED_ROOT_CERTIFICATE_AUX_LOB_META_TNAME = "__all_tr const char *const OB_ALL_COLUMN_PRIVILEGE_AUX_LOB_META_TNAME = "__all_column_privilege_aux_lob_meta"; const char *const OB_ALL_COLUMN_PRIVILEGE_HISTORY_AUX_LOB_META_TNAME = "__all_column_privilege_history_aux_lob_meta"; const char *const OB_ALL_TENANT_SNAPSHOT_LS_REPLICA_HISTORY_AUX_LOB_META_TNAME = "__all_tenant_snapshot_ls_replica_history_aux_lob_meta"; +const char *const OB_ALL_LS_REPLICA_TASK_HISTORY_AUX_LOB_META_TNAME = "__all_ls_replica_task_history_aux_lob_meta"; const char *const OB_ALL_USER_PROXY_INFO_AUX_LOB_META_TNAME = "__all_user_proxy_info_aux_lob_meta"; const char *const OB_ALL_USER_PROXY_INFO_HISTORY_AUX_LOB_META_TNAME = "__all_user_proxy_info_history_aux_lob_meta"; const char *const OB_ALL_USER_PROXY_ROLE_INFO_AUX_LOB_META_TNAME = "__all_user_proxy_role_info_aux_lob_meta"; @@ -5304,6 +5319,7 @@ const char *const OB_ALL_TRUSTED_ROOT_CERTIFICATE_AUX_LOB_PIECE_TNAME = "__all_t const char *const OB_ALL_COLUMN_PRIVILEGE_AUX_LOB_PIECE_TNAME = "__all_column_privilege_aux_lob_piece"; const char *const OB_ALL_COLUMN_PRIVILEGE_HISTORY_AUX_LOB_PIECE_TNAME = "__all_column_privilege_history_aux_lob_piece"; const char *const OB_ALL_TENANT_SNAPSHOT_LS_REPLICA_HISTORY_AUX_LOB_PIECE_TNAME = "__all_tenant_snapshot_ls_replica_history_aux_lob_piece"; +const char *const OB_ALL_LS_REPLICA_TASK_HISTORY_AUX_LOB_PIECE_TNAME = "__all_ls_replica_task_history_aux_lob_piece"; const char *const OB_ALL_USER_PROXY_INFO_AUX_LOB_PIECE_TNAME = "__all_user_proxy_info_aux_lob_piece"; const char *const OB_ALL_USER_PROXY_INFO_HISTORY_AUX_LOB_PIECE_TNAME = "__all_user_proxy_info_history_aux_lob_piece"; const char *const OB_ALL_USER_PROXY_ROLE_INFO_AUX_LOB_PIECE_TNAME = "__all_user_proxy_role_info_aux_lob_piece"; diff --git a/src/share/inner_table/ob_inner_table_schema_def.py b/src/share/inner_table/ob_inner_table_schema_def.py index 478c7bab8..786ba625f 100644 --- a/src/share/inner_table/ob_inner_table_schema_def.py +++ b/src/share/inner_table/ob_inner_table_schema_def.py @@ -7189,6 +7189,45 @@ all_tenant_snapshot_ls_replica_history_def = dict( ) def_table_schema(**all_tenant_snapshot_ls_replica_history_def) +def_table_schema( + owner = 'jinqian.zzy', + table_name = '__all_ls_replica_task_history', + table_id = '508', + table_type = 'SYSTEM_TABLE', + gm_columns = ['gmt_create', 'gmt_modified'], + rowkey_columns = [ + ('tenant_id', 'int'), + ('ls_id', 'int'), + ('task_type', 'varchar:MAX_DISASTER_RECOVERY_TASK_TYPE_LENGTH'), + ('task_id', 'varchar:OB_TRACE_STAT_BUFFER_SIZE'), + ], + in_tenant_space = True, 
+ is_cluster_private = True, + meta_record_in_sys = False, + normal_columns = [ + ('task_status', 'varchar:MAX_COLUMN_COMMENT_LENGTH', 'true'), + ('priority', 'int', 'false', 1), + ('target_replica_svr_ip', 'varchar:MAX_IP_ADDR_LENGTH', 'true'), + ('target_replica_svr_port', 'int', 'true'), + ('target_paxos_replica_number', 'int', 'true'), + ('target_replica_type', 'varchar:MAX_REPLICA_TYPE_LENGTH', 'true'), + ('source_replica_svr_ip', 'varchar:MAX_IP_ADDR_LENGTH', 'true'), + ('source_replica_svr_port', 'int', 'true'), + ('source_paxos_replica_number', 'int', 'true'), + ('source_replica_type', 'varchar:MAX_REPLICA_TYPE_LENGTH', 'true'), + ('data_source_svr_ip', 'varchar:MAX_IP_ADDR_LENGTH', 'true'), + ('data_source_svr_port', 'int', 'true'), + ('is_manual', 'bool', 'true', '0'), + ('task_exec_svr_ip', 'varchar:MAX_IP_ADDR_LENGTH', 'true'), + ('task_exec_svr_port', 'int', 'true'), + ('generate_time', 'timestamp:6', 'false', 0), + ('schedule_time', 'timestamp:6', 'false', 0), + ('finish_time', 'timestamp:6', 'false', 0), + ('execute_result', 'varchar:MAX_COLUMN_COMMENT_LENGTH', 'true'), + ('comment', 'varchar:MAX_COLUMN_COMMENT_LENGTH', 'true'), + ], +) + all_user_proxy_info_def = dict( owner = 'mingye.swj', table_name = '__all_user_proxy_info', @@ -14419,7 +14458,12 @@ def_table_schema( ], ) -# 12467: __all_virtual_ls_replica_task_history +def_table_schema(**gen_iterate_private_virtual_table_def( + table_id = '12467', + table_name = '__all_virtual_ls_replica_task_history', + in_tenant_space = True, + keywords = all_def_keywords['__all_ls_replica_task_history'])) + def_table_schema( owner = 'gongyusen.gys', table_name = '__all_virtual_session_ps_info', @@ -15074,13 +15118,12 @@ def_table_schema(**no_direct_access(gen_oracle_mapping_real_virtual_table_def('1 # 15438: abandoned def_table_schema(**no_direct_access(gen_oracle_mapping_virtual_table_def('15439', all_def_keywords['__all_virtual_ls_snapshot']))) def_table_schema(**no_direct_access(gen_oracle_mapping_real_virtual_table_def('15440', all_def_keywords['__all_index_usage_info']))) -def_table_schema(**no_direct_access(gen_oracle_mapping_virtual_table_def('15444', all_def_keywords['__all_virtual_session_ps_info']))) # 余留位置 # 15441: __all_virtual_shared_storage_quota # 15442: __all_virtual_column_group -# 15443: __all_virtual_ls_replica_task_history -# 15444: __all_virtual_session_ps_info +def_table_schema(**no_direct_access(gen_oracle_mapping_virtual_table_def('15443', all_def_keywords['__all_virtual_ls_replica_task_history']))) +def_table_schema(**no_direct_access(gen_oracle_mapping_virtual_table_def('15444', all_def_keywords['__all_virtual_session_ps_info']))) def_table_schema(**no_direct_access(gen_oracle_mapping_virtual_table_def('15445', all_def_keywords['__all_virtual_tracepoint_info']))) def_table_schema(**no_direct_access(gen_oracle_mapping_real_virtual_table_def('15446', all_def_keywords['__all_user_proxy_info']))) def_table_schema(**no_direct_access(gen_oracle_mapping_real_virtual_table_def('15447', all_def_keywords['__all_user_proxy_role_info']))) @@ -28545,6 +28588,14 @@ def_table_schema( (CASE SOURCE_REPLICA_TYPE WHEN "" THEN NULL ELSE SOURCE_REPLICA_TYPE END) AS SOURCE_REPLICA_TYPE, + (CASE DATA_SOURCE_SVR_IP + WHEN "" THEN NULL + ELSE DATA_SOURCE_SVR_IP END) AS DATA_SOURCE_SVR_IP, + DATA_SOURCE_SVR_PORT, + CAST(CASE IS_MANUAL + WHEN 0 THEN 'FALSE' + WHEN 1 THEN 'TRUE' + ELSE NULL END AS CHAR(6)) AS IS_MANUAL, TASK_EXEC_SVR_IP, TASK_EXEC_SVR_PORT, CAST(GMT_CREATE AS DATETIME) AS CREATE_TIME, @@ -28590,6 +28641,14 @@ 
def_table_schema( (CASE SOURCE_REPLICA_TYPE WHEN "" THEN NULL ELSE SOURCE_REPLICA_TYPE END) AS SOURCE_REPLICA_TYPE, + (CASE DATA_SOURCE_SVR_IP + WHEN "" THEN NULL + ELSE DATA_SOURCE_SVR_IP END) AS DATA_SOURCE_SVR_IP, + DATA_SOURCE_SVR_PORT, + CAST(CASE IS_MANUAL + WHEN 0 THEN 'FALSE' + WHEN 1 THEN 'TRUE' + ELSE NULL END AS CHAR(6)) AS IS_MANUAL, TASK_EXEC_SVR_IP, TASK_EXEC_SVR_PORT, CAST(GMT_CREATE AS DATETIME) AS CREATE_TIME, @@ -33777,8 +33836,117 @@ FROM oceanbase.__all_clone_job_history ORDER BY CLONE_START_TIME #21520: GV$OB_SHARED_STORAGE_QUOTA #21521: V$OB_SHARED_STORAGE_QUOTA #21522: CDB_UNUSED_COL_TABS -#21523: DBA_OB_LS_REPLICA_TASK_HISTORY -#21524: CDB_OB_LS_REPLICA_TASK_HISTORY + +def_table_schema( + owner = 'jinqian.zzy', + table_name = 'DBA_OB_LS_REPLICA_TASK_HISTORY', + table_id = '21523', + table_type = 'SYSTEM_VIEW', + gm_columns = [], + rowkey_columns = [], + normal_columns = [], + in_tenant_space = True, + view_definition = + """ + ( + SELECT LS_ID, + TASK_TYPE, + TASK_ID, + TASK_STATUS, + CAST(CASE PRIORITY + WHEN 0 THEN 'HIGH' + WHEN 1 THEN 'LOW' + ELSE NULL END AS CHAR(5)) AS PRIORITY, + TARGET_REPLICA_SVR_IP, + TARGET_REPLICA_SVR_PORT, + TARGET_PAXOS_REPLICA_NUMBER, + TARGET_REPLICA_TYPE, + (CASE SOURCE_REPLICA_SVR_IP + WHEN "" THEN NULL + ELSE SOURCE_REPLICA_SVR_IP END) AS SOURCE_REPLICA_SVR_IP, + SOURCE_REPLICA_SVR_PORT, + SOURCE_PAXOS_REPLICA_NUMBER, + (CASE SOURCE_REPLICA_TYPE + WHEN "" THEN NULL + ELSE SOURCE_REPLICA_TYPE END) AS SOURCE_REPLICA_TYPE, + (CASE DATA_SOURCE_SVR_IP + WHEN "" THEN NULL + ELSE DATA_SOURCE_SVR_IP END) AS DATA_SOURCE_SVR_IP, + DATA_SOURCE_SVR_PORT, + CAST(CASE IS_MANUAL + WHEN 0 THEN 'FALSE' + WHEN 1 THEN 'TRUE' + ELSE NULL END AS CHAR(6)) AS IS_MANUAL, + TASK_EXEC_SVR_IP, + TASK_EXEC_SVR_PORT, + CAST(GMT_CREATE AS DATETIME) AS CREATE_TIME, + CAST(SCHEDULE_TIME AS DATETIME) AS START_TIME, + CAST(GMT_MODIFIED AS DATETIME) AS MODIFY_TIME, + CAST(FINISH_TIME AS DATETIME) AS FINISH_TIME, + (CASE EXECUTE_RESULT + WHEN "" THEN NULL + ELSE EXECUTE_RESULT END) AS EXECUTE_RESULT, + COMMENT + FROM OCEANBASE.__ALL_VIRTUAL_LS_REPLICA_TASK_HISTORY + WHERE TENANT_ID = EFFECTIVE_TENANT_ID() + ) + """.replace("\n", " "), +) + +def_table_schema( + owner = 'jinqian.zzy', + table_name = 'CDB_OB_LS_REPLICA_TASK_HISTORY', + table_id = '21524', + table_type = 'SYSTEM_VIEW', + gm_columns = [], + rowkey_columns = [], + normal_columns = [], + view_definition = + """ + ( + SELECT TENANT_ID, + LS_ID, + TASK_TYPE, + TASK_ID, + TASK_STATUS, + CAST(CASE PRIORITY + WHEN 0 THEN 'HIGH' + WHEN 1 THEN 'LOW' + ELSE NULL END AS CHAR(5)) AS PRIORITY, + TARGET_REPLICA_SVR_IP, + TARGET_REPLICA_SVR_PORT, + TARGET_PAXOS_REPLICA_NUMBER, + TARGET_REPLICA_TYPE, + (CASE SOURCE_REPLICA_SVR_IP + WHEN "" THEN NULL + ELSE SOURCE_REPLICA_SVR_IP END) AS SOURCE_REPLICA_SVR_IP, + SOURCE_REPLICA_SVR_PORT, + SOURCE_PAXOS_REPLICA_NUMBER, + (CASE SOURCE_REPLICA_TYPE + WHEN "" THEN NULL + ELSE SOURCE_REPLICA_TYPE END) AS SOURCE_REPLICA_TYPE, + (CASE DATA_SOURCE_SVR_IP + WHEN "" THEN NULL + ELSE DATA_SOURCE_SVR_IP END) AS DATA_SOURCE_SVR_IP, + DATA_SOURCE_SVR_PORT, + CAST(CASE IS_MANUAL + WHEN 0 THEN 'FALSE' + WHEN 1 THEN 'TRUE' + ELSE NULL END AS CHAR(6)) AS IS_MANUAL, + TASK_EXEC_SVR_IP, + TASK_EXEC_SVR_PORT, + CAST(GMT_CREATE AS DATETIME) AS CREATE_TIME, + CAST(SCHEDULE_TIME AS DATETIME) AS START_TIME, + CAST(GMT_MODIFIED AS DATETIME) AS MODIFY_TIME, + CAST(FINISH_TIME AS DATETIME) AS FINISH_TIME, + (CASE EXECUTE_RESULT + WHEN "" THEN NULL + ELSE EXECUTE_RESULT END) AS EXECUTE_RESULT, + 
COMMENT + FROM OCEANBASE.__ALL_VIRTUAL_LS_REPLICA_TASK_HISTORY + ) + """.replace("\n", " "), +) def_table_schema( owner = 'suzhi.yt', @@ -52363,8 +52531,8 @@ def_table_schema( TASK_ID, TASK_STATUS, CAST(CASE PRIORITY - WHEN 1 THEN 'HIGH' - WHEN 2 THEN 'LOW' + WHEN 0 THEN 'HIGH' + WHEN 1 THEN 'LOW' ELSE NULL END AS CHAR(5)) AS PRIORITY, TARGET_REPLICA_SVR_IP, TARGET_REPLICA_SVR_PORT, @@ -52378,6 +52546,14 @@ def_table_schema( CASE SOURCE_REPLICA_TYPE WHEN '' THEN NULL ELSE SOURCE_REPLICA_TYPE END AS SOURCE_REPLICA_TYPE, + CASE DATA_SOURCE_SVR_IP + WHEN '' THEN NULL + ELSE DATA_SOURCE_SVR_IP END AS DATA_SOURCE_SVR_IP, + DATA_SOURCE_SVR_PORT, + CAST(CASE IS_MANUAL + WHEN 0 THEN 'FALSE' + WHEN 1 THEN 'TRUE' + ELSE NULL END AS CHAR(6)) AS IS_MANUAL, TASK_EXEC_SVR_IP, TASK_EXEC_SVR_PORT, CAST(GMT_CREATE AS TIMESTAMP(6)) AS CREATE_TIME, @@ -54796,7 +54972,66 @@ def_table_schema( AND B.TENANT_ID = EFFECTIVE_TENANT_ID() """.replace("\n", " ") ) -# 25279: DBA_OB_LS_REPLICA_TASK_HISTORY + +def_table_schema( + owner = 'jinqian.zzy', + table_name = 'DBA_OB_LS_REPLICA_TASK_HISTORY', + name_postfix = '_ORA', + database_id = 'OB_ORA_SYS_DATABASE_ID', + table_id = '25279', + table_type = 'SYSTEM_VIEW', + gm_columns = [], + rowkey_columns = [], + normal_columns = [], + in_tenant_space = True, + view_definition = + """ + ( + SELECT LS_ID, + TASK_TYPE, + TASK_ID, + TASK_STATUS, + CAST(CASE PRIORITY + WHEN 0 THEN 'HIGH' + WHEN 1 THEN 'LOW' + ELSE NULL END AS CHAR(5)) AS PRIORITY, + TARGET_REPLICA_SVR_IP, + TARGET_REPLICA_SVR_PORT, + TARGET_PAXOS_REPLICA_NUMBER, + TARGET_REPLICA_TYPE, + CASE SOURCE_REPLICA_SVR_IP + WHEN '' THEN NULL + ELSE SOURCE_REPLICA_SVR_IP END AS SOURCE_REPLICA_SVR_IP, + SOURCE_REPLICA_SVR_PORT, + SOURCE_PAXOS_REPLICA_NUMBER, + CASE SOURCE_REPLICA_TYPE + WHEN '' THEN NULL + ELSE SOURCE_REPLICA_TYPE END AS SOURCE_REPLICA_TYPE, + CASE DATA_SOURCE_SVR_IP + WHEN '' THEN NULL + ELSE DATA_SOURCE_SVR_IP END AS DATA_SOURCE_SVR_IP, + DATA_SOURCE_SVR_PORT, + CAST(CASE IS_MANUAL + WHEN 0 THEN 'FALSE' + WHEN 1 THEN 'TRUE' + ELSE NULL END AS CHAR(6)) AS IS_MANUAL, + TASK_EXEC_SVR_IP, + TASK_EXEC_SVR_PORT, + CAST(GMT_CREATE AS TIMESTAMP(6)) AS CREATE_TIME, + CAST(SCHEDULE_TIME AS TIMESTAMP(6)) AS START_TIME, + CAST(GMT_MODIFIED AS TIMESTAMP(6)) AS MODIFY_TIME, + CAST(FINISH_TIME AS TIMESTAMP(6)) AS FINISH_TIME, + CASE EXECUTE_RESULT + WHEN '' THEN NULL + ELSE EXECUTE_RESULT END AS EXECUTE_RESULT, + "COMMENT" + FROM SYS.ALL_VIRTUAL_LS_REPLICA_TASK_HISTORY + WHERE + TENANT_ID = EFFECTIVE_TENANT_ID() + ) + """.replace("\n", " "), +) + # 25280: ALL_UNUSED_COL_TABS # 25281: DBA_UNUSED_COL_TABS # 25282: USER_UNUSED_COL_TABS diff --git a/src/share/inner_table/ob_inner_table_schema_misc.ipp b/src/share/inner_table/ob_inner_table_schema_misc.ipp index ccf0d3982..8145ce45e 100644 --- a/src/share/inner_table/ob_inner_table_schema_misc.ipp +++ b/src/share/inner_table/ob_inner_table_schema_misc.ipp @@ -475,6 +475,7 @@ case OB_ALL_VIRTUAL_LS_LOG_ARCHIVE_PROGRESS_TID: case OB_ALL_VIRTUAL_LS_META_TABLE_TID: case OB_ALL_VIRTUAL_LS_RECOVERY_STAT_TID: case OB_ALL_VIRTUAL_LS_REPLICA_TASK_TID: +case OB_ALL_VIRTUAL_LS_REPLICA_TASK_HISTORY_TID: case OB_ALL_VIRTUAL_LS_RESTORE_HISTORY_TID: case OB_ALL_VIRTUAL_LS_RESTORE_PROGRESS_TID: case OB_ALL_VIRTUAL_LS_STATUS_TID: @@ -1283,6 +1284,22 @@ case OB_ALL_VIRTUAL_ZONE_MERGE_INFO_TID: break; } + case OB_ALL_VIRTUAL_LS_REPLICA_TASK_HISTORY_TID: { + ObIteratePrivateVirtualTable *iter = NULL; + const bool meta_record_in_sys = false; + if 
(OB_FAIL(NEW_VIRTUAL_TABLE(ObIteratePrivateVirtualTable, iter))) { + SERVER_LOG(WARN, "create iterate private virtual table iterator failed", KR(ret)); + } else if (OB_FAIL(iter->init(OB_ALL_LS_REPLICA_TASK_HISTORY_TID, meta_record_in_sys, index_schema, params))) { + SERVER_LOG(WARN, "iterate private virtual table iter init failed", KR(ret)); + iter->~ObIteratePrivateVirtualTable(); + allocator.free(iter); + iter = NULL; + } else { + vt_iter = iter; + } + break; + } + case OB_ALL_VIRTUAL_LS_RESTORE_HISTORY_TID: { ObIteratePrivateVirtualTable *iter = NULL; const bool meta_record_in_sys = false; @@ -1458,7 +1475,9 @@ case OB_ALL_VIRTUAL_ZONE_MERGE_INFO_TID: } break; } + END_CREATE_VT_ITER_SWITCH_LAMBDA + BEGIN_CREATE_VT_ITER_SWITCH_LAMBDA case OB_ALL_VIRTUAL_TABLET_META_TABLE_TID: { ObIteratePrivateVirtualTable *iter = NULL; const bool meta_record_in_sys = false; @@ -1474,9 +1493,7 @@ case OB_ALL_VIRTUAL_ZONE_MERGE_INFO_TID: } break; } - END_CREATE_VT_ITER_SWITCH_LAMBDA - BEGIN_CREATE_VT_ITER_SWITCH_LAMBDA case OB_ALL_VIRTUAL_TABLET_REPLICA_CHECKSUM_TID: { ObIteratePrivateVirtualTable *iter = NULL; const bool meta_record_in_sys = false; @@ -4793,6 +4810,9 @@ case OB_ALL_LS_RECOVERY_STAT_AUX_LOB_PIECE_TID: case OB_ALL_LS_REPLICA_TASK_TID: case OB_ALL_LS_REPLICA_TASK_AUX_LOB_META_TID: case OB_ALL_LS_REPLICA_TASK_AUX_LOB_PIECE_TID: +case OB_ALL_LS_REPLICA_TASK_HISTORY_TID: +case OB_ALL_LS_REPLICA_TASK_HISTORY_AUX_LOB_META_TID: +case OB_ALL_LS_REPLICA_TASK_HISTORY_AUX_LOB_PIECE_TID: case OB_ALL_LS_RESTORE_HISTORY_TID: case OB_ALL_LS_RESTORE_HISTORY_AUX_LOB_META_TID: case OB_ALL_LS_RESTORE_HISTORY_AUX_LOB_PIECE_TID: diff --git a/src/share/inner_table/table_id_to_name b/src/share/inner_table/table_id_to_name index f0ab9066f..01fc57da9 100644 --- a/src/share/inner_table/table_id_to_name +++ b/src/share/inner_table/table_id_to_name @@ -348,6 +348,7 @@ # 506: __all_column_privilege_history # 506: __all_column_privilege # BASE_TABLE_NAME # 507: __all_tenant_snapshot_ls_replica_history +# 508: __all_ls_replica_task_history # 512: __all_user_proxy_info # 513: __all_user_proxy_info_history # 513: __all_user_proxy_info # BASE_TABLE_NAME @@ -1105,6 +1106,8 @@ # 12464: __all_virtual_tenant_snapshot_ls_replica_history # 12464: __all_tenant_snapshot_ls_replica_history # BASE_TABLE_NAME # 12466: ENABLED_ROLES +# 12467: __all_virtual_ls_replica_task_history +# 12467: __all_ls_replica_task_history # BASE_TABLE_NAME # 12468: __all_virtual_session_ps_info # 12469: __all_virtual_tracepoint_info # 12473: __all_virtual_compatibility_control @@ -1712,6 +1715,9 @@ # 15439: __all_virtual_ls_snapshot # BASE_TABLE_NAME # 15440: ALL_VIRTUAL_INDEX_USAGE_INFO_REAL_AGENT # 15440: __all_index_usage_info # BASE_TABLE_NAME +# 15443: ALL_VIRTUAL_LS_REPLICA_TASK_HISTORY +# 15443: __all_ls_replica_task_history # BASE_TABLE_NAME +# 15443: __all_virtual_ls_replica_task_history # BASE_TABLE_NAME1 # 15444: ALL_VIRTUAL_SESSION_PS_INFO # 15444: __all_virtual_session_ps_info # BASE_TABLE_NAME # 15445: ALL_VIRTUAL_TRACEPOINT_INFO @@ -2126,6 +2132,8 @@ # 21517: GV$OB_LS_SNAPSHOTS # 21518: V$OB_LS_SNAPSHOTS # 21519: DBA_OB_CLONE_HISTORY +# 21523: DBA_OB_LS_REPLICA_TASK_HISTORY +# 21524: CDB_OB_LS_REPLICA_TASK_HISTORY # 21525: CDB_MVIEW_LOGS # 21526: DBA_MVIEW_LOGS # 21527: CDB_MVIEWS @@ -2434,6 +2442,7 @@ # 25275: DBA_OB_TRANSFER_PARTITION_TASKS # 25276: DBA_OB_TRANSFER_PARTITION_TASK_HISTORY # 25278: USER_USERS +# 25279: DBA_OB_LS_REPLICA_TASK_HISTORY # 25283: DBA_MVIEW_LOGS # 25284: ALL_MVIEW_LOGS # 25285: USER_MVIEW_LOGS diff --git 
a/src/share/ob_common_rpc_proxy.h b/src/share/ob_common_rpc_proxy.h index a5c598a0b..35f0ada41 100644 --- a/src/share/ob_common_rpc_proxy.h +++ b/src/share/ob_common_rpc_proxy.h @@ -230,6 +230,7 @@ public: RPC_S(PR5 admin_reload_zone, obrpc::OB_ADMIN_RELOAD_ZONE); RPC_S(PR5 admin_clear_merge_error, obrpc::OB_ADMIN_CLEAR_MERGE_ERROR, (ObAdminMergeArg)); RPC_S(PR5 admin_migrate_unit, obrpc::OB_ADMIN_MIGRATE_UNIT, (ObAdminMigrateUnitArg)); + RPC_S(PR5 admin_alter_ls_replica, obrpc::OB_ADMIN_ALTER_LS_REPLICA, (ObAdminAlterLSReplicaArg)); RPC_S(PRD admin_upgrade_virtual_schema, obrpc::OB_ADMIN_UPGRADE_VIRTUAL_SCHEMA); RPC_S(PRD run_job, obrpc::OB_RUN_JOB, (ObRunJobArg)); RPC_S(PRD run_upgrade_job, obrpc::OB_RUN_UPGRADE_JOB, (ObUpgradeJobArg)); diff --git a/src/share/ob_debug_sync_point.h b/src/share/ob_debug_sync_point.h index 898fe47d1..52b1e7eeb 100755 --- a/src/share/ob_debug_sync_point.h +++ b/src/share/ob_debug_sync_point.h @@ -552,6 +552,7 @@ class ObString; ACT(BEFORE_PARELLEL_TRUNCATE,)\ ACT(END_DDL_IN_PX_SUBCOORD,)\ ACT(BEFORE_SEND_ADD_REPLICA_DRTASK,)\ + ACT(BEFORE_ADD_MANUAL_REPLICA_TASK_IN_INNER_TABLE,)\ ACT(BETWEEN_INSERT_LOCK_INFO_AND_TRY_LOCK_CONFIG_CHANGE,)\ ACT(BEFORE_CHECK_SHRINK_RESOURCE_POOL,)\ ACT(STOP_RECOVERY_LS_THREAD0,)\ @@ -599,6 +600,10 @@ class ObString; ACT(HANG_IN_CLONE_SYS_FAILED_STATUS,)\ ACT(BEFORE_BACKUP_PREFETCH_TASK,)\ ACT(BEFORE_BACKUP_DATA_TASK,)\ + ACT(BEFORE_WAIT_TRANSFER_OUT_TABLET_READY,)\ + ACT(BEFORE_CHECK_TABLET_READY,)\ + ACT(BEFORE_CHECK_TABLET_TRANSFER_TABLE_READY,)\ + ACT(BEFORE_LOG_REPLAY_TO_MAX_MINOR_END_SCN,)\ ACT(HOLD_DDL_COMPLEMENT_DAG_WHEN_APPEND_ROW,)\ ACT(HOLD_DDL_COMPLEMENT_DAG_BEFORE_REPORT_FINISH,)\ ACT(HOLD_DDL_COMPLEMENT_DAG_AFTER_REPORT_FINISH,)\ diff --git a/src/share/ob_rpc_struct.cpp b/src/share/ob_rpc_struct.cpp index f45a61333..7ed0b2753 100644 --- a/src/share/ob_rpc_struct.cpp +++ b/src/share/ob_rpc_struct.cpp @@ -3982,6 +3982,299 @@ void ObCalcColumnChecksumResponseArg::reset() tenant_id_ = OB_INVALID_TENANT_ID; } +OB_SERIALIZE_MEMBER( + ObAlterLSReplicaTaskType, + type_); + +static const char* alter_ls_replica_task_type_strs[] = { + "ADD_LS_REPLICA", + "REMOVE_LS_REPLICA", + "MIGRATE_LS_REPLICA", + "MODIFY_LS_REPLICA_TYPE", + "MODIFY_LS_PAXOS_REPLICA_NUM", + "CANCEL_LS_REPLICA_TASK", +}; + +const char* ObAlterLSReplicaTaskType::get_type_str() const { + STATIC_ASSERT(ARRAYSIZEOF(alter_ls_replica_task_type_strs) == (int64_t)LSReplicaTaskMax, + "alter_ls_replica_task_type string array size mismatch enum AlterLSReplicaTaskType count"); + const char *str = NULL; + if (type_ != LSReplicaTaskMax) { + str = alter_ls_replica_task_type_strs[static_cast<int64_t>(type_)]; + } else { + LOG_WARN_RET(OB_ERR_UNEXPECTED, "invalid AlterLSReplicaTaskType", K_(type)); + } + return str; +} + +int64_t ObAlterLSReplicaTaskType::to_string(char *buf, const int64_t buf_len) const +{ + int64_t pos = 0; + J_OBJ_START(); + J_KV(K_(type), "type", get_type_str()); + J_OBJ_END(); + return pos; +} + +int ObAlterLSReplicaTaskType::parse_from_string(const ObString &type) +{ + int ret = OB_SUCCESS; + bool found = false; + STATIC_ASSERT(ARRAYSIZEOF(alter_ls_replica_task_type_strs) == (int64_t)LSReplicaTaskMax, + "alter_ls_replica_task_type string array size mismatch enum AlterLSReplicaTaskType count"); + for (int64_t i = 0; i < ARRAYSIZEOF(alter_ls_replica_task_type_strs) && !found; i++) { + if (0 == type.case_compare(alter_ls_replica_task_type_strs[i])) { + type_ = static_cast<AlterLSReplicaTaskType>(i); + found = true; + break; + } + } + if (!found) { + ret = OB_INVALID_ARGUMENT;
LOG_WARN("fail to parse type from string", KR(ret), K(type), K_(type)); + } + return ret; +} + +OB_SERIALIZE_MEMBER(ObAdminAlterLSReplicaArg, + ls_id_, + server_addr_, + destination_addr_, + replica_type_, + tenant_id_, + task_id_, + data_source_, + paxos_replica_num_, + alter_task_type_); + +int ObAdminAlterLSReplicaArg::assign(const ObAdminAlterLSReplicaArg &that) +{ + int ret = OB_SUCCESS; + if (this == &that) { + //pass + } else if (OB_FAIL(task_id_.assign(that.task_id_))) { + LOG_WARN("task_id_ assign failed", KR(ret), K(that.task_id_)); + } else { + ls_id_ = that.ls_id_; + server_addr_ = that.server_addr_; + destination_addr_ = that.destination_addr_; + replica_type_ = that.replica_type_; + data_source_ = that.data_source_; + paxos_replica_num_ = that.paxos_replica_num_; + tenant_id_ = that.tenant_id_; + alter_task_type_ = that.alter_task_type_; + } + return ret; +} + +int ObAdminAlterLSReplicaArg::init_add( + const share::ObLSID& ls_id, + const common::ObAddr& server_addr, + const common::ObReplicaType& replica_type, + const common::ObAddr& data_source, + const int64_t paxos_replica_num, + const uint64_t tenant_id) +{ + int ret = OB_SUCCESS; + if (OB_UNLIKELY(!ls_id.is_valid()) + || OB_UNLIKELY(!server_addr.is_valid()) + || OB_UNLIKELY(replica_type != REPLICA_TYPE_FULL && replica_type != REPLICA_TYPE_READONLY) + || OB_UNLIKELY(paxos_replica_num < 0) + || OB_UNLIKELY(!is_valid_tenant_id(tenant_id))) { + //data_source and paxos_replica_num is optional parameter + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), K(ls_id), K(server_addr), K(replica_type), + K(paxos_replica_num), K(tenant_id)); + } else { + ls_id_ = ls_id; + server_addr_ = server_addr; + replica_type_ = replica_type; + data_source_ = data_source; + paxos_replica_num_ = paxos_replica_num; + tenant_id_ = tenant_id; + alter_task_type_ = ObAlterLSReplicaTaskType(ObAlterLSReplicaTaskType::AddLSReplicaTask); + } + return ret; +} + +int ObAdminAlterLSReplicaArg::init_remove( + const share::ObLSID& ls_id, + const common::ObAddr& server_addr, + const int64_t paxos_replica_num, + const uint64_t tenant_id) +{ + int ret = OB_SUCCESS; + if (OB_UNLIKELY(!ls_id.is_valid()) + || OB_UNLIKELY(!server_addr.is_valid()) + || OB_UNLIKELY(paxos_replica_num < 0) + || OB_UNLIKELY(!is_valid_tenant_id(tenant_id))) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), K(ls_id), K(server_addr), + K(paxos_replica_num), K(tenant_id)); + } else { + ls_id_ = ls_id; + server_addr_ = server_addr; + paxos_replica_num_ = paxos_replica_num; + tenant_id_ = tenant_id; + alter_task_type_ = ObAlterLSReplicaTaskType(ObAlterLSReplicaTaskType::RemoveLSReplicaTask); + } + return ret; +} + +int ObAdminAlterLSReplicaArg::init_migrate( + const share::ObLSID& ls_id, + const common::ObAddr& server_addr, + const common::ObAddr& destination_addr, + const common::ObAddr& data_source, + const uint64_t tenant_id) +{ + int ret = OB_SUCCESS; + if (OB_UNLIKELY(!ls_id.is_valid()) + || OB_UNLIKELY(!server_addr.is_valid()) + || OB_UNLIKELY(!destination_addr.is_valid()) + || OB_UNLIKELY(!is_valid_tenant_id(tenant_id))) { + ret = OB_INVALID_ARGUMENT; //data_surce is optional parameter + LOG_WARN("invalid argument", KR(ret), K(ls_id), + K(server_addr), K(destination_addr), K(tenant_id)); + } else { + ls_id_ = ls_id; + server_addr_ = server_addr; + destination_addr_ = destination_addr; + data_source_ = data_source; + tenant_id_ = tenant_id; + alter_task_type_ = ObAlterLSReplicaTaskType(ObAlterLSReplicaTaskType::MigrateLSReplicaTask); + } + return ret; 
+} + +int ObAdminAlterLSReplicaArg::init_modify_replica( + const share::ObLSID& ls_id, + const common::ObAddr& server_addr, + const common::ObReplicaType& replica_type, + const int64_t paxos_replica_num, + const uint64_t tenant_id) +{ + int ret = OB_SUCCESS; + if (OB_UNLIKELY(!ls_id.is_valid()) + || OB_UNLIKELY(!server_addr.is_valid()) + || OB_UNLIKELY(replica_type != REPLICA_TYPE_FULL && replica_type != REPLICA_TYPE_READONLY) + || OB_UNLIKELY(paxos_replica_num < 0) + || OB_UNLIKELY(!is_valid_tenant_id(tenant_id))) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), K(ls_id), K(server_addr), K(replica_type), + K(paxos_replica_num), K(tenant_id)); + } else { + ls_id_ = ls_id; + server_addr_ = server_addr; + replica_type_ = replica_type; + paxos_replica_num_ = paxos_replica_num; + tenant_id_ = tenant_id; + alter_task_type_ = ObAlterLSReplicaTaskType(ObAlterLSReplicaTaskType::ModifyLSReplicaTypeTask); + } + return ret; +} + +int ObAdminAlterLSReplicaArg::init_modify_paxos_replica_num( + const share::ObLSID& ls_id, + const int64_t paxos_replica_num, + const uint64_t tenant_id) +{ + int ret = OB_SUCCESS; + if (OB_UNLIKELY(!ls_id.is_valid()) + || OB_UNLIKELY(paxos_replica_num <= 0) + || OB_UNLIKELY(!is_valid_tenant_id(tenant_id))) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), K(ls_id), + K(paxos_replica_num), K(tenant_id)); + } else { + ls_id_ = ls_id; + paxos_replica_num_ = paxos_replica_num; + tenant_id_ = tenant_id; + alter_task_type_ = ObAlterLSReplicaTaskType(ObAlterLSReplicaTaskType::ModifyLSPaxosReplicaNumTask); + } + return ret; +} + +int ObAdminAlterLSReplicaArg::init_cancel( + const common::ObFixedLengthString& task_id, + const uint64_t tenant_id) +{ + int ret = OB_SUCCESS; + if (OB_UNLIKELY(!is_valid_tenant_id(tenant_id)) + || OB_UNLIKELY(task_id.is_empty())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), K(tenant_id), K(task_id)); + } else if (OB_FAIL(task_id_.assign(task_id))) { + LOG_WARN("task_id_ assign failed", KR(ret), K(task_id)); + } else { + tenant_id_ = tenant_id; + alter_task_type_ = ObAlterLSReplicaTaskType(ObAlterLSReplicaTaskType::CancelLSReplicaTask); + } + return ret; +} + +void ObAdminAlterLSReplicaArg::reset() +{ + ls_id_.reset(); + server_addr_.reset(); + destination_addr_.reset(); + replica_type_ = common::REPLICA_TYPE_MAX; + tenant_id_ = OB_INVALID_TENANT_ID; + task_id_.reset(); + data_source_.reset(); + paxos_replica_num_ = 0; + alter_task_type_.reset(); +} + +OB_SERIALIZE_MEMBER(ObLSCancelReplicaTaskArg, + task_id_, + ls_id_, + tenant_id_); + +int ObLSCancelReplicaTaskArg::assign(const ObLSCancelReplicaTaskArg &that) +{ + int ret = OB_SUCCESS; + if (this != &that) { + task_id_ = that.task_id_; + ls_id_ = that.ls_id_; + tenant_id_ = that.tenant_id_; + } + return ret; +} + +int ObLSCancelReplicaTaskArg::init( + const share::ObTaskId &task_id, + const share::ObLSID &ls_id, + const uint64_t tenant_id) +{ + int ret = OB_SUCCESS; + if (OB_UNLIKELY(!task_id.is_valid() + || !ls_id.is_valid() + || OB_INVALID_TENANT_ID == tenant_id)) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), K(task_id), K(ls_id), K(tenant_id)); + } else { + task_id_ = task_id; + ls_id_ = ls_id; + tenant_id_ = tenant_id; + } + return ret; +} + +void ObLSCancelReplicaTaskArg::reset() +{ + task_id_.reset(); + ls_id_.reset(); + tenant_id_ = OB_INVALID_TENANT_ID; +} + +bool ObLSCancelReplicaTaskArg::is_valid() const +{ + return task_id_.is_valid() + && ls_id_.is_valid() + && OB_INVALID_TENANT_ID != tenant_id_; +} 
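
[Editor's illustration, not part of the patch] To make the intent of the two argument structs above concrete, here is a minimal, hypothetical caller sketch. The tenant id (1002), LS id (1001) and server address (10.0.0.1:2882) are placeholders, and the sketch assumes the usual OceanBase helpers ObAddr::set_ip_addr() and ObTaskId::init(); the authoritative callers are the ALTER SYSTEM resolver/executor and the disaster-recovery worker code changed elsewhere in this patch.

    #define USING_LOG_PREFIX SHARE          // log prefix so LOG_WARN resolves, as in other share/ files
    #include "share/ob_rpc_struct.h"        // declares ObAdminAlterLSReplicaArg / ObLSCancelReplicaTaskArg

    // Hypothetical helper, for illustration only: builds one ADD arg and one CANCEL arg.
    int build_example_args()
    {
      int ret = OB_SUCCESS;
      const uint64_t tenant_id = 1002;      // placeholder tenant id
      share::ObLSID ls_id(1001);            // placeholder log stream id
      common::ObAddr server;
      common::ObAddr data_source;           // left invalid: the data source is optional for init_add()
      obrpc::ObAdminAlterLSReplicaArg add_arg;
      obrpc::ObLSCancelReplicaTaskArg cancel_arg;
      share::ObTaskId task_id;
      if (!server.set_ip_addr("10.0.0.1", 2882)) {   // placeholder server address
        ret = OB_INVALID_ARGUMENT;
        LOG_WARN("invalid server address", KR(ret));
      } else if (OB_FAIL(add_arg.init_add(ls_id, server, common::REPLICA_TYPE_FULL,
                                          data_source, 0 /*paxos_replica_num unspecified*/,
                                          tenant_id))) {
        // add_arg is the payload of the new ObCommonRpcProxy::admin_alter_ls_replica() RPC.
        LOG_WARN("init add-replica arg failed", KR(ret), K(ls_id), K(server));
      } else {
        task_id.init(server);               // ObTaskId is a trace id; init() from an address mints one
        if (OB_FAIL(cancel_arg.init(task_id, ls_id, tenant_id))) {
          // cancel_arg travels over the new ObSrvRpcProxy::ls_cancel_replica_task() RPC.
          LOG_WARN("init cancel-task arg failed", KR(ret), K(task_id), K(ls_id));
        }
      }
      return ret;
    }

Note that each init_xxx() both validates its inputs and stamps the matching ObAlterLSReplicaTaskType, so callers never assign the fields of these structs directly.
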
+ OB_SERIALIZE_MEMBER(ObLSMigrateReplicaArg, task_id_, tenant_id_, diff --git a/src/share/ob_rpc_struct.h b/src/share/ob_rpc_struct.h index 4a08a8dee..ad8be3f22 100644 --- a/src/share/ob_rpc_struct.h +++ b/src/share/ob_rpc_struct.h @@ -4290,6 +4290,197 @@ public: OB_UNIS_VERSION(3); }; +class ObAlterLSReplicaTaskType +{ + OB_UNIS_VERSION(1); +public: + enum AlterLSReplicaTaskType + { + AddLSReplicaTask = 0, + RemoveLSReplicaTask, + MigrateLSReplicaTask, + ModifyLSReplicaTypeTask, + ModifyLSPaxosReplicaNumTask, + CancelLSReplicaTask, + LSReplicaTaskMax + }; +public: + ObAlterLSReplicaTaskType() : type_(LSReplicaTaskMax) {} + ObAlterLSReplicaTaskType(AlterLSReplicaTaskType type) : type_(type) {} + + ObAlterLSReplicaTaskType &operator=(const AlterLSReplicaTaskType type) { type_ = type; return *this; } + ObAlterLSReplicaTaskType &operator=(const ObAlterLSReplicaTaskType &other) { type_ = other.type_; return *this; } + bool operator==(const ObAlterLSReplicaTaskType &other) const { return other.type_ == type_; } + bool operator!=(const ObAlterLSReplicaTaskType &other) const { return other.type_ != type_; } + + void reset() { type_ = LSReplicaTaskMax; } + int64_t to_string(char *buf, const int64_t buf_len) const; + void assign(const ObAlterLSReplicaTaskType &other) { type_ = other.type_; } + bool is_valid() const { return LSReplicaTaskMax != type_; } + bool is_add_task() const { return AddLSReplicaTask == type_; } + bool is_remove_task() const { return RemoveLSReplicaTask == type_; } + bool is_migrate_task() const { return MigrateLSReplicaTask == type_; } + bool is_modify_replica_task() const { return ModifyLSReplicaTypeTask == type_; } + bool is_modify_paxos_replica_num_task() const { return ModifyLSPaxosReplicaNumTask == type_; } + bool is_cancel_task() const { return CancelLSReplicaTask == type_; } + int parse_from_string(const ObString &type); + const AlterLSReplicaTaskType &get_type() const { return type_; } + const char* get_type_str() const; +private: + AlterLSReplicaTaskType type_; +}; + +struct ObAdminAlterLSReplicaArg +{ +public: + OB_UNIS_VERSION(1); +public: + ObAdminAlterLSReplicaArg() + : ls_id_(), + server_addr_(), + destination_addr_(), + replica_type_(common::REPLICA_TYPE_MAX), + tenant_id_(OB_INVALID_TENANT_ID), + task_id_(), + data_source_(), + paxos_replica_num_(0), + alter_task_type_(ObAlterLSReplicaTaskType::LSReplicaTaskMax) {} +public: + int assign(const ObAdminAlterLSReplicaArg &that); + int init_add( + const share::ObLSID& ls_id, + const common::ObAddr& server_addr, + const common::ObReplicaType& replica_type, + const common::ObAddr& data_source, + const int64_t paxos_replica_num, + const uint64_t tenant_id); + int init_remove( + const share::ObLSID& ls_id, + const common::ObAddr& server_addr, + const int64_t paxos_replica_num, + const uint64_t tenant_id); + int init_migrate( + const share::ObLSID& ls_id, + const common::ObAddr& server_addr, + const common::ObAddr& destination_addr, + const common::ObAddr& data_source, + const uint64_t tenant_id); + int init_modify_replica( + const share::ObLSID& ls_id, + const common::ObAddr& server_addr, + const common::ObReplicaType& replica_type, + const int64_t paxos_replica_num, + const uint64_t tenant_id); + int init_modify_paxos_replica_num( + const share::ObLSID& ls_id, + const int64_t paxos_replica_num, + const uint64_t tenant_id); + int init_cancel( + const common::ObFixedLengthString& task_id, + const uint64_t tenant_id); + void reset(); + TO_STRING_KV(K_(ls_id), + K_(server_addr), + K_(destination_addr), + K_(replica_type), 
+ K_(tenant_id), + K_(task_id), + K_(data_source), + K_(paxos_replica_num), + K_(alter_task_type)); + bool is_valid() const { + return alter_task_type_.is_valid() && + ((alter_task_type_.is_add_task() && is_add_valid_()) + || (alter_task_type_.is_remove_task() && is_remove_valid_()) + || (alter_task_type_.is_migrate_task() && is_migrate_valid_()) + || (alter_task_type_.is_modify_replica_task() && is_modify_replica_valid_()) + || (alter_task_type_.is_modify_paxos_replica_num_task() && is_modify_paxos_replica_num_valid_()) + || (alter_task_type_.is_cancel_task() && is_cancel_valid_())); + } + const share::ObLSID &get_ls_id() const { return ls_id_; } + const common::ObAddr &get_server_addr() const { return server_addr_; } + const common::ObAddr &get_destination_addr() const { return destination_addr_; } + const common::ObReplicaType& get_replica_type() const { return replica_type_; } + const uint64_t& get_tenant_id() const { return tenant_id_; } + const common::ObFixedLengthString &get_task_id() const { return task_id_; } + const common::ObAddr &get_data_source() const { return data_source_; } + const int64_t& get_paxos_replica_num() const { return paxos_replica_num_; } + const ObAlterLSReplicaTaskType& get_alter_task_type() const { return alter_task_type_; } + +private: + bool is_add_valid_() const { + return ls_id_.is_valid() + && server_addr_.is_valid() + && REPLICA_TYPE_MAX != replica_type_ + && is_valid_tenant_id(tenant_id_) + && paxos_replica_num_ >= 0; + } + bool is_remove_valid_() const { + return ls_id_.is_valid() + && server_addr_.is_valid() + && is_valid_tenant_id(tenant_id_) + && paxos_replica_num_ >= 0; + } + bool is_migrate_valid_() const { + return ls_id_.is_valid() + && server_addr_.is_valid() + && destination_addr_.is_valid() + && is_valid_tenant_id(tenant_id_); + } + bool is_modify_replica_valid_() const { + return ls_id_.is_valid() + && server_addr_.is_valid() + && REPLICA_TYPE_MAX != replica_type_ + && is_valid_tenant_id(tenant_id_) + && paxos_replica_num_ >= 0; + } + bool is_modify_paxos_replica_num_valid_() const { + return ls_id_.is_valid() + && paxos_replica_num_ > 0 + && is_valid_tenant_id(tenant_id_); + } + bool is_cancel_valid_() const { + return !task_id_.is_empty() && is_valid_tenant_id(tenant_id_); + } + + share::ObLSID ls_id_; + common::ObAddr server_addr_; + common::ObAddr destination_addr_; + common::ObReplicaType replica_type_; + uint64_t tenant_id_; + common::ObFixedLengthString task_id_; + common::ObAddr data_source_; + int64_t paxos_replica_num_; + ObAlterLSReplicaTaskType alter_task_type_; +}; + +struct ObLSCancelReplicaTaskArg +{ + OB_UNIS_VERSION(1); +public: + ObLSCancelReplicaTaskArg() + : task_id_(), + ls_id_(), + tenant_id_(OB_INVALID_TENANT_ID) {} +public: + int assign(const ObLSCancelReplicaTaskArg &that); + int init(const share::ObTaskId &task_id, + const share::ObLSID &ls_id, + const uint64_t tenant_id); + void reset(); + TO_STRING_KV(K_(task_id), + K_(ls_id), + K_(tenant_id)); + bool is_valid() const; + const share::ObTaskId &get_task_id() const { return task_id_; } + const share::ObLSID &get_ls_id() const { return ls_id_; } + const uint64_t &get_tenant_id() const { return tenant_id_; } +private: + share::ObTaskId task_id_; + share::ObLSID ls_id_; + uint64_t tenant_id_; +}; + struct ObLSMigrateReplicaArg { public: diff --git a/src/share/ob_srv_rpc_proxy.h b/src/share/ob_srv_rpc_proxy.h index 125cd5a85..6880a2c86 100644 --- a/src/share/ob_srv_rpc_proxy.h +++ b/src/share/ob_srv_rpc_proxy.h @@ -77,6 +77,7 @@ public: RPC_S(PR5 notify_archive, 
OB_NOTIFY_ARCHIVE, (ObNotifyArchiveArg)); // ls disaster recovery rpc + RPC_S(PR5 ls_cancel_replica_task, OB_LS_CANCEL_REPLICA_TASK, (ObLSCancelReplicaTaskArg)); RPC_S(PR5 ls_migrate_replica, OB_LS_MIGRATE_REPLICA, (ObLSMigrateReplicaArg)); RPC_S(PR5 ls_add_replica, OB_LS_ADD_REPLICA, (ObLSAddReplicaArg)); RPC_S(PR5 ls_type_transform, OB_LS_TYPE_TRANSFORM, (ObLSChangeReplicaArg)); diff --git a/src/share/unit/ob_unit_info.cpp b/src/share/unit/ob_unit_info.cpp index 845f88844..3722f4ded 100644 --- a/src/share/unit/ob_unit_info.cpp +++ b/src/share/unit/ob_unit_info.cpp @@ -13,6 +13,7 @@ #include #include "ob_unit_info.h" +#include "share/ob_errno.h" namespace oceanbase { diff --git a/src/sql/engine/cmd/ob_alter_system_executor.cpp b/src/sql/engine/cmd/ob_alter_system_executor.cpp index f18a985b6..63b99f498 100644 --- a/src/sql/engine/cmd/ob_alter_system_executor.cpp +++ b/src/sql/engine/cmd/ob_alter_system_executor.cpp @@ -1322,6 +1322,26 @@ int ObMigrateUnitExecutor::execute(ObExecContext &ctx, ObMigrateUnitStmt &stmt) return ret; } +int ObAlterLSReplicaExecutor::execute(ObExecContext &ctx, ObAlterLSReplicaStmt &stmt) +{ + int ret = OB_SUCCESS; + ObTaskExecutorCtx *task_exec_ctx = GET_TASK_EXECUTOR_CTX(ctx); + obrpc::ObCommonRpcProxy *common_rpc = NULL; + if (OB_UNLIKELY(!stmt.get_rpc_arg().is_valid())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("rpc args is invalid", KR(ret), K(stmt)); + } else if (OB_ISNULL(task_exec_ctx)) { + ret = OB_NOT_INIT; + LOG_WARN("get task executor context failed", KR(ret)); + } else if (OB_ISNULL(common_rpc = task_exec_ctx->get_common_rpc())) { + ret = OB_NOT_INIT; + LOG_WARN("get common rpc proxy failed", KR(ret), KP(task_exec_ctx)); + } else if (OB_FAIL(common_rpc->admin_alter_ls_replica(stmt.get_rpc_arg()))) { + LOG_WARN("add ls replica rpc failed", KR(ret), K(stmt.get_rpc_arg())); + } + return ret; +} + int ObAddArbitrationServiceExecutor::execute(ObExecContext &ctx, ObAddArbitrationServiceStmt &stmt) { int ret = OB_SUCCESS; diff --git a/src/sql/engine/cmd/ob_alter_system_executor.h b/src/sql/engine/cmd/ob_alter_system_executor.h index 5005ca479..08cfdd67f 100644 --- a/src/sql/engine/cmd/ob_alter_system_executor.h +++ b/src/sql/engine/cmd/ob_alter_system_executor.h @@ -84,6 +84,8 @@ DEF_SIMPLE_EXECUTOR(ObClearMergeError); DEF_SIMPLE_EXECUTOR(ObMigrateUnit); +DEF_SIMPLE_EXECUTOR(ObAlterLSReplica); + DEF_SIMPLE_EXECUTOR(ObAddArbitrationService); DEF_SIMPLE_EXECUTOR(ObRemoveArbitrationService); diff --git a/src/sql/executor/ob_cmd_executor.cpp b/src/sql/executor/ob_cmd_executor.cpp index b2423e238..317a6a9e5 100644 --- a/src/sql/executor/ob_cmd_executor.cpp +++ b/src/sql/executor/ob_cmd_executor.cpp @@ -654,6 +654,10 @@ int ObCmdExecutor::execute(ObExecContext &ctx, ObICmd &cmd) DEFINE_EXECUTE_CMD(ObMigrateUnitStmt, ObMigrateUnitExecutor); break; } + case stmt::T_ALTER_LS_REPLICA: { + DEFINE_EXECUTE_CMD(ObAlterLSReplicaStmt, ObAlterLSReplicaExecutor); + break; + } case stmt::T_ADD_ARBITRATION_SERVICE: { DEFINE_EXECUTE_CMD(ObAddArbitrationServiceStmt, ObAddArbitrationServiceExecutor); break; diff --git a/src/sql/parser/non_reserved_keywords_mysql_mode.c b/src/sql/parser/non_reserved_keywords_mysql_mode.c index 0becfb89a..9f846978b 100644 --- a/src/sql/parser/non_reserved_keywords_mysql_mode.c +++ b/src/sql/parser/non_reserved_keywords_mysql_mode.c @@ -192,6 +192,7 @@ static const NonReservedKeyword Mysql_none_reserved_keywords[] = {"data_table_id", DATA_TABLE_ID}, {"database", DATABASE}, {"databases", DATABASES}, + {"data_source", DATA_SOURCE}, {"date", DATE}, 
{"date_add", DATE_ADD}, {"date_sub", DATE_SUB}, @@ -635,6 +636,7 @@ static const NonReservedKeyword Mysql_none_reserved_keywords[] = {"path", PATH}, {"pattern", PATTERN}, {"pause", PAUSE}, + {"paxos_replica_num", PAXOS_REPLICA_NUM}, {"percentage", PERCENTAGE}, {"percent_rank", PERCENT_RANK}, {"performance", PERFORMANCE}, @@ -907,6 +909,7 @@ static const NonReservedKeyword Mysql_none_reserved_keywords[] = {"tablet_id", TABLET_ID}, {"tablet_max_size", TABLET_MAX_SIZE}, {"task", TASK}, + {"task_id", TASK_ID}, {"template", TEMPLATE}, {"temporary", TEMPORARY}, {"temptable", TEMPTABLE}, diff --git a/src/sql/parser/sql_parser_mysql_mode.y b/src/sql/parser/sql_parser_mysql_mode.y index b57aca142..ab87f163f 100644 --- a/src/sql/parser/sql_parser_mysql_mode.y +++ b/src/sql/parser/sql_parser_mysql_mode.y @@ -278,7 +278,7 @@ END_P SET_VAR DELIMITER CONSTRAINT_NAME CONSTRAINT_SCHEMA CONTAINS CONTEXT CONTRIBUTORS COPY COUNT CPU CREATE_TIMESTAMP CTXCAT CTX_ID CUBE CURDATE CURRENT STACKED CURTIME CURSOR_NAME CUME_DIST CYCLE CALC_PARTITION_ID CONNECT - DAG DATA DATAFILE DATA_TABLE_ID DATE DATE_ADD DATE_SUB DATETIME DAY DEALLOCATE DECRYPTION + DAG DATA DATAFILE DATA_TABLE_ID DATA_SOURCE DATE DATE_ADD DATE_SUB DATETIME DAY DEALLOCATE DECRYPTION DEFAULT_AUTH DEFAULT_LOB_INROW_THRESHOLD DEFINER DELAY DELAY_KEY_WRITE DEPTH DES_KEY_FILE DENSE_RANK DESCRIPTION DESTINATION DIAGNOSTICS DIRECTORY DISABLE DISALLOW DISCARD DISK DISKGROUP DO DOT DUMP DUMPFILE DUPLICATE DUPLICATE_SCOPE DYNAMIC DATABASE_ID DEFAULT_TABLEGROUP DISCONNECT DEMAND @@ -328,7 +328,7 @@ END_P SET_VAR DELIMITER OBCONFIG_URL OJ OBJECT_ID - PACK_KEYS PAGE PARALLEL PARAMETERS PARSER PARTIAL PARTITION_ID PARTITIONING PARTITIONS PASSWORD PATH PAUSE PERCENTAGE + PACK_KEYS PAGE PARALLEL PARAMETERS PARSER PARTIAL PARTITION_ID PARTITIONING PARTITIONS PASSWORD PATH PAUSE PAXOS_REPLICA_NUM PERCENTAGE PERCENT_RANK PHASE PLAN PHYSICAL PLANREGRESS PLUGIN PLUGIN_DIR PLUGINS POINT POLYGON PERFORMANCE PROTECTION PRIORITY PL POLICY POOL PORT POSITION PREPARE PRESERVE PRETTY PRETTY_COLOR PREV PRIMARY_ZONE PRIVILEGES PROCESS PROCESSLIST PROFILE PROFILES PROXY PRECEDING PCTFREE P_ENTITY P_CHUNK @@ -356,7 +356,7 @@ END_P SET_VAR DELIMITER SUPER SUSPEND SWAPS SWITCH SWITCHES SWITCHOVER SYSTEM SYSTEM_USER SYSDATE SESSION_ALIAS SIZE SKEWONLY SEQUENCE SLOG STATEMENT_ID SKIP_HEADER SKIP_BLANK_LINES STATEMENT SUM_OPNSIZE - TABLE_CHECKSUM TABLE_MODE TABLE_ID TABLE_NAME TABLEGROUPS TABLES TABLESPACE TABLET TABLET_ID TABLET_MAX_SIZE + TABLE_CHECKSUM TABLE_MODE TABLE_ID TABLE_NAME TABLEGROUPS TABLES TABLESPACE TABLET TABLET_ID TABLET_MAX_SIZE TASK_ID TEMPLATE TEMPORARY TEMPTABLE TENANT TEXT THAN TIME TIMESTAMP TIMESTAMPADD TIMESTAMPDIFF TP_NO TP_NAME TRACE TRADITIONAL TRANSACTION TRIGGERS TRIM TRUNCATE TYPE TYPES TASK TABLET_SIZE TABLEGROUP_ID TENANT_ID THROTTLE TIME_ZONE_INFO TOP_K_FRE_HIST TIMES TRIM_SPACE TTL @@ -488,7 +488,7 @@ END_P SET_VAR DELIMITER %type partition_role ls_role zone_desc opt_zone_desc server_or_zone opt_server_or_zone opt_partitions opt_subpartitions add_or_alter_zone_options alter_or_change_or_modify %type ls opt_tenant_list_or_ls_or_tablet_id ls_server_or_server_or_zone_or_tenant add_or_alter_zone_option %type opt_tenant_list_v2 -%type suspend_or_resume tenant_name opt_tenant_name cache_name opt_cache_name file_id opt_file_id cancel_task_type +%type suspend_or_resume tenant_name opt_tenant_name cache_name opt_cache_name file_id opt_file_id cancel_task_type opt_data_source opt_paxos_replica_num %type sql_id_or_schema_id_expr opt_sql_id_or_schema_id %type 
namespace_expr opt_namespace %type server_action server_list opt_server_list @@ -18025,6 +18025,51 @@ alter_with_opt_hint SYSTEM CANCEL MIGRATE UNIT INTNUM malloc_non_terminal_node($$, result->malloc_pool_, T_MIGRATE_UNIT, 2, $6, NULL); } | +alter_with_opt_hint SYSTEM ADD REPLICA ls SERVER opt_equal_mark STRING_VALUE REPLICA_TYPE opt_equal_mark STRING_VALUE opt_data_source opt_paxos_replica_num opt_tenant_name +{ + (void)($1); + (void)($7); + (void)($10); + malloc_non_terminal_node($$, result->malloc_pool_, T_ADD_LS_REPLICA, 6, $5, $8, $11, $12, $13, $14); +} +| +alter_with_opt_hint SYSTEM REMOVE REPLICA ls SERVER opt_equal_mark STRING_VALUE opt_paxos_replica_num opt_tenant_name +{ + (void)($1); + (void)($7); + malloc_non_terminal_node($$, result->malloc_pool_, T_REMOVE_LS_REPLICA, 4, $5, $8, $9, $10); +} +| +alter_with_opt_hint SYSTEM MIGRATE REPLICA ls SOURCE opt_equal_mark STRING_VALUE DESTINATION opt_equal_mark STRING_VALUE opt_data_source opt_tenant_name +{ + (void)($1); + (void)($7); + (void)($10); + malloc_non_terminal_node($$, result->malloc_pool_, T_MIGRATE_LS_REPLICA, 5, $5, $8, $11, $12, $13); +} +| +alter_with_opt_hint SYSTEM MODIFY REPLICA ls SERVER opt_equal_mark STRING_VALUE REPLICA_TYPE opt_equal_mark STRING_VALUE opt_paxos_replica_num opt_tenant_name +{ + (void)($1); + (void)($7); + (void)($10); + malloc_non_terminal_node($$, result->malloc_pool_, T_MODIFY_LS_REPLICA_TYPE, 5, $5, $8, $11, $12, $13); +} +| +alter_with_opt_hint SYSTEM MODIFY ls PAXOS_REPLICA_NUM opt_equal_mark INTNUM opt_tenant_name +{ + (void)($1); + (void)($6); + malloc_non_terminal_node($$, result->malloc_pool_, T_MODIFY_LS_PAXOS_REPLICA_NUM, 3, $4, $7, $8); +} +| +alter_with_opt_hint SYSTEM CANCEL REPLICA TASK TASK_ID opt_equal_mark STRING_VALUE opt_tenant_name +{ + (void)($1); + (void)($7); + malloc_non_terminal_node($$, result->malloc_pool_, T_CANCEL_LS_REPLICA_TASK, 2, $8, $9); +} +| alter_with_opt_hint SYSTEM UPGRADE VIRTUAL SCHEMA { (void)($1); @@ -18956,6 +19001,30 @@ RT COMP_EQ int_or_decimal } ; +opt_data_source: +DATA_SOURCE opt_equal_mark STRING_VALUE +{ + (void)($2); + $$ = $3; +} +| /*EMPTY*/ +{ + $$ = NULL; +} +; + +opt_paxos_replica_num: +PAXOS_REPLICA_NUM opt_equal_mark INTNUM +{ + (void)($2); + $$ = $3; +} +| /*EMPTY*/ +{ + $$ = NULL; +} +; + opt_disk_alias: NAME opt_equal_mark relation_name_or_string { @@ -21996,6 +22065,7 @@ ACCOUNT | DATABASE_ID | DATAFILE | DATA_TABLE_ID +| DATA_SOURCE | DATE | DATE_ADD | DATE_SUB @@ -22307,6 +22377,7 @@ ACCOUNT | PARTITIONS | PARTITION_TYPE | PATTERN +| PAXOS_REPLICA_NUM | PERCENT_RANK | PAUSE | PERCENTAGE @@ -22522,7 +22593,8 @@ ACCOUNT | TABLET_ID | TABLET_SIZE | TABLET_MAX_SIZE -| TASK +| TASK +| TASK_ID | TEMPLATE | TEMPORARY | TEMPTABLE diff --git a/src/sql/privilege_check/ob_privilege_check.cpp b/src/sql/privilege_check/ob_privilege_check.cpp index d2f5184d3..3158640fa 100644 --- a/src/sql/privilege_check/ob_privilege_check.cpp +++ b/src/sql/privilege_check/ob_privilege_check.cpp @@ -2551,7 +2551,8 @@ int get_sys_tenant_alter_system_priv( stmt::T_RECOVER != basic_stmt->get_stmt_type() && stmt::T_TABLE_TTL != basic_stmt->get_stmt_type() && stmt::T_ALTER_SYSTEM_RESET_PARAMETER != basic_stmt->get_stmt_type() && - stmt::T_TRANSFER_PARTITION != basic_stmt->get_stmt_type()) { + stmt::T_TRANSFER_PARTITION != basic_stmt->get_stmt_type() && + stmt::T_ALTER_LS_REPLICA != basic_stmt->get_stmt_type()) { ret = OB_ERR_NO_PRIVILEGE; LOG_WARN("Only sys tenant can do this operation", K(ret), "stmt type", basic_stmt->get_stmt_type()); diff --git 
a/src/sql/resolver/cmd/ob_alter_system_resolver.cpp b/src/sql/resolver/cmd/ob_alter_system_resolver.cpp index 85f0d8114..85a41fa94 100644 --- a/src/sql/resolver/cmd/ob_alter_system_resolver.cpp +++ b/src/sql/resolver/cmd/ob_alter_system_resolver.cpp @@ -2779,6 +2779,538 @@ int ObMigrateUnitResolver::resolve(const ParseNode &parse_tree) return ret; } +int ObAlterSystemResolverUtil::get_and_verify_tenant_name( + const ParseNode* tenant_name_node, + const uint64_t exec_tenant_id, + uint64_t &target_tenant_id) +{ + // get tenant id + int ret = OB_SUCCESS; + target_tenant_id = OB_INVALID_TENANT_ID; + ObString tenant_name; + ObSchemaGetterGuard schema_guard; + const ObSimpleTenantSchema *tenant_schema = NULL; + if (OB_UNLIKELY(!is_valid_tenant_id(exec_tenant_id))) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("exec tenant id is invalid", KR(ret), K(exec_tenant_id)); + } else if (OB_ISNULL(GCTX.schema_service_)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("GCTX.schema_service_ is null", KR(ret), KP(GCTX.schema_service_)); + } else if (OB_FAIL(GCTX.schema_service_->get_tenant_schema_guard(OB_SYS_TENANT_ID, schema_guard))) { + LOG_WARN("failed to get_tenant_schema_guard", KR(ret)); + } else if (NULL == tenant_name_node) { + target_tenant_id = exec_tenant_id; + } else if (OB_FAIL(resolve_tenant_name(tenant_name_node, exec_tenant_id, tenant_name))) { + LOG_WARN("fail to resolve target tenant id", KR(ret)); + } else if (OB_FAIL(schema_guard.get_tenant_id(tenant_name, target_tenant_id))) { + LOG_WARN("failed to get tenant id from schema guard", KR(ret), K(tenant_name)); + if (OB_TENANT_NOT_EXIST == ret || OB_ERR_INVALID_TENANT_NAME == ret) { + ret = OB_TENANT_NOT_EXIST; + LOG_USER_ERROR(OB_TENANT_NOT_EXIST, tenant_name.length(), tenant_name.ptr()); + } + } else if (OB_SYS_TENANT_ID != exec_tenant_id && target_tenant_id != exec_tenant_id) { + ret = OB_ERR_NO_PRIVILEGE; + LOG_USER_ERROR(OB_ERR_NO_PRIVILEGE, "sys tenant"); + LOG_WARN("no support operating other user tenants", KR(ret), K(target_tenant_id), K(exec_tenant_id)); + } + // check tenant status + if (OB_FAIL(ret)) { + } else if (OB_FAIL(schema_guard.get_tenant_info(target_tenant_id, tenant_schema))) { + LOG_WARN("fail to get tenant schema", KR(ret), K(target_tenant_id)); + } else if (OB_ISNULL(tenant_schema)) { + ret = OB_TENANT_NOT_EXIST; + LOG_USER_ERROR(OB_TENANT_NOT_EXIST, tenant_name.length(), tenant_name.ptr()); + LOG_WARN("tenant not exist", KR(ret), KP(tenant_schema), K(tenant_name), K(target_tenant_id)); + } else if (tenant_schema->is_creating() || tenant_schema->is_dropping()) { + ret = OB_OP_NOT_ALLOW; + LOG_USER_ERROR(OB_OP_NOT_ALLOW, "Tenant is creating or dropping, current operation is"); + LOG_WARN("tenant status not normal", KR(ret), + K(tenant_schema->is_creating()), K(tenant_schema->is_dropping())); + } + return ret; +} + +int ObAlterSystemResolverUtil::check_and_get_data_source( + const ParseNode* data_source_node, + common::ObAddr& data_source) +{ + int ret = OB_SUCCESS; + data_source.reset(); + if (NULL == data_source_node) { + // pass + } else if (OB_FAIL(Util::resolve_server_value(data_source_node, data_source))) { + LOG_WARN("resolve data_source failed", KR(ret), KP(data_source_node)); + if (OB_INVALID_ARGUMENT == ret) { + LOG_USER_ERROR(OB_INVALID_ARGUMENT, "data_source"); + } + } else if (OB_UNLIKELY(!data_source.is_valid())) { + ret = OB_INVALID_ARGUMENT; + LOG_USER_ERROR(OB_INVALID_ARGUMENT, "data_source"); + LOG_WARN("data_source is invalid", KR(ret), K(data_source)); + } + return ret; +} + +int 
ObAlterSystemResolverUtil::check_and_get_server_addr( + const ParseNode* server_addr_node, + common::ObAddr& server_addr) +{ + int ret = OB_SUCCESS; + server_addr.reset(); + if (OB_ISNULL(server_addr_node)) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), KP(server_addr_node)); + } else if (OB_FAIL(Util::resolve_server_value(server_addr_node, server_addr))) { + LOG_WARN("resolve server failed", KR(ret), KP(server_addr_node)); + if (OB_INVALID_ARGUMENT == ret) { + LOG_USER_ERROR(OB_INVALID_ARGUMENT, "server"); + } + } else if (OB_UNLIKELY(!server_addr.is_valid())) { + ret = OB_INVALID_ARGUMENT; + LOG_USER_ERROR(OB_INVALID_ARGUMENT, "server"); + LOG_WARN("server_addr is invalid", KR(ret), K(server_addr)); + } + return ret; +} + +int ObAlterSystemResolverUtil::check_and_get_paxos_replica_num( + const ParseNode* paxos_replica_num_node, + int64_t& paxos_replica_num) +{ + int ret = OB_SUCCESS; + paxos_replica_num = 0; + if (NULL == paxos_replica_num_node) { + // pass + } else { + paxos_replica_num = paxos_replica_num_node->value_; + if (paxos_replica_num <= 0) { + ret = OB_INVALID_ARGUMENT; + LOG_USER_ERROR(OB_INVALID_ARGUMENT, "paxos_replica_num which should not be less than or equal to 0"); + LOG_WARN("not allowed to set paxos_replica_num less than or equal to 0", KR(ret), K(paxos_replica_num)); + } + } + return ret; +} + +int ObAlterSystemResolverUtil::check_compatibility_for_alter_ls_replica( + const uint64_t cur_tenant_id) +{ + int ret = OB_SUCCESS; + uint64_t tenant_data_version = 0; + if (OB_UNLIKELY(OB_INVALID_TENANT_ID == cur_tenant_id)) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), K(cur_tenant_id)); + } else if (OB_FAIL(GET_MIN_DATA_VERSION(cur_tenant_id, tenant_data_version))) { + //The internal tables involved are under the tenant (meta) and do not involve sys tenants. 
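+    // (Version gate in the next branch: the command is allowed when the tenant data
+    //  version is >= 4.3.3.0, or falls in the backport windows
+    //  [MOCK_DATA_VERSION_4_2_3_0, 4.3.0.0) and [MOCK_DATA_VERSION_4_2_1_8, 4.2.2.0);
+    //  any other version is rejected with OB_NOT_SUPPORTED.)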
+ LOG_WARN("get tenant data version failed", KR(ret), K(cur_tenant_id)); + } else if (!((tenant_data_version >= DATA_VERSION_4_3_3_0) + || (tenant_data_version >= MOCK_DATA_VERSION_4_2_3_0 && tenant_data_version < DATA_VERSION_4_3_0_0) + || (tenant_data_version >= MOCK_DATA_VERSION_4_2_1_8 && tenant_data_version < DATA_VERSION_4_2_2_0))) { + ret = OB_NOT_SUPPORTED; + LOG_WARN("Tenant data version does not match, alter LS replica command is not allowed", + KR(ret), K(cur_tenant_id), K(tenant_data_version)); + LOG_USER_ERROR(OB_NOT_SUPPORTED, "Tenant data version does not match, alter LS replica command is"); + } + return ret; +} + +int ObAlterSystemResolverUtil::do_check_for_alter_ls_replica( + const ParseNode *tenant_name_node, + ObSchemaChecker *schema_checker, + ObSQLSessionInfo *session_info, + uint64_t &target_tenant_id) +{ + int ret = OB_SUCCESS; + target_tenant_id = OB_INVALID_TENANT_ID; + if(OB_ISNULL(schema_checker) || OB_ISNULL(session_info)) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", KR(ret), KP(schema_checker), KP(session_info)); + } else if (OB_FAIL(Util::get_and_verify_tenant_name(tenant_name_node, + session_info->get_effective_tenant_id(), + target_tenant_id))) { + LOG_WARN("get and verify tenant_name failed", KR(ret), + KP(tenant_name_node), K(session_info->get_effective_tenant_id())); + } else if (OB_FAIL(Util::check_compatibility_for_alter_ls_replica(target_tenant_id))) { + LOG_WARN("check compatibility for alter ls replica failed", KR(ret), K(target_tenant_id)); + } else if (ObSchemaChecker::is_ora_priv_check()) { + if (OB_FAIL(schema_checker->check_ora_ddl_priv( + session_info->get_effective_tenant_id(), + session_info->get_priv_user_id(), ObString(""), + // why use T_ALTER_SYSTEM_SET_PARAMETER? + // because T_ALTER_SYSTEM_SET_PARAMETER has following + // traits: T_ALTER_SYSTEM_SET_PARAMETER can allow dba to + // do an operation and prohibit other user to do this + // operation so we reuse this. 
+ stmt::T_ALTER_SYSTEM_SET_PARAMETER, + session_info->get_enable_role_array()))) { + LOG_WARN("failed to check privilege", KR(ret), K(session_info->get_effective_tenant_id()), + K(session_info->get_user_id())); + } + } + return ret; +} + +int ObAddLSReplicaResolver::resolve(const ParseNode &parse_tree) +{ + int ret = OB_SUCCESS; + ObAlterLSReplicaStmt *stmt = NULL; + if (OB_UNLIKELY(T_ADD_LS_REPLICA != parse_tree.type_)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("type is not T_ADD_LS_REPLICA", KR(ret), "type", get_type_name(parse_tree.type_)); + } else if (OB_ISNULL(parse_tree.children_) || OB_UNLIKELY(parse_tree.num_child_ != 6)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("invalid argument", KR(ret), "type", get_type_name(parse_tree.type_), + "child_num", parse_tree.num_child_); + } else if (OB_ISNULL(parse_tree.children_[0]) + || OB_ISNULL(parse_tree.children_[1]) + || OB_ISNULL(parse_tree.children_[2])) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("invalid parse tree", KR(ret), KP(parse_tree.children_[0]), + KP(parse_tree.children_[1]), KP(parse_tree.children_[2])); + } else if (OB_ISNULL(stmt = create_stmt())) { + ret = OB_ALLOCATE_MEMORY_FAILED; + LOG_WARN("create ObAlterLSReplicaStmt failed", KR(ret)); + } else if (OB_ISNULL(session_info_) || OB_ISNULL(schema_checker_)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("session_info_ is null", KR(ret), KP(session_info_), KP(schema_checker_)); + } else { + stmt_ = stmt; + ParseNode *ls_id_node = parse_tree.children_[0]; + ParseNode *server_addr_node = parse_tree.children_[1]; + ParseNode *replica_type_node = parse_tree.children_[2]; + ParseNode *data_source_node = parse_tree.children_[3]; + ParseNode *paxos_replica_num_node = parse_tree.children_[4]; + ParseNode *tenant_name_node = parse_tree.children_[5]; + + int64_t ls_id = 0; + common::ObAddr server_addr; + common::ObReplicaType replica_type = REPLICA_TYPE_MAX; + common::ObAddr data_source; + int64_t paxos_replica_num = 0; + uint64_t tenant_id = OB_INVALID_TENANT_ID; + if (OB_FAIL(Util::do_check_for_alter_ls_replica(tenant_name_node, + schema_checker_, + session_info_, + tenant_id))) { + LOG_WARN("do check for alter ls replica failed", KR(ret), + KP(tenant_name_node), KP(schema_checker_), KP(session_info_)); + } else if (OB_FAIL(Util::resolve_ls_id(ls_id_node, ls_id))) { + LOG_WARN("resolve ls id failed", KR(ret), KP(ls_id_node)); + } else if (OB_FAIL(Util::check_and_get_server_addr(server_addr_node, server_addr))) { + LOG_WARN("resolve server failed", KR(ret), KP(server_addr_node)); + } else if (OB_FAIL(Util::resolve_replica_type(replica_type_node, replica_type))) { + LOG_WARN("resolve replica type failed", KR(ret), KP(replica_type_node)); + } else if (OB_FAIL(Util::check_and_get_data_source(data_source_node, data_source))) { + LOG_WARN("check and get data source failed", KR(ret), KP(data_source_node)); + } else if (OB_FAIL(Util::check_and_get_paxos_replica_num(paxos_replica_num_node, paxos_replica_num))) { + LOG_WARN("check and get paxos replica num failed", KR(ret), KP(paxos_replica_num_node)); + } + if (OB_SUCC(ret)) { + share::ObLSID id(ls_id); + if (OB_FAIL(stmt->get_rpc_arg().init_add(id, server_addr, replica_type, + data_source, paxos_replica_num, tenant_id))) { + LOG_WARN("init rpc arg failed", KR(ret), K(id), K(server_addr), + K(replica_type), K(data_source), K(paxos_replica_num), K(tenant_id)); + } + } + } + FLOG_INFO("resolve add replica parse tree over", KR(ret)); + return ret; +} + +int ObRemoveLSReplicaResolver::resolve(const ParseNode &parse_tree) +{ + int ret = OB_SUCCESS; + 
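+  // Resolves the 4-child T_REMOVE_LS_REPLICA node built by the new grammar rule
+  // (children: ls, server, paxos_replica_num, tenant_name; the last two are optional
+  // and may be NULL). The statement looks roughly like:
+  //   ALTER SYSTEM REMOVE REPLICA LS = 1001 SERVER = '10.0.0.1:2882'
+  //       PAXOS_REPLICA_NUM = 2 TENANT = tenant_name;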
ObAlterLSReplicaStmt *stmt = NULL; + if (OB_UNLIKELY(T_REMOVE_LS_REPLICA != parse_tree.type_)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("type is not T_REMOVE_LS_REPLICA", KR(ret), "type", get_type_name(parse_tree.type_)); + } else if (OB_ISNULL(parse_tree.children_) || OB_UNLIKELY(parse_tree.num_child_ != 4)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("invalid argument", KR(ret), "type", get_type_name(parse_tree.type_), + "child_num", parse_tree.num_child_); + } else if (OB_ISNULL(parse_tree.children_[0]) || OB_ISNULL(parse_tree.children_[1])) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("invalid parse tree", KR(ret), KP(parse_tree.children_[0]), KP(parse_tree.children_[1])); + } else if (OB_ISNULL(stmt = create_stmt())) { + ret = OB_ALLOCATE_MEMORY_FAILED; + LOG_WARN("create ObAlterLSReplicaStmt failed", KR(ret)); + } else if (OB_ISNULL(session_info_) || OB_ISNULL(schema_checker_)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("session_info_ is null", KR(ret), KP(session_info_), KP(schema_checker_)); + } else { + stmt_ = stmt; + ParseNode *ls_id_node = parse_tree.children_[0]; + ParseNode *server_addr_node = parse_tree.children_[1]; + ParseNode *paxos_replica_num_node = parse_tree.children_[2]; + ParseNode *tenant_name_node = parse_tree.children_[3]; + int64_t ls_id = 0; + common::ObAddr server_addr; + int64_t paxos_replica_num = 0; + uint64_t tenant_id = OB_INVALID_TENANT_ID; + if (OB_FAIL(Util::do_check_for_alter_ls_replica(tenant_name_node, + schema_checker_, + session_info_, + tenant_id))) { + LOG_WARN("do check for alter ls replica failed", KR(ret), + KP(tenant_name_node), KP(schema_checker_), KP(session_info_)); + } else if (OB_FAIL(Util::resolve_ls_id(ls_id_node, ls_id))) { + LOG_WARN("resolve ls id failed", KR(ret), KP(ls_id_node)); + } else if (OB_FAIL(Util::check_and_get_server_addr(server_addr_node, server_addr))) { + LOG_WARN("resolve server failed", KR(ret), KP(server_addr_node)); + } else if (OB_FAIL(Util::check_and_get_paxos_replica_num(paxos_replica_num_node, paxos_replica_num))) { + LOG_WARN("check and get paxos replica num failed", KR(ret), KP(paxos_replica_num_node)); + } + if (OB_SUCC(ret)) { + share::ObLSID id(ls_id); + if (OB_FAIL(stmt->get_rpc_arg().init_remove(id, server_addr, paxos_replica_num, tenant_id))) { + LOG_WARN("init rpc arg failed", KR(ret), K(id), K(server_addr), + K(paxos_replica_num), K(tenant_id)); + } + } + } + FLOG_INFO("resolve remove replica parse tree over", KR(ret)); + return ret; +} + +int ObMigrateLSReplicaResolver::resolve(const ParseNode &parse_tree) +{ + int ret = OB_SUCCESS; + ObAlterLSReplicaStmt *stmt = NULL; + if (OB_UNLIKELY(T_MIGRATE_LS_REPLICA != parse_tree.type_)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("type is not T_MIGRATE_LS_REPLICA", KR(ret), "type", get_type_name(parse_tree.type_)); + } else if (OB_ISNULL(parse_tree.children_) || OB_UNLIKELY(parse_tree.num_child_ != 5)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("invalid argument", KR(ret), "type", get_type_name(parse_tree.type_), + "child_num", parse_tree.num_child_); + } else if (OB_ISNULL(parse_tree.children_[0]) + || OB_ISNULL(parse_tree.children_[1]) + || OB_ISNULL(parse_tree.children_[2])) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("invalid parse tree", KR(ret), KP(parse_tree.children_[0]), + KP(parse_tree.children_[1]), KP(parse_tree.children_[2])); + } else if (OB_ISNULL(stmt = create_stmt())) { + ret = OB_ALLOCATE_MEMORY_FAILED; + LOG_WARN("create ObAlterLSReplicaStmt failed", KR(ret)); + } else if (OB_ISNULL(session_info_) || OB_ISNULL(schema_checker_)) { + ret = OB_ERR_UNEXPECTED; + 
LOG_WARN("session_info_ is null", KR(ret), KP(session_info_), KP(schema_checker_)); + } else { + stmt_ = stmt; + ParseNode *ls_id_node = parse_tree.children_[0]; + ParseNode *source_addr_node = parse_tree.children_[1]; + ParseNode *destination_addr_node = parse_tree.children_[2]; + ParseNode *data_source_node = parse_tree.children_[3]; + ParseNode *tenant_name_node = parse_tree.children_[4]; + int64_t ls_id = 0; + common::ObAddr source_addr; + common::ObAddr destination_addr; + common::ObAddr data_source; + uint64_t tenant_id = OB_INVALID_TENANT_ID; + if (OB_FAIL(Util::do_check_for_alter_ls_replica(tenant_name_node, + schema_checker_, + session_info_, + tenant_id))) { + LOG_WARN("do check for alter ls replica failed", KR(ret), + KP(tenant_name_node), KP(schema_checker_), KP(session_info_)); + } else if (OB_FAIL(Util::resolve_ls_id(ls_id_node, ls_id))) { + LOG_WARN("resolve ls id failed", KR(ret), KP(ls_id_node)); + } else if (OB_FAIL(Util::resolve_server_value(source_addr_node, source_addr))) { + LOG_WARN("resolve server failed", KR(ret), KP(source_addr_node)); + if (OB_INVALID_ARGUMENT == ret) { + LOG_USER_ERROR(OB_INVALID_ARGUMENT, "source"); + } + } else if (OB_FAIL(Util::resolve_server_value(destination_addr_node, destination_addr))) { + LOG_WARN("resolve server failed", KR(ret), KP(destination_addr_node)); + if (OB_INVALID_ARGUMENT == ret) { + LOG_USER_ERROR(OB_INVALID_ARGUMENT, "destination"); + } + } else if (OB_UNLIKELY(!source_addr.is_valid()) || OB_UNLIKELY(!destination_addr.is_valid())) { + ret = OB_INVALID_ARGUMENT; + LOG_USER_ERROR(OB_INVALID_ARGUMENT, "source or destination"); + LOG_WARN("source_addr or destination_addr is invalid", KR(ret), K(source_addr), K(destination_addr)); + } else if (OB_FAIL(Util::check_and_get_data_source(data_source_node, data_source))) { + LOG_WARN("check and get data source failed", KR(ret), KP(data_source_node)); + } + if (OB_SUCC(ret)) { + share::ObLSID id(ls_id); + if (OB_FAIL(stmt->get_rpc_arg().init_migrate(id, source_addr, destination_addr, + data_source, tenant_id))) { + LOG_WARN("init rpc arg failed", KR(ret), K(id), K(source_addr), K(destination_addr), + K(data_source), K(tenant_id)); + } + } + } + FLOG_INFO("resolve migrate replica parse tree over", KR(ret)); + return ret; +} + +int ObModifyLSReplicaResolver::resolve(const ParseNode &parse_tree) +{ + int ret = OB_SUCCESS; + ObAlterLSReplicaStmt *stmt = NULL; + if (OB_UNLIKELY(T_MODIFY_LS_REPLICA_TYPE != parse_tree.type_)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("type is not T_MODIFY_LS_REPLICA_TYPE", KR(ret), "type", get_type_name(parse_tree.type_)); + } else if (OB_ISNULL(parse_tree.children_) || OB_UNLIKELY(parse_tree.num_child_ != 5)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("invalid argument", KR(ret), "type", get_type_name(parse_tree.type_), + "child_num", parse_tree.num_child_); + } else if (OB_ISNULL(parse_tree.children_[0]) + || OB_ISNULL(parse_tree.children_[1]) + || OB_ISNULL(parse_tree.children_[2])) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("invalid parse tree", KR(ret), KP(parse_tree.children_[0]), + KP(parse_tree.children_[1]), KP(parse_tree.children_[2])); + } else if (OB_ISNULL(stmt = create_stmt())) { + ret = OB_ALLOCATE_MEMORY_FAILED; + LOG_WARN("create ObAlterLSReplicaStmt failed", KR(ret)); + } else if (OB_ISNULL(session_info_) || OB_ISNULL(schema_checker_)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("session_info_ is null", KR(ret), KP(session_info_), KP(schema_checker_)); + } else { + stmt_ = stmt; + ParseNode *ls_id_node = parse_tree.children_[0]; + ParseNode 
*server_addr_node = parse_tree.children_[1]; + ParseNode *replica_type_node = parse_tree.children_[2]; + ParseNode *paxos_replica_num_node = parse_tree.children_[3]; + ParseNode *tenant_name_node = parse_tree.children_[4]; + int64_t ls_id = 0; + common::ObAddr server_addr; + common::ObReplicaType replica_type = REPLICA_TYPE_MAX; + int64_t paxos_replica_num = 0; + uint64_t tenant_id = OB_INVALID_TENANT_ID; + if (OB_FAIL(Util::do_check_for_alter_ls_replica(tenant_name_node, + schema_checker_, + session_info_, + tenant_id))) { + LOG_WARN("do check for alter ls replica failed", KR(ret), + KP(tenant_name_node), KP(schema_checker_), KP(session_info_)); + } else if (OB_FAIL(Util::resolve_ls_id(ls_id_node, ls_id))) { + LOG_WARN("resolve ls id failed", KR(ret), KP(ls_id_node)); + } else if (OB_FAIL(Util::check_and_get_server_addr(server_addr_node, server_addr))) { + LOG_WARN("resolve server failed", KR(ret), KP(server_addr_node)); + } else if (OB_FAIL(Util::resolve_replica_type(replica_type_node, replica_type))) { + LOG_WARN("resolve replica type failed", KR(ret), KP(replica_type_node)); + } else if (OB_FAIL(Util::check_and_get_paxos_replica_num(paxos_replica_num_node, paxos_replica_num))) { + LOG_WARN("check and get paxos replica num failed", KR(ret), KP(paxos_replica_num_node)); + } + if (OB_SUCC(ret)) { + share::ObLSID id(ls_id); + if (OB_FAIL(stmt->get_rpc_arg().init_modify_replica(id, server_addr, replica_type, + paxos_replica_num, tenant_id))) { + LOG_WARN("init rpc arg failed", KR(ret), K(id), K(server_addr), + K(replica_type), K(paxos_replica_num), K(tenant_id)); + } + } + } + FLOG_INFO("resolve modify replica type parse tree over", KR(ret)); + return ret; +} + +int ObModifyLSPaxosReplicaNumResolver::resolve(const ParseNode &parse_tree) +{ + int ret = OB_SUCCESS; + ObAlterLSReplicaStmt *stmt = NULL; + if (OB_UNLIKELY(T_MODIFY_LS_PAXOS_REPLICA_NUM != parse_tree.type_)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("type is not T_MODIFY_LS_PAXOS_REPLICA_NUM", KR(ret), "type", get_type_name(parse_tree.type_)); + } else if (OB_ISNULL(parse_tree.children_) || OB_UNLIKELY(parse_tree.num_child_ != 3)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("invalid argument", KR(ret), "type", get_type_name(parse_tree.type_), + "child_num", parse_tree.num_child_); + } else if (OB_ISNULL(parse_tree.children_[0]) || OB_ISNULL(parse_tree.children_[1])) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("invalid parse tree", KR(ret), KP(parse_tree.children_[0]), + KP(parse_tree.children_[1])); + } else if (OB_ISNULL(stmt = create_stmt())) { + ret = OB_ALLOCATE_MEMORY_FAILED; + LOG_WARN("create ObAlterLSReplicaStmt failed", KR(ret)); + } else if (OB_ISNULL(session_info_) || OB_ISNULL(schema_checker_)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("session_info_ is null", KR(ret), KP(session_info_), KP(schema_checker_)); + } else { + stmt_ = stmt; + ParseNode *ls_id_node = parse_tree.children_[0]; + ParseNode *paxos_replica_num_node = parse_tree.children_[1]; + ParseNode *tenant_name_node = parse_tree.children_[2]; + int64_t ls_id = 0; + int64_t paxos_replica_num = 0; + uint64_t tenant_id = OB_INVALID_TENANT_ID; + if (OB_FAIL(Util::do_check_for_alter_ls_replica(tenant_name_node, + schema_checker_, + session_info_, + tenant_id))) { + LOG_WARN("do check for alter ls replica failed", KR(ret), + KP(tenant_name_node), KP(schema_checker_), KP(session_info_)); + } else if (OB_FAIL(Util::resolve_ls_id(ls_id_node, ls_id))) { + LOG_WARN("resolve ls id failed", KR(ret), KP(ls_id_node)); + } + if (OB_SUCC(ret)) { + paxos_replica_num = 
paxos_replica_num_node->value_; + if (paxos_replica_num <= 0) { + ret = OB_INVALID_ARGUMENT; + LOG_USER_ERROR(OB_INVALID_ARGUMENT, "paxos_replica_num which should not be less than or equal to 0"); + LOG_WARN("not allowed to set paxos_replica_num less than or equal to 0", KR(ret), K(paxos_replica_num)); + } + } + if (OB_SUCC(ret)) { + share::ObLSID id(ls_id); + if (OB_FAIL(stmt->get_rpc_arg().init_modify_paxos_replica_num(id, paxos_replica_num, tenant_id))) { + LOG_WARN("init rpc arg failed", KR(ret), K(id), K(paxos_replica_num), K(tenant_id)); + } + } + } + FLOG_INFO("resolve modify paxos_replica_num parse tree over", KR(ret)); + return ret; +} + +int ObCancelLSReplicaTaskResolver::resolve(const ParseNode &parse_tree) +{ + int ret = OB_SUCCESS; + ObAlterLSReplicaStmt *stmt = NULL; + if (OB_UNLIKELY(T_CANCEL_LS_REPLICA_TASK != parse_tree.type_)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("type is not T_CANCEL_LS_REPLICA_TASK", KR(ret), "type", get_type_name(parse_tree.type_)); + } else if (OB_ISNULL(parse_tree.children_) || OB_UNLIKELY(parse_tree.num_child_ != 2)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("invalid argument", KR(ret), "type", get_type_name(parse_tree.type_), + "child_num", parse_tree.num_child_); + } else if (OB_ISNULL(parse_tree.children_[0])) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("invalid parse tree", KR(ret), KP(parse_tree.children_[0])); + } else if (OB_ISNULL(stmt = create_stmt())) { + ret = OB_ALLOCATE_MEMORY_FAILED; + LOG_WARN("create ObAlterLSReplicaStmt failed", KR(ret)); + } else if (OB_ISNULL(session_info_) || OB_ISNULL(schema_checker_)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("session_info_ is null", KR(ret), KP(session_info_), KP(schema_checker_)); + } else { + stmt_ = stmt; + ParseNode *task_id_node = parse_tree.children_[0]; + ParseNode *tenant_name_node = parse_tree.children_[1]; + Task_Id task_id; + uint64_t tenant_id = OB_INVALID_TENANT_ID; + ObString task_id_str; + if (OB_FAIL(Util::do_check_for_alter_ls_replica(tenant_name_node, + schema_checker_, + session_info_, + tenant_id))) { + LOG_WARN("do check for alter ls replica failed", KR(ret), + KP(tenant_name_node), KP(schema_checker_), KP(session_info_)); + } else if (OB_FAIL(Util::resolve_string(task_id_node, task_id_str))) { + LOG_WARN("tenant name assign failed", KR(ret), KP(task_id_node)); + } else if (OB_FAIL(task_id.assign(task_id_str))) { + LOG_WARN("task id assign failed", KR(ret), K(task_id_str)); + } else if (OB_FAIL(stmt->get_rpc_arg().init_cancel(task_id, tenant_id))) { + LOG_WARN("init rpc arg failed", KR(ret), K(task_id), K(tenant_id)); + } + } + FLOG_INFO("resolve cancel parse tree over", KR(ret)); + return ret; +} + int ObAddArbitrationServiceResolver::resolve(const ParseNode &parse_tree) { int ret = OB_SUCCESS; diff --git a/src/sql/resolver/cmd/ob_alter_system_resolver.h b/src/sql/resolver/cmd/ob_alter_system_resolver.h index 18e23707a..682350bb1 100644 --- a/src/sql/resolver/cmd/ob_alter_system_resolver.h +++ b/src/sql/resolver/cmd/ob_alter_system_resolver.h @@ -65,8 +65,24 @@ public: bool &affect_all, bool &affect_all_user, bool &affect_all_meta); + static int get_and_verify_tenant_name(const ParseNode* tenant_name_node, + const uint64_t exec_tenant_id, + uint64_t &target_tenant_id); + static int check_and_get_data_source(const ParseNode* data_source_node, + common::ObAddr& data_source); + static int check_and_get_server_addr(const ParseNode* server_addr_node, + common::ObAddr& server_addr); + static int check_and_get_paxos_replica_num(const ParseNode* paxos_replica_num_node, + int64_t& 
paxos_replica_num); + static int check_compatibility_for_alter_ls_replica(const uint64_t cur_tenant_id); + static int do_check_for_alter_ls_replica(const ParseNode *tenant_name_node, + ObSchemaChecker *schema_checker, + ObSQLSessionInfo *session_info, + uint64_t &target_tenant_id); }; +typedef common::ObFixedLengthString Task_Id; + #define DEF_SIMPLE_CMD_RESOLVER(name) \ class name : public ObSystemCmdResolver \ { \ @@ -128,6 +144,13 @@ DEF_SIMPLE_CMD_RESOLVER(ObReplaceArbitrationServiceResolver); DEF_SIMPLE_CMD_RESOLVER(ObMigrateUnitResolver); +DEF_SIMPLE_CMD_RESOLVER(ObAddLSReplicaResolver); +DEF_SIMPLE_CMD_RESOLVER(ObRemoveLSReplicaResolver); +DEF_SIMPLE_CMD_RESOLVER(ObMigrateLSReplicaResolver); +DEF_SIMPLE_CMD_RESOLVER(ObModifyLSReplicaResolver); +DEF_SIMPLE_CMD_RESOLVER(ObModifyLSPaxosReplicaNumResolver); +DEF_SIMPLE_CMD_RESOLVER(ObCancelLSReplicaTaskResolver); + DEF_SIMPLE_CMD_RESOLVER(ObUpgradeVirtualSchemaResolver); DEF_SIMPLE_CMD_RESOLVER(ObRunJobResolver); diff --git a/src/sql/resolver/cmd/ob_alter_system_stmt.h b/src/sql/resolver/cmd/ob_alter_system_stmt.h index 35ff6d895..dd0a4add5 100644 --- a/src/sql/resolver/cmd/ob_alter_system_stmt.h +++ b/src/sql/resolver/cmd/ob_alter_system_stmt.h @@ -402,6 +402,18 @@ private: obrpc::ObAdminMigrateUnitArg rpc_arg_; }; +class ObAlterLSReplicaStmt : public ObSystemCmdStmt +{ +public: + ObAlterLSReplicaStmt() : ObSystemCmdStmt(stmt::T_ALTER_LS_REPLICA) {} + virtual ~ObAlterLSReplicaStmt() {} + + obrpc::ObAdminAlterLSReplicaArg &get_rpc_arg() { return rpc_arg_; } + TO_STRING_KV(N_STMT_TYPE, ((int)stmt_type_), K_(rpc_arg)); +private: + obrpc::ObAdminAlterLSReplicaArg rpc_arg_; +}; + class ObAddArbitrationServiceStmt : public ObSystemCmdStmt { public: diff --git a/src/sql/resolver/ob_resolver.cpp b/src/sql/resolver/ob_resolver.cpp index 1cad9426b..7eb90e741 100644 --- a/src/sql/resolver/ob_resolver.cpp +++ b/src/sql/resolver/ob_resolver.cpp @@ -533,6 +533,30 @@ int ObResolver::resolve(IsPrepared if_prepared, const ParseNode &parse_tree, ObS REGISTER_STMT_RESOLVER(MigrateUnit); break; } + case T_ADD_LS_REPLICA: { + REGISTER_STMT_RESOLVER(AddLSReplica); + break; + } + case T_REMOVE_LS_REPLICA: { + REGISTER_STMT_RESOLVER(RemoveLSReplica); + break; + } + case T_MIGRATE_LS_REPLICA: { + REGISTER_STMT_RESOLVER(MigrateLSReplica); + break; + } + case T_MODIFY_LS_REPLICA_TYPE: { + REGISTER_STMT_RESOLVER(ModifyLSReplica); + break; + } + case T_MODIFY_LS_PAXOS_REPLICA_NUM: { + REGISTER_STMT_RESOLVER(ModifyLSPaxosReplicaNum); + break; + } + case T_CANCEL_LS_REPLICA_TASK: { + REGISTER_STMT_RESOLVER(CancelLSReplicaTask); + break; + } case T_ADD_ARBITRATION_SERVICE: { REGISTER_STMT_RESOLVER(AddArbitrationService); break; diff --git a/src/sql/resolver/ob_stmt_type.h b/src/sql/resolver/ob_stmt_type.h index 81e751200..5a24fa63d 100644 --- a/src/sql/resolver/ob_stmt_type.h +++ b/src/sql/resolver/ob_stmt_type.h @@ -296,7 +296,7 @@ OB_STMT_TYPE_DEF_UNKNOWN_AT(T_CREATE_MLOG, get_create_mlog_stmt_need_privs, 295) OB_STMT_TYPE_DEF_UNKNOWN_AT(T_DROP_MLOG, get_drop_mlog_stmt_need_privs, 296) OB_STMT_TYPE_DEF_UNKNOWN_AT(T_TRANSFER_PARTITION, get_sys_tenant_alter_system_priv, 297) OB_STMT_TYPE_DEF_UNKNOWN_AT(T_FLUSH_PRIVILEGES, no_priv_needed, 298) -// OB_STMT_TYPE_DEF_UNKNOWN_AT(T_ALTER_LS_REPLICA, get_sys_tenant_alter_system_priv, 299) +OB_STMT_TYPE_DEF_UNKNOWN_AT(T_ALTER_LS_REPLICA, get_sys_tenant_alter_system_priv, 299) OB_STMT_TYPE_DEF_UNKNOWN_AT(T_SHOW_PROCEDURE_CODE, err_stmt_type_priv, 300) OB_STMT_TYPE_DEF_UNKNOWN_AT(T_SHOW_FUNCTION_CODE, err_stmt_type_priv, 301) 
OB_STMT_TYPE_DEF(T_CHANGE_EXTERNAL_STORAGE_DEST, no_priv_needed, 302, ACTION_TYPE_ALTER_SYSTEM) diff --git a/src/storage/high_availability/ob_ls_complete_migration.cpp b/src/storage/high_availability/ob_ls_complete_migration.cpp index a4fbb607f..75f3510da 100644 --- a/src/storage/high_availability/ob_ls_complete_migration.cpp +++ b/src/storage/high_availability/ob_ls_complete_migration.cpp @@ -1740,6 +1740,7 @@ int ObStartCompleteMigrationTask::check_tablet_ready_( ret = OB_INVALID_ARGUMENT; LOG_WARN("check tablet ready get invalid argument", K(ret), K(tablet_id), KP(ls)); } else { + DEBUG_SYNC(BEFORE_CHECK_TABLET_READY); const int64_t wait_tablet_start_ts = ObTimeUtility::current_time(); while (OB_SUCC(ret)) { @@ -1825,6 +1826,7 @@ int ObStartCompleteMigrationTask::check_tablet_transfer_table_ready_( ret = OB_ERR_UNEXPECTED; LOG_WARN("transfer service should not be NULL", K(ret), KP(transfer_service)); } else { + DEBUG_SYNC(BEFORE_CHECK_TABLET_TRANSFER_TABLE_READY); const int64_t wait_tablet_start_ts = ObTimeUtility::current_time(); bool need_check_again = false; @@ -1973,6 +1975,7 @@ int ObStartCompleteMigrationTask::wait_log_replay_to_max_minor_end_scn_() } else if (OB_FAIL(init_timeout_ctx_(timeout, timeout_ctx))) { LOG_WARN("failed to init timeout ctx", K(ret)); } else { + DEBUG_SYNC(BEFORE_LOG_REPLAY_TO_MAX_MINOR_END_SCN); const int64_t wait_replay_start_ts = ObTimeUtility::current_time(); while (OB_SUCC(ret)) { if (timeout_ctx.is_timeouted()) { @@ -2026,6 +2029,7 @@ int ObStartCompleteMigrationTask::check_ls_and_task_status_( bool is_cancel = false; bool is_ls_deleted = true; int32_t result = OB_SUCCESS; + ObIDagNet *dag_net = nullptr; if (OB_ISNULL(ls)) { ret = OB_INVALID_ARGUMENT; @@ -2036,11 +2040,15 @@ int ObStartCompleteMigrationTask::check_ls_and_task_status_( } else if (ls->is_stopped()) { ret = OB_NOT_RUNNING; LOG_WARN("ls is not running, stop migration dag net", K(ret), KPC(ctx_)); - } else if (OB_FAIL(SYS_TASK_STATUS_MGR.is_task_cancel(get_dag()->get_dag_id(), is_cancel))) { - STORAGE_LOG(ERROR, "failed to check is task canceled", K(ret), K(*this)); - } else if (is_cancel) { + } else if (OB_ISNULL(this->get_dag())) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("dag should not be nullptr", K(ret), KP(this->get_dag())); + } else if (OB_ISNULL(dag_net = this->get_dag()->get_dag_net())) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("dag net should not be nullptr", K(ret), KP(dag_net)); + } else if (dag_net->is_cancel()) { ret = OB_CANCELED; - STORAGE_LOG(WARN, "task is cancelled", K(ret), K(*this)); + LOG_WARN("task is cancelled", K(ret), K(*this)); } else if (OB_FAIL(ObStorageHAUtils::check_ls_deleted(ls->get_ls_id(), is_ls_deleted))) { LOG_WARN("failed to get ls status from inner table", K(ret)); } else if (is_ls_deleted) { diff --git a/src/storage/high_availability/ob_ls_migration.cpp b/src/storage/high_availability/ob_ls_migration.cpp index 1824399e5..b773059db 100644 --- a/src/storage/high_availability/ob_ls_migration.cpp +++ b/src/storage/high_availability/ob_ls_migration.cpp @@ -1544,6 +1544,7 @@ int ObStartMigrationTask::create_all_tablets_( ObLS *ls = nullptr; ObArray tablet_id_array; bool need_check_tablet_limit = false; + ObIDagNet *dag_net = nullptr; if (!is_inited_) { ret = OB_NOT_INIT; @@ -1551,6 +1552,12 @@ int ObStartMigrationTask::create_all_tablets_( } else if (OB_ISNULL(ob_reader)) { ret = OB_INVALID_ARGUMENT; LOG_WARN("create all tablets get ivnalid argument", K(ret)); + } else if (OB_ISNULL(this->get_dag())) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("dag should not be 
nullptr", K(ret), KP(this->get_dag())); + } else if (OB_ISNULL(dag_net = this->get_dag()->get_dag_net())) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("dag net should not be nullptr", K(ret), KP(dag_net)); } else if (FALSE_IT(need_check_tablet_limit = ctx_->arg_.type_ != ObMigrationOpType::REBUILD_LS_OP)) { } else if (OB_FAIL(ObStorageHADagUtils::get_ls(ctx_->arg_.ls_id_, ls_handle))) { LOG_WARN("failed to get ls", K(ret), KPC(ctx_)); @@ -1561,7 +1568,7 @@ int ObStartMigrationTask::create_all_tablets_( ctx_->tenant_id_, tablet_id_array, ctx_->minor_src_, ctx_->local_rebuild_seq_, ctx_->arg_.type_, ls, &ctx_->ha_table_info_mgr_, ha_tablets_builder))) { LOG_WARN("failed to init ha tablets builder", K(ret), KPC(ctx_)); - } else if (OB_FAIL(ha_tablets_builder.create_all_tablets(need_check_tablet_limit, ob_reader, + } else if (OB_FAIL(ha_tablets_builder.create_all_tablets(need_check_tablet_limit, ob_reader, dag_net, ctx_->sys_tablet_id_array_, ctx_->data_tablet_id_array_, ctx_->tablet_simple_info_map_))) { LOG_WARN("failed to create all tablets", K(ret), KPC(ctx_)); @@ -1641,10 +1648,17 @@ int ObStartMigrationTask::create_all_tablets_with_4_1_rpc_() ObLSHandle ls_handle; ObLS *ls = nullptr; ObArray tablet_id_array; + ObIDagNet *dag_net = nullptr; if (!is_inited_) { ret = OB_NOT_INIT; LOG_WARN("start migration task do not init", K(ret)); + } else if (OB_ISNULL(this->get_dag())) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("dag should not be nullptr", K(ret), KP(this->get_dag())); + } else if (OB_ISNULL(dag_net = this->get_dag()->get_dag_net())) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("dag net should not be nullptr", K(ret), KP(dag_net)); } else if (OB_FAIL(ObStorageHAUtils::append_tablet_list(ctx_->sys_tablet_id_array_, tablet_id_array))) { LOG_WARN("failed to append sys tablet id array", K(ret), KPC(ctx_)); } else if (OB_FAIL(ObStorageHAUtils::append_tablet_list(ctx_->data_tablet_id_array_, tablet_id_array))) { @@ -1658,7 +1672,7 @@ int ObStartMigrationTask::create_all_tablets_with_4_1_rpc_() ctx_->tenant_id_, tablet_id_array, ctx_->minor_src_, ctx_->local_rebuild_seq_, ctx_->arg_.type_, ls, &ctx_->ha_table_info_mgr_, ha_tablets_builder))) { LOG_WARN("failed to init ha tablets builder", K(ret), KPC(ctx_)); - } else if (OB_FAIL(ha_tablets_builder.create_all_tablets_with_4_1_rpc( + } else if (OB_FAIL(ha_tablets_builder.create_all_tablets_with_4_1_rpc(dag_net, ctx_->tablet_simple_info_map_, ctx_->sys_tablet_id_array_, ctx_->data_tablet_id_array_))) { LOG_WARN("failed to create all tablets", K(ret), KPC(ctx_)); } @@ -1895,12 +1909,20 @@ int ObSysTabletsMigrationTask::process() int ObSysTabletsMigrationTask::build_tablets_sstable_info_() { int ret = OB_SUCCESS; + ObIDagNet *dag_net = nullptr; if (!is_inited_) { ret = OB_NOT_INIT; LOG_WARN("sys tablets migration task do not init", K(ret), KPC(ctx_)); - } else if (OB_FAIL(ha_tablets_builder_.build_tablets_sstable_info())) { + } else if (OB_ISNULL(this->get_dag())) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("dag should not be nullptr", K(ret), KP(this->get_dag())); + } else if (OB_ISNULL(dag_net = this->get_dag()->get_dag_net())) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("dag net should not be nullptr", K(ret), KP(dag_net)); + } else if (OB_FAIL(ha_tablets_builder_.build_tablets_sstable_info(dag_net))) { LOG_WARN("failed to build tablets sstable info", K(ret), KPC(ctx_)); } + return ret; } @@ -2904,6 +2926,7 @@ int ObTabletMigrationTask::try_update_tablet_() ObLS *ls = nullptr; bool is_exist = false; ObCopyTabletStatus::STATUS status = ObCopyTabletStatus::MAX_STATUS; 
+ ObIDagNet *dag_net = nullptr; if (!is_inited_) { ret = OB_NOT_INIT; @@ -2937,9 +2960,12 @@ int ObTabletMigrationTask::try_update_tablet_() } if (OB_FAIL(ret)) { - } else if (copy_tablet_ctx_->tablet_id_.is_ls_inner_tablet() && OB_FAIL(ha_tablets_builder.create_or_update_tablets())) { + } else if (OB_ISNULL(dag_net = dag->get_dag_net())) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("dag net should not be nullptr", K(ret), KP(dag_net)); + } else if (copy_tablet_ctx_->tablet_id_.is_ls_inner_tablet() && OB_FAIL(ha_tablets_builder.create_or_update_tablets(dag_net))) { LOG_WARN("failed to create or update inner tablet", K(ret), KPC(ctx_)); - } else if (OB_FAIL(ha_tablets_builder.build_tablets_sstable_info())) { + } else if (OB_FAIL(ha_tablets_builder.build_tablets_sstable_info(dag_net))) { LOG_WARN("failed to build tablets sstable info", K(ret), KPC(ctx_), KPC(copy_tablet_ctx_)); } else if (OB_FAIL(ctx_->ha_table_info_mgr_.check_tablet_table_info_exist(copy_tablet_ctx_->tablet_id_, is_exist))) { LOG_WARN("failed to check tablet table info exist", K(ret), KPC(copy_tablet_ctx_)); @@ -3745,6 +3771,7 @@ int ObDataTabletsMigrationTask::try_remove_unneeded_tablets_() ObArray tablet_id_array; const int64_t MAX_BUCKET_NUM = 1024; const bool need_initial_state = true; + ObIDagNet *dag_net = nullptr; if (!is_inited_) { ret = OB_NOT_INIT; @@ -3780,7 +3807,16 @@ int ObDataTabletsMigrationTask::try_remove_unneeded_tablets_() LOG_WARN("failed to build tablet iter", K(ret), KPC(ctx_)); } else { while (OB_SUCC(ret)) { - if (OB_FAIL(iter.get_next_tablet_id(tablet_id))) { + if (OB_ISNULL(this->get_dag())) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("dag should not be nullptr", K(ret), KP(this->get_dag())); + } else if (OB_ISNULL(dag_net = this->get_dag()->get_dag_net())) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("dag net should not be nullptr", K(ret), KP(dag_net)); + } else if (dag_net->is_cancel()) { + ret = OB_CANCELED; + LOG_WARN("task is cancelled", K(ret), K(*this)); + } else if (OB_FAIL(iter.get_next_tablet_id(tablet_id))) { if (OB_ITER_END == ret) { ret = OB_SUCCESS; break; @@ -4301,6 +4337,7 @@ int ObTabletGroupMigrationTask::build_tablets_sstable_info_() { int ret = OB_SUCCESS; bool has_inner_table = false; + ObIDagNet *dag_net = nullptr; ObArray tablet_id_array; if (!is_inited_) { @@ -4315,7 +4352,13 @@ int ObTabletGroupMigrationTask::build_tablets_sstable_info_() DEBUG_SYNC(BEFORE_MIGRATION_BUILD_TABLET_SSTABLE_INFO); } - if (OB_FAIL(ha_tablets_builder_.build_tablets_sstable_info())) { + if (OB_ISNULL(this->get_dag())) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("dag should not be nullptr", K(ret), KP(this->get_dag())); + } else if (OB_ISNULL(dag_net = this->get_dag()->get_dag_net())) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("dag net should not be nullptr", K(ret), KP(dag_net)); + } else if (OB_FAIL(ha_tablets_builder_.build_tablets_sstable_info(dag_net))) { LOG_WARN("failed to build tablets sstable info", K(ret), KPC(ctx_)); } } diff --git a/src/storage/high_availability/ob_ls_migration_handler.cpp b/src/storage/high_availability/ob_ls_migration_handler.cpp index d4a8ef690..ec3dd6a64 100644 --- a/src/storage/high_availability/ob_ls_migration_handler.cpp +++ b/src/storage/high_availability/ob_ls_migration_handler.cpp @@ -44,7 +44,7 @@ int ObLSMigrationHandlerStatusHelper::check_can_change_status( if (!is_valid(curr_status) || !is_valid(change_status)) { ret = OB_INVALID_ARGUMENT; LOG_WARN("check can change status get invalid argument", K(ret), K(curr_status), K(change_status)); - }else { + } else { switch 
(curr_status) { case ObLSMigrationHandlerStatus::INIT: { if (ObLSMigrationHandlerStatus::INIT == change_status @@ -164,7 +164,8 @@ ObLSMigrationHandler::ObLSMigrationHandler() lock_(), status_(ObLSMigrationHandlerStatus::INIT), result_(OB_SUCCESS), - is_stop_(false) + is_stop_(false), + is_cancel_(false) { } @@ -297,6 +298,7 @@ void ObLSMigrationHandler::reuse_() task_list_.reset(); status_ = ObLSMigrationHandlerStatus::INIT; result_ = OB_SUCCESS; + is_cancel_ = false; } void ObLSMigrationHandler::wakeup_() @@ -320,14 +322,8 @@ int ObLSMigrationHandler::get_ls_migration_task_(ObLSMigrationTask &task) LOG_WARN("ls migration handler do not init", K(ret)); } else { common::SpinRLockGuard guard(lock_); - if (task_list_.empty()) { - ret = OB_ENTRY_NOT_EXIST; - LOG_WARN("migration task is empty", K(ret), KPC(ls_)); - } else if (task_list_.count() > 1) { - ret = OB_ERR_UNEXPECTED; - LOG_WARN("ls migration task count should not more than 1", K(ret), K(task_list_), KPC(ls_)); - } else { - task = task_list_.at(0); + if (OB_FAIL(get_ls_migration_task_with_nolock_(task))) { + LOG_WARN("failed to get ls migration task", K(ret)); } } return ret; @@ -419,30 +415,15 @@ int ObLSMigrationHandler::add_ls_migration_task( int ObLSMigrationHandler::switch_next_stage(const int32_t result) { int ret = OB_SUCCESS; - ObLSMigrationHandlerStatus next_status = ObLSMigrationHandlerStatus::MAX_STATUS; - bool can_change = false; - int32_t new_result = OB_SUCCESS; if (!is_inited_) { ret = OB_NOT_INIT; LOG_WARN("ls migration handler do not init", K(ret)); } else { common::SpinWLockGuard guard(lock_); - new_result = OB_SUCCESS != result_ ? result_ : result; - - if (OB_FAIL(ObLSMigrationHandlerStatusHelper::get_next_change_status(status_, new_result, next_status))) { - LOG_WARN("failed to get next change status", K(ret), K(status_), K(result), K(new_result)); - } else if (OB_FAIL(ObLSMigrationHandlerStatusHelper::check_can_change_status(status_, next_status, can_change))) { - LOG_WARN("failed to check can change status", K(ret), K(status_), K(next_status)); - } else if (!can_change) { - ret = OB_ERR_UNEXPECTED; - LOG_WARN("can not change ls migration handler status", K(ret), K(status_), K(next_status)); - } else { - FLOG_INFO("report result", K(result), K(new_result), K(result_), K(status_), K(next_status)); - result_ = new_result; - status_ = next_status; + if (OB_FAIL(switch_next_stage_with_nolock_(result))) { + LOG_WARN("failed to switch next stage", K(ret), K(result)); } - wakeup_(); } return ret; } @@ -541,6 +522,43 @@ int ObLSMigrationHandler::process() return ret; } +int ObLSMigrationHandler::cancel_task(const share::ObTaskId &task_id, bool &is_exist) +{ + int ret = OB_SUCCESS; + ObTenantDagScheduler *scheduler = nullptr; + is_exist = false; + if (IS_NOT_INIT) { + ret = OB_NOT_INIT; + LOG_WARN("ls migration handle do not init", K(ret)); + } else if (!task_id.is_valid()) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", K(ret), K(task_id)); + } else { + common::SpinWLockGuard guard(lock_); + if (OB_FAIL(check_task_exist_with_nolock_(task_id, is_exist))) { + LOG_WARN("fail to check task exist", K(ret), K(task_id)); + } else if (!is_exist) { + LOG_INFO("task is not exist in migration task", K(task_id)); + } else if (OB_ISNULL(scheduler = MTL(ObTenantDagScheduler*))) { + ret = OB_ERR_UNEXPECTED; + LOG_ERROR("failed to get ObTenantDagScheduler from MTL", K(ret)); + } + // If task not exist, cancel_dag_net return OB_SUCCESS + else if (OB_FAIL(scheduler->cancel_dag_net(task_id))) { + LOG_WARN("failed to cancel 
dag net", K(ret), K(this), K(task_id)); + } else { + is_cancel_ = true; + } + } + return ret; +} + +bool ObLSMigrationHandler::is_cancel() const +{ + common::SpinRLockGuard guard(lock_); + return is_cancel_; +} + int ObLSMigrationHandler::do_init_status_() { int ret = OB_SUCCESS; @@ -555,6 +573,9 @@ int ObLSMigrationHandler::do_init_status_() LOG_WARN("ls migration handler do not init", K(ret)); } else if (is_migration_failed_()) { //do nothing + } else if (is_cancel()) { + ret = OB_CANCELED; + LOG_WARN("task is canceled", K(ret)); } else { // this lock make sure the ls creating is not scheduled to migrate. ObLSLockGuard lock_ls(ls_, true /* read lock */); @@ -740,23 +761,39 @@ int ObLSMigrationHandler::schedule_build_ls_dag_net_( ret = OB_INVALID_ARGUMENT; LOG_WARN("schedule build ls dag net get invalid argument", K(ret), K(task)); } else { + const int32_t cancel_result = OB_CANCELED; +#ifdef ERRSIM + SERVER_EVENT_ADD("storage_ha", "build_ls_migration_dag_net_before", + "tenant_id", ls_->get_tenant_id(), + "ls_id", ls_->get_ls_id().id(), + "src", task.arg_.src_.get_server(), + "dst", task.arg_.dst_.get_server(), + "task_id", task.task_id_); +#endif DEBUG_SYNC(BEFORE_BUILD_LS_MIGRATION_DAG_NET); - ObTenantDagScheduler *scheduler = nullptr; - ObMigrationDagNetInitParam param; - param.arg_ = task.arg_; - param.task_id_ = task.task_id_; - param.bandwidth_throttle_ = bandwidth_throttle_; - param.storage_rpc_ = storage_rpc_; - param.svr_rpc_proxy_ = svr_rpc_proxy_; - param.sql_proxy_ = sql_proxy_; - - if (OB_ISNULL(scheduler = MTL(ObTenantDagScheduler*))) { - ret = OB_ERR_UNEXPECTED; - LOG_WARN("failed to get ObTenantDagScheduler from MTL", K(ret), KP(scheduler)); - } else if (OB_FAIL(scheduler->create_and_add_dag_net(¶m))) { - LOG_WARN("failed to create and add migration dag net", K(ret), K(task), KPC(ls_)); + common::SpinWLockGuard guard(lock_); + if (is_cancel_) { + if (OB_FAIL(switch_next_stage_with_nolock_(cancel_result))) { + LOG_WARN("failed to swicth next stage cancel", K(ret)); + } } else { - LOG_INFO("success to create migration dag net", K(ret), K(task)); + ObTenantDagScheduler *scheduler = nullptr; + ObMigrationDagNetInitParam param; + param.arg_ = task.arg_; + param.task_id_ = task.task_id_; + param.bandwidth_throttle_ = bandwidth_throttle_; + param.storage_rpc_ = storage_rpc_; + param.svr_rpc_proxy_ = svr_rpc_proxy_; + param.sql_proxy_ = sql_proxy_; + + if (OB_ISNULL(scheduler = MTL(ObTenantDagScheduler*))) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("failed to get ObTenantDagScheduler from MTL", K(ret), KP(scheduler)); + } else if (OB_FAIL(scheduler->create_and_add_dag_net(¶m))) { + LOG_WARN("failed to create and add migration dag net", K(ret), K(task), KPC(ls_)); + } else { + LOG_INFO("success to create migration dag net", K(ret), K(task)); + } } } return ret; @@ -792,18 +829,26 @@ int ObLSMigrationHandler::schedule_prepare_ls_dag_net_( ret = OB_INVALID_ARGUMENT; LOG_WARN("schedule prepare ls dag net get invalid argument", K(ret), K(task)); } else { - ObTenantDagScheduler *scheduler = nullptr; - ObLSPrepareMigrationParam param; - param.arg_ = task.arg_; - param.task_id_ = task.task_id_; - - if (OB_ISNULL(scheduler = MTL(ObTenantDagScheduler*))) { - ret = OB_ERR_UNEXPECTED; - LOG_WARN("failed to get ObTenantDagScheduler from MTL", K(ret), KP(scheduler)); - } else if (OB_FAIL(scheduler->create_and_add_dag_net(¶m))) { - LOG_WARN("failed to create and add migration dag net", K(ret), K(task), KPC(ls_)); + const int32_t cancel_result = OB_CANCELED; + common::SpinWLockGuard guard(lock_); + 
if (is_cancel_) { + if (OB_FAIL(switch_next_stage_with_nolock_(cancel_result))) { + LOG_WARN("failed to switch next stage when canceled", K(ret)); + } } else { - LOG_INFO("success to create ls prepare migration dag net", K(ret), K(task)); + ObTenantDagScheduler *scheduler = nullptr; + ObLSPrepareMigrationParam param; + param.arg_ = task.arg_; + param.task_id_ = task.task_id_; + + if (OB_ISNULL(scheduler = MTL(ObTenantDagScheduler*))) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("failed to get ObTenantDagScheduler from MTL", K(ret), KP(scheduler)); + } else if (OB_FAIL(scheduler->create_and_add_dag_net(&param))) { + LOG_WARN("failed to create and add migration dag net", K(ret), K(task), KPC(ls_)); + } else { + LOG_INFO("success to create ls prepare migration dag net", K(ret), K(task)); + } } } return ret; @@ -1171,7 +1216,69 @@ void ObLSMigrationHandler::wait(bool &wait_finished) } } +int ObLSMigrationHandler::get_ls_migration_task_with_nolock_(ObLSMigrationTask &task) const +{ + int ret = OB_SUCCESS; + task.reset(); + if (task_list_.empty()) { + ret = OB_ENTRY_NOT_EXIST; + LOG_WARN("migration task is empty", K(ret), KPC(ls_)); + } else if (task_list_.count() > 1) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("ls migration task count should not be more than 1", K(ret), K(task_list_), KPC(ls_)); + } else { + task = task_list_.at(0); + } + return ret; +} +int ObLSMigrationHandler::check_task_exist_with_nolock_(const share::ObTaskId &task_id, bool &is_exist) const +{ + int ret = OB_SUCCESS; + is_exist = false; + ObLSMigrationTask task; + if (task_id.is_invalid()) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", K(ret), K(task_id)); + } else if (OB_FAIL(get_ls_migration_task_with_nolock_(task))) { + if (OB_ENTRY_NOT_EXIST == ret) { + is_exist = false; + ret = OB_SUCCESS; + } else { + LOG_WARN("failed to get ls migration task", K(ret), KPC(ls_)); + } + } else if (task_id == task.task_id_) { + is_exist = true; + } else { + is_exist = false; + } + return ret; +} + +int ObLSMigrationHandler::switch_next_stage_with_nolock_(const int32_t result) +{ + int ret = OB_SUCCESS; + ObLSMigrationHandlerStatus next_status = ObLSMigrationHandlerStatus::MAX_STATUS; + bool can_change = false; + int32_t new_result = OB_SUCCESS; + + new_result = OB_SUCCESS != result_ ? 
result_ : result; + + if (OB_FAIL(ObLSMigrationHandlerStatusHelper::get_next_change_status(status_, new_result, next_status))) { + LOG_WARN("failed to get next change status", K(ret), K(status_), K(result), K(new_result)); + } else if (OB_FAIL(ObLSMigrationHandlerStatusHelper::check_can_change_status(status_, next_status, can_change))) { + LOG_WARN("failed to check can change status", K(ret), K(status_), K(next_status)); + } else if (!can_change) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("can not change ls migration handler status", K(ret), K(status_), K(next_status)); + } else { + FLOG_INFO("report result", K(result), K(new_result), K(result_), K(status_), K(next_status)); + result_ = new_result; + status_ = next_status; + } + wakeup_(); + return ret; +} } } diff --git a/src/storage/high_availability/ob_ls_migration_handler.h b/src/storage/high_availability/ob_ls_migration_handler.h index 172dab351..0344c8388 100644 --- a/src/storage/high_availability/ob_ls_migration_handler.h +++ b/src/storage/high_availability/ob_ls_migration_handler.h @@ -84,6 +84,8 @@ public: void destroy(); void stop(); void wait(bool &wait_finished); + int cancel_task(const share::ObTaskId &task_id, bool &is_exist); + bool is_cancel() const; private: void reuse_(); @@ -130,6 +132,9 @@ private: const uint64_t tenant_id, const share::ObLSID &ls_id, share::ObLSInfo &ls_info); + int get_ls_migration_task_with_nolock_(ObLSMigrationTask &task) const; + int check_task_exist_with_nolock_(const share::ObTaskId &task_id, bool &is_exist) const; + int switch_next_stage_with_nolock_(const int32_t result); private: bool is_inited_; @@ -146,6 +151,7 @@ private: ObLSMigrationHandlerStatus status_; int32_t result_; bool is_stop_; + bool is_cancel_; DISALLOW_COPY_AND_ASSIGN(ObLSMigrationHandler); }; diff --git a/src/storage/high_availability/ob_ls_prepare_migration.cpp b/src/storage/high_availability/ob_ls_prepare_migration.cpp index 9042092cf..8e977835a 100644 --- a/src/storage/high_availability/ob_ls_prepare_migration.cpp +++ b/src/storage/high_availability/ob_ls_prepare_migration.cpp @@ -828,11 +828,22 @@ int ObStartPrepareMigrationTask::wait_transfer_tablets_ready_() } else if (OB_FAIL(ls->build_tablet_iter(tablet_iterator))) { LOG_WARN("failed to build ls tablet iter", K(ret), KPC(ctx_)); } else { + DEBUG_SYNC(BEFORE_WAIT_TRANSFER_OUT_TABLET_READY); + ObIDagNet *dag_net = nullptr; while (OB_SUCC(ret)) { ObTabletHandle tablet_handle; ObTablet *tablet = nullptr; user_data.reset(); - if (OB_FAIL(tablet_iterator.get_next_tablet(tablet_handle))) { + if (OB_ISNULL(this->get_dag())) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("dag should not be nullptr", K(ret), KP(this->get_dag())); + } else if (OB_ISNULL(dag_net = this->get_dag()->get_dag_net())) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("dag net should not be nullptr", K(ret), KP(dag_net)); + } else if (dag_net->is_cancel()) { + ret = OB_CANCELED; + LOG_WARN("task is cancelled", K(ret), K(*this)); + } else if (OB_FAIL(tablet_iterator.get_next_tablet(tablet_handle))) { if (OB_ITER_END == ret) { ret = OB_SUCCESS; break; @@ -897,6 +908,7 @@ int ObStartPrepareMigrationTask::wait_transfer_out_tablet_ready_( LOG_WARN("tablet status is unexpected", K(ret), K(user_data), KPC(tablet)); } else { const int64_t wait_transfer_tablet_ready_ts = ObTimeUtility::current_time(); + ObIDagNet *dag_net = nullptr; while (OB_SUCC(ret)) { if (ctx_->is_failed()) { ret = OB_CANCELED; @@ -904,11 +916,15 @@ int ObStartPrepareMigrationTask::wait_transfer_out_tablet_ready_( } else if (ls->is_stopped()) { ret = 
OB_NOT_RUNNING; LOG_WARN("ls is not running, stop migration dag net", K(ret), K(ctx_)); - } else if (OB_FAIL(SYS_TASK_STATUS_MGR.is_task_cancel(get_dag()->get_dag_id(), is_cancel))) { - STORAGE_LOG(ERROR, "failed to check is task canceled", K(ret), K(*this)); - } else if (is_cancel) { + } else if (OB_ISNULL(this->get_dag())) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("dag should not be nullptr", K(ret), KP(this->get_dag())); + } else if (OB_ISNULL(dag_net = this->get_dag()->get_dag_net())) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("dag net should not be nullptr", K(ret), KP(dag_net)); + } else if (dag_net->is_cancel()) { ret = OB_CANCELED; - STORAGE_LOG(WARN, "task is cancelled", K(ret), K(*this)); + LOG_WARN("task is cancelled", K(ret), K(*this)); } else if (OB_FAIL(ObStorageHADagUtils::get_ls(user_data.transfer_ls_id_, dest_ls_handle))) { if (OB_LS_NOT_EXIST == ret) { ret = OB_SUCCESS; diff --git a/src/storage/high_availability/ob_ls_restore.cpp b/src/storage/high_availability/ob_ls_restore.cpp index fcb3c97dc..1225c448b 100644 --- a/src/storage/high_availability/ob_ls_restore.cpp +++ b/src/storage/high_availability/ob_ls_restore.cpp @@ -1353,10 +1353,17 @@ int ObSysTabletsRestoreTask::process() int ObSysTabletsRestoreTask::create_or_update_tablets_() { int ret = OB_SUCCESS; + ObIDagNet *dag_net = nullptr; if (!is_inited_) { ret = OB_NOT_INIT; LOG_WARN("sys tablets restore task do not init", K(ret)); - } else if (OB_FAIL(ha_tablets_builder_.create_or_update_tablets())) { + } else if (OB_ISNULL(this->get_dag())) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("dag should not be nullptr", K(ret), KP(this->get_dag())); + } else if (OB_ISNULL(dag_net = this->get_dag()->get_dag_net())) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("dag net should not be nullptr", K(ret), KP(dag_net)); + } else if (OB_FAIL(ha_tablets_builder_.create_or_update_tablets(dag_net))) { LOG_WARN("failed to create or update tablets", K(ret), KPC(ctx_)); } return ret; @@ -1365,11 +1372,17 @@ int ObSysTabletsRestoreTask::create_or_update_tablets_() int ObSysTabletsRestoreTask::build_tablets_sstable_info_() { int ret = OB_SUCCESS; - + ObIDagNet *dag_net = nullptr; if (!is_inited_) { ret = OB_NOT_INIT; LOG_WARN("sys tablets restore task do not init", K(ret), KPC(ctx_)); - } else if (OB_FAIL(ha_tablets_builder_.build_tablets_sstable_info())) { + } else if (OB_ISNULL(this->get_dag())) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("dag should not be nullptr", K(ret), KP(this->get_dag())); + } else if (OB_ISNULL(dag_net = this->get_dag()->get_dag_net())) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("dag net should not be nullptr", K(ret), KP(dag_net)); + } else if (OB_FAIL(ha_tablets_builder_.build_tablets_sstable_info(dag_net))) { LOG_WARN("failed to build tablets sstable info", K(ret), KPC(ctx_)); } return ret; diff --git a/src/storage/high_availability/ob_storage_ha_dag.cpp b/src/storage/high_availability/ob_storage_ha_dag.cpp index 01d5252ce..2a86eeb42 100644 --- a/src/storage/high_availability/ob_storage_ha_dag.cpp +++ b/src/storage/high_availability/ob_storage_ha_dag.cpp @@ -823,7 +823,45 @@ int ObStorageHATaskUtils::check_ddl_sstable_need_copy_( return ret; } +int ObStorageHACancelDagNetUtils::cancel_task(const share::ObLSID &ls_id, const share::ObTaskId &task_id) +{ + int ret = OB_SUCCESS; + ObLSHandle ls_handle; + if (!ls_id.is_valid() || !task_id.is_valid()) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", K(ret), K(ls_id), K(task_id)); + } else if (OB_FAIL(ObStorageHADagUtils::get_ls(ls_id, ls_handle))) { + LOG_WARN("failed to 
get ls", K(ret), K(ls_id)); + } else { + bool is_exist = false; + if (OB_FAIL(cancel_migration_task_(task_id, ls_handle, is_exist))) { + LOG_WARN("failed to cancel migration task.", K(ret), K(ls_id), K(task_id), K(ls_handle)); + } else if (is_exist) { + } else { + ret = OB_ENTRY_NOT_EXIST; + LOG_WARN("task is not exist", K(ret), K(ls_id), K(task_id)); + } + } + return ret; +} +int ObStorageHACancelDagNetUtils::cancel_migration_task_(const share::ObTaskId &task_id, + const ObLSHandle &ls_handle, bool &is_exist) +{ + int ret = OB_SUCCESS; + ObLS *ls = nullptr; + is_exist = false; + if (!task_id.is_valid() || !ls_handle.is_valid()) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", K(ret), K(task_id), K(ls_handle)); + } else if (OB_ISNULL(ls = ls_handle.get_ls())) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("log stream should not be nullptr", K(ret), KP(ls)); + } else if (OB_FAIL(ls->get_ls_migration_handler()->cancel_task(task_id, is_exist))) { + LOG_WARN("failed to cancel migration task", K(ret), K(task_id), K(ls_handle)); + } + return ret; +} } } diff --git a/src/storage/high_availability/ob_storage_ha_dag.h b/src/storage/high_availability/ob_storage_ha_dag.h index 7eb156184..75397dd3f 100644 --- a/src/storage/high_availability/ob_storage_ha_dag.h +++ b/src/storage/high_availability/ob_storage_ha_dag.h @@ -197,7 +197,13 @@ private: }; - +class ObStorageHACancelDagNetUtils +{ +public: + static int cancel_task(const share::ObLSID &ls_id, const share::ObTaskId &task_id); +private: + static int cancel_migration_task_(const share::ObTaskId &task_id, const ObLSHandle &ls_handle, bool &is_exist); +}; } } #endif diff --git a/src/storage/high_availability/ob_storage_ha_tablet_builder.cpp b/src/storage/high_availability/ob_storage_ha_tablet_builder.cpp index aa225c19c..578e84222 100644 --- a/src/storage/high_availability/ob_storage_ha_tablet_builder.cpp +++ b/src/storage/high_availability/ob_storage_ha_tablet_builder.cpp @@ -148,7 +148,7 @@ int ObStorageHATabletsBuilder::init(const ObStorageHATabletsBuilderParam ¶m) return ret; } -int ObStorageHATabletsBuilder::create_or_update_tablets() +int ObStorageHATabletsBuilder::create_or_update_tablets(ObIDagNet *dag_net) { int ret = OB_SUCCESS; ObLS *ls = nullptr; @@ -160,6 +160,9 @@ int ObStorageHATabletsBuilder::create_or_update_tablets() if (!is_inited_) { ret = OB_NOT_INIT; LOG_WARN("storage ha tablets builder do not init", K(ret)); + } else if (OB_ISNULL(dag_net)) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argumnet", K(ret), KP(dag_net)); } else if (OB_ISNULL(ls = param_.ls_)) { ret = OB_ERR_UNEXPECTED; LOG_WARN("log stream should not be NULL", K(ret), KP(ls), K(param_)); @@ -168,7 +171,13 @@ int ObStorageHATabletsBuilder::create_or_update_tablets() } else { while (OB_SUCC(ret)) { tablet_info.reset(); - if (OB_FAIL(reader->fetch_tablet_info(tablet_info))) { + if (OB_ISNULL(dag_net)) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argumnet", K(ret), KP(dag_net)); + } else if (dag_net->is_cancel()) { + ret = OB_CANCELED; + LOG_WARN("task is cancelled", K(ret)); + } else if (OB_FAIL(reader->fetch_tablet_info(tablet_info))) { if (OB_ITER_END == ret) { ret = OB_SUCCESS; break; @@ -202,6 +211,7 @@ int ObStorageHATabletsBuilder::create_or_update_tablets() int ObStorageHATabletsBuilder::create_all_tablets( const bool need_check_tablet_limit, ObICopyLSViewInfoReader *reader, + ObIDagNet *dag_net, common::ObIArray &sys_tablet_id_list, common::ObIArray &data_tablet_id_list, CopyTabletSimpleInfoMap &simple_info_map) @@ -218,9 +228,9 @@ int 
ObStorageHATabletsBuilder::create_all_tablets( if (!is_inited_) { ret = OB_NOT_INIT; LOG_WARN("storage ha tablets builder do not init", K(ret)); - } else if (OB_ISNULL(reader)) { + } else if (OB_ISNULL(reader) || OB_ISNULL(dag_net)) { ret = OB_INVALID_ARGUMENT; - LOG_WARN("create all tablets get invalid argument", K(ret), KP(reader)); + LOG_WARN("create all tablets get invalid argument", K(ret), KP(reader), KP(dag_net)); } else if (OB_ISNULL(ls = param_.ls_)) { ret = OB_ERR_UNEXPECTED; LOG_WARN("log stream should not be NULL", K(ret), KP(ls), K(param_)); @@ -231,7 +241,13 @@ int ObStorageHATabletsBuilder::create_all_tablets( tablet_info.reset(); tablet_simple_info.reset(); logic_tablet_id.reset(); - if (OB_FAIL(reader->get_next_tablet_info(tablet_info))) { + if (OB_ISNULL(dag_net)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("dag net should not be nullptr", K(ret), KP(dag_net)); + } else if (dag_net->is_cancel()) { + ret = OB_CANCELED; + LOG_WARN("task is cancelled", K(ret)); + } else if (OB_FAIL(reader->get_next_tablet_info(tablet_info))) { if (OB_ITER_END == ret) { ret = OB_SUCCESS; break; @@ -280,6 +296,7 @@ int ObStorageHATabletsBuilder::create_all_tablets( } int ObStorageHATabletsBuilder::create_all_tablets_with_4_1_rpc( + ObIDagNet *dag_net, CopyTabletSimpleInfoMap &simple_info_map, common::ObIArray &sys_tablet_id_list, common::ObIArray &data_tablet_id_list) @@ -298,6 +315,9 @@ int ObStorageHATabletsBuilder::create_all_tablets_with_4_1_rpc( if (!is_inited_) { ret = OB_NOT_INIT; LOG_WARN("storage ha tablets builder do not init", K(ret)); + } else if (OB_ISNULL(dag_net)) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("create all tablets get invalid argument", K(ret), KP(dag_net)); } else if (OB_ISNULL(ls = param_.ls_)) { ret = OB_ERR_UNEXPECTED; LOG_WARN("log stream should not be NULL", K(ret), KP(ls), K(param_)); @@ -307,7 +327,13 @@ int ObStorageHATabletsBuilder::create_all_tablets_with_4_1_rpc( while (OB_SUCC(ret)) { tablet_info.reset(); logic_tablet_id.reset(); - if (OB_FAIL(reader->fetch_tablet_info(tablet_info))) { + if (OB_ISNULL(dag_net)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("dag net should not be nullptr", K(ret), KP(dag_net)); + } else if (dag_net->is_cancel()) { + ret = OB_CANCELED; + LOG_WARN("task is cancelled", K(ret)); + } else if (OB_FAIL(reader->fetch_tablet_info(tablet_info))) { if (OB_ITER_END == ret) { ret = OB_SUCCESS; break; @@ -591,7 +617,7 @@ int ObStorageHATabletsBuilder::create_or_update_tablet_( return ret; } -int ObStorageHATabletsBuilder::build_tablets_sstable_info() +int ObStorageHATabletsBuilder::build_tablets_sstable_info(ObIDagNet *dag_net) { int ret = OB_SUCCESS; ObICopySSTableInfoReader *reader = nullptr; @@ -603,6 +629,9 @@ int ObStorageHATabletsBuilder::build_tablets_sstable_info() if (!is_inited_) { ret = OB_NOT_INIT; LOG_WARN("storage ha tablets builder do not init", K(ret)); + } else if (OB_ISNULL(dag_net)) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("build_tablets_sstable_info get invalid argument", K(ret), KP(dag_net)); } else if (OB_ISNULL(ls = param_.ls_)) { ret = OB_ERR_UNEXPECTED; LOG_WARN("log stream should not be NULL", K(ret), KP(ls)); @@ -617,8 +646,13 @@ int ObStorageHATabletsBuilder::build_tablets_sstable_info() while (OB_SUCC(ret)) { sstable_info.reset(); copy_header.reset(); - - if (OB_FAIL(reader->get_next_tablet_sstable_header(copy_header))) { + if (OB_ISNULL(dag_net)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("dag net should not be nullptr", K(ret), KP(dag_net)); + } else if (dag_net->is_cancel()) { + ret = OB_CANCELED; + LOG_WARN("task is 
cancelled", K(ret)); + } else if (OB_FAIL(reader->get_next_tablet_sstable_header(copy_header))) { if (OB_ITER_END == ret) { ret = OB_SUCCESS; break; diff --git a/src/storage/high_availability/ob_storage_ha_tablet_builder.h b/src/storage/high_availability/ob_storage_ha_tablet_builder.h index b057ba107..74b03739c 100644 --- a/src/storage/high_availability/ob_storage_ha_tablet_builder.h +++ b/src/storage/high_availability/ob_storage_ha_tablet_builder.h @@ -71,10 +71,11 @@ public: virtual ~ObStorageHATabletsBuilder(); int init(const ObStorageHATabletsBuilderParam ¶m); // Create all tablets with remote tablet meta. - int create_or_update_tablets(); + int create_or_update_tablets(ObIDagNet *dag_net); int create_all_tablets( const bool need_check_tablet_limit, ObICopyLSViewInfoReader *reader, + ObIDagNet *dag_net, common::ObIArray &sys_tablet_id_list, common::ObIArray &data_tablet_id_list, CopyTabletSimpleInfoMap &simple_info_map); @@ -83,8 +84,9 @@ public: // If that tablet meta identified uniquely by transfer sequence exists, replace and update the restore status to EMPTY. // Otherwise, just update it to UNDEFINED. int update_pending_tablets_with_remote(); - int build_tablets_sstable_info(); + int build_tablets_sstable_info(ObIDagNet *dag_net); int create_all_tablets_with_4_1_rpc( + ObIDagNet *dag_net, CopyTabletSimpleInfoMap &simple_info_map, common::ObIArray &sys_tablet_id_list, common::ObIArray &data_tablet_id_list); diff --git a/src/storage/high_availability/ob_tablet_group_restore.cpp b/src/storage/high_availability/ob_tablet_group_restore.cpp index c7b9d09bb..ed4fd80fe 100644 --- a/src/storage/high_availability/ob_tablet_group_restore.cpp +++ b/src/storage/high_availability/ob_tablet_group_restore.cpp @@ -1395,6 +1395,7 @@ int ObStartTabletGroupRestoreTask::generate_tablet_restore_dag_() int ObStartTabletGroupRestoreTask::create_tablets_sstable_() { int ret = OB_SUCCESS; + ObIDagNet *dag_net = nullptr; #ifdef ERRSIM if (ctx_->arg_.ls_id_.is_user_ls()) { @@ -1412,7 +1413,13 @@ int ObStartTabletGroupRestoreTask::create_tablets_sstable_() LOG_WARN("start tablet group restore task do not init", K(ret), KPC(ctx_)); } else if (ObTabletRestoreAction::is_restore_tablet_meta(ctx_->arg_.action_)) { //do nothing - } else if (OB_FAIL(ha_tablets_builder_.build_tablets_sstable_info())) { + } else if (OB_ISNULL(this->get_dag())) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("dag should not be nullptr", K(ret), KP(this->get_dag())); + } else if (OB_ISNULL(dag_net = this->get_dag()->get_dag_net())) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("dag net should not be nullptr", K(ret), KP(dag_net)); + } else if (OB_FAIL(ha_tablets_builder_.build_tablets_sstable_info(dag_net))) { LOG_WARN("failed to build tablets sstable info", K(ret), KPC(ctx_)); } return ret; @@ -2534,6 +2541,7 @@ int ObTabletRestoreTask::try_update_tablet_() ObLS *ls = nullptr; bool is_exist = false; ObCopyTabletStatus::STATUS status = ObCopyTabletStatus::MAX_STATUS; + ObIDagNet *dag_net = nullptr; if (!is_inited_) { ret = OB_NOT_INIT; @@ -2572,9 +2580,12 @@ int ObTabletRestoreTask::try_update_tablet_() } if (OB_FAIL(ret)) { - } else if (tablet_restore_ctx_->tablet_id_.is_ls_inner_tablet() && OB_FAIL(ha_tablets_builder.create_or_update_tablets())) { + } else if (OB_ISNULL(dag_net = dag->get_dag_net())) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("dag net should not be nullptr", K(ret), KP(dag_net)); + } else if (tablet_restore_ctx_->tablet_id_.is_ls_inner_tablet() && OB_FAIL(ha_tablets_builder.create_or_update_tablets(dag_net))) { LOG_WARN("failed to 
create or update inner tablet", K(ret), KPC(tablet_restore_ctx_)); - } else if (OB_FAIL(ha_tablets_builder.build_tablets_sstable_info())) { + } else if (OB_FAIL(ha_tablets_builder.build_tablets_sstable_info(dag_net))) { LOG_WARN("failed to build tablets sstable info", K(ret), KPC(tablet_restore_ctx_)); } else if (OB_FAIL(tablet_restore_ctx_->ha_table_info_mgr_->check_tablet_table_info_exist( tablet_restore_ctx_->tablet_id_, is_exist))) { diff --git a/tools/deploy/mysql_test/r/mysql/information_schema.result b/tools/deploy/mysql_test/r/mysql/information_schema.result index de38b876c..3d35af953 100644 --- a/tools/deploy/mysql_test/r/mysql/information_schema.result +++ b/tools/deploy/mysql_test/r/mysql/information_schema.result @@ -320,6 +320,7 @@ select * from information_schema.tables where table_schema in ('oceanbase', 'mys | def | oceanbase | DBA_OB_LS_LOCATIONS | SYSTEM VIEW | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | | | def | oceanbase | DBA_OB_LS_LOG_ARCHIVE_PROGRESS | SYSTEM VIEW | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | | | def | oceanbase | DBA_OB_LS_REPLICA_TASKS | SYSTEM VIEW | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | | +| def | oceanbase | DBA_OB_LS_REPLICA_TASK_HISTORY | SYSTEM VIEW | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | | | def | oceanbase | DBA_OB_MAJOR_COMPACTION | SYSTEM VIEW | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | | | def | oceanbase | DBA_OB_OUTLINES | SYSTEM VIEW | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | | | def | oceanbase | DBA_OB_OUTLINE_CONCURRENT_HISTORY | SYSTEM VIEW | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | | @@ -795,6 +796,7 @@ select * from information_schema.tables where table_schema in ('oceanbase', 'mys | def | oceanbase | __all_virtual_ls_meta_table | SYSTEM TABLE | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | | | def | oceanbase | __all_virtual_ls_recovery_stat | SYSTEM TABLE | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | | | def | oceanbase | __all_virtual_ls_replica_task | SYSTEM TABLE | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | | +| def | oceanbase | __all_virtual_ls_replica_task_history | SYSTEM TABLE | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | | | def | oceanbase | __all_virtual_ls_replica_task_plan | SYSTEM TABLE | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | | | def | oceanbase | __all_virtual_ls_restore_history | SYSTEM TABLE | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | | | def | oceanbase | 
__all_virtual_ls_restore_progress | SYSTEM TABLE | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | | @@ -1826,6 +1828,7 @@ select * from information_schema.tables where table_schema in ('oceanbase', 'mys | def | oceanbase | DBA_OB_LS_LOCATIONS | SYSTEM VIEW | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | | | def | oceanbase | DBA_OB_LS_LOG_ARCHIVE_PROGRESS | SYSTEM VIEW | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | | | def | oceanbase | DBA_OB_LS_REPLICA_TASKS | SYSTEM VIEW | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | | +| def | oceanbase | DBA_OB_LS_REPLICA_TASK_HISTORY | SYSTEM VIEW | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | | | def | oceanbase | DBA_OB_MAJOR_COMPACTION | SYSTEM VIEW | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | | | def | oceanbase | DBA_OB_OUTLINES | SYSTEM VIEW | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | | | def | oceanbase | DBA_OB_OUTLINE_CONCURRENT_HISTORY | SYSTEM VIEW | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | | @@ -2301,6 +2304,7 @@ select * from information_schema.tables where table_schema in ('oceanbase', 'mys | def | oceanbase | __all_virtual_ls_meta_table | SYSTEM TABLE | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | | | def | oceanbase | __all_virtual_ls_recovery_stat | SYSTEM TABLE | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | | | def | oceanbase | __all_virtual_ls_replica_task | SYSTEM TABLE | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | | +| def | oceanbase | __all_virtual_ls_replica_task_history | SYSTEM TABLE | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | | | def | oceanbase | __all_virtual_ls_replica_task_plan | SYSTEM TABLE | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | | | def | oceanbase | __all_virtual_ls_restore_history | SYSTEM TABLE | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | | | def | oceanbase | __all_virtual_ls_restore_progress | SYSTEM TABLE | MEMORY | NULL | DYNAMIC | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | utf8mb4_general_ci | NULL | NULL | | @@ -2862,6 +2866,10 @@ select * from information_schema.statistics where table_schema in ('oceanbase', | def | oceanbase | __all_ls_replica_task | 0 | oceanbase | PRIMARY | 2 | ls_id | A | NULL | NULL | NULL | | BTREE | VALID | | YES | NULL | | def | oceanbase | __all_ls_replica_task | 0 | oceanbase | PRIMARY | 3 | task_type | A | NULL | NULL | NULL | | BTREE | VALID | | YES | 
NULL | | def | oceanbase | __all_ls_replica_task | 0 | oceanbase | PRIMARY | 4 | task_id | A | NULL | NULL | NULL | | BTREE | VALID | | YES | NULL | +| def | oceanbase | __all_ls_replica_task_history | 0 | oceanbase | PRIMARY | 1 | tenant_id | A | NULL | NULL | NULL | | BTREE | VALID | | YES | NULL | +| def | oceanbase | __all_ls_replica_task_history | 0 | oceanbase | PRIMARY | 2 | ls_id | A | NULL | NULL | NULL | | BTREE | VALID | | YES | NULL | +| def | oceanbase | __all_ls_replica_task_history | 0 | oceanbase | PRIMARY | 3 | task_type | A | NULL | NULL | NULL | | BTREE | VALID | | YES | NULL | +| def | oceanbase | __all_ls_replica_task_history | 0 | oceanbase | PRIMARY | 4 | task_id | A | NULL | NULL | NULL | | BTREE | VALID | | YES | NULL | | def | oceanbase | __all_ls_restore_history | 0 | oceanbase | PRIMARY | 1 | tenant_id | A | NULL | NULL | NULL | | BTREE | VALID | | YES | NULL | | def | oceanbase | __all_ls_restore_history | 0 | oceanbase | PRIMARY | 2 | job_id | A | NULL | NULL | NULL | | BTREE | VALID | | YES | NULL | | def | oceanbase | __all_ls_restore_history | 0 | oceanbase | PRIMARY | 3 | ls_id | A | NULL | NULL | NULL | | BTREE | VALID | | YES | NULL | diff --git a/tools/deploy/mysql_test/test_suite/inner_table/r/mysql/desc_sys_views_in_mysql.result b/tools/deploy/mysql_test/test_suite/inner_table/r/mysql/desc_sys_views_in_mysql.result index 7de9dd0b6..30434c887 100644 --- a/tools/deploy/mysql_test/test_suite/inner_table/r/mysql/desc_sys_views_in_mysql.result +++ b/tools/deploy/mysql_test/test_suite/inner_table/r/mysql/desc_sys_views_in_mysql.result @@ -4325,6 +4325,9 @@ SOURCE_REPLICA_SVR_IP varchar(46) YES NULL SOURCE_REPLICA_SVR_PORT bigint(20) YES NULL SOURCE_PAXOS_REPLICA_NUMBER bigint(20) YES NULL SOURCE_REPLICA_TYPE varchar(16) YES NULL +DATA_SOURCE_SVR_IP varchar(46) YES NULL +DATA_SOURCE_SVR_PORT bigint(20) YES NULL +IS_MANUAL varchar(6) NO TASK_EXEC_SVR_IP varchar(46) YES NULL TASK_EXEC_SVR_PORT bigint(20) YES NULL CREATE_TIME datetime NO NULL @@ -6021,6 +6024,35 @@ TSNAP_META_EXISTED varchar(3) NO select /*+QUERY_TIMEOUT(60000000)*/ count(*) as cnt from (select * from oceanbase.V$OB_LS_SNAPSHOTS limit 1); cnt 1 +desc oceanbase.DBA_OB_LS_REPLICA_TASK_HISTORY; +Field Type Null Key Default Extra +LS_ID bigint(20) NO NULL +TASK_TYPE varchar(64) NO NULL +TASK_ID varchar(200) NO NULL +TASK_STATUS varchar(2048) YES NULL +PRIORITY varchar(5) NO +TARGET_REPLICA_SVR_IP varchar(46) YES NULL +TARGET_REPLICA_SVR_PORT bigint(20) YES NULL +TARGET_PAXOS_REPLICA_NUMBER bigint(20) YES NULL +TARGET_REPLICA_TYPE varchar(16) YES NULL +SOURCE_REPLICA_SVR_IP varchar(46) YES NULL +SOURCE_REPLICA_SVR_PORT bigint(20) YES NULL +SOURCE_PAXOS_REPLICA_NUMBER bigint(20) YES NULL +SOURCE_REPLICA_TYPE varchar(16) YES NULL +DATA_SOURCE_SVR_IP varchar(46) YES NULL +DATA_SOURCE_SVR_PORT bigint(20) YES NULL +IS_MANUAL varchar(6) NO +TASK_EXEC_SVR_IP varchar(46) YES NULL +TASK_EXEC_SVR_PORT bigint(20) YES NULL +CREATE_TIME datetime NO NULL +START_TIME datetime NO +MODIFY_TIME datetime NO NULL +FINISH_TIME datetime NO +EXECUTE_RESULT varchar(2048) YES NULL +COMMENT varchar(2048) YES NULL +select /*+QUERY_TIMEOUT(60000000)*/ count(*) as cnt from (select * from oceanbase.DBA_OB_LS_REPLICA_TASK_HISTORY limit 1); +cnt +1 desc oceanbase.DBA_MVIEW_LOGS; Field Type Null Key Default Extra LOG_OWNER varchar(128) NO diff --git a/tools/deploy/mysql_test/test_suite/inner_table/r/mysql/desc_sys_views_in_sys.result b/tools/deploy/mysql_test/test_suite/inner_table/r/mysql/desc_sys_views_in_sys.result index 
0b6a55c22..9fe9f06f7 100644 --- a/tools/deploy/mysql_test/test_suite/inner_table/r/mysql/desc_sys_views_in_sys.result +++ b/tools/deploy/mysql_test/test_suite/inner_table/r/mysql/desc_sys_views_in_sys.result @@ -6032,6 +6032,9 @@ SOURCE_REPLICA_SVR_IP varchar(46) YES NULL SOURCE_REPLICA_SVR_PORT bigint(20) YES NULL SOURCE_PAXOS_REPLICA_NUMBER bigint(20) YES NULL SOURCE_REPLICA_TYPE varchar(16) YES NULL +DATA_SOURCE_SVR_IP varchar(46) YES NULL +DATA_SOURCE_SVR_PORT bigint(20) YES NULL +IS_MANUAL varchar(6) NO TASK_EXEC_SVR_IP varchar(46) YES NULL TASK_EXEC_SVR_PORT bigint(20) YES NULL CREATE_TIME datetime NO NULL @@ -6057,6 +6060,9 @@ SOURCE_REPLICA_SVR_IP varchar(46) YES NULL SOURCE_REPLICA_SVR_PORT bigint(20) YES NULL SOURCE_PAXOS_REPLICA_NUMBER bigint(20) YES NULL SOURCE_REPLICA_TYPE varchar(16) YES NULL +DATA_SOURCE_SVR_IP varchar(46) YES NULL +DATA_SOURCE_SVR_PORT bigint(20) YES NULL +IS_MANUAL varchar(6) NO TASK_EXEC_SVR_IP varchar(46) YES NULL TASK_EXEC_SVR_PORT bigint(20) YES NULL CREATE_TIME datetime NO NULL @@ -8618,6 +8624,65 @@ ERROR_MESSAGE varchar(512) YES NULL select /*+QUERY_TIMEOUT(60000000)*/ count(*) as cnt from (select * from oceanbase.DBA_OB_CLONE_HISTORY limit 1); cnt 1 +desc oceanbase.DBA_OB_LS_REPLICA_TASK_HISTORY; +Field Type Null Key Default Extra +LS_ID bigint(20) NO NULL +TASK_TYPE varchar(64) NO NULL +TASK_ID varchar(200) NO NULL +TASK_STATUS varchar(2048) YES NULL +PRIORITY varchar(5) NO +TARGET_REPLICA_SVR_IP varchar(46) YES NULL +TARGET_REPLICA_SVR_PORT bigint(20) YES NULL +TARGET_PAXOS_REPLICA_NUMBER bigint(20) YES NULL +TARGET_REPLICA_TYPE varchar(16) YES NULL +SOURCE_REPLICA_SVR_IP varchar(46) YES NULL +SOURCE_REPLICA_SVR_PORT bigint(20) YES NULL +SOURCE_PAXOS_REPLICA_NUMBER bigint(20) YES NULL +SOURCE_REPLICA_TYPE varchar(16) YES NULL +DATA_SOURCE_SVR_IP varchar(46) YES NULL +DATA_SOURCE_SVR_PORT bigint(20) YES NULL +IS_MANUAL varchar(6) NO +TASK_EXEC_SVR_IP varchar(46) YES NULL +TASK_EXEC_SVR_PORT bigint(20) YES NULL +CREATE_TIME datetime NO NULL +START_TIME datetime NO +MODIFY_TIME datetime NO NULL +FINISH_TIME datetime NO +EXECUTE_RESULT varchar(2048) YES NULL +COMMENT varchar(2048) YES NULL +select /*+QUERY_TIMEOUT(60000000)*/ count(*) as cnt from (select * from oceanbase.DBA_OB_LS_REPLICA_TASK_HISTORY limit 1); +cnt +1 +desc oceanbase.CDB_OB_LS_REPLICA_TASK_HISTORY; +Field Type Null Key Default Extra +TENANT_ID bigint(20) NO NULL +LS_ID bigint(20) NO NULL +TASK_TYPE varchar(64) NO NULL +TASK_ID varchar(200) NO NULL +TASK_STATUS varchar(2048) YES NULL +PRIORITY varchar(5) NO +TARGET_REPLICA_SVR_IP varchar(46) YES NULL +TARGET_REPLICA_SVR_PORT bigint(20) YES NULL +TARGET_PAXOS_REPLICA_NUMBER bigint(20) YES NULL +TARGET_REPLICA_TYPE varchar(16) YES NULL +SOURCE_REPLICA_SVR_IP varchar(46) YES NULL +SOURCE_REPLICA_SVR_PORT bigint(20) YES NULL +SOURCE_PAXOS_REPLICA_NUMBER bigint(20) YES NULL +SOURCE_REPLICA_TYPE varchar(16) YES NULL +DATA_SOURCE_SVR_IP varchar(46) YES NULL +DATA_SOURCE_SVR_PORT bigint(20) YES NULL +IS_MANUAL varchar(6) NO +TASK_EXEC_SVR_IP varchar(46) YES NULL +TASK_EXEC_SVR_PORT bigint(20) YES NULL +CREATE_TIME datetime NO NULL +START_TIME datetime NO +MODIFY_TIME datetime NO NULL +FINISH_TIME datetime NO +EXECUTE_RESULT varchar(2048) YES NULL +COMMENT varchar(2048) YES NULL +select /*+QUERY_TIMEOUT(60000000)*/ count(*) as cnt from (select * from oceanbase.CDB_OB_LS_REPLICA_TASK_HISTORY limit 1); +cnt +1 desc oceanbase.CDB_MVIEW_LOGS; Field Type Null Key Default Extra TENANT_ID bigint(20) NO NULL diff --git 
a/tools/deploy/mysql_test/test_suite/inner_table/r/mysql/desc_virtual_table_in_mysql.result b/tools/deploy/mysql_test/test_suite/inner_table/r/mysql/desc_virtual_table_in_mysql.result index d9934d903..9ac59d3c2 100644 --- a/tools/deploy/mysql_test/test_suite/inner_table/r/mysql/desc_virtual_table_in_mysql.result +++ b/tools/deploy/mysql_test/test_suite/inner_table/r/mysql/desc_virtual_table_in_mysql.result @@ -4905,6 +4905,37 @@ IS_MANDATORY varchar(1024) NO select /*+QUERY_TIMEOUT(60000000)*/ IF(count(*) >= 0, 1, 0) from information_schema.ENABLED_ROLES; IF(count(*) >= 0, 1, 0) 1 +desc oceanbase.__all_virtual_ls_replica_task_history; +Field Type Null Key Default Extra +tenant_id bigint(20) NO PRI NULL +ls_id bigint(20) NO PRI NULL +task_type varchar(64) NO PRI NULL +task_id varchar(200) NO PRI NULL +gmt_create timestamp(6) NO NULL +gmt_modified timestamp(6) NO NULL +task_status varchar(2048) YES NULL +priority bigint(20) NO 1 +target_replica_svr_ip varchar(46) YES NULL +target_replica_svr_port bigint(20) YES NULL +target_paxos_replica_number bigint(20) YES NULL +target_replica_type varchar(16) YES NULL +source_replica_svr_ip varchar(46) YES NULL +source_replica_svr_port bigint(20) YES NULL +source_paxos_replica_number bigint(20) YES NULL +source_replica_type varchar(16) YES NULL +data_source_svr_ip varchar(46) YES NULL +data_source_svr_port bigint(20) YES NULL +is_manual tinyint(4) YES 0 +task_exec_svr_ip varchar(46) YES NULL +task_exec_svr_port bigint(20) YES NULL +generate_time timestamp(6) NO CURRENT_TIMESTAMP(6) +schedule_time timestamp(6) NO CURRENT_TIMESTAMP(6) +finish_time timestamp(6) NO CURRENT_TIMESTAMP(6) +execute_result varchar(2048) YES NULL +comment varchar(2048) YES NULL +select /*+QUERY_TIMEOUT(60000000)*/ IF(count(*) >= 0, 1, 0) from oceanbase.__all_virtual_ls_replica_task_history; +IF(count(*) >= 0, 1, 0) +1 desc oceanbase.__all_virtual_session_ps_info; Field Type Null Key Default Extra svr_ip varchar(46) NO NULL diff --git a/tools/deploy/mysql_test/test_suite/inner_table/r/mysql/desc_virtual_table_in_sys.result b/tools/deploy/mysql_test/test_suite/inner_table/r/mysql/desc_virtual_table_in_sys.result index d0c0d5c51..6f009a4f9 100644 --- a/tools/deploy/mysql_test/test_suite/inner_table/r/mysql/desc_virtual_table_in_sys.result +++ b/tools/deploy/mysql_test/test_suite/inner_table/r/mysql/desc_virtual_table_in_sys.result @@ -9487,6 +9487,37 @@ IS_MANDATORY varchar(1024) NO select /*+QUERY_TIMEOUT(60000000)*/ IF(count(*) >= 0, 1, 0) from information_schema.ENABLED_ROLES; IF(count(*) >= 0, 1, 0) 1 +desc oceanbase.__all_virtual_ls_replica_task_history; +Field Type Null Key Default Extra +tenant_id bigint(20) NO PRI NULL +ls_id bigint(20) NO PRI NULL +task_type varchar(64) NO PRI NULL +task_id varchar(200) NO PRI NULL +gmt_create timestamp(6) NO NULL +gmt_modified timestamp(6) NO NULL +task_status varchar(2048) YES NULL +priority bigint(20) NO 1 +target_replica_svr_ip varchar(46) YES NULL +target_replica_svr_port bigint(20) YES NULL +target_paxos_replica_number bigint(20) YES NULL +target_replica_type varchar(16) YES NULL +source_replica_svr_ip varchar(46) YES NULL +source_replica_svr_port bigint(20) YES NULL +source_paxos_replica_number bigint(20) YES NULL +source_replica_type varchar(16) YES NULL +data_source_svr_ip varchar(46) YES NULL +data_source_svr_port bigint(20) YES NULL +is_manual tinyint(4) YES 0 +task_exec_svr_ip varchar(46) YES NULL +task_exec_svr_port bigint(20) YES NULL +generate_time timestamp(6) NO CURRENT_TIMESTAMP(6) +schedule_time timestamp(6) NO 
CURRENT_TIMESTAMP(6) +finish_time timestamp(6) NO CURRENT_TIMESTAMP(6) +execute_result varchar(2048) YES NULL +comment varchar(2048) YES NULL +select /*+QUERY_TIMEOUT(60000000)*/ IF(count(*) >= 0, 1, 0) from oceanbase.__all_virtual_ls_replica_task_history; +IF(count(*) >= 0, 1, 0) +1 desc oceanbase.__all_virtual_session_ps_info; Field Type Null Key Default Extra svr_ip varchar(46) NO NULL diff --git a/tools/deploy/mysql_test/test_suite/inner_table/r/mysql/inner_table_overall.result b/tools/deploy/mysql_test/test_suite/inner_table/r/mysql/inner_table_overall.result index 2d1fcbd88..f7b93ef3d 100644 --- a/tools/deploy/mysql_test/test_suite/inner_table/r/mysql/inner_table_overall.result +++ b/tools/deploy/mysql_test/test_suite/inner_table/r/mysql/inner_table_overall.result @@ -293,6 +293,7 @@ select 0xffffffffff & table_id, table_name, table_type, database_id, part_num fr 505 __all_column_privilege 0 201001 1 506 __all_column_privilege_history 0 201001 1 507 __all_tenant_snapshot_ls_replica_history 0 201001 1 +508 __all_ls_replica_task_history 0 201001 1 512 __all_user_proxy_info 0 201001 1 513 __all_user_proxy_info_history 0 201001 1 514 __all_user_proxy_role_info 0 201001 1 @@ -742,6 +743,7 @@ select 0xffffffffff & table_id, table_name, table_type, database_id, part_num fr 12463 __all_virtual_column_privilege_history 2 201001 1 12464 __all_virtual_tenant_snapshot_ls_replica_history 2 201001 1 12466 ENABLED_ROLES 2 201002 1 +12467 __all_virtual_ls_replica_task_history 2 201001 1 12468 __all_virtual_session_ps_info 2 201001 1 12469 __all_virtual_tracepoint_info 2 201001 1 12473 __all_virtual_compatibility_control 2 201001 1 @@ -1150,6 +1152,8 @@ select 0xffffffffff & table_id, table_name, table_type, database_id, part_num fr 21517 GV$OB_LS_SNAPSHOTS 1 201001 1 21518 V$OB_LS_SNAPSHOTS 1 201001 1 21519 DBA_OB_CLONE_HISTORY 1 201001 1 +21523 DBA_OB_LS_REPLICA_TASK_HISTORY 1 201001 1 +21524 CDB_OB_LS_REPLICA_TASK_HISTORY 1 201001 1 21525 CDB_MVIEW_LOGS 1 201001 1 21526 DBA_MVIEW_LOGS 1 201001 1 21527 CDB_MVIEWS 1 201001 1 diff --git a/unittest/sql/parser/print_parser_tree.result b/unittest/sql/parser/print_parser_tree.result index f34396099..65a69f1b5 100644 --- a/unittest/sql/parser/print_parser_tree.result +++ b/unittest/sql/parser/print_parser_tree.result @@ -3709,3 +3709,202 @@ question_mask_size: 0 |--[0],[T_CANCEL_BALANCE_JOB], str_value_=[], value=[0] |--[0],[T_TENANT_NAME], str_value_=[], value=[9223372036854775807] |--[0],[T_VARCHAR], str_value_=[mysql], value=[9223372036854775807] + +************** Case 204 *************** +alter system add replica ls=100 server='100.88.107.212:5000' replica_type='F' data_source='100.88.107.212:5001' paxos_replica_num=3 tenant='mysql_tenant'; +question_mask_size: 0 + +|--[0],[T_STMT_LIST], str_value_=[], value=[9223372036854775807] + |--[0],[T_ADD_LS_REPLICA], str_value_=[], value=[0] + |--[0],[T_LS], str_value_=[], value=[9223372036854775807] + |--[0],[T_INT], str_value_=[100], value=[100] + |--[1],[T_VARCHAR], str_value_=["100.88.107.212":5000], value=[9223372036854775807] + |--[2],[T_VARCHAR], str_value_=[F], value=[9223372036854775807] + |--[3],[T_VARCHAR], str_value_=["100.88.107.212":5001], value=[9223372036854775807] + |--[4],[T_INT], str_value_=[3], value=[3] + |--[5],[T_TENANT_NAME], str_value_=[], value=[9223372036854775807] + |--[0],[T_VARCHAR], str_value_=[mysql_tenant], value=[9223372036854775807] + +************** Case 205 *************** +alter system add replica ls=100 server='100.88.107.212:5000' replica_type='F' 
data_source='100.88.107.212:5001' paxos_replica_num=3; +question_mask_size: 0 + +|--[0],[T_STMT_LIST], str_value_=[], value=[9223372036854775807] + |--[0],[T_ADD_LS_REPLICA], str_value_=[], value=[0] + |--[0],[T_LS], str_value_=[], value=[9223372036854775807] + |--[0],[T_INT], str_value_=[100], value=[100] + |--[1],[T_VARCHAR], str_value_=["100.88.107.212":5000], value=[9223372036854775807] + |--[2],[T_VARCHAR], str_value_=[F], value=[9223372036854775807] + |--[3],[T_VARCHAR], str_value_=["100.88.107.212":5001], value=[9223372036854775807] + |--[4],[T_INT], str_value_=[3], value=[3] + +************** Case 206 *************** +alter system add replica ls=100 server='100.88.107.212:5000' replica_type='F' data_source='100.88.107.212:5001'; +question_mask_size: 0 + +|--[0],[T_STMT_LIST], str_value_=[], value=[9223372036854775807] + |--[0],[T_ADD_LS_REPLICA], str_value_=[], value=[0] + |--[0],[T_LS], str_value_=[], value=[9223372036854775807] + |--[0],[T_INT], str_value_=[100], value=[100] + |--[1],[T_VARCHAR], str_value_=["100.88.107.212":5000], value=[9223372036854775807] + |--[2],[T_VARCHAR], str_value_=[F], value=[9223372036854775807] + |--[3],[T_VARCHAR], str_value_=["100.88.107.212":5001], value=[9223372036854775807] + +************** Case 207 *************** +alter system add replica ls=100 server='100.88.107.212:5000' replica_type='F'; +question_mask_size: 0 + +|--[0],[T_STMT_LIST], str_value_=[], value=[9223372036854775807] + |--[0],[T_ADD_LS_REPLICA], str_value_=[], value=[0] + |--[0],[T_LS], str_value_=[], value=[9223372036854775807] + |--[0],[T_INT], str_value_=[100], value=[100] + |--[1],[T_VARCHAR], str_value_=["100.88.107.212":5000], value=[9223372036854775807] + |--[2],[T_VARCHAR], str_value_=[F], value=[9223372036854775807] + +************** Case 208 *************** +alter system remove replica ls=100 server='100.88.107.212:5000' paxos_replica_num=3 tenant='mysql_tenant'; +question_mask_size: 0 + +|--[0],[T_STMT_LIST], str_value_=[], value=[9223372036854775807] + |--[0],[T_REMOVE_LS_REPLICA], str_value_=[], value=[0] + |--[0],[T_LS], str_value_=[], value=[9223372036854775807] + |--[0],[T_INT], str_value_=[100], value=[100] + |--[1],[T_VARCHAR], str_value_=["100.88.107.212":5000], value=[9223372036854775807] + |--[2],[T_INT], str_value_=[3], value=[3] + |--[3],[T_TENANT_NAME], str_value_=[], value=[9223372036854775807] + |--[0],[T_VARCHAR], str_value_=[mysql_tenant], value=[9223372036854775807] + +************** Case 209 *************** +alter system remove replica ls=100 server='100.88.107.212:5000' paxos_replica_num=3; +question_mask_size: 0 + +|--[0],[T_STMT_LIST], str_value_=[], value=[9223372036854775807] + |--[0],[T_REMOVE_LS_REPLICA], str_value_=[], value=[0] + |--[0],[T_LS], str_value_=[], value=[9223372036854775807] + |--[0],[T_INT], str_value_=[100], value=[100] + |--[1],[T_VARCHAR], str_value_=["100.88.107.212":5000], value=[9223372036854775807] + |--[2],[T_INT], str_value_=[3], value=[3] + +************** Case 210 *************** +alter system remove replica ls=100 server='100.88.107.212:5000'; +question_mask_size: 0 + +|--[0],[T_STMT_LIST], str_value_=[], value=[9223372036854775807] + |--[0],[T_REMOVE_LS_REPLICA], str_value_=[], value=[0] + |--[0],[T_LS], str_value_=[], value=[9223372036854775807] + |--[0],[T_INT], str_value_=[100], value=[100] + |--[1],[T_VARCHAR], str_value_=["100.88.107.212":5000], value=[9223372036854775807] + +************** Case 211 *************** +alter system modify replica ls=100 server='100.88.107.212:5000' replica_type='F' 
paxos_replica_num=3 tenant='mysql_tenant'; +question_mask_size: 0 + +|--[0],[T_STMT_LIST], str_value_=[], value=[9223372036854775807] + |--[0],[T_MODIFY_LS_REPLICA_TYPE], str_value_=[], value=[0] + |--[0],[T_LS], str_value_=[], value=[9223372036854775807] + |--[0],[T_INT], str_value_=[100], value=[100] + |--[1],[T_VARCHAR], str_value_=["100.88.107.212":5000], value=[9223372036854775807] + |--[2],[T_VARCHAR], str_value_=[F], value=[9223372036854775807] + |--[3],[T_INT], str_value_=[3], value=[3] + |--[4],[T_TENANT_NAME], str_value_=[], value=[9223372036854775807] + |--[0],[T_VARCHAR], str_value_=[mysql_tenant], value=[9223372036854775807] + +************** Case 212 *************** +alter system modify replica ls=100 server='100.88.107.212:5000' replica_type='F' paxos_replica_num=3; +question_mask_size: 0 + +|--[0],[T_STMT_LIST], str_value_=[], value=[9223372036854775807] + |--[0],[T_MODIFY_LS_REPLICA_TYPE], str_value_=[], value=[0] + |--[0],[T_LS], str_value_=[], value=[9223372036854775807] + |--[0],[T_INT], str_value_=[100], value=[100] + |--[1],[T_VARCHAR], str_value_=["100.88.107.212":5000], value=[9223372036854775807] + |--[2],[T_VARCHAR], str_value_=[F], value=[9223372036854775807] + |--[3],[T_INT], str_value_=[3], value=[3] + +************** Case 213 *************** +alter system modify replica ls=100 server='100.88.107.212:5000' replica_type='F'; +question_mask_size: 0 + +|--[0],[T_STMT_LIST], str_value_=[], value=[9223372036854775807] + |--[0],[T_MODIFY_LS_REPLICA_TYPE], str_value_=[], value=[0] + |--[0],[T_LS], str_value_=[], value=[9223372036854775807] + |--[0],[T_INT], str_value_=[100], value=[100] + |--[1],[T_VARCHAR], str_value_=["100.88.107.212":5000], value=[9223372036854775807] + |--[2],[T_VARCHAR], str_value_=[F], value=[9223372036854775807] + +************** Case 214 *************** +alter system modify ls=100 paxos_replica_num=3 tenant='mysql_tenant'; +question_mask_size: 0 + +|--[0],[T_STMT_LIST], str_value_=[], value=[9223372036854775807] + |--[0],[T_MODIFY_LS_PAXOS_REPLICA_NUM], str_value_=[], value=[0] + |--[0],[T_LS], str_value_=[], value=[9223372036854775807] + |--[0],[T_INT], str_value_=[100], value=[100] + |--[1],[T_INT], str_value_=[3], value=[3] + |--[2],[T_TENANT_NAME], str_value_=[], value=[9223372036854775807] + |--[0],[T_VARCHAR], str_value_=[mysql_tenant], value=[9223372036854775807] + +************** Case 215 *************** +alter system modify ls=100 paxos_replica_num=3; +question_mask_size: 0 + +|--[0],[T_STMT_LIST], str_value_=[], value=[9223372036854775807] + |--[0],[T_MODIFY_LS_PAXOS_REPLICA_NUM], str_value_=[], value=[0] + |--[0],[T_LS], str_value_=[], value=[9223372036854775807] + |--[0],[T_INT], str_value_=[100], value=[100] + |--[1],[T_INT], str_value_=[3], value=[3] + +************** Case 216 *************** +alter system migrate replica ls=100 source='100.88.107.212:5000' destination='100.88.107.212:5000' data_source='100.88.107.212:5001' tenant='mysql_tenant'; +question_mask_size: 0 + +|--[0],[T_STMT_LIST], str_value_=[], value=[9223372036854775807] + |--[0],[T_MIGRATE_LS_REPLICA], str_value_=[], value=[0] + |--[0],[T_LS], str_value_=[], value=[9223372036854775807] + |--[0],[T_INT], str_value_=[100], value=[100] + |--[1],[T_VARCHAR], str_value_=["100.88.107.212":5000], value=[9223372036854775807] + |--[2],[T_VARCHAR], str_value_=["100.88.107.212":5000], value=[9223372036854775807] + |--[3],[T_VARCHAR], str_value_=["100.88.107.212":5001], value=[9223372036854775807] + |--[4],[T_TENANT_NAME], str_value_=[], value=[9223372036854775807] + 
|--[0],[T_VARCHAR], str_value_=[mysql_tenant], value=[9223372036854775807] + +************** Case 217 *************** +alter system migrate replica ls=100 source='100.88.107.212:5000' destination='100.88.107.212:5000' data_source='100.88.107.212:5001'; +question_mask_size: 0 + +|--[0],[T_STMT_LIST], str_value_=[], value=[9223372036854775807] + |--[0],[T_MIGRATE_LS_REPLICA], str_value_=[], value=[0] + |--[0],[T_LS], str_value_=[], value=[9223372036854775807] + |--[0],[T_INT], str_value_=[100], value=[100] + |--[1],[T_VARCHAR], str_value_=["100.88.107.212":5000], value=[9223372036854775807] + |--[2],[T_VARCHAR], str_value_=["100.88.107.212":5000], value=[9223372036854775807] + |--[3],[T_VARCHAR], str_value_=["100.88.107.212":5001], value=[9223372036854775807] + +************** Case 218 *************** +alter system migrate replica ls=100 source='100.88.107.212:5000' destination='100.88.107.212:5000'; +question_mask_size: 0 + +|--[0],[T_STMT_LIST], str_value_=[], value=[9223372036854775807] + |--[0],[T_MIGRATE_LS_REPLICA], str_value_=[], value=[0] + |--[0],[T_LS], str_value_=[], value=[9223372036854775807] + |--[0],[T_INT], str_value_=[100], value=[100] + |--[1],[T_VARCHAR], str_value_=["100.88.107.212":5000], value=[9223372036854775807] + |--[2],[T_VARCHAR], str_value_=["100.88.107.212":5000], value=[9223372036854775807] + +************** Case 219 *************** +alter system cancel replica task task_id = 'xxx' tenant='mysql_tenant'; +question_mask_size: 0 + +|--[0],[T_STMT_LIST], str_value_=[], value=[9223372036854775807] + |--[0],[T_CANCEL_LS_REPLICA_TASK], str_value_=[], value=[0] + |--[0],[T_VARCHAR], str_value_=[xxx], value=[9223372036854775807] + |--[1],[T_TENANT_NAME], str_value_=[], value=[9223372036854775807] + |--[0],[T_VARCHAR], str_value_=[mysql_tenant], value=[9223372036854775807] + +************** Case 220 *************** +alter system cancel replica task task_id = 'xxx'; +question_mask_size: 0 + +|--[0],[T_STMT_LIST], str_value_=[], value=[9223372036854775807] + |--[0],[T_CANCEL_LS_REPLICA_TASK], str_value_=[], value=[0] + |--[0],[T_VARCHAR], str_value_=[xxx], value=[9223372036854775807] diff --git a/unittest/sql/parser/test_parser.result b/unittest/sql/parser/test_parser.result index 62c36ff44..569acc869 100644 --- a/unittest/sql/parser/test_parser.result +++ b/unittest/sql/parser/test_parser.result @@ -24165,3 +24165,865 @@ question_mask_size: 0 } ] } +************** Case 204 *************** +alter system add replica ls=100 server='100.88.107.212:5000' replica_type='F' data_source='100.88.107.212:5001' paxos_replica_num=3 tenant='mysql_tenant'; +question_mask_size: 0 +{ + "type":"T_STMT_LIST", + "int_val":9223372036854775807, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_ADD_LS_REPLICA", + "int_val":0, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_LS", + "int_val":9223372036854775807, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_INT", + "int_val":100, + "str_len":3, + "str_val":"100" + } + ] + }, + { + "type":"T_VARCHAR", + "int_val":9223372036854775807, + "str_len":19, + "str_val":"100.88.107.212:5000" + }, + { + "type":"T_VARCHAR", + "int_val":9223372036854775807, + "str_len":1, + "str_val":"F" + }, + { + "type":"T_VARCHAR", + "int_val":9223372036854775807, + "str_len":19, + "str_val":"100.88.107.212:5001" + }, + { + "type":"T_INT", + "int_val":3, + "str_len":1, + "str_val":"3" + }, + { + "type":"T_TENANT_NAME", + "int_val":9223372036854775807, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_VARCHAR", + 
"int_val":9223372036854775807, + "str_len":12, + "str_val":"mysql_tenant" + } + ] + } + ] + } + ] +} +************** Case 205 *************** +alter system add replica ls=100 server='100.88.107.212:5000' replica_type='F' data_source='100.88.107.212:5001' paxos_replica_num=3; +question_mask_size: 0 +{ + "type":"T_STMT_LIST", + "int_val":9223372036854775807, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_ADD_LS_REPLICA", + "int_val":0, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_LS", + "int_val":9223372036854775807, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_INT", + "int_val":100, + "str_len":3, + "str_val":"100" + } + ] + }, + { + "type":"T_VARCHAR", + "int_val":9223372036854775807, + "str_len":19, + "str_val":"100.88.107.212:5000" + }, + { + "type":"T_VARCHAR", + "int_val":9223372036854775807, + "str_len":1, + "str_val":"F" + }, + { + "type":"T_VARCHAR", + "int_val":9223372036854775807, + "str_len":19, + "str_val":"100.88.107.212:5001" + }, + { + "type":"T_INT", + "int_val":3, + "str_len":1, + "str_val":"3" + }, + { } + ] + } + ] +} +************** Case 206 *************** +alter system add replica ls=100 server='100.88.107.212:5000' replica_type='F' data_source='100.88.107.212:5001'; +question_mask_size: 0 +{ + "type":"T_STMT_LIST", + "int_val":9223372036854775807, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_ADD_LS_REPLICA", + "int_val":0, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_LS", + "int_val":9223372036854775807, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_INT", + "int_val":100, + "str_len":3, + "str_val":"100" + } + ] + }, + { + "type":"T_VARCHAR", + "int_val":9223372036854775807, + "str_len":19, + "str_val":"100.88.107.212:5000" + }, + { + "type":"T_VARCHAR", + "int_val":9223372036854775807, + "str_len":1, + "str_val":"F" + }, + { + "type":"T_VARCHAR", + "int_val":9223372036854775807, + "str_len":19, + "str_val":"100.88.107.212:5001" + }, + { }, + { } + ] + } + ] +} +************** Case 207 *************** +alter system add replica ls=100 server='100.88.107.212:5000' replica_type='F'; +question_mask_size: 0 +{ + "type":"T_STMT_LIST", + "int_val":9223372036854775807, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_ADD_LS_REPLICA", + "int_val":0, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_LS", + "int_val":9223372036854775807, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_INT", + "int_val":100, + "str_len":3, + "str_val":"100" + } + ] + }, + { + "type":"T_VARCHAR", + "int_val":9223372036854775807, + "str_len":19, + "str_val":"100.88.107.212:5000" + }, + { + "type":"T_VARCHAR", + "int_val":9223372036854775807, + "str_len":1, + "str_val":"F" + }, + { }, + { }, + { } + ] + } + ] +} +************** Case 208 *************** +alter system remove replica ls=100 server='100.88.107.212:5000' paxos_replica_num=3 tenant='mysql_tenant'; +question_mask_size: 0 +{ + "type":"T_STMT_LIST", + "int_val":9223372036854775807, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_REMOVE_LS_REPLICA", + "int_val":0, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_LS", + "int_val":9223372036854775807, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_INT", + "int_val":100, + "str_len":3, + "str_val":"100" + } + ] + }, + { + "type":"T_VARCHAR", + "int_val":9223372036854775807, + "str_len":19, + "str_val":"100.88.107.212:5000" + }, + { + "type":"T_INT", + "int_val":3, + "str_len":1, + "str_val":"3" + }, + { + 
"type":"T_TENANT_NAME", + "int_val":9223372036854775807, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_VARCHAR", + "int_val":9223372036854775807, + "str_len":12, + "str_val":"mysql_tenant" + } + ] + } + ] + } + ] +} +************** Case 209 *************** +alter system remove replica ls=100 server='100.88.107.212:5000' paxos_replica_num=3; +question_mask_size: 0 +{ + "type":"T_STMT_LIST", + "int_val":9223372036854775807, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_REMOVE_LS_REPLICA", + "int_val":0, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_LS", + "int_val":9223372036854775807, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_INT", + "int_val":100, + "str_len":3, + "str_val":"100" + } + ] + }, + { + "type":"T_VARCHAR", + "int_val":9223372036854775807, + "str_len":19, + "str_val":"100.88.107.212:5000" + }, + { + "type":"T_INT", + "int_val":3, + "str_len":1, + "str_val":"3" + }, + { } + ] + } + ] +} +************** Case 210 *************** +alter system remove replica ls=100 server='100.88.107.212:5000'; +question_mask_size: 0 +{ + "type":"T_STMT_LIST", + "int_val":9223372036854775807, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_REMOVE_LS_REPLICA", + "int_val":0, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_LS", + "int_val":9223372036854775807, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_INT", + "int_val":100, + "str_len":3, + "str_val":"100" + } + ] + }, + { + "type":"T_VARCHAR", + "int_val":9223372036854775807, + "str_len":19, + "str_val":"100.88.107.212:5000" + }, + { }, + { } + ] + } + ] +} +************** Case 211 *************** +alter system modify replica ls=100 server='100.88.107.212:5000' replica_type='F' paxos_replica_num=3 tenant='mysql_tenant'; +question_mask_size: 0 +{ + "type":"T_STMT_LIST", + "int_val":9223372036854775807, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_MODIFY_LS_REPLICA_TYPE", + "int_val":0, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_LS", + "int_val":9223372036854775807, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_INT", + "int_val":100, + "str_len":3, + "str_val":"100" + } + ] + }, + { + "type":"T_VARCHAR", + "int_val":9223372036854775807, + "str_len":19, + "str_val":"100.88.107.212:5000" + }, + { + "type":"T_VARCHAR", + "int_val":9223372036854775807, + "str_len":1, + "str_val":"F" + }, + { + "type":"T_INT", + "int_val":3, + "str_len":1, + "str_val":"3" + }, + { + "type":"T_TENANT_NAME", + "int_val":9223372036854775807, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_VARCHAR", + "int_val":9223372036854775807, + "str_len":12, + "str_val":"mysql_tenant" + } + ] + } + ] + } + ] +} +************** Case 212 *************** +alter system modify replica ls=100 server='100.88.107.212:5000' replica_type='F' paxos_replica_num=3; +question_mask_size: 0 +{ + "type":"T_STMT_LIST", + "int_val":9223372036854775807, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_MODIFY_LS_REPLICA_TYPE", + "int_val":0, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_LS", + "int_val":9223372036854775807, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_INT", + "int_val":100, + "str_len":3, + "str_val":"100" + } + ] + }, + { + "type":"T_VARCHAR", + "int_val":9223372036854775807, + "str_len":19, + "str_val":"100.88.107.212:5000" + }, + { + "type":"T_VARCHAR", + "int_val":9223372036854775807, + "str_len":1, + "str_val":"F" + }, + { + "type":"T_INT", + 
"int_val":3, + "str_len":1, + "str_val":"3" + }, + { } + ] + } + ] +} +************** Case 213 *************** +alter system modify replica ls=100 server='100.88.107.212:5000' replica_type='F'; +question_mask_size: 0 +{ + "type":"T_STMT_LIST", + "int_val":9223372036854775807, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_MODIFY_LS_REPLICA_TYPE", + "int_val":0, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_LS", + "int_val":9223372036854775807, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_INT", + "int_val":100, + "str_len":3, + "str_val":"100" + } + ] + }, + { + "type":"T_VARCHAR", + "int_val":9223372036854775807, + "str_len":19, + "str_val":"100.88.107.212:5000" + }, + { + "type":"T_VARCHAR", + "int_val":9223372036854775807, + "str_len":1, + "str_val":"F" + }, + { }, + { } + ] + } + ] +} +************** Case 214 *************** +alter system modify ls=100 paxos_replica_num=3 tenant='mysql_tenant'; +question_mask_size: 0 +{ + "type":"T_STMT_LIST", + "int_val":9223372036854775807, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_MODIFY_LS_PAXOS_REPLICA_NUM", + "int_val":0, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_LS", + "int_val":9223372036854775807, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_INT", + "int_val":100, + "str_len":3, + "str_val":"100" + } + ] + }, + { + "type":"T_INT", + "int_val":3, + "str_len":1, + "str_val":"3" + }, + { + "type":"T_TENANT_NAME", + "int_val":9223372036854775807, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_VARCHAR", + "int_val":9223372036854775807, + "str_len":12, + "str_val":"mysql_tenant" + } + ] + } + ] + } + ] +} +************** Case 215 *************** +alter system modify ls=100 paxos_replica_num=3; +question_mask_size: 0 +{ + "type":"T_STMT_LIST", + "int_val":9223372036854775807, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_MODIFY_LS_PAXOS_REPLICA_NUM", + "int_val":0, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_LS", + "int_val":9223372036854775807, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_INT", + "int_val":100, + "str_len":3, + "str_val":"100" + } + ] + }, + { + "type":"T_INT", + "int_val":3, + "str_len":1, + "str_val":"3" + }, + { } + ] + } + ] +} +************** Case 216 *************** +alter system migrate replica ls=100 source='100.88.107.212:5000' destination='100.88.107.212:5000' data_source='100.88.107.212:5001' tenant='mysql_tenant'; +question_mask_size: 0 +{ + "type":"T_STMT_LIST", + "int_val":9223372036854775807, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_MIGRATE_LS_REPLICA", + "int_val":0, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_LS", + "int_val":9223372036854775807, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_INT", + "int_val":100, + "str_len":3, + "str_val":"100" + } + ] + }, + { + "type":"T_VARCHAR", + "int_val":9223372036854775807, + "str_len":19, + "str_val":"100.88.107.212:5000" + }, + { + "type":"T_VARCHAR", + "int_val":9223372036854775807, + "str_len":19, + "str_val":"100.88.107.212:5000" + }, + { + "type":"T_VARCHAR", + "int_val":9223372036854775807, + "str_len":19, + "str_val":"100.88.107.212:5001" + }, + { + "type":"T_TENANT_NAME", + "int_val":9223372036854775807, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_VARCHAR", + "int_val":9223372036854775807, + "str_len":12, + "str_val":"mysql_tenant" + } + ] + } + ] + } + ] +} +************** Case 217 *************** +alter system 
migrate replica ls=100 source='100.88.107.212:5000' destination='100.88.107.212:5000' data_source='100.88.107.212:5001'; +question_mask_size: 0 +{ + "type":"T_STMT_LIST", + "int_val":9223372036854775807, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_MIGRATE_LS_REPLICA", + "int_val":0, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_LS", + "int_val":9223372036854775807, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_INT", + "int_val":100, + "str_len":3, + "str_val":"100" + } + ] + }, + { + "type":"T_VARCHAR", + "int_val":9223372036854775807, + "str_len":19, + "str_val":"100.88.107.212:5000" + }, + { + "type":"T_VARCHAR", + "int_val":9223372036854775807, + "str_len":19, + "str_val":"100.88.107.212:5000" + }, + { + "type":"T_VARCHAR", + "int_val":9223372036854775807, + "str_len":19, + "str_val":"100.88.107.212:5001" + }, + { } + ] + } + ] +} +************** Case 218 *************** +alter system migrate replica ls=100 source='100.88.107.212:5000' destination='100.88.107.212:5000'; +question_mask_size: 0 +{ + "type":"T_STMT_LIST", + "int_val":9223372036854775807, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_MIGRATE_LS_REPLICA", + "int_val":0, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_LS", + "int_val":9223372036854775807, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_INT", + "int_val":100, + "str_len":3, + "str_val":"100" + } + ] + }, + { + "type":"T_VARCHAR", + "int_val":9223372036854775807, + "str_len":19, + "str_val":"100.88.107.212:5000" + }, + { + "type":"T_VARCHAR", + "int_val":9223372036854775807, + "str_len":19, + "str_val":"100.88.107.212:5000" + }, + { }, + { } + ] + } + ] +} +************** Case 219 *************** +alter system cancel replica task task_id = 'xxx' tenant='mysql_tenant'; +question_mask_size: 0 +{ + "type":"T_STMT_LIST", + "int_val":9223372036854775807, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_CANCEL_LS_REPLICA_TASK", + "int_val":0, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_VARCHAR", + "int_val":9223372036854775807, + "str_len":3, + "str_val":"xxx" + }, + { + "type":"T_TENANT_NAME", + "int_val":9223372036854775807, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_VARCHAR", + "int_val":9223372036854775807, + "str_len":12, + "str_val":"mysql_tenant" + } + ] + } + ] + } + ] +} +************** Case 220 *************** +alter system cancel replica task task_id = 'xxx'; +question_mask_size: 0 +{ + "type":"T_STMT_LIST", + "int_val":9223372036854775807, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_CANCEL_LS_REPLICA_TASK", + "int_val":0, + "str_len":0, + "str_val":"", + "children": [ + { + "type":"T_VARCHAR", + "int_val":9223372036854775807, + "str_len":3, + "str_val":"xxx" + }, + { } + ] + } + ] +} diff --git a/unittest/sql/parser/test_parser.test b/unittest/sql/parser/test_parser.test index e05feca20..c3add7f5a 100644 --- a/unittest/sql/parser/test_parser.test +++ b/unittest/sql/parser/test_parser.test @@ -313,3 +313,28 @@ alter system cancel transfer partition ALL; alter system cancel transfer partition ALL tenant 'mysql'; alter system cancel balance job; alter system cancel balance job tenant 'mysql'; + + + +alter system add replica ls=100 server='100.88.107.212:5000' replica_type='F' data_source='100.88.107.212:5001' paxos_replica_num=3 tenant='mysql_tenant'; +alter system add replica ls=100 server='100.88.107.212:5000' replica_type='F' data_source='100.88.107.212:5001' paxos_replica_num=3; +alter 
system add replica ls=100 server='100.88.107.212:5000' replica_type='F' data_source='100.88.107.212:5001';
+alter system add replica ls=100 server='100.88.107.212:5000' replica_type='F';
+
+alter system remove replica ls=100 server='100.88.107.212:5000' paxos_replica_num=3 tenant='mysql_tenant';
+alter system remove replica ls=100 server='100.88.107.212:5000' paxos_replica_num=3;
+alter system remove replica ls=100 server='100.88.107.212:5000';
+
+alter system modify replica ls=100 server='100.88.107.212:5000' replica_type='F' paxos_replica_num=3 tenant='mysql_tenant';
+alter system modify replica ls=100 server='100.88.107.212:5000' replica_type='F' paxos_replica_num=3;
+alter system modify replica ls=100 server='100.88.107.212:5000' replica_type='F';
+
+alter system modify ls=100 paxos_replica_num=3 tenant='mysql_tenant';
+alter system modify ls=100 paxos_replica_num=3;
+
+alter system migrate replica ls=100 source='100.88.107.212:5000' destination='100.88.107.212:5000' data_source='100.88.107.212:5001' tenant='mysql_tenant';
+alter system migrate replica ls=100 source='100.88.107.212:5000' destination='100.88.107.212:5000' data_source='100.88.107.212:5001';
+alter system migrate replica ls=100 source='100.88.107.212:5000' destination='100.88.107.212:5000';
+
+alter system cancel replica task task_id = 'xxx' tenant='mysql_tenant';
+alter system cancel replica task task_id = 'xxx';