diff --git a/deps/oblib/src/common/ob_learner_list.h b/deps/oblib/src/common/ob_learner_list.h
index c97dbef109..f3b51bdf4d 100644
--- a/deps/oblib/src/common/ob_learner_list.h
+++ b/deps/oblib/src/common/ob_learner_list.h
@@ -62,6 +62,7 @@ public:
   int64_t get_index_by_learner(const T &learner) const;
   // by addr
   int64_t get_index_by_addr(const common::ObAddr &server) const;
+  int get_addr_array(ObIArray<common::ObAddr> &addr_array) const;
 private:
   typedef common::ObSEArray<T, OB_MAX_MEMBER_NUMBER> LogLearnerArray;
   LogLearnerArray learner_array_;
diff --git a/deps/oblib/src/common/ob_learner_list.ipp b/deps/oblib/src/common/ob_learner_list.ipp
index db70d134cf..3763789282 100644
--- a/deps/oblib/src/common/ob_learner_list.ipp
+++ b/deps/oblib/src/common/ob_learner_list.ipp
@@ -365,5 +365,26 @@ int BaseLearnerList<MAX_SIZE, T>::transform_to_string(
   }
   return ret;
 }
+
+template <int64_t MAX_SIZE, typename T>
+int BaseLearnerList<MAX_SIZE, T>::get_addr_array(ObIArray<common::ObAddr> &addr_array) const
+{
+  int ret = OB_SUCCESS;
+  const int64_t number = get_member_number();
+  addr_array.reset();
+  for (int64_t idx = 0; idx < number && OB_SUCC(ret); ++idx) {
+    common::ObAddr server;
+    if (OB_FAIL(get_server_by_index(idx, server))) {
+      COMMON_LOG(WARN, "get_server_by_index failed", K(ret), K(idx));
+    } else if (OB_FAIL(addr_array.push_back(server))) {
+      COMMON_LOG(WARN, "add addr array failed", K(ret), K(server));
+    }
+  }
+  if (OB_FAIL(ret)) {
+    COMMON_LOG(WARN, "BaseLearnerList get_addr_array failed", K(ret));
+  }
+  return ret;
+}
+
 } // namespace common end
 } // namespace oceanbase end
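A minimal usage sketch of the new get_addr_array() interface; the caller context and the ObSEArray inline size are illustrative assumptions, not part of the patch:

    // Flatten a learner list into plain addresses, e.g. to fan a request out
    // to every learner. Any BaseLearnerList instantiation works the same way.
    int ret = OB_SUCCESS;
    common::ObSEArray<common::ObAddr, 16> addr_array;
    if (OB_SUCC(learner_list.get_addr_array(addr_array))) {
      for (int64_t i = 0; i < addr_array.count(); ++i) {
        const common::ObAddr &addr = addr_array.at(i);
        // e.g. post a message to addr
      }
    }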
diff --git a/mittest/logservice/test_ob_simple_log_config_change.cpp b/mittest/logservice/test_ob_simple_log_config_change.cpp
index 03ff4e7af5..a77615cca6 100644
--- a/mittest/logservice/test_ob_simple_log_config_change.cpp
+++ b/mittest/logservice/test_ob_simple_log_config_change.cpp
@@ -407,9 +407,12 @@ TEST_F(TestObSimpleLogClusterConfigChange, test_basic_config_change_for_migratio
   PALF_LOG(INFO, "CASE1: replicate an FULL replica", K(id));
   common::ObMember added_member = ObMember(addr3, 1);
   added_member.set_migrating();
+  LogLearnerList learners;
+  learners.add_learner(LogLearner(added_member.get_server(), 1));
   EXPECT_EQ(OB_SUCCESS, leader.palf_handle_impl_->add_learner(added_member, CONFIG_CHANGE_TIMEOUT));
   EXPECT_TRUE(leader.palf_handle_impl_->config_mgr_.log_ms_meta_.curr_.config_.learnerlist_.contains(added_member));
   EXPECT_EQ(3, leader.palf_handle_impl_->config_mgr_.log_ms_meta_.curr_.config_.log_sync_replica_num_);
+  EXPECT_UNTIL_EQ(true, check_children_valid(palf_list, learners));
 
   // clean
   EXPECT_EQ(OB_SUCCESS, leader.palf_handle_impl_->remove_learner(added_member, CONFIG_CHANGE_TIMEOUT));
diff --git a/src/logservice/ob_log_handler.cpp b/src/logservice/ob_log_handler.cpp
index b158ce6212..a30b73af79 100755
--- a/src/logservice/ob_log_handler.cpp
+++ b/src/logservice/ob_log_handler.cpp
@@ -474,6 +474,12 @@ int ObLogHandler::get_election_leader(common::ObAddr &addr) const
   return palf_handle_.get_election_leader(addr);
 }
 
+int ObLogHandler::get_parent(common::ObAddr &parent) const
+{
+  RLockGuard guard(lock_);
+  return palf_handle_.get_parent(parent);
+}
+
 int ObLogHandler::enable_sync()
 {
   RLockGuard guard(lock_);
diff --git a/src/logservice/ob_log_handler.h b/src/logservice/ob_log_handler.h
index b8a98fc7a9..1c45d98003 100755
--- a/src/logservice/ob_log_handler.h
+++ b/src/logservice/ob_log_handler.h
@@ -131,6 +131,7 @@ public:
   virtual int get_leader_config_version(palf::LogConfigVersion &config_version) const = 0;
   // get leader from election, used only for non_palf_leader rebuilding.
   virtual int get_election_leader(common::ObAddr &addr) const = 0;
+  virtual int get_parent(common::ObAddr &parent) const = 0;
   virtual int change_replica_num(const common::ObMemberList &member_list,
                                  const int64_t curr_replica_num,
                                  const int64_t new_replica_num,
@@ -429,6 +430,13 @@ public:
   // OB_NOT_INIT
   // OB_LEADER_NOT_EXIST
   int get_election_leader(common::ObAddr &addr) const override final;
+  // @brief, get parent
+  // @param[out] parent: address of parent
+  // retval:
+  //   OB_SUCCESS
+  //   OB_NOT_INIT
+  //   OB_ENTRY_NOT_EXIST: parent is invalid
+  int get_parent(common::ObAddr &parent) const override final;
   // PalfBaseInfo include the 'base_lsn' and the 'prev_log_info' of sliding window.
   // @param[in] const LSN&, base_lsn of ls.
   // @param[out] PalfBaseInfo&, palf_base_info
diff --git a/src/logservice/palf/log_config_mgr.cpp b/src/logservice/palf/log_config_mgr.cpp
index 9aeefed1ef..52813704be 100755
--- a/src/logservice/palf/log_config_mgr.cpp
+++ b/src/logservice/palf/log_config_mgr.cpp
@@ -69,6 +69,7 @@ LogConfigMgr::LogConfigMgr()
     last_first_register_time_us_(OB_INVALID_TIMESTAMP),
     child_lock_(common::ObLatchIds::PALF_CM_CHILD_LOCK),
     children_(),
+    log_sync_children_(),
     last_submit_keepalive_time_us_(OB_INVALID_TIMESTAMP),
     log_engine_(NULL),
     sw_(NULL),
@@ -162,6 +163,7 @@ void LogConfigMgr::destroy()
   parent_keepalive_time_us_ = OB_INVALID_TIMESTAMP;
   reset_registering_state_();
   children_.reset();
+  log_sync_children_.reset();
   last_submit_keepalive_time_us_ = OB_INVALID_TIMESTAMP;
   ms_ack_list_.reset();
   resend_config_version_.reset();
@@ -492,6 +494,21 @@ int LogConfigMgr::get_children_list(LogLearnerList &children) const
   return ret;
 }
 
+// considering log replication performance, we update log_sync_children_ in the background
+int LogConfigMgr::get_log_sync_children_list(LogLearnerList &children) const
+{
+  int ret = OB_SUCCESS;
+  SpinLockGuard guard(child_lock_);
+  if (IS_NOT_INIT) {
+    ret = OB_NOT_INIT;
+  } else if (OB_FAIL(children.deep_copy(log_sync_children_))) {
+    PALF_LOG(WARN, "deep_copy children_list failed", KR(ret), K_(palf_id), K_(self));
+  } else {
+    // pass
+  }
+  return ret;
+}
+
 int LogConfigMgr::get_config_version(LogConfigVersion &config_version) const
 {
   int ret = OB_SUCCESS;
@@ -2818,18 +2835,14 @@ int LogConfigMgr::handle_register_parent_req(const LogLearner &child, const bool
   LogLearnerList retired_children;
   LogLearnerList diff_region_children;
   RegisterReturn reg_ret = INVALID_REG_RET;
-  common::ObMember learner_in_list;
-  const int in_list_ret = all_learnerlist_.get_learner_by_addr(child.get_server(), learner_in_list);
   if (IS_NOT_INIT) {
     ret = OB_NOT_INIT;
   } else if (!child.is_valid() || child.register_time_us_ <= 0) {
     ret = OB_INVALID_ARGUMENT;
     PALF_LOG(WARN, "invalid argument", KR(ret), K_(palf_id), K_(self), K(child));
-  } else if (is_to_leader && (OB_SUCCESS != in_list_ret || learner_in_list.is_migrating())) {
+  } else if (is_to_leader && !all_learnerlist_.contains(child.get_server())) {
     ret = OB_INVALID_ARGUMENT;
-    // Note: do not register parent for migrating learners, because their logs may lag behind its parent
-    PALF_LOG(WARN, "registering child is not in learner list or is migrating", K_(palf_id),
-        K_(self), K(child), K(in_list_ret), K(learner_in_list));
+    PALF_LOG(WARN, "registering child is not in learner list", K_(palf_id), K_(self), K(child));
   } else {
     SpinLockGuard guard(child_lock_);
     int64_t idx = -1;
@@ -2864,6 +2877,8 @@ int LogConfigMgr::handle_register_parent_req(const LogLearner &child, const bool
         dst_child.register_time_us_ = child.register_time_us_;
         if (OB_FAIL(children_.add_learner(dst_child))) {
           PALF_LOG(WARN, "handle_register_parent_req failed", KR(ret), K_(palf_id), K_(self), K(is_to_leader), K(dst_child));
+        } else if (OB_FAIL(log_sync_children_.add_learner(dst_child))) {
+          PALF_LOG(WARN, "add_learner failed", KR(ret), K_(palf_id), K_(self), K_(log_sync_children), K(dst_child));
         } else {
           reg_ret = REGISTER_DONE;
         }
@@ -2881,6 +2896,8 @@ int LogConfigMgr::handle_register_parent_req(const LogLearner &child, const bool
         dst_child.register_time_us_ = child.register_time_us_;
         if (OB_FAIL(children_.add_learner(dst_child))) {
           PALF_LOG(WARN, "handle_register_parent_req failed", KR(ret), K_(palf_id), K_(self), K(is_to_leader), K(dst_child));
+        } else if (OB_FAIL(log_sync_children_.add_learner(dst_child))) {
+          PALF_LOG(WARN, "add_learner failed", KR(ret), K_(palf_id), K_(self), K_(log_sync_children), K(dst_child));
         } else {
           reg_ret = REGISTER_DONE;
         }
@@ -3022,6 +3039,21 @@ void LogConfigMgr::check_children_health()
           RetireChildReason::DUPLICATE_REGION_IN_LEADER))) {
     PALF_LOG(WARN, "submit_retire_children_req failed", KR(ret), K_(palf_id), K_(self), K(dup_region_children));
   }
+  // 6. update log_sync_children_
+  {
+    SpinLockGuard guard(child_lock_);
+    log_sync_children_.reset();
+    for (int64_t i = 0; i < children_.get_member_number(); i++) {
+      const LogLearner &learner = children_.get_learner(i);
+      common::ObMember learner_in_list;
+      const int in_list_ret = all_learnerlist_.get_learner_by_addr(learner.get_server(), learner_in_list);
+      if (OB_SUCCESS != in_list_ret || learner_in_list.is_migrating()) {
+        // skip
+      } else if (OB_FAIL(log_sync_children_.add_learner(learner))) {
+        PALF_LOG(WARN, "add_learner failed", KR(ret), K_(palf_id), K_(self), K(learner), K_(log_sync_children));
+      }
+    }
+  }
 }
diff --git a/src/logservice/palf/log_config_mgr.h b/src/logservice/palf/log_config_mgr.h
index 652c330af3..973edcaf5f 100755
--- a/src/logservice/palf/log_config_mgr.h
+++ b/src/logservice/palf/log_config_mgr.h
@@ -418,6 +418,7 @@ public:
   virtual int get_arbitration_member(common::ObMember &arb_member) const;
   virtual int get_prev_member_list(common::ObMemberList &member_list) const;
   virtual int get_children_list(LogLearnerList &children) const;
+  virtual int get_log_sync_children_list(LogLearnerList &children) const;
   virtual int get_config_version(LogConfigVersion &config_version) const;
   // @brief get replica_num of expected paxos member list, excluding arbitraion member,
   // and including degraded members.
@@ -779,6 +780,9 @@ private:
   // ==================== Parent ========================
   mutable common::ObSpinLock child_lock_;
   LogLearnerList children_;
+  // cached children_ for pushing logs
+  // log_sync_children_ = children_ - migrating learners - learners not in learnerlist_
+  LogLearnerList log_sync_children_;
   int64_t last_submit_keepalive_time_us_;
   // ==================== Parent ========================
   LogEngine *log_engine_;
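Restated as a stand-alone sketch, the filter check_children_health() applies when rebuilding the cache; is_log_sync_child is an illustrative helper name, not part of the patch:

    // A child keeps receiving pushed logs only while it is still in the learner
    // list and is not a migrating replica, whose logs may lag behind its parent.
    static bool is_log_sync_child(const common::GlobalLearnerList &all_learnerlist,
                                  const LogLearner &child)
    {
      common::ObMember learner_in_list;
      const int in_list_ret = all_learnerlist.get_learner_by_addr(child.get_server(), learner_in_list);
      return OB_SUCCESS == in_list_ret && !learner_in_list.is_migrating();
    }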
diff --git a/src/logservice/palf/log_sliding_window.cpp b/src/logservice/palf/log_sliding_window.cpp
index d4bbadc905..3500e284d1 100644
--- a/src/logservice/palf/log_sliding_window.cpp
+++ b/src/logservice/palf/log_sliding_window.cpp
@@ -885,7 +885,7 @@ int LogSlidingWindow::try_push_log_to_children_(const int64_t curr_proposal_id,
   common::GlobalLearnerList degraded_learner_list;
   const bool need_presend_log = (state_mgr_->is_leader_active()) ? true : false;
   const bool need_batch_push = need_use_batch_rpc_(log_write_buf.get_total_size());
-  if (OB_FAIL(mm_->get_children_list(children_list))) {
+  if (OB_FAIL(mm_->get_log_sync_children_list(children_list))) {
     PALF_LOG(WARN, "get_children_list failed", K(ret), K_(palf_id));
   } else if (children_list.is_valid() && OB_FAIL(log_engine_->submit_push_log_req(children_list,
                                                                                   PUSH_LOG,
                                                                                   curr_proposal_id,
diff --git a/src/logservice/palf/palf_handle.cpp b/src/logservice/palf/palf_handle.cpp
index aed72f2a81..3676395272 100755
--- a/src/logservice/palf/palf_handle.cpp
+++ b/src/logservice/palf/palf_handle.cpp
@@ -327,6 +327,12 @@ int PalfHandle::get_election_leader(common::ObAddr &addr) const
   return palf_handle_impl_->get_election_leader(addr);
 }
 
+int PalfHandle::get_parent(common::ObAddr &parent) const
+{
+  CHECK_VALID;
+  return palf_handle_impl_->get_parent(parent);
+}
+
 int PalfHandle::change_replica_num(const common::ObMemberList &member_list,
                                    const int64_t curr_replica_num,
                                    const int64_t new_replica_num,
diff --git a/src/logservice/palf/palf_handle.h b/src/logservice/palf/palf_handle.h
index 18d4103ba0..d9cccc77fd 100755
--- a/src/logservice/palf/palf_handle.h
+++ b/src/logservice/palf/palf_handle.h
@@ -226,6 +226,7 @@ public:
                        int64_t &paxos_replica_num,
                        GlobalLearnerList &learner_list) const;
   int get_election_leader(common::ObAddr &addr) const;
+  int get_parent(common::ObAddr &parent) const;
 
   // @brief: a special config change interface, change replica number of paxos group
   // @param[in] common::ObMemberList: current memberlist, for pre-check
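A hedged sketch of a get_parent() caller; the handler variable and the recovery policy are illustrative, while the return codes follow the contract documented above:

    // OB_ENTRY_NOT_EXIST means this replica has no valid parent right now,
    // e.g. it is the leader or registration has not finished.
    common::ObAddr parent;
    int ret = log_handler->get_parent(parent);
    if (OB_SUCCESS == ret) {
      // report or use the parent address
    } else if (OB_ENTRY_NOT_EXIST == ret) {
      // no parent yet: retry later instead of treating this as a hard error
    }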
diff --git a/src/logservice/palf/palf_handle_impl.cpp b/src/logservice/palf/palf_handle_impl.cpp
index 9e2b2ebd69..dfa18756fe 100755
--- a/src/logservice/palf/palf_handle_impl.cpp
+++ b/src/logservice/palf/palf_handle_impl.cpp
@@ -600,6 +600,19 @@ int PalfHandleImpl::get_election_leader(ObAddr &addr) const
   return ret;
 }
 
+int PalfHandleImpl::get_parent(common::ObAddr &parent) const
+{
+  int ret = OB_SUCCESS;
+  if (IS_NOT_INIT) {
+    ret = OB_NOT_INIT;
+    PALF_LOG(ERROR, "PalfHandleImpl has not inited", K(ret));
+  } else {
+    parent = config_mgr_.get_parent();
+    ret = parent.is_valid() ? OB_SUCCESS : OB_ENTRY_NOT_EXIST;
+  }
+  return ret;
+}
+
 int PalfHandleImpl::handle_config_change_pre_check(const ObAddr &server,
                                                    const LogGetMCStReq &req,
                                                    LogGetMCStResp &resp)
diff --git a/src/logservice/palf/palf_handle_impl.h b/src/logservice/palf/palf_handle_impl.h
index e94b5cbb44..98231d7f57 100755
--- a/src/logservice/palf/palf_handle_impl.h
+++ b/src/logservice/palf/palf_handle_impl.h
@@ -362,6 +362,7 @@ public:
                                int64_t &paxos_replica_num,
                                common::GlobalLearnerList &learner_list) const = 0;
   virtual int get_election_leader(common::ObAddr &addr) const = 0;
+  virtual int get_parent(common::ObAddr &parent) const = 0;
 
   // @brief: a special config change interface, change replica number of paxos group
   // @param[in] common::ObMemberList: current memberlist, for pre-check
@@ -904,6 +905,7 @@ public:
                        int64_t &paxos_replica_num,
                        common::GlobalLearnerList &learner_list) const override final;
   int get_election_leader(common::ObAddr &addr) const;
+  int get_parent(common::ObAddr &parent) const;
   int force_set_as_single_replica() override final;
   int change_replica_num(const common::ObMemberList &member_list,
                          const int64_t curr_replica_num,
diff --git a/src/logservice/restoreservice/ob_log_archive_piece_mgr.cpp b/src/logservice/restoreservice/ob_log_archive_piece_mgr.cpp
index 992fe55bd9..0754dbdf7f 100644
--- a/src/logservice/restoreservice/ob_log_archive_piece_mgr.cpp
+++ b/src/logservice/restoreservice/ob_log_archive_piece_mgr.cpp
@@ -1746,5 +1746,460 @@ int ObLogArchivePieceContext::get_ls_meta_file_in_array_(const SCN &timestamp,
   }
   return ret;
 }
+
+ObLogRawPathPieceContext::ObLogRawPathPieceContext() :
+  is_inited_(false),
+  id_(),
+  array_(),
+  index_(0),
+  file_id_(0),
+  min_file_id_(0),
+  max_file_id_(0),
+  min_lsn_(),
+  max_lsn_(),
+  file_offset_(0)
+{}
+
+ObLogRawPathPieceContext::~ObLogRawPathPieceContext()
+{
+  reset();
+}
+
+void ObLogRawPathPieceContext::reset()
+{
+  is_inited_ = false;
+  id_.reset();
+  array_.reset();
+  index_ = 0;
+  file_id_ = 0;
+  min_file_id_ = 0;
+  max_file_id_ = 0;
+  min_lsn_.reset();
+  max_lsn_.reset();
+  file_offset_ = 0;
+}
+
+int ObLogRawPathPieceContext::init(const ObLSID &id, const DirArray &array)
+{
+  int ret = OB_SUCCESS;
+  reset();
+  if (OB_UNLIKELY(!id.is_valid() || array.empty())) {
+    ret = OB_INVALID_ARGUMENT;
+    CLOG_LOG(WARN, "invalid argument", K(ret), K(id));
+  } else if (OB_FAIL(array_.assign(array))) {
+    CLOG_LOG(WARN, "fail to assign array", K(id), K(array));
+  } else {
+    is_inited_ = true;
+    id_ = id;
+  }
+  return ret;
+}
+
+bool ObLogRawPathPieceContext::is_valid() const
+{
+  return is_inited_
+      && id_.is_valid()
+      && !array_.empty()
+      && index_ >= 0
+      && file_id_ >= 0
+      && min_file_id_ >= 0
+      && max_file_id_ >= min_file_id_
+      && file_offset_ >= 0;
+}
+
+int ObLogRawPathPieceContext::deep_copy_to(ObLogRawPathPieceContext &other)
+{
+  int ret = OB_SUCCESS;
+  if (OB_FAIL(other.array_.assign(array_))) {
+    CLOG_LOG(WARN, "deep copy failed", K(ret), K_(id));
+  } else {
+    other.is_inited_ = is_inited_;
+    other.id_ = id_;
+    other.index_ = index_;
+    other.file_id_ = file_id_;
+    other.min_file_id_ = min_file_id_;
+    other.max_file_id_ = max_file_id_;
+    other.min_lsn_ = min_lsn_;
+    other.max_lsn_ = max_lsn_;
+    other.file_offset_ = file_offset_;
+  }
+  return ret;
+}
+
+int ObLogRawPathPieceContext::get_cur_uri(char *buf, const int64_t buf_size)
+{
+  int ret = OB_SUCCESS;
+  if (OB_UNLIKELY(OB_ISNULL(buf) || buf_size <= 0)) {
+    ret = OB_INVALID_ARGUMENT;
+    CLOG_LOG(WARN, "invalid argument when get cur uri ptr");
+  } else if (OB_UNLIKELY(array_.empty() || index_ < 0 || index_ > array_.count() - 1)) {
+    ret = OB_ERR_UNEXPECTED;
+    CLOG_LOG(WARN, "rawpath is invalid", KPC(this));
+  } else if (OB_FAIL(databuff_printf(buf, buf_size, "%s", array_[index_].first.ptr()))) {
+    CLOG_LOG(WARN, "fail to print uri ptr");
+  }
+  return ret;
+}
+
+int ObLogRawPathPieceContext::get_cur_storage_info(char *buf, const int64_t buf_size)
+{
+  int ret = OB_SUCCESS;
+  if (OB_UNLIKELY(OB_ISNULL(buf) || buf_size <= 0)) {
+    ret = OB_INVALID_ARGUMENT;
+    CLOG_LOG(WARN, "invalid argument when get cur storage info ptr");
+  } else if (OB_UNLIKELY(array_.empty() || index_ < 0 || index_ > array_.count() - 1)) {
+    ret = OB_ERR_UNEXPECTED;
+    CLOG_LOG(WARN, "rawpath is invalid when get cur storage info", KPC(this));
+  } else if (OB_FAIL(databuff_printf(buf, buf_size, "%s", array_[index_].second.ptr()))) {
+    CLOG_LOG(WARN, "fail to print storage ptr");
+  }
+  return ret;
+}
+
+int ObLogRawPathPieceContext::get_file_id(int64_t &file_id)
+{
+  int ret = OB_SUCCESS;
+  if (OB_UNLIKELY(! is_valid())) {
+    ret = OB_ERR_UNEXPECTED;
+    CLOG_LOG(WARN, "array may be invalid");
+  } else {
+    file_id = file_id_;
+  }
+  return ret;
+}
+
+int ObLogRawPathPieceContext::locate_precise_piece(palf::LSN &fetch_lsn)
+{
+  int ret = OB_SUCCESS;
+  bool file_exist = false;
+  if (OB_UNLIKELY(array_.empty())) {
+    ret = OB_INVALID_ARGUMENT;
+    CLOG_LOG(WARN, "dest array is empty", K(ret), K_(id), KPC(this));
+  } else if (piece_index_match(fetch_lsn)) {
+    CLOG_LOG(TRACE, "piece index matched", K_(id), K(fetch_lsn));
+  } else {
+    int64_t min_file_id = 0;
+    int64_t max_file_id = 0;
+    bool locate = false;
+    for (int64_t curr_index = index_; OB_SUCC(ret) && curr_index < array_.count(); curr_index++) {  // piece list is sorted
+      ObString uri(array_[curr_index].first.ptr());
+      share::ObBackupStorageInfo storage_info;
+      if (curr_index != index_) {  // reset if switching piece
+        min_lsn_.reset();
+        max_lsn_.reset();
+      }
+      if (OB_FAIL(storage_info.set(uri.ptr(), array_[curr_index].second.ptr()))) {
+        CLOG_LOG(WARN, "fail to set storage info", K(ret), K_(id), KPC(this));
+      } else if (OB_FAIL(list_dir_files(uri, &storage_info, min_file_id, max_file_id))) {
+        CLOG_LOG(WARN, "list dir files failed", K(ret), K_(id), KPC(this));
+      } else if (file_id_ >= min_file_id && file_id_ <= max_file_id) {
+        if (OB_FAIL(get_ls_piece_info(curr_index, file_exist))) {
+          CLOG_LOG(WARN, "fail to get piece info", K(ret), K_(id), KPC(this), K(file_exist), K(curr_index));
+        } else if ((min_lsn_.is_valid() && fetch_lsn < min_lsn_) || (max_lsn_.is_valid() && fetch_lsn >= max_lsn_)) {
+          CLOG_LOG(TRACE, "piece lsn not match", K_(id), K(fetch_lsn), K_(min_lsn), K_(max_lsn), K(curr_index));
+        } else {
+          locate = true;
+          index_ = curr_index;
+          min_file_id_ = min_file_id;
+          max_file_id_ = max_file_id;
+          CLOG_LOG(TRACE, "raw path piece matched", K_(id), K(curr_index), K(fetch_lsn), KPC(this), K(file_exist));
+          break;
+        }
+      }
+    }
+
+    if (OB_SUCC(ret) && locate && ! file_exist && index_ == array_.count() - 1 && fetch_lsn >= max_lsn_) {
+      ret = OB_ITER_END;
+      CLOG_LOG(TRACE, "fetch to piece list end", K_(index), K(fetch_lsn), KPC(this));
+    }
+
+    if (OB_SUCC(ret) && ! locate) {
+      ret = OB_ENTRY_NOT_EXIST;
+      CLOG_LOG(INFO, "locate failed", K_(id), K(fetch_lsn), KPC(this));
+    }
+  }
+  return ret;
+}
+
+int ObLogRawPathPieceContext::list_dir_files(const ObString &base,
+    const share::ObBackupStorageInfo *storage_info,
+    int64_t &min_file_id,
+    int64_t &max_file_id)
+{
+  int ret = OB_SUCCESS;
+  share::ObBackupPath prefix;
+  if (OB_FAIL(ObArchivePathUtil::build_restore_prefix(base.ptr(), id_, prefix))) {
+    CLOG_LOG(WARN, "fail to build_restore_prefix", K(base), K_(id));
+  } else {
+    ObString uri(prefix.get_obstr());
+    ret = archive::ObArchiveFileUtils::get_file_range(uri, storage_info, min_file_id, max_file_id);
+    if (OB_ENTRY_NOT_EXIST == ret) {
+      CLOG_LOG(WARN, "file may not exist", K(ret), K(base), K_(id));
+      ret = OB_SUCCESS;
+    }
+  }
+  return ret;
+}
+
+int ObLogRawPathPieceContext::get_ls_piece_info(const int64_t curr_index, bool &exist)
+{
+  int ret = OB_SUCCESS;
+  const ObLSID ls_id(id_);
+  share::ObArchiveStore archive_store;
+  share::ObSingleLSInfoDesc desc;
+  ObBackupDest dest;
+  exist = false;
+  if (OB_UNLIKELY(array_.empty() || curr_index > array_.count() - 1)) {
+    ret = OB_INVALID_ARGUMENT;
+    CLOG_LOG(WARN, "invalid argument to get ls inner piece info");
+  } else if (OB_FAIL(dest.set(array_[curr_index].first.ptr(), array_[curr_index].second.ptr()))) {
+    CLOG_LOG(WARN, "fail to set backup dest", K_(id), KPC(this));
+  } else if (OB_FAIL(archive_store.init(dest))) {
+    CLOG_LOG(WARN, "archive store init failed", K(ret), K_(id));
+  } else if (OB_FAIL(archive_store.read_single_ls_info(ls_id, desc))
+      && OB_BACKUP_FILE_NOT_EXIST != ret) {
+    CLOG_LOG(WARN, "get single piece file failed", K(ret), K_(id));
+  } else if (OB_BACKUP_FILE_NOT_EXIST == ret) {
+    exist = false;
+    ret = OB_SUCCESS;
+    CLOG_LOG(INFO, "ls not exist in cur piece", K_(id));
+  } else if (OB_UNLIKELY(!desc.is_valid())) {
+    ret = OB_INVALID_DATA;
+    CLOG_LOG(WARN, "invalid single piece file", K(ret), K_(id), K(desc));
+  } else {
+    exist = true;
+    min_lsn_ = palf::LSN(desc.min_lsn_);
+    max_lsn_ = palf::LSN(desc.max_lsn_);
+  }
+  return ret;
+}
+
+bool ObLogRawPathPieceContext::piece_index_match(const palf::LSN &lsn) const
+{
+  bool bret = true;
+  if (min_lsn_.is_valid() && lsn.is_valid() && lsn < min_lsn_) {
+    bret = false;
+  } else if (max_lsn_.is_valid() && lsn.is_valid() && lsn >= max_lsn_) {
+    bret = false;
+  }
+  CLOG_LOG(INFO, "piece_index_match", KPC(this));
+  return bret
+      && index_ >= 0
+      && min_file_id_ <= file_id_
+      && max_file_id_ >= file_id_;
+}
+
+int ObLogRawPathPieceContext::cal_lsn_to_file_id(const palf::LSN &lsn)
+{
+  file_id_ = archive::cal_archive_file_id(lsn, palf::PALF_BLOCK_SIZE);
+  return OB_SUCCESS;
+}
+
+int ObLogRawPathPieceContext::update_file(
+    const int64_t file_id,
+    const int64_t file_offset,
+    const palf::LSN &lsn)
+{
+  int ret = OB_SUCCESS;
+  if (OB_UNLIKELY(file_id > max_file_id_ || file_id < min_file_id_)) {
+    ret = OB_INVALID_ARGUMENT;
+    CLOG_LOG(WARN, "invalid argument", K(ret), K(file_id), K(file_offset), K(lsn));
+  } else {
+    file_id_ = file_id;
+    file_offset_ = file_offset;
+    max_lsn_ = lsn;
+  }
+  return ret;
+}
+
+int ObLogRawPathPieceContext::update_max_lsn(const palf::LSN &lsn)
+{
+  int ret = OB_SUCCESS;
+  max_lsn_ = lsn;
+  return ret;
+}
+
+int ObLogRawPathPieceContext::update_min_lsn(const palf::LSN &lsn)
+{
+  int ret = OB_SUCCESS;
+  min_lsn_ = lsn;
+  return ret;
+}
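cal_lsn_to_file_id() above leans on the fixed LSN -> file_id rule used by archive; a self-contained restatement (the 1-based numbering is my reading of archive::cal_archive_file_id and should be treated as an assumption):

    // Raw-path archive files mirror palf blocks: each file covers block_size
    // bytes of LSN space, so the start file of any LSN is pure arithmetic.
    int64_t cal_file_id(const palf::LSN &lsn, const int64_t block_size)
    {
      return static_cast<int64_t>(lsn.val_ / block_size) + 1;  // assumed 1-based
    }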
+
+int ObLogRawPathPieceContext::get_max_archive_log(palf::LSN &lsn, SCN &scn)
+{
+  int ret = OB_SUCCESS;
+  ObLogRawPathPieceContext origin_context;
+  if (OB_FAIL(deep_copy_to(origin_context))) {
+    CLOG_LOG(WARN, "piece context deep copy failed", K_(id), KPC(this));
+  } else if (OB_FAIL(get_max_archive_log_(origin_context, lsn, scn))) {
+    CLOG_LOG(WARN, "get max archive log failed", K(ret), K_(id), KPC(this));
+  } else {
+    CLOG_LOG(INFO, "get max archive log succ", K(ret), K_(id), K(lsn), K(scn), KPC(this));
+  }
+  return ret;
+}
+
+int ObLogRawPathPieceContext::get_max_archive_log_(const ObLogRawPathPieceContext &origin, palf::LSN &archive_lsn, SCN &archive_scn)
+{
+  int ret = OB_SUCCESS;
+  const int64_t max_index = array_.count() - 1;  // assumes the rawpath list is sorted
+  ObString uri(array_[max_index].first.ptr());
+  ObString storage_info_ptr(array_[max_index].second.ptr());
+  share::ObBackupDest dest;
+  share::ObBackupStorageInfo storage_info;
+  int64_t min_file_id = 0;
+  int64_t max_file_id = 0;
+  share::ObBackupPath prefix;
+  share::ObArchiveStore archive_store;
+  share::ObSingleLSInfoDesc ls_info_desc;
+
+  if (OB_FAIL(dest.set(uri.ptr(), array_[max_index].second.ptr()))) {
+    CLOG_LOG(WARN, "fail to set dest", K_(id), KPC(this));
+  } else if (OB_FAIL(storage_info.set(uri.ptr(), storage_info_ptr.ptr()))) {
+    CLOG_LOG(WARN, "fail to set storage info", K(ret), K_(id), KPC(this));
+  } else if (OB_FAIL(ObArchivePathUtil::build_restore_prefix(uri.ptr(), id_, prefix))) {
+    CLOG_LOG(WARN, "fail to build_restore_prefix", K(uri), K_(id));
+  } else if (OB_FAIL(archive::ObArchiveFileUtils::get_file_range(ObString(prefix.get_obstr()), &storage_info, min_file_id, max_file_id))) {
+    CLOG_LOG(WARN, "fail to get file range", K(ret), K_(id));
+  } else if (OB_FAIL(archive_store.init(dest))) {
+    CLOG_LOG(WARN, "fail to init archive store", K(ret), K_(id));
+  } else if (OB_FAIL(archive_store.read_single_ls_info(id_, ls_info_desc))
+      && OB_BACKUP_FILE_NOT_EXIST != ret) {
+    CLOG_LOG(WARN, "get single ls info failed", K(ret), K_(id));
+  } else if (OB_BACKUP_FILE_NOT_EXIST == ret) {  // active piece
+    CLOG_LOG(WARN, "ls not exist in cur piece", K_(id));
+    ret = get_max_log_in_file_(origin, max_file_id_, archive_lsn, archive_scn);
+  } else if (OB_UNLIKELY(!ls_info_desc.is_valid())) {
+    ret = OB_INVALID_DATA;
+    CLOG_LOG(WARN, "invalid single piece file", K(ret), K_(id), K(ls_info_desc));
+  } else {
+    archive_scn = ls_info_desc.checkpoint_scn_;
+    archive_lsn = palf::LSN(ls_info_desc.max_lsn_);
+  }
+  return ret;
+}
+
+int ObLogRawPathPieceContext::get_max_log_in_file_(const ObLogRawPathPieceContext &origin,
+    const int64_t file_id,
+    palf::LSN &lsn,
+    SCN &scn)
+{
+  int ret = OB_SUCCESS;
+  char *buf = NULL;
+  const int64_t buf_size = archive::ARCHIVE_FILE_DATA_BUF_SIZE;
+  const int64_t header_size = archive::ARCHIVE_FILE_HEADER_SIZE;
+  int64_t read_size = 0;
+  palf::MemoryStorage mem_storage;
+  palf::MemPalfGroupBufferIterator iter;
+  bool exist = false;
+
+  const int64_t file_offset = 0;
+  palf::LSN base_lsn = palf::LSN(palf::LOG_INVALID_LSN_VAL);
+  common::ObTimeGuard guard("get_max_log_in_file", 1000 * 1000L);
+
+  if (OB_ISNULL(buf = (char *)mtl_malloc(buf_size, "ArcFile"))) {
+    ret = OB_ALLOCATE_MEMORY_FAILED;
+    CLOG_LOG(WARN, "alloc memory failed", K(ret), K_(id));
+  } else if (OB_FAIL(read_part_file_(file_id, file_offset, buf, buf_size, read_size))) {
+    CLOG_LOG(WARN, "read part file failed", K(ret), K_(id));
+  } else if (OB_FAIL(extract_file_base_lsn_(buf, buf_size, base_lsn))) {
+    CLOG_LOG(WARN, "extract base_lsn failed", KPC(this));
+  } else {
+    guard.click("read_data");
+    const char *log_buf = buf + header_size;
+    const int64_t log_buf_size = read_size - header_size;
+    if (OB_FAIL(mem_storage.init(base_lsn))) {
+      CLOG_LOG(WARN, "MemoryStorage init failed", K(ret), K(base_lsn), KPC(this));
+    } else if (OB_FAIL(mem_storage.append(log_buf, log_buf_size))) {
+      CLOG_LOG(WARN, "MemoryStorage append failed", K(log_buf), K(log_buf_size),
+          K(file_id), K(file_offset), K(id_));
+    } else if (OB_FAIL(iter.init(base_lsn, GetFileEndLSN(), &mem_storage))) {
+      CLOG_LOG(WARN, "iter init failed", K(id_), K(base_lsn), K(log_buf), K(log_buf_size));
+    } else {
+      palf::LogGroupEntry entry;
+      while (OB_SUCC(ret)) {
+        if (OB_FAIL(iter.next())) {
+          if (OB_ITER_END != ret) {
+            CLOG_LOG(WARN, "iter next failed", K(ret), KPC(this), K(iter));
+          }
+        } else if (OB_FAIL(iter.get_entry(entry, lsn))) {
+          CLOG_LOG(WARN, "get entry failed", K(ret));
+        } else if (! entry.check_integrity()) {
+          ret = OB_INVALID_DATA;
+          CLOG_LOG(WARN, "invalid data", K(ret), KPC(this), K(iter), K(entry));
+        } else {
+          lsn = lsn + entry.get_serialize_size();
+          scn = entry.get_scn();
+          exist = true;
+        }
+      }
+      if (OB_ITER_END == ret) {
+        ret = OB_SUCCESS;
+        if (exist) {
+          CLOG_LOG(INFO, "get max archive log through log iteration from archive log",
+              K(lsn), K(scn), K(base_lsn), K(origin), KPC(this));
+        }
+      }
+      guard.click("iterate_log");
+    }
+  }
+  if (NULL != buf) {
+    mtl_free(buf);
+    buf = NULL;
+  }
+  return ret;
+}
+
+int ObLogRawPathPieceContext::read_part_file_(const int64_t file_id,
+    const int64_t file_offset,
+    char *buf,
+    const int64_t buf_size,
+    int64_t &read_size)
+{
+  int ret = OB_SUCCESS;
+  share::ObBackupPath path;
+  char uri_str[OB_MAX_BACKUP_DEST_LENGTH + 1] = { 0 };
+  char storage_info_str[OB_MAX_BACKUP_STORAGE_INFO_LENGTH] = { 0 };
+  share::ObBackupStorageInfo storage_info;
+  if (OB_FAIL(get_cur_uri(uri_str, sizeof(uri_str)))) {
+    CLOG_LOG(WARN, "fail to get current uri ptr", KPC(this));
+  } else if (OB_FAIL(get_cur_storage_info(storage_info_str, sizeof(storage_info_str)))) {
+    CLOG_LOG(WARN, "fail to get current storage info ptr", KPC(this));
+  } else if (OB_FAIL(ObArchivePathUtil::build_restore_path(uri_str, id_, file_id, path))) {
+    CLOG_LOG(WARN, "get ls archive file path failed", K(ret), KPC(this));
+  } else if (OB_FAIL(storage_info.set(uri_str, storage_info_str))) {
+    CLOG_LOG(WARN, "fail to set storage info", K(ret));
+  } else if (OB_FAIL(archive::ObArchiveFileUtils::range_read(path.get_ptr(),
+      &storage_info, buf, buf_size, file_offset, read_size))) {
+    CLOG_LOG(WARN, "range read failed", K(ret), K(path));
+  }
+  return ret;
+}
+
+int ObLogRawPathPieceContext::extract_file_base_lsn_(const char *buf,
+    const int64_t buf_size,
+    palf::LSN &base_lsn)
+{
+  int ret = OB_SUCCESS;
+  int64_t pos = 0;
+  archive::ObArchiveFileHeader header;
+  if (OB_ISNULL(buf) || OB_UNLIKELY(buf_size < archive::ARCHIVE_FILE_HEADER_SIZE)) {
+    ret = OB_INVALID_ARGUMENT;
+    CLOG_LOG(WARN, "invalid buffer", K(buf), K(buf_size), K(id_));
+  } else if (OB_FAIL(header.deserialize(buf, buf_size, pos))) {
+    CLOG_LOG(WARN, "archive file header deserialize failed", K(buf), K(buf_size));
+  } else if (OB_UNLIKELY(! header.is_valid())) {
+    ret = OB_INVALID_DATA;
+    CLOG_LOG(WARN, "archive file header not valid", K(header), K(buf), K(buf_size));
+  } else {
+    base_lsn = palf::LSN(header.start_lsn_);
+  }
+  return ret;
+}
+
 } // namespace logservice
 } // namespace oceanbase
diff --git a/src/logservice/restoreservice/ob_log_archive_piece_mgr.h b/src/logservice/restoreservice/ob_log_archive_piece_mgr.h
index 3aefdecb06..1cd15a07ff 100644
--- a/src/logservice/restoreservice/ob_log_archive_piece_mgr.h
+++ b/src/logservice/restoreservice/ob_log_archive_piece_mgr.h
@@ -365,6 +365,73 @@ private:
   share::ObBackupDest archive_dest_;
 };
 
+class ObLogRawPathPieceContext
+{
+  class GetFileEndLSN
+  {
+  public:
+    palf::LSN operator()() const { return palf::LSN(palf::LOG_MAX_LSN_VAL); }
+  };
+
+public:
+  ObLogRawPathPieceContext();
+  ~ObLogRawPathPieceContext();
+
+public:
+  int init(const share::ObLSID &id, const DirArray &array);
+  void reset();
+  bool is_valid() const;
+  int deep_copy_to(ObLogRawPathPieceContext &other);
+  int get_cur_uri(char *buf, const int64_t buf_size);
+  int get_cur_storage_info(char *buf, const int64_t buf_size);
+  int get_file_id(int64_t &file_id);
+  int locate_precise_piece(palf::LSN &fetch_lsn);
+  int list_dir_files(const ObString &base,
+      const share::ObBackupStorageInfo *storage_info,
+      int64_t &min_file_id,
+      int64_t &max_file_id);
+  int get_ls_piece_info(const int64_t curr_index, bool &exist);
+  int get_max_lsn(palf::LSN &lsn);
+  bool piece_index_match(const palf::LSN &lsn) const;
+  int cal_lsn_to_file_id(const palf::LSN &lsn);
+  int update_file(const int64_t file_id,
+      const int64_t file_offset,
+      const palf::LSN &lsn);
+  int update_max_lsn(const palf::LSN &lsn);
+  int update_min_lsn(const palf::LSN &lsn);
+  int get_max_archive_log(palf::LSN &lsn, share::SCN &scn);
+
+  TO_STRING_KV(K_(is_inited), K_(id), K_(array), K_(index), K_(file_id),
+      K_(min_file_id), K_(max_file_id), K_(min_lsn), K_(max_lsn), K_(file_offset));
+
+private:
+  int get_max_archive_log_(const ObLogRawPathPieceContext &origin, palf::LSN &lsn, share::SCN &scn);
+  int get_max_log_in_file_(const ObLogRawPathPieceContext &origin,
+      const int64_t file_id,
+      palf::LSN &lsn,
+      share::SCN &scn);
+  int extract_file_base_lsn_(const char *buf,
+      const int64_t buf_size,
+      palf::LSN &base_lsn);
+  int read_part_file_(const int64_t file_id,
+      const int64_t file_offset,
+      char *buf,
+      const int64_t buf_size,
+      int64_t &read_size);
+
+private:
+  bool is_inited_;
+  share::ObLSID id_;
+  DirArray array_;       // piece list
+  int64_t index_;        // current read piece index
+  int64_t file_id_;      // current read file id
+  int64_t min_file_id_;  // min file id in current piece
+  int64_t max_file_id_;  // max file id in current piece
+  palf::LSN min_lsn_;    // min lsn in current piece
+  palf::LSN max_lsn_;    // max lsn in current piece
+  int64_t file_offset_;  // max read offset within the current file of this piece
+};
+
 } // namespace logservice
 } // namespace oceanbase
 #endif /* OCEANBASE_LOGSERVICE_OB_LOG_ARCHIVE_PIECE_MGR_H_ */
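How a reader is expected to drive the new context, as a hedged end-to-end sketch (ls_id, dir_array and start_lsn are assumed inputs; the calls are the ones declared above):

    int ret = OB_SUCCESS;
    ObLogRawPathPieceContext ctx;
    int64_t file_id = 0;
    char uri[OB_MAX_BACKUP_DEST_LENGTH + 1] = { 0 };
    if (OB_SUCC(ctx.init(ls_id, dir_array))              // sorted piece list
        && OB_SUCC(ctx.cal_lsn_to_file_id(start_lsn))    // LSN -> file id rule
        && OB_SUCC(ctx.locate_precise_piece(start_lsn))  // pick the piece holding it
        && OB_SUCC(ctx.get_file_id(file_id))
        && OB_SUCC(ctx.get_cur_uri(uri, sizeof(uri)))) {
      // read file `file_id` under `uri`, then ctx.update_max_lsn() as logs advance
    }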
diff --git a/src/logservice/restoreservice/ob_log_restore_archive_driver.cpp b/src/logservice/restoreservice/ob_log_restore_archive_driver.cpp
index a0c2f70cc8..f6fc9d1a5c 100644
--- a/src/logservice/restoreservice/ob_log_restore_archive_driver.cpp
+++ b/src/logservice/restoreservice/ob_log_restore_archive_driver.cpp
@@ -93,6 +93,7 @@ int ObLogRestoreArchiveDriver::do_fetch_log_(ObLS &ls)
       version, max_fetch_lsn, last_fetch_ts, task_count))) {
     LOG_WARN("check need schedule failed", K(ret), K(id));
   } else if (! need_schedule) {
+    LOG_TRACE("no need_schedule in do_fetch_log", K(need_schedule));
   } else if (OB_FAIL(get_fetch_log_base_lsn_(ls, max_fetch_lsn, last_fetch_ts, pre_scn, lsn))) {
     LOG_WARN("get fetch log base lsn failed", K(ret), K(id));
   } else if (OB_FAIL(submit_fetch_log_task_(ls, pre_scn, lsn, task_count, proposal_id, version))) {
@@ -126,6 +127,7 @@ int ObLogRestoreArchiveDriver::check_need_schedule_(ObLS &ls,
   } else if (OB_FAIL(restore_handler->need_schedule(need_schedule, proposal_id, context))) {
     LOG_WARN("get fetch log context failed", K(ret), K(ls));
   } else if (! need_schedule) {
+    LOG_TRACE("no need_schedule in check_need_schedule_", K(need_schedule));
     // do nothing
   } else if (context.max_fetch_scn_ >= global_recovery_scn_) {
     need_schedule = false;
@@ -136,10 +138,12 @@ int ObLogRestoreArchiveDriver::check_need_schedule_(ObLS &ls,
   } else if (FALSE_IT(concurrency = std::min(fetch_log_worker_count, MAX_LS_FETCH_LOG_TASK_CONCURRENCY))) {
   } else if (context.issue_task_num_ >= concurrency) {
     need_schedule = false;
+    LOG_TRACE("concurrency not enough in check_need_schedule_", K(context.issue_task_num_), K(concurrency));
   } else if (OB_FAIL(check_need_delay_(ls.get_ls_id(), need_delay))) {
     LOG_WARN("check need delay failed", K(ret), K(ls));
   } else if (need_delay) {
     need_schedule = false;
+    LOG_TRACE("need_delay in check_need_schedule_", K(need_delay));
   } else {
     version = context.issue_version_;
     lsn = context.max_submit_lsn_;
diff --git a/src/logservice/restoreservice/ob_log_restore_define.h b/src/logservice/restoreservice/ob_log_restore_define.h
index 946d7b36d8..dce6ce2025 100644
--- a/src/logservice/restoreservice/ob_log_restore_define.h
+++ b/src/logservice/restoreservice/ob_log_restore_define.h
@@ -21,6 +21,7 @@
 #include "lib/utility/ob_print_utils.h"
 #include "logservice/palf/lsn.h"  // LSN
 #include "share/ob_define.h"
+#include "share/backup/ob_backup_struct.h"  // ObBackupPathString
 namespace oceanbase
 {
 namespace logservice
@@ -28,6 +29,9 @@ namespace logservice
 const int64_t MAX_FETCH_LOG_BUF_LEN = 4 * 1024 * 1024L;
 const int64_t MAX_LS_FETCH_LOG_TASK_CONCURRENCY = 4;
 
+typedef std::pair<share::ObBackupPathString, share::ObBackupPathString> DirInfo;
+typedef common::ObSEArray<std::pair<share::ObBackupPathString, share::ObBackupPathString>, 1> DirArray;
+
 struct ObLogRestoreErrorContext
 {
   enum class ErrorType
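For orientation, a hand-built DirArray equivalent to what add_rawpath_source_() later in this diff produces from a source string (the literal path is a placeholder; the empty second field assumes a file:// dest with no access info):

    logservice::DirArray dir_array;
    logservice::DirInfo dir_info;
    int ret = OB_SUCCESS;
    dir_info.first = "file:///backup/archive/piece_1";  // piece root path
    dir_info.second = "";                               // serialized storage info
    if (OB_FAIL(dir_array.push_back(dir_info))) {
      // handle push failure
    }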
diff --git a/src/logservice/restoreservice/ob_log_restore_handler.cpp b/src/logservice/restoreservice/ob_log_restore_handler.cpp
index 2c6551a4d2..fb75705a06 100644
--- a/src/logservice/restoreservice/ob_log_restore_handler.cpp
+++ b/src/logservice/restoreservice/ob_log_restore_handler.cpp
@@ -47,6 +47,9 @@
 #include "share/restore/ob_log_restore_source.h"
 #include "storage/ls/ob_ls_get_mod.h"
 #include "storage/tx_storage/ob_ls_handle.h"
+#include "share/backup/ob_archive_path.h"  // ObArchivePathUtil
+#include "src/share/backup/ob_archive_store.h"  // ObArchiveStore
+#include "logservice/archiveservice/ob_archive_file_utils.h"  // ObArchiveFileUtils
 
 namespace oceanbase
 {
@@ -54,6 +57,7 @@ using namespace palf;
 namespace logservice
 {
 using namespace oceanbase::share;
+using namespace oceanbase::archive;
 const char *restore_comment_str[static_cast<int64_t>(RestoreSyncStatus::MAX_RESTORE_SYNC_STATUS)] = {
   "Invalid restore status",
@@ -241,11 +245,8 @@ int ObLogRestoreHandler::add_source(logservice::DirArray &array, const SCN &end_
   if (IS_NOT_INIT) {
     ret = OB_NOT_INIT;
     CLOG_LOG(WARN, "ObLogRestoreHandler not init", K(ret), KPC(this));
-  /* } else if (! is_strong_leader(role_)) { // not leader, just skip
-    ret = OB_NOT_MASTER;
-  */
   } else if (OB_UNLIKELY(array.empty() || !end_scn.is_valid())) {
     ret = OB_INVALID_ARGUMENT;
     CLOG_LOG(WARN, "invalid argument", K(ret), K(array), K(end_scn), KPC(this));
@@ -727,6 +728,11 @@ int ObLogRestoreHandler::check_restore_to_newest(share::SCN &end_scn, share::SCN &archive_scn)
     share::ObRestoreSourceServiceAttr *service_attr = NULL;
     service_source->get(service_attr, restore_scn);
     ret = check_restore_to_newest_from_service_(*service_attr, end_scn, archive_scn);
+  } else if (share::is_raw_path_log_source_type(source->get_source_type())) {
+    ObRemoteRawPathParent *rawpath_source = dynamic_cast<ObRemoteRawPathParent *>(source);
+    ObLogRawPathPieceContext *rawpath_ctx = NULL;
+    rawpath_source->get(rawpath_ctx, restore_scn);
+    ret = check_restore_to_newest_from_rawpath_(*rawpath_ctx, end_lsn, end_scn, archive_scn);
   } else {
     ret = OB_NOT_SUPPORTED;
   }
@@ -944,6 +950,35 @@ int ObLogRestoreHandler::check_restore_to_newest_from_archive_(
   return ret;
 }
 
+int ObLogRestoreHandler::check_restore_to_newest_from_rawpath_(ObLogRawPathPieceContext &rawpath_ctx,
+    const palf::LSN &end_lsn, const share::SCN &end_scn, share::SCN &archive_scn)
+{
+  int ret = OB_SUCCESS;
+  const ObLSID ls_id(id_);
+
+  if (! rawpath_ctx.is_valid()) {
+    ret = OB_INVALID_ARGUMENT;
+    CLOG_LOG(WARN, "rawpath ctx is invalid");
+  } else {
+    palf::LSN archive_lsn;
+    if (OB_FAIL(rawpath_ctx.get_max_archive_log(archive_lsn, archive_scn))) {
+      CLOG_LOG(WARN, "fail to get max archive log", K_(id));
+    } else if (archive_lsn <= end_lsn && archive_scn == SCN::min_scn()) {
+      archive_scn = end_scn;
+      CLOG_LOG(INFO, "rewrite archive_scn while end_lsn equals to archive_lsn and archive_scn not got",
+          K(id_), K(archive_lsn), K(archive_scn), K(end_lsn), K(end_scn));
+    } else if (end_scn < archive_scn) {
+      CLOG_LOG(INFO, "end_scn smaller than archive_scn", K_(id), K(end_scn));
+    } else if (end_lsn < archive_lsn) {
+      ret = OB_EAGAIN;
+      CLOG_LOG(INFO, "end_lsn smaller than archive_lsn", K_(id), K(end_lsn));
+    } else {
+      CLOG_LOG(INFO, "check_restore_to_newest succ", K(id_), K(archive_scn), K(end_scn), K(end_lsn));
+    }
+  }
+  return ret;
+}
+
 int ObLogRestoreHandler::check_if_ls_gc_(bool &done)
 {
   int ret = OB_SUCCESS;
diff --git a/src/logservice/restoreservice/ob_log_restore_handler.h b/src/logservice/restoreservice/ob_log_restore_handler.h
index 4f8ee1de48..c434d38f76 100644
--- a/src/logservice/restoreservice/ob_log_restore_handler.h
+++ b/src/logservice/restoreservice/ob_log_restore_handler.h
@@ -258,6 +258,8 @@ private:
       const share::SCN &end_scn, share::SCN &archive_scn);
   int check_restore_to_newest_from_archive_(ObLogArchivePieceContext &piece_context,
       const palf::LSN &end_lsn, const share::SCN &end_scn, share::SCN &archive_scn);
+  int check_restore_to_newest_from_rawpath_(ObLogRawPathPieceContext &rawpath_ctx,
+      const palf::LSN &end_lsn, const share::SCN &end_scn, share::SCN &archive_scn);
   bool restore_to_end_unlock_() const;
   int get_offline_scn_(share::SCN &scn);
   void deep_copy_source_(ObRemoteSourceGuard &guard);
diff --git a/src/logservice/restoreservice/ob_remote_data_generator.cpp b/src/logservice/restoreservice/ob_remote_data_generator.cpp
index 75c07e3a8d..a31965692f 100644
--- a/src/logservice/restoreservice/ob_remote_data_generator.cpp
+++ b/src/logservice/restoreservice/ob_remote_data_generator.cpp
@@ -25,6 +25,7 @@
 #include "ob_log_restore_rpc.h"  // proxy
 #include "share/backup/ob_backup_struct.h"
 #include "share/backup/ob_archive_path.h"  // ObArchivePathUtil
+#include "src/share/backup/ob_archive_store.h"  // ObArchiveStore
 
 namespace oceanbase
 {
@@ -90,6 +91,44 @@ int RemoteDataGenerator::update_next_fetch_lsn_(const palf::LSN &lsn)
   }
   return ret;
 }
+
+int RemoteDataGenerator::read_file_(const ObString &base,
+    const share::ObBackupStorageInfo *storage_info,
+    const share::ObLSID &id,
+    const int64_t file_id,
+    const int64_t offset,
+    char *data,
+    const int64_t data_len,
+    int64_t &data_size)
+{
+  int ret = OB_SUCCESS;
+  share::ObBackupPath path;
+  if (OB_FAIL(ObArchivePathUtil::build_restore_path(base.ptr(), id, file_id, path))) {
+    LOG_WARN("build restore path failed", K(ret));
+  } else {
+    ObString uri(path.get_obstr());
+    char storage_info_cstr[OB_MAX_BACKUP_STORAGE_INFO_LENGTH] = {'\0'};
+    int64_t real_size = 0;
+    common::ObObjectStorageInfo storage_info_base;
+    if (OB_FAIL(storage_info_base.assign(*storage_info))) {
+      OB_LOG(WARN, "fail to assign storage info base!", K(ret), KP(storage_info));
+    } else if (OB_FAIL(storage_info_base.get_storage_info_str(storage_info_cstr, OB_MAX_BACKUP_STORAGE_INFO_LENGTH))) {
+      LOG_WARN("get_storage_info_str failed", K(ret), K(uri), K(storage_info));
+    } else {
+      ObString storage_info_ob_str(storage_info_cstr);
+      if (OB_FAIL(log_ext_handler_->pread(uri, storage_info_ob_str, offset, data, data_len, real_size))) {
+        LOG_WARN("read file failed", K(ret), K(uri), K(storage_info));
+      } else if (0 == real_size) {
+        ret = OB_ITER_END;
+        LOG_INFO("read no data, need retry", K(ret), K(uri), K(storage_info), K(offset), K(real_size));
+      } else {
+        data_size = real_size;
+      }
+    }
+  }
+  return ret;
+}
+
 // only handle orignal buffer without compression or encryption
 // only to check incomplete LogGroupEntry
 // compression and encryption will be supported in the future
@@ -455,43 +494,6 @@ void LocationDataGenerator::cal_read_size_(const int64_t dest_id,
   }
 }
 
-int LocationDataGenerator::read_file_(const ObString &base,
-    const share::ObBackupStorageInfo *storage_info,
-    const share::ObLSID &id,
-    const int64_t file_id,
-    const int64_t offset,
-    char *data,
-    const int64_t data_len,
-    int64_t &data_size)
-{
-  int ret = OB_SUCCESS;
-  share::ObBackupPath path;
-  if (OB_FAIL(ObArchivePathUtil::build_restore_path(base.ptr(), id, file_id, path))) {
-    LOG_WARN("build restore path failed", K(ret));
-  } else {
-    ObString uri(path.get_obstr());
-    char storage_info_cstr[OB_MAX_BACKUP_STORAGE_INFO_LENGTH] = {'\0'};
-    int64_t real_size = 0;
-    common::ObObjectStorageInfo storage_info_base;
-    if (OB_FAIL(storage_info_base.assign(*storage_info))) {
-      OB_LOG(WARN, "fail to assign storage info base!", K(ret), KP(storage_info));
-    } else if (OB_FAIL(storage_info_base.get_storage_info_str(storage_info_cstr, OB_MAX_BACKUP_STORAGE_INFO_LENGTH))) {
-      LOG_WARN("get_storage_info_str failed", K(ret), K(uri), K(storage_info));
-    } else {
-      ObString storage_info_ob_str(storage_info_cstr);
-      if (OB_FAIL(log_ext_handler_->pread(uri, storage_info_ob_str, offset, data, data_len, real_size))) {
-        LOG_WARN("read file failed", K(ret), K(uri), K(storage_info));
-      } else if (0 == real_size) {
-        ret = OB_ITER_END;
-        LOG_INFO("read no data, need retry", K(ret), K(uri), K(storage_info), K(offset), K(real_size));
-      } else {
-        data_size = real_size;
-      }
-    }
-  }
-  return ret;
-}
-
 bool LocationDataGenerator::FileDesc::is_valid() const
 {
   return dest_id_ > 0
@@ -597,29 +599,21 @@ RawPathDataGenerator::RawPathDataGenerator(const uint64_t tenant_id,
     const ObLSID &id,
     const LSN &start_lsn,
     const LSN &end_lsn,
-    const DirArray &array,
+    ObLogRawPathPieceContext *rawpath_ctx,
     const SCN &end_scn,
-    const int64_t piece_index,
-    const int64_t min_file_id,
-    const int64_t max_file_id,
     ObLogExternalStorageHandler *log_ext_handler) :
   RemoteDataGenerator(tenant_id, id, start_lsn, end_lsn, end_scn, log_ext_handler),
-  array_(array),
+  rawpath_ctx_(rawpath_ctx),
   data_len_(0),
-  file_id_(0),
-  base_lsn_(),
-  index_(piece_index),
-  min_file_id_(min_file_id),
-  max_file_id_(max_file_id)
+  base_lsn_()
 {
 }
 
 RawPathDataGenerator::~RawPathDataGenerator()
 {
-  array_.reset();
+  rawpath_ctx_ = NULL;
   data_len_ = 0;
   base_lsn_.reset();
-  index_ = 0;
 }
 
@@ -631,11 +625,42 @@ int RawPathDataGenerator::next_buffer(palf::LSN &lsn, char *&buf, int64_t &buf_size)
   } else if (is_fetch_to_end()) {
     ret = OB_ITER_END;
   } else if (OB_FAIL(fetch_log_from_dest_())) {
-    LOG_WARN("fetch log from dest failed", K(ret), KPC(this));
+    if (OB_ITER_END != ret) {
+      LOG_WARN("fetch log from dest failed", K(ret), KPC(this));
+    }
   } else {
     lsn = base_lsn_;
     buf = data_ + ARCHIVE_FILE_HEADER_SIZE;
     buf_size = data_len_ - ARCHIVE_FILE_HEADER_SIZE;
+    LOG_TRACE("after next_buffer", K(lsn), K(buf_size));
+  }
+  return ret;
+}
+
+int RawPathDataGenerator::advance_step_lsn(const palf::LSN &lsn)
+{
+  int ret = OB_SUCCESS;
+  if (OB_UNLIKELY((NULL == rawpath_ctx_) || ! rawpath_ctx_->is_valid())) {
+    LOG_TRACE("rawpath_ctx is invalid");
+  } else if (OB_FAIL(RemoteDataGenerator::update_next_fetch_lsn_(lsn))) {
+    LOG_WARN("update_next_fetch_lsn_ failed", K(lsn), KPC(this));
+  } else {
+    rawpath_ctx_->update_max_lsn(lsn);
+    LOG_TRACE("advance_step_lsn succ", KPC(this));
+  }
+  return ret;
+}
+
+int RawPathDataGenerator::update_max_lsn(const palf::LSN &lsn)
+{
+  int ret = OB_SUCCESS;
+  if (OB_UNLIKELY((NULL == rawpath_ctx_) || ! rawpath_ctx_->is_valid())) {
+    LOG_TRACE("rawpath_ctx is invalid");
+  } else if (OB_FAIL(RemoteDataGenerator::update_next_fetch_lsn_(lsn))) {
+    LOG_WARN("update_next_fetch_lsn_ failed", K(lsn), KPC(this));
+  } else {
+    rawpath_ctx_->update_max_lsn(lsn);
+    LOG_TRACE("update_max_lsn succ", KPC(rawpath_ctx_));
   }
   return ret;
 }
@@ -643,39 +668,44 @@ int RawPathDataGenerator::next_buffer(palf::LSN &lsn, char *&buf, int64_t &buf_size)
 int RawPathDataGenerator::fetch_log_from_dest_()
 {
   int ret = OB_SUCCESS;
-  ObString uri(array_[index_].first.ptr());
-  share::ObBackupStorageInfo storage_info;
-  if (OB_FAIL(storage_info.set(uri.ptr(), array_[index_].second.ptr()))) {
-    LOG_WARN("failed to set storage info", K(ret));
-  } else if (FALSE_IT(cal_lsn_to_file_id_(start_lsn_))) {
-  } else if (OB_FAIL(locate_precise_piece_())) {
-    LOG_WARN("locate precise piece failed", K(ret), KPC(this));
-  } else if (OB_FAIL(read_file_(uri, &storage_info, file_id_))) {
-    LOG_WARN("read file failed", K(ret), K_(id), K_(file_id));
-  } else if (OB_FAIL(extract_archive_file_header_())) {
-    LOG_WARN("extract archive file header failed", K(ret));
-  }
-  return ret;
-}
-
-// To speed up locating the start file, rely on the LSN -> file_id rule
-int RawPathDataGenerator::cal_lsn_to_file_id_(const LSN &lsn)
-{
-  file_id_ = cal_archive_file_id(lsn, palf::PALF_BLOCK_SIZE);
-  return OB_SUCCESS;
-}
-
-int RawPathDataGenerator::list_dir_files_(const ObString &base,
-    const share::ObBackupStorageInfo *storage_info,
-    int64_t &min_file_id,
-    int64_t &max_file_id)
-{
-  int ret = OB_SUCCESS;
-  share::ObBackupPath prefix;
-  if (OB_FAIL(ObArchivePathUtil::build_restore_prefix(base.ptr(), id_, prefix))) {
+  if (OB_UNLIKELY((NULL == rawpath_ctx_) || ! rawpath_ctx_->is_valid())) {
+    ret = OB_ERR_UNEXPECTED;
+    LOG_WARN("rawpath_ctx_ is invalid");
   } else {
-    ObString uri(prefix.get_obstr());
-    ret = ObArchiveFileUtils::get_file_range(uri, storage_info, min_file_id, max_file_id);
+    char uri_str[OB_MAX_BACKUP_DEST_LENGTH + 1] = { 0 };
+    char storage_info_str[OB_MAX_BACKUP_STORAGE_INFO_LENGTH] = { 0 };
+    share::ObBackupStorageInfo storage_info;
+    int64_t file_id = 0;
+
+    if (OB_FAIL(rawpath_ctx_->cal_lsn_to_file_id(next_fetch_lsn_))) {  // locate file_id by next_fetch_lsn_
+      LOG_WARN("fail to cal lsn to file id", K(ret), K_(id), K_(next_fetch_lsn));
+    } else if (OB_FAIL(rawpath_ctx_->locate_precise_piece(next_fetch_lsn_))) {
+      if (OB_ITER_END == ret) {
+        LOG_INFO("locate precise piece to end", K(ret), K_(id), KPC(this));
+      } else {
+        LOG_WARN("locate precise piece failed", K(ret), KPC(this));
+      }
+    } else if (OB_FAIL(rawpath_ctx_->get_file_id(file_id))) {
+      LOG_WARN("fail to get file id", KPC(this));
+    } else if (OB_FAIL(rawpath_ctx_->get_cur_uri(uri_str, sizeof(uri_str)))) {
+      LOG_WARN("fail to get cur uri ptr", K(ret));
+    } else if (OB_FAIL(rawpath_ctx_->get_cur_storage_info(storage_info_str, sizeof(storage_info_str)))) {
+      LOG_WARN("fail to get storage info ptr", K(ret));
+    } else if (OB_FAIL(storage_info.set(uri_str, storage_info_str))) {
+      LOG_WARN("failed to set storage info", K(ret));
+    } else if (OB_FAIL(read_file_(uri_str, &storage_info, file_id))) {
+      if (OB_ITER_END == ret) {
+        LOG_TRACE("read end of file", K(ret));
+      } else {
+        LOG_WARN("read file failed", K(ret), K_(id), K(file_id));
+      }
+    } else if (OB_FAIL(extract_archive_file_header_())) {
+      LOG_WARN("extract archive file header failed", K(ret));
+    }
+  }
+
+  if ((OB_SUCC(ret) && base_lsn_ > next_fetch_lsn_) || OB_ERR_OUT_OF_LOWER_BOUND == ret) {
+    ret = OB_ERR_UNEXPECTED;
+    LOG_WARN("read file base_lsn bigger than next_fetch_lsn", K(base_lsn_), K(next_fetch_lsn_));
   }
   return ret;
 }
@@ -723,47 +753,11 @@ int RawPathDataGenerator::extract_archive_file_header_()
     LOG_ERROR("invalid file header", K(ret), K(pos), K(file_header), KPC(this));
   } else {
     base_lsn_ = LSN(file_header.start_lsn_);
+    rawpath_ctx_->update_min_lsn(base_lsn_);
     LOG_INFO("extract_archive_file_header_ succ", K(pos), K(file_header), KPC(this));
   }
   return ret;
 }
-
-int RawPathDataGenerator::locate_precise_piece_()
-{
-  int ret = OB_SUCCESS;
-  if (OB_UNLIKELY(0 == array_.count())) {
-    ret = OB_INVALID_ARGUMENT;
-    LOG_WARN("dest array count is 0", K(ret), KPC(this));
-  } else if (piece_index_match_()) {
-  } else {
-    int64_t min_file_id = 0;
-    int64_t max_file_id = 0;
-    bool locate = false;
-    for (int64_t i = 0; i < array_.count(); i++) {
-      ObString uri(array_[i].first.ptr());
-      share::ObBackupStorageInfo storage_info;
-      if (OB_FAIL(storage_info.set(uri.ptr(), array_[i].second.ptr()))) {
-        LOG_WARN("failed to set storage info", K(ret), KPC(this));
-      } else if (OB_FAIL(list_dir_files_(uri, &storage_info, min_file_id, max_file_id))) {
-        LOG_WARN("list dir files failed", K(ret), KPC(this));
-      } else if (file_id_ >= min_file_id && file_id_ <= max_file_id) {
-        locate = true;
-        index_ = i;
-        min_file_id_ = min_file_id;
-        max_file_id_ = max_file_id;
-        break;
-      }
-    }
-    if (OB_SUCC(ret) && ! locate) {
-      ret = OB_ENTRY_NOT_EXIST;
-    }
-  }
-  return ret;
-}
-
-bool RawPathDataGenerator::piece_index_match_() const
-{
-  return index_ > 0 && min_file_id_ <= file_id_ && max_file_id_ >= file_id_;
-}
 } // namespace logservice
 } // namespace oceanbase
diff --git a/src/logservice/restoreservice/ob_remote_data_generator.h b/src/logservice/restoreservice/ob_remote_data_generator.h
index 587ba061b9..8775213d9a 100644
--- a/src/logservice/restoreservice/ob_remote_data_generator.h
+++ b/src/logservice/restoreservice/ob_remote_data_generator.h
@@ -159,6 +159,14 @@ public:
 protected:
   int process_origin_data_(char *origin_buf, const int64_t origin_buf_size, char *buf, int64_t &buf_size);
   int update_next_fetch_lsn_(const palf::LSN &lsn);
+  int read_file_(const ObString &base,
+      const share::ObBackupStorageInfo *storage_info,
+      const share::ObLSID &id,
+      const int64_t file_id,
+      const int64_t offset,
+      char *data,
+      const int64_t data_len,
+      int64_t &data_size);
 
 protected:
   uint64_t tenant_id_;
@@ -287,14 +295,7 @@ private:
       const int64_t file_id,
       const int64_t file_offset,
       int64_t &size);
-  int read_file_(const ObString &base,
-      const share::ObBackupStorageInfo *storage_info,
-      const share::ObLSID &id,
-      const int64_t file_id,
-      const int64_t offset,
-      char *data,
-      const int64_t data_len,
-      int64_t &real_read_size);
+
 private:
   share::SCN pre_scn_;
   // base_lsn_ is the start_lsn from the archive file, while the next_fetch_lsn_ is the start_lsn to fetch,
@@ -322,42 +323,28 @@ public:
       const ObLSID &id,
       const LSN &start_lsn,
       const LSN &end_lsn,
-      const DirArray &array,
+      ObLogRawPathPieceContext *rawpath_ctx,
       const share::SCN &end_scn,
-      const int64_t piece_index,
-      const int64_t min_file_id,
-      const int64_t max_file_id,
       logservice::ObLogExternalStorageHandler *log_ext_handler);
   virtual ~RawPathDataGenerator();
   int next_buffer(palf::LSN &lsn, char *&buf, int64_t &buf_size);
-  int update_max_lsn(const palf::LSN &lsn) { UNUSED(lsn); return common::OB_SUCCESS; }
-  int advance_step_lsn(const palf::LSN &lsn) override { UNUSED(lsn); return common::OB_SUCCESS;}
+  int update_max_lsn(const palf::LSN &lsn);
+  int advance_step_lsn(const palf::LSN &lsn) override;
 
-  INHERIT_TO_STRING_KV("RemoteDataGenerator", RemoteDataGenerator, K_(array), K_(data_len),
-      K_(file_id), K_(base_lsn), K_(index), K_(min_file_id), K_(max_file_id));
+  INHERIT_TO_STRING_KV("RemoteDataGenerator", RemoteDataGenerator, K_(rawpath_ctx), K_(data_len), K_(base_lsn));
 private:
   int fetch_log_from_dest_();
-  int cal_lsn_to_file_id_(const LSN &lsn);
-  int list_dir_files_(const ObString &uri, const share::ObBackupStorageInfo *storage_info,
-      int64_t &min_file_id, int64_t &max_file_id);
   int read_file_(const ObString &prefix, const share::ObBackupStorageInfo *storage_info, const int64_t file_id);
   int extract_archive_file_header_();
-  int locate_precise_piece_();
-  bool piece_index_match_() const;
 private:
-  DirArray array_;
+  ObLogRawPathPieceContext *rawpath_ctx_;
   int64_t data_len_;
   char data_[MAX_DATA_BUF_LEN];
-
-  int64_t file_id_;
   LSN base_lsn_;
-  int64_t index_;
-  int64_t min_file_id_;
-  int64_t max_file_id_;
 private:
   DISALLOW_COPY_AND_ASSIGN(RawPathDataGenerator);
 };
diff --git a/src/logservice/restoreservice/ob_remote_fetch_log.cpp b/src/logservice/restoreservice/ob_remote_fetch_log.cpp
index c63d809089..341455d82f 100644
--- a/src/logservice/restoreservice/ob_remote_fetch_log.cpp
+++ b/src/logservice/restoreservice/ob_remote_fetch_log.cpp
@@ -85,11 +85,12 @@ int ObRemoteFetchLogImpl::do_schedule(const share::ObLogRestoreSourceItem &source)
       ret = net_driver_->do_schedule(service_attr);
       net_driver_->set_global_recovery_scn(source.until_scn_);
     }
-  } else if (is_location_log_source_type(source.type_)) {
+  } else if (is_location_log_source_type(source.type_) || is_raw_path_log_source_type(source.type_)) {
     ret = archive_driver_->do_schedule();
     archive_driver_->set_global_recovery_scn(source.until_scn_);
   } else {
     ret = OB_NOT_SUPPORTED;
+    CLOG_LOG(WARN, "unsupported log source type", K(ret), K(source.type_));
   }
   net_driver_->scan_ls(source.type_);
   return ret;
diff --git a/src/logservice/restoreservice/ob_remote_fetch_log_worker.cpp b/src/logservice/restoreservice/ob_remote_fetch_log_worker.cpp
index d1c2874890..540e27826c 100644
--- a/src/logservice/restoreservice/ob_remote_fetch_log_worker.cpp
+++ b/src/logservice/restoreservice/ob_remote_fetch_log_worker.cpp
@@ -312,12 +312,14 @@ int ObRemoteFetchWorker::handle_fetch_log_task_(ObFetchLogTask *task)
   } else if (OB_FAIL(task->iter_.pre_read(empty))) {
     LOG_WARN("pre_read failed", K(ret), KPC(task));
   } else if (empty) {
+    LOG_TRACE("pre read empty");
     // do nothing
   } else if (OB_FAIL(push_submit_array_(*task))) {
     LOG_WARN("push submit array failed", K(ret));
   }
 
   if (OB_SUCC(ret) && ! empty) {
+    LOG_TRACE("pre_read succ and push submit array succ, do nothing");
     // pre_read succ and push submit array succ, do nothing,
   } else {
     if (is_fatal_error_(ret)) {
diff --git a/src/logservice/restoreservice/ob_remote_location_adaptor.cpp b/src/logservice/restoreservice/ob_remote_location_adaptor.cpp
index 95ea7527eb..0badcc369a 100644
--- a/src/logservice/restoreservice/ob_remote_location_adaptor.cpp
+++ b/src/logservice/restoreservice/ob_remote_location_adaptor.cpp
@@ -255,9 +255,42 @@ int ObRemoteLocationAdaptor::add_service_source_(const share::ObLogRestoreSourceItem &item,
 int ObRemoteLocationAdaptor::add_rawpath_source_(const share::ObLogRestoreSourceItem &item,
     ObLogRestoreHandler &restore_handler)
 {
-  UNUSED(item);
-  UNUSED(restore_handler);
-  return OB_NOT_SUPPORTED;
+  int ret = OB_SUCCESS;
+  SMART_VAR(logservice::DirArray, dir_array) {
+    ObSqlString tmp_str;
+    char *token = nullptr;
+    char *saveptr = nullptr;
+
+    if (OB_FAIL(tmp_str.assign(item.value_))) {
+      LOG_WARN("fail to parse rawpath value", K(item));
+    } else {
+      token = tmp_str.ptr();
+      for (char *str = token; OB_SUCC(ret); str = nullptr) {
+        ObBackupDest dest;
+        SMART_VAR(logservice::DirInfo, dir_info) {
+          char storage_info_str[OB_MAX_BACKUP_STORAGE_INFO_LENGTH] = { 0 };
+          token = ::STRTOK_R(str, ",", &saveptr);
+          if (nullptr == token) {
+            break;
+          } else if (OB_FAIL(dest.set(token))) {
+            LOG_WARN("fail to set dest", K(token));
+          } else if (OB_FALSE_IT(dir_info.first = dest.get_root_path())) {
+          } else if (OB_FAIL(dest.get_storage_info()->get_storage_info_str(storage_info_str, sizeof(storage_info_str)))) {
+            LOG_WARN("fail to get storage info str", K(dest));
+          } else if (OB_FALSE_IT(dir_info.second = storage_info_str)) {
+          } else if (OB_FAIL(dir_array.push_back(dir_info))) {
+            LOG_WARN("fail to push backup dir_array", K(ret), K(dir_array));
+          }
+        }
+      }
+    }
+    if (OB_SUCC(ret) && OB_FAIL(restore_handler.add_source(dir_array, item.until_scn_))) {
+      LOG_WARN("fail to add rawpath source", K(ret), K(item), K(dir_array));
+    } else {
+      LOG_INFO("add rawpath source", K(ret), K(dir_array.count()), K(dir_array));
+    }
+  }
+  return ret;
 }
 } // namespace logservice
 } // namespace oceanbase
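The rawpath source value that add_rawpath_source_() parses is a comma-separated list of backup dests; schematically (placeholders, not a real configuration):

    // item.value_: "file:///data/piece_1,file:///data/piece_2"
    // after STRTOK_R parsing and ObBackupDest::set():
    //   dir_array[0] = { "file:///data/piece_1", "" }  // root path, storage info
    //   dir_array[1] = { "file:///data/piece_2", "" }
    // object-storage dests would carry their serialized access info in .second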
a/src/logservice/restoreservice/ob_remote_log_iterator.ipp +++ b/src/logservice/restoreservice/ob_remote_log_iterator.ipp @@ -199,14 +199,11 @@ int ObRemoteLogIterator<LogEntryType>::build_dest_data_generator_(const share::S int ret = OB_SUCCESS; UNUSED(pre_scn); logservice::DirArray array; + ObLogRawPathPieceContext *rawpath_ctx = NULL; share::SCN end_scn; - int64_t piece_index = 0; - int64_t min_file_id = 0; - int64_t max_file_id = 0; - source->get(array, end_scn); - source->get_locate_info(piece_index, min_file_id, max_file_id); + source->get(rawpath_ctx, end_scn); gen_ = MTL_NEW(RawPathDataGenerator, "ResDataGen", tenant_id_, id_, start_lsn_, end_lsn_, - array, end_scn, piece_index, min_file_id, max_file_id, log_ext_handler_); + rawpath_ctx, end_scn, log_ext_handler_); if (OB_ISNULL(gen_)) { ret = OB_ALLOCATE_MEMORY_FAILED; CLOG_LOG(WARN, "alloc dest data generator failed", K(ret), KPC(this)); diff --git a/src/logservice/restoreservice/ob_remote_log_source.cpp b/src/logservice/restoreservice/ob_remote_log_source.cpp index 85cffaef9d..07b64a2d1b 100644 --- a/src/logservice/restoreservice/ob_remote_log_source.cpp +++ b/src/logservice/restoreservice/ob_remote_log_source.cpp @@ -237,10 +237,7 @@ int ObRemoteLocationParent::update_locate_info(ObRemoteLogParent &source) // =========================== ObRemoteRawPathParent ============================== // ObRemoteRawPathParent::ObRemoteRawPathParent(const share::ObLSID &ls_id) : ObRemoteLogParent(ObLogRestoreSourceType::RAWPATH, ls_id), - paths_(), - piece_index_(0), - min_file_id_(0), - max_file_id_(0) + raw_piece_ctx_() {} ObRemoteRawPathParent::~ObRemoteRawPathParent() @@ -249,15 +246,7 @@ ObRemoteRawPathParent::~ObRemoteRawPathParent() upper_limit_scn_.reset(); end_fetch_scn_.reset(); end_lsn_.reset(); - piece_index_ = 0; - min_file_id_ = 0; - max_file_id_ = 0; -} - -void ObRemoteRawPathParent::get(DirArray &array, SCN &end_scn) -{ - array.assign(paths_); - end_scn = upper_limit_scn_; + raw_piece_ctx_.reset(); } int ObRemoteRawPathParent::set(DirArray &array, const SCN &end_scn) @@ -265,25 +254,29 @@ int ObRemoteRawPathParent::set(DirArray &array, const SCN &end_scn) int ret = OB_SUCCESS; if (OB_UNLIKELY(array.empty() || !end_scn.is_valid())) { ret = OB_INVALID_ARGUMENT; + } else if (OB_FAIL(raw_piece_ctx_.init(ls_id_, array))) { + CLOG_LOG(WARN, "fail to init raw piece context"); } else { - paths_.assign(array); upper_limit_scn_ = end_scn; - to_end_ = end_fetch_scn_ <= end_scn; + to_end_ = end_fetch_scn_ >= end_scn; } CLOG_LOG(INFO, "add_source dest", KPC(this)); return ret; } +void ObRemoteRawPathParent::get(ObLogRawPathPieceContext *&raw_piece_ctx, SCN &end_scn) +{ + raw_piece_ctx = &raw_piece_ctx_; + end_scn = upper_limit_scn_; +} + int ObRemoteRawPathParent::deep_copy_to(ObRemoteLogParent &other) { int ret = OB_SUCCESS; ObRemoteRawPathParent &dst = static_cast<ObRemoteRawPathParent &>(other); - if (OB_FAIL(dst.paths_.assign(paths_))) { + if (OB_FAIL(raw_piece_ctx_.deep_copy_to(dst.raw_piece_ctx_))) { CLOG_LOG(WARN, "dir array assign failed", K(ret), KPC(this)); } else { - dst.piece_index_ = piece_index_; - dst.min_file_id_ = min_file_id_; - dst.max_file_id_ = max_file_id_; base_copy_to_(other); } return ret; @@ -293,17 +286,25 @@ bool ObRemoteRawPathParent::is_valid() const { return is_valid_log_source_type(type_) && upper_limit_scn_.is_valid() - && !
paths_.empty(); + && raw_piece_ctx_.is_valid(); } -void ObRemoteRawPathParent::get_locate_info(int64_t &piece_index, - int64_t &min_file_id, - int64_t &max_file_id) const +int ObRemoteRawPathParent::update_locate_info(ObRemoteLogParent &source) { - piece_index= piece_index_; - min_file_id = min_file_id_; - max_file_id = max_file_id_; + int ret = OB_SUCCESS; + ObRemoteRawPathParent &dst = static_cast<ObRemoteRawPathParent &>(source); + if (OB_UNLIKELY(! dst.is_valid())) { + ret = OB_INVALID_ARGUMENT; + CLOG_LOG(WARN, "invalid rawpath parent", K(ret), K(dst)); + } else if (OB_FAIL(dst.raw_piece_ctx_.deep_copy_to(raw_piece_ctx_))) { + CLOG_LOG(WARN, "fail to deep copy rawpath parent", K(ret)); + raw_piece_ctx_.reset(); + } else { + CLOG_LOG(TRACE, "update raw path locate info succ", KPC(this)); + } + return ret; } + // =========================== ObRemoteSourceGuard ==============================// ObRemoteSourceGuard::ObRemoteSourceGuard() : source_(NULL) diff --git a/src/logservice/restoreservice/ob_remote_log_source.h b/src/logservice/restoreservice/ob_remote_log_source.h index 75872cd4c6..eca34d9588 100644 --- a/src/logservice/restoreservice/ob_remote_log_source.h +++ b/src/logservice/restoreservice/ob_remote_log_source.h @@ -27,13 +27,13 @@ #include "share/ob_ls_id.h" #include "share/restore/ob_log_restore_source.h" // ObLogRestoreSourceType #include "ob_log_archive_piece_mgr.h" // ObLogArchivePieceContext +#include "ob_log_restore_define.h" namespace oceanbase { namespace logservice { using oceanbase::share::ObLogRestoreSourceType; //using oceanbase::share::DirArray; -typedef common::ObSEArray<std::pair<ObString, ObString>, 1> DirArray; typedef share::ObRestoreSourceServiceAttr RestoreServiceAttr; // The management of remote log source, three types are supported, LOCATION/SERVICE/RAWPATH class ObRemoteLogParent @@ -126,24 +126,17 @@ public: virtual ~ObRemoteRawPathParent(); public: - void get(DirArray &array, share::SCN &end_scn); - int set(const int64_t cluster_id, const ObAddr &addr); + void get(ObLogRawPathPieceContext *&raw_piece_ctx, share::SCN &end_scn); int deep_copy_to(ObRemoteLogParent &other) override; bool is_valid() const override; int set(DirArray &array, const share::SCN &end_scn); - int update_locate_info(ObRemoteLogParent &source) override { UNUSED(source); return OB_NOT_SUPPORTED; } - void get_locate_info(int64_t &piece_index, int64_t &min_file_id, int64_t &max_file_id) const; + int update_locate_info(ObRemoteLogParent &source) override; - TO_STRING_KV("ObRemoteLogParent", get_source_type_str(type_), K_(ls_id), - K_(upper_limit_scn), K_(to_end), K_(end_fetch_scn), K_(end_lsn), - K_(paths), K_(piece_index), K_(min_file_id), K_(max_file_id)); + TO_STRING_KV("ObRemoteLogParent", get_source_type_str(type_), K_(ls_id), K_(upper_limit_scn), K_(to_end), + K_(end_fetch_scn), K_(end_lsn), K_(raw_piece_ctx)); private: - DirArray paths_; - - int64_t piece_index_; - int64_t min_file_id_; - int64_t max_file_id_; + ObLogRawPathPieceContext raw_piece_ctx_; private: DISALLOW_COPY_AND_ASSIGN(ObRemoteRawPathParent); diff --git a/src/logservice/restoreservice/ob_remote_log_writer.cpp b/src/logservice/restoreservice/ob_remote_log_writer.cpp index 67150284b4..03b9a722c9 100644 --- a/src/logservice/restoreservice/ob_remote_log_writer.cpp +++ b/src/logservice/restoreservice/ob_remote_log_writer.cpp @@ -192,6 +192,7 @@ int ObRemoteLogWriter::foreach_ls_(const ObLSID &id) LOG_WARN("get sorted task failed", K(ret), K(id)); } } else if (NULL == task) { + LOG_TRACE("task is null", K(id)); break; } else if (OB_FAIL(submit_entries_(*task))) { if
(OB_RESTORE_LOG_TO_END != ret) { diff --git a/src/observer/ob_rpc_processor_simple.cpp b/src/observer/ob_rpc_processor_simple.cpp index 33dfb8a68e..fdc1ccf466 100644 --- a/src/observer/ob_rpc_processor_simple.cpp +++ b/src/observer/ob_rpc_processor_simple.cpp @@ -169,7 +169,7 @@ int ObRpcLSMigrateReplicaP::process() COMMON_LOG(WARN, "can not migrate ls which local ls is exist", K(ret), K(arg_), K(is_exist)); } else { migration_op_arg.cluster_id_ = GCONF.cluster_id; - migration_op_arg.data_src_ = arg_.data_source_; + migration_op_arg.data_src_ = arg_.force_data_source_; migration_op_arg.dst_ = arg_.dst_; migration_op_arg.ls_id_ = arg_.ls_id_; //TODO(muwei.ym) need check priority in 4.2 RC3 diff --git a/src/observer/ob_service.cpp b/src/observer/ob_service.cpp index a85fc0c7a0..18c26600dc 100644 --- a/src/observer/ob_service.cpp +++ b/src/observer/ob_service.cpp @@ -1691,13 +1691,13 @@ int ObService::do_add_ls_replica(const obrpc::ObLSAddReplicaArg &arg) LOG_WARN("can not add ls which local ls is exist", KR(ret), K(arg), K(is_exist)); } else { migration_op_arg.cluster_id_ = GCONF.cluster_id; - migration_op_arg.data_src_ = arg.data_source_; + migration_op_arg.data_src_ = arg.force_data_source_; migration_op_arg.dst_ = arg.dst_; migration_op_arg.ls_id_ = arg.ls_id_; //TODO(muwei.ym) need check priority in 4.2 RC3 migration_op_arg.priority_ = ObMigrationOpPriority::PRIO_HIGH; migration_op_arg.paxos_replica_number_ = arg.new_paxos_replica_number_; - migration_op_arg.src_ = arg.data_source_; + migration_op_arg.src_ = arg.dst_; migration_op_arg.type_ = ObMigrationOpType::ADD_LS_OP; if (OB_FAIL(ls_service->create_ls_for_ha(arg.task_id_, migration_op_arg))) { LOG_WARN("failed to create ls for ha", KR(ret), K(arg), K(migration_op_arg)); diff --git a/src/observer/virtual_table/ob_tenant_show_restore_preview.cpp b/src/observer/virtual_table/ob_tenant_show_restore_preview.cpp index 4037f586e0..c29ecd6d2a 100644 --- a/src/observer/virtual_table/ob_tenant_show_restore_preview.cpp +++ b/src/observer/virtual_table/ob_tenant_show_restore_preview.cpp @@ -68,17 +68,18 @@ int ObTenantShowRestorePreview::init() ret = OB_BAD_NULL_ERROR; SHARE_LOG(WARN, "session should not be null", KR(ret)); } else if (!session_->user_variable_exists(OB_RESTORE_PREVIEW_BACKUP_DEST_SESSION_STR) - || !session_->user_variable_exists(OB_RESTORE_PREVIEW_SCN_SESSION_STR)) { + || !(session_->user_variable_exists(OB_RESTORE_PREVIEW_SCN_SESSION_STR) + || session_->user_variable_exists(OB_RESTORE_PREVIEW_TIMESTAMP_SESSION_STR)) ) { ret = OB_NOT_SUPPORTED; - SHARE_LOG(WARN, "no restore preview backup dest specified before", KR(ret)); - LOG_USER_ERROR(OB_NOT_SUPPORTED, "show restore preview do not specify backup dest"); + SHARE_LOG(WARN, "no ALTER SYSTEM RESTORE PREVIEW statement executed before", KR(ret)); + LOG_USER_ERROR(OB_NOT_SUPPORTED, "show restore preview before ALTER SYSTEM RESTORE PREVIEW is"); } else if (OB_FAIL(session_->get_user_variable_value(OB_RESTORE_PREVIEW_BACKUP_DEST_SESSION_STR, backup_dest_value))) { SHARE_LOG(WARN, "failed to get user variable value", KR(ret)); } else if (OB_FAIL(backup_dest_value.get_varchar(uri_))) { SHARE_LOG(WARN, "failed to varchar", KR(ret), K(backup_dest_value)); } else if (OB_FAIL(ObPhysicalRestoreUriParser::parse(uri_, allocator, tenant_path_array))) { - SHARE_LOG(WARN, "fail to parse uri", K(ret), K(uri_)); - } else if (OB_FAIL(rootserver::ObRestoreUtil::check_restore_using_complement_log_(tenant_path_array, only_contain_backup_set_))) { + SHARE_LOG(WARN, "fail to parse uri", K(ret)); + } 
else if (OB_FAIL(rootserver::ObRestoreUtil::check_restore_using_complement_log(tenant_path_array, only_contain_backup_set_))) { SHARE_LOG(WARN, "check restore using complement log failed", K(ret), K(tenant_path_array)); } else if (!session_->user_variable_exists(OB_BACKUP_DECRYPTION_PASSWD_ARRAY_SESSION_STR)) { } else if (OB_FAIL(session_->get_user_variable_value(OB_BACKUP_DECRYPTION_PASSWD_ARRAY_SESSION_STR, passwd))) { @@ -86,7 +87,6 @@ int ObTenantShowRestorePreview::init() } else if (OB_FAIL(passwd.get_varchar(backup_passwd))) { SHARE_LOG(WARN, "failed to parser passwd", K(ret)); } - if (OB_FAIL(ret)) { } else if (OB_FAIL(parse_restore_scn_from_session_(backup_passwd, tenant_path_array))) { SHARE_LOG(WARN, "failed to parse restore timestamp from session", KR(ret)); @@ -97,6 +97,7 @@ int ObTenantShowRestorePreview::init() idx_ = 0; total_cnt_ = backup_set_list_.count() + backup_piece_list_.count(); is_inited_ = true; + SHARE_LOG(INFO, "succeed to parse restore preview", K_(restore_scn)); } return ret; } @@ -110,6 +111,7 @@ int ObTenantShowRestorePreview::parse_restore_scn_from_session_( ObObj restore_timestamp_obj; ObString restore_scn_str; ObString restore_timestamp_str; + const share::SCN src_scn = share::SCN::min_scn(); ObFixedLengthString fixed_string; if (OB_ISNULL(session_)) { ret = OB_BAD_NULL_ERROR; @@ -128,13 +130,13 @@ int ObTenantShowRestorePreview::parse_restore_scn_from_session_( SHARE_LOG(WARN, "failed to assign tenant id str", KR(ret), K(restore_scn_str)); } else if (1 != sscanf(fixed_string.ptr(), "%lu", &restore_scn)) { ret = OB_INVALID_ARGUMENT; - SHARE_LOG(WARN, "failed to get uint64_t from value", KR(ret), K(restore_scn)); + SHARE_LOG(WARN, "failed to get uint64_t from value", KR(ret), K(fixed_string)); } else if (restore_scn != 0) { if (OB_FAIL(restore_scn_.convert_for_inner_table_field(restore_scn))) { SHARE_LOG(WARN, "failed to convert for inner table field", K(ret), K(restore_scn)); } - } else if (OB_FAIL(rootserver::ObRestoreUtil::fill_restore_scn_( - restore_scn_, restore_timestamp_str, false, tenant_path_array, backup_passwd, only_contain_backup_set_, restore_scn_))) { + } else if (OB_FAIL(rootserver::ObRestoreUtil::fill_restore_scn( + src_scn, restore_timestamp_str, false/*with_restore_scn*/, tenant_path_array, backup_passwd, only_contain_backup_set_, restore_scn_))) { SHARE_LOG(WARN, "failed to parse restore scn", K(ret)); } return ret; @@ -143,6 +145,7 @@ int ObTenantShowRestorePreview::parse_restore_scn_from_session_( int ObTenantShowRestorePreview::inner_get_next_row(common::ObNewRow *&row) { int ret = OB_SUCCESS; + int tmp_ret = OB_SUCCESS; if (!is_sys_tenant(effective_tenant_id_)) { ret = OB_OP_NOT_ALLOW; SHARE_LOG(WARN, "show restore preview is sys only", K(ret), K(effective_tenant_id_)); @@ -150,6 +153,30 @@ int ObTenantShowRestorePreview::inner_get_next_row(common::ObNewRow *&row) } else if (OB_FAIL(inner_get_next_row_())) { if (OB_ITER_END != ret) { SERVER_LOG(WARN, "failed to get next row", KR(ret), K(cur_row_)); + } else { + if (session_->user_variable_exists(OB_RESTORE_PREVIEW_BACKUP_DEST_SESSION_STR) + && OB_TMP_FAIL(session_->remove_user_variable(OB_RESTORE_PREVIEW_BACKUP_DEST_SESSION_STR))) { + ret = tmp_ret; + SHARE_LOG(WARN, "fail to remove session variable OB_RESTORE_PREVIEW_BACKUP_DEST_SESSION_STR", K(tmp_ret), KPC(session_)); + } + + if (session_->user_variable_exists(OB_RESTORE_PREVIEW_SCN_SESSION_STR) + && OB_TMP_FAIL(session_->remove_user_variable(OB_RESTORE_PREVIEW_SCN_SESSION_STR))) { + ret = tmp_ret; + SHARE_LOG(WARN, "fail to 
remove session variable OB_RESTORE_PREVIEW_SCN_SESSION_STR", K(tmp_ret), KPC(session_)); + } + + if (session_->user_variable_exists(OB_RESTORE_PREVIEW_TIMESTAMP_SESSION_STR) + && OB_TMP_FAIL(session_->remove_user_variable(OB_RESTORE_PREVIEW_TIMESTAMP_SESSION_STR))) { + ret = tmp_ret; + SHARE_LOG(WARN, "fail to remove session variable OB_RESTORE_PREVIEW_TIMESTAMP_SESSION_STR", K(tmp_ret), KPC(session_)); + } + + if (session_->user_variable_exists(OB_BACKUP_DECRYPTION_PASSWD_ARRAY_SESSION_STR) + && OB_TMP_FAIL(session_->remove_user_variable(OB_BACKUP_DECRYPTION_PASSWD_ARRAY_SESSION_STR))) { + ret = tmp_ret; + SHARE_LOG(WARN, "fail to remove session variable OB_BACKUP_DECRYPTION_PASSWD_ARRAY_SESSION_STR", K(tmp_ret), KPC(session_)); + } } } else { row = &cur_row_; @@ -220,6 +247,7 @@ int ObTenantShowRestorePreview::inner_get_next_row_() } } if (OB_SUCC(ret)) { + SHARE_LOG(WARN, "curr idx in show restore preview", K(idx_), K(cur_row_)); ++idx_; } } diff --git a/src/rootserver/ob_admin_drtask_util.cpp b/src/rootserver/ob_admin_drtask_util.cpp index 16c1fd8082..ac1e6e292b 100644 --- a/src/rootserver/ob_admin_drtask_util.cpp +++ b/src/rootserver/ob_admin_drtask_util.cpp @@ -127,7 +127,8 @@ int ObAdminDRTaskUtil::construct_arg_for_add_command_( uint64_t tenant_id = OB_INVALID_TENANT_ID; share::ObLSID ls_id; ObReplicaType replica_type = REPLICA_TYPE_FULL; - common::ObAddr data_source_server; + common::ObAddr force_data_source_server; + common::ObAddr leader_server; common::ObAddr target_server; int64_t orig_paxos_replica_number = 0; int64_t new_paxos_replica_number = 0; @@ -138,7 +139,7 @@ int ObAdminDRTaskUtil::construct_arg_for_add_command_( LOG_WARN("invalid argument", KR(ret), K(command_arg)); // STEP 1: parse parameters from ob_admin command directly } else if (OB_FAIL(parse_params_from_obadmin_command_arg( - command_arg, tenant_id, ls_id, replica_type, data_source_server, + command_arg, tenant_id, ls_id, replica_type, force_data_source_server, target_server, orig_paxos_replica_number, new_paxos_replica_number))) { LOG_WARN("fail to parse parameters provided in ob_admin command", KR(ret), K(command_arg)); } else if (OB_UNLIKELY(!ls_id.is_valid_with_tenant(tenant_id))) { @@ -149,23 +150,22 @@ int ObAdminDRTaskUtil::construct_arg_for_add_command_( || OB_UNLIKELY(REPLICA_TYPE_FULL != replica_type && REPLICA_TYPE_READONLY != replica_type)) { ret = OB_INVALID_ARGUMENT; LOG_WARN("invalid argument", KR(ret), K(replica_type), K(target_server)); - // STEP 2: construct orig_paxos_replica_number and data_source_server if not specified by ob_admin command - } else if (0 == orig_paxos_replica_number || !data_source_server.is_valid()) { - if (OB_FAIL(construct_default_params_for_add_command_( + // STEP 2: construct orig_paxos_replica_number and leader_server if not specified by ob_admin command + } else if (OB_FAIL(construct_default_params_for_add_command_( tenant_id, ls_id, orig_paxos_replica_number, - data_source_server))) { - LOG_WARN("fail to fetch ls info and construct related parameters", KR(ret), K(tenant_id), - K(ls_id), K(orig_paxos_replica_number), K(data_source_server)); - } + leader_server))) { + LOG_WARN("fail to fetch ls info and construct related parameters", KR(ret), K(tenant_id), + K(ls_id), K(orig_paxos_replica_number), K(leader_server)); } if (OB_SUCC(ret)) { new_paxos_replica_number = 0 == new_paxos_replica_number ? 
orig_paxos_replica_number : new_paxos_replica_number; - ObReplicaMember data_source_member(data_source_server, 0/*timstamp*/); + ObReplicaMember data_source_member(leader_server, 0/*timestamp*/); + ObReplicaMember force_data_source_member(force_data_source_server, 0/*timestamp*/); ObReplicaMember add_member(target_server, ObTimeUtility::current_time(), replica_type); // STEP 3: construct arg if (OB_ISNULL(ObCurTraceId::get_trace_id())) { @@ -179,9 +179,10 @@ int ObAdminDRTaskUtil::construct_arg_for_add_command_( data_source_member, orig_paxos_replica_number, new_paxos_replica_number, - false/*is_skip_change_member_list-not used*/))) { - LOG_WARN("fail to init arg", KR(ret), K(tenant_id), K(ls_id), K(add_member), - K(data_source_member), K(orig_paxos_replica_number), K(new_paxos_replica_number)); + false/*is_skip_change_member_list-not used*/, + force_data_source_member/*force_data_source*/))) { + LOG_WARN("fail to init arg", KR(ret), K(tenant_id), K(ls_id), K(add_member), K(data_source_member), + K(orig_paxos_replica_number), K(new_paxos_replica_number), K(force_data_source_member)); } } return ret; @@ -191,7 +192,7 @@ int ObAdminDRTaskUtil::construct_default_params_for_add_command_( const uint64_t &tenant_id, const share::ObLSID &ls_id, int64_t &orig_paxos_replica_number, - common::ObAddr &data_source_server) + common::ObAddr &leader_server) { int ret = OB_SUCCESS; share::ObLSInfo ls_info; @@ -212,14 +213,12 @@ int ObAdminDRTaskUtil::construct_default_params_for_add_command_( ret = OB_INVALID_ARGUMENT; LOG_WARN("invalid leader replica", KR(ret), K(ls_info)); } else { - // If [orig_paxos_replica_number] or [data_source_server] not specified in obadmin command, + // If [orig_paxos_replica_number] not specified in obadmin command, // need to construct from leader_replica, use leader replica as default if (0 == orig_paxos_replica_number) { orig_paxos_replica_number = leader_replica->get_paxos_replica_number(); } - if (!data_source_server.is_valid()) { - data_source_server = leader_replica->get_server(); - } + leader_server = leader_replica->get_server(); } return ret; } diff --git a/src/rootserver/ob_admin_drtask_util.h b/src/rootserver/ob_admin_drtask_util.h index 7df1e5df4f..d42f7e6a62 100644 --- a/src/rootserver/ob_admin_drtask_util.h +++ b/src/rootserver/ob_admin_drtask_util.h @@ -147,12 +147,12 @@ private: // params[in] tenant_id, specified tenant_id // params[in] ls_id, specified ls_id // params[out] orig_paxos_replica_number, orig paxos_replica_number - // params[out] data_source_server, data source + // params[out] leader_server, data source static int construct_default_params_for_add_command_( const uint64_t &tenant_id, const share::ObLSID &ls_id, int64_t &orig_paxos_replica_number, - common::ObAddr &data_source_server); + common::ObAddr &leader_server); // execute remove task // params[in] command_arg, arg which contains admin_command diff --git a/src/rootserver/ob_disaster_recovery_info.cpp b/src/rootserver/ob_disaster_recovery_info.cpp index 07c956ac9a..faefa1b716 100644 --- a/src/rootserver/ob_disaster_recovery_info.cpp +++ b/src/rootserver/ob_disaster_recovery_info.cpp @@ -581,3 +581,29 @@ int DRLSInfo::get_leader_and_member_list( } return ret; } + +int DRLSInfo::get_default_data_source( + ObReplicaMember &data_source, + int64_t &data_size) const +{ + int ret = OB_SUCCESS; + data_size = 0; + data_source.reset(); + const ObLSReplica *leader_replica = nullptr; + if (OB_UNLIKELY(!inited_)) { + ret = OB_NOT_INIT; + LOG_WARN("not init", KR(ret)); + } else if
(OB_FAIL(inner_ls_info_.find_leader(leader_replica))) { + LOG_WARN("fail to find leader", KR(ret), K(inner_ls_info_)); + } else if (OB_ISNULL(leader_replica)) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("leader replica ptr is null", KR(ret), KP(leader_replica)); + } else { + data_size = leader_replica->get_data_size(); + data_source = ObReplicaMember(leader_replica->get_server(), + leader_replica->get_member_time_us(), + leader_replica->get_replica_type(), + leader_replica->get_memstore_percent()); + } + return ret; +} \ No newline at end of file diff --git a/src/rootserver/ob_disaster_recovery_info.h b/src/rootserver/ob_disaster_recovery_info.h index 53218bb903..7340f4cca6 100644 --- a/src/rootserver/ob_disaster_recovery_info.h +++ b/src/rootserver/ob_disaster_recovery_info.h @@ -205,6 +205,13 @@ public: common::ObAddr &leader_addr, common::ObMemberList &member_list, GlobalLearnerList &learner_list); + + // get data_source from leader replica + // @param [out] data_source, leader replica + // @param [out] data_size, leader replica data_size + int get_default_data_source( + ObReplicaMember &data_source, + int64_t &data_size) const; private: int construct_filtered_ls_info_to_use_( const share::ObLSInfo &input_ls_info, diff --git a/src/rootserver/ob_disaster_recovery_task.cpp b/src/rootserver/ob_disaster_recovery_task.cpp index 20de430bcf..b39b58e25e 100644 --- a/src/rootserver/ob_disaster_recovery_task.cpp +++ b/src/rootserver/ob_disaster_recovery_task.cpp @@ -347,7 +347,6 @@ int ObDRTask::deep_copy(const ObDRTask &that) transmit_data_size_ = that.transmit_data_size_; sibling_in_schedule_ = that.sibling_in_schedule_; invoked_source_ = that.invoked_source_; - skip_change_member_list_ = that.skip_change_member_list_; /* generated_time_ shall not be copied, * the generated_time_ is automatically set in the constructor func */ @@ -361,48 +360,6 @@ int ObDRTask::deep_copy(const ObDRTask &that) return ret; } -int ObDRTask::generate_skip_change_member_list( - const ObDRTaskType task_type, - const common::ObReplicaType src_type, - const common::ObReplicaType dst_type, - bool &skip_change_member_list) -{ - int ret = OB_SUCCESS; - if (OB_UNLIKELY(task_type >= ObDRTaskType::MAX_TYPE - || !ObReplicaTypeCheck::is_replica_type_valid(src_type) - || !ObReplicaTypeCheck::is_replica_type_valid(dst_type))) { - ret = OB_INVALID_ARGUMENT; - LOG_WARN("invalid argument", KR(ret), K(task_type), K(src_type), K(dst_type)); - } else if (ObDRTaskType::LS_BUILD_ONLY_IN_MEMBER_LIST == task_type - || ObDRTaskType::LS_REMOVE_NON_PAXOS_REPLICA == task_type - || ObDRTaskType::LS_MODIFY_PAXOS_REPLICA_NUMBER == task_type) { - skip_change_member_list = true; - } else if (ObDRTaskType::LS_REMOVE_PAXOS_REPLICA == task_type) { - skip_change_member_list = false; - } else if (ObDRTaskType::LS_MIGRATE_REPLICA == task_type - || ObDRTaskType::LS_ADD_REPLICA == task_type) { - // no need to modify memberlist when the destination is a non-paxos replica - const bool is_valid_paxos_replica = ObReplicaTypeCheck::is_paxos_replica_V2(dst_type); - if (is_valid_paxos_replica) { - skip_change_member_list = false; - } else { - skip_change_member_list = true; - } - } else if (ObDRTaskType::LS_TYPE_TRANSFORM == task_type) { - if (ObReplicaTypeCheck::is_paxos_replica_V2(dst_type) - != ObReplicaTypeCheck::is_paxos_replica_V2(src_type)) { - // need to modify the member list since the paxos replica number is changed - skip_change_member_list = false; - } else { - skip_change_member_list = true; - } - } else { - ret = OB_ERR_UNEXPECTED; -
LOG_WARN("unexpeted rebalance task", K(ret), K(task_type)); - } - return ret; -} - int ObDRTask::build( const ObDRTaskKey &task_key, const uint64_t tenant_id, @@ -413,7 +370,6 @@ int ObDRTask::build( const int64_t cluster_id, const int64_t transmit_data_size, const obrpc::ObAdminClearDRTaskArg::TaskType invoked_source, - const bool skip_change_member_list, const ObDRTaskPriority priority, const ObString &comment) { @@ -450,7 +406,6 @@ int ObDRTask::build( cluster_id_ = cluster_id; transmit_data_size_ = transmit_data_size; invoked_source_ = invoked_source; - skip_change_member_list_ = skip_change_member_list; set_priority(priority); } } @@ -585,7 +540,8 @@ int ObMigrateLSReplicaTask::execute( get_dst_replica().get_member(), get_data_src_member(), get_paxos_replica_number(), - is_skip_change_member_list()))) { + false/*skip_change_member_list(not used)*/, + get_force_data_src_member()))) { LOG_WARN("fail to init arg", KR(ret)); } else if (OB_FAIL(rpc_proxy.to(get_dst_server()) .by(get_tenant_id()).ls_migrate_replica(arg))) { @@ -765,12 +721,12 @@ int ObMigrateLSReplicaTask::build( const int64_t cluster_id, const int64_t transmit_data_size, const obrpc::ObAdminClearDRTaskArg::TaskType invoked_source, - const bool skip_change_member_list, const ObDRTaskPriority priority, const ObString &comment, const ObDstReplica &dst_replica, const common::ObReplicaMember &src_member, const common::ObReplicaMember &data_src_member, + const common::ObReplicaMember &force_data_src_member, const int64_t paxos_replica_number) { int ret = OB_SUCCESS; @@ -794,7 +750,6 @@ int ObMigrateLSReplicaTask::build( cluster_id, transmit_data_size, invoked_source, - skip_change_member_list, priority, comment))) { LOG_WARN("fail to build ObDRTask", KR(ret), @@ -813,6 +768,7 @@ } else { set_src_member(src_member); set_data_src_member(data_src_member); + set_force_data_src_member(force_data_src_member); paxos_replica_number_ = paxos_replica_number; } } @@ -914,12 +870,12 @@ int ObMigrateLSReplicaTask::build_task_from_sql_result( GCONF.cluster_id, //(not used)cluster_id transmit_data_size, //(not used) obrpc::ObAdminClearDRTaskArg::TaskType::AUTO,//(not used)invoked_source - false, //(not used)skip_change_member_list priority_to_set, //(not used) comment_to_set.ptr(), //comment dst_replica, //(in used)dest_server ObReplicaMember(src_server, 0), //(in used)src_server ObReplicaMember(src_server, 0), //(not used)data_src_member + ObReplicaMember(src_server, 0), //(not used)force_data_src_member src_paxos_replica_number))) { //(not used) LOG_WARN("fail to build a ObMigrateLSReplicaTask", KR(ret)); } else { @@ -1041,7 +997,8 @@ int ObAddLSReplicaTask::execute( get_data_src_member(), get_orig_paxos_replica_number(), get_paxos_replica_number(), - is_skip_change_member_list()))) { + false/*skip_change_member_list(not used)*/, + get_force_data_src_member()))) { LOG_WARN("fail to init arg", KR(ret)); } else if (OB_FAIL(rpc_proxy.to(get_dst_server()) .by(get_tenant_id()).ls_add_replica(arg))) { @@ -1226,11 +1183,11 @@ int ObAddLSReplicaTask::build( const int64_t cluster_id, const int64_t transmit_data_size, const obrpc::ObAdminClearDRTaskArg::TaskType invoked_source, - const bool skip_change_member_list, const ObDRTaskPriority priority, const ObString &comment, const ObDstReplica &dst_replica, const common::ObReplicaMember &data_src_member, + const common::ObReplicaMember &force_data_src_member, const int64_t orig_paxos_replica_number, const int64_t paxos_replica_number) { @@ -1255,7 +1212,6 @@ int
ObAddLSReplicaTask::build( cluster_id, transmit_data_size, invoked_source, - skip_change_member_list, priority, comment))) { LOG_WARN("fail to build ObDRTask", KR(ret), @@ -1271,6 +1227,7 @@ LOG_WARN("fail to assign dst replica", KR(ret), K(dst_replica)); } else { set_data_src_member(data_src_member); + set_force_data_src_member(force_data_src_member); orig_paxos_replica_number_ = orig_paxos_replica_number; paxos_replica_number_ = paxos_replica_number; } @@ -1375,11 +1332,11 @@ int ObAddLSReplicaTask::build_task_from_sql_result( GCONF.cluster_id, //(not used)cluster_id transmit_data_size, //(not used) obrpc::ObAdminClearDRTaskArg::TaskType::AUTO,//(not used)invoked_source - false, //(not used)skip_change_member_list priority_to_set, //(not used) comment_to_set.ptr(), //comments dst_replica, //(in used)dest_server ObReplicaMember(src_server, 0), //(in used)src_server + ObReplicaMember(src_server, 0), //(in used)force_data_src_member src_paxos_replica_number, //(in used) dest_paxos_replica_number))) { //(in used) LOG_WARN("fail to build a ObAddLSReplicaTask", KR(ret)); @@ -1515,7 +1472,7 @@ int ObLSTypeTransformTask::execute( get_data_src_member(), get_orig_paxos_replica_number(), get_paxos_replica_number(), - is_skip_change_member_list()))) { + false/*skip_change_member_list(not used)*/))) { LOG_WARN("fail to init arg", KR(ret)); } else if (OB_FAIL(rpc_proxy.to(get_dst_server()) .by(get_tenant_id()).ls_type_transform(arg))) { @@ -1682,7 +1639,6 @@ int ObLSTypeTransformTask::build( const int64_t cluster_id, const int64_t transmit_data_size, const obrpc::ObAdminClearDRTaskArg::TaskType invoked_source, - const bool skip_change_member_list, const ObDRTaskPriority priority, const ObString &comment, const ObDstReplica &dst_replica, @@ -1712,7 +1668,6 @@ int ObLSTypeTransformTask::build( cluster_id, transmit_data_size, invoked_source, - skip_change_member_list, priority, comment))) { LOG_WARN("fail to build ObDRTask", KR(ret), @@ -1857,7 +1812,6 @@ int ObLSTypeTransformTask::build_task_from_sql_result( GCONF.cluster_id, //(not used)cluster_id transmit_data_size, //(not used) obrpc::ObAdminClearDRTaskArg::TaskType::AUTO,//(not used)invoked_source - false, //(not used)skip_change_member_list priority_to_set, //(not used) comment_to_set.ptr(), //comment dst_replica, //(in used)dest_server @@ -2080,7 +2034,6 @@ int ObRemoveLSReplicaTask::build( const int64_t cluster_id, const int64_t transmit_data_size, const obrpc::ObAdminClearDRTaskArg::TaskType invoked_source, - const bool skip_change_member_list, const ObDRTaskPriority priority, const ObString &comment, const common::ObAddr &leader, @@ -2110,7 +2063,6 @@ int ObRemoveLSReplicaTask::build( cluster_id, transmit_data_size, invoked_source, - skip_change_member_list, priority, comment))) { LOG_WARN("fail to build ObDRTask", KR(ret), @@ -2230,7 +2182,6 @@ int ObRemoveLSReplicaTask::build_task_from_sql_result( GCONF.cluster_id, //(not used)cluster_id transmit_data_size, //(not used) obrpc::ObAdminClearDRTaskArg::TaskType::AUTO,//(not used)invoked_source - false, //(not used)skip_change_member_list priority_to_set, //(not used) comment_to_set.ptr(), //comment dest_server, //(in used)leader @@ -2432,7 +2383,6 @@ int ObLSModifyPaxosReplicaNumberTask::build( const int64_t cluster_id, const int64_t transmit_data_size, const obrpc::ObAdminClearDRTaskArg::TaskType invoked_source, - const bool skip_change_member_list, const ObDRTaskPriority priority, const ObString &comment, const common::ObAddr &dst_server, @@ -2461,7 +2411,6 @@ int
ObLSModifyPaxosReplicaNumberTask::build( cluster_id, transmit_data_size, invoked_source, - skip_change_member_list, priority, comment))) { LOG_WARN("fail to build ObDRTask", KR(ret), @@ -2566,7 +2515,6 @@ int ObLSModifyPaxosReplicaNumberTask::build_task_from_sql_result( GCONF.cluster_id, //(not used)cluster_id transmit_data_size, //(not used) obrpc::ObAdminClearDRTaskArg::TaskType::AUTO,//(not used)invoked_source - true, //(not used)skip_change_member_list priority_to_set, //(not used) comment_to_set.ptr(), //comment dest_server, //(in used)leader diff --git a/src/rootserver/ob_disaster_recovery_task.h b/src/rootserver/ob_disaster_recovery_task.h index 5f688f1911..796bb80f4b 100644 --- a/src/rootserver/ob_disaster_recovery_task.h +++ b/src/rootserver/ob_disaster_recovery_task.h @@ -217,7 +217,6 @@ public: transmit_data_size_(0), sibling_in_schedule_(false), invoked_source_(obrpc::ObAdminClearDRTaskArg::TaskType::AUTO), - skip_change_member_list_(false), generate_time_(common::ObTimeUtility::current_time()), priority_(ObDRTaskPriority::MAX_PRI), comment_("DRTask"), @@ -245,7 +244,6 @@ public: const int64_t cluster_id, const int64_t transmit_data_size, const obrpc::ObAdminClearDRTaskArg::TaskType invoked_source, - const bool skip_change_member_list, const ObDRTaskPriority priority, const ObString &comment); @@ -253,13 +251,6 @@ public: const int ret_code, const ObDRTaskRetComment &ret_comment, ObSqlString &execute_result) const; - -public: - static int generate_skip_change_member_list( - const ObDRTaskType task_type, - const common::ObReplicaType src_type, - const common::ObReplicaType dst_type, - bool &skip_change_member_list); public: virtual const common::ObAddr &get_dst_server() const = 0; @@ -304,7 +295,6 @@ public: K_(transmit_data_size), K_(sibling_in_schedule), K_(invoked_source), - K_(skip_change_member_list), K_(generate_time), K_(priority), K_(comment), @@ -341,9 +331,6 @@ public: // operations of invoked_source_ obrpc::ObAdminClearDRTaskArg::TaskType get_invoked_source() const { return invoked_source_; } void set_invoked_source(obrpc::ObAdminClearDRTaskArg::TaskType t) { invoked_source_ = t; } - // operations of skip_change_member_list - bool is_skip_change_member_list() const { return skip_change_member_list_; } - void set_skip_change_member_list(const bool l) { skip_change_member_list_ = l; } // operations of generate_time_ int64_t get_generate_time() const { return generate_time_; } void set_generate_time(const int64_t generate_time) { generate_time_ = generate_time; } @@ -388,7 +375,6 @@ protected: int64_t transmit_data_size_; bool sibling_in_schedule_; obrpc::ObAdminClearDRTaskArg::TaskType invoked_source_; - bool skip_change_member_list_; int64_t generate_time_; ObDRTaskPriority priority_; ObSqlString comment_; @@ -404,6 +390,7 @@ public: dst_replica_(), src_member_(), data_src_member_(), + force_data_src_member_(), paxos_replica_number_(0) {} virtual ~ObMigrateLSReplicaTask() {} public: @@ -417,12 +404,12 @@ public: const int64_t cluster_id, const int64_t transmit_data_size, const obrpc::ObAdminClearDRTaskArg::TaskType invoked_source, - const bool skip_change_member_list, const ObDRTaskPriority priority, const ObString &comment, const ObDstReplica &dst_replica, const common::ObReplicaMember &src_member, const common::ObReplicaMember &data_src_member, + const common::ObReplicaMember &force_data_src_member, const int64_t paxos_replica_number ); @@ -442,6 +429,7 @@ public: K(dst_replica_), K(src_member_), K(data_src_member_), + K(force_data_src_member_), 
K(paxos_replica_number_)); virtual int get_execute_transmit_size( @@ -491,6 +479,8 @@ public: // operations of data_src_member_; void set_data_src_member(const common::ObReplicaMember &s) { data_src_member_ = s; } const common::ObReplicaMember &get_data_src_member() const { return data_src_member_; } + void set_force_data_src_member(const common::ObReplicaMember &s) { force_data_src_member_ = s; } + const common::ObReplicaMember &get_force_data_src_member() const { return force_data_src_member_; } // operations of paxos_replica_number_ void set_paxos_replica_number(const int64_t paxos_replica_number) { paxos_replica_number_ = paxos_replica_number; } int64_t get_paxos_replica_number() const { return paxos_replica_number_; } @@ -506,6 +496,7 @@ private: ObDstReplica dst_replica_; common::ObReplicaMember src_member_; common::ObReplicaMember data_src_member_; + common::ObReplicaMember force_data_src_member_; int64_t paxos_replica_number_; }; @@ -515,6 +506,7 @@ public: ObAddLSReplicaTask() : ObDRTask(), dst_replica_(), data_src_member_(), + force_data_src_member_(), orig_paxos_replica_number_(0), paxos_replica_number_(0) {} virtual ~ObAddLSReplicaTask() {} @@ -529,11 +521,11 @@ public: const int64_t cluster_id, const int64_t transmit_data_size, const obrpc::ObAdminClearDRTaskArg::TaskType invoked_source, - const bool skip_change_member_list, const ObDRTaskPriority priority, const ObString &comment, const ObDstReplica &dst_replica_, const common::ObReplicaMember &data_src_member, + const common::ObReplicaMember &force_data_src_member, const int64_t orig_paxos_replica_number, const int64_t paxos_replica_number); @@ -552,6 +544,7 @@ public: virtual INHERIT_TO_STRING_KV("ObDRTask", ObDRTask, K(dst_replica_), K(data_src_member_), + K(force_data_src_member_), K(orig_paxos_replica_number_), K(paxos_replica_number_)); virtual int get_execute_transmit_size( @@ -598,6 +591,8 @@ public: // operations of data_src_member_; void set_data_src_member(const common::ObReplicaMember &s) { data_src_member_ = s; } const common::ObReplicaMember &get_data_src_member() const { return data_src_member_; } + void set_force_data_src_member(const common::ObReplicaMember &s) { force_data_src_member_ = s; } + const common::ObReplicaMember &get_force_data_src_member() const { return force_data_src_member_; } // operations of orig_paxos_replica_number_ void set_orig_paxos_replica_number(const int64_t paxos_replica_number) { orig_paxos_replica_number_ = paxos_replica_number; } int64_t get_orig_paxos_replica_number() const { return orig_paxos_replica_number_; } @@ -615,6 +610,7 @@ private: private: ObDstReplica dst_replica_; common::ObReplicaMember data_src_member_; + common::ObReplicaMember force_data_src_member_; int64_t orig_paxos_replica_number_; int64_t paxos_replica_number_; }; @@ -640,7 +636,6 @@ public: const int64_t cluster_id, const int64_t transmit_data_size, const obrpc::ObAdminClearDRTaskArg::TaskType invoked_source, - const bool skip_change_member_list, const ObDRTaskPriority priority, const ObString &comment, const ObDstReplica &dst_replica_, @@ -757,7 +752,6 @@ public: const int64_t cluster_id, const int64_t transmit_data_size, const obrpc::ObAdminClearDRTaskArg::TaskType invoked_source, - const bool skip_change_member_list, const ObDRTaskPriority priority, const ObString &comment, const common::ObAddr &leader, @@ -873,7 +867,6 @@ public: const int64_t cluster_id, const int64_t transmit_data_size, const obrpc::ObAdminClearDRTaskArg::TaskType invoked_source, - const bool skip_change_member_list, const 
ObDRTaskPriority priority, const ObString &comment, const common::ObAddr &dst_server, diff --git a/src/rootserver/ob_disaster_recovery_worker.cpp b/src/rootserver/ob_disaster_recovery_worker.cpp index f723b2472b..fc549c5e90 100755 --- a/src/rootserver/ob_disaster_recovery_worker.cpp +++ b/src/rootserver/ob_disaster_recovery_worker.cpp @@ -2410,7 +2410,6 @@ int ObDRWorker::generate_remove_permanent_offline_replicas_and_push_into_task_ma GCONF.cluster_id, 0/*transmit_data_size*/, obrpc::ObAdminClearDRTaskArg::TaskType::AUTO, - false/*skip change member list*/, ObDRTaskPriority::HIGH_PRI, ObString(drtask::REMOVE_PERMANENT_OFFLINE_REPLICA), leader_addr, @@ -2651,14 +2650,11 @@ int ObDRWorker::construct_extra_infos_to_build_migrate_task( const DRUnitStatInfo &unit_stat_info, const DRUnitStatInfo &unit_in_group_stat_info, const ObReplicaMember &dst_member, - const ObReplicaMember &src_member, uint64_t &tenant_id, share::ObLSID &ls_id, share::ObTaskId &task_id, - ObReplicaMember &data_source, int64_t &data_size, ObDstReplica &dst_replica, - bool &skip_change_member_list, int64_t &old_paxos_replica_number) { int ret = OB_SUCCESS; @@ -2667,26 +2663,12 @@ int ObDRWorker::construct_extra_infos_to_build_migrate_task( LOG_WARN("not init", KR(ret)); } else if (FALSE_IT(task_id.init(self_addr_))) { //shall never be here - } else if (OB_FAIL(choose_disaster_recovery_data_source( - zone_mgr_, - dr_ls_info, - dst_member, - src_member, - data_source, - data_size))) { - LOG_WARN("fail to choose disaster recovery data source", KR(ret)); } else if (OB_FAIL(dst_replica.assign( unit_stat_info.get_unit().unit_id_, unit_in_group_stat_info.get_unit().unit_group_id_, ls_replica.get_zone(), dst_member))) { LOG_WARN("fail to assign dst replica", KR(ret)); - } else if (OB_FAIL(ObDRTask::generate_skip_change_member_list( - ObDRTaskType::LS_MIGRATE_REPLICA, - ls_replica.get_replica_type(), - ls_replica.get_replica_type(), - skip_change_member_list))) { - LOG_WARN("fail to generate skip change member list", KR(ret)); } else { tenant_id = ls_replica.get_tenant_id(); ls_id = ls_replica.get_ls_id(); @@ -2701,7 +2683,6 @@ int ObDRWorker::generate_replicate_to_unit_and_push_into_task_manager( const share::ObLSID &ls_id, const share::ObTaskId &task_id, const int64_t &data_size, - const bool &skip_change_member_list, const ObDstReplica &dst_replica, const ObReplicaMember &src_member, const ObReplicaMember &data_source, @@ -2727,12 +2708,12 @@ int ObDRWorker::generate_replicate_to_unit_and_push_into_task_manager( GCONF.cluster_id, data_size, obrpc::ObAdminClearDRTaskArg::TaskType::AUTO, - skip_change_member_list, ObDRTaskPriority::HIGH_PRI, task_comment, dst_replica, src_member, data_source, + ObReplicaMember()/*empty force_data_source*/, old_paxos_replica_number))) { LOG_WARN("fail to build migrate task", KR(ret)); } else if (OB_FAIL(disaster_recovery_task_mgr_->add_task(migrate_task))) { @@ -2821,7 +2802,6 @@ int ObDRWorker::generate_migrate_ls_task( share::ObTaskId task_id; ObReplicaMember data_source; int64_t data_size = 0; - bool skip_change_member_list = false; ObDstReplica dst_replica; int64_t old_paxos_replica_number = 0; const bool need_check_has_leader_while_remove_replica = false; @@ -2830,10 +2810,12 @@ int ObDRWorker::generate_migrate_ls_task( ObReplicaMember src_member( ls_replica.get_server(), ls_replica.get_member_time_us(), ls_replica.get_replica_type(), ls_replica.get_memstore_percent()); - if (OB_FAIL(construct_extra_infos_to_build_migrate_task( + if (OB_FAIL(dr_ls_info.get_default_data_source(data_source, 
data_size))) { + LOG_WARN("fail to get data_size", KR(ret), K(dr_ls_info)); + } else if (OB_FAIL(construct_extra_infos_to_build_migrate_task( dr_ls_info, ls_replica, unit_stat_info, unit_in_group_stat_info, - dst_member, src_member, tenant_id, ls_id, task_id, data_source, - data_size, dst_replica, skip_change_member_list, + dst_member, tenant_id, ls_id, task_id, + data_size, dst_replica, old_paxos_replica_number))) { LOG_WARN("fail to construct extra infos to build migrate task", KR(ret)); } else if (only_for_display) { @@ -2861,7 +2843,7 @@ int ObDRWorker::generate_migrate_ls_task( } else if (can_generate) { if (OB_FAIL(generate_replicate_to_unit_and_push_into_task_manager( task_key, tenant_id, ls_id, task_id, data_size, - skip_change_member_list, dst_replica, src_member, data_source, + dst_replica, src_member, data_source, old_paxos_replica_number, task_comment, acc_dr_task))) { LOG_WARN("fail to generate replicate to unit task", KR(ret)); } @@ -2926,7 +2908,6 @@ int ObDRWorker::try_generate_remove_replica_locality_alignment_task( GCONF.cluster_id, 0,/*transmit data size*/ obrpc::ObAdminClearDRTaskArg::TaskType::AUTO, - false,/*skip change member list*/ ObDRTaskPriority::HIGH_PRI, comment_to_set, leader_addr, @@ -2984,20 +2965,14 @@ int ObDRWorker::try_generate_add_replica_locality_alignment_task( if (FALSE_IT(task_id.init(self_addr_))) { //shall never be here - } else if (OB_FAIL(choose_disaster_recovery_data_source( - zone_mgr_, - dr_ls_info, - dst_member, - ObReplicaMember(),/*empty*/ - data_source, - data_size))) { - LOG_WARN("fail to choose disaster recovery data source", KR(ret)); } else if (OB_FAIL(dst_replica.assign( my_task->unit_id_, my_task->unit_group_id_, my_task->zone_, dst_member))) { LOG_WARN("fail to assign dst replica", KR(ret)); + } else if (OB_FAIL(dr_ls_info.get_default_data_source(data_source, data_size))) { + LOG_WARN("fail to get data_size", KR(ret), K(dr_ls_info)); } else if (OB_FAIL(add_replica_task.build( task_key, tenant_id, @@ -3008,11 +2983,11 @@ int ObDRWorker::try_generate_add_replica_locality_alignment_task( GCONF.cluster_id, data_size, obrpc::ObAdminClearDRTaskArg::TaskType::AUTO, - false,/*skip change member list*/ ObDRTaskPriority::HIGH_PRI, comment_to_set, dst_replica, data_source, + ObReplicaMember()/*empty force_data_source*/, my_task->orig_paxos_replica_number_, my_task->paxos_replica_number_))) { LOG_WARN("fail to build add replica task", KR(ret)); @@ -3048,7 +3023,6 @@ int ObDRWorker::try_generate_type_transform_locality_alignment_task( int64_t data_size = 0; ObReplicaMember data_source; ObDstReplica dst_replica; - bool skip_change_member_list = false; ObReplicaMember src_member(my_task->dst_server_, my_task->src_member_time_us_, my_task->src_replica_type_, @@ -3068,26 +3042,14 @@ int ObDRWorker::try_generate_type_transform_locality_alignment_task( LOG_WARN("fail to check has leader while member change", KR(ret), K(dr_ls_info)); } else if (!has_leader) { LOG_INFO("may has no leader while member change", K(dr_ls_info)); - } else if (OB_FAIL(choose_disaster_recovery_data_source( - zone_mgr_, - dr_ls_info, - dst_member, - src_member, - data_source, - data_size))) { - LOG_WARN("fail to choose disaster recovery data source", KR(ret)); } else if (OB_FAIL(dst_replica.assign( my_task->unit_id_, my_task->unit_group_id_, my_task->zone_, dst_member))) { LOG_WARN("fail to assign dst replica", KR(ret)); - } else if (OB_FAIL(ObDRTask::generate_skip_change_member_list( - ObDRTaskType::LS_TYPE_TRANSFORM, - my_task->src_replica_type_, - 
my_task->dst_replica_type_, - skip_change_member_list))) { - LOG_WARN("fail to generate skip change member list", KR(ret)); + } else if (OB_FAIL(dr_ls_info.get_default_data_source(data_source, data_size))) { + LOG_WARN("fail to get data_size", KR(ret), K(dr_ls_info)); } else if (OB_FAIL(type_transform_task.build( task_key, tenant_id, @@ -3098,7 +3060,6 @@ GCONF.cluster_id, data_size, obrpc::ObAdminClearDRTaskArg::TaskType::AUTO, - false,/*skip change member list*/ ObDRTaskPriority::HIGH_PRI, ObString(drtask::TRANSFORM_LOCALITY_REPLICA_TYPE), dst_replica, @@ -3157,7 +3118,6 @@ int ObDRWorker::try_generate_modify_paxos_replica_number_locality_alignment_task GCONF.cluster_id, 0,/*transmit data size*/ obrpc::ObAdminClearDRTaskArg::TaskType::AUTO, - true,/*skip change member list*/ ObDRTaskPriority::HIGH_PRI, ObString(drtask::MODIFY_PAXOS_REPLICA_NUMBER), leader_addr, @@ -3297,18 +3257,8 @@ int ObDRWorker::record_task_plan_for_locality_alignment( } case AddReplica: { const AddReplicaLATask *my_task = reinterpret_cast<const AddReplicaLATask *>(task); - ObReplicaMember dst_member(my_task->dst_server_, - my_task->member_time_us_, - my_task->replica_type_, - my_task->memstore_percent_); - if (OB_FAIL(choose_disaster_recovery_data_source( - zone_mgr_, - dr_ls_info, - dst_member, - ObReplicaMember(),/*empty*/ - data_source, - data_size))) { - LOG_WARN("fail to choose data source", KR(ret)); + if (OB_FAIL(dr_ls_info.get_default_data_source(data_source, data_size))) { + LOG_WARN("fail to get data_size", KR(ret), K(dr_ls_info)); } else { task_type = ObDRTaskType::LS_ADD_REPLICA; source_replica_type = data_source.get_replica_type(); @@ -3329,22 +3279,8 @@ } case TypeTransform: { const TypeTransformLATask *my_task = reinterpret_cast<const TypeTransformLATask *>(task); - ObReplicaMember src_member(my_task->dst_server_, - my_task->src_member_time_us_, - my_task->src_replica_type_, - my_task->src_memstore_percent_); - ObReplicaMember dst_member(my_task->dst_server_, - my_task->dst_member_time_us_, - my_task->dst_replica_type_, - my_task->dst_memstore_percent_); - if (OB_FAIL(choose_disaster_recovery_data_source( - zone_mgr_, - dr_ls_info, - dst_member, - ObReplicaMember(),/*empty*/ - data_source, - data_size))) { - LOG_WARN("fail to choose data source", KR(ret)); + if (OB_FAIL(dr_ls_info.get_default_data_source(data_source, data_size))) { + LOG_WARN("fail to get data_size", KR(ret), K(dr_ls_info)); } else { task_type = ObDRTaskType::LS_TYPE_TRANSFORM; source_replica_type = my_task->src_replica_type_; @@ -3702,7 +3638,6 @@ int ObDRWorker::try_remove_readonly_replica_for_deleting_unit_( GCONF.cluster_id, 0/*transmit_data_size*/, obrpc::ObAdminClearDRTaskArg::TaskType::AUTO, - false/*skip change member list*/, ObDRTaskPriority::HIGH_PRI, "shrink unit task", leader_addr, @@ -3821,24 +3756,24 @@ int ObDRWorker::try_type_transform_for_deleting_unit_( target_replica.get_member_time_us(), REPLICA_TYPE_FULL, target_replica.get_memstore_percent()); - if (OB_FAIL(construct_extra_info_to_build_type_transform_task_( + if (OB_FAIL(dr_ls_info.get_default_data_source(data_source, data_size))) { + LOG_WARN("fail to get data_size", KR(ret), K(dr_ls_info)); + } else if (OB_FAIL(construct_extra_info_to_build_type_transform_task_( dr_ls_info, ls_replica, dst_member, - src_member, target_unit_id, target_unit_group_id, task_id, tenant_id, ls_id, leader_addr, - data_source, data_size, dst_replica, old_paxos_replica_number, new_paxos_replica_number))) {
LOG_WARN("fail to construct extra info to build a type transform task", KR(ret), - K(dr_ls_info), K(ls_replica), K(dst_member), K(src_member), + K(dr_ls_info), K(ls_replica), K(dst_member), K(target_unit_id), K(target_unit_group_id)); } else if (only_for_display) { ObLSReplicaTaskDisplayInfo display_info; @@ -3888,8 +3823,8 @@ int ObDRWorker::try_type_transform_for_deleting_unit_( new_paxos_replica_number, acc_dr_task))) { LOG_WARN("fail to generate type transform task", KR(ret), K(task_key), - K(tenant_id), K(ls_id), K(task_id), K(data_size), K(dst_replica), - K(src_member), K(data_source), K(old_paxos_replica_number), + K(tenant_id), K(ls_id), K(task_id), K(data_size), K(data_source), K(dst_replica), + K(src_member), K(old_paxos_replica_number), K(new_paxos_replica_number), K(acc_dr_task)); } } @@ -3973,14 +3908,12 @@ int ObDRWorker::construct_extra_info_to_build_type_transform_task_( DRLSInfo &dr_ls_info, const share::ObLSReplica &ls_replica, const ObReplicaMember &dst_member, - const ObReplicaMember &src_member, const uint64_t &target_unit_id, const uint64_t &target_unit_group_id, share::ObTaskId &task_id, uint64_t &tenant_id, share::ObLSID &ls_id, common::ObAddr &leader_addr, - ObReplicaMember &data_source, int64_t &data_size, ObDstReplica &dst_replica, int64_t &old_paxos_replica_number, @@ -4006,15 +3939,6 @@ int ObDRWorker::construct_extra_info_to_build_type_transform_task_( LOG_WARN("fail to get leader address", KR(ret), K(dr_ls_info)); } else if (OB_FAIL(dr_ls_info.get_ls_id(tenant_id, ls_id))) { LOG_WARN("fail to get tenant and ls id", KR(ret), K(dr_ls_info), K(tenant_id), K(ls_id)); - } else if (OB_FAIL(choose_disaster_recovery_data_source( - zone_mgr_, - dr_ls_info, - dst_member, - src_member, - data_source, - data_size))) { - LOG_WARN("fail to choose disaster recovery data source", KR(ret), K(dr_ls_info), - K(dst_member), K(src_member)); } else if (OB_FAIL(dst_replica.assign( target_unit_id, target_unit_group_id, @@ -4057,7 +3981,6 @@ int ObDRWorker::generate_type_transform_task_( GCONF.cluster_id, data_size, obrpc::ObAdminClearDRTaskArg::TaskType::AUTO, - false,/*skip change member list*/ ObDRTaskPriority::HIGH_PRI, "shrink unit number", dst_replica, @@ -4198,7 +4121,6 @@ int ObDRWorker::generate_cancel_unit_migration_task( GCONF.cluster_id, 0/*transmit_data_size*/, obrpc::ObAdminClearDRTaskArg::TaskType::AUTO, - false/*skip change member list*/, ObDRTaskPriority::HIGH_PRI, comment_to_set, leader_addr, @@ -4416,15 +4338,12 @@ int ObDRWorker::construct_extra_infos_for_generate_migrate_to_unit_task( const DRUnitStatInfo &unit_stat_info, const DRUnitStatInfo &unit_in_group_stat_info, const ObReplicaMember &dst_member, - const ObReplicaMember &src_member, const bool &is_unit_in_group_related, uint64_t &tenant_id, share::ObLSID &ls_id, share::ObTaskId &task_id, - ObReplicaMember &data_source, int64_t &data_size, ObDstReplica &dst_replica, - bool &skip_change_member_list, int64_t &old_paxos_replica_number) { int ret = OB_SUCCESS; @@ -4433,14 +4352,6 @@ int ObDRWorker::construct_extra_infos_for_generate_migrate_to_unit_task( LOG_WARN("not init", KR(ret)); } else if (FALSE_IT(task_id.init(self_addr_))) { //shall never be here - } else if (OB_FAIL(choose_disaster_recovery_data_source( - zone_mgr_, - dr_ls_info, - dst_member, - src_member, - data_source, - data_size))) { - LOG_WARN("fail to choose disaster recovery data source", KR(ret)); } else if (OB_FAIL(dst_replica.assign( is_unit_in_group_related ? 
unit_in_group_stat_info.get_unit().unit_id_ @@ -4449,12 +4360,6 @@ int ObDRWorker::construct_extra_infos_for_generate_migrate_to_unit_task( ls_replica.get_zone(), dst_member))) { LOG_WARN("fail to assign dst replica", KR(ret)); - } else if (OB_FAIL(ObDRTask::generate_skip_change_member_list( - ObDRTaskType::LS_MIGRATE_REPLICA, - ls_replica.get_replica_type(), - ls_replica.get_replica_type(), - skip_change_member_list))) { - LOG_WARN("fail to generate skip change member list", KR(ret)); } else { tenant_id = ls_replica.get_tenant_id(); ls_id = ls_replica.get_ls_id(); @@ -4469,7 +4374,6 @@ int ObDRWorker::generate_migrate_to_unit_task( const share::ObLSID &ls_id, const share::ObTaskId &task_id, const int64_t &data_size, - const bool &skip_change_member_list, const ObDstReplica &dst_replica, const ObReplicaMember &src_member, const ObReplicaMember &data_source, @@ -4501,12 +4405,12 @@ int ObDRWorker::generate_migrate_to_unit_task( GCONF.cluster_id, data_size, obrpc::ObAdminClearDRTaskArg::TaskType::AUTO, - skip_change_member_list, ObDRTaskPriority::LOW_PRI, comment_to_set, dst_replica, src_member, data_source, + ObReplicaMember()/*empty force_data_source*/, old_paxos_replica_number))) { LOG_WARN("fail to build migrate task", KR(ret)); } else if (OB_FAIL(disaster_recovery_task_mgr_->add_task(migrate_task))) { @@ -4571,7 +4475,6 @@ int ObDRWorker::try_migrate_to_unit( ObReplicaMember data_source; int64_t data_size = 0; ObDstReplica dst_replica; - bool skip_change_member_list = false; int64_t old_paxos_replica_number = 0; bool can_generate = false; const bool need_check_has_leader_while_remove_replica = false; @@ -4595,21 +4498,20 @@ int ObDRWorker::try_migrate_to_unit( comment_to_set.assign_ptr(drtask::MIGRATE_REPLICA_DUE_TO_UNIT_NOT_MATCH, strlen(drtask::MIGRATE_REPLICA_DUE_TO_UNIT_NOT_MATCH)); } - if (OB_FAIL(construct_extra_infos_for_generate_migrate_to_unit_task( + if (OB_FAIL(dr_ls_info.get_default_data_source(data_source, data_size))) { + LOG_WARN("fail to get data_size", KR(ret), K(dr_ls_info)); + } else if (OB_FAIL(construct_extra_infos_for_generate_migrate_to_unit_task( dr_ls_info, *ls_replica, *unit_stat_info, *unit_in_group_stat_info, dst_member, - src_member, is_unit_in_group_related, tenant_id, ls_id, task_id, - data_source, data_size, dst_replica, - skip_change_member_list, old_paxos_replica_number))) { LOG_WARN("fail to construct extra infos for generate migrate to unit task", KR(ret)); } else if (only_for_display) { @@ -4653,7 +4555,6 @@ int ObDRWorker::try_migrate_to_unit( ls_id, task_id, data_size, - skip_change_member_list, dst_replica, src_member, data_source, @@ -4799,183 +4700,6 @@ int ObDRWorker::generate_disaster_recovery_paxos_replica_number( return ret; } -int ObDRWorker::choose_disaster_recovery_data_source( - ObZoneManager *zone_mgr, - DRLSInfo &dr_ls_info, - const ObReplicaMember &dst_member, - const ObReplicaMember &src_member, - ObReplicaMember &data_source, - int64_t &data_size) -{ - int ret = OB_SUCCESS; - ObServerInfoInTable server_info; - ObRegion dst_region; - ObDataSourceCandidateChecker type_checker(dst_member.get_replica_type()); - int64_t replica_cnt = 0; - share::ObLSReplica *ls_replica = nullptr; - DRServerStatInfo *server_stat_info = nullptr; - DRUnitStatInfo *unit_stat_info = nullptr; - DRUnitStatInfo *unit_in_group_stat_info = nullptr; - ObZone dst_zone; - - if (OB_ISNULL(zone_mgr)) { - ret = OB_ERR_UNEXPECTED; - LOG_WARN("zone_mgr is null", KR(ret), KP(zone_mgr)); - } else if (OB_FAIL(SVR_TRACER.get_server_zone(dst_member.get_server(), dst_zone))) { 
- LOG_WARN("fail to get server zone", KR(ret), K(dst_member.get_server())); - } else if (OB_FAIL(zone_mgr->get_region(dst_zone, dst_region))) { - LOG_WARN("fail to get region", KR(ret), K(dst_zone)); - } else if (OB_FAIL(dr_ls_info.get_replica_cnt(replica_cnt))) { - LOG_WARN("fail to get replica cnt", KR(ret)); - } else { - ObLSReplica *src_replica = nullptr; - // try task offline src - for (int64_t i = 0; - OB_SUCC(ret) && i < replica_cnt && src_member.is_valid() && nullptr == src_replica; - ++i) { - if (OB_FAIL(dr_ls_info.get_replica_stat( - i, - ls_replica, - server_stat_info, - unit_stat_info, - unit_in_group_stat_info))) { - LOG_WARN("fail to get replica stat", KR(ret)); - } else if (OB_ISNULL(ls_replica) - || OB_ISNULL(server_stat_info) - || OB_ISNULL(unit_stat_info) - || OB_ISNULL(unit_in_group_stat_info)) { - ret = OB_ERR_UNEXPECTED; - LOG_WARN("replica status ptr is null", KR(ret), - KP(ls_replica), - KP(server_stat_info), - KP(unit_stat_info), - KP(unit_in_group_stat_info)); - } else if (ls_replica->is_in_service() - && server_stat_info->is_alive() - && !server_stat_info->is_stopped() - && type_checker.is_candidate(ls_replica->get_replica_type()) - && ls_replica->get_server() == src_member.get_server() - && !ls_replica->get_restore_status().is_failed()) { - src_replica = ls_replica; - break; - } - } - // try the same zone replica - for (int64_t i = 0; - OB_SUCC(ret) && i < replica_cnt && nullptr == src_replica && !dst_zone.is_empty(); - ++i) { - if (OB_FAIL(dr_ls_info.get_replica_stat( - i, - ls_replica, - server_stat_info, - unit_stat_info, - unit_in_group_stat_info))) { - LOG_WARN("fail to get replica stat", KR(ret)); - } else if (OB_ISNULL(ls_replica) - || OB_ISNULL(server_stat_info) - || OB_ISNULL(unit_stat_info) - || OB_ISNULL(unit_in_group_stat_info)) { - ret = OB_ERR_UNEXPECTED; - LOG_WARN("replica status ptr is null", KR(ret), - KP(ls_replica), - KP(server_stat_info), - KP(unit_stat_info), - KP(unit_in_group_stat_info)); - } else if (ls_replica->is_in_service() - && server_stat_info->is_alive() - && !server_stat_info->is_stopped() - && type_checker.is_candidate(ls_replica->get_replica_type()) - && ls_replica->get_zone() == dst_zone - && ls_replica->get_server() != dst_member.get_server() - && !ls_replica->get_restore_status().is_failed()) { - src_replica = ls_replica; - break; - } - } - // try the same region replica - for (int64_t i = 0; - OB_SUCC(ret) && i < replica_cnt && nullptr == src_replica && !dst_region.is_empty(); - ++i) { - common::ObRegion ls_region; - if (OB_FAIL(dr_ls_info.get_replica_stat( - i, - ls_replica, - server_stat_info, - unit_stat_info, - unit_in_group_stat_info))) { - LOG_WARN("fail to get replica stat", KR(ret)); - } else if (OB_ISNULL(ls_replica) - || OB_ISNULL(server_stat_info) - || OB_ISNULL(unit_stat_info) - || OB_ISNULL(unit_in_group_stat_info)) { - ret = OB_ERR_UNEXPECTED; - LOG_WARN("replica status ptr is null", KR(ret), - KP(ls_replica), - KP(server_stat_info), - KP(unit_stat_info), - KP(unit_in_group_stat_info)); - } else if (OB_SUCCESS != zone_mgr->get_region(ls_replica->get_zone(), ls_region)) { - // ignore ret - LOG_WARN("fail to get region", KPC(ls_replica)); - } else if (ls_replica->is_in_service() - && server_stat_info->is_alive() - && !server_stat_info->is_stopped() - && type_checker.is_candidate(ls_replica->get_replica_type()) - && ls_region == dst_region - && ls_replica->get_server() != dst_member.get_server() - && !ls_replica->get_restore_status().is_failed()) { - src_replica = ls_replica; - break; - } - } - // try any 
qualified replica - for (int64_t i = 0; - OB_SUCC(ret) && i < replica_cnt && nullptr == src_replica; - ++i) { - if (OB_FAIL(dr_ls_info.get_replica_stat( - i, - ls_replica, - server_stat_info, - unit_stat_info, - unit_in_group_stat_info))) { - LOG_WARN("fail to get replica stat", KR(ret)); - } else if (OB_ISNULL(ls_replica) - || OB_ISNULL(server_stat_info) - || OB_ISNULL(unit_stat_info) - || OB_ISNULL(unit_in_group_stat_info)) { - ret = OB_ERR_UNEXPECTED; - LOG_WARN("replica status ptr is null", KR(ret), - KP(ls_replica), - KP(server_stat_info), - KP(unit_stat_info), - KP(unit_in_group_stat_info)); - } else if (ls_replica->is_in_service() - && server_stat_info->is_alive() - && !server_stat_info->is_stopped() - && type_checker.is_candidate(ls_replica->get_replica_type()) - && ls_replica->get_server() != dst_member.get_server() - && !ls_replica->get_restore_status().is_failed()) { - src_replica = ls_replica; - break; - } - } - - if (OB_SUCC(ret)) { - if (nullptr != src_replica) { - data_source = ObReplicaMember(src_replica->get_server(), - src_replica->get_member_time_us(), - src_replica->get_replica_type(), - src_replica->get_memstore_percent()); - data_size = src_replica->get_required_size(); - } else { - ret = OB_ENTRY_NOT_EXIST; - LOG_WARN("no valid source candidates", KR(ret)); - } - } - } - return ret; -} - int ObDRWorker::check_ls_only_in_member_list_or_with_flag_( const DRLSInfo &dr_ls_info) { diff --git a/src/rootserver/ob_disaster_recovery_worker.h b/src/rootserver/ob_disaster_recovery_worker.h index 79fa011eed..397b27bee2 100755 --- a/src/rootserver/ob_disaster_recovery_worker.h +++ b/src/rootserver/ob_disaster_recovery_worker.h @@ -161,15 +161,6 @@ private: const MemberChangeType member_change_type, int64_t &new_paxos_replica_number, bool &found); - - static int choose_disaster_recovery_data_source( - ObZoneManager *zone_mgr, - DRLSInfo &dr_ls_info, - const ObReplicaMember &dst_member, - const ObReplicaMember &src_member, - ObReplicaMember &data_source, - int64_t &transmit_data_size); - enum LATaskType { RemovePaxos = 0, @@ -764,14 +755,11 @@ private: const DRUnitStatInfo &unit_stat_info, const DRUnitStatInfo &unit_in_group_stat_info, const ObReplicaMember &dst_member, - const ObReplicaMember &src_member, uint64_t &tenant_id, share::ObLSID &ls_id, share::ObTaskId &task_id, - ObReplicaMember &data_source, int64_t &data_size, ObDstReplica &dst_replica, - bool &skip_change_member_list, int64_t &old_paxos_replica_number); int generate_replicate_to_unit_and_push_into_task_manager( @@ -780,7 +768,6 @@ private: const share::ObLSID &ls_id, const share::ObTaskId &task_id, const int64_t &data_size, - const bool &skip_change_member_list, const ObDstReplica &dst_replica, const ObReplicaMember &src_member, const ObReplicaMember &data_source, @@ -857,15 +844,12 @@ private: const DRUnitStatInfo &unit_stat_info, const DRUnitStatInfo &unit_in_group_stat_info, const ObReplicaMember &dst_member, - const ObReplicaMember &src_member, const bool &is_unit_in_group_related, uint64_t &tenant_id, share::ObLSID &ls_id, share::ObTaskId &task_id, - ObReplicaMember &data_source, int64_t &data_size, ObDstReplica &dst_replica, - bool &skip_change_member_list, int64_t &old_paxos_replica_number); int generate_migrate_to_unit_task( @@ -874,7 +858,6 @@ private: const share::ObLSID &ls_id, const share::ObTaskId &task_id, const int64_t &data_size, - const bool &skip_change_member_list, const ObDstReplica &dst_replica, const ObReplicaMember &src_member, const ObReplicaMember &data_source, @@ -989,14 +972,12 @@ private: 
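// [editorial aside, not part of the patch] With choose_disaster_recovery_data_source()
// removed, DR tasks now ask DRLSInfo for their source, as try_migrate_to_unit does
// earlier in this diff. A minimal sketch of the new call pattern (a fragment, assuming
// a populated DRLSInfo named dr_ls_info):
int ret = OB_SUCCESS;
ObReplicaMember data_source;
int64_t data_size = 0;
if (OB_FAIL(dr_ls_info.get_default_data_source(data_source, data_size))) {
  LOG_WARN("fail to get data_size", KR(ret), K(dr_ls_info));
}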
// @params[in] dr_ls_info, disaster recovery infos of this log stream // @params[in] ls_replica, which replica to do type transform // @params[in] dst_member, dest replica - // @params[in] src_member, source replica // @params[in] target_unit_id, dest replica belongs to whcih unit // @params[in] target_unit_group_id, dest replica belongs to which unit group // @params[out] task_id, the unique task key // @params[out] tenant_id, which tenant's task // @params[out] ls_id, which log stream's task // @params[out] leader_addr, leader replica address - // @params[out] data_source, data source replica // @params[out] data_size, data_size of this replica // @params[out] dst_replica, dest replica infos // @params[out] old_paxos_replica_number, previous number of F-replica count @@ -1005,14 +986,12 @@ private: DRLSInfo &dr_ls_info, const share::ObLSReplica &ls_replica, const ObReplicaMember &dst_member, - const ObReplicaMember &src_member, const uint64_t &target_unit_id, const uint64_t &target_unit_group_id, share::ObTaskId &task_id, uint64_t &tenant_id, share::ObLSID &ls_id, common::ObAddr &leader_addr, - ObReplicaMember &data_source, int64_t &data_size, ObDstReplica &dst_replica, int64_t &old_paxos_replica_number, @@ -1026,7 +1005,7 @@ private: // @params[in] data_size, data_size of this replica // @params[in] dst_replica, dest replica // @params[in] src_member, source member - // @params[in] data_source, data source replica + // @params[in] data_source, data_source of this task // @params[in] old_paxos_replica_number, previous number of F-replica count // @params[in] new_paxos_replica_number, new number of F-replica count // @params[out] acc_dr_task, accumulated disaster recovery task count diff --git a/src/rootserver/ob_root_service.cpp b/src/rootserver/ob_root_service.cpp index 4fdd9a85a2..f486f48709 100755 --- a/src/rootserver/ob_root_service.cpp +++ b/src/rootserver/ob_root_service.cpp @@ -9257,7 +9257,13 @@ int ObRootService::physical_restore_tenant(const obrpc::ObPhysicalRestoreTenantA HEAP_VAR(ObPhysicalRestoreJob, job_info) { // just to check sys tenant's schema with latest schema version ObDDLSQLTransaction trans(schema_service_, false /*end_signal*/); - if (OB_FAIL(trans.start(&sql_proxy_, OB_SYS_TENANT_ID, refreshed_schema_version))) { + ObTimeoutCtx ctx; + const int64_t DEFAULT_TIMEOUT = 60_s; + const int64_t INNER_SQL_TIMEOUT = GCONF.internal_sql_execute_timeout; + const int64_t timeout = MAX(DEFAULT_TIMEOUT, INNER_SQL_TIMEOUT); + if (OB_FAIL(ObShareUtil::set_default_timeout_ctx(ctx, timeout))) { + LOG_WARN("failed to set default timeout ctx", K(ret), K(timeout)); + } else if (OB_FAIL(trans.start(&sql_proxy_, OB_SYS_TENANT_ID, refreshed_schema_version))) { LOG_WARN("failed to start trans, ", K(ret)); } else if (OB_FAIL(RS_JOB_CREATE_EXT(job_id, RESTORE_TENANT, trans, "sql_text", ObHexEscapeSqlStr(arg.get_sql_stmt())))) { diff --git a/src/rootserver/restore/ob_restore_scheduler.cpp b/src/rootserver/restore/ob_restore_scheduler.cpp index fc9290aa6d..2e5a4cd42d 100644 --- a/src/rootserver/restore/ob_restore_scheduler.cpp +++ b/src/rootserver/restore/ob_restore_scheduler.cpp @@ -982,6 +982,18 @@ int ObRestoreScheduler::restore_init_ls(const share::ObPhysicalRestoreJob &job_i } else if (1 == log_path_array.count() && OB_FAIL(restore_source_mgr.add_location_source(job_info.get_restore_scn(), log_path_array.at(0).str()))) { LOG_WARN("failed to add log restore source", KR(ret), K(job_info), K(log_path_array)); + } else if (0 == log_path_array.count()) /*add restore source*/ { + DirArray 
piece_dir_array; + const common::ObSArray piece_array = job_info.get_multi_restore_path_list().get_backup_piece_path_list(); + ARRAY_FOREACH_X(piece_array, i, cnt, OB_SUCC(ret)) { + ObBackupPiecePath piece_path = piece_array.at(i); + if (OB_FAIL(piece_dir_array.push_back(piece_path))) { + LOG_WARN("fail to push back", K(ret), K(piece_path), K(piece_dir_array)); + } + } + if (FAILEDx(restore_source_mgr.add_rawpath_source(job_info.get_restore_scn(), piece_dir_array))) { + LOG_WARN("fail to add raw path source", K(ret), K(job_info), K(piece_dir_array)); + } } } diff --git a/src/rootserver/restore/ob_restore_util.cpp b/src/rootserver/restore/ob_restore_util.cpp index 7bbe8bd80f..8580bf2acb 100644 --- a/src/rootserver/restore/ob_restore_util.cpp +++ b/src/rootserver/restore/ob_restore_util.cpp @@ -23,7 +23,6 @@ #include "rootserver/ob_rs_event_history_table_operator.h" #include "storage/backup/ob_backup_restore_util.h" #include "share/backup/ob_archive_store.h" -#include "storage/backup/ob_backup_data_store.h" #include "share/restore/ob_restore_persist_helper.h"//ObRestorePersistHelper ObRestoreProgressPersistInfo #include "logservice/palf/palf_base_info.h"//PalfBaseInfo #include "storage/ls/ob_ls_meta_package.h"//ls_meta @@ -235,12 +234,53 @@ int ObRestoreUtil::fill_backup_info_( return ret; } +//TODO(mingqiao): consider sql timeout int ObRestoreUtil::fill_multi_backup_path( const obrpc::ObPhysicalRestoreTenantArg &arg, share::ObPhysicalRestoreJob &job) { int ret = OB_SUCCESS; - // TODO: use restore preview url + ObArenaAllocator allocator; + ObArray multi_path_array; + ObArray backup_set_list; + ObArray backup_piece_list; + ObArray log_path_list; + ObString backup_dest_list; + ObArray backup_piece_array; + int64_t last_backup_set_idx = -1; + bool restore_using_compl_log = false; + share::SCN restore_scn; + if (arg.multi_uri_.empty()) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid args", K(ret), K(arg)); + } else if (OB_FAIL(ObPhysicalRestoreUriParser::parse(arg.multi_uri_, allocator, multi_path_array))) { + LOG_WARN("fail to parse uri", K(ret), K(arg)); + } else if (OB_FAIL(get_encrypt_backup_dest_format_str(multi_path_array, allocator, backup_dest_list))) { + LOG_WARN("failed to convert uri", K(ret), K(arg)); + } else if (OB_FAIL(job.set_backup_dest(backup_dest_list))) { + LOG_WARN("failed to copy backup dest", K(ret), K(arg)); + } else if (OB_FAIL(get_restore_scn_from_multi_path_(arg ,multi_path_array, restore_using_compl_log, restore_scn, backup_piece_array))) { + LOG_WARN("fail to get restore scn from multi path", K(ret), K(arg)); + } else if (OB_FALSE_IT(job.set_restore_scn(restore_scn))) { + } else if (OB_FAIL(get_restore_source_from_multi_path(restore_using_compl_log, multi_path_array, arg.passwd_array_, job.get_restore_scn(), + backup_set_list, backup_piece_list, log_path_list))) { + LOG_WARN("fail to get restore source", K(ret), K(arg)); + } else if (restore_using_compl_log) { + if (OB_FAIL(do_fill_backup_path_(backup_set_list, backup_piece_list, log_path_list, job))) { + LOG_WARN("fail to do fill backup path", K(ret), K(backup_set_list), K(backup_piece_array), K(log_path_list)); + } + } else if (OB_FAIL(do_fill_backup_path_with_full_pieces_(backup_set_list, backup_piece_array, log_path_list, job))) { + LOG_WARN("fail to do fill backup path with full pieces", K(ret), K(backup_set_list), K(backup_piece_array), K(log_path_list)); + } + + if (OB_FAIL(ret)) { + } else if (OB_FALSE_IT(last_backup_set_idx = backup_set_list.count() - 1)) { + } else if (last_backup_set_idx < 0) { + ret = 
OB_ERR_UNEXPECTED; + LOG_WARN("invalid idx", K(ret), K(last_backup_set_idx), K(backup_set_list)); + } else if (OB_FAIL(do_fill_backup_info_(backup_set_list.at(last_backup_set_idx).backup_set_path_, job))) { + LOG_WARN("fail to do fill backup info", K(ret)); + } return ret; } @@ -308,9 +348,9 @@ int ObRestoreUtil::fill_compat_backup_path( LOG_WARN("failed to convert uri", K(ret), K(arg), K(tenant_path_array)); } else if (OB_FAIL(job.set_backup_dest(tenant_dest_list))) { LOG_WARN("failed to copy backup dest", K(ret), K(arg)); - } else if (OB_FAIL(check_restore_using_complement_log_(tenant_path_array, restore_using_compl_log))) { + } else if (OB_FAIL(check_restore_using_complement_log(tenant_path_array, restore_using_compl_log))) { LOG_WARN("failed to check only contain backup set", K(ret), K(tenant_path_array)); - } else if (OB_FAIL(fill_restore_scn_( + } else if (OB_FAIL(fill_restore_scn( arg.restore_scn_, arg.restore_timestamp_, arg.with_restore_scn_, tenant_path_array, arg.passwd_array_, restore_using_compl_log, restore_scn))) { LOG_WARN("fail to fill restore scn", K(ret), K(arg), K(tenant_path_array)); @@ -330,7 +370,7 @@ int ObRestoreUtil::fill_compat_backup_path( return ret; } -int ObRestoreUtil::fill_restore_scn_( +int ObRestoreUtil::fill_restore_scn( const share::SCN &src_scn, const ObString ×tamp, const bool with_restore_scn, @@ -422,6 +462,103 @@ int ObRestoreUtil::fill_restore_scn_( return ret; } +int ObRestoreUtil::fill_multi_path_restore_scn_( + const obrpc::ObPhysicalRestoreTenantArg &arg, + const bool &restore_using_compl_log, + const ObIArray &multi_path_array, + const ObIArray &backup_set_array, + const ObIArray &backup_piece_array, + share::SCN &restore_scn) +{ + int ret = OB_SUCCESS; + if (multi_path_array.empty()) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("multi_path array is empty", K(ret)); + } else if (arg.with_restore_scn_) { + // restore scn which is specified by user + restore_scn = arg.restore_scn_; + } else if (!arg.restore_timestamp_.empty()) { + common::ObTimeZoneInfoWrap time_zone_wrap; + if (OB_FAIL(get_multi_path_backup_sys_time_zone_(multi_path_array, time_zone_wrap))) { + LOG_WARN("failed to get backup sys time zone", K(ret)); + } else if (OB_FAIL(convert_restore_timestamp_to_scn_(arg.restore_timestamp_, time_zone_wrap, restore_scn))) { + LOG_WARN("failed to convert restore timestamp to scn", K(ret), "timestamp", arg.restore_timestamp_, K(time_zone_wrap)); + } else { + LOG_INFO("restore scn converted from timestamp is", K(restore_scn)); + } + } else { + if (restore_using_compl_log) { + if (OB_FAIL(fill_multi_path_restore_scn_with_compl_log_(backup_set_array, arg.passwd_array_, restore_scn))) { + LOG_WARN(" fail to fill multi path restore scn with compl log", K(ret), K(restore_scn)); + } + } else if (OB_FAIL(fill_multi_path_restore_scn_without_compl_log_(backup_piece_array, restore_scn))) { + LOG_WARN(" fail to fill multi path restore scn withOUT compl log", K(ret), K(restore_scn)); + } + } + return ret; +} + +int ObRestoreUtil::fill_multi_path_restore_scn_with_compl_log_( + const ObIArray &backup_set_array, + const common::ObString &passwd, + share::SCN &restore_scn) +{ + int ret = OB_SUCCESS; + SCN min_restore_scn = SCN::min_scn(); + if (backup_set_array.empty()) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("backup set array is empty", K(ret)); + } else { + ARRAY_FOREACH_X(backup_set_array, i, cnt, OB_SUCC(ret)) { + const share::ObBackupSetFileDesc &backup_set_file = backup_set_array.at(i); + if (OB_FAIL(backup_set_file.check_passwd(passwd.ptr()))) { + 
LOG_WARN("fail to check passwd", K(ret)); + } else if (share::ObBackupSetFileDesc::BackupSetStatus::SUCCESS != backup_set_file.status_ + || share::ObBackupFileStatus::STATUS::BACKUP_FILE_AVAILABLE != backup_set_file.file_status_) { + LOG_INFO("invalid status backup set can not be used to restore", K(backup_set_file)); + } else { + min_restore_scn = MAX(backup_set_file.min_restore_scn_, min_restore_scn); + } + } + if (OB_SUCC(ret)) { + if (SCN::min_scn() == min_restore_scn) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("invalid min restore scn, do not find available backup tenant path to restore", K(ret)); + } else { + restore_scn = min_restore_scn; + } + } + } + + return ret; +} + +int ObRestoreUtil::fill_multi_path_restore_scn_without_compl_log_( + const ObIArray &backup_piece_array, + share::SCN &restore_scn) +{ + int ret = OB_SUCCESS; + SCN max_checkpoint_scn = SCN::min_scn(); + + if (backup_piece_array.empty()) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("backup piece array is empty", K(ret)); + } else { + ARRAY_FOREACH_X(backup_piece_array, i, cnt, OB_SUCC(ret)) { + const ObSinglePieceDesc &piece_info = backup_piece_array.at(i); + max_checkpoint_scn = MAX(max_checkpoint_scn, piece_info.piece_.checkpoint_scn_); + } + if (SCN::min_scn() == max_checkpoint_scn) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("invalid max checkpoint scn, no archive tenant path", K(ret)); + } else { + restore_scn = max_checkpoint_scn; + } + } + return ret; +} + + int ObRestoreUtil::fill_encrypt_info_( const obrpc::ObPhysicalRestoreTenantArg &arg, share::ObPhysicalRestoreJob &job) @@ -488,7 +625,37 @@ int ObRestoreUtil::get_restore_source( return ret; } -int ObRestoreUtil::check_restore_using_complement_log_( +int ObRestoreUtil::get_restore_source_from_multi_path( + const bool restore_using_compl_log, + const ObIArray& multi_path_array, + const common::ObString &passwd_array, + const share::SCN &restore_scn, + ObIArray &backup_set_list, + ObIArray &backup_piece_list, + ObIArray &log_path_list) +{ + int ret = OB_SUCCESS; + SCN restore_start_scn = SCN::min_scn(); + ObTimeZoneInfoWrap time_zone_wrap; + if (OB_FAIL(get_restore_backup_set_array_from_multi_path_(multi_path_array, passwd_array, restore_scn, + restore_start_scn, backup_set_list, time_zone_wrap))) { + LOG_WARN("fail to get restore backup set array", K(ret), K(restore_scn)); + } else if (!restore_using_compl_log && OB_FAIL(get_restore_log_piece_array_from_multi_path_( + multi_path_array, restore_start_scn, restore_scn, time_zone_wrap, backup_piece_list))) { + LOG_WARN("fail to get restore log piece array", K(ret), K(restore_start_scn), K(restore_scn)); + } else if (restore_using_compl_log && OB_FAIL(get_restore_log_array_for_complement_log_( + backup_set_list, restore_start_scn, restore_scn, backup_piece_list, log_path_list))) { + LOG_WARN("fail to get restore log piece array", K(ret), K(backup_set_list), K(restore_start_scn), K(restore_scn)); + } else if (backup_set_list.empty() || backup_piece_list.empty()) { //log_path_list can be empty when add restore source + ret = OB_ENTRY_NOT_EXIST; + LOG_WARN("no backup set path or log piece can be used to restore", K(ret), + K(backup_set_list), K(backup_piece_list), K(log_path_list), K(restore_start_scn), K(restore_scn)); + LOG_USER_ERROR(OB_ENTRY_NOT_EXIST, "no backup set path or log piece can be used to restore"); + } + return ret; +} + +int ObRestoreUtil::check_restore_using_complement_log( const ObIArray &tenant_path_array, bool &restore_using_compl_log) { @@ -519,6 +686,99 @@ int 
ObRestoreUtil::check_restore_using_complement_log_(
   return ret;
 }
+int ObRestoreUtil::get_restore_scn_from_multi_path_(
+    const obrpc::ObPhysicalRestoreTenantArg &arg,
+    ObIArray<ObString> &multi_path_array,
+    bool &use_complement_log,
+    share::SCN &restore_scn,
+    ObArray<share::ObSinglePieceDesc> &backup_piece_array)
+{
+  int ret = OB_SUCCESS;
+  const int64_t OB_BACKUP_MAX_BACKUP_SET_ID = 20;
+  ObArray<std::pair<ObString, share::ObBackupSetFileDesc>> path_set_pairs;
+  ObArray<share::ObBackupSetFileDesc> backup_set_array;
+  use_complement_log = true;
+  if (multi_path_array.empty()) {
+    ret = OB_INVALID_ARGUMENT;
+    LOG_WARN("multi path array is empty", K(ret));
+  } else {
+    ObBackupDest backup_dest;
+    ObArchiveStore store;
+    ObSinglePieceDesc backup_piece;
+    ARRAY_FOREACH_X(multi_path_array, i, cnt, OB_SUCC(ret)) {
+      backup_dest.reset();
+      store.reset();
+      backup_piece.piece_.reset();
+      ObExternBackupSetInfoDesc backup_set_info;
+      std::pair<ObString, share::ObBackupSetFileDesc> path_set_pair;
+      const ObString &multi_path = multi_path_array.at(i);
+      bool is_exist = false;
+      bool is_empty_piece = true;
+      if (OB_FAIL(backup_dest.set(multi_path))) {
+        LOG_WARN("fail to set backup dest", K(ret), K(multi_path));
+      } else if (OB_FAIL(store.init(backup_dest))) {
+        LOG_WARN("fail to init ObArchiveStore", K(ret), K(backup_dest));
+      } else if (OB_FAIL(store.is_tenant_archive_piece_infos_file_exist(is_exist))) { // read archive piece info
+        LOG_WARN("fail to check if tenant archive piece info exists", K(ret), K(backup_dest));
+      } else if (OB_FALSE_IT(use_complement_log = (use_complement_log && !is_exist))) {
+      } else if (is_exist) {
+        if (OB_FAIL(store.get_single_piece_info(is_empty_piece, backup_piece))) {
+          LOG_WARN("fail to read single piece info", K(ret), K(backup_dest), K(store));
+        } else if (!is_empty_piece) {
+          if (OB_FAIL(backup_piece.piece_.path_.assign(multi_path))) {
+            LOG_WARN("fail to assign backup piece path", K(ret), K(backup_piece));
+          } else if (OB_FAIL(backup_piece_array.push_back(backup_piece))) {
+            LOG_WARN("fail to push back", K(ret), K(backup_piece));
+          }
+        }
+      } else if (OB_FAIL(get_backup_set_info_from_multi_path_(multi_path, backup_set_info))) { // read backup set info
+        if (OB_BACKUP_FILE_NOT_EXIST == ret) {
+          ret = OB_SUCCESS;
+          LOG_INFO("ignore non backup set dir");
+        } else {
+          LOG_WARN("fail to get backup set info from multi path", K(ret));
+        }
+      }
+
+      if (OB_FAIL(ret)) {
+      } else if (OB_FALSE_IT(path_set_pair.first = multi_path)) {
+      } else if (OB_FALSE_IT(path_set_pair.second = backup_set_info.backup_set_file_)) {
+      } else if (OB_FAIL(path_set_pairs.push_back(path_set_pair))) {
+        LOG_WARN("fail to push back", K(ret));
+      } else if (!backup_set_info.is_valid()) {
+      } else if (OB_FAIL(backup_set_array.push_back(backup_set_info.backup_set_file_))) {
+        LOG_WARN("fail to push back", K(ret), K(backup_set_info));
+      }
+    }
+
+    if (OB_FAIL(ret)) {
+    } else if (OB_FAIL(sort_multi_paths_by_backup_set_id_(path_set_pairs, multi_path_array))) {
+      LOG_WARN("fail to sort multi paths by backup set id", K(ret));
+    } else if (OB_FAIL(fill_multi_path_restore_scn_(arg, use_complement_log, multi_path_array,
+                                                    backup_set_array, backup_piece_array, restore_scn))) {
+      LOG_WARN("fail to fill multi path restore scn", K(ret), K(arg), K(use_complement_log));
+    } else if (!use_complement_log && OB_FAIL(sort_backup_piece_array_(backup_piece_array))) {
+      LOG_WARN("fail to sort backup piece array", K(ret));
+    } else {
+      LOG_INFO("check if using complement log and get restore scn", K(use_complement_log), K(restore_scn));
+    }
+  }
+  return ret;
+}
+
+int ObRestoreUtil::sort_backup_piece_array_(ObArray<share::ObSinglePieceDesc> &backup_piece_array)
+{
+  int ret = OB_SUCCESS;
+  if 
(backup_piece_array.empty()) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("empty backup_piece_array", K(ret)); + } else { + ObSinglePieceDescComparator cmp; + std::sort(backup_piece_array.begin(), backup_piece_array.end(), cmp); + } + return ret; +} + int ObRestoreUtil::get_restore_backup_set_array_( const ObIArray &tenant_path_array, const common::ObString &passwd_array, @@ -556,6 +816,264 @@ int ObRestoreUtil::get_restore_backup_set_array_( return ret; } +int ObRestoreUtil::get_restore_backup_set_array_from_multi_path_( + const ObIArray &multi_path_array, + const common::ObString &passwd_array, + const SCN &restore_scn, + SCN &restore_start_scn, + ObIArray &backup_set_list, + ObTimeZoneInfoWrap &time_zone_wrap) +{ + int ret = OB_SUCCESS; + int tmp_ret = OB_SUCCESS; + const int64_t OB_BACKUP_MAX_BACKUP_SET_ID = 20; + ObBackupSetFilter::BackupSetMap backup_set_map; + common::hash::ObHashMap backup_set_path_map; + share::SCN min_restore_scn = SCN::max_scn(); + bool has_inc_backup_set = false; + ObBackupSetFileDesc backup_set_file; + time_zone_wrap.reset(); + if (multi_path_array.empty()) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("multi path array is empty", K(ret)); + } else if (OB_FAIL(backup_set_map.create(OB_BACKUP_MAX_BACKUP_SET_ID, "BackupSetMap"))) { + LOG_WARN("fail to create backup set map", K(ret)); + } else if (OB_FAIL(backup_set_path_map.create(OB_BACKUP_MAX_BACKUP_SET_ID, "BackupPathMap"))) { + LOG_WARN("fail to create backup set path map", K(ret)); + } else { + uint64_t tenant_id = UINT64_MAX; + storage::ObBackupDataStore store; + share::ObBackupDest backup_dest; + storage::ObExternBackupSetInfoDesc backup_set_info; + ARRAY_FOREACH_X(multi_path_array, i, cnt, OB_SUCC(ret)) { + const ObString &multi_path = multi_path_array.at(i); + store.reset(); + backup_dest.reset(); + backup_set_info.backup_set_file_.reset(); + if (OB_FAIL(backup_dest.set(multi_path.ptr()))) { + LOG_WARN("fail to set backup dest", K(ret)); + } else if (OB_FAIL(store.init(backup_dest))) { + LOG_WARN("failed to init backup store", K(ret)); + } else if (OB_FAIL(store.read_backup_set_info(backup_set_info))) { //check if backup set + if (OB_BACKUP_FILE_NOT_EXIST == ret) { + ret = OB_SUCCESS; + LOG_INFO("skip log dir", K(ret), K(backup_dest)); + continue; + } else { + LOG_WARN("fail to read backup set info", K(ret), K(store)); + } + } else if (OB_FAIL(backup_set_file.assign(backup_set_info.backup_set_file_))) { + LOG_WARN("fail to assign backup set file", K(ret), "backup_set_file", backup_set_info.backup_set_file_); + } else if (share::ObBackupSetFileDesc::BackupSetStatus::SUCCESS != backup_set_file.status_ //check if available + || share::ObBackupFileStatus::STATUS::BACKUP_FILE_AVAILABLE != backup_set_file.file_status_) { + LOG_INFO("invalid status backup set can not be used to restore", K(backup_set_file)); + } else { // available backup sets + if (backup_set_file.backup_type_.is_inc_backup()) { + has_inc_backup_set = true; + } + // the min_retore_scn of the earliest full backup set is the minimum restorable scn + min_restore_scn = backup_set_file.backup_type_.is_full_backup() + ? MIN(min_restore_scn, backup_set_file.min_restore_scn_) : min_restore_scn; + // restoring from different tenants is not allowed + tenant_id = UINT64_MAX == tenant_id ? 
backup_set_file.tenant_id_ : tenant_id; + if (tenant_id != backup_set_file.tenant_id_) { + ret = OB_OP_NOT_ALLOW; + LOG_USER_ERROR(OB_OP_NOT_ALLOW, "restoring backupsets of different tenants"); + break; + } else if (OB_FAIL(backup_set_path_map.set_refactored(backup_set_file.backup_set_id_, multi_path))) { + LOG_WARN("fail to set refactored", K(ret), K(backup_set_file)); + } else if (OB_FAIL(backup_set_file.check_passwd(passwd_array.ptr()))) { + LOG_WARN("fail to check passwd", K(ret)); + } else if (!time_zone_wrap.is_valid() + && OB_FAIL(store.get_single_backup_set_sys_time_zone_wrap(time_zone_wrap))) { + LOG_WARN("fail to get backup set sys time zone wrap", K(ret), K(store)); + } else if (backup_set_file.min_restore_scn_ > restore_scn) { + // backup set file's min restore log ts > restore end log ts, can not be used to restore + LOG_INFO("min restore scn of backup set file is greater than restore scn. can't use to restore.", + K(ret), K(backup_set_file), K(restore_scn)); + } else if (OB_FAIL(fill_backup_set_map_(backup_set_file, + backup_set_map, + restore_start_scn))) { + LOG_WARN("fail to fill backup set map", K(ret), K(backup_set_info)); + } + } + } + + if(OB_SUCC(ret)) { + if (backup_set_map.empty()) { // no "usable" backup sets, three cases: + ret = OB_RESTORE_SOURCE_NOT_ENOUGH; + if (SCN::max_scn() > min_restore_scn) { // 1. do have full backup sets, but not enough to restore_scn + const bool is_too_small = true; + int64_t time_str_pos = 0; + int64_t msg_pos = 0; + char err_msg[OB_MAX_ERROR_MSG_LEN] = { 0 }; + char time_str[OB_MAX_TIME_STR_LENGTH] = { 0 }; + if (OB_TMP_FAIL(ObTimeConverter::scn_to_str(min_restore_scn.get_val_for_inner_table_field(), + time_zone_wrap.get_time_zone_info(), + time_str, OB_MAX_TIME_STR_LENGTH, time_str_pos))) { + LOG_WARN("fail to convert scn to str", K(ret), K(tmp_ret), K(min_restore_scn), K(time_zone_wrap)); + } else if (OB_TMP_FAIL(databuff_printf(err_msg, OB_MAX_ERROR_MSG_LEN, msg_pos, + "no full backup set can be used to restore to given time, minimum restorable time is %s", + time_str))) { + LOG_WARN("fail to databuff printf", K(ret), K(tmp_ret), K(msg_pos), K(time_str)); + } + LOG_WARN("min restore scn of all backup sets are greater than restore scn", K(min_restore_scn), K(restore_scn)); + LOG_USER_ERROR(OB_RESTORE_SOURCE_NOT_ENOUGH, err_msg); + } else { // 2. do NOT have full backup sets, may have inc backup sets + if (has_inc_backup_set) { + LOG_WARN("no full backup set exists", K(ret)); + LOG_USER_ERROR(OB_RESTORE_SOURCE_NOT_ENOUGH, "no full backup set exists"); + } else { // 3. 
do NOT have inc backup sets, which means all files are unavailable
+          LOG_WARN("no backup set is available", K(ret), K(restore_scn));
+          LOG_USER_ERROR(OB_RESTORE_SOURCE_NOT_ENOUGH, "no backup set is available");
+        }
+      }
+    } else if (OB_FAIL(get_restore_backup_set_array_from_backup_set_map_(backup_set_path_map,
+                                                                         backup_set_map,
+                                                                         backup_set_list))) {
+      LOG_WARN("fail to get restore backup set array from backup set map", K(ret));
+    } else {
+      LOG_INFO("obtain restore start scn from backup sets", K(restore_start_scn));
+    }
+  }
+  return ret;
+}
+
+int ObRestoreUtil::sort_multi_paths_by_backup_set_id_(
+    const ObArray<std::pair<ObString, share::ObBackupSetFileDesc>> &path_set_pairs,
+    ObIArray<ObString> &multi_path_array)
+{
+  int ret = OB_SUCCESS;
+  if (path_set_pairs.empty() || path_set_pairs.count() != multi_path_array.count()) {
+    ret = OB_INVALID_ARGUMENT;
+    LOG_WARN("the number of path_set_pairs is invalid", K(ret),
+             "pair count", path_set_pairs.count(),
+             "array count", multi_path_array.count());
+  } else {
+    multi_path_array.reset();
+
+    std::sort(
+        path_set_pairs.get_data(), path_set_pairs.get_data() + path_set_pairs.count(),
+        [](const std::pair<ObString, share::ObBackupSetFileDesc> &a,
+           const std::pair<ObString, share::ObBackupSetFileDesc> &b) { return a.second.backup_set_id_ < b.second.backup_set_id_; });
+
+    ARRAY_FOREACH_X(path_set_pairs, i, cnt, OB_SUCC(ret)) {
+      const ObString &path = path_set_pairs.at(i).first;
+      if (OB_FAIL(multi_path_array.push_back(path))) {
+        LOG_WARN("fail to push back", K(ret));
+      }
+    }
+  }
+  return ret;
+}
+
+int ObRestoreUtil::get_backup_set_info_from_multi_path_(const ObString &multi_path, ObExternBackupSetInfoDesc &backup_set_info)
+{
+  int ret = OB_SUCCESS;
+  ObBackupDataStore store;
+  ObBackupDest dest;
+  backup_set_info.backup_set_file_.reset();
+  if (multi_path.empty()) {
+    ret = OB_INVALID_ARGUMENT;
+    LOG_WARN("multi_path is empty!", K(ret));
+  } else if (OB_FAIL(dest.set(multi_path))) {
+    LOG_WARN("fail to set backup dest", K(ret), K(multi_path));
+  } else if (OB_FAIL(store.init(dest))) {
+    LOG_WARN("fail to init ObBackupDataStore", K(ret), K(dest));
+  } else if (OB_FAIL(store.read_backup_set_info(backup_set_info))) {
+    LOG_WARN("fail to read backup set info", K(ret), K(store));
+  }
+  return ret;
+}
+
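// [editorial aside, not part of the patch] fill_backup_set_map_ (later in this hunk)
// keeps only a restorable chain of backup sets: a full backup set clears the map and
// seeds it; an incremental set is kept only if both its prev_full and prev_inc
// predecessors are already present. A self-contained sketch of that chaining rule
// with plain types (a simplification, not the real BackupSetMap):
#include <map>
struct SetDesc { long id; long prev_full_id; long prev_inc_id; bool is_full; };
static void add_backup_set(std::map<long, SetDesc> &chain, const SetDesc &s)
{
  if (s.is_full) {
    chain.clear();              // a newer full set restarts the chain
    chain[s.id] = s;
  } else if (chain.count(s.prev_full_id) != 0 && chain.count(s.prev_inc_id) != 0) {
    chain[s.id] = s;            // incremental set extends an existing chain
  }                             // otherwise: predecessor missing, set is skipped
}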
+int ObRestoreUtil::get_restore_backup_set_array_from_backup_set_map_(
+    const common::hash::ObHashMap<int64_t, ObString> &backup_set_path_map,
+    ObBackupSetFilter::BackupSetMap &backup_set_map,
+    ObIArray<ObRestoreBackupSetBriefInfo> &backup_set_list)
+{
+  int ret = OB_SUCCESS;
+  ObString backup_set_path;
+  char encrypt_backup_set_dest[OB_MAX_BACKUP_DEST_LENGTH] = { 0 };
+  ObBackupDest backup_dest;
+  ObRestoreBackupSetBriefInfo tmp_backup_set_brief_info;
+  ObBackupSetFilter::BackupSetMap::iterator iter = backup_set_map.begin();
+  for ( ; OB_SUCC(ret) && iter != backup_set_map.end(); ++iter) {
+    // construct path which includes root_path, host and storage info
+    const share::ObBackupSetDesc &desc = iter->second;
+    tmp_backup_set_brief_info.reset();
+    tmp_backup_set_brief_info.backup_set_desc_ = desc;
+    if (OB_FAIL(backup_set_path_map.get_refactored(desc.backup_set_id_, backup_set_path))) {
+      LOG_WARN("fail to get refactored", K(ret), K(desc));
+    } else if (OB_FAIL(backup_dest.set(backup_set_path))) {
+      LOG_WARN("fail to set backup set", K(ret));
+    } else if (OB_FAIL(backup_dest.get_backup_dest_str(encrypt_backup_set_dest, OB_MAX_BACKUP_DEST_LENGTH))) {
+      LOG_WARN("fail to get backup dest str", K(ret), K(backup_dest));
+    } else if (OB_FAIL(tmp_backup_set_brief_info.backup_set_path_.assign(encrypt_backup_set_dest))) {
+      LOG_WARN("fail to assign", K(ret), K(backup_set_path));
+    } else if (OB_FAIL(backup_set_list.push_back(tmp_backup_set_brief_info))) {
+      LOG_WARN("fail to push back", K(ret), K(tmp_backup_set_brief_info));
+    }
+  }
+  return ret;
+}
+
+int ObRestoreUtil::fill_backup_set_map_(
+    const share::ObBackupSetFileDesc &backup_set_file,
+    ObBackupSetFilter::BackupSetMap &backup_set_map,
+    share::SCN &restore_start_scn)
+{
+  int ret = OB_SUCCESS;
+  share::ObBackupSetDesc backup_set_desc;
+  if (!backup_set_file.is_valid()) {
+    ret = OB_INVALID_ARGUMENT;
+    LOG_WARN("invalid backup set file desc", K(ret), K(backup_set_file));
+  } else if (OB_FALSE_IT(backup_set_desc.backup_set_id_ = backup_set_file.backup_set_id_)) {
+  } else if (OB_FALSE_IT(backup_set_desc.backup_type_ = backup_set_file.backup_type_)) {
+  } else if (backup_set_file.backup_type_.is_full_backup()) {
+    backup_set_desc.min_restore_scn_ = backup_set_file.min_restore_scn_;
+    backup_set_desc.total_bytes_ = backup_set_file.stats_.output_bytes_;
+    if (OB_FAIL(backup_set_map.clear())) {
+      LOG_WARN("fail to clear backup set map", K(ret));
+    } else if (OB_FAIL(backup_set_map.set_refactored(backup_set_file.backup_set_id_, backup_set_desc))) {
+      LOG_WARN("fail to set refactored backup set map", K(ret), "backup set id", backup_set_file.backup_set_id_, K(backup_set_desc));
+    } else {
+      restore_start_scn = backup_set_file.start_replay_scn_;
+      LOG_INFO("find one full backup set", K(backup_set_file));
+    }
+  } else if (backup_set_file.backup_type_.is_inc_backup()) {
+    share::ObBackupSetDesc value;
+    value.backup_set_id_ = backup_set_file.prev_full_backup_set_id_;
+    backup_set_desc.min_restore_scn_ = backup_set_file.min_restore_scn_;
+    backup_set_desc.total_bytes_ = backup_set_file.stats_.output_bytes_;
+    if (OB_FAIL(backup_set_map.get_refactored(backup_set_file.prev_full_backup_set_id_, value))) {
+      if (OB_ENTRY_NOT_EXIST == ret) {
+        ret = OB_SUCCESS;
+        LOG_INFO("prev full backup set not exist", K(backup_set_file));
+      } else {
+        LOG_WARN("fail to get refactored", K(ret), K(backup_set_file));
+      }
+    } else if (OB_FAIL(backup_set_map.get_refactored(backup_set_file.prev_inc_backup_set_id_, value))) {
+      if (OB_ENTRY_NOT_EXIST == ret) {
+        ret = OB_SUCCESS;
+        LOG_INFO("prev inc backup set not exist", K(backup_set_file));
+      } else {
+        LOG_WARN("fail to get refactored", K(ret), K(backup_set_file));
+      }
+    } else if (OB_FAIL(backup_set_map.set_refactored(backup_set_file.backup_set_id_, backup_set_desc))) {
+      LOG_WARN("fail to set refactored backup set map", K(ret), "backup set id", backup_set_file.backup_set_id_,
+               K(backup_set_desc));
+    } else {
+      restore_start_scn = backup_set_file.start_replay_scn_;
+      LOG_INFO("find one inc backup set", K(backup_set_file));
+    }
+  } else {
+    ret = OB_ERR_UNEXPECTED;
+    LOG_WARN("Invalid backup set type", K(ret), K(backup_set_file));
+  }
+  return ret;
+}
+
 int ObRestoreUtil::get_restore_backup_piece_list_(
     const ObBackupDest &dest,
     const ObArray &piece_array,
@@ -673,6 +1191,323 @@ int ObRestoreUtil::get_restore_log_piece_array_(
   return ret;
 }
+int ObRestoreUtil::get_restore_log_piece_array_from_multi_path_(
+    const ObIArray<ObString> &multi_path_array,
+    const SCN &restore_start_scn,
+    const SCN &restore_end_scn,
+    const ObTimeZoneInfoWrap &time_zone_wrap,
+    ObIArray<ObRestoreLogPieceBriefInfo> &backup_piece_list)
+{
+  int ret = OB_SUCCESS;
+  ObArray<ObPieceKey> piece_keys;
+  common::hash::ObHashMap<ObPieceKey, ObString> multi_path_map;
+  bool is_empty_piece = true;
+  ObExternPieceWholeInfo piece_whole_info;
+  ObBackupDest dest;
+  backup_piece_list.reset();
+  if (multi_path_array.empty()) {
+    ret = OB_INVALID_ARGUMENT;
+    LOG_WARN("invalid argument", K(ret));
+  } else if 
(restore_start_scn >= restore_end_scn) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", K(ret), K(restore_start_scn), K(restore_end_scn)); + } else if (OB_FAIL(multi_path_map.create(OB_MAX_BACKUP_PIECE_NUM, "MultiPathMap"))) { + LOG_WARN("fail to create multi path map", K(ret)); + } else if (OB_FAIL(get_all_piece_keys_(multi_path_array, piece_keys, multi_path_map))) { + LOG_WARN("fail to get all piece keys", K(ret)); + } else if (OB_FAIL(get_latest_non_empty_piece_(piece_keys, multi_path_map, piece_whole_info, is_empty_piece))) { + LOG_WARN("fail to get latest non empty piece", K(ret), K(piece_keys)); + } else if (is_empty_piece) { + ret = OB_ENTRY_NOT_EXIST; + LOG_WARN("no piece is found", K(ret), K(restore_start_scn), K(restore_end_scn)); + LOG_USER_ERROR(OB_RESTORE_SOURCE_NOT_ENOUGH, "No enough log for restore"); + } else if (OB_FAIL(piece_whole_info.his_frozen_pieces_.push_back(piece_whole_info.current_piece_))) { + LOG_WARN("failed to push backup piece", K(ret)); + } else if (OB_FAIL(get_piece_paths_in_range_from_multi_path_(piece_whole_info.his_frozen_pieces_, + multi_path_map, restore_start_scn, restore_end_scn, time_zone_wrap, backup_piece_list))) { + LOG_WARN("fail to get pieces paths in range from multi path", K(ret), + K(piece_whole_info), K(restore_start_scn), K(restore_end_scn)); + } + return ret; +} + +int ObRestoreUtil::get_all_piece_keys_(const ObIArray &multi_path_array, + ObArray &piece_keys, + common::hash::ObHashMap &multi_path_map) +{ + int ret = OB_SUCCESS; + piece_keys.reset(); + uint64_t tenant_id = UINT64_MAX; + ObPieceKey key; + ObArchiveStore store; + ObBackupDest dest; + ObSinglePieceDesc piece_desc; + bool is_empty_piece = true; + ARRAY_FOREACH_X(multi_path_array, i, cnt, OB_SUCC(ret)) { + ObString multi_path = multi_path_array.at(i); + key.reset(); + store.reset(); + dest.reset(); + piece_desc.piece_.reset(); + if (OB_FAIL(dest.set(multi_path))) { + LOG_WARN("fail to set backup dest", K(ret)); + } else if (OB_FAIL(store.init(dest))) { + LOG_WARN("fail to init ObArchiveStore", K(ret), K(dest)); + } else if (OB_FAIL(store.get_single_piece_info(is_empty_piece, piece_desc))) { + LOG_WARN("fail to get single piece info", K(ret), K(store)); + } else if (is_empty_piece + || ObBackupFileStatus::STATUS::BACKUP_FILE_AVAILABLE != piece_desc.piece_.file_status_) { + LOG_INFO("skip non log dir or unavailable piece", K(dest)); + } else { + key.dest_id_ = piece_desc.piece_.key_.dest_id_; + key.piece_id_ = piece_desc.piece_.key_.piece_id_; + key.round_id_ = piece_desc.piece_.key_.round_id_; + tenant_id = UINT64_MAX == tenant_id ? 
piece_desc.piece_.key_.tenant_id_ : tenant_id; + if (tenant_id != piece_desc.piece_.key_.tenant_id_) { + ret = OB_OP_NOT_ALLOW; + LOG_USER_ERROR(OB_OP_NOT_ALLOW, "restoring backup pieces of different tenants"); + } else if (OB_FAIL(piece_keys.push_back(key))) { + LOG_WARN("fail to push back", K(ret)); + } else if (OB_FAIL(multi_path_map.set_refactored(key, multi_path))) { + LOG_WARN("fail to set refactored", K(ret), K(key)); + } else { + LOG_INFO("found a piece", K(key)); + } + } + } + + if (OB_SUCC(ret)) { + if (!piece_keys.empty()) { + std::sort(piece_keys.begin(), piece_keys.end()); + } else { + ret = OB_RESTORE_SOURCE_NOT_ENOUGH; + LOG_USER_ERROR(OB_RESTORE_SOURCE_NOT_ENOUGH, "no usable log pieces"); + } + } + return ret; +} + +int ObRestoreUtil::get_latest_non_empty_piece_( + const ObArray &piece_keys, + const common::hash::ObHashMap &multi_path_map, + ObExternPieceWholeInfo &piece_whole_info, + bool &is_empty_piece) +{ + int ret = OB_SUCCESS; + is_empty_piece = true; + for (int64_t i = piece_keys.count() - 1; OB_SUCC(ret) && i >= 0; i--) { + const ObPieceKey &key = piece_keys.at(i); + ObString multi_path; + ObBackupDest dest; + ObArchiveStore store; + if (OB_FAIL(multi_path_map.get_refactored(key, multi_path))) { + LOG_WARN("fail to get refactored", K(ret), K(key)); + } else if (OB_FAIL(dest.set(multi_path))) { + LOG_WARN("fail to set backup dest", K(ret)); + } else if (OB_FAIL(store.init(dest))) { + LOG_WARN("fail to init ObArchiveStore", K(ret), K(dest)); + } else if (OB_FAIL(store.get_whole_piece_info(is_empty_piece, piece_whole_info))) { + LOG_WARN("failed to get whole piece info", K(ret), K(key)); + } else if (!is_empty_piece) { + break; + } + } + if (OB_SUCC(ret) && !is_empty_piece) { + LOG_INFO("get latest non empty piece", K(piece_whole_info)); + } + return ret; +} + +int ObRestoreUtil::get_piece_paths_in_range_from_multi_path_( + const ObArray &candidate_pieces, + const common::hash::ObHashMap &multi_path_map, + const SCN &restore_start_scn, + const SCN &restore_end_scn, + const ObTimeZoneInfoWrap &time_zone_wrap, + ObIArray &pieces) +{ + int ret = OB_SUCCESS; + int tmp_ret = OB_SUCCESS; + int64_t dest_id = 0; + int64_t last_piece_idx = -1; + int64_t i = 0; + int64_t pieces_cnt = candidate_pieces.count(); + if (candidate_pieces.empty()) { + LOG_WARN("candidate pieces is empty!", K(ret)); + } else { + dest_id = candidate_pieces.at(0).key_.dest_id_; + } + + while (OB_SUCC(ret) && i < pieces_cnt) { + const ObTenantArchivePieceAttr &cur = candidate_pieces.at(i); + ObBackupPath piece_path; + if (cur.key_.dest_id_ != dest_id) { + // Filter pieces archived at other path. + ++i; + continue; + } + + if (cur.file_status_ != ObBackupFileStatus::STATUS::BACKUP_FILE_AVAILABLE) { + // Filter unavailable piece + ++i; + continue; + } + + if (cur.end_scn_ <= restore_start_scn) { + ++i; + continue; + } + ObRestoreLogPieceBriefInfo piece_brief_info; + ObString path; + ObBackupDest dest; + ObPieceKey key; + key.dest_id_ = cur.key_.dest_id_; + key.round_id_ = cur.key_.round_id_; + key.piece_id_ = cur.key_.piece_id_; + if (cur.start_scn_ >= restore_end_scn) { + // this piece may be required for restore, consider the following case. + // Piece#1 : <2022-06-01 06:00:00, 2022-06-02 05:00:00, 2022-06-02 06:00:00> + // Piece#2 : <2022-06-02 06:00:00, 2022-06-03 05:00:00, 2022-06-03 06:00:00> + // Piece#3 : <2022-06-03 06:00:00, 2022-06-03 10:00:00, 2022-06-04 06:00:00> + // If 'end_scn' is indicated to ' 2022-06-03 05:30:00', Piece#3 is required. 
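// [editorial aside, not part of the patch] The selection logic here accepts a piece
// chain only if the first piece starts at or before restore_start_scn, consecutive
// pieces meet head-to-tail, and the last checkpoint reaches restore_end_scn. A
// self-contained sketch of that rule, with plain integers standing in for SCNs
// (a simplification, not the palf SCN type):
#include <vector>
struct PieceRange { long start_scn; long checkpoint_scn; long end_scn; };
static bool chain_covers(const std::vector<PieceRange> &chain,
                         long restore_start_scn, long restore_end_scn)
{
  if (chain.empty() || chain.front().start_scn > restore_start_scn) {
    return false;  // chain must begin at or before the first log we need
  }
  for (size_t i = 1; i < chain.size(); ++i) {
    if (chain[i - 1].end_scn != chain[i].start_scn) {
      return false;  // a gap: pieces are not continuous
    }
  }
  return chain.back().checkpoint_scn >= restore_end_scn;  // enough log to reach end
}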
+      if (!pieces.empty()) {
+        const ObTenantArchivePieceAttr &prev = candidate_pieces.at(last_piece_idx);
+        // If pieces are not enough, and the current piece is continuous with the previous one.
+        if (prev.end_scn_ == cur.start_scn_ && prev.checkpoint_scn_ < restore_end_scn) {
+          if (OB_FAIL(multi_path_map.get_refactored(key, path))) {
+            if (OB_HASH_NOT_EXIST == ret) {
+              LOG_WARN("miss log archive piece", K(ret), K(key), K(candidate_pieces));
+              LOG_USER_ERROR(OB_RESTORE_SOURCE_NOT_ENOUGH, "No enough log for restore");
+            } else {
+              LOG_WARN("fail to get refactored", K(ret), K(key));
+            }
+          } else if (OB_FAIL(dest.set(path))) {
+            LOG_WARN("fail to set backup dest", K(ret), K(path));
+          } else if (OB_FAIL(dest.get_backup_dest_str(piece_brief_info.piece_path_.ptr(),
+                                                      piece_brief_info.piece_path_.capacity()))) {
+            LOG_WARN("fail to get backup dest str from multi path", K(ret), K(dest));
+          } else if (OB_FAIL(piece_brief_info.piece_path_.assign(dest.get_root_path()))) {
+            LOG_WARN("failed to assign piece path", K(ret));
+          } else if (OB_FALSE_IT(piece_brief_info.piece_id_ = cur.key_.piece_id_)) {
+          } else if (OB_FALSE_IT(piece_brief_info.start_scn_ = cur.start_scn_)) {
+          } else if (OB_FALSE_IT(piece_brief_info.checkpoint_scn_ = cur.checkpoint_scn_)) {
+          } else if (OB_FAIL(pieces.push_back(piece_brief_info))) {
+            LOG_WARN("fail to push back path", K(ret), K(piece_brief_info));
+          } else {
+            last_piece_idx = i;
+            LOG_INFO("add piece", K(last_piece_idx), K(cur));
+          }
+        }
+      }
+      break;
+    }
+    if (pieces.empty()) {
+      // this piece may be used to restore.
+      if (cur.start_scn_ <= restore_start_scn) {
+        if (OB_FAIL(multi_path_map.get_refactored(key, path))) {
+          if (OB_HASH_NOT_EXIST == ret) {
+            LOG_WARN("miss log archive piece", K(ret), K(key), K(candidate_pieces));
+            LOG_USER_ERROR(OB_RESTORE_SOURCE_NOT_ENOUGH, "No enough log for restore");
+          } else {
+            LOG_WARN("fail to get refactored", K(ret), K(key));
+          }
+        } else if (OB_FAIL(dest.set(path))) {
+          LOG_WARN("fail to set backup dest", K(ret), K(path));
+        } else if (OB_FAIL(piece_brief_info.piece_path_.assign(dest.get_root_path()))) {
+          LOG_WARN("failed to assign piece path", K(ret));
+        } else if (OB_FALSE_IT(piece_brief_info.piece_id_ = cur.key_.piece_id_)) {
+        } else if (OB_FALSE_IT(piece_brief_info.start_scn_ = cur.start_scn_)) {
+        } else if (OB_FALSE_IT(piece_brief_info.checkpoint_scn_ = cur.checkpoint_scn_)) {
+        } else if (OB_FAIL(pieces.push_back(piece_brief_info))) {
+          LOG_WARN("fail to push back path", K(ret), K(piece_brief_info));
+        } else {
+          last_piece_idx = i;
+          ++i;
+          LOG_INFO("add piece", K(last_piece_idx), K(cur));
+        }
+      } else {
+        ret = OB_RESTORE_SOURCE_NOT_ENOUGH;
+        LOG_WARN("no enough log for restore",
+                 K(ret), K(cur), K(restore_start_scn), K(restore_end_scn));
+        LOG_USER_ERROR(OB_RESTORE_SOURCE_NOT_ENOUGH, "no enough log for restore");
+        break;
+      }
+    } else {
+      const ObTenantArchivePieceAttr &prev = candidate_pieces.at(last_piece_idx);
+      if (prev.end_scn_ != cur.start_scn_) {
+        // The pieces are as follows:
+        // Piece#1 : <2022-06-01 00:00:00, 2022-06-01 06:00:00, 2022-06-02 00:00:00>
+        // Piece#2 : <2022-06-01 08:00:00, 2022-06-02 07:59:00, 2022-06-02 08:00:00>
+        // Piece#3 : <2022-06-02 08:00:00, 2022-06-03 06:00:00, 2022-06-03 08:00:00>
+
+        // And the input [start_scn, end_scn] pair is [2022-06-01 12:00:00, 2022-06-03 04:00:00].
+
+        // Previously, Piece#1 is required, and pushed into 'pieces'. However, when i = 1,
+        // we find that Piece#2 is not continuous with Piece#1, and Piece#1 is not required actually. 
+        // Then Piece#1 is abandoned, and the required pieces are recomputed.
+        pieces.reset();
+        last_piece_idx = -1;
+        // Do not do ++i, recompute if current piece can be used to restore.
+        LOG_INFO("pieces are not continuous", K(prev), K(cur), K(restore_start_scn), K(restore_end_scn));
+      } else if (OB_FAIL(multi_path_map.get_refactored(key, path))) {
+        if (OB_HASH_NOT_EXIST == ret) {
+          LOG_WARN("miss log archive piece", K(ret), K(key), K(candidate_pieces));
+          LOG_USER_ERROR(OB_ENTRY_NOT_EXIST, "No enough log for restore");
+        } else {
+          LOG_WARN("fail to get refactored", K(ret), K(key));
+        }
+      } else if (OB_FAIL(dest.set(path))) {
+        LOG_WARN("fail to set backup dest", K(ret), K(path));
+      } else if (OB_FAIL(piece_brief_info.piece_path_.assign(dest.get_root_path()))) {
+        LOG_WARN("failed to assign piece path", K(ret));
+      } else if (OB_FALSE_IT(piece_brief_info.piece_id_ = cur.key_.piece_id_)) {
+      } else if (OB_FALSE_IT(piece_brief_info.start_scn_ = cur.start_scn_)) {
+      } else if (OB_FALSE_IT(piece_brief_info.checkpoint_scn_ = cur.checkpoint_scn_)) {
+      } else if (OB_FAIL(pieces.push_back(piece_brief_info))) {
+        LOG_WARN("fail to push back path", K(ret), K(piece_brief_info));
+      } else {
+        last_piece_idx = i;
+        ++i;
+        LOG_INFO("add piece", K(last_piece_idx), K(cur));
+      }
+    }
+  }
+  if (OB_FAIL(ret)) {
+  } else if (-1 == last_piece_idx) {
+    ret = OB_RESTORE_SOURCE_NOT_ENOUGH;
+    LOG_WARN("archive log pieces are behind the latest backup set, or pieces are not continuous",
+             K(ret), K(last_piece_idx), K(restore_end_scn), K(candidate_pieces));
+    LOG_USER_ERROR(OB_RESTORE_SOURCE_NOT_ENOUGH, "no enough log for restore");
+  } else {
+    const ObTenantArchivePieceAttr &last_piece = candidate_pieces.at(last_piece_idx);
+    if (last_piece.checkpoint_scn_ < restore_end_scn) {
+      ret = OB_RESTORE_SOURCE_NOT_ENOUGH;
+      int64_t time_str_pos = 0;
+      int64_t msg_pos = 0;
+      char err_msg[OB_MAX_ERROR_MSG_LEN] = { 0 };
+      char time_str[OB_MAX_TIME_STR_LENGTH] = { 0 };
+      if (OB_TMP_FAIL(ObTimeConverter::scn_to_str(last_piece.checkpoint_scn_.get_val_for_inner_table_field(),
+                                                  time_zone_wrap.get_time_zone_info(),
+                                                  time_str, OB_MAX_TIME_STR_LENGTH, time_str_pos))) {
+        LOG_WARN("fail to convert scn to str", K(ret), K(tmp_ret), K(last_piece), K(time_zone_wrap));
+      } else if (OB_TMP_FAIL(databuff_printf(err_msg, OB_MAX_ERROR_MSG_LEN, msg_pos,
+                                             "no enough log, maximum restorable time is %s",
+                                             time_str))) {
+        LOG_WARN("fail to databuff printf", K(ret), K(tmp_ret), K(msg_pos), K(time_str));
+      }
+      LOG_WARN(err_msg, K(ret), K(last_piece), K(restore_end_scn));
+      LOG_USER_ERROR(OB_RESTORE_SOURCE_NOT_ENOUGH, err_msg);
+    }
+  }
+
+  if (OB_FAIL(ret)) {
+    pieces.reset();
+  } else {
+    LOG_INFO("find pieces", K(ret), K(restore_start_scn), K(restore_end_scn), K(pieces));
+  }
+  return ret;
+}
+
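// [editorial aside, not part of the patch] The loop above restarts its chain when it
// finds a gap between pieces. A compact model of that behavior, self-contained with
// plain integers standing in for SCNs (a simplification, not the real types):
#include <vector>
struct Span { long start_scn; long checkpoint_scn; long end_scn; };
static std::vector<Span> pick_chain(const std::vector<Span> &cand,
                                    long start_scn, long end_scn)
{
  std::vector<Span> chain;
  for (size_t i = 0; i < cand.size(); ) {
    const Span &cur = cand[i];
    if (chain.empty()) {
      if (cur.start_scn <= start_scn) chain.push_back(cand[i]);  // usable head piece
      ++i;
    } else if (chain.back().end_scn != cur.start_scn) {
      chain.clear();             // gap: abandon the chain, recompute from this piece
    } else {
      chain.push_back(cand[i]);  // continuous: extend the chain
      ++i;
    }
    if (!chain.empty() && chain.back().checkpoint_scn >= end_scn) break;  // enough log
  }
  return chain;
}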
 int ObRestoreUtil::get_restore_log_array_for_complement_log_(
     const ObIArray &backup_set_list,
     const share::SCN &restore_start_scn,
@@ -734,6 +1569,31 @@ int ObRestoreUtil::do_fill_backup_path_(
   return ret;
 }
+int ObRestoreUtil::do_fill_backup_path_with_full_pieces_(
+    const ObIArray<ObRestoreBackupSetBriefInfo> &backup_set_list,
+    const ObIArray<share::ObSinglePieceDesc> &backup_piece_array,
+    const ObIArray &log_path_list,
+    share::ObPhysicalRestoreJob &job)
+{
+  int ret = OB_SUCCESS;
+  if (backup_set_list.empty() || backup_piece_array.empty()) {
+    ret = OB_INVALID_ARGUMENT;
+    LOG_WARN("invalid argument", K(ret), K(backup_set_list), K(backup_piece_array));
+  } else {
+    ObArray backup_piece_path_list;
+    for (int64_t i = 0; OB_SUCC(ret) && i < backup_piece_array.count(); ++i) {
+      if (OB_FAIL(backup_piece_path_list.push_back(backup_piece_array.at(i).piece_.path_))) {
+        LOG_WARN("failed to push backup piece", K(ret));
+      }
+    }
+    if (OB_FAIL(ret)) {
+    } else if (OB_FAIL(job.get_multi_restore_path_list().set(backup_set_list, backup_piece_path_list, log_path_list))) {
+      LOG_WARN("failed to set multi restore path list", KR(ret));
+    }
+  }
+  return ret;
+}
+
 int ObRestoreUtil::do_fill_backup_info_(
     const share::ObBackupSetPath & backup_set_path,
     share::ObPhysicalRestoreJob &job)
@@ -795,11 +1655,11 @@ int ObRestoreUtil::check_backup_set_version_match_(share::ObBackupSetFileDesc &b
     LOG_WARN("invalid argument", K(ret), K(backup_file_desc));
   } else if (!ObUpgradeChecker::check_cluster_version_exist(backup_file_desc.cluster_version_)) {
     ret = OB_INVALID_ARGUMENT;
-    LOG_WARN("cluster version are not exist", K(ret));
+    LOG_WARN("cluster version does not exist", K(ret), K(backup_file_desc));
     LOG_USER_ERROR(OB_INVALID_ARGUMENT, "cluster version of backup set");
   } else if (!ObUpgradeChecker::check_data_version_exist(backup_file_desc.tenant_compatible_)) {
     ret = OB_INVALID_ARGUMENT;
-    LOG_WARN("data version are not exist", K(ret));
+    LOG_WARN("data version does not exist", K(ret), K(backup_file_desc));
     LOG_USER_ERROR(OB_INVALID_ARGUMENT, "tenant compatible of backup set");
   } else if (GET_MIN_CLUSTER_VERSION() < backup_file_desc.cluster_version_) {
     ret = OB_OP_NOT_ALLOW;
@@ -1099,7 +1959,7 @@ int ObRestoreUtil::convert_restore_timestamp_to_scn_(
   const ObTimeZoneInfo *time_zone_info = time_zone_wrap.get_time_zone_info();
   if (timestamp.empty() || !time_zone_wrap.is_valid()) {
     ret = OB_INVALID_ARGUMENT;
-    LOG_WARN("invalid time zone wrap", K(ret));
+    LOG_WARN("invalid time zone wrap", K(ret), K(timestamp), K(time_zone_wrap));
   } else if (OB_FAIL(ObTimeConverter::str_to_scn_value(timestamp, time_zone_info, time_zone_info, ObTimeConverter::COMPAT_OLD_NLS_TIMESTAMP_FORMAT, true/*oracle mode*/, scn_value))) {
     LOG_WARN("failed to str to scn value", K(ret), K(timestamp), K(time_zone_info));
   } else if (OB_FAIL(scn.convert_for_sql(scn_value))) {
@@ -1135,6 +1995,36 @@ int ObRestoreUtil::get_backup_sys_time_zone_(
   return ret;
 }
+int ObRestoreUtil::get_multi_path_backup_sys_time_zone_(
+    const ObIArray<ObString> &multi_path_array,
+    common::ObTimeZoneInfoWrap &time_zone_wrap)
+{
+  int ret = OB_SUCCESS;
+  ObBackupDataStore store;
+  if (multi_path_array.empty()) {
+    ret = OB_INVALID_ARGUMENT;
+    LOG_WARN("multi path array is empty", K(ret));
+  } else {
+    for (int i = multi_path_array.count() - 1; OB_SUCC(ret) && i >= 0; i--) {
+      const ObString &multi_path = multi_path_array.at(i);
+      store.reset();
+      time_zone_wrap.reset();
+      bool is_exist = false;
+      if (OB_FAIL(store.init(multi_path.ptr()))) {
+        LOG_WARN("fail to init ObBackupDataStore", K(ret));
+      } else if (OB_FAIL(store.is_backup_set_info_file_exist(is_exist))) {
+        LOG_WARN("fail to check if backup set info file exists", K(ret));
+      } else if (!is_exist) {
+      } else if (OB_FAIL(store.get_single_backup_set_sys_time_zone_wrap(time_zone_wrap))) {
+        LOG_WARN("fail to get single backup set sys time zone wrap", K(ret), K(store));
+      } else {
+        break;
+      }
+    }
+  }
+  return ret;
+}
+
 ObRestoreFailureChecker::ObRestoreFailureChecker()
   : is_inited_(false),
     job_()
diff --git a/src/rootserver/restore/ob_restore_util.h b/src/rootserver/restore/ob_restore_util.h
index 891c85bc2d..22f6bc91e6 100644
--- a/src/rootserver/restore/ob_restore_util.h
+++ b/src/rootserver/restore/ob_restore_util.h
@@ -16,13 +16,15 @@
 #include "share/ob_rpc_struct.h"
 #include 
"share/restore/ob_physical_restore_table_operator.h"//PhysicalRestoreStatus #include "share/backup/ob_archive_struct.h" - +#include "share/backup/ob_archive_store.h" //ObSinglePieceDesc +#include "storage/backup/ob_backup_data_store.h" //ObBackupSetFilter::ObBackupSetMap namespace oceanbase { namespace share { struct ObHisRestoreJobPersistInfo; struct ObPhysicalRestoreJob; +struct ObExternPieceWholeInfo; } namespace palf { @@ -59,6 +61,14 @@ public: ObIArray &backup_set_list, ObIArray &backup_piece_list, ObIArray &log_path_list); + static int get_restore_source_from_multi_path( + const bool restore_using_compl_log, + const ObIArray& multi_path_array, + const common::ObString &passwd_array, + const share::SCN &restore_scn, + ObIArray &backup_set_list, + ObIArray &backup_piece_list, + ObIArray &log_path_list); static int insert_user_tenant_restore_job( common::ObISQLClient &sql_client, const ObString &tenant_name, @@ -74,7 +84,7 @@ public: static int check_physical_restore_finish(common::ObISQLClient &proxy, const int64_t job_id, bool &is_finish, bool &is_failed); static int get_restore_job_comment(common::ObISQLClient &proxy, const int64_t job_id, char *buf, const int64_t buf_size); static int get_restore_tenant_cpu_count(common::ObMySQLProxy &proxy, const uint64_t tenant_id, double &cpu_count); - static int fill_restore_scn_( + static int fill_restore_scn( const share::SCN &src_scn, const ObString ×tamp, const bool with_restore_scn, @@ -82,9 +92,38 @@ public: const common::ObString &passwd, const bool restore_using_compl_log, share::SCN &restore_scn); - static int check_restore_using_complement_log_( + static int fill_multi_path_restore_scn_( + const obrpc::ObPhysicalRestoreTenantArg &arg, + const bool &restore_using_compl_log, + const ObIArray &multi_path_array, + const ObIArray &backup_set_array, + const ObIArray &backup_piece_array, + share::SCN &restore_scn); + +static int fill_multi_path_restore_scn_with_compl_log_( + const ObIArray &backup_set_array, + const common::ObString &passwd, + share::SCN &restore_scn); + +static int fill_multi_path_restore_scn_without_compl_log_( + const ObIArray &backup_piece_array, + share::SCN &restore_scn); + + static int check_restore_using_complement_log( const ObIArray &tenant_path_array, bool &only_contain_backup_set); + + // check if using complement log, also sorts multi_path_array by backup_set_id + static int get_restore_scn_from_multi_path_( + const obrpc::ObPhysicalRestoreTenantArg &arg, + ObIArray &multi_path_array, + bool &use_complement_log, + share::SCN &restore_scn, + ObArray &backup_piece_array); + static int sort_backup_piece_array_(ObArray &backup_piece_array); + static int check_multi_path_using_complement_log_( + ObIArray &multi_path_array, + bool &use_complement_log); private: static int fill_backup_info_( const obrpc::ObPhysicalRestoreTenantArg &arg, @@ -101,12 +140,53 @@ private: const share::SCN &restore_scn, share::SCN &restore_start_scn, ObIArray &backup_set_list); + static int get_restore_backup_set_array_from_multi_path_( + const ObIArray &multi_path_array, + const common::ObString &passwd_array, + const share::SCN &restore_scn, + share::SCN &restore_start_scn, + ObIArray &backup_set_list, + ObTimeZoneInfoWrap &time_zone_wrap); + static int sort_multi_paths_by_backup_set_id_( + const ObArray> &path_set_pairs, + ObIArray &multi_path_array); + static int get_backup_set_info_from_multi_path_(const ObString &multi_path, ObExternBackupSetInfoDesc &backup_set_info); + static int fill_backup_set_map_( + const 
share::ObBackupSetFileDesc &backup_set_file, + ObBackupSetFilter::BackupSetMap &backup_set_map, + share::SCN &restore_start_scn); + static int get_restore_backup_set_array_from_backup_set_map_( + const common::hash::ObHashMap &backup_set_path_map, + ObBackupSetFilter::BackupSetMap &backup_set_map, + ObIArray &backup_set_list); static int get_restore_log_piece_array_( const ObIArray &tenant_path_array, const share::SCN &restore_start_scn, const share::SCN &restore_end_scn, ObIArray &backup_piece_list, ObIArray &log_path_list); + static int get_restore_log_piece_array_from_multi_path_( + const ObIArray &multi_path_array, + const SCN &restore_start_scn, + const SCN &restore_end_scn, + const ObTimeZoneInfoWrap &time_zone_wrap, + ObIArray &backup_piece_list); + static int get_all_piece_keys_( + const ObIArray &multi_path_array, + ObArray &piece_keys, + common::hash::ObHashMap &multi_path_map); + static int get_latest_non_empty_piece_( + const ObArray &piece_keys, + const common::hash::ObHashMap &multi_path_map, + ObExternPieceWholeInfo &piece_whole_info, + bool &is_empty_piece); + static int get_piece_paths_in_range_from_multi_path_( + const ObArray &candidate_pieces, + const common::hash::ObHashMap &multi_path_map, + const SCN &restore_start_scn, + const SCN &restore_end_scn, + const ObTimeZoneInfoWrap &time_zone_wrap, + ObIArray &pieces); static int get_restore_log_array_for_complement_log_( const ObIArray &backup_set_list, const share::SCN &restore_start_scn, @@ -129,6 +209,11 @@ private: const ObIArray &backup_piece_list, const ObIArray &log_path_list, share::ObPhysicalRestoreJob &job); + static int do_fill_backup_path_with_full_pieces_( + const ObIArray &backup_set_list, + const ObIArray &backup_piece_array, + const ObIArray &log_path_list, + share::ObPhysicalRestoreJob &job); static int do_fill_backup_info_( const share::ObBackupSetPath & backup_set_path, share::ObPhysicalRestoreJob &job); @@ -136,6 +221,9 @@ private: static int get_backup_sys_time_zone_( const ObIArray &tenant_path_array, common::ObTimeZoneInfoWrap &time_zone_wrap); + static int get_multi_path_backup_sys_time_zone_( + const ObIArray &multi_path_array, + common::ObTimeZoneInfoWrap &time_zone_wrap); static int convert_restore_timestamp_to_scn_( const ObString ×tamp, const common::ObTimeZoneInfoWrap &time_zone_wrap, diff --git a/src/share/backup/ob_archive_path.cpp b/src/share/backup/ob_archive_path.cpp index 7fe6c23ed2..76517822cf 100644 --- a/src/share/backup/ob_archive_path.cpp +++ b/src/share/backup/ob_archive_path.cpp @@ -193,6 +193,20 @@ int ObArchivePathUtil::get_single_piece_file_path(const ObBackupDest &dest, cons return ret; } +// oss://archive/[user_specified_path]/single_piece_info.obarc +int ObArchivePathUtil::get_single_piece_file_info_path(const ObBackupDest &dest, ObBackupPath &path) +{ + int ret = OB_SUCCESS; + int64_t pos = 0; + path.reset(); + if (OB_FAIL(path.init(dest.get_root_path()))) { + LOG_WARN("fail to init backup path", K(ret), K(dest)); + } else if (OB_FAIL(path.join("single_piece_info", ObBackupFileSuffix::ARCHIVE))) { + LOG_WARN("failed to join single piece info", K(ret), K(path)); + } + return ret; +} + // oss://archive/piece_d[dest_id]r[round_id]p[piece_id]/checkpoint int ObArchivePathUtil::get_piece_checkpoint_dir_path(const ObBackupDest &dest, const int64_t dest_id, const int64_t round_id, const int64_t piece_id, ObBackupPath &path) @@ -237,6 +251,22 @@ int ObArchivePathUtil::get_piece_checkpoint_file_path(const ObBackupDest &dest, return ret; } +// 
oss://[user_specified_path]/checkpoint/checkpoint_info.[file_id].obarc +int ObArchivePathUtil::get_piece_checkpoint_file_path(const ObBackupDest &dest, const int64_t file_id, ObBackupPath &path) +{ + int ret = OB_SUCCESS; + char file_name[OB_MAX_BACKUP_PATH_LENGTH] = { 0 }; + + if (OB_FAIL(get_piece_checkpoint_dir_path(dest, path))) { + LOG_WARN("failed to get piece dir path", K(ret), K(dest), K(file_id)); + } else if (OB_FAIL(databuff_printf(file_name, sizeof(file_name), "checkpoint_info.%ld", file_id))) { + LOG_WARN("failed to assign checkpoint info path", K(ret), K(dest), K(file_id)); + } else if (OB_FAIL(path.join(file_name, ObBackupFileSuffix::ARCHIVE))) { + LOG_WARN("failed to join file name", K(ret), K(path), K(file_name)); + } + return ret; +} + // oss://archive/piece_d[dest_id]r[round_id]p[piece_id]/piece_d[dest_id]r[round_id]p[piece_id]_20220601T120000_20220602T120000.obarc int ObArchivePathUtil::get_piece_inner_placeholder_file_path(const ObBackupDest &dest, const int64_t dest_id, const int64_t round_id, const int64_t piece_id, const SCN &start_scn, const SCN &end_scn, ObBackupPath &path) @@ -304,6 +334,21 @@ int ObArchivePathUtil::get_ls_file_info_path(const ObBackupDest &dest, const int return ret; } +// oss://[user_specified_path]/logstream_[ls_id]/file_info.obarc +int ObArchivePathUtil::get_ls_file_info_path(const ObBackupDest &dest, const ObLSID &ls_id, ObBackupPath &path) +{ + int ret = OB_SUCCESS; + path.reset(); + if (OB_FAIL(path.init(dest.get_root_path()))) { + LOG_WARN("fail to init path", K(ret), K(dest)); + } else if (OB_FAIL(path.join_ls(ls_id))) { + LOG_WARN("fail to join ls", K(ret), K(path), K(ls_id)); + } else if (OB_FAIL(path.join("file_info", ObBackupFileSuffix::ARCHIVE))) { + LOG_WARN("failed to join ls file info ", K(ret), K(path)); + } + return ret; +} + // oss://archive/piece_d[dest_id]r[round_id]p[piece_id]/file_info.obarc int ObArchivePathUtil::get_piece_info_file_path(const ObBackupDest &dest, const int64_t dest_id, const int64_t round_id, const int64_t piece_id, ObBackupPath &path) @@ -330,7 +375,7 @@ int ObArchivePathUtil::get_tenant_archive_piece_infos_file_path(const ObBackupDe return ret; } -// oss://archive/tenant_archive_piece_infos.obarc +// oss://archive/tenant_archive_piece_infos.obarc, or oss://[user_specified_path]/tenant_archive_piece_infos.obarc int ObArchivePathUtil::get_tenant_archive_piece_infos_file_path(const ObBackupDest &dest, ObBackupPath &path) { int ret = OB_SUCCESS; diff --git a/src/share/backup/ob_archive_path.h b/src/share/backup/ob_archive_path.h index b0e9be99c5..5586832c3f 100644 --- a/src/share/backup/ob_archive_path.h +++ b/src/share/backup/ob_archive_path.h @@ -66,18 +66,22 @@ public: // oss://archive/piece_d[dest_id]r[round_id]p[piece_id]/single_piece_info.obarc static int get_single_piece_file_path(const ObBackupDest &dest, const int64_t dest_id, const int64_t round_id, const int64_t piece_id, ObBackupPath &path); - + // oss://[user_specified_path]/single_piece_info.obarc + static int get_single_piece_file_info_path(const ObBackupDest &dest, ObBackupPath &path); // oss://archive/piece_d[dest_id]r[round_id]p[piece_id]/checkpoint static int get_piece_checkpoint_dir_path(const ObBackupDest &dest, const int64_t dest_id, const int64_t round_id, const int64_t piece_id, ObBackupPath &path); - // oss://archive/checkpoint + // oss://archive/checkpoint, or oss://[user_specified_path]/checkpoint static int get_piece_checkpoint_dir_path(const ObBackupDest &dest, ObBackupPath &path); // 
oss://archive/piece_d[dest_id]r[round_id]p[piece_id]/checkpoint/checkpoint_info.[file_id].obarc static int get_piece_checkpoint_file_path(const ObBackupDest &dest, const int64_t dest_id, const int64_t round_id, const int64_t piece_id, const int64_t file_id, ObBackupPath &path); + // oss://[user_specified_path]/checkpoint/checkpoint_info.[file_id].obarc + static int get_piece_checkpoint_file_path(const ObBackupDest &dest, const int64_t file_id, ObBackupPath &path); + // oss://archive/piece_d[dest_id]r[round_id]p[piece_id]/piece_d[dest_id]r[round_id]p[piece_id]_20220601T120000_20220602T120000.obarc static int get_piece_inner_placeholder_file_path(const ObBackupDest &dest, const int64_t dest_id, const int64_t round_id, const int64_t piece_id, const SCN &start_scn, @@ -91,6 +95,9 @@ public: static int get_ls_file_info_path(const ObBackupDest &dest, const int64_t dest_id, const int64_t round_id, const int64_t piece_id, const ObLSID &ls_id, ObBackupPath &path); + // oss://[user_specified_path]/logstream_[ls_id]/file_info.obarc + static int get_ls_file_info_path(const ObBackupDest &dest, const ObLSID &ls_id, ObBackupPath &path); + // oss://archive/piece_d[dest_id]r[round_id]p[piece_id]/file_info.obarc static int get_piece_info_file_path(const ObBackupDest &dest, const int64_t dest_id, const int64_t round_id, const int64_t piece_id, ObBackupPath &path); diff --git a/src/share/backup/ob_archive_store.cpp b/src/share/backup/ob_archive_store.cpp index d47049be5c..71c15cdd7f 100644 --- a/src/share/backup/ob_archive_store.cpp +++ b/src/share/backup/ob_archive_store.cpp @@ -407,6 +407,10 @@ ObArchiveStore::ObArchiveStore() : ObBackupStore() {} +void ObArchiveStore::reset() +{ + ObBackupStore::reset(); +} // oss://archive/rounds/round_d[dest_id]r[round_id]_start int ObArchiveStore::is_round_start_file_exist(const int64_t dest_id, const int64_t round_id, bool &is_exist) const @@ -816,6 +820,22 @@ int ObArchiveStore::write_single_piece(const int64_t dest_id, const int64_t roun return ret; } +int ObArchiveStore::read_single_piece(ObSinglePieceDesc &desc) +{ + int ret = OB_SUCCESS; + ObBackupPath full_path; + const ObBackupDest &dest = get_backup_dest(); + if (!is_init()) { + ret = OB_NOT_INIT; + LOG_WARN("ObArchiveStore not init", K(ret)); + } else if (OB_FAIL(ObArchivePathUtil::get_single_piece_file_info_path(dest, full_path))) { + LOG_WARN("fail to get single piece file path", K(ret), K(dest)); + } else if (OB_FAIL(read_single_file(full_path.get_ptr(), desc))) { + LOG_WARN("fail to read single file", K(ret), K(full_path)); + } + return ret; +} + // oss://archive/d[dest_id]r[round_id]p[piece_id]/checkpoint/checkpoint_info.[file_id].obarc int ObArchiveStore::is_piece_checkpoint_file_exist(const int64_t dest_id, const int64_t round_id, const int64_t piece_id, const int64_t file_id, bool &is_exist) const { @@ -906,6 +926,42 @@ int ObArchiveStore::write_piece_checkpoint(const int64_t dest_id, const int64_t return ret; } +int ObArchiveStore::read_piece_checkpoint(ObPieceCheckpointDesc &desc) const +{ + int ret = OB_SUCCESS; + ObBackupPath dir_path; + ObBackupPath meta_full_path; + const ObBackupDest &dest = get_backup_dest(); + if (!is_init()) { + ret = OB_NOT_INIT; + LOG_WARN("ObArchiveStore not init", K(ret)); + } else { + ObArchiveCheckpointMgr mgr; + uint64_t max_checkpoint_scn = 0; + if (OB_FAIL(ObArchivePathUtil::get_piece_checkpoint_file_path(dest, 0, meta_full_path))) { + LOG_WARN("failed to get checkpoint meta file path", K(ret), K(dest)); + } else if (OB_FAIL(read_single_file(meta_full_path.get_ptr(), 
desc))) { + LOG_WARN("failed to read meta file", K(ret), K(meta_full_path)); + } else if (OB_FAIL(ObArchivePathUtil::get_piece_checkpoint_dir_path(dest, dir_path))) { + LOG_WARN("failed to get piece checkpoint dir path", K(ret), K(dest)); + } else if (OB_FAIL(mgr.init(dir_path, OB_STR_CHECKPOINT_FILE_NAME, ObBackupFileSuffix::ARCHIVE, get_storage_info()))) { + LOG_WARN("failed to init ObArchiveCheckpointMgr", K(ret), K(dir_path)); + } else if (OB_FAIL(mgr.read(max_checkpoint_scn))) { + LOG_WARN("failed to read checkpoint scn", K(ret), K(max_checkpoint_scn)); + } else if (0 == max_checkpoint_scn) { + //do nothing, archive is not started yet + } else if (OB_FAIL(desc.checkpoint_scn_.convert_for_inner_table_field(max_checkpoint_scn))) { + LOG_WARN("failed to set checkpoint scn", K(ret), K(max_checkpoint_scn)); + } else if (OB_FAIL(desc.max_scn_.convert_for_inner_table_field(max_checkpoint_scn))) { + LOG_WARN("failed to set max scn", K(ret), K(max_checkpoint_scn)); + } + if (OB_SUCC(ret)) { + FLOG_INFO("succeed to read checkpoint desc.", K(desc)); + } + } + return ret; +} + // oss://archive/d[dest_id]r[round_id]p[piece_id]/piece_d[dest_id]r[round_id]p[piece_id]_20220601T120000_20220602T120000.obarc int ObArchiveStore::is_piece_inner_placeholder_file_exist(const int64_t dest_id, const int64_t round_id, const int64_t piece_id, const SCN &start_scn, const SCN &end_scn, bool &is_exist) const @@ -1115,6 +1171,40 @@ int ObArchiveStore::write_tenant_archive_piece_infos(const int64_t dest_id, cons return ret; } +int ObArchiveStore::read_tenant_archive_piece_infos(ObTenantArchivePieceInfosDesc &desc) const +{ + int ret = OB_SUCCESS; + ObBackupPath full_path; + const ObBackupDest &dest = get_backup_dest(); + if (!is_init()) { + ret = OB_NOT_INIT; + LOG_WARN("ObArchiveStore not init", K(ret)); + } else if (OB_FAIL(ObArchivePathUtil::get_tenant_archive_piece_infos_file_path(dest, full_path))) { + LOG_WARN("failed to get piece extend info file path", K(ret), K(dest)); + } else if (OB_FAIL(read_single_file(full_path.get_ptr(), desc))) { + LOG_WARN("failed to read piece extend info file", K(ret), K(full_path)); + } + return ret; +} + +int ObArchiveStore::is_tenant_archive_piece_infos_file_exist(bool &is_exist) const +{ + int ret = OB_SUCCESS; + ObBackupIoAdapter util; + ObBackupPath full_path; + const ObBackupStorageInfo *storage_info = get_storage_info(); + const ObBackupDest &dest = get_backup_dest(); + if (!is_init()) { + ret = OB_NOT_INIT; + LOG_WARN("ObArchiveStore not init", K(ret)); + } else if (OB_FAIL(ObArchivePathUtil::get_tenant_archive_piece_infos_file_path(dest, full_path))) { + LOG_WARN("failed to get piece extend info file path", K(ret), K(dest)); + } else if (OB_FAIL(util.is_exist(full_path.get_ptr(), storage_info, is_exist))) { + LOG_WARN("failed to check piece extend info file exist.", K(ret), K(full_path), K(storage_info)); + } + return ret; +} + int ObArchiveStore::is_archive_log_file_exist(const int64_t dest_id, const int64_t round_id, const int64_t piece_id, const ObLSID &ls_id, const int64_t file_id, bool &is_exist) const { @@ -1234,6 +1324,74 @@ int ObArchiveStore::get_whole_piece_info(const int64_t dest_id, const int64_t ro return ret; } +int ObArchiveStore::get_single_piece_info(bool &is_empty_piece, ObSinglePieceDesc &single_piece) +{ + int ret = OB_SUCCESS; + is_empty_piece = false; + if (!is_init()) { + ret = OB_NOT_INIT; + LOG_WARN("ObArchiveStore not init", K(ret)); + } else if (OB_FAIL(read_single_piece(single_piece))) { + // not a frozen piece, build single piece info with 
extend and checkpoint info. + if (OB_BACKUP_FILE_NOT_EXIST == ret) { + ObPieceCheckpointDesc checkpoint_desc; + ObTenantArchivePieceInfosDesc extend_desc; + ret = OB_SUCCESS; + if (OB_FAIL(read_piece_checkpoint(checkpoint_desc))) { + if (OB_BACKUP_FILE_NOT_EXIST == ret) { + ret = OB_SUCCESS; + is_empty_piece = true; + } else { + LOG_WARN("failed to read piece checkpoint info", K(ret)); + } + } else if (OB_FAIL(read_tenant_archive_piece_infos(extend_desc))) { + LOG_WARN("failed to read piece extend info", K(ret)); + } else { + // merge static piece attr with dynamic attr. + single_piece.piece_.key_.tenant_id_ = extend_desc.tenant_id_; + single_piece.piece_.key_.dest_id_ = extend_desc.dest_id_; + single_piece.piece_.key_.round_id_ = extend_desc.round_id_; + single_piece.piece_.key_.piece_id_ = extend_desc.piece_id_; + single_piece.piece_.incarnation_ = extend_desc.incarnation_; + single_piece.piece_.dest_no_ = extend_desc.dest_no_; + single_piece.piece_.start_scn_ = extend_desc.start_scn_; + single_piece.piece_.checkpoint_scn_ = checkpoint_desc.checkpoint_scn_; + single_piece.piece_.max_scn_ = checkpoint_desc.max_scn_; + single_piece.piece_.end_scn_ = extend_desc.end_scn_; + single_piece.piece_.compatible_ = extend_desc.compatible_; + single_piece.piece_.status_.set_active(); + single_piece.piece_.file_status_ = ObBackupFileStatus::STATUS::BACKUP_FILE_AVAILABLE; + single_piece.piece_.path_ = extend_desc.path_; + } + } else { + LOG_WARN("failed to get single piece info", K(ret)); + } + } + + return ret; +} + +int ObArchiveStore::get_whole_piece_info(bool &is_empty_piece, ObExternPieceWholeInfo &whole_piece_info) +{ + int ret = OB_SUCCESS; + ObTenantArchivePieceInfosDesc extend_desc; + ObSinglePieceDesc single_piece_desc; + if (!is_init()) { + ret = OB_NOT_INIT; + LOG_WARN("ObArchiveStore not init", K(ret)); + } else if (OB_FAIL(get_single_piece_info(is_empty_piece, single_piece_desc))) { + LOG_WARN("failed to get single piece info", K(ret)); + } else if (is_empty_piece) { + } else if (OB_FAIL(read_tenant_archive_piece_infos(extend_desc))) { + LOG_WARN("failed to read piece extend info", K(ret)); + } else if (OB_FAIL(whole_piece_info.current_piece_.assign(single_piece_desc.piece_))) { + LOG_WARN("failed to assign piece", K(ret)); + } else if (OB_FAIL(whole_piece_info.his_frozen_pieces_.assign(extend_desc.his_frozen_pieces_))) { + LOG_WARN("failed to assign history frozen pieces", K(ret)); + } + return ret; +} + // Get pieces needed in the specific interval indicated by 'start_scn' and 'end_scn'. 
int ObArchiveStore::get_piece_paths_in_range(const SCN &start_scn, const SCN &end_scn, ObIArray &pieces) @@ -1518,6 +1676,21 @@ int ObArchiveStore::get_piece_max_checkpoint_scn(const int64_t dest_id, const in return ret; } +int ObArchiveStore::read_single_ls_info(const ObLSID &ls_id, ObSingleLSInfoDesc &desc) const +{ + int ret = OB_SUCCESS; + ObBackupPath full_path; + const ObBackupDest &dest = get_backup_dest(); + if (!is_init()) { + ret = OB_NOT_INIT; + LOG_WARN("ObArchiveStore not init", K(ret)); + } else if (OB_FAIL(ObArchivePathUtil::get_ls_file_info_path(dest, ls_id, full_path))) { + LOG_WARN("fail to get ls file info path", K(ret), K(dest), K(ls_id)); + } else if (OB_FAIL(read_single_file(full_path.get_ptr(), desc))) { + LOG_WARN("failed to read single file", K(ret), K(full_path)); + } + return ret; +} static int parse_piece_file_(ObString &dir_name, int64_t &dest_id, int64_t &round_id, int64_t &piece_id) { diff --git a/src/share/backup/ob_archive_store.h b/src/share/backup/ob_archive_store.h index 574e782b9a..67bd82d23f 100644 --- a/src/share/backup/ob_archive_store.h +++ b/src/share/backup/ob_archive_store.h @@ -208,6 +208,22 @@ public: INHERIT_TO_STRING_KV("ObExternArchiveDesc", ObExternArchiveDesc, K_(piece)); }; +struct ObSinglePieceDescComparator +{ + bool operator()(const ObSinglePieceDesc &lhs, const share::ObSinglePieceDesc &rhs) const + { + ObPieceKey lhs_key; + ObPieceKey rhs_key; + lhs_key.dest_id_ = lhs.piece_.key_.dest_id_; + lhs_key.round_id_ = lhs.piece_.key_.round_id_; + lhs_key.piece_id_ = lhs.piece_.key_.piece_id_; + rhs_key.dest_id_ = rhs.piece_.key_.dest_id_; + rhs_key.round_id_ = rhs.piece_.key_.round_id_; + rhs_key.piece_id_ = rhs.piece_.key_.piece_id_; + return lhs_key < rhs_key; + } +}; + // Define checkpoint file content. 
struct ObPieceCheckpointDesc final : public ObExternArchiveDesc @@ -325,6 +341,7 @@ class ObArchiveStore : public ObBackupStore { public: ObArchiveStore(); + void reset(); // oss://archive/rounds/round_d[dest_id]r[round_id]_start.obarc int is_round_start_file_exist(const int64_t dest_id, const int64_t round_id, bool &is_exist) const; @@ -356,15 +373,17 @@ public: int is_single_piece_file_exist(const int64_t dest_id, const int64_t round_id, const int64_t piece_id, bool &is_exist) const; int read_single_piece(const int64_t dest_id, const int64_t round_id, const int64_t piece_id, ObSinglePieceDesc &desc) const; int write_single_piece(const int64_t dest_id, const int64_t round_id, const int64_t piece_id, const ObSinglePieceDesc &desc) const; - + // oss://[user_specified_path]/single_piece_info.obarc, FOR ADD RESTORE SOURCE ONLY + int read_single_piece(ObSinglePieceDesc &desc); // oss://archive/d[dest_id]r[round_id]p[piece_id]/checkpoint/checkpoint_info.[file_id].obarc int is_piece_checkpoint_file_exist(const int64_t dest_id, const int64_t round_id, const int64_t piece_id, const int64_t file_id, bool &is_exist) const; int read_piece_checkpoint(const int64_t dest_id, const int64_t round_id, const int64_t piece_id, const int64_t file_id, ObPieceCheckpointDesc &desc) const; int write_piece_checkpoint(const int64_t dest_id, const int64_t round_id, const int64_t piece_id, const int64_t file_id, const ObPieceCheckpointDesc &desc) const; - + // oss://[user_specified_path]/checkpoint/checkpoint_info.0.obarc + int read_piece_checkpoint(ObPieceCheckpointDesc &desc) const; // oss://archive/d[dest_id]r[round_id]p[piece_id]/piece_d[dest_id]r[round_id]p[piece_id]_20220601T120000_20220602T120000.obarc int is_piece_inner_placeholder_file_exist(const int64_t dest_id, const int64_t round_id, const int64_t piece_id, const SCN &start_scn, - const SCN &end_scn, bool &is_exist) const; + const SCN &end_scn, bool &is_exist) const; int read_piece_inner_placeholder(const int64_t dest_id, const int64_t round_id, const int64_t piece_id, const SCN &start_scn, const SCN &end_scn, ObPieceInnerPlaceholderDesc &desc) const; int write_piece_inner_placeholder(const int64_t dest_id, const int64_t round_id, const int64_t piece_id, const SCN &start_scn, const SCN &end_scn, const ObPieceInnerPlaceholderDesc &desc) const; @@ -374,6 +393,9 @@ public: int read_single_ls_info(const int64_t dest_id, const int64_t round_id, const int64_t piece_id, const ObLSID &ls_id, ObSingleLSInfoDesc &desc) const; int write_single_ls_info(const int64_t dest_id, const int64_t round_id, const int64_t piece_id, const ObLSID &ls_id, const ObSingleLSInfoDesc &desc) const; + // oss://[user_specified_path]/logstream_[ls_id]/file_info.obarc + int read_single_ls_info(const ObLSID &ls_id, ObSingleLSInfoDesc &desc) const; + // oss://archive/d[dest_id]r[round_id]p[piece_id]/file_info.obarc int is_piece_info_file_exist(const int64_t dest_id, const int64_t round_id, const int64_t piece_id, bool &is_exist) const; int read_piece_info(const int64_t dest_id, const int64_t round_id, const int64_t piece_id, ObPieceInfoDesc &desc) const; @@ -383,7 +405,10 @@ public: int is_tenant_archive_piece_infos_file_exist(const int64_t dest_id, const int64_t round_id, const int64_t piece_id, bool &is_exist) const; int read_tenant_archive_piece_infos(const int64_t dest_id, const int64_t round_id, const int64_t piece_id, ObTenantArchivePieceInfosDesc &desc) const; int write_tenant_archive_piece_infos(const int64_t dest_id, const int64_t round_id, const int64_t piece_id, const 
ObTenantArchivePieceInfosDesc &desc) const; - + // oss://[user_specified_path]/tenant_archive_piece_infos.obarc + int read_tenant_archive_piece_infos(ObTenantArchivePieceInfosDesc &desc) const; + // oss://[user_specified_path]/tenant_archive_piece_infos.obarc, FOR ADD RESTORE SOURCE ONLY + int is_tenant_archive_piece_infos_file_exist(bool &is_exist) const; // oss://archive/d[dest_id]r[round_id]p[piece_id]/[ls_id]/[file_id] int is_archive_log_file_exist(const int64_t dest_id, const int64_t round_id, const int64_t piece_id, const ObLSID &ls_id, const int64_t file_id, bool &is_exist) const; @@ -403,6 +428,9 @@ public: bool &is_empty_piece, ObSinglePieceDesc &single_piece); int get_whole_piece_info(const int64_t dest_id, const int64_t round_id, const int64_t piece_id, bool &is_empty_piece, ObExternPieceWholeInfo &whole_piece_info); + // independent of piece dir name, ONLY FOR ADD RESTORE SOURCE + int get_single_piece_info(bool &is_empty_piece, ObSinglePieceDesc &single_piece); + int get_whole_piece_info(bool &is_empty_piece, ObExternPieceWholeInfo &whole_piece_info); // Get pieces needed in the specific interval indicated by 'start_scn' and 'end_scn'. // Return OB_ENTRY_NOT_EXIST if cannot find enough pieces. diff --git a/src/share/backup/ob_archive_struct.cpp b/src/share/backup/ob_archive_struct.cpp index 9687d9f7ae..bcead30df7 100644 --- a/src/share/backup/ob_archive_struct.cpp +++ b/src/share/backup/ob_archive_struct.cpp @@ -571,6 +571,17 @@ int ObArchivePieceStatus::set_status(const char *status) return ret; } +/** + * -----------------------------------ObPieceKey----------------------------------- + */ +uint64_t ObPieceKey::hash() const +{ + uint64_t hash_val = 0; + hash_val = murmurhash(&dest_id_, sizeof(dest_id_), hash_val); + hash_val = murmurhash(&round_id_, sizeof(round_id_), hash_val); + hash_val = murmurhash(&piece_id_, sizeof(piece_id_), hash_val); + return hash_val; +} /** * ------------------------------ObTenantArchivePieceAttr::Key--------------------- @@ -602,6 +613,13 @@ int ObTenantArchivePieceAttr::Key::fill_pkey_dml(share::ObDMLSqlSplicer &dml) co return ret; } +void ObTenantArchivePieceAttr::Key::reset() +{ + tenant_id_ = OB_INVALID_TENANT_ID; + dest_id_ = 0; + round_id_ = 0; + piece_id_ = 0; +} /** @@ -757,7 +775,25 @@ int ObTenantArchivePieceAttr::assign(const ObTenantArchivePieceAttr &other) return ret; } - +void ObTenantArchivePieceAttr::reset() +{ + key_.reset(); + incarnation_ = OB_START_INCARNATION; + dest_no_ = -1; + file_count_ = 0; + input_bytes_ = 0; + output_bytes_ = 0; + cp_file_id_ = 0; + cp_file_offset_ = 0; + start_scn_ = share::SCN::min_scn(); + checkpoint_scn_ = share::SCN::min_scn(); + max_scn_ = share::SCN::min_scn(); + end_scn_ = share::SCN::min_scn(); + compatible_.version_ = ObArchiveCompatible::Compatible::NONE; + status_.status_ = ObArchivePieceStatus::Status::MAX_STATUS; + file_status_ = ObBackupFileStatus::STATUS::BACKUP_FILE_MAX; + path_.reset(); +} /** * ------------------------------ObLSArchivePersistInfo::Key--------------------- diff --git a/src/share/backup/ob_archive_struct.h b/src/share/backup/ob_archive_struct.h index e05ace8ae8..8be7e58d7c 100644 --- a/src/share/backup/ob_archive_struct.h +++ b/src/share/backup/ob_archive_struct.h @@ -491,6 +491,19 @@ struct ObPieceKey final : dest_id_(other.dest_id_), round_id_(other.round_id_), piece_id_(other.piece_id_) {} + uint64_t hash() const; + + int hash(uint64_t &hash_val) const + { + hash_val = hash(); + return OB_SUCCESS; + } + void reset() + { + dest_id_ = 0; + round_id_ = 0; + piece_id_ = 0; + } void 
operator=(const ObPieceKey &other) { dest_id_ = other.dest_id_; @@ -573,6 +586,8 @@ struct ObTenantArchivePieceAttr final : public ObIInnerTableRow return !(*this == other); } + void reset(); + // Return if primary key valid. bool is_pkey_valid() const override; @@ -641,6 +656,7 @@ public: int fill_dml(ObDMLSqlSplicer &dml) const override; int assign(const ObTenantArchivePieceAttr &other); + void reset(); OB_INLINE int set_path(const ObBackupPathString &path) { diff --git a/src/share/backup/ob_backup_store.cpp b/src/share/backup/ob_backup_store.cpp index a985c0c2c7..1f7f629ae3 100644 --- a/src/share/backup/ob_backup_store.cpp +++ b/src/share/backup/ob_backup_store.cpp @@ -184,6 +184,12 @@ bool ObBackupStore::is_init() const return IS_INIT; } +void ObBackupStore::reset() +{ + is_inited_ = false; + backup_dest_.reset(); +} + const ObBackupDest &ObBackupStore::get_backup_dest() const { return backup_dest_; diff --git a/src/share/backup/ob_backup_store.h b/src/share/backup/ob_backup_store.h index 2b6ed6950c..7b8b879d84 100644 --- a/src/share/backup/ob_backup_store.h +++ b/src/share/backup/ob_backup_store.h @@ -126,6 +126,7 @@ public: int init(const share::ObBackupDest &backup_dest); bool is_init() const; + void reset(); const ObBackupDest &get_backup_dest() const; const ObBackupStorageInfo *get_storage_info() const; diff --git a/src/share/backup/ob_backup_struct.h b/src/share/backup/ob_backup_struct.h index 1e70eed733..4d98eb7ede 100644 --- a/src/share/backup/ob_backup_struct.h +++ b/src/share/backup/ob_backup_struct.h @@ -352,7 +352,7 @@ const char *const OB_BACKUP_DECRYPTION_PASSWD_ARRAY_SESSION_STR = "__ob_backup_d const char *const OB_RESTORE_SOURCE_NAME_SESSION_STR = "__ob_restore_source_name__"; const char *const OB_RESTORE_PREVIEW_TENANT_ID_SESSION_STR = "__ob_restore_preview_tenant_id__"; const char *const OB_RESTORE_PREVIEW_BACKUP_DEST_SESSION_STR = "__ob_restore_preview_backup_dest__"; -const char *const OB_RESTORE_PREVIEW_SCN_SESSION_STR = "__ob_restore_preview_timestamp__"; +const char *const OB_RESTORE_PREVIEW_SCN_SESSION_STR = "__ob_restore_preview_scn__"; const char *const OB_RESTORE_PREVIEW_TIMESTAMP_SESSION_STR = "__ob_restore_preview_timestamp__"; const char *const OB_RESTORE_PREVIEW_BACKUP_CLUSTER_NAME_SESSION_STR = "__ob_restore_preview_backup_cluster_name__"; const char *const OB_RESTORE_PREVIEW_BACKUP_CLUSTER_ID_SESSION_STR = "__ob_restore_preview_backup_cluster_id__"; diff --git a/src/share/config/ob_config_helper.h b/src/share/config/ob_config_helper.h index ee019c03c9..7067543257 100644 --- a/src/share/config/ob_config_helper.h +++ b/src/share/config/ob_config_helper.h @@ -786,6 +786,15 @@ private: DISALLOW_COPY_AND_ASSIGN(ObConfigTableStoreFormatChecker); }; +class ObConfigArchiveLagTargetChecker { +public: + ObConfigArchiveLagTargetChecker(){} + virtual ~ObConfigArchiveLagTargetChecker(){} + static bool check(const uint64_t tenant_id, const obrpc::ObAdminSetConfigItem &t); +private: + DISALLOW_COPY_AND_ASSIGN(ObConfigArchiveLagTargetChecker); +}; + class ObConfigMigrationChooseSourceChecker : public ObConfigChecker { @@ -797,15 +806,6 @@ private: DISALLOW_COPY_AND_ASSIGN(ObConfigMigrationChooseSourceChecker); }; -class ObConfigArchiveLagTargetChecker { -public: - ObConfigArchiveLagTargetChecker(){} - virtual ~ObConfigArchiveLagTargetChecker(){} - static bool check(const uint64_t tenant_id, const obrpc::ObAdminSetConfigItem &t); -private: - DISALLOW_COPY_AND_ASSIGN(ObConfigArchiveLagTargetChecker); -}; - typedef __ObConfigContainer ObConfigContainer; } // namespace 
common diff --git a/src/share/ob_rpc_struct.cpp b/src/share/ob_rpc_struct.cpp index 1c88ac94cf..22bc6bf1a5 100755 --- a/src/share/ob_rpc_struct.cpp +++ b/src/share/ob_rpc_struct.cpp @@ -3874,7 +3874,8 @@ int ObLSMigrateReplicaArg::init( const common::ObReplicaMember &dst, const common::ObReplicaMember &data_source, const int64_t paxos_replica_number, - const bool skip_change_member_list) + const bool skip_change_member_list, + const common::ObReplicaMember &force_data_source) { int ret = OB_SUCCESS; task_id_ = task_id; @@ -3885,6 +3886,7 @@ int ObLSMigrateReplicaArg::init( data_source_ = data_source; paxos_replica_number_ = paxos_replica_number; skip_change_member_list_ = skip_change_member_list; + force_data_source_ = force_data_source; return ret; } @@ -3925,7 +3927,8 @@ int ObLSAddReplicaArg::init( const common::ObReplicaMember &data_source, const int64_t orig_paxos_replica_number, const int64_t new_paxos_replica_number, - const bool skip_change_member_list) + const bool skip_change_member_list, + const common::ObReplicaMember &force_data_source) { int ret = OB_SUCCESS; task_id_ = task_id; @@ -3936,6 +3939,7 @@ int ObLSAddReplicaArg::init( orig_paxos_replica_number_ = orig_paxos_replica_number; new_paxos_replica_number_ = new_paxos_replica_number; skip_change_member_list_ = skip_change_member_list; + force_data_source_ = force_data_source; return ret; } diff --git a/src/share/ob_rpc_struct.h b/src/share/ob_rpc_struct.h index f4b5993c18..267d67cee2 100755 --- a/src/share/ob_rpc_struct.h +++ b/src/share/ob_rpc_struct.h @@ -4179,7 +4179,8 @@ public: const common::ObReplicaMember &dst, const common::ObReplicaMember &data_source, const int64_t paxos_replica_number, - const bool skip_change_member_list); + const bool skip_change_member_list, + const common::ObReplicaMember &force_data_source); TO_STRING_KV(K_(task_id), K_(tenant_id), @@ -4242,7 +4243,8 @@ public: const common::ObReplicaMember &data_source, const int64_t orig_paxos_replica_number, const int64_t new_paxos_replica_number, - const bool skip_change_member_list); + const bool skip_change_member_list, + const common::ObReplicaMember &force_data_source); TO_STRING_KV(K_(task_id), K_(tenant_id), @@ -5860,7 +5862,6 @@ public: K_(passwd_array), K_(kms_info), K_(table_items), - K_(multi_uri), K_(with_restore_scn), K_(encrypt_key), K_(kms_uri), diff --git a/src/share/parameter/ob_parameter_seed.ipp b/src/share/parameter/ob_parameter_seed.ipp index 48789fdb0b..b801f15226 100644 --- a/src/share/parameter/ob_parameter_seed.ipp +++ b/src/share/parameter/ob_parameter_seed.ipp @@ -1932,7 +1932,7 @@ DEF_TIME(_faststack_min_interval, OB_CLUSTER_PARAMETER, "30m", "[1s,)", "Minimum interval for OBServer to automatically collect the obstack. " "Default: 30min. Range: [1s,+∞)", ObParameterAttr(Section::OBSERVER, Source::DEFAULT, EditLevel::DYNAMIC_EFFECTIVE)); -DEF_STR_WITH_CHECKER(choose_migration_source_policy, OB_TENANT_PARAMETER, "idc", +DEF_STR_WITH_CHECKER(choose_migration_source_policy, OB_TENANT_PARAMETER, "region", common::ObConfigMigrationChooseSourceChecker, "the policy of choose source in migration and add replica. 
'idc' means firstly choose follower replica of the same idc as source, " "'region' means firstly choose follower replica of the same region as source", diff --git a/src/share/restore/ob_log_restore_source_mgr.cpp b/src/share/restore/ob_log_restore_source_mgr.cpp index 1d53b23ed6..39947d90bb 100644 --- a/src/share/restore/ob_log_restore_source_mgr.cpp +++ b/src/share/restore/ob_log_restore_source_mgr.cpp @@ -137,7 +137,50 @@ int ObLogRestoreSourceMgr::add_location_source(const SCN &recovery_until_scn, int ObLogRestoreSourceMgr::add_rawpath_source(const SCN &recovery_until_scn, const DirArray &array) { - return OB_NOT_SUPPORTED; + int ret = OB_SUCCESS; + ObSqlString rawpath_value; + if (IS_NOT_INIT) { + ret = OB_NOT_INIT; + LOG_WARN("ObLogRestoreSourceMgr not init", K(ret), K_(is_inited)); + } else if (OB_UNLIKELY(array.empty() || !recovery_until_scn.is_valid())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument to add rawpath source", K(ret), K(array), K(recovery_until_scn)); + } else { + for (int64_t i = 0; OB_SUCC(ret) && i < array.count(); i++) { + ObBackupDest dest; + ObBackupPathString rawpath = array[i]; + char dest_buf[OB_MAX_BACKUP_DEST_LENGTH] = { 0 }; + if (OB_UNLIKELY(rawpath.is_empty())) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("raw path is empty", K(ret), K(array)); + } else if (OB_FAIL(dest.set(rawpath.ptr()))) { + LOG_WARN("set rawpath backup dest failed", K(ret), K(rawpath)); + } else if (OB_FAIL(dest.get_backup_dest_str(dest_buf, sizeof(dest_buf)))) { + LOG_WARN("get rawpath backup path failed", K(ret), K(dest)); + } else if (0 == i) { + if (OB_FAIL(rawpath_value.assign(dest_buf))) { + LOG_WARN("fail to assign rawpath", K(ret), K(dest_buf)); + } + } else if (OB_FAIL(rawpath_value.append(","))) { + LOG_WARN("fail to append rawpath", K(ret)); + } else if (OB_FAIL(rawpath_value.append(dest_buf))) { + LOG_WARN("fail to append rawpath", K(ret), K(dest_buf)); + } + } + if (OB_SUCC(ret)) { + ObLogRestoreSourceItem item(tenant_id_, + OB_DEFAULT_LOG_RESTORE_SOURCE_ID, + ObLogRestoreSourceType::RAWPATH, + ObString(rawpath_value.ptr()), + recovery_until_scn); + if (OB_FAIL(table_operator_.insert_source(item))) { + LOG_WARN("table_operator_ insert_source failed", K(ret), K(item)); + } else { + LOG_INFO("add rawpath source succ", K(recovery_until_scn), K(array)); + } + } + } + return ret; } int ObLogRestoreSourceMgr::get_source(ObLogRestoreSourceItem &item) diff --git a/src/sql/engine/cmd/ob_restore_executor.cpp b/src/sql/engine/cmd/ob_restore_executor.cpp index 7783700016..b11a0dd952 100644 --- a/src/sql/engine/cmd/ob_restore_executor.cpp +++ b/src/sql/engine/cmd/ob_restore_executor.cpp @@ -64,7 +64,21 @@ int ObPhysicalRestoreTenantExecutor::execute( } if (OB_FAIL(ret)) { } else { - if (!is_preview) { + const int64_t timeout = 10 * 60 * 1000 * 1000; // 10min + const int64_t abs_timeout = ObTimeUtility::current_time() + timeout; + const int64_t cur_time_us = ObTimeUtility::current_time(); + ObTimeoutCtx timeout_ctx; + ctx.get_physical_plan_ctx()->set_timeout_timestamp(abs_timeout); + if (ObTimeUtility::current_time() > abs_timeout) { + ret = OB_TIMEOUT; + LOG_WARN("physical restore tenant timeout", K(ret), "tenant_name", + restore_tenant_arg.tenant_name_, K(abs_timeout), "cur_time_us", ObTimeUtility::current_time()); + } else if (OB_FALSE_IT(THIS_WORKER.set_timeout_ts(abs_timeout))) { + } else if (OB_FAIL(timeout_ctx.set_trx_timeout_us(timeout))) { + LOG_WARN("failed to set trx timeout us", K(ret), K(timeout)); + } else if (OB_FAIL(timeout_ctx.set_abs_timeout(abs_timeout))) { + LOG_WARN("failed to set abs 
timeout", K(ret)); + } else if (!is_preview) { if (OB_ISNULL(task_exec_ctx = GET_TASK_EXECUTOR_CTX(ctx))) { ret = OB_NOT_INIT; LOG_WARN("get task executor context failed", K(ret)); @@ -98,21 +112,12 @@ int ObPhysicalRestoreTenantExecutor::sync_wait_tenant_created_( ObExecContext &ctx, const ObString &tenant_name, const int64_t job_id) { int ret = OB_SUCCESS; - const int64_t timeout = 10 * 60 * 1000 * 1000; // 10min - const int64_t abs_timeout = ObTimeUtility::current_time() + timeout; - const int64_t cur_time_us = ObTimeUtility::current_time(); - ObTimeoutCtx timeout_ctx; common::ObMySQLProxy *sql_proxy = nullptr; - ctx.get_physical_plan_ctx()->set_timeout_timestamp(abs_timeout); - LOG_INFO("sync wait tenant created start", K(timeout), K(abs_timeout), K(tenant_name)); + const int64_t cur_time_us = ObTimeUtility::current_time(); + LOG_INFO("sync wait tenant created start", K(tenant_name)); if (OB_ISNULL(sql_proxy = ctx.get_sql_proxy())) { ret = OB_ERR_UNEXPECTED; LOG_WARN("sql proxy must not be null", K(ret)); - } else if (OB_FALSE_IT(THIS_WORKER.set_timeout_ts(abs_timeout))) { - } else if (OB_FAIL(timeout_ctx.set_trx_timeout_us(timeout))) { - LOG_WARN("failed to set trx timeout us", K(ret), K(timeout)); - } else if (OB_FAIL(timeout_ctx.set_abs_timeout(abs_timeout))) { - LOG_WARN("failed to set abs timeout", K(ret)); } else { ObSchemaGetterGuard schema_guard; ObSchemaGetterGuard meta_tenant_scheam_guard; @@ -122,10 +127,7 @@ int ObPhysicalRestoreTenantExecutor::sync_wait_tenant_created_( schema_guard.reset(); meta_tenant_scheam_guard.reset(); const ObTenantSchema *tenant_info = nullptr; - if (ObTimeUtility::current_time() > abs_timeout) { - ret = OB_TIMEOUT; - LOG_WARN("wait restore tenant timeout", K(ret), K(tenant_name), K(abs_timeout), "cur_time_us", ObTimeUtility::current_time()); - } else if (OB_FAIL(ctx.check_status())) { + if (OB_FAIL(ctx.check_status())) { LOG_WARN("check exec ctx failed", K(ret)); } else if (OB_FAIL(GCTX.schema_service_->get_tenant_schema_guard(OB_SYS_TENANT_ID, schema_guard))) { LOG_WARN("failed to get_tenant_schema_guard", KR(ret)); @@ -186,46 +188,42 @@ int ObPhysicalRestoreTenantExecutor::physical_restore_preview( ObExecContext &ctx, ObPhysicalRestoreTenantStmt &stmt) { int ret = OB_SUCCESS; - ObSqlString set_backup_dest_sql; - ObSqlString set_scn_sql; - ObSqlString set_timestamp_sql; - sqlclient::ObISQLConnection *conn = NULL; - observer::ObInnerSQLConnectionPool *pool = NULL; - ObMySQLProxy *sql_proxy = ctx.get_sql_proxy(); ObSQLSessionInfo *session_info = ctx.get_my_session(); const obrpc::ObPhysicalRestoreTenantArg &restore_tenant_arg = stmt.get_rpc_arg(); - int64_t affected_rows = 0; if (OB_ISNULL(session_info)) { ret = OB_INVALID_ARGUMENT; LOG_WARN("invalid args", K(ret), KP(session_info)); - } else if (OB_ISNULL(sql_proxy = GCTX.sql_proxy_) || OB_ISNULL(sql_proxy->get_pool())) { - ret = OB_ERR_UNEXPECTED; - LOG_WARN("sql proxy must not null", K(ret), KP(GCTX.sql_proxy_)); - } else if (sqlclient::INNER_POOL != sql_proxy->get_pool()->get_type()) { - ret = OB_ERR_UNEXPECTED; - LOG_WARN("pool type must be inner", K(ret), "type", sql_proxy->get_pool()->get_type()); - } else if (OB_ISNULL(pool = static_cast(sql_proxy->get_pool()))) { - ret = OB_ERR_UNEXPECTED; - LOG_WARN("pool must not null", K(ret)); - } else if (OB_FAIL(set_backup_dest_sql.assign_fmt("set @%s = '%.*s'", - OB_RESTORE_PREVIEW_BACKUP_DEST_SESSION_STR, restore_tenant_arg.uri_.length(), restore_tenant_arg.uri_.ptr()))) { - LOG_WARN("failed to set backup dest", KR(ret), K(set_backup_dest_sql)); - 
} else if (OB_FAIL(set_scn_sql.assign_fmt("set @%s = '%ld'", - OB_RESTORE_PREVIEW_SCN_SESSION_STR, restore_tenant_arg.with_restore_scn_ ? restore_tenant_arg.restore_scn_.get_val_for_inner_table_field() : 0))) { - LOG_WARN("failed to set timestamp", KR(ret), K(set_scn_sql)); - } else if (OB_FAIL(set_scn_sql.assign_fmt("set @%s = '%.*s'", - OB_RESTORE_PREVIEW_TIMESTAMP_SESSION_STR, restore_tenant_arg.restore_timestamp_.length(), restore_tenant_arg.uri_.ptr()))) { - LOG_WARN("failed to set timestamp", KR(ret), K(set_scn_sql)); - } else if (OB_FAIL(pool->acquire(session_info, conn))) { - LOG_WARN("failed to get conn", K(ret)); - } else if (OB_FAIL(conn->execute_write(session_info->get_effective_tenant_id(), - set_backup_dest_sql.ptr(), affected_rows))) { - LOG_WARN("failed to set backup dest", K(ret), K(set_backup_dest_sql)); - } else if (OB_FAIL(conn->execute_write(session_info->get_effective_tenant_id(), - set_scn_sql.ptr(), affected_rows))) { - LOG_WARN("failed to set restore timestamp", K(ret), K(set_scn_sql)); + } else { + ObSessionVariable backup_dest; + backup_dest.value_.set_collation_type(CS_TYPE_UTF8MB4_GENERAL_CI); + backup_dest.value_.set_varchar(restore_tenant_arg.uri_.ptr(), restore_tenant_arg.uri_.length()); + backup_dest.meta_.set_meta(backup_dest.value_.meta_); + if (OB_FAIL(session_info->replace_user_variable(OB_RESTORE_PREVIEW_BACKUP_DEST_SESSION_STR, backup_dest))) { + LOG_WARN("fail to set session variable", "name", OB_RESTORE_PREVIEW_BACKUP_DEST_SESSION_STR, "value", backup_dest); + } else { + ObSessionVariable restore_scn; + restore_scn.value_.set_collation_type(CS_TYPE_UTF8MB4_GENERAL_CI); + char scn_str[OB_MAX_INTEGER_DISPLAY_WIDTH + 1] = { 0 }; + int64_t pos = 0; + if (OB_FAIL(databuff_printf(scn_str, OB_MAX_INTEGER_DISPLAY_WIDTH + 1, pos, "%lu", restore_tenant_arg.restore_scn_.get_val_for_inner_table_field()))) { + LOG_WARN("fail to databuff printf", K(ret), K(restore_tenant_arg)); + } else { + restore_scn.value_.set_varchar(restore_tenant_arg.with_restore_scn_ ? 
scn_str : "0"); + restore_scn.meta_.set_meta(restore_scn.value_.meta_); + if (OB_FAIL(session_info->replace_user_variable(OB_RESTORE_PREVIEW_SCN_SESSION_STR, restore_scn))) { + LOG_WARN("fail to set session variable", "name", OB_RESTORE_PREVIEW_SCN_SESSION_STR, "value", restore_scn); + } else { + ObSessionVariable restore_timestamp; + restore_timestamp.value_.set_collation_type(CS_TYPE_UTF8MB4_GENERAL_CI); + restore_timestamp.value_.set_varchar(restore_tenant_arg.restore_timestamp_.ptr(), restore_tenant_arg.restore_timestamp_.length()); + restore_timestamp.meta_.set_meta(restore_timestamp.value_.meta_); + if (OB_FAIL(session_info->replace_user_variable(OB_RESTORE_PREVIEW_TIMESTAMP_SESSION_STR, restore_timestamp))) { + LOG_WARN("fail to set session variable", "name", OB_RESTORE_PREVIEW_TIMESTAMP_SESSION_STR, "value", restore_timestamp); + } + } + } + } } - return ret; } diff --git a/src/sql/resolver/cmd/ob_alter_system_resolver.cpp b/src/sql/resolver/cmd/ob_alter_system_resolver.cpp index a93a4ce757..2b702cb0c5 100644 --- a/src/sql/resolver/cmd/ob_alter_system_resolver.cpp +++ b/src/sql/resolver/cmd/ob_alter_system_resolver.cpp @@ -2982,6 +2982,8 @@ int ObPhysicalRestoreTenantResolver::resolve(const ParseNode &parse_tree) LOG_WARN("resolve string failed", K(ret)); } else if (OB_ISNULL(parse_tree.children_[1])) { stmt->get_rpc_arg().with_restore_scn_ = false; + ret = OB_OP_NOT_ALLOW; + LOG_USER_ERROR(OB_OP_NOT_ALLOW, "restore preview must have a scn or timestamp, otherwise"); } else if (0/*timestamp*/ == time_node->children_[0]->value_) { stmt->get_rpc_arg().restore_timestamp_.assign_ptr(time_node->children_[1]->str_value_, time_node->children_[1]->str_len_); stmt->get_rpc_arg().with_restore_scn_ = false; @@ -3009,7 +3011,7 @@ int ObPhysicalRestoreTenantResolver::resolve(const ParseNode &parse_tree) if (session_info_->user_variable_exists(OB_RESTORE_SOURCE_NAME_SESSION_STR)) { ret = OB_NOT_SUPPORTED; LOG_WARN("invalid sql syntax", KR(ret)); - LOG_USER_ERROR(OB_NOT_SUPPORTED, "should not have backup_dest and restore_source at the same time"); + LOG_USER_ERROR(OB_NOT_SUPPORTED, "have backup_dest and restore_source at the same time"); } else if (OB_FAIL(Util::resolve_string(parse_tree.children_[1], stmt->get_rpc_arg().uri_))) { LOG_WARN("resolve string failed", K(ret)); diff --git a/src/storage/backup/ob_backup_data_store.cpp b/src/storage/backup/ob_backup_data_store.cpp index 893826eb29..fec1882324 100644 --- a/src/storage/backup/ob_backup_data_store.cpp +++ b/src/storage/backup/ob_backup_data_store.cpp @@ -351,6 +351,14 @@ int ObBackupDataStore::init( return ret; } +void ObBackupDataStore::reset() +{ + ObBackupStore::reset(); + backup_desc_.reset(); + backup_set_dest_.reset(); + +} + int ObBackupDataStore::write_ls_attr(const int64_t turn_id, const ObBackupDataLSAttrDesc &ls_info) { int ret = OB_SUCCESS; @@ -756,6 +764,26 @@ int ObBackupDataStore::read_backup_set_info(ObExternBackupSetInfoDesc &backup_se return ret; } +int ObBackupDataStore::is_backup_set_info_file_exist(bool &is_exist) const +{ + int ret = OB_SUCCESS; + ObBackupIoAdapter util; + share::ObBackupPath path; + ObBackupPathString full_path; + const ObBackupStorageInfo *storage_info = get_storage_info(); + if (!is_init()) { + ret = OB_NOT_INIT; + LOG_WARN("ObBackupDataStore not init", K(ret)); + } else if (OB_FAIL(ObBackupPathUtil::get_backup_set_info_path(backup_set_dest_, path))) { + LOG_WARN("fail to get tenant ls attr info path", K(ret)); + } else if (OB_FAIL(full_path.assign(path.get_obstr()))) { + LOG_WARN("fail to assign 
full path", K(ret)); + } else if (OB_FAIL(util.is_exist(full_path.str(), storage_info, is_exist))) { + LOG_WARN("failed to check backup set info file exist.", K(ret), K(full_path), K(storage_info)); + } + return ret; +} + int ObBackupDataStore::write_root_key_info(const uint64_t tenant_id) { int ret = OB_SUCCESS; @@ -990,6 +1018,45 @@ int ObBackupDataStore::get_backup_sys_time_zone_wrap(common::ObTimeZoneInfoWrap return ret; } +int ObBackupDataStore::get_single_backup_set_sys_time_zone_wrap(common::ObTimeZoneInfoWrap & time_zone_wrap) +{ + int ret = OB_SUCCESS; + if (!is_init()) { + ret = OB_NOT_INIT; + LOG_WARN("ObBackupDataStore not init", K(ret)); + } else { + HEAP_VARS_2((storage::ObExternTenantLocalityInfoDesc, locality_info), + (storage::ObExternBackupSetInfoDesc, backup_set_info)) { + if (OB_FAIL(read_tenant_locality_info(locality_info))) { + LOG_WARN("fail to read backup set info", K(ret), K_(backup_set_dest)); + } else if (OB_FAIL(read_backup_set_info(backup_set_info))) { + if (OB_BACKUP_FILE_NOT_EXIST == ret) { + LOG_WARN("backup set info not exist", K(ret), K_(backup_set_dest)); + ret = OB_SUCCESS; + } else { + LOG_WARN("fail to read backup set info", K(ret), K_(backup_set_dest)); + } + } else if (backup_set_info.backup_set_file_.tenant_compatible_ < DATA_VERSION_4_2_0_0) { + const char *time_zone = "+08:00"; + int32_t offset = 0; + int ret_more = OB_SUCCESS; + bool is_oracle_mode = locality_info.compat_mode_ == lib::Worker::CompatMode::ORACLE; + if (OB_FAIL(ObTimeConverter::str_to_offset(time_zone, + offset, + ret_more, + is_oracle_mode))) { + LOG_WARN("invalid time zone offset", K(ret), K(time_zone), K(offset), K(is_oracle_mode)); + } else { + time_zone_wrap.set_tz_info_offset(offset); + } + } else if (OB_FAIL(time_zone_wrap.deep_copy(locality_info.sys_time_zone_wrap_))) { + LOG_WARN("failed to deep copy time zone wrap", K(ret), K(locality_info)); + } + } + } + return ret; +} + int ObBackupDataStore::do_get_backup_set_array_(const common::ObString &passwd_array, const SCN &restore_scn, const ObBackupSetFilter &op, common::ObIArray &tmp_backup_set_list, diff --git a/src/storage/backup/ob_backup_data_store.h b/src/storage/backup/ob_backup_data_store.h index c7ea6c13e3..7d4bc983c7 100644 --- a/src/storage/backup/ob_backup_data_store.h +++ b/src/storage/backup/ob_backup_data_store.h @@ -325,6 +325,7 @@ public: int init(const share::ObBackupDest &backup_dest, const share::ObBackupSetDesc &backup_desc); const share::ObBackupSetDesc &get_backup_set_desc() const { return backup_desc_; } const share::ObBackupDest &get_backup_set_dest() const { return backup_set_dest_; } + void reset(); int write_ls_attr(const int64_t turn_id, const ObBackupDataLSAttrDesc &ls_info); int read_ls_attr_info(ObBackupDataLSAttrDesc &ls_info); @@ -362,11 +363,12 @@ public: // write and read backup set info int write_backup_set_info(const ObExternBackupSetInfoDesc &backup_set_info); int read_backup_set_info(ObExternBackupSetInfoDesc &backup_set_info); - + int is_backup_set_info_file_exist(bool &is_exist) const; int get_backup_set_array(const common::ObString &passwd_array, const share::SCN &restore_scn, share::SCN &restore_start_scn, common::ObIArray &backup_set_list); int get_max_backup_set_file_info(const common::ObString &passwd_array, ObBackupSetFileDesc &output_desc); int get_backup_sys_time_zone_wrap(common::ObTimeZoneInfoWrap & time_zone_wrap); + int get_single_backup_set_sys_time_zone_wrap(common::ObTimeZoneInfoWrap & time_zone_wrap); int get_max_sys_ls_retry_id( const share::ObBackupPath &backup_path, 
const share::ObLSID &ls_id, const int64_t turn_id, int64_t &retry_id); int write_root_key_info(const uint64_t tenant_id); diff --git a/src/storage/high_availability/ob_ls_member_list_service.cpp b/src/storage/high_availability/ob_ls_member_list_service.cpp index 08711feec9..e39b5f99fd 100644 --- a/src/storage/high_availability/ob_ls_member_list_service.cpp +++ b/src/storage/high_availability/ob_ls_member_list_service.cpp @@ -328,8 +328,7 @@ int ObLSMemberListService::check_ls_transfer_scn_(const share::SCN &transfer_scn int ObLSMemberListService::get_ls_member_list_(common::ObIArray &addr_list) { int ret = OB_SUCCESS; - ObStorageHASrcProvider provider; - ObMigrationOpType::TYPE type = ObMigrationOpType::MIGRATE_LS_OP; + ObStorageHAGetMemberHelper get_member_helper; ObLSService *ls_svr = NULL; ObStorageRpc *storage_rpc = NULL; if (OB_ISNULL(ls_)) { @@ -341,9 +340,9 @@ int ObLSMemberListService::get_ls_member_list_(common::ObIArray } else if (OB_ISNULL(storage_rpc = ls_svr->get_storage_rpc())) { ret = OB_ERR_UNEXPECTED; STORAGE_LOG(WARN, "storage rpc should not be NULL", K(ret), KP(storage_rpc)); - } else if (OB_FAIL(provider.init(ls_->get_tenant_id(), type, storage_rpc))) { - STORAGE_LOG(WARN, "failed to init src provider", K(ret), KP_(ls)); - } else if (OB_FAIL(provider.get_ls_member_list(ls_->get_tenant_id(), ls_->get_ls_id(), addr_list))) { + } else if (OB_FAIL(get_member_helper.init(storage_rpc))) { + STORAGE_LOG(WARN, "failed to init get member helper", K(ret), KP_(ls)); + } else if (OB_FAIL(get_member_helper.get_ls_member_list(ls_->get_tenant_id(), ls_->get_ls_id(), addr_list))) { STORAGE_LOG(WARN, "failed to get ls member list", K(ret), KP_(ls)); } return ret; } diff --git a/src/storage/high_availability/ob_ls_migration.cpp b/src/storage/high_availability/ob_ls_migration.cpp index b11552915f..d12772eab8 100644 --- a/src/storage/high_availability/ob_ls_migration.cpp +++ b/src/storage/high_availability/ob_ls_migration.cpp @@ -22,7 +22,6 @@ #include "lib/hash/ob_hashset.h" #include "lib/time/ob_time_utility.h" #include "observer/ob_server_event_history_table_operator.h" -#include "ob_storage_ha_src_provider.h" #include "storage/tablet/ob_tablet_iterator.h" #include "ob_storage_ha_utils.h" #include "storage/tablet/ob_tablet.h" @@ -30,6 +29,7 @@ #include "ob_rebuild_service.h" #include "share/ob_cluster_version.h" #include "ob_storage_ha_utils.h" +#include "ob_storage_ha_src_provider.h" namespace oceanbase { @@ -1113,6 +1113,14 @@ int ObStartMigrationTask::deal_with_local_ls_() ctx_->local_rebuild_seq_ = local_ls_meta.get_rebuild_seq(); } } +#ifdef ERRSIM + if (OB_SUCC(ret) && !ctx_->local_clog_checkpoint_scn_.is_min()) { + SERVER_EVENT_ADD("storage_ha", "before_choose_source", + "tenant_id", ctx_->tenant_id_, + "ls_id", ctx_->arg_.ls_id_.id()); + DEBUG_SYNC(BEFORE_CHOOSE_SOURCE); + } +#endif return ret; } @@ -1143,7 +1151,11 @@ int ObStartMigrationTask::report_ls_meta_table_() int ObStartMigrationTask::choose_src_() { int ret = OB_SUCCESS; - storage::ObStorageHASrcProvider src_provider; + ObStorageHAChooseSrcHelper choose_src_helper; + ObStorageHASrcProvider::ChooseSourcePolicy policy = ObStorageHASrcProvider::ChooseSourcePolicy::IDC; + const char *str = "idc"; + ObStorageHAGetMemberHelper member_helper; + bool enable_choose_source_policy = true; if (!is_inited_) { ret = OB_NOT_INIT; LOG_WARN("start migration task do not init", K(ret)); @@ -1156,12 +1168,26 @@ ObStorageHASrcInfo src_info; obrpc::ObCopyLSInfo ls_info; SCN local_clog_checkpoint_scn = 
SCN::min_scn(); + omt::ObTenantConfigGuard tenant_config(TENANT_CONF(tenant_id)); if (OB_FAIL(get_local_ls_checkpoint_scn_(local_clog_checkpoint_scn))) { LOG_WARN("failed to get local ls checkpoint ts", K(ret)); - } else if (OB_FAIL(src_provider.init(tenant_id, ctx_->arg_.type_, storage_rpc_))) { - LOG_WARN("failed to init src provider", K(ret), K(tenant_id), "type", ctx_->arg_.type_); - } else if (OB_FAIL(src_provider.choose_ob_src(ls_id, local_clog_checkpoint_scn, src_info))) { - LOG_WARN("failed to choose ob src", K(ret), K(tenant_id), K(ls_id), K(local_clog_checkpoint_scn)); + } else if (!tenant_config.is_valid()) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("tenant config is invalid", K(ret)); + } else if (FALSE_IT(str = tenant_config->choose_migration_source_policy.str())) { + } else if (FALSE_IT(enable_choose_source_policy = tenant_config->_enable_choose_migration_source_policy)) { + } else if (OB_FAIL(ObStorageHAChooseSrcHelper::get_policy_type(ctx_->arg_, tenant_id, + enable_choose_source_policy, str, policy))) { + LOG_WARN("failed to get policy type", K(ret), K(ctx_->arg_), K(tenant_id), + K(enable_choose_source_policy), K(str)); + } else if (OB_FAIL(member_helper.init(storage_rpc_))) { + LOG_WARN("failed to init member helper", K(ret), KP(storage_rpc_)); + } else if (OB_FAIL(choose_src_helper.init(tenant_id, ls_id, local_clog_checkpoint_scn, ctx_->arg_, policy, + storage_rpc_, &member_helper))) { + LOG_WARN("failed to init choose src helper", K(ret), K(tenant_id), K(ls_id), K(local_clog_checkpoint_scn), + K(ctx_->arg_), K(policy), KP(storage_rpc_)); + } else if (OB_FAIL(choose_src_helper.get_available_src(ctx_->arg_, src_info))) { + LOG_WARN("failed to choose ob src", K(ret), K(tenant_id), K(ls_id), K(local_clog_checkpoint_scn), K(ctx_->arg_)); } else if (OB_FAIL(fetch_ls_info_(tenant_id, ls_id, src_info.src_addr_, ls_info))) { LOG_WARN("failed to fetch ls info", K(ret), K(tenant_id), K(ls_id), K(src_info)); } else if (OB_FAIL(ObStorageHAUtils::check_server_version(ls_info.version_))) { @@ -2393,10 +2419,18 @@ int ObTabletMigrationTask::process() LOG_INFO("start do tablet migration task", KPC(copy_tablet_ctx_)); const int64_t start_ts = ObTimeUtility::current_time(); ObCopyTabletStatus::STATUS status = ObCopyTabletStatus::MAX_STATUS; + bool need_rebuild = false; if (OB_NOT_NULL(copy_tablet_ctx_)) { if (copy_tablet_ctx_->tablet_id_.is_inner_tablet() || copy_tablet_ctx_->tablet_id_.is_ls_inner_tablet()) { } else { +#ifdef ERRSIM + if (OB_SUCC(ret) && is_inited_) { + SERVER_EVENT_ADD("storage_ha", "check_log_need_rebuild_before_migration_sstable", + "tenant_id", ctx_->tenant_id_, + "ls_id", ctx_->arg_.ls_id_.id()); + } +#endif DEBUG_SYNC(BEFORE_MIGRATION_TABLET_COPY_SSTABLE); } } @@ -2406,6 +2440,14 @@ int ObTabletMigrationTask::process() LOG_WARN("tablet migration task do not init", K(ret), KPC(copy_tablet_ctx_)); } else if (ctx_->is_failed()) { //do nothing + } else if (!copy_tablet_ctx_->tablet_id_.is_ls_inner_tablet() + && OB_FAIL(ObStorageHAUtils::check_log_need_rebuild(ctx_->tenant_id_, ctx_->arg_.ls_id_, need_rebuild))) { + LOG_WARN("failed to check log need rebuild", K(ret), KPC(ctx_)); + } else if (need_rebuild) { + LOG_INFO("cannot replay log, will retry", K(need_rebuild), KPC(ctx_)); + if (OB_FAIL(ctx_->set_result(OB_LS_NEED_REBUILD/*result*/, true/*need_retry*/, this->get_dag()->get_type()))) { + LOG_WARN("failed to set result", K(ret), K(tmp_ret), KPC(ctx_)); + } } else if (OB_FAIL(check_tablet_replica_validity_(copy_tablet_ctx_->tablet_id_))) { LOG_WARN("failed to check 
tablet replica validity", K(ret), KPC(copy_tablet_ctx_)); } else if (OB_FAIL(try_update_tablet_())) { @@ -3285,6 +3327,7 @@ int ObDataTabletsMigrationTask::process() { int ret = OB_SUCCESS; int tmp_ret = OB_SUCCESS; + bool need_rebuild = false; LOG_INFO("start do data tablets migration task", K(ret), KPC(ctx_)); #ifdef ERRSIM SERVER_EVENT_SYNC_ADD("storage_ha", "before_data_tablets_migration_task", @@ -3304,6 +3347,13 @@ int ObDataTabletsMigrationTask::process() LOG_WARN("failed to add to learner list", K(ret)); } else if (OB_FAIL(ls_online_())) { LOG_WARN("failed to start replay log", K(ret), K(*ctx_)); + } else if (OB_FAIL(ObStorageHAUtils::check_log_need_rebuild(ctx_->tenant_id_, ctx_->arg_.ls_id_, need_rebuild))) { + LOG_WARN("failed to check log need rebuild", K(ret), KPC(ctx_)); + } else if (need_rebuild) { + LOG_INFO("can not replay log, it will retry", K(need_rebuild), KPC(ctx_)); + if (OB_FAIL(ctx_->set_result(OB_LS_NEED_REBUILD/*result*/, true/*need_retry*/, this->get_dag()->get_type()))) { + LOG_WARN("failed to set result", K(ret), KPC(ctx_)); + } } else if (OB_FAIL(build_tablet_group_info_())) { LOG_WARN("failed to build tablet group info", K(ret), KPC(ctx_)); } else { diff --git a/src/storage/high_availability/ob_storage_ha_src_provider.cpp b/src/storage/high_availability/ob_storage_ha_src_provider.cpp index 45e9cde775..a602ca2346 100644 --- a/src/storage/high_availability/ob_storage_ha_src_provider.cpp +++ b/src/storage/high_availability/ob_storage_ha_src_provider.cpp @@ -18,111 +18,165 @@ #include "storage/tx_storage/ob_ls_handle.h" #include "storage/tx_storage/ob_ls_service.h" #include "storage/high_availability/ob_storage_ha_utils.h" +#include "storage/ob_locality_manager.h" namespace oceanbase { using namespace share; namespace storage { +/** + * ------------------------------ObStorageHAGetMemberHelper--------------------- + */ +ObStorageHAGetMemberHelper::ObStorageHAGetMemberHelper() + : storage_rpc_(nullptr), + is_inited_(false) +{ +} -ObStorageHASrcProvider::ObStorageHASrcProvider() - : is_inited_(false), - tenant_id_(OB_INVALID_ID), - type_(ObMigrationOpType::MAX_LS_OP), - storage_rpc_(nullptr) -{} +ObStorageHAGetMemberHelper::~ObStorageHAGetMemberHelper() +{ +} -ObStorageHASrcProvider::~ObStorageHASrcProvider() -{} - -int ObStorageHASrcProvider::init(const uint64_t tenant_id, const ObMigrationOpType::TYPE &type, - storage::ObStorageRpc *storage_rpc) +int ObStorageHAGetMemberHelper::init(storage::ObStorageRpc *storage_rpc) { int ret = OB_SUCCESS; if (is_inited_) { ret = OB_INIT_TWICE; - LOG_WARN("storage ha src provider init twice", K(ret)); - } else if (OB_INVALID_ID == tenant_id || OB_ISNULL(storage_rpc) - || type < ObMigrationOpType::ADD_LS_OP || type >= ObMigrationOpType::MAX_LS_OP) { + LOG_WARN("ObStorageHAGetMemberHelper init twice", K(ret)); + } else if (OB_ISNULL(storage_rpc)) { ret = OB_INVALID_ARGUMENT; - LOG_WARN("get invalid argument", K(ret), K(tenant_id), K(type), KP(storage_rpc)); + LOG_WARN("invalid argument", K(ret), KP(storage_rpc)); } else { - tenant_id_ = tenant_id; - type_ = type; storage_rpc_ = storage_rpc; is_inited_ = true; } return ret; } -int ObStorageHASrcProvider::choose_ob_src(const share::ObLSID &ls_id, const SCN &local_clog_checkpoint_scn, - ObStorageHASrcInfo &src_info) -{ - int ret = OB_SUCCESS; - src_info.reset(); - common::ObAddr leader_addr; - common::ObArray addr_list; - int64_t choose_member_idx = -1; - ObAddr chosen_src_addr; - if (IS_NOT_INIT) { - ret = OB_NOT_INIT; - LOG_WARN("start migration task do not init", K(ret)); - } 
else if (OB_FAIL(get_ls_leader_(tenant_id_, ls_id, leader_addr))) { - LOG_WARN("failed to get ls leader", K(ret), K_(tenant_id), K(ls_id)); - } else if (OB_FAIL(fetch_ls_member_list_(tenant_id_, ls_id, leader_addr, addr_list))) { - LOG_WARN("failed to fetch ls leader member list", K(ret), K_(tenant_id), K(ls_id), K(leader_addr)); - } else if (OB_FAIL(inner_choose_ob_src_(tenant_id_, ls_id, local_clog_checkpoint_scn, addr_list, chosen_src_addr))) { - LOG_WARN("failed to inner choose ob src", K(ret), K_(tenant_id), K(ls_id), K(local_clog_checkpoint_scn), K(addr_list)); - } else { - src_info.src_addr_ = chosen_src_addr; - src_info.cluster_id_ = GCONF.cluster_id; -#ifdef ERRSIM - if (ObMigrationOpType::ADD_LS_OP == type_ || ObMigrationOpType::MIGRATE_LS_OP == type_) { - const ObString &errsim_server = GCONF.errsim_migration_src_server_addr.str(); - if (!errsim_server.empty()) { - common::ObAddr tmp_errsim_addr; - if (OB_FAIL(tmp_errsim_addr.parse_from_string(errsim_server))) { - LOG_WARN("failed to parse from string", K(ret), K(errsim_server)); - } else { - src_info.src_addr_ = tmp_errsim_addr; - src_info.cluster_id_ = GCONF.cluster_id; - LOG_INFO("storage ha choose errsim src", K(tmp_errsim_addr)); - } - } - } -#endif - SERVER_EVENT_ADD("storage_ha", "choose_src", - "tenant_id", tenant_id_, - "ls_id", ls_id.id(), - "src_addr", src_info.src_addr_, - "op_type", ObMigrationOpType::get_str(type_)); - } - return ret; -} - -int ObStorageHASrcProvider::get_ls_member_list(const uint64_t tenant_id, +int ObStorageHAGetMemberHelper::get_ls_member_list(const uint64_t tenant_id, const share::ObLSID &ls_id, common::ObIArray &addr_list) { int ret = OB_SUCCESS; addr_list.reset(); + common::GlobalLearnerList learner_list; common::ObAddr leader_addr; if (IS_NOT_INIT) { ret = OB_NOT_INIT; - LOG_WARN("start migration task do not init", K(ret)); + LOG_WARN("ObStorageHAGetMemberHelper do not init", K(ret)); } else if (OB_INVALID_ID == tenant_id || !ls_id.is_valid()) { ret = OB_INVALID_ARGUMENT; LOG_WARN("get invalid args", K(ret), K(tenant_id), K(ls_id)); - } else if (OB_FAIL(get_ls_leader_(tenant_id_, ls_id, leader_addr))) { - LOG_WARN("failed to get ls leader", K(ret), K_(tenant_id), K(ls_id)); - } else if (OB_FAIL(fetch_ls_member_list_(tenant_id_, ls_id, leader_addr, addr_list))) { - LOG_WARN("failed to fetch ls leader member list", K(ret), K_(tenant_id), K(ls_id), K(leader_addr)); + } else if (OB_FAIL(get_ls_leader(tenant_id, ls_id, leader_addr))) { + LOG_WARN("failed to get ls leader", K(ret), K(tenant_id), K(ls_id)); + } else if (OB_FAIL(fetch_ls_member_list_and_learner_list_(tenant_id, ls_id, false/*need_learner_list*/, leader_addr, + learner_list, addr_list))) { + LOG_WARN("failed to fetch ls member list", K(ret), K(tenant_id), K(ls_id), K(leader_addr)); } return ret; } -int ObStorageHASrcProvider::get_ls_leader_(const uint64_t tenant_id, const share::ObLSID &ls_id, common::ObAddr &leader) +int ObStorageHAGetMemberHelper::get_ls_member_list_and_learner_list( + const uint64_t tenant_id, const share::ObLSID &ls_id, const bool need_learner_list, + common::ObAddr &leader_addr, common::GlobalLearnerList &learner_list, + common::ObIArray &member_list) +{ + int ret = OB_SUCCESS; + leader_addr.reset(); + member_list.reset(); + learner_list.reset(); + common::ObArray learner_addr_array; + if (IS_NOT_INIT) { + ret = OB_NOT_INIT; + LOG_WARN("ObStorageHAGetMemberHelper do not init", K(ret)); + } else if (OB_INVALID_ID == tenant_id || !ls_id.is_valid()) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("get invalid args", 
K(ret), K(tenant_id), K(ls_id)); + } else if (OB_FAIL(get_ls_leader(tenant_id, ls_id, leader_addr))) { + LOG_WARN("failed to get ls leader", K(ret), K(tenant_id), K(ls_id)); + } else if (OB_FAIL(fetch_ls_member_list_and_learner_list_(tenant_id, ls_id, need_learner_list, + leader_addr, learner_list, member_list))) { + LOG_WARN("failed to fetch ls member list and learner list", K(ret), K(tenant_id), K(ls_id), + K(leader_addr), K(need_learner_list)); + } + return ret; +} + +int ObStorageHAGetMemberHelper::get_ls_member_list_and_learner_list_( + const uint64_t tenant_id, + const share::ObLSID &ls_id, + const bool need_learner_list, + common::ObAddr &leader_addr, + common::GlobalLearnerList &learner_list, + common::ObIArray &member_list) +{ + int ret = OB_SUCCESS; + member_list.reset(); + learner_list.reset(); + ObLSHandle ls_handle; + ObLS *ls = nullptr; + ObStorageHASrcInfo src_info; + src_info.src_addr_ = leader_addr; + src_info.cluster_id_ = GCONF.cluster_id; + obrpc::ObFetchLSMemberListInfo member_info; + obrpc::ObFetchLSMemberAndLearnerListInfo member_and_learner_info; + ObLSService *ls_service = nullptr; + if (OB_FAIL(get_ls(ls_id, ls_handle))) { + LOG_WARN("failed to get ls handle", K(ret), K(ls_id)); + } else if (OB_ISNULL(ls = ls_handle.get_ls())) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("ls should not be NULL", K(ret), KP(ls), K(tenant_id), K(ls_id)); + } else if (need_learner_list) { + if (OB_FAIL(storage_rpc_->fetch_ls_member_and_learner_list(tenant_id, ls_id, src_info, member_and_learner_info))) { + LOG_WARN("failed to fetch ls member list and learner list request", K(ret), K(tenant_id), K(src_info), K(ls_id)); + //overwrite ret + member_and_learner_info.reset(); + if (OB_FAIL(ls->get_log_handler()->get_election_leader(src_info.src_addr_))) { + LOG_WARN("failed to get election leader", K(ret), K(tenant_id), K(ls_id)); + } else { + leader_addr = src_info.src_addr_; + if (OB_FAIL(storage_rpc_->fetch_ls_member_and_learner_list(tenant_id, ls_id, src_info, member_and_learner_info))) { + LOG_WARN("failed to post ls member list and learner list request", K(ret), K(tenant_id), K(src_info), K(ls_id)); + } + } + } + if (OB_FAIL(ret)) { + } else if (OB_FAIL(member_and_learner_info.member_list_.get_addr_array(member_list))) { + LOG_WARN("failed to get member addr array", K(ret), K(member_and_learner_info)); + } else if (OB_FAIL(member_and_learner_info.learner_list_.deep_copy_to(learner_list))) { + LOG_WARN("failed to get learner addr array", K(ret), K(member_and_learner_info)); + } + } else { + if (OB_FAIL(storage_rpc_->post_ls_member_list_request(tenant_id, src_info, ls_id, member_info))) { + LOG_WARN("failed to post ls member list request", K(ret), K(tenant_id), K(src_info), K(ls_id)); + //overwrite ret + member_info.reset(); + if (OB_FAIL(ls->get_log_handler()->get_election_leader(src_info.src_addr_))) { + LOG_WARN("failed to get election leader", K(ret), K(tenant_id), K(ls_id)); + } else { + leader_addr = src_info.src_addr_; + if (OB_FAIL(storage_rpc_->post_ls_member_list_request(tenant_id, src_info, ls_id, member_info))) { + LOG_WARN("failed to post ls member list request", K(ret), K(tenant_id), K(src_info), K(ls_id)); + } + } + } + if (OB_FAIL(ret)) { + } else if (OB_FAIL(member_info.member_list_.get_addr_array(member_list))) { + LOG_WARN("failed to get member addr array", K(ret), K(member_info)); + } else { + FLOG_INFO("fetch ls member list", K(tenant_id), K(ls_id), K(src_info), K(member_and_learner_info), + K(member_info), K(member_list), K(learner_list)); + } + } + return ret; +} + 
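The fallback in get_ls_member_list_and_learner_list_ above is a retry-on-stale-leader pattern: the list RPC is first sent to the cached leader address; if that fails, the current leader is re-resolved through the log handler's election interface and the same RPC is issued once more. A minimal sketch of the pattern, where fetch_list_rpc and resolve_leader are hypothetical stand-ins for the storage RPC call and get_election_leader, not real APIs:

// Sketch only: retry an RPC once against a freshly resolved leader.
// fetch_list_rpc / resolve_leader are hypothetical stand-ins, not real APIs.
int fetch_with_leader_refresh(common::ObAddr &leader_addr)
{
  int ret = OB_SUCCESS;
  if (OB_FAIL(fetch_list_rpc(leader_addr))) {
    // first attempt failed: the cached leader may be stale, so re-resolve it;
    // OB_FAIL deliberately overwrites ret, matching the "overwrite ret" comments above
    if (OB_FAIL(resolve_leader(leader_addr))) {
      // leader lookup failed as well; give up with this error
    } else if (OB_FAIL(fetch_list_rpc(leader_addr))) {
      // retried against the refreshed leader and still failed
    }
  }
  return ret;
}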
+int ObStorageHAGetMemberHelper::get_ls_leader(const uint64_t tenant_id, const share::ObLSID &ls_id, common::ObAddr &leader) { int ret = OB_SUCCESS; leader.reset(); - if (OB_INVALID_ID == tenant_id || !ls_id.is_valid()) { + if (IS_NOT_INIT) { + ret = OB_NOT_INIT; + LOG_WARN("ObStorageHAGetMemberHelper do not init", K(ret)); + } else if (OB_INVALID_ID == tenant_id || !ls_id.is_valid()) { ret = OB_INVALID_ARGUMENT; LOG_WARN("get invalid args", K(ret), K(tenant_id), K(ls_id)); } else if (OB_FAIL(ObStorageHAUtils::get_ls_leader(tenant_id, ls_id, leader))) { @@ -131,19 +185,146 @@ int ObStorageHASrcProvider::get_ls_leader_(const uint64_t tenant_id, const share return ret; } -int ObStorageHASrcProvider::fetch_ls_member_list_(const uint64_t tenant_id, const share::ObLSID &ls_id, - const common::ObAddr &leader_addr, common::ObIArray &addr_list) +int ObStorageHAGetMemberHelper::fetch_ls_member_list_and_learner_list_(const uint64_t tenant_id, const share::ObLSID &ls_id, + const bool need_learner_list, common::ObAddr &leader_addr, + common::GlobalLearnerList &learner_list, + common::ObIArray &member_list) { int ret = OB_SUCCESS; - addr_list.reset(); + member_list.reset(); + learner_list.reset(); if (OB_ISNULL(storage_rpc_)) { ret = OB_ERR_UNEXPECTED; LOG_WARN("storage rpc should not be null", K(ret)); } else if (OB_INVALID_ID == tenant_id || !ls_id.is_valid() || !leader_addr.is_valid()) { ret = OB_INVALID_ARGUMENT; LOG_WARN("get invalid args", K(ret), K(tenant_id), K(ls_id), K(leader_addr)); - } else if (OB_FAIL(get_ls_member_list_(tenant_id, ls_id, leader_addr, addr_list))) { - LOG_WARN("failed to get ls member list", K(ret), K(tenant_id), K(ls_id), K(leader_addr)); + } else if (OB_FAIL(get_ls_member_list_and_learner_list_(tenant_id, ls_id, + need_learner_list, leader_addr, learner_list, member_list))) { + LOG_WARN("failed to get ls member list and learner list", K(ret), K(tenant_id), K(ls_id), + K(need_learner_list), K(leader_addr)); + } + return ret; +} + +int ObStorageHAGetMemberHelper::get_ls(const share::ObLSID &ls_id, ObLSHandle &ls_handle) +{ + int ret = OB_SUCCESS; + ls_handle.reset(); + if (IS_NOT_INIT) { + ret = OB_NOT_INIT; + LOG_WARN("ObStorageHAGetMemberHelper do not init", K(ret)); + } else if (!ls_id.is_valid()) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("get invalid args", K(ret), K(ls_id)); + } else if (OB_FAIL(ObStorageHADagUtils::get_ls(ls_id, ls_handle))) { + LOG_WARN("failed to get ls", K(ret), K(ls_id)); + } + return ret; +} + +bool ObStorageHAGetMemberHelper::check_tenant_primary() +{ + return MTL_TENANT_ROLE_CACHE_IS_PRIMARY(); +} +/** + * ------------------------------ObStorageHASrcProvider--------------------- + */ +ObStorageHASrcProvider::ObStorageHASrcProvider() + : is_inited_(false), + tenant_id_(OB_INVALID_ID), + ls_id_(), + type_(ObMigrationOpType::MAX_LS_OP), + local_clog_checkpoint_scn_(), + palf_parent_checkpoint_scn_(), + member_helper_(nullptr), + storage_rpc_(nullptr), + policy_type_(ChooseSourcePolicy::IDC) +{} + +ObStorageHASrcProvider::~ObStorageHASrcProvider() +{ + member_helper_ = nullptr; + storage_rpc_ = nullptr; +} + +int ObStorageHASrcProvider::init(const uint64_t tenant_id, const share::ObLSID &ls_id, + const ObMigrationOpType::TYPE &type, const share::SCN &local_clog_checkpoint_scn, + const ChooseSourcePolicy policy_type, + const common::ObReplicaType replica_type, storage::ObStorageRpc *storage_rpc, + ObStorageHAGetMemberHelper *member_helper) +{ + int ret = OB_SUCCESS; + if (is_inited_) { + ret = OB_INIT_TWICE; + LOG_WARN("storage ha src provider init 
twice", K(ret)); + } else if (OB_INVALID_ID == tenant_id || !ls_id.is_valid() || OB_ISNULL(storage_rpc) + || !local_clog_checkpoint_scn.is_valid() + || policy_type < ChooseSourcePolicy::IDC + || policy_type >= ChooseSourcePolicy::MAX_POLICY + || replica_type < common::ObReplicaType::REPLICA_TYPE_FULL || replica_type >= common::ObReplicaType::REPLICA_TYPE_MAX + || type < ObMigrationOpType::ADD_LS_OP || type >= ObMigrationOpType::MAX_LS_OP + || OB_ISNULL(member_helper)) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("get invalid argument", K(ret), K(tenant_id), K(ls_id), K(type), + KP(storage_rpc), K(replica_type), K(local_clog_checkpoint_scn), KP(member_helper)); + } else { + storage_rpc_ = storage_rpc; + member_helper_ = member_helper; + if (OB_FAIL(init_palf_parent_checkpoint_scn_(tenant_id, ls_id, local_clog_checkpoint_scn, replica_type))) { + LOG_WARN("failed to init palf parent checkpoint scn", K(ret), K(tenant_id), K(ls_id), + K(local_clog_checkpoint_scn), K(replica_type), KP(storage_rpc_)); + } else { + tenant_id_ = tenant_id; + ls_id_ = ls_id; + type_ = type; + local_clog_checkpoint_scn_ = local_clog_checkpoint_scn; + policy_type_ = policy_type; + } + } + return ret; +} + +// TODO(zhixing.yh) : 1.learner_list and addr_list have data, it is unreasonable +// 2. For F replica, learner_list is empty, it is unreasonable +int ObStorageHASrcProvider::get_replica_addr_list( + const common::ObReplicaMember &dst, + common::ObAddr &leader_addr, common::GlobalLearnerList &learner_list, + common::ObIArray &addr_list) +{ + int ret = OB_SUCCESS; + leader_addr.reset(); + addr_list.reset(); + learner_list.reset(); + common::ObArray learner_addr_array; + bool need_learner_list = false; + if (IS_NOT_INIT) { + ret = OB_NOT_INIT; + LOG_WARN("ObStorageHASrcProvider do not init", K(ret)); + } else if (!dst.is_valid()) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument!", K(ret), K(dst)); + } else { + if (common::ObReplicaType::REPLICA_TYPE_FULL == dst.get_replica_type()) { + need_learner_list = false; + } else if (common::ObReplicaType::REPLICA_TYPE_READONLY == dst.get_replica_type()) { + need_learner_list = true; + } else { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("unexpected dst replica type", K(ret), K(dst)); + } + + if (FAILEDx(member_helper_->get_ls_member_list_and_learner_list(tenant_id_, ls_id_, need_learner_list, + leader_addr, learner_list, addr_list))) { + LOG_WARN("failed to fetch ls leader member list and learner list", K(ret), K_(tenant_id), K_(ls_id), + K(leader_addr), K(need_learner_list)); + } else if (learner_list.is_valid()) { + if (OB_FAIL(learner_list.get_addr_array(learner_addr_array))) { + LOG_WARN("failed to get addr array from learner list", K(ret), K(learner_list)); + } else if (OB_FAIL(common::append(addr_list, learner_addr_array))) { + LOG_WARN("failed to append addr list", K(ret), K(addr_list), K(learner_addr_array)); + } + } } return ret; } @@ -162,116 +343,961 @@ int ObStorageHASrcProvider::fetch_ls_meta_info_(const uint64_t tenant_id, const } else if (OB_INVALID_ID == tenant_id || !ls_id.is_valid() || !member_addr.is_valid()) { ret = OB_INVALID_ARGUMENT; LOG_WARN("get invalid args", K(ret), K(tenant_id), K(ls_id), K(member_addr)); - } else if (OB_FAIL(storage_rpc_->post_ls_meta_info_request(tenant_id, src_info, ls_id, ls_meta_info))) { - LOG_WARN("failed to post ls info request", K(ret), K(tenant_id), K(src_info), K(ls_id)); + } else { + uint32_t renew_count = 0; + const uint32_t max_renew_count = 3; + const int64_t retry_us = 200 * 1000; + const int64_t start_ts = 
ObTimeUtility::current_time(); +    const int64_t DEFAULT_GET_LS_META_RPC_TIMEOUT = 1 * 60 * 1000 * 1000L; +    do { +      if (OB_FAIL(storage_rpc_->post_ls_meta_info_request(tenant_id, src_info, ls_id, ls_meta_info))) { +        if (renew_count++ < max_renew_count) { // retry three times +          LOG_WARN("failed to post ls info request", K(ret), K(tenant_id), K(ls_id), K(src_info), KP(storage_rpc_)); +          if (ObTimeUtility::current_time() - start_ts > DEFAULT_GET_LS_META_RPC_TIMEOUT) { +            renew_count = max_renew_count; +          } else { +            ob_usleep(retry_us); +          } +        } +      } else { +        LOG_INFO("succeed to get ls meta", K(tenant_id), K(ls_id), K(ls_meta_info)); +        break; +      } +    } while (renew_count < max_renew_count); }   return ret; }  -int ObStorageHASrcProvider::inner_choose_ob_src_(const uint64_t tenant_id, const share::ObLSID &ls_id, -    const SCN &local_clog_checkpoint_scn, const common::ObIArray &addr_list, +int ObStorageHASrcProvider::check_replica_type_( +    const common::ObAddr &addr, +    const common::ObReplicaMember &dst, +    const common::GlobalLearnerList &learner_list, +    bool &is_replica_type_valid) +{ +  int ret = OB_SUCCESS; +  is_replica_type_valid = false; +  if (!addr.is_valid() || !dst.is_valid()) { +    ret = OB_INVALID_ARGUMENT; +    LOG_WARN("invalid argument!", K(ret), K(addr), K(dst)); +  } else if (learner_list.is_valid() && learner_list.contains(addr)) { // src is R +    if (common::ObReplicaType::REPLICA_TYPE_FULL == dst.get_replica_type()) { // dst is F +      is_replica_type_valid = false; +    } else if (common::ObReplicaType::REPLICA_TYPE_READONLY == dst.get_replica_type()) { +      is_replica_type_valid = true; +    } else { +      ret = OB_ERR_UNEXPECTED; +      LOG_WARN("unexpected dst replica type", K(ret), K(dst), K(learner_list)); +    } +  } else { // src is F +    if (common::ObReplicaType::REPLICA_TYPE_FULL == dst.get_replica_type() +        || common::ObReplicaType::REPLICA_TYPE_READONLY == dst.get_replica_type()) { +      is_replica_type_valid = true; +    } else { +      ret = OB_ERR_UNEXPECTED; +      LOG_WARN("unexpected dst replica type", K(ret), K(dst), K(learner_list)); +    } +  } +  return ret; +} + +int ObStorageHASrcProvider::init_palf_parent_checkpoint_scn_(const uint64_t tenant_id, const share::ObLSID &ls_id, +    const share::SCN &local_clog_checkpoint_scn, const common::ObReplicaType replica_type) +{ +  int ret = OB_SUCCESS; +  // A min local_clog_checkpoint_scn means this is the first migration run. +  // It is not min when the operation is a rebuild or a migration retry. +  // The first migration run does not check the parent checkpoint scn; +  // rebuild and migration retry compare the parent checkpoint scn with the replica checkpoint scn. +  if (local_clog_checkpoint_scn.is_min()) { +    palf_parent_checkpoint_scn_.set_min(); +    LOG_INFO("palf_parent_checkpoint_scn_ set min", K(local_clog_checkpoint_scn)); +  } else { +    uint32_t renew_count = 0; +    const uint32_t max_renew_count = 3; +    const int64_t retry_us = 200 * 1000; +    const int64_t start_ts = ObTimeUtility::current_time(); +    const int64_t DEFAULT_GET_PARENT_CHECKPOINT_TIMEOUT = 1 * 60 * 1000 * 1000L; +    do { +      if (OB_FAIL(get_palf_parent_checkpoint_scn_from_rpc_(tenant_id, ls_id, replica_type, palf_parent_checkpoint_scn_))) { +        if (renew_count++ < max_renew_count) { // retry three times +          LOG_WARN("failed to get parent checkpoint scn", K(ret), K(tenant_id), K(ls_id), K(replica_type), KP(storage_rpc_)); +          if (ObTimeUtility::current_time() - start_ts > DEFAULT_GET_PARENT_CHECKPOINT_TIMEOUT) { +            renew_count = max_renew_count; +          } else { +            ob_usleep(retry_us); +          } +        } +      } else { +        LOG_INFO("get parent checkpoint scn", K(tenant_id), K(ls_id)); +        break; +      } +    } while (renew_count < max_renew_count); +    // If getting the parent or the RPC fails, overwrite ret and set the scn to min, +    // so that migration can still run and upgrade compatibility is preserved. +    if (OB_FAIL(ret)) { +      palf_parent_checkpoint_scn_.set_min(); +      ret = OB_SUCCESS; +      LOG_INFO("after retry palf_parent_checkpoint_scn_ set min", K(local_clog_checkpoint_scn)); +    } +  } +  return ret; +} + +int ObStorageHASrcProvider::get_palf_parent_checkpoint_scn_from_rpc_(const uint64_t tenant_id, const share::ObLSID &ls_id, +    const common::ObReplicaType replica_type, share::SCN &parent_checkpoint_scn) +{ +  int ret = OB_SUCCESS; +  common::ObAddr parent_addr; +  obrpc::ObFetchLSMetaInfoResp ls_info; +  parent_checkpoint_scn.reset(); +  if (OB_INVALID_ID == tenant_id || !ls_id.is_valid()) { +    ret = OB_INVALID_ARGUMENT; +    LOG_WARN("get invalid args", K(ret), K(tenant_id), K(ls_id)); +  } else if (OB_FAIL(get_palf_parent_addr_(tenant_id, ls_id, replica_type, parent_addr))) { +    LOG_WARN("failed to get palf parent addr", K(ret), K(tenant_id), K(ls_id), K(replica_type)); +  } else if (OB_FAIL(fetch_ls_meta_info_(tenant_id, ls_id, parent_addr, ls_info))) { +    LOG_WARN("failed to fetch palf parent ls meta", K(ret), K(tenant_id), K(ls_id), K(parent_addr), KP(storage_rpc_)); +  } else { +    parent_checkpoint_scn = ls_info.ls_meta_package_.ls_meta_.get_clog_checkpoint_scn(); +    LOG_INFO("succeed to get palf parent checkpoint scn", K(tenant_id), K(ls_id), K(parent_addr), K(parent_checkpoint_scn)); +  } +  return ret; +} + +int ObStorageHASrcProvider::get_palf_parent_addr_(const uint64_t tenant_id, const share::ObLSID &ls_id, +    const common::ObReplicaType replica_type, common::ObAddr &parent_addr) +{ +  int ret = OB_SUCCESS; +  ObLSHandle ls_handle; +  ObLS *ls = nullptr; +  parent_addr.reset(); +  if (OB_FAIL(member_helper_->get_ls(ls_id, ls_handle))) { +    LOG_WARN("failed to get ls", K(ret), K(ls_id)); +  } else if (OB_ISNULL(ls = ls_handle.get_ls())) { +    ret = OB_ERR_UNEXPECTED; +    LOG_WARN("ls should not be NULL", K(ret), KP(ls), K(ls_id)); +  } else if (common::ObReplicaType::REPLICA_TYPE_FULL == replica_type) { +    if (OB_FAIL(member_helper_->get_ls_leader(tenant_id, ls_id, parent_addr))) { +      LOG_WARN("failed to get leader addr", K(ret), K(tenant_id), K(ls_id)); +    } +  } else if (common::ObReplicaType::REPLICA_TYPE_READONLY == replica_type) { +    if (OB_FAIL(ls->get_log_handler()->get_parent(parent_addr))) 
{ + LOG_WARN("failed to get parent addr", K(ret), K(tenant_id), K(ls_id)); + } + } else { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("unexpected dst replica type", K(ret), K(replica_type)); + } + return ret; +} + +int ObStorageHASrcProvider::check_replica_validity( + const common::ObAddr &addr, const common::ObReplicaMember &dst, + const common::GlobalLearnerList &learner_list, obrpc::ObFetchLSMetaInfoResp &ls_info) +{ + int ret = OB_SUCCESS; + ls_info.reset(); + bool is_replica_type_valid; + if (!is_inited_) { + ret = OB_NOT_INIT; + LOG_WARN("ObStorageHASrcProvider is not init.", K(ret)); + } else if (!addr.is_valid() || !dst.is_valid()) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument!", K(ret), K(addr), K(dst)); + } else if (OB_FAIL(fetch_ls_meta_info_(tenant_id_, ls_id_, addr, ls_info))) { + LOG_WARN("failed to fetch ls meta info", K(ret), K(tenant_id_), K(ls_id_), K(addr)); + } else if (!ls_info.is_valid()) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("ls_info is invalid!", K(ret), K(tenant_id_), K(ls_id_), K(addr), K(ls_info)); + } else if (OB_FAIL(ObStorageHAUtils::check_replica_validity(ls_info))) { + LOG_WARN("failed to check replica validity", K(ret), K(ls_info)); + } else if (local_clog_checkpoint_scn_ > ls_info.ls_meta_package_.ls_meta_.get_clog_checkpoint_scn()) { + ret = OB_DATA_SOURCE_NOT_VALID; + LOG_WARN("do not choose this src, local checkpoint scn check failed", K(ret), K(tenant_id_), K(ls_id_), K(addr), K(dst), K(learner_list), + K(local_clog_checkpoint_scn_), K(ls_info)); + } else if (palf_parent_checkpoint_scn_ > ls_info.ls_meta_package_.ls_meta_.get_clog_checkpoint_scn()) { + ret = OB_DATA_SOURCE_NOT_VALID; + LOG_WARN("do not choose this src, parent checkpoint scn check failed", K(ret), K(tenant_id_), K(ls_id_), K(addr), K(dst), K(learner_list), + K(palf_parent_checkpoint_scn_), K(ls_info)); + } else if (OB_FAIL(check_replica_type_(addr, dst, learner_list, is_replica_type_valid))) { + LOG_WARN("failed to check replica type", K(ret), K(tenant_id_), K(ls_id_), K(addr), + K(dst), K(learner_list), K(ls_info)); + } else if (!is_replica_type_valid) { + ret = OB_DATA_SOURCE_NOT_VALID; + LOG_WARN("do not choose this src, replica type check failed", K(ret), K(tenant_id_), K(ls_id_), K(addr), K(dst), K(learner_list), K(ls_info)); + } + return ret; +} + +const char *ObStorageHASrcProvider::ObChooseSourcePolicyStr[static_cast(ChooseSourcePolicy::MAX_POLICY)] = { + "idc", + "region", + "checkpoint", + "recommend" +}; + +const char *ObStorageHASrcProvider::get_policy_str(const ChooseSourcePolicy policy_type) +{ + const char *str = ""; + if (policy_type >= ChooseSourcePolicy::MAX_POLICY || policy_type < ChooseSourcePolicy::IDC) { + str = "invalid_type"; + } else { + str = ObChooseSourcePolicyStr[static_cast(policy_type)]; + } + return str; +} + +int ObStorageHASrcProvider::check_tenant_primary(bool &is_primary) +{ + int ret = OB_SUCCESS; + is_primary = false; + if (!is_inited_) { + ret = OB_NOT_INIT; + LOG_WARN("ObStorageHASrcProvider is not init.", K(ret)); + } else { + is_primary = member_helper_->check_tenant_primary(); + } + return ret; +} +/** + * ------------------------------ObMigrationSrcByLocationProvider--------------------- + */ +ObMigrationSrcByLocationProvider::ObMigrationSrcByLocationProvider() + : ObStorageHASrcProvider(), + locality_manager_(nullptr) +{ +} + +ObMigrationSrcByLocationProvider::~ObMigrationSrcByLocationProvider() +{ +} + +int ObMigrationSrcByLocationProvider::init(const uint64_t tenant_id, const share::ObLSID &ls_id, + const ObMigrationOpType::TYPE 
&type, const share::SCN &local_clog_checkpoint_scn, + const ChooseSourcePolicy policy_type, + const common::ObReplicaType replica_type, + storage::ObStorageRpc *storage_rpc, + ObStorageHAGetMemberHelper *member_helper) +{ + int ret = OB_SUCCESS; + if (is_inited_) { + ret = OB_INIT_TWICE; + LOG_WARN("ObMigrationSrcByLocationProvider init twice", K(ret)); + } else if (OB_INVALID_ID == tenant_id || !ls_id.is_valid() || OB_ISNULL(storage_rpc) + || !local_clog_checkpoint_scn.is_valid() + || policy_type < ChooseSourcePolicy::IDC + || policy_type >= ChooseSourcePolicy::MAX_POLICY + || replica_type < common::ObReplicaType::REPLICA_TYPE_FULL || replica_type >= common::ObReplicaType::REPLICA_TYPE_MAX + || type < ObMigrationOpType::ADD_LS_OP || type >= ObMigrationOpType::MAX_LS_OP + || OB_ISNULL(member_helper)) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("get invalid argument", K(ret), K(tenant_id), K(ls_id), K(type), + K(local_clog_checkpoint_scn), K(policy_type), K(replica_type), KP(storage_rpc), KP(member_helper)); + } else if (OB_FAIL(ObStorageHASrcProvider::init(tenant_id, ls_id, type, local_clog_checkpoint_scn, policy_type, + replica_type, storage_rpc, member_helper))) { + LOG_WARN("failed to init src provider", K(ret), K(tenant_id), K(ls_id), K(type), K(local_clog_checkpoint_scn), + K(replica_type), KP(storage_rpc), K(policy_type), KP(member_helper)); + } else { + locality_manager_ = GCTX.locality_manager_; + is_inited_ = true; + } + return ret; +} + +int ObMigrationSrcByLocationProvider::choose_ob_src( + const ObMigrationOpArg &arg, common::ObAddr &chosen_src_addr) +{ + int ret = OB_SUCCESS; + common::ObAddr leader_addr; + common::ObArray addr_list; + common::GlobalLearnerList learner_list; + if (!is_inited_) { + ret = OB_NOT_INIT; + LOG_WARN("ObMigrationSrcByLocationProvider is not init.", K(ret)); + } else if (!arg.is_valid()) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument!", K(ret), K(arg)); + } else if (OB_FAIL(get_replica_addr_list(arg.dst_, leader_addr, learner_list, addr_list))) { + LOG_WARN("failed to get leader_addr, learner_list and addr_list", K(ret), "tenant_id", get_tenant_id(), "ls_id", get_ls_id()); + } else if (OB_FAIL(inner_choose_ob_src( + leader_addr, learner_list, addr_list, arg, chosen_src_addr))) { + LOG_WARN("failed to inner choose ob src", K(ret), "tenant_id", get_tenant_id(), "ls_id", get_ls_id(), + K(leader_addr), K(learner_list), K(addr_list), K(arg)); + } + return ret; +} + +int ObMigrationSrcByLocationProvider::inner_choose_ob_src( + const common::ObAddr &leader_addr, const common::GlobalLearnerList &learner_list, + const common::ObIArray &addr_list, const ObMigrationOpArg &arg, + common::ObAddr &choosen_src_addr) +{ + int ret = OB_SUCCESS; + common::ObArray sorted_addr_list; + int64_t idc_end_index = 0; + int64_t region_end_index = 0; + choosen_src_addr.reset(); + if (!is_inited_) { + ret = OB_NOT_INIT; + LOG_WARN("ObMigrationSrcByLocationProvider is not init.", K(ret)); + } else if (!leader_addr.is_valid() || addr_list.empty() || !arg.is_valid()) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument!", K(ret), K(addr_list), K(arg)); + } else if (OB_FAIL(divide_addr_list(addr_list, arg.dst_, sorted_addr_list, idc_end_index, region_end_index))) { + LOG_WARN("failed to divide addr", K(ret), K(addr_list), K(arg.dst_)); + } else { + bool is_idc_policy = ChooseSourcePolicy::IDC == get_policy_type() ? true : false; + int64_t region_start_index = is_idc_policy ? 
idc_end_index + 1 : 0; +    if (is_idc_policy && OB_FAIL(find_src(sorted_addr_list, 0/*idc_start_index*/, idc_end_index, +        learner_list, leader_addr, arg.dst_, choosen_src_addr))) { // find in same idc +      LOG_WARN("failed to find source in same idc scope", K(ret), K(sorted_addr_list), +          K(idc_end_index), K(learner_list), K(leader_addr), K(arg.dst_)); +    } else if (!choosen_src_addr.is_valid() && OB_FAIL(find_src(sorted_addr_list, region_start_index, region_end_index, +        learner_list, leader_addr, arg.dst_, choosen_src_addr))) { // find in same region +      LOG_WARN("failed to find source in same region scope", K(ret), K(sorted_addr_list), +          K(region_start_index), K(region_end_index), K(learner_list), K(leader_addr), K(arg.dst_)); +    } else if (!choosen_src_addr.is_valid() && OB_FAIL(find_src(sorted_addr_list, region_end_index + 1, addr_list.count() - 1, +        learner_list, leader_addr, arg.dst_, choosen_src_addr))) { // find in different region +      LOG_WARN("failed to find source in different region scope", K(ret), K(sorted_addr_list), +          K(region_end_index), K(addr_list.count()), K(learner_list), K(leader_addr), K(arg.dst_)); +    } else if (!choosen_src_addr.is_valid()) { +      ret = OB_DATA_SOURCE_NOT_EXIST; +      LOG_WARN("no available data source exist in all regions", K(ret), "tenant_id", get_tenant_id(), +          "ls_id", get_ls_id(), K(leader_addr), K(learner_list), K(addr_list), K(arg)); +    } +  } +  return ret; +} + +int ObMigrationSrcByLocationProvider::divide_addr_list( +    const common::ObIArray &addr_list, +    const common::ObReplicaMember &dst, +    common::ObIArray &sorted_addr_list, +    int64_t &idc_end_index, +    int64_t &region_end_index) +{ +  int ret = OB_SUCCESS; +  sorted_addr_list.reset(); +  common::ObRegion dst_region; +  common::ObIDC dst_idc; +  common::ObRegion src_region; +  common::ObIDC src_idc; +  int64_t same_idc_count = 0; +  int64_t same_region_count = 0; +  common::ObArray same_idc_addr; +  common::ObArray same_region_addr; +  common::ObArray different_region_addr; +  idc_end_index = 0; +  region_end_index = 0; +  if (!is_inited_) { +    ret = OB_NOT_INIT; +    LOG_WARN("ObMigrationSrcByLocationProvider is not init.", K(ret)); +  } else if (addr_list.empty() || !dst.is_valid()) { +    ret = OB_INVALID_ARGUMENT; +    LOG_WARN("invalid argument!", K(ret), K(addr_list), K(dst)); +  } else if (OB_ISNULL(locality_manager_)) { +    ret = OB_ERR_UNEXPECTED; +    LOG_ERROR("locality manager is null", K(ret), KP(locality_manager_)); +  } else if (OB_FAIL(get_server_region_and_idc_(dst.get_server(), dst_region, dst_idc))) { +    LOG_WARN("failed to get dst region and idc", K(ret), "addr", dst.get_server()); +  } else { +    LOG_INFO("succeed to get dst region and idc", K(dst_idc), K(dst_region)); +    for (int64_t i = 0; OB_SUCC(ret) && i < addr_list.count(); ++i) { +      if (OB_FAIL(get_server_region_and_idc_(addr_list.at(i), src_region, src_idc))) { +        LOG_WARN("failed to get src region and idc", K(ret), "addr", addr_list.at(i), K(dst)); +      } else if (src_region == dst_region && src_idc == dst_idc) { // same region and same idc +        same_idc_count++; +        if (OB_FAIL(same_idc_addr.push_back(addr_list.at(i)))) { +          LOG_WARN("failed to add same idc addr", K(ret), "addr", addr_list.at(i), K(dst)); +        } +      } else if (src_region == dst_region) { +        same_region_count++; +        if (OB_FAIL(same_region_addr.push_back(addr_list.at(i)))) { +          LOG_WARN("failed to add same region addr", K(ret), "addr", addr_list.at(i), K(dst)); +        } +      } else if (src_region != dst_region) { +        if (OB_FAIL(different_region_addr.push_back(addr_list.at(i)))) { +          LOG_WARN("failed to add different region addr", 
K(ret), "addr", addr_list.at(i), K(dst)); + } + } else { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("unexpected error", K(ret), K(src_idc), K(dst_idc), K(src_region), K(dst)); + } + } + if (OB_FAIL(ret)) { + // do nothing + } else if (OB_FAIL(common::append(sorted_addr_list, same_idc_addr))) { + LOG_WARN("failed to append same_idc_addr to sorted_addr_list", K(ret), K(sorted_addr_list), K(same_idc_addr)); + } else if (OB_FAIL(common::append(sorted_addr_list, same_region_addr))) { + LOG_WARN("failed to append same_region_addr to sorted_addr_list", K(ret), K(sorted_addr_list), K(same_region_addr)); + } else if (OB_FAIL(common::append(sorted_addr_list, different_region_addr))) { + LOG_WARN("failed to append different_region_addr to sorted_addr_list", K(ret), K(sorted_addr_list), K(different_region_addr)); + } else { + idc_end_index = same_idc_count - 1; + region_end_index = same_idc_count + same_region_count - 1; + LOG_INFO("succeed to divide addr list", K(ret), K(sorted_addr_list), K(same_idc_addr), + K(same_region_addr), K(different_region_addr), K(idc_end_index), K(region_end_index)); + } + } + return ret; +} + +int ObMigrationSrcByLocationProvider::find_src( + const common::ObIArray &addr_list, + const int64_t start_index, + const int64_t end_index, + const common::GlobalLearnerList &learner_list, + const common::ObAddr &leader_addr, + const common::ObReplicaMember &dst, common::ObAddr &choosen_src_addr) { int ret = OB_SUCCESS; int tmp_ret = OB_SUCCESS; int64_t choose_member_idx = -1; - SCN max_clog_checkpoint_scn = SCN::min_scn(); - for (int64_t i = 0; OB_SUCC(ret) && i < addr_list.count(); ++i) { - const common::ObAddr &addr = addr_list.at(i); - obrpc::ObFetchLSMetaInfoResp ls_info; - ObMigrationStatus migration_status; - share::ObLSRestoreStatus restore_status; - if (OB_TMP_FAIL(fetch_ls_meta_info_(tenant_id, ls_id, addr, ls_info))) { - LOG_WARN("failed to fetch ls meta info", K(tmp_ret), K(tenant_id), K(ls_id), K(addr)); - } else if (OB_FAIL(ObStorageHAUtils::check_server_version(ls_info.version_))) { - if (OB_MIGRATE_NOT_COMPATIBLE == ret) { - LOG_INFO("do not choose this src", K(ret), K(tenant_id), K(ls_id), K(ls_info)); - ret = OB_SUCCESS; + bool is_leader = false; + obrpc::ObFetchLSMetaInfoResp ls_info; + common::ObArray candidate_addr_list; + int64_t leader_index = -1; + choosen_src_addr.reset(); + bool is_primary = false; + LOG_INFO("start find source", K(start_index), K(end_index)); + if (!is_inited_) { + ret = OB_NOT_INIT; + LOG_WARN("ObMigrationSrcByLocationProvider is not init.", K(ret)); + } else if (addr_list.empty() || start_index < 0 || end_index < -1 + || !leader_addr.is_valid() || !dst.is_valid()) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument!", K(ret), K(addr_list), K(start_index), K(end_index), K(leader_addr), K(dst)); + } else if (OB_FAIL(check_tenant_primary(is_primary))) { + LOG_WARN("failed to check tenant primary", K(ret), "tenant_id", get_tenant_id()); + } else { + for (int64_t i = start_index; OB_SUCC(ret) && i <= end_index && i < addr_list.count(); ++i) { + if (OB_TMP_FAIL(check_replica_validity(addr_list.at(i), dst, learner_list, ls_info))) { + if (OB_DATA_SOURCE_NOT_EXIST == tmp_ret) { + // overwrite ret + ret = tmp_ret; + LOG_WARN("failed to check replica validity", K(ret), K(tmp_ret), "tenant_id", get_tenant_id(), + "ls_id", get_ls_id(), "addr", addr_list.at(i), K(dst), K(learner_list)); + break; + } else { + LOG_WARN("this address is not suitable.", K(ret), K(tmp_ret), "tenant_id", get_tenant_id(), + "ls_id", get_ls_id(), "addr", addr_list.at(i), 
K(dst), K(learner_list)); + } } else { - LOG_WARN("failed to check version", K(ret), K(tenant_id), K(ls_id), K(ls_info)); - } - } else if (!ObReplicaTypeCheck::is_full_replica(REPLICA_TYPE_FULL)) { - LOG_INFO("do not choose this src", K(tenant_id), K(ls_id), K(addr), K(ls_info)); - } else if (local_clog_checkpoint_scn > ls_info.ls_meta_package_.ls_meta_.get_clog_checkpoint_scn()) { - LOG_INFO("do not choose this src", K(tenant_id), K(ls_id), K(addr), K(local_clog_checkpoint_scn), K(ls_info)); - } else if (OB_FAIL(ls_info.ls_meta_package_.ls_meta_.get_migration_status(migration_status))) { - LOG_WARN("failed to get migration status", K(ret), K(ls_info)); - } else if (!ObMigrationStatusHelper::check_can_migrate_out(migration_status)) { - LOG_INFO("do not choose this src", K(tenant_id), K(ls_id), K(addr), K(ls_info)); - } else if (OB_FAIL(ls_info.ls_meta_package_.ls_meta_.get_restore_status(restore_status))) { - LOG_WARN("failed to get restore status", K(ret), K(ls_info)); - } else if (restore_status.is_failed()) { - choose_member_idx = -1; - LOG_INFO("some ls replica restore failed, can not migrate", K(ls_info)); - break; - } else { - if (ls_info.ls_meta_package_.ls_meta_.get_clog_checkpoint_scn() > max_clog_checkpoint_scn) { - max_clog_checkpoint_scn = ls_info.ls_meta_package_.ls_meta_.get_clog_checkpoint_scn(); - choose_member_idx = i; - } else if (ls_info.ls_meta_package_.ls_meta_.get_clog_checkpoint_scn() == max_clog_checkpoint_scn - && !ls_info.has_transfer_table_) { - max_clog_checkpoint_scn = ls_info.ls_meta_package_.ls_meta_.get_clog_checkpoint_scn(); - choose_member_idx = i; + if (addr_list.at(i) != leader_addr || !is_primary) { + if (OB_FAIL(candidate_addr_list.push_back(i))) { + LOG_WARN("failed to push back to candidate_addr_list", K(ret), "index", i, K(addr_list)); + } + } else { + leader_index = i; + } } } - } - if (OB_SUCC(ret)) { - if (-1 == choose_member_idx) { - ret = OB_DATA_SOURCE_NOT_EXIST; - LOG_WARN("no available data source exist", K(ret), K(tenant_id), K(ls_id), K(addr_list)); - } else { - choosen_src_addr = addr_list.at(choose_member_idx); + if (OB_SUCC(ret)) { + if (candidate_addr_list.empty() && -1 == leader_index) { + LOG_INFO("no available data source exist in this area", K(ret), "tenant_id", get_tenant_id(), + "ls_id", get_ls_id(), K(addr_list), K(dst), K(learner_list)); + } else if (!candidate_addr_list.empty()) { + int64_t num = candidate_addr_list.count(); + choosen_src_addr = addr_list.at(candidate_addr_list.at(rand() % num)); + LOG_INFO("found available data follower source in this area", "tenant_id", get_tenant_id(), + "ls_id", get_ls_id(), K(addr_list), K(dst), K(learner_list), K(choosen_src_addr)); + } else { + choosen_src_addr = addr_list.at(leader_index); + LOG_INFO("found available data leader source in this area", "tenant_id", get_tenant_id(), + "ls_id", get_ls_id(), K(addr_list), K(dst), K(learner_list), K(choosen_src_addr), K(leader_index)); + } } } return ret; } -int ObStorageHASrcProvider::get_ls_member_list_( - const uint64_t tenant_id, - const share::ObLSID &ls_id, - const common::ObAddr &leader_addr, - common::ObIArray &addr_list) +int ObMigrationSrcByLocationProvider::get_server_region_and_idc_( + const common::ObAddr &addr, common::ObRegion ®ion, common::ObIDC &idc) { int ret = OB_SUCCESS; - addr_list.reset(); - ObLSHandle ls_handle; - ObLS *ls = nullptr; - ObStorageHASrcInfo src_info; - src_info.src_addr_ = leader_addr; - src_info.cluster_id_ = GCONF.cluster_id; - obrpc::ObFetchLSMemberListInfo member_info; - ObLSService *ls_service = 
nullptr; + region.reset(); + idc.reset(); + if (OB_ISNULL(locality_manager_)) { + ret = OB_ERR_UNEXPECTED; + LOG_ERROR("locality manager is null", K(ret), KP(locality_manager_)); + } else if (OB_FAIL(locality_manager_->get_server_region(addr, region))) { + LOG_WARN("failed to get src region", K(ret), K(addr)); + } else if (OB_FAIL(locality_manager_->get_server_idc(addr, idc))) { + LOG_WARN("failed to get src idc", K(ret), K(addr)); + } else { + LOG_INFO("succeed to get region and idc", K(addr), K(region), K(idc)); + } + return ret; +} - if (OB_INVALID_ID == tenant_id || !ls_id.is_valid() || !leader_addr.is_valid()) { +void ObMigrationSrcByLocationProvider::set_locality_manager_(ObLocalityManager *locality_manager) +{ + int ret = OB_SUCCESS; + if (OB_ISNULL(locality_manager)) { ret = OB_INVALID_ARGUMENT; - LOG_WARN("get ls member list get invalid argument", K(ret), K(tenant_id), K(ls_id), K(leader_addr)); - } else if (OB_ISNULL(ls_service = MTL(ObLSService*))) { + LOG_WARN("argument invalid, locality_manager is null", K(ret)); + } else { + locality_manager_ = locality_manager; + } +} +/** + * ------------------------------ObMigrationSrcByCheckpointProvider--------------------- + */ +ObMigrationSrcByCheckpointProvider::ObMigrationSrcByCheckpointProvider() + : ObStorageHASrcProvider() +{ +} + +ObMigrationSrcByCheckpointProvider::~ObMigrationSrcByCheckpointProvider() +{ +} + +int ObMigrationSrcByCheckpointProvider::init(const uint64_t tenant_id, const share::ObLSID &ls_id, + const ObMigrationOpType::TYPE &type, const share::SCN &local_clog_checkpoint_scn, + const ChooseSourcePolicy policy_type, + const common::ObReplicaType replica_type, storage::ObStorageRpc *storage_rpc, + ObStorageHAGetMemberHelper *member_helper) +{ + int ret = OB_SUCCESS; + if (is_inited_) { + ret = OB_INIT_TWICE; + LOG_WARN("ObMigrationSrcByCheckpointProvider init twice", K(ret)); + } else if (OB_INVALID_ID == tenant_id || !ls_id.is_valid() || OB_ISNULL(storage_rpc) + || !local_clog_checkpoint_scn.is_valid() + || policy_type < ChooseSourcePolicy::IDC + || policy_type >= ChooseSourcePolicy::MAX_POLICY + || replica_type < common::ObReplicaType::REPLICA_TYPE_FULL || replica_type >= common::ObReplicaType::REPLICA_TYPE_MAX + || type < ObMigrationOpType::ADD_LS_OP || type >= ObMigrationOpType::MAX_LS_OP + || OB_ISNULL(member_helper)) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("get invalid argument", K(ret), K(tenant_id), K(ls_id), K(type), K(local_clog_checkpoint_scn), + K(replica_type), KP(storage_rpc), KP(member_helper)); + } else if (OB_FAIL(ObStorageHASrcProvider::init(tenant_id, ls_id, type, local_clog_checkpoint_scn, policy_type, + replica_type, storage_rpc, member_helper))) { + LOG_WARN("failed to init src provider", K(ret), K(tenant_id), K(ls_id), K(type), K(local_clog_checkpoint_scn), + K(replica_type), KP(storage_rpc), K(policy_type), KP(member_helper)); + } else { + is_inited_ = true; + } + return ret; +} + +int ObMigrationSrcByCheckpointProvider::choose_ob_src( + const ObMigrationOpArg &arg, common::ObAddr &chosen_src_addr) +{ + int ret = OB_SUCCESS; + common::ObAddr leader_addr; + common::ObArray addr_list; + common::GlobalLearnerList learner_list; + if (!is_inited_) { + ret = OB_NOT_INIT; + LOG_WARN("ObMigrationSrcByCheckpointProvider is not init.", K(ret)); + } else if (!arg.is_valid()) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument!", K(ret), K(arg)); + } else if (OB_FAIL(get_replica_addr_list(arg.dst_, leader_addr, learner_list, addr_list))) { + LOG_WARN("failed to get leader_addr, learner_list and 
addr_list", K(ret), "tenant_id", get_tenant_id(), "ls_id", get_ls_id()); + } else if (OB_FAIL(inner_choose_ob_src_( + leader_addr, learner_list, addr_list, arg, chosen_src_addr))) { + LOG_WARN("failed to inner choose ob src", K(ret), "tenant_id", get_tenant_id(), "ls_id", get_ls_id(), + K(leader_addr), K(learner_list), K(addr_list), K(arg)); + } + return ret; +} + +int ObMigrationSrcByCheckpointProvider::inner_choose_ob_src_( + const common::ObAddr &leader_addr, const common::GlobalLearnerList &learner_list, + const common::ObIArray &addr_list, const ObMigrationOpArg &arg, + common::ObAddr &choosen_src_addr) +{ + int ret = OB_SUCCESS; + int tmp_ret = OB_SUCCESS; + choosen_src_addr.reset(); + share::SCN max_clog_checkpoint_scn = share::SCN::min_scn(); + int64_t choose_member_idx = -1; + obrpc::ObFetchLSMetaInfoResp ls_info; + if (!is_inited_) { + ret = OB_NOT_INIT; + LOG_WARN("ObMigrationSrcByCheckpointProvider is not init.", K(ret)); + } else if (!leader_addr.is_valid() || addr_list.empty() || !arg.is_valid()) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument!", K(ret), K(leader_addr), K(addr_list), K(arg)); + } else { + for (int64_t i = 0; OB_SUCC(ret) && i < addr_list.count(); ++i) { + if (OB_TMP_FAIL(check_replica_validity(addr_list.at(i), arg.dst_, learner_list, ls_info))) { + // OB_DATA_SOURCE_NOT_EXIST make migration exit. It is used to return when check restore fail. + // Only check restore fail use OB_DATA_SOURCE_NOT_EXIST currently + if (OB_DATA_SOURCE_NOT_EXIST == tmp_ret) { + // overwrite ret + ret = tmp_ret; + LOG_WARN("failed to check replica validity", K(ret), K(tmp_ret), "tenant_id", get_tenant_id(), + "ls_id", get_ls_id(), "addr", addr_list.at(i), K(arg.dst_), K(learner_list)); + break; + } else { + LOG_WARN("this address is not suitable.", K(ret), K(tmp_ret), "tenant_id", get_tenant_id(), + "ls_id", get_ls_id(), "addr", addr_list.at(i), K(arg.dst_), K(learner_list)); + } + } else { + if (ls_info.ls_meta_package_.ls_meta_.get_clog_checkpoint_scn() >= max_clog_checkpoint_scn) { + max_clog_checkpoint_scn = ls_info.ls_meta_package_.ls_meta_.get_clog_checkpoint_scn(); + choose_member_idx = i; + } + } + } + if (OB_SUCC(ret)) { + if (-1 == choose_member_idx) { + ret = OB_DATA_SOURCE_NOT_EXIST; + LOG_WARN("no available data source exist", K(ret), "tenant_id", get_tenant_id(), + "ls_id", get_ls_id(), K(learner_list), K(addr_list)); + } else { + choosen_src_addr = addr_list.at(choose_member_idx); + } + } + } + return ret; +} +/** + * ------------------------------ObRSRecommendSrcProvider--------------------- + */ +ObRSRecommendSrcProvider::ObRSRecommendSrcProvider() + : ObStorageHASrcProvider() +{ +} + +ObRSRecommendSrcProvider::~ObRSRecommendSrcProvider() +{ +} + +int ObRSRecommendSrcProvider::init(const uint64_t tenant_id, const share::ObLSID &ls_id, + const ObMigrationOpType::TYPE &type, const share::SCN &local_clog_checkpoint_scn, + const ChooseSourcePolicy policy_type, + const common::ObReplicaType replica_type, storage::ObStorageRpc *storage_rpc, + ObStorageHAGetMemberHelper *member_helper) +{ + int ret = OB_SUCCESS; + if (is_inited_) { + ret = OB_INIT_TWICE; + LOG_WARN("ObRSRecommendSrcProvider init twice", K(ret)); + } else if (OB_INVALID_ID == tenant_id || !ls_id.is_valid() || OB_ISNULL(storage_rpc) + || !local_clog_checkpoint_scn.is_valid() + || policy_type < ChooseSourcePolicy::IDC + || policy_type >= ChooseSourcePolicy::MAX_POLICY + || replica_type < common::ObReplicaType::REPLICA_TYPE_FULL || replica_type >= common::ObReplicaType::REPLICA_TYPE_MAX + || type < 
ObMigrationOpType::ADD_LS_OP || type >= ObMigrationOpType::MAX_LS_OP + || OB_ISNULL(member_helper)) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("get invalid argument", K(ret), K(tenant_id), K(ls_id), K(type), K(local_clog_checkpoint_scn), K(replica_type), + KP(storage_rpc), KP(member_helper)); + } else if (OB_FAIL(ObStorageHASrcProvider::init(tenant_id, ls_id, type, local_clog_checkpoint_scn, policy_type, replica_type, + storage_rpc, member_helper))) { + LOG_WARN("failed to init src provider", K(ret), K(tenant_id), K(ls_id), K(type), K(local_clog_checkpoint_scn), + K(replica_type), KP(storage_rpc), K(policy_type), KP(member_helper)); + } else { + is_inited_ = true; + } + return ret; +} + +int ObRSRecommendSrcProvider::check_replica_validity_( + const int64_t cluster_id, + const common::ObIArray &addr_list, + const common::ObAddr &addr, const common::ObReplicaMember &dst, + const common::GlobalLearnerList &learner_list) +{ + int ret = OB_SUCCESS; + obrpc::ObFetchLSMetaInfoResp ls_info; + int64_t gconf_cluster_id = GCONF.cluster_id; + if (cluster_id != gconf_cluster_id) { ret = OB_ERR_UNEXPECTED; - LOG_WARN("failed to get ObLSService from MTL", K(ret), KP(ls_service)); - } else if (OB_FAIL(ls_service->get_ls(ls_id, ls_handle, ObLSGetMod::HA_MOD))) { - LOG_WARN("fail to get log stream", KR(ret), K(ls_id)); - } else if (OB_ISNULL(ls = ls_handle.get_ls())) { - ret = OB_ERR_UNEXPECTED; - LOG_WARN("ls should not be NULL", K(ret), KP(ls), K(tenant_id), K(ls_id)); - } else if (OB_FAIL(storage_rpc_->post_ls_member_list_request(tenant_id, src_info, ls_id, member_info))) { - LOG_WARN("failed to post ls member list request", K(ret), K(tenant_id), K(src_info), K(ls_id)); - //overwrite ret - member_info.reset(); - if (OB_FAIL(ls->get_log_handler()->get_election_leader(src_info.src_addr_))) { - LOG_WARN("failed to get election leader", K(ret), K(tenant_id), K(ls_id)); - } else if (OB_FAIL(storage_rpc_->post_ls_member_list_request(tenant_id, src_info, ls_id, member_info))) { - LOG_WARN("failed to post ls member list request", K(ret), K(tenant_id), K(src_info), K(ls_id)); + LOG_WARN("rs recommend source cluster id is invalid", K(ret), K(cluster_id), K(gconf_cluster_id)); + } else if (OB_FAIL(check_replica_validity(addr, dst, learner_list, ls_info))) { + LOG_WARN("failed to check replica validity", K(ret), K(addr), K(dst), K(learner_list)); + } else { + bool is_exist = false; + for (int64_t i = 0; OB_SUCC(ret) && i < addr_list.count(); i++) { + if (addr_list.at(i) == addr) { + is_exist = true; + break; + } + } + if (!is_exist) { + ret = OB_DATA_SOURCE_NOT_EXIST; + LOG_WARN("addr not in addr_list", K(ret), K(addr), K(addr_list)); + } + } + return ret; +} + +int ObRSRecommendSrcProvider::choose_ob_src( + const ObMigrationOpArg &arg, common::ObAddr &chosen_src_addr) +{ + int ret = OB_SUCCESS; + common::ObAddr leader_addr; + common::ObArray addr_list; + common::GlobalLearnerList learner_list; + if (!is_inited_) { + ret = OB_NOT_INIT; + LOG_WARN("ObRSRecommendSrcProvider is not init.", K(ret)); + } else if (!arg.is_valid()) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument!", K(ret), K(arg)); + } else if (OB_FAIL(get_replica_addr_list(arg.dst_, leader_addr, learner_list, addr_list))) { + LOG_WARN("failed to get leader_addr, learner_list and addr_list", K(ret), "tenant_id", get_tenant_id(), "ls_id", get_ls_id()); + } else if (OB_FAIL(check_replica_validity_(arg.cluster_id_, addr_list, arg.data_src_.get_server(), arg.dst_, learner_list))) { + LOG_WARN("failed to check replica validity", K(ret), 
K(addr_list), K(arg), K(learner_list)); + } else { + chosen_src_addr = arg.data_src_.get_server(); + } + return ret; +} +/** + * ------------------------------ObStorageHAChooseSrcHelper--------------------- + */ +ObStorageHAChooseSrcHelper::ObStorageHAChooseSrcHelper() + : provider_(nullptr), + storage_rpc_(nullptr), + allocator_(), + is_inited_(false) +{ +} + +ObStorageHAChooseSrcHelper::~ObStorageHAChooseSrcHelper() +{ + if (OB_NOT_NULL(provider_)) { + provider_->~ObStorageHASrcProvider(); + provider_ = nullptr; + } + storage_rpc_ = nullptr; +} + +int ObStorageHAChooseSrcHelper::init(const uint64_t tenant_id, const share::ObLSID &ls_id, + const share::SCN &local_clog_checkpoint_scn, const ObMigrationOpArg &arg, + const ObStorageHASrcProvider::ChooseSourcePolicy policy, storage::ObStorageRpc *storage_rpc, + ObStorageHAGetMemberHelper *member_helper) +{ + int ret = OB_SUCCESS; + if (is_inited_) { + ret = OB_INIT_TWICE; + LOG_WARN("storage ha src helper init twice", K(ret)); + } else if (OB_INVALID_ID == tenant_id || !ls_id.is_valid() + || !local_clog_checkpoint_scn.is_valid() || !arg.is_valid() + || policy >= ObStorageHASrcProvider::ChooseSourcePolicy::MAX_POLICY + || policy < ObStorageHASrcProvider::ChooseSourcePolicy::IDC + || OB_ISNULL(storage_rpc) + || OB_ISNULL(member_helper)) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument!", K(ret), K(tenant_id), K(ls_id), K(local_clog_checkpoint_scn), + K(policy), K(arg), KP(storage_rpc), KP(member_helper)); + } else { + switch (policy) { + case ObStorageHASrcProvider::ChooseSourcePolicy::IDC: + case ObStorageHASrcProvider::ChooseSourcePolicy::REGION: { + if (OB_FAIL(init_choose_source_by_location_provider_(tenant_id, ls_id, local_clog_checkpoint_scn, arg, policy, + storage_rpc, member_helper))) { + LOG_WARN("failed to init choose source by location provider", K(ret), K(tenant_id), + K(ls_id), K(local_clog_checkpoint_scn), K(arg), K(policy)); + } + break; + } + case ObStorageHASrcProvider::ChooseSourcePolicy::CHECKPOINT: { + if (OB_FAIL(init_choose_source_by_checkpoint_provider_(tenant_id, ls_id, local_clog_checkpoint_scn, arg, + storage_rpc, member_helper))) { + LOG_WARN("failed to init choose source by checkpoint provider", K(ret), K(tenant_id), + K(ls_id), K(local_clog_checkpoint_scn), K(arg), K(policy)); + } + break; + } + case ObStorageHASrcProvider::ChooseSourcePolicy::RECOMMEND: { + if (OB_FAIL(init_rs_recommend_source_provider_(tenant_id, ls_id, local_clog_checkpoint_scn, arg, + storage_rpc, member_helper))) { + LOG_WARN("failed to init choose source by rs recommend provider", K(ret), K(tenant_id), + K(ls_id), K(local_clog_checkpoint_scn), K(arg), K(policy)); + } + break; + } + default: { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("unexpected choose source policy", K(ret), K(arg), K(policy)); + } } } if (OB_FAIL(ret)) { - } else if (OB_FAIL(member_info.member_list_.get_addr_array(addr_list))) { - LOG_WARN("failed to get addr array", K(ret), K(member_info)); + // do nothing } else { - FLOG_INFO("fetch ls member list", K(tenant_id), K(ls_id), K(src_info), K(member_info)); + storage_rpc_ = storage_rpc; + is_inited_ = true; } return ret; } +int ObStorageHAChooseSrcHelper::get_available_src(const ObMigrationOpArg &arg, ObStorageHASrcInfo &src_info) +{ + int ret = OB_SUCCESS; + common::ObAddr chosen_src_addr; + if (!is_inited_) { + ret = OB_NOT_INIT; + LOG_WARN("src helper not init", K(ret)); + } else if (OB_FAIL(provider_->choose_ob_src(arg, chosen_src_addr))) { + LOG_WARN("failed to choose ob src", K(ret), K(arg), K(src_info)); + } else 
{ + src_info.src_addr_ = chosen_src_addr; + src_info.cluster_id_ = GCONF.cluster_id; + LOG_INFO("succeed to choose src", K(src_info)); + errsim_test_(arg, src_info); + } + SERVER_EVENT_ADD("storage_ha", "choose_src", + "tenant_id", provider_->get_tenant_id(), + "ls_id", provider_->get_ls_id().id(), + "src_addr", src_info.src_addr_, + "op_type", ObMigrationOpType::get_str(provider_->get_type())); + return ret; +} + +int ObStorageHAChooseSrcHelper::init_rs_recommend_source_provider_( + const uint64_t tenant_id, const share::ObLSID &ls_id, + const share::SCN &local_clog_checkpoint_scn, const ObMigrationOpArg &arg, + storage::ObStorageRpc *storage_rpc, ObStorageHAGetMemberHelper *member_helper) +{ + int ret = OB_SUCCESS; + void *buf = nullptr; + ObRSRecommendSrcProvider *recommend_src_provider = nullptr; + const ObStorageHASrcProvider::ChooseSourcePolicy policy = ObStorageHASrcProvider::RECOMMEND; + if (OB_ISNULL(buf = allocator_.alloc(sizeof(ObRSRecommendSrcProvider)))) { + ret = OB_ALLOCATE_MEMORY_FAILED; + LOG_WARN("fail to alloc memory", K(ret)); + } else if (OB_ISNULL(recommend_src_provider = (new (buf) ObRSRecommendSrcProvider()))) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("provider is nullptr", K(ret)); + } else if (OB_FAIL(recommend_src_provider->init(tenant_id, ls_id, arg.type_, + local_clog_checkpoint_scn, policy, arg.dst_.get_replica_type(), storage_rpc, member_helper))) { + LOG_WARN("failed to init rs recommend src provider", K(ret), K(tenant_id), K(ls_id), + "type", arg.type_, K(local_clog_checkpoint_scn), K(arg.dst_), KP(storage_rpc), KP(member_helper)); + } else { + provider_ = recommend_src_provider; + recommend_src_provider = nullptr; + } + if (OB_NOT_NULL(recommend_src_provider)) { + recommend_src_provider->~ObRSRecommendSrcProvider(); + recommend_src_provider = nullptr; + } + buf = nullptr; + return ret; +} + +int ObStorageHAChooseSrcHelper::init_choose_source_by_location_provider_( + const uint64_t tenant_id, const share::ObLSID &ls_id, + const share::SCN &local_clog_checkpoint_scn, const ObMigrationOpArg &arg, + const ObStorageHASrcProvider::ChooseSourcePolicy policy, + storage::ObStorageRpc *storage_rpc, ObStorageHAGetMemberHelper *member_helper) +{ + int ret = OB_SUCCESS; + void *buf = nullptr; + ObMigrationSrcByLocationProvider *choose_src_by_location_provider = nullptr; + if (OB_ISNULL(buf = allocator_.alloc(sizeof(ObMigrationSrcByLocationProvider)))) { + ret = OB_ALLOCATE_MEMORY_FAILED; + LOG_WARN("fail to alloc memory", K(ret)); + } else if (OB_ISNULL(choose_src_by_location_provider = (new (buf) ObMigrationSrcByLocationProvider()))) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("provider is nullptr", K(ret)); + } else if (OB_FAIL(choose_src_by_location_provider->init(tenant_id, ls_id, arg.type_, + local_clog_checkpoint_scn, policy, arg.dst_.get_replica_type(), storage_rpc, member_helper))) { + LOG_WARN("failed to init src by location provider", K(ret), K(tenant_id), K(ls_id), + "type", arg.type_, K(local_clog_checkpoint_scn), K(policy), K(arg.dst_), KP(storage_rpc), KP(member_helper)); + } else { + provider_ = choose_src_by_location_provider; + choose_src_by_location_provider = nullptr; + } + if (OB_NOT_NULL(choose_src_by_location_provider)) { + choose_src_by_location_provider->~ObMigrationSrcByLocationProvider(); + choose_src_by_location_provider = nullptr; + } + buf = nullptr; + return ret; +} + +int ObStorageHAChooseSrcHelper::init_choose_source_by_checkpoint_provider_( + const uint64_t tenant_id, const share::ObLSID &ls_id, + const share::SCN &local_clog_checkpoint_scn, 
const ObMigrationOpArg &arg, + storage::ObStorageRpc *storage_rpc, ObStorageHAGetMemberHelper *member_helper) +{ + int ret = OB_SUCCESS; + void *buf = nullptr; + ObMigrationSrcByCheckpointProvider *choose_src_by_checkpoint_provider = nullptr; + const ObStorageHASrcProvider::ChooseSourcePolicy policy = ObStorageHASrcProvider::CHECKPOINT; + if (OB_ISNULL(buf = allocator_.alloc(sizeof(ObMigrationSrcByCheckpointProvider)))) { + ret = OB_ALLOCATE_MEMORY_FAILED; + LOG_WARN("fail to alloc memory", K(ret)); + } else if (OB_ISNULL(choose_src_by_checkpoint_provider = (new (buf) ObMigrationSrcByCheckpointProvider()))) { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("provider is nullptr", K(ret)); + } else if (OB_FAIL(choose_src_by_checkpoint_provider->init(tenant_id, ls_id, arg.type_, local_clog_checkpoint_scn, + policy, arg.dst_.get_replica_type(), storage_rpc, member_helper))) { + LOG_WARN("failed to init src by checkpoint provider", K(ret), K(tenant_id), K(ls_id), + "type", arg.type_, K(local_clog_checkpoint_scn), K(arg.dst_), KP(storage_rpc), KP(member_helper)); + } else { + provider_ = choose_src_by_checkpoint_provider; + choose_src_by_checkpoint_provider = nullptr; + } + if (OB_NOT_NULL(choose_src_by_checkpoint_provider)) { + choose_src_by_checkpoint_provider->~ObMigrationSrcByCheckpointProvider(); + choose_src_by_checkpoint_provider = nullptr; + } + buf = nullptr; + return ret; +} + +int ObStorageHAChooseSrcHelper::get_policy_type( + const ObMigrationOpArg &arg, + const uint64_t tenant_id, + bool enable_choose_source_policy, + const char *policy_str, + ObStorageHASrcProvider::ChooseSourcePolicy &policy) +{ + int ret = OB_SUCCESS; + policy = ObStorageHASrcProvider::ChooseSourcePolicy::IDC; + if (!arg.is_valid() + || OB_INVALID_TENANT_ID == tenant_id) { + ret = OB_INVALID_ARGUMENT; + LOG_WARN("invalid argument", K(ret), K(arg), K(tenant_id)); + } else if (arg.data_src_.is_valid() && ObMigrationOpType::TYPE::REBUILD_LS_OP != arg.type_) { // TODO (zhixing.yh) modify condition after repairing compat + policy = ObStorageHASrcProvider::ChooseSourcePolicy::RECOMMEND; + LOG_INFO("rs recommend source", K(arg.data_src_), K(tenant_id)); + } else if (!enable_choose_source_policy) { + policy = ObStorageHASrcProvider::ChooseSourcePolicy::CHECKPOINT; + LOG_INFO("set checkpoint policy", K(tenant_id)); + } else if (0 == strcmp(policy_str, ObStorageHASrcProvider::get_policy_str(ObStorageHASrcProvider::ChooseSourcePolicy::IDC))) { + policy = ObStorageHASrcProvider::ChooseSourcePolicy::IDC; + LOG_INFO("set idc policy", K(tenant_id)); + } else if (0 == strcmp(policy_str, ObStorageHASrcProvider::get_policy_str(ObStorageHASrcProvider::ChooseSourcePolicy::REGION))) { + policy = ObStorageHASrcProvider::ChooseSourcePolicy::REGION; + LOG_INFO("set region policy", K(tenant_id)); + } else { + ret = OB_ERR_UNEXPECTED; + LOG_WARN("unexpected type", K(ret), K(tenant_id), K(policy_str)); + } + return ret; +} + +void ObStorageHAChooseSrcHelper::errsim_test_(const ObMigrationOpArg &arg, ObStorageHASrcInfo &src_info) +{ + int ret = OB_SUCCESS; +#ifdef ERRSIM + if (ObMigrationOpType::ADD_LS_OP == arg.type_ || ObMigrationOpType::MIGRATE_LS_OP == arg.type_) { + const ObString &errsim_server = GCONF.errsim_migration_src_server_addr.str(); + if (!errsim_server.empty()) { + common::ObAddr tmp_errsim_addr; + if (OB_FAIL(tmp_errsim_addr.parse_from_string(errsim_server))) { + LOG_WARN("failed to parse from string", K(ret), K(errsim_server)); + } else { + src_info.src_addr_ = tmp_errsim_addr; + src_info.cluster_id_ = GCONF.cluster_id; + 
LOG_INFO("storage ha choose errsim src", K(tmp_errsim_addr)); + } + } + } +#endif +} } // namespace storage } // namespace oceanbase diff --git a/src/storage/high_availability/ob_storage_ha_src_provider.h b/src/storage/high_availability/ob_storage_ha_src_provider.h index 3917d0724d..7773779c23 100644 --- a/src/storage/high_availability/ob_storage_ha_src_provider.h +++ b/src/storage/high_availability/ob_storage_ha_src_provider.h @@ -16,40 +16,240 @@ #include "share/ob_srv_rpc_proxy.h" // ObPartitionServiceRpcProxy #include "storage/ob_storage_rpc.h" #include "ob_storage_ha_struct.h" +#include "common/ob_learner_list.h" namespace oceanbase { namespace storage { -class ObStorageHASrcProvider { +class ObStorageHAGetMemberHelper +{ public: - ObStorageHASrcProvider(); - virtual ~ObStorageHASrcProvider(); - int init(const uint64_t tenant_id, const ObMigrationOpType::TYPE &type, storage::ObStorageRpc *storage_rpc); - int choose_ob_src(const share::ObLSID &ls_id, const share::SCN &local_clog_checkpoint_scn, - ObStorageHASrcInfo &src_info); + ObStorageHAGetMemberHelper(); + virtual ~ObStorageHAGetMemberHelper(); + int init(storage::ObStorageRpc *storage_rpc); int get_ls_member_list(const uint64_t tenant_id, const share::ObLSID &ls_id, common::ObIArray &addr_list); + int get_ls_member_list_and_learner_list( + const uint64_t tenant_id, const share::ObLSID &ls_id, const bool need_learner_list, + common::ObAddr &leader_addr, common::GlobalLearnerList &learner_list, + common::ObIArray &member_list); + virtual int get_ls_leader(const uint64_t tenant_id, const share::ObLSID &ls_id, common::ObAddr &addr); + virtual int get_ls(const share::ObLSID &ls_id, ObLSHandle &ls_handle); + virtual bool check_tenant_primary(); private: - int get_ls_leader_(const uint64_t tenant_id, const share::ObLSID &ls_id, common::ObAddr &addr); - int fetch_ls_member_list_(const uint64_t tenant_id, const share::ObLSID &ls_id, const common::ObAddr &addr, + int fetch_ls_member_list_and_learner_list_(const uint64_t tenant_id, const share::ObLSID &ls_id, const bool need_learner_list, + common::ObAddr &addr, common::GlobalLearnerList &learner_list, common::ObIArray &member_list); + virtual int get_ls_member_list_and_learner_list_(const uint64_t tenant_id, const share::ObLSID &ls_id, + const bool need_learner_list, common::ObAddr &leader_addr, + common::GlobalLearnerList &learner_list, common::ObIArray &member_list); + +private: + storage::ObStorageRpc *storage_rpc_; + bool is_inited_; + DISALLOW_COPY_AND_ASSIGN(ObStorageHAGetMemberHelper); +}; + +class ObStorageHASrcProvider { +public: + enum ChooseSourcePolicy : uint8_t + { + IDC = 0, + REGION = 1, + CHECKPOINT = 2, + RECOMMEND = 3, + MAX_POLICY + }; + + ObStorageHASrcProvider(); + virtual ~ObStorageHASrcProvider(); + int init(const uint64_t tenant_id, const share::ObLSID &ls_id, + const ObMigrationOpType::TYPE &type, const share::SCN &local_clog_checkpoint_scn, + const ChooseSourcePolicy policy_type, + const common::ObReplicaType replica_type, storage::ObStorageRpc *storage_rpc, + ObStorageHAGetMemberHelper *member_helper); + virtual int choose_ob_src( + const ObMigrationOpArg &arg, common::ObAddr &chosen_src_addr) = 0; + + inline uint64_t get_tenant_id() const { return tenant_id_; } + const share::ObLSID &get_ls_id() const { return ls_id_; } + ObMigrationOpType::TYPE get_type() const { return type_; } + storage::ObStorageRpc *get_storage_rpc() const { return storage_rpc_; } + const share::SCN &get_local_clog_checkpoint_scn() const { return local_clog_checkpoint_scn_; } + const 
share::SCN &get_palf_parent_checkpoint_scn() const { return palf_parent_checkpoint_scn_; }
+  ChooseSourcePolicy get_policy_type() const { return policy_type_; }
+
+  const static char *ObChooseSourcePolicyStr[static_cast<int64_t>(ChooseSourcePolicy::MAX_POLICY)];
+  const static char *get_policy_str(const ChooseSourcePolicy policy_type);
+  int check_tenant_primary(bool &is_primary);
+
+protected:
+  // The validity assessment of replicas includes:
+  // server_version: dest server_version >= src server_version
+  // restore_status: if the restore_status of the ls is failed, migration needs to wait.
+  // migration_status: OB_MIGRATION_STATUS_NONE
+  // replica type: an F replica could serve as the source of F replicas and R replicas,
+  // while an R replica could only serve as the source of R replicas
+  // source checkpoint scn must be greater than or equal to palf_parent_checkpoint_scn_ and local_clog_checkpoint_scn_
+  int check_replica_validity(
+      const common::ObAddr &addr, const common::ObReplicaMember &dst,
+      const common::GlobalLearnerList &learner_list, obrpc::ObFetchLSMetaInfoResp &ls_info);
+  // According to the replica type, determine whether to get the learner list:
+  // an F replica gets the member list, an R replica gets both the member list and the learner list.
+  int get_replica_addr_list(
+      const common::ObReplicaMember &dst,
+      common::ObAddr &leader_addr, common::GlobalLearnerList &learner_list,
       common::ObIArray<common::ObAddr> &addr_list);
-  int inner_choose_ob_src_(const uint64_t tenant_id, const share::ObLSID &ls_id,
-      const share::SCN &local_clog_checkpoint_scn, const common::ObIArray<common::ObAddr> &addr_list,
-      common::ObAddr &choosen_src_addr);
+protected:
+  bool is_inited_;
+
+private:
   int fetch_ls_meta_info_(const uint64_t tenant_id, const share::ObLSID &ls_id, const common::ObAddr &member_addr,
       obrpc::ObFetchLSMetaInfoResp &ls_meta_info);
-  int get_ls_member_list_(const uint64_t tenant_id, const share::ObLSID &ls_id, const common::ObAddr &leader_addr,
-      common::ObIArray<common::ObAddr> &addr_list);
+  int check_replica_type_(
+      const common::ObAddr &addr,
+      const common::ObReplicaMember &dst,
+      const common::GlobalLearnerList &learner_list,
+      bool &is_replica_type_valid);
+  int init_palf_parent_checkpoint_scn_(const uint64_t tenant_id, const share::ObLSID &ls_id,
+      const share::SCN &local_clog_checkpoint_scn, const common::ObReplicaType replica_type);
+
+  int get_palf_parent_checkpoint_scn_from_rpc_(const uint64_t tenant_id, const share::ObLSID &ls_id,
+      const common::ObReplicaType replica_type, share::SCN &parent_checkpoint_scn);
+  int get_palf_parent_addr_(const uint64_t tenant_id, const share::ObLSID &ls_id,
+      const common::ObReplicaType replica_type, common::ObAddr &parent_addr);
 private:
-  bool is_inited_;
   uint64_t tenant_id_;
+  share::ObLSID ls_id_;
   ObMigrationOpType::TYPE type_;
+  share::SCN local_clog_checkpoint_scn_;
+  share::SCN palf_parent_checkpoint_scn_;
+  ObStorageHAGetMemberHelper *member_helper_;
   storage::ObStorageRpc *storage_rpc_;
+  ChooseSourcePolicy policy_type_;
   DISALLOW_COPY_AND_ASSIGN(ObStorageHASrcProvider);
 };
+class ObMigrationSrcByLocationProvider : public ObStorageHASrcProvider
+{
+public:
+  ObMigrationSrcByLocationProvider();
+  virtual ~ObMigrationSrcByLocationProvider();
+  int init(const uint64_t tenant_id, const share::ObLSID &ls_id,
+      const ObMigrationOpType::TYPE &type, const share::SCN &local_clog_checkpoint_scn,
+      const ChooseSourcePolicy policy_type,
+      const common::ObReplicaType replica_type, storage::ObStorageRpc *storage_rpc,
+      ObStorageHAGetMemberHelper *member_helper);
+  virtual int choose_ob_src(
+      const ObMigrationOpArg &arg,
common::ObAddr &chosen_src_addr) override;
+
+protected:
+  int inner_choose_ob_src(
+      const common::ObAddr &leader_addr, const common::GlobalLearnerList &learner_list,
+      const common::ObIArray<common::ObAddr> &addr_list, const ObMigrationOpArg &arg,
+      common::ObAddr &chosen_src_addr);
+  int divide_addr_list(
+      const common::ObIArray<common::ObAddr> &addr_list,
+      const common::ObReplicaMember &dst,
+      common::ObIArray<common::ObAddr> &sorted_addr_list,
+      int64_t &idc_end_index,
+      int64_t &region_end_index);
+  int find_src(
+      const common::ObIArray<common::ObAddr> &sorted_addr_list,
+      const int64_t start_index,
+      const int64_t end_index,
+      const common::GlobalLearnerList &learner_list,
+      const common::ObAddr &leader_addr,
+      const common::ObReplicaMember &dst,
+      common::ObAddr &chosen_src_addr);
+private:
+  void set_locality_manager_(ObLocalityManager *locality_manager);
+  int get_server_region_and_idc_(
+      const common::ObAddr &addr, common::ObRegion &region, common::ObIDC &idc);
+private:
+  ObLocalityManager *locality_manager_;
+
+  DISALLOW_COPY_AND_ASSIGN(ObMigrationSrcByLocationProvider);
+};
+
+class ObMigrationSrcByCheckpointProvider : public ObStorageHASrcProvider
+{
+public:
+  ObMigrationSrcByCheckpointProvider();
+  virtual ~ObMigrationSrcByCheckpointProvider();
+  int init(const uint64_t tenant_id, const share::ObLSID &ls_id,
+      const ObMigrationOpType::TYPE &type, const share::SCN &local_clog_checkpoint_scn,
+      const ChooseSourcePolicy policy_type,
+      const common::ObReplicaType replica_type, storage::ObStorageRpc *storage_rpc,
+      ObStorageHAGetMemberHelper *member_helper);
+  virtual int choose_ob_src(
+      const ObMigrationOpArg &arg, common::ObAddr &chosen_src_addr) override;
+
+private:
+  int inner_choose_ob_src_(
+      const common::ObAddr &leader_addr, const common::GlobalLearnerList &learner_list,
+      const common::ObIArray<common::ObAddr> &addr_list, const ObMigrationOpArg &arg,
+      common::ObAddr &chosen_src_addr);
+
+  DISALLOW_COPY_AND_ASSIGN(ObMigrationSrcByCheckpointProvider);
+};
+
+class ObRSRecommendSrcProvider : public ObStorageHASrcProvider
+{
+public:
+  ObRSRecommendSrcProvider();
+  virtual ~ObRSRecommendSrcProvider();
+  int init(const uint64_t tenant_id, const share::ObLSID &ls_id,
+      const ObMigrationOpType::TYPE &type, const share::SCN &local_clog_checkpoint_scn,
+      const ChooseSourcePolicy policy_type,
+      const common::ObReplicaType replica_type, storage::ObStorageRpc *storage_rpc,
+      ObStorageHAGetMemberHelper *member_helper);
+  int choose_ob_src(
+      const ObMigrationOpArg &arg, common::ObAddr &chosen_src_addr) override;
+private:
+  int check_replica_validity_(const int64_t cluster_id, const common::ObIArray<common::ObAddr> &addr_list,
+      const common::ObAddr &addr, const common::ObReplicaMember &dst,
+      const common::GlobalLearnerList &learner_list);
+  DISALLOW_COPY_AND_ASSIGN(ObRSRecommendSrcProvider);
+};
+
+class ObStorageHAChooseSrcHelper final
+{
+public:
+  ObStorageHAChooseSrcHelper();
+  ~ObStorageHAChooseSrcHelper();
+  int init(const uint64_t tenant_id, const share::ObLSID &ls_id, const share::SCN &local_clog_checkpoint_scn,
+      const ObMigrationOpArg &arg, const ObStorageHASrcProvider::ChooseSourcePolicy policy,
+      storage::ObStorageRpc *storage_rpc, ObStorageHAGetMemberHelper *member_helper);
+  int get_available_src(const ObMigrationOpArg &arg, ObStorageHASrcInfo &src_info);
+  static int get_policy_type(const ObMigrationOpArg &arg, const uint64_t tenant_id,
+      bool enable_choose_source_policy, const char *policy_str,
+      ObStorageHASrcProvider::ChooseSourcePolicy &policy);
+private:
+  int init_rs_recommend_source_provider_(const uint64_t tenant_id, const share::ObLSID &ls_id,
+      const
share::SCN &local_clog_checkpoint_scn, const ObMigrationOpArg &arg, storage::ObStorageRpc *storage_rpc,
+      ObStorageHAGetMemberHelper *member_helper);
+  int init_choose_source_by_location_provider_(
+      const uint64_t tenant_id, const share::ObLSID &ls_id,
+      const share::SCN &local_clog_checkpoint_scn, const ObMigrationOpArg &arg,
+      const ObStorageHASrcProvider::ChooseSourcePolicy policy, storage::ObStorageRpc *storage_rpc,
+      ObStorageHAGetMemberHelper *member_helper);
+  int init_choose_source_by_checkpoint_provider_(
+      const uint64_t tenant_id, const share::ObLSID &ls_id,
+      const share::SCN &local_clog_checkpoint_scn, const ObMigrationOpArg &arg, storage::ObStorageRpc *storage_rpc,
+      ObStorageHAGetMemberHelper *member_helper);
+  void errsim_test_(const ObMigrationOpArg &arg, ObStorageHASrcInfo &src_info);
+  ObStorageHASrcProvider *get_provider() const { return provider_; }
+
+private:
+  ObStorageHASrcProvider *provider_;
+  storage::ObStorageRpc *storage_rpc_;
+  common::ObArenaAllocator allocator_;
+  bool is_inited_;
+  DISALLOW_COPY_AND_ASSIGN(ObStorageHAChooseSrcHelper);
+};
+
 } // namespace storage
 } // namespace oceanbase
 #endif
diff --git a/src/storage/high_availability/ob_storage_ha_struct.cpp b/src/storage/high_availability/ob_storage_ha_struct.cpp
index 7835324689..e8ce8c7a48 100644
--- a/src/storage/high_availability/ob_storage_ha_struct.cpp
+++ b/src/storage/high_availability/ob_storage_ha_struct.cpp
@@ -869,8 +869,9 @@ bool ObMigrationOpArg::is_valid() const
       && cluster_id_ > 0
       && src_.is_valid()
       && dst_.is_valid()
-      && data_src_.is_valid()
-      && (paxos_replica_number_ > 0 || ObMigrationOpType::REBUILD_LS_OP == type_);
+      && (paxos_replica_number_ > 0 || ObMigrationOpType::REBUILD_LS_OP == type_)
+      && (ObMigrationOpType::MIGRATE_LS_OP == type_ ? (src_.get_server() != dst_.get_server()) : true);
 }
 void ObMigrationOpArg::reset()
diff --git a/src/storage/high_availability/ob_storage_ha_utils.cpp b/src/storage/high_availability/ob_storage_ha_utils.cpp
index 0a61e90224..fcbbf0e71b 100644
--- a/src/storage/high_availability/ob_storage_ha_utils.cpp
+++ b/src/storage/high_availability/ob_storage_ha_utils.cpp
@@ -48,6 +48,8 @@ namespace storage
 {
 ERRSIM_POINT_DEF(EN_TRANSFER_ALLOW_RETRY);
+ERRSIM_POINT_DEF(EN_CHECK_LOG_NEED_REBUILD);
+
 int ObStorageHAUtils::get_ls_leader(const uint64_t tenant_id, const share::ObLSID &ls_id, common::ObAddr &leader)
 {
   int ret = OB_SUCCESS;
@@ -445,6 +447,34 @@ int ObStorageHAUtils::check_tenant_will_be_deleted(
   return ret;
 }
+int ObStorageHAUtils::check_replica_validity(const obrpc::ObFetchLSMetaInfoResp &ls_info)
+{
+  int ret = OB_SUCCESS;
+  ObMigrationStatus migration_status;
+  share::ObLSRestoreStatus restore_status;
+  if (!ls_info.is_valid()) {
+    ret = OB_INVALID_ARGUMENT;
+    LOG_WARN("invalid argument!", K(ret), K(ls_info));
+  } else if (OB_FAIL(check_server_version(ls_info.version_))) {
+    if (OB_MIGRATE_NOT_COMPATIBLE == ret) {
+      LOG_WARN("this src is not compatible", K(ret), K(ls_info));
+    } else {
+      LOG_WARN("failed to check version", K(ret), K(ls_info));
+    }
+  } else if (OB_FAIL(ls_info.ls_meta_package_.ls_meta_.get_migration_status(migration_status))) {
+    LOG_WARN("failed to get migration status", K(ret), K(ls_info));
+  } else if (!ObMigrationStatusHelper::check_can_migrate_out(migration_status)) {
+    ret = OB_DATA_SOURCE_NOT_VALID;
+    LOG_WARN("this src is not suitable, migration status check failed", K(ret), K(ls_info));
+  } else if (OB_FAIL(ls_info.ls_meta_package_.ls_meta_.get_restore_status(restore_status))) {
+    LOG_WARN("failed to get restore status", K(ret), K(ls_info));
+  } else if (restore_status.is_failed()) {
+    ret = OB_DATA_SOURCE_NOT_EXIST;
+    LOG_WARN("some ls replica restore failed, can not migrate", K(ret), K(ls_info));
+  }
+  return ret;
+}
+
 bool ObTransferUtils::is_need_retry_error(const int err)
 {
   bool bool_ret = false;
@@ -596,6 +626,46 @@ int64_t ObStorageHAUtils::get_rpc_timeout()
   return rpc_timeout;
 }
+int ObStorageHAUtils::check_log_need_rebuild(const uint64_t tenant_id, const share::ObLSID &ls_id, bool &need_rebuild)
+{
+  int ret = OB_SUCCESS;
+  ObLS *ls = nullptr;
+  common::ObAddr parent_addr;
+  ObLSHandle ls_handle;
+  bool is_log_sync = false;
+
+  if (OB_INVALID_TENANT_ID == tenant_id || !ls_id.is_valid()) {
+    ret = OB_INVALID_ARGUMENT;
+    LOG_WARN("argument is not valid", K(ret), K(tenant_id), K(ls_id));
+  } else if (OB_FAIL(ObStorageHADagUtils::get_ls(ls_id, ls_handle))) {
+    LOG_WARN("failed to get ls", K(ret), K(tenant_id), K(ls_id));
+  } else if (OB_ISNULL(ls = ls_handle.get_ls())) {
+    ret = OB_ERR_UNEXPECTED;
+    LOG_WARN("ls should not be NULL", K(ret), KP(ls), K(tenant_id), K(ls_id));
+  } else if (OB_ISNULL(ls->get_log_handler())) {
+    ret = OB_ERR_UNEXPECTED;
+    LOG_WARN("log handler should not be NULL", K(ret), K(tenant_id), K(ls_id));
+  } else if (OB_FAIL(ls->get_log_handler()->is_in_sync(is_log_sync, need_rebuild))) {
+    LOG_WARN("failed to get is_in_sync", K(ret), K(tenant_id), K(ls_id));
+  }
+
+#ifdef ERRSIM
+  if (OB_SUCC(ret)) {
+    int tmp_ret = OB_SUCCESS;
+    tmp_ret = EN_CHECK_LOG_NEED_REBUILD ?
: OB_SUCCESS; + if (OB_TMP_FAIL(tmp_ret)) { + need_rebuild = true; + SERVER_EVENT_ADD("storage_ha", "check_log_need_rebuild", + "tenant_id", tenant_id, + "ls_id", ls_id.id(), + "result", tmp_ret); + DEBUG_SYNC(AFTER_CHECK_LOG_NEED_REBUILD); + } + } +#endif + return ret; +} + void ObTransferUtils::set_transfer_module() { #ifdef ERRSIM diff --git a/src/storage/high_availability/ob_storage_ha_utils.h b/src/storage/high_availability/ob_storage_ha_utils.h index ecf21ae867..19b0cbfa95 100644 --- a/src/storage/high_availability/ob_storage_ha_utils.h +++ b/src/storage/high_availability/ob_storage_ha_utils.h @@ -16,6 +16,7 @@ #include "ob_storage_ha_struct.h" #include "share/ob_storage_ha_diagnose_struct.h" #include "ob_transfer_struct.h" +#include "storage/ob_storage_rpc.h" namespace oceanbase { @@ -56,6 +57,9 @@ public: static int check_tenant_will_be_deleted( bool &is_deleted); + static int check_replica_validity(const obrpc::ObFetchLSMetaInfoResp &ls_info); + static int check_log_need_rebuild(const uint64_t tenant_id, const share::ObLSID &ls_id, bool &need_rebuild); + private: static int check_merge_error_(const uint64_t tenant_id, common::ObISQLClient &sql_client); static int fetch_src_tablet_meta_info_(const uint64_t tenant_id, const common::ObTabletID &tablet_id, diff --git a/tools/deploy/mysql_test/test_suite/inner_table/r/mysql/desc_virtual_table_in_sys.result b/tools/deploy/mysql_test/test_suite/inner_table/r/mysql/desc_virtual_table_in_sys.result index 952dca9152..730c820224 100644 --- a/tools/deploy/mysql_test/test_suite/inner_table/r/mysql/desc_virtual_table_in_sys.result +++ b/tools/deploy/mysql_test/test_suite/inner_table/r/mysql/desc_virtual_table_in_sys.result @@ -1657,7 +1657,7 @@ backup_id bigint(20) NO NULL backup_dest varchar(2048) YES NULL description varchar(2048) YES NULL select /*+QUERY_TIMEOUT(60000000)*/ IF(count(*) >= 0, 1, 0) from oceanbase.__tenant_virtual_show_restore_preview; -ERROR 0A000: show restore preview do not specify backup dest not supported +ERROR 0A000: show restore preview before ALTER SYSTEM RESTORE PREVIEW is not supported desc oceanbase.__all_virtual_master_key_version_info; Field Type Null Key Default Extra svr_ip varchar(46) NO PRI NULL diff --git a/unittest/storage/CMakeLists.txt b/unittest/storage/CMakeLists.txt index d276d07b57..c8cc490903 100644 --- a/unittest/storage/CMakeLists.txt +++ b/unittest/storage/CMakeLists.txt @@ -115,6 +115,7 @@ storage_dml_unittest(test_major_rows_merger) storage_dml_unittest(test_tablet tablet/test_tablet.cpp) storage_unittest(test_medium_list_checker compaction/test_medium_list_checker.cpp) storage_unittest(test_protected_memtable_mgr_handle test_protected_memtable_mgr_handle.cpp) +storage_unittest(test_choose_migration_source_policy migration/test_choose_migration_source_policy.cpp) if(OB_BUILD_CLOSE_MODULES) storage_dml_unittest(test_compaction_policy) diff --git a/unittest/storage/migration/test_choose_migration_source_policy.cpp b/unittest/storage/migration/test_choose_migration_source_policy.cpp new file mode 100644 index 0000000000..658b304965 --- /dev/null +++ b/unittest/storage/migration/test_choose_migration_source_policy.cpp @@ -0,0 +1,1108 @@ +/** + * Copyright (c) 2021 OceanBase + * OceanBase CE is licensed under Mulan PubL v2. + * You can use this software according to the terms and conditions of the Mulan PubL v2. 
+ * You may obtain a copy of Mulan PubL v2 at:
+ * http://license.coscl.org.cn/MulanPubL-2.0
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PubL v2 for more details.
+ */
+#define USING_LOG_PREFIX STORAGE
+#include <gtest/gtest.h>
+#include <gmock/gmock.h>
+#define private public
+#define protected public
+#include "storage/high_availability/ob_storage_ha_src_provider.h"
+#include "storage/ls/ob_ls.h"
+#include "storage/ls/ob_ls_meta_package.h"
+#include "test_migration.h"
+#include "rpc/mock_ob_common_rpc_proxy.h"
+#include "storage/ob_locality_manager.h"
+#include "lib/ob_errno.h"
+#include "logservice/palf/palf_handle_impl.h"
+
+namespace oceanbase
+{
+using namespace common;
+using namespace share;
+using namespace testing::internal;
+using ::testing::_;
+using ::testing::Invoke;
+namespace storage
+{
+class MockStorageRpc : public ObStorageRpc
+{
+public:
+  MockStorageRpc()
+    : ObStorageRpc()
+  {
+  }
+  virtual ~MockStorageRpc() {}
+
+  MOCK_METHOD4(post_ls_meta_info_request, int(const uint64_t, const ObStorageHASrcInfo &,
+      const share::ObLSID &, obrpc::ObFetchLSMetaInfoResp &));
+};
+
+class MockGetMemberHelper : public ObStorageHAGetMemberHelper
+{
+public:
+  MockGetMemberHelper()
+    : ObStorageHAGetMemberHelper()
+  {
+  }
+  virtual ~MockGetMemberHelper() {}
+
+  MOCK_METHOD6(get_ls_member_list_and_learner_list_, int(const uint64_t, const share::ObLSID &,
+      const bool, common::ObAddr &, common::GlobalLearnerList &, common::ObIArray<common::ObAddr> &));
+  MOCK_METHOD3(get_ls_leader, int(const uint64_t, const share::ObLSID &, common::ObAddr &));
+  MOCK_METHOD2(get_ls, int(const share::ObLSID &, ObLSHandle &));
+  MOCK_METHOD0(check_tenant_primary, bool());
+};
+
+class MockMemberList
+{
+public:
+  MockMemberList() {}
+  virtual ~MockMemberList() {}
+
+  int get_ls_member_list_for_checkpoint(const uint64_t, const share::ObLSID &,
+      const bool, common::ObAddr &leader, common::GlobalLearnerList &, common::ObIArray<common::ObAddr> &addr_list)
+  {
+    int ret = OB_SUCCESS;
+    if (OB_FAIL(mock_leader_addr(leader))) {
+      LOG_WARN("failed to mock addr", K(ret));
+    } else if (OB_FAIL(mock_addr_list(4/*addr_count*/, addr_list))) {
+      LOG_WARN("failed to mock addr list", K(ret));
+    }
+    return ret;
+  }
+
+  int get_ls_member_list_for_rs_recommand(const uint64_t, const share::ObLSID &,
+      const bool, common::ObAddr &leader, common::GlobalLearnerList &learner_list, common::ObIArray<common::ObAddr> &addr_list)
+  {
+    int ret = OB_SUCCESS;
+    common::ObAddr rs_recommand_addr;
+    if (OB_FAIL(mock_leader_addr(leader))) {
+      LOG_WARN("failed to mock addr", K(ret));
+    } else if (OB_FAIL(mock_addr_list(3/*addr_count*/, addr_list))) {
+      LOG_WARN("failed to mock addr list", K(ret));
+    } else if (OB_FAIL(mock_rs_recommand_addr(rs_recommand_addr))) {
+      LOG_WARN("failed to mock rs recommend addr", K(ret));
+    } else if (OB_FAIL(mock_learner_list(rs_recommand_addr, learner_list))) {
+      LOG_WARN("failed to mock learner list", K(ret));
+    }
+    return ret;
+  }
+
+  int get_ls_member_list_for_idc_mode_idc_leader(const uint64_t, const share::ObLSID &,
+      const bool, common::ObAddr &leader, common::GlobalLearnerList &learner_list, common::ObIArray<common::ObAddr> &addr_list)
+  {
+    int ret = OB_SUCCESS;
+    if (OB_FAIL(mock_leader_addr(leader))) {
+      LOG_WARN("failed to mock leader addr", K(ret));
+    } else if (OB_FAIL(mock_addr_list(5/*addr_count*/, addr_list))) {
+      LOG_WARN("failed to mock addr list", K(ret));
+    }
+    return ret;
+  }
+
+  int
get_ls_member_list_for_idc_mode_idc_follower(const uint64_t, const share::ObLSID &,
+      const bool, common::ObAddr &leader, common::GlobalLearnerList &learner_list, common::ObIArray<common::ObAddr> &addr_list)
+  {
+    int ret = OB_SUCCESS;
+    if (OB_FAIL(mock_leader_addr(leader))) {
+      LOG_WARN("failed to mock leader addr", K(ret));
+    } else if (OB_FAIL(mock_addr_list(5/*addr_count*/, addr_list))) {
+      LOG_WARN("failed to mock addr list", K(ret));
+    }
+    return ret;
+  }
+
+  int get_ls_member_list_for_idc_mode_region_leader(const uint64_t, const share::ObLSID &,
+      const bool, common::ObAddr &leader, common::GlobalLearnerList &learner_list, common::ObIArray<common::ObAddr> &addr_list)
+  {
+    int ret = OB_SUCCESS;
+    if (OB_FAIL(mock_leader_addr(leader))) {
+      LOG_WARN("failed to mock leader addr", K(ret));
+    } else if (OB_FAIL(mock_addr_list(3/*addr_count*/, addr_list))) {
+      LOG_WARN("failed to mock addr list", K(ret));
+    }
+    return ret;
+  }
+
+  int get_ls_member_list_for_idc_mode_region_follower(const uint64_t, const share::ObLSID &,
+      const bool, common::ObAddr &leader, common::GlobalLearnerList &learner_list, common::ObIArray<common::ObAddr> &addr_list)
+  {
+    int ret = OB_SUCCESS;
+    if (OB_FAIL(mock_leader_addr(leader))) {
+      LOG_WARN("failed to mock leader addr", K(ret));
+    } else if (OB_FAIL(mock_addr_list(3/*addr_count*/, addr_list))) {
+      LOG_WARN("failed to mock addr list", K(ret));
+    }
+    return ret;
+  }
+
+  int get_ls_member_list_for_idc_mode_diff_region_leader(const uint64_t, const share::ObLSID &,
+      const bool, common::ObAddr &leader, common::GlobalLearnerList &learner_list, common::ObIArray<common::ObAddr> &addr_list)
+  {
+    int ret = OB_SUCCESS;
+    if (OB_FAIL(mock_leader_addr(leader))) {
+      LOG_WARN("failed to mock leader addr", K(ret));
+    } else if (OB_FAIL(mock_addr_list(1/*addr_count*/, addr_list))) {
+      LOG_WARN("failed to mock addr list", K(ret));
+    }
+    return ret;
+  }
+
+  int get_ls_member_list_for_idc_mode_diff_region_follower(const uint64_t, const share::ObLSID &,
+      const bool, common::ObAddr &leader, common::GlobalLearnerList &learner_list, common::ObIArray<common::ObAddr> &addr_list)
+  {
+    int ret = OB_SUCCESS;
+    if (OB_FAIL(mock_leader_addr(leader))) {
+      LOG_WARN("failed to mock leader addr", K(ret));
+    } else if (OB_FAIL(mock_addr_list(2/*addr_count*/, addr_list))) {
+      LOG_WARN("failed to mock addr list", K(ret));
+    }
+    return ret;
+  }
+
+  int get_ls_member_list_for_region_mode_region_follower(const uint64_t, const share::ObLSID &,
+      const bool, common::ObAddr &leader, common::GlobalLearnerList &learner_list, common::ObIArray<common::ObAddr> &addr_list)
+  {
+    int ret = OB_SUCCESS;
+    if (OB_FAIL(mock_leader_addr(leader))) {
+      LOG_WARN("failed to mock leader addr", K(ret));
+    } else if (OB_FAIL(mock_addr_list(4/*addr_count*/, addr_list))) {
+      LOG_WARN("failed to mock addr list", K(ret));
+    }
+    return ret;
+  }
+
+  int get_ls_member_list_for_region_mode_region_leader(const uint64_t, const share::ObLSID &,
+      const bool, common::ObAddr &leader, common::GlobalLearnerList &learner_list, common::ObIArray<common::ObAddr> &addr_list)
+  {
+    int ret = OB_SUCCESS;
+    if (OB_FAIL(mock_leader_addr(leader))) {
+      LOG_WARN("failed to mock leader addr", K(ret));
+    } else if (OB_FAIL(mock_addr_list(3/*addr_count*/, addr_list))) {
+      LOG_WARN("failed to mock addr list", K(ret));
+    }
+    return ret;
+  }
+
+  int get_ls_member_list_for_region_mode_diff_region_follower(const uint64_t, const share::ObLSID &,
+      const bool, common::ObAddr &leader, common::GlobalLearnerList &learner_list, common::ObIArray<common::ObAddr> &addr_list)
+  {
+    int ret = OB_SUCCESS;
+    if (OB_FAIL(mock_leader_addr(leader))) {
+      LOG_WARN("failed to mock leader
addr", K(ret)); + } else if (OB_FAIL(mock_addr_list(2/*addr_count*/, addr_list))) { + LOG_WARN("failed to mock addr list", K(ret)); + } + return ret; + } + + int get_ls_member_list_for_region_mode_diff_region_leader(const uint64_t, const share::ObLSID &, + const bool, common::ObAddr &leader, common::GlobalLearnerList &learner_list, common::ObIArray &addr_list) + { + int ret = OB_SUCCESS; + if (OB_FAIL(mock_leader_addr(leader))) { + LOG_WARN("failed to mock leader addr", K(ret)); + } else if (OB_FAIL(mock_addr_list(1/*addr_count*/, addr_list))) { + LOG_WARN("failed to mock addr list", K(ret)); + } + return ret; + } + + int get_ls_member_list_for_member_helper(const uint64_t, const share::ObLSID &, + const bool, common::ObAddr &leader, common::GlobalLearnerList &learner_list, common::ObIArray &addr_list) + { + int ret = OB_SUCCESS; + if (OB_FAIL(mock_addr_list(5/*addr_count*/, addr_list))) { + LOG_WARN("failed to mock addr list", K(ret)); + } + return ret; + } + + int get_ls_member_list_for_rebuild_mode(const uint64_t, const share::ObLSID &, + const bool, common::ObAddr &leader, common::GlobalLearnerList &learner_list, common::ObIArray &addr_list) + { + int ret = OB_SUCCESS; + if (OB_FAIL(mock_addr_list(5/*addr_count*/, addr_list))) { + LOG_WARN("failed to mock addr list", K(ret)); + } + return ret; + } + + int get_ls_member_list_for_replica_type_failed(const uint64_t, const share::ObLSID &, + const bool, common::ObAddr &leader, common::GlobalLearnerList &learner_list, common::ObIArray &addr_list) + { + int ret = OB_SUCCESS; + common::ObAddr addr; + if (OB_FAIL(mock_leader_addr(leader))) { + LOG_WARN("failed to mock addr", K(ret)); + } else if (OB_FAIL(mock_addr_list(3/*addr_count*/, addr_list))) { + LOG_WARN("failed to mock addr list", K(ret)); + } else if (OB_FAIL(mock_check_replica_type_addr(addr))) { + LOG_WARN("failed to mock rs recommand addr", K(ret)); + } else if (OB_FAIL(mock_learner_list(addr, learner_list))) { + LOG_WARN("failed to mock learner list", K(ret)); + } + return ret; + } + + int get_ls_leader_succ(const uint64_t, const share::ObLSID &, common::ObAddr &leader) + { + int ret = OB_SUCCESS; + if (OB_FAIL(mock_leader_addr(leader))) { + LOG_WARN("failed to mock leader addr", K(ret)); + } + return ret; + } + + int get_ls_succ(const share::ObLSID &, ObLSHandle &ls_handle) + { + int ret = OB_SUCCESS; + ls_handle.ls_ = &mock_ls_; + return ret; + } + + int get_ls_succ_with_palf(const share::ObLSID &, ObLSHandle &ls_handle) + { + int ret = OB_SUCCESS; + common::ObAddr parent; + if (OB_FAIL(mock_addr("192.168.1.1:1234", parent))) { + LOG_WARN("failed to mock addr", K(ret)); + } else { + mock_ls_.log_handler_.palf_handle_.palf_handle_impl_ = &mock_palf_handle_impl_; + mock_palf_handle_impl_.is_inited_ = true; + mock_palf_handle_impl_.config_mgr_.parent_ = parent; + ls_handle.ls_ = &mock_ls_; + } + return ret; + } + + int get_ls_fail(const share::ObLSID &, ObLSHandle &ls_handle) + { + int ret = OB_ERR_UNEXPECTED; + ls_handle.ls_ = &mock_ls_; + return ret; + } + + bool check_tenant_primary_true() + { + return true; + } + bool check_tenant_primary_false() + { + return false; + } + +public: + palf::PalfHandleImpl mock_palf_handle_impl_; + ObLS mock_ls_; +}; + +class MockLsMetaInfo +{ +public: + MockLsMetaInfo() {} + virtual ~MockLsMetaInfo() {} + int post_ls_meta_info_request_succ(const uint64_t, const ObStorageHASrcInfo &, + const share::ObLSID &, obrpc::ObFetchLSMetaInfoResp &res) + { + int ret = OB_SUCCESS; + ret = mock_valid_ls_meta(res); + return ret; + } + + int 
post_ls_meta_info_request_min_checkpoint(const uint64_t, const ObStorageHASrcInfo &, + const share::ObLSID &, obrpc::ObFetchLSMetaInfoResp &res) + { + int ret = OB_SUCCESS; + ret = mock_valid_ls_meta(res); + res.ls_meta_package_.ls_meta_.clog_checkpoint_scn_.set_min(); + return ret; + } + + int post_ls_meta_info_request_max_checkpoint(const uint64_t, const ObStorageHASrcInfo &, + const share::ObLSID &, obrpc::ObFetchLSMetaInfoResp &res) + { + int ret = OB_SUCCESS; + ret = mock_valid_ls_meta(res); + res.ls_meta_package_.ls_meta_.clog_checkpoint_scn_.set_max(); + return ret; + } + + int post_ls_meta_info_request_invalid_type_checkpoint(const uint64_t, const ObStorageHASrcInfo &, + const share::ObLSID &, obrpc::ObFetchLSMetaInfoResp &res) + { + int ret = OB_SUCCESS; + ret = mock_valid_ls_meta(res); + res.ls_meta_package_.ls_meta_.clog_checkpoint_scn_.set_max(); + return ret; + } + + int post_ls_meta_info_request_base_checkpoint(const uint64_t, const ObStorageHASrcInfo &, + const share::ObLSID &, obrpc::ObFetchLSMetaInfoResp &res) + { + int ret = OB_SUCCESS; + ret = mock_valid_ls_meta(res); + res.ls_meta_package_.ls_meta_.clog_checkpoint_scn_.set_base(); + return ret; + } + + int post_ls_meta_info_request_parent_checkpoint(const uint64_t, const ObStorageHASrcInfo &, + const share::ObLSID &, obrpc::ObFetchLSMetaInfoResp &res) + { + int ret = OB_SUCCESS; + ret = mock_valid_ls_meta(res); + res.ls_meta_package_.ls_meta_.clog_checkpoint_scn_.set_base(); + res.ls_meta_package_.ls_meta_.clog_checkpoint_scn_ = mock_ckpt_inc(res.ls_meta_package_.ls_meta_.clog_checkpoint_scn_); + return ret; + } + + int post_ls_meta_info_request_large_checkpoint(const uint64_t, const ObStorageHASrcInfo &, + const share::ObLSID &, obrpc::ObFetchLSMetaInfoResp &res) + { + int ret = OB_SUCCESS; + ret = mock_valid_ls_meta(res); + res.ls_meta_package_.ls_meta_.clog_checkpoint_scn_.set_base(); + res.ls_meta_package_.ls_meta_.clog_checkpoint_scn_ = mock_ckpt_inc(res.ls_meta_package_.ls_meta_.clog_checkpoint_scn_); + res.ls_meta_package_.ls_meta_.clog_checkpoint_scn_ = mock_ckpt_inc(res.ls_meta_package_.ls_meta_.clog_checkpoint_scn_); + return ret; + } +}; + +class TestChooseMigrationSourcePolicy : public ::testing::Test +{ +public: + TestChooseMigrationSourcePolicy(); + virtual ~TestChooseMigrationSourcePolicy(); + virtual void SetUp(); + virtual void TearDown(); +private: + ObStorageHAChooseSrcHelper choose_src_helper_; + MockStorageRpc storage_rpc_; + ObStorageRpcProxy storage_rpc_proxy_; + ObCommonRpcProxy common_rpc_proxy_; + MockLocalityManager locality_manager_; + MockGetMemberHelper member_helper_; +}; + +TestChooseMigrationSourcePolicy::TestChooseMigrationSourcePolicy() +{} + +TestChooseMigrationSourcePolicy::~TestChooseMigrationSourcePolicy() +{} + +void TestChooseMigrationSourcePolicy::SetUp() +{ + int ret = OB_SUCCESS; + const share::ObLSID ls_id(1); + share::SCN local_ls_checkpoint_scn; + local_ls_checkpoint_scn.set_base(); + common::ObAddr addr; + EXPECT_EQ(OB_SUCCESS, mock_dst_addr(addr)); + EXPECT_EQ(OB_SUCCESS, storage_rpc_.init(&storage_rpc_proxy_, addr, &common_rpc_proxy_)); + EXPECT_EQ(OB_SUCCESS, member_helper_.init(&storage_rpc_)); + EXPECT_EQ(OB_SUCCESS, locality_manager_.init_manager(addr)); +} + +void TestChooseMigrationSourcePolicy::TearDown() +{ + locality_manager_.destroy(); + storage_rpc_.destroy(); +} +// test checkpoint policy +// candidate addr: ["192.168.1.1:1234", "192.168.1.2:1234", "192.168.1.3:1234", "192.168.1.4:1234"] +// mock condition: +// 192.168.1.1:1234: checkpoint -> 
OB_MAX_SCN_TS_NS, type -> F
+// 192.168.1.2:1234, 192.168.1.3:1234, 192.168.1.4:1234: checkpoint -> OB_MIN_SCN_TS_NS, type -> F
+// output addr:192.168.1.1:1234
+TEST_F(TestChooseMigrationSourcePolicy, get_available_src_with_checkpoint_policy)
+{
+  MockLsMetaInfo ls_meta;
+  EXPECT_CALL(storage_rpc_, post_ls_meta_info_request(_, _, _, _))
+      .WillOnce(Invoke(&ls_meta, &MockLsMetaInfo::post_ls_meta_info_request_min_checkpoint))
+      .WillOnce(Invoke(&ls_meta, &MockLsMetaInfo::post_ls_meta_info_request_max_checkpoint))
+      .WillRepeatedly(Invoke(&ls_meta, &MockLsMetaInfo::post_ls_meta_info_request_min_checkpoint));
+  MockMemberList member_list;
+  EXPECT_CALL(member_helper_, get_ls_member_list_and_learner_list_(_, _, _, _, _, _))
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::get_ls_member_list_for_checkpoint));
+  EXPECT_CALL(member_helper_, get_ls_leader(_, _, _))
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::get_ls_leader_succ));
+  EXPECT_CALL(member_helper_, get_ls(_, _))
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::get_ls_succ));
+  const uint64_t tenant_id = 1001;
+  const share::ObLSID ls_id(1);
+  share::SCN local_ls_checkpoint_scn;
+  local_ls_checkpoint_scn.set_base();
+  ObMigrationOpArg mock_arg;
+  EXPECT_EQ(OB_SUCCESS, mock_migrate_arg_for_checkpoint(mock_arg));
+  ObStorageHASrcProvider::ChooseSourcePolicy policy;
+  EXPECT_EQ(OB_SUCCESS, get_checkpoint_policy(mock_arg, tenant_id, policy));
+  ObStorageHASrcInfo src_info;
+  EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(tenant_id, ls_id, local_ls_checkpoint_scn, mock_arg, policy, &storage_rpc_, &member_helper_));
+  EXPECT_EQ(OB_SUCCESS, choose_src_helper_.get_available_src(mock_arg, src_info));
+  common::ObAddr expect_addr;
+  EXPECT_EQ(OB_SUCCESS, mock_addr("192.168.1.1:1234", expect_addr));
+  EXPECT_EQ(expect_addr, src_info.src_addr_);
+}
+// test rs recommend policy
+// candidate addr: ["192.168.1.1:1234", "192.168.1.2:1234", "192.168.1.3:1234", "192.168.1.4:1234"]
+// mock condition:
+// 192.168.1.1:1234, 192.168.1.2:1234, 192.168.1.3:1234: checkpoint -> OB_BASE_SCN_TS_NS, type -> F
+// 192.168.1.4:1234: checkpoint -> OB_BASE_SCN_TS_NS, type -> R
+// recommend addr: 192.168.1.4:1234
+// output addr:192.168.1.4:1234
+TEST_F(TestChooseMigrationSourcePolicy, get_available_src_with_rs_recommend)
+{
+  MockLsMetaInfo ls_meta;
+  EXPECT_CALL(storage_rpc_, post_ls_meta_info_request(_, _, _, _))
+      .WillRepeatedly(Invoke(&ls_meta, &MockLsMetaInfo::post_ls_meta_info_request_succ));
+  MockMemberList member_list;
+  EXPECT_CALL(member_helper_, get_ls_member_list_and_learner_list_(_, _, _, _, _, _))
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::get_ls_member_list_for_rs_recommand));
+  EXPECT_CALL(member_helper_, get_ls_leader(_, _, _))
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::get_ls_leader_succ));
+  EXPECT_CALL(member_helper_, get_ls(_, _))
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::get_ls_fail));
+  const uint64_t tenant_id = 1001;
+  const share::ObLSID ls_id(1);
+  share::SCN local_ls_checkpoint_scn;
+  local_ls_checkpoint_scn.set_base();
+  ObMigrationOpArg mock_arg;
+  EXPECT_EQ(OB_SUCCESS, mock_migrate_arg_for_rs_recommand(mock_arg));
+  ObStorageHASrcProvider::ChooseSourcePolicy policy;
+  EXPECT_EQ(OB_SUCCESS, get_recommand_policy(mock_arg, tenant_id, policy));
+  ObStorageHASrcInfo src_info;
+  EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(tenant_id, ls_id, local_ls_checkpoint_scn, mock_arg, policy, &storage_rpc_, &member_helper_));
+  EXPECT_EQ(OB_SUCCESS,
choose_src_helper_.get_available_src(mock_arg, src_info));
+  common::ObAddr expect_addr;
+  EXPECT_EQ(OB_SUCCESS, mock_addr("192.168.1.4:1234", expect_addr));
+  EXPECT_EQ(expect_addr, src_info.src_addr_);
+}
+// test idc policy
+// candidate addr: ["192.168.1.1:1234", "192.168.1.2:1234", "192.168.1.3:1234", "192.168.1.4:1234", "192.168.1.5:1234"]
+// 192.168.1.1:1234 : idc -> idc1, region -> region1, checkpoint -> OB_BASE_SCN_TS_NS, type -> F, leader
+// 192.168.1.2:1234 : idc -> idc2, region -> region1, checkpoint -> OB_BASE_SCN_TS_NS, type -> F
+// 192.168.1.3:1234 : idc -> idc2, region -> region1, checkpoint -> OB_BASE_SCN_TS_NS, type -> F
+// 192.168.1.4:1234 : idc -> idc1, region -> region2, checkpoint -> OB_BASE_SCN_TS_NS, type -> F
+// 192.168.1.5:1234 : idc -> idc1, region -> region2, checkpoint -> OB_BASE_SCN_TS_NS, type -> F
+// output addr:192.168.1.1:1234
+TEST_F(TestChooseMigrationSourcePolicy, idc_mode_idc_leader)
+{
+  MockLsMetaInfo ls_meta;
+  EXPECT_CALL(storage_rpc_, post_ls_meta_info_request(_, _, _, _))
+      .WillRepeatedly(Invoke(&ls_meta, &MockLsMetaInfo::post_ls_meta_info_request_succ));
+  MockMemberList member_list;
+  EXPECT_CALL(member_helper_, get_ls_member_list_and_learner_list_(_, _, _, _, _, _))
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::get_ls_member_list_for_idc_mode_idc_leader));
+  EXPECT_CALL(member_helper_, get_ls_leader(_, _, _))
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::get_ls_leader_succ));
+  EXPECT_CALL(member_helper_, check_tenant_primary())
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::check_tenant_primary_true));
+  const uint64_t tenant_id = 1001;
+  const share::ObLSID ls_id(1);
+  share::SCN local_ls_checkpoint_scn;
+  local_ls_checkpoint_scn.set_min();
+  ObMigrationOpArg mock_arg;
+  EXPECT_EQ(OB_SUCCESS, mock_migrate_arg_for_location(mock_arg));
+  ObStorageHASrcProvider::ChooseSourcePolicy policy;
+  EXPECT_EQ(OB_SUCCESS, get_idc_policy(mock_arg, tenant_id, policy));
+  ObStorageHASrcInfo src_info;
+  EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(tenant_id, ls_id, local_ls_checkpoint_scn, mock_arg, policy, &storage_rpc_, &member_helper_));
+  EXPECT_EQ(OB_SUCCESS, mock_locality_manager(MOCKLOCALITY::IDC_MODE_IDC_LEADER, locality_manager_));
+  EXPECT_EQ(ObStorageHASrcProvider::ChooseSourcePolicy::IDC, choose_src_helper_.get_provider()->get_policy_type());
+  static_cast<ObMigrationSrcByLocationProvider *>(choose_src_helper_.get_provider())->set_locality_manager_(&locality_manager_);
+  EXPECT_EQ(OB_SUCCESS, choose_src_helper_.get_available_src(mock_arg, src_info));
+  common::ObAddr expect_addr;
+  EXPECT_EQ(OB_SUCCESS, mock_addr("192.168.1.1:1234", expect_addr));
+  EXPECT_EQ(expect_addr, src_info.src_addr_);
+}
+// test idc policy
+// candidate addr: ["192.168.1.1:1234", "192.168.1.2:1234", "192.168.1.3:1234", "192.168.1.4:1234", "192.168.1.5:1234"]
+// 192.168.1.1:1234 : idc -> idc1, region -> region1, checkpoint -> OB_BASE_SCN_TS_NS, type -> F, leader
+// 192.168.1.2:1234 : idc -> idc1, region -> region1, checkpoint -> OB_BASE_SCN_TS_NS, type -> F
+// 192.168.1.3:1234 : idc -> idc2, region -> region1, checkpoint -> OB_BASE_SCN_TS_NS, type -> F
+// 192.168.1.4:1234 : idc -> idc2, region -> region1, checkpoint -> OB_BASE_SCN_TS_NS, type -> F
+// 192.168.1.5:1234 : idc -> idc1, region -> region2, checkpoint -> OB_BASE_SCN_TS_NS, type -> F
+// output addr:192.168.1.2:1234
+TEST_F(TestChooseMigrationSourcePolicy, idc_mode_idc_follower)
+{
+  MockLsMetaInfo ls_meta;
+  EXPECT_CALL(storage_rpc_, post_ls_meta_info_request(_, _, _, _))
+      .WillRepeatedly(Invoke(&ls_meta,
&MockLsMetaInfo::post_ls_meta_info_request_succ));
+  MockMemberList member_list;
+  EXPECT_CALL(member_helper_, get_ls_member_list_and_learner_list_(_, _, _, _, _, _))
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::get_ls_member_list_for_idc_mode_idc_follower));
+  EXPECT_CALL(member_helper_, get_ls_leader(_, _, _))
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::get_ls_leader_succ));
+  EXPECT_CALL(member_helper_, check_tenant_primary())
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::check_tenant_primary_true));
+  const uint64_t tenant_id = 1001;
+  const share::ObLSID ls_id(1);
+  share::SCN local_ls_checkpoint_scn;
+  local_ls_checkpoint_scn.set_min();
+  ObMigrationOpArg mock_arg;
+  EXPECT_EQ(OB_SUCCESS, mock_migrate_arg_for_location(mock_arg));
+  ObStorageHASrcProvider::ChooseSourcePolicy policy;
+  EXPECT_EQ(OB_SUCCESS, get_idc_policy(mock_arg, tenant_id, policy));
+  ObStorageHASrcInfo src_info;
+  EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(tenant_id, ls_id, local_ls_checkpoint_scn, mock_arg, policy, &storage_rpc_, &member_helper_));
+  EXPECT_EQ(OB_SUCCESS, mock_locality_manager(MOCKLOCALITY::IDC_MODE_IDC_FOLLOWER, locality_manager_));
+  EXPECT_EQ(ObStorageHASrcProvider::ChooseSourcePolicy::IDC, choose_src_helper_.get_provider()->get_policy_type());
+  static_cast<ObMigrationSrcByLocationProvider *>(choose_src_helper_.get_provider())->set_locality_manager_(&locality_manager_);
+  EXPECT_EQ(OB_SUCCESS, choose_src_helper_.get_available_src(mock_arg, src_info));
+  common::ObAddr expect_addr;
+  EXPECT_EQ(OB_SUCCESS, mock_addr("192.168.1.2:1234", expect_addr));
+  EXPECT_EQ(expect_addr, src_info.src_addr_);
+}
+// test idc policy
+// candidate addr: ["192.168.1.1:1234", "192.168.1.2:1234", "192.168.1.3:1234"]
+// 192.168.1.1:1234 : idc -> idc2, region -> region1, checkpoint -> OB_BASE_SCN_TS_NS, type -> F, leader
+// 192.168.1.2:1234 : idc -> idc1, region -> region2, checkpoint -> OB_BASE_SCN_TS_NS, type -> F
+// 192.168.1.3:1234 : idc -> idc1, region -> region2, checkpoint -> OB_BASE_SCN_TS_NS, type -> F
+// output addr:192.168.1.1:1234
+TEST_F(TestChooseMigrationSourcePolicy, idc_mode_region_leader)
+{
+  MockLsMetaInfo ls_meta;
+  EXPECT_CALL(storage_rpc_, post_ls_meta_info_request(_, _, _, _))
+      .WillRepeatedly(Invoke(&ls_meta, &MockLsMetaInfo::post_ls_meta_info_request_succ));
+  MockMemberList member_list;
+  EXPECT_CALL(member_helper_, get_ls_member_list_and_learner_list_(_, _, _, _, _, _))
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::get_ls_member_list_for_idc_mode_region_leader));
+  EXPECT_CALL(member_helper_, get_ls_leader(_, _, _))
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::get_ls_leader_succ));
+  EXPECT_CALL(member_helper_, check_tenant_primary())
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::check_tenant_primary_true));
+  const uint64_t tenant_id = 1001;
+  const share::ObLSID ls_id(1);
+  share::SCN local_ls_checkpoint_scn;
+  local_ls_checkpoint_scn.set_min();
+  ObMigrationOpArg mock_arg;
+  EXPECT_EQ(OB_SUCCESS, mock_migrate_arg_for_location(mock_arg));
+  ObStorageHASrcProvider::ChooseSourcePolicy policy;
+  EXPECT_EQ(OB_SUCCESS, get_idc_policy(mock_arg, tenant_id, policy));
+  ObStorageHASrcInfo src_info;
+  EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(tenant_id, ls_id, local_ls_checkpoint_scn, mock_arg, policy, &storage_rpc_, &member_helper_));
+  EXPECT_EQ(OB_SUCCESS, mock_locality_manager(MOCKLOCALITY::IDC_MODE_REGION_LEADER, locality_manager_));
+  EXPECT_EQ(ObStorageHASrcProvider::ChooseSourcePolicy::IDC, choose_src_helper_.get_provider()->get_policy_type());
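+  // The location provider normally reads idc/region from the real ObLocalityManager; the test
+  // injects MockLocalityManager (assumed to be defined in test_migration.h) so each candidate
+  // address maps to the locality table in the comment above.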
+  static_cast<ObMigrationSrcByLocationProvider *>(choose_src_helper_.get_provider())->set_locality_manager_(&locality_manager_);
+  EXPECT_EQ(OB_SUCCESS, choose_src_helper_.get_available_src(mock_arg, src_info));
+  common::ObAddr expect_addr;
+  EXPECT_EQ(OB_SUCCESS, mock_addr("192.168.1.1:1234", expect_addr));
+  EXPECT_EQ(expect_addr, src_info.src_addr_);
+}
+// test idc policy
+// candidate addr: ["192.168.1.1:1234", "192.168.1.2:1234", "192.168.1.3:1234"]
+// 192.168.1.1:1234 : idc -> idc2, region -> region1, checkpoint -> OB_BASE_SCN_TS_NS, type -> F, leader
+// 192.168.1.2:1234 : idc -> idc2, region -> region1, checkpoint -> OB_BASE_SCN_TS_NS, type -> F
+// 192.168.1.3:1234 : idc -> idc1, region -> region2, checkpoint -> OB_BASE_SCN_TS_NS, type -> F
+// output addr:192.168.1.2:1234
+TEST_F(TestChooseMigrationSourcePolicy, idc_mode_region_follower)
+{
+  MockLsMetaInfo ls_meta;
+  EXPECT_CALL(storage_rpc_, post_ls_meta_info_request(_, _, _, _))
+      .WillRepeatedly(Invoke(&ls_meta, &MockLsMetaInfo::post_ls_meta_info_request_succ));
+  MockMemberList member_list;
+  EXPECT_CALL(member_helper_, get_ls_member_list_and_learner_list_(_, _, _, _, _, _))
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::get_ls_member_list_for_idc_mode_region_follower));
+  EXPECT_CALL(member_helper_, get_ls_leader(_, _, _))
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::get_ls_leader_succ));
+  EXPECT_CALL(member_helper_, check_tenant_primary())
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::check_tenant_primary_true));
+  const uint64_t tenant_id = 1001;
+  const share::ObLSID ls_id(1);
+  share::SCN local_ls_checkpoint_scn;
+  local_ls_checkpoint_scn.set_min();
+  ObMigrationOpArg mock_arg;
+  EXPECT_EQ(OB_SUCCESS, mock_migrate_arg_for_location(mock_arg));
+  ObStorageHASrcProvider::ChooseSourcePolicy policy;
+  EXPECT_EQ(OB_SUCCESS, get_idc_policy(mock_arg, tenant_id, policy));
+  ObStorageHASrcInfo src_info;
+  EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(tenant_id, ls_id, local_ls_checkpoint_scn, mock_arg, policy, &storage_rpc_, &member_helper_));
+  EXPECT_EQ(OB_SUCCESS, mock_locality_manager(MOCKLOCALITY::IDC_MODE_REGION_FOLLOWER, locality_manager_));
+  EXPECT_EQ(ObStorageHASrcProvider::ChooseSourcePolicy::IDC, choose_src_helper_.get_provider()->get_policy_type());
+  static_cast<ObMigrationSrcByLocationProvider *>(choose_src_helper_.get_provider())->set_locality_manager_(&locality_manager_);
+  EXPECT_EQ(OB_SUCCESS, choose_src_helper_.get_available_src(mock_arg, src_info));
+  common::ObAddr expect_addr;
+  EXPECT_EQ(OB_SUCCESS, mock_addr("192.168.1.2:1234", expect_addr));
+  EXPECT_EQ(expect_addr, src_info.src_addr_);
+}
+// test idc policy
+// candidate addr: ["192.168.1.1:1234"]
+// 192.168.1.1:1234 : idc -> idc1, region -> region2, checkpoint -> OB_BASE_SCN_TS_NS, type -> F, leader
+// output addr:192.168.1.1:1234
+TEST_F(TestChooseMigrationSourcePolicy, idc_mode_diff_region_leader)
+{
+  MockLsMetaInfo ls_meta;
+  EXPECT_CALL(storage_rpc_, post_ls_meta_info_request(_, _, _, _))
+      .WillRepeatedly(Invoke(&ls_meta, &MockLsMetaInfo::post_ls_meta_info_request_succ));
+  MockMemberList member_list;
+  EXPECT_CALL(member_helper_, get_ls_member_list_and_learner_list_(_, _, _, _, _, _))
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::get_ls_member_list_for_idc_mode_diff_region_leader));
+  EXPECT_CALL(member_helper_, get_ls_leader(_, _, _))
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::get_ls_leader_succ));
+  EXPECT_CALL(member_helper_, check_tenant_primary())
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::check_tenant_primary_true));
+  const uint64_t
tenant_id = 1001;
+  const share::ObLSID ls_id(1);
+  share::SCN local_ls_checkpoint_scn;
+  local_ls_checkpoint_scn.set_min();
+  ObMigrationOpArg mock_arg;
+  EXPECT_EQ(OB_SUCCESS, mock_migrate_arg_for_location(mock_arg));
+  ObStorageHASrcProvider::ChooseSourcePolicy policy;
+  EXPECT_EQ(OB_SUCCESS, get_idc_policy(mock_arg, tenant_id, policy));
+  ObStorageHASrcInfo src_info;
+  EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(tenant_id, ls_id, local_ls_checkpoint_scn, mock_arg, policy, &storage_rpc_, &member_helper_));
+  EXPECT_EQ(OB_SUCCESS, mock_locality_manager(MOCKLOCALITY::IDC_MODE_DIFF_REGION_LEADER, locality_manager_));
+  EXPECT_EQ(ObStorageHASrcProvider::ChooseSourcePolicy::IDC, choose_src_helper_.get_provider()->get_policy_type());
+  static_cast<ObMigrationSrcByLocationProvider *>(choose_src_helper_.get_provider())->set_locality_manager_(&locality_manager_);
+  EXPECT_EQ(OB_SUCCESS, choose_src_helper_.get_available_src(mock_arg, src_info));
+  common::ObAddr expect_addr;
+  EXPECT_EQ(OB_SUCCESS, mock_addr("192.168.1.1:1234", expect_addr));
+  EXPECT_EQ(expect_addr, src_info.src_addr_);
+}
+// test idc policy
+// candidate addr: ["192.168.1.1:1234", "192.168.1.2:1234"]
+// 192.168.1.1:1234 : idc -> idc1, region -> region2, checkpoint -> OB_BASE_SCN_TS_NS, type -> F, leader
+// 192.168.1.2:1234 : idc -> idc1, region -> region2, checkpoint -> OB_BASE_SCN_TS_NS, type -> F
+// output addr:192.168.1.2:1234
+TEST_F(TestChooseMigrationSourcePolicy, idc_mode_diff_region_follower)
+{
+  MockLsMetaInfo ls_meta;
+  EXPECT_CALL(storage_rpc_, post_ls_meta_info_request(_, _, _, _))
+      .WillRepeatedly(Invoke(&ls_meta, &MockLsMetaInfo::post_ls_meta_info_request_succ));
+  MockMemberList member_list;
+  EXPECT_CALL(member_helper_, get_ls_member_list_and_learner_list_(_, _, _, _, _, _))
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::get_ls_member_list_for_idc_mode_diff_region_follower));
+  EXPECT_CALL(member_helper_, get_ls_leader(_, _, _))
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::get_ls_leader_succ));
+  EXPECT_CALL(member_helper_, check_tenant_primary())
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::check_tenant_primary_true));
+  const uint64_t tenant_id = 1001;
+  const share::ObLSID ls_id(1);
+  share::SCN local_ls_checkpoint_scn;
+  local_ls_checkpoint_scn.set_min();
+  ObMigrationOpArg mock_arg;
+  EXPECT_EQ(OB_SUCCESS, mock_migrate_arg_for_location(mock_arg));
+  ObStorageHASrcProvider::ChooseSourcePolicy policy;
+  EXPECT_EQ(OB_SUCCESS, get_idc_policy(mock_arg, tenant_id, policy));
+  ObStorageHASrcInfo src_info;
+  EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(tenant_id, ls_id, local_ls_checkpoint_scn, mock_arg, policy, &storage_rpc_, &member_helper_));
+  EXPECT_EQ(OB_SUCCESS, mock_locality_manager(MOCKLOCALITY::IDC_MODE_DIFF_REGION_FOLLOWER, locality_manager_));
+  EXPECT_EQ(ObStorageHASrcProvider::ChooseSourcePolicy::IDC, choose_src_helper_.get_provider()->get_policy_type());
+  static_cast<ObMigrationSrcByLocationProvider *>(choose_src_helper_.get_provider())->set_locality_manager_(&locality_manager_);
+  EXPECT_EQ(OB_SUCCESS, choose_src_helper_.get_available_src(mock_arg, src_info));
+  common::ObAddr expect_addr;
+  EXPECT_EQ(OB_SUCCESS, mock_addr("192.168.1.2:1234", expect_addr));
+  EXPECT_EQ(expect_addr, src_info.src_addr_);
+}
+// test region policy
+// 192.168.1.1:1234 : idc -> idc1, region -> region1, checkpoint -> OB_BASE_SCN_TS_NS, type -> F, leader
+// 192.168.1.2:1234 : idc -> idc2, region -> region1, checkpoint -> OB_BASE_SCN_TS_NS, type -> F
+// 192.168.1.3:1234 : idc -> idc1, region -> region2, checkpoint -> OB_BASE_SCN_TS_NS, type -> F
+//
192.168.1.4:1234 : idc -> idc1, region -> region2, checkpoint -> OB_BASE_SCN_TS_NS, type -> F
+// output addr:192.168.1.2:1234
+TEST_F(TestChooseMigrationSourcePolicy, region_mode_region_follower)
+{
+  MockLsMetaInfo ls_meta;
+  EXPECT_CALL(storage_rpc_, post_ls_meta_info_request(_, _, _, _))
+      .WillRepeatedly(Invoke(&ls_meta, &MockLsMetaInfo::post_ls_meta_info_request_succ));
+  MockMemberList member_list;
+  EXPECT_CALL(member_helper_, get_ls_member_list_and_learner_list_(_, _, _, _, _, _))
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::get_ls_member_list_for_region_mode_region_follower));
+  EXPECT_CALL(member_helper_, get_ls_leader(_, _, _))
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::get_ls_leader_succ));
+  EXPECT_CALL(member_helper_, check_tenant_primary())
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::check_tenant_primary_true));
+  const uint64_t tenant_id = 1001;
+  const share::ObLSID ls_id(1);
+  share::SCN local_ls_checkpoint_scn;
+  local_ls_checkpoint_scn.set_min();
+  ObMigrationOpArg mock_arg;
+  EXPECT_EQ(OB_SUCCESS, mock_migrate_arg_for_location(mock_arg));
+  ObStorageHASrcProvider::ChooseSourcePolicy policy;
+  EXPECT_EQ(OB_SUCCESS, get_region_policy(mock_arg, tenant_id, policy));
+  ObStorageHASrcInfo src_info;
+  EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(tenant_id, ls_id, local_ls_checkpoint_scn, mock_arg, policy, &storage_rpc_, &member_helper_));
+  EXPECT_EQ(OB_SUCCESS, mock_locality_manager(MOCKLOCALITY::REGION_MODE_REGION_FOLLOWER, locality_manager_));
+  EXPECT_EQ(ObStorageHASrcProvider::ChooseSourcePolicy::REGION, choose_src_helper_.get_provider()->get_policy_type());
+  static_cast<ObMigrationSrcByLocationProvider *>(choose_src_helper_.get_provider())->set_locality_manager_(&locality_manager_);
+  EXPECT_EQ(OB_SUCCESS, choose_src_helper_.get_available_src(mock_arg, src_info));
+  common::ObAddr expect_addr;
+  EXPECT_EQ(OB_SUCCESS, mock_addr("192.168.1.2:1234", expect_addr));
+  EXPECT_EQ(expect_addr, src_info.src_addr_);
+}
+// test region policy
+// candidate addr: ["192.168.1.1:1234", "192.168.1.2:1234", "192.168.1.3:1234"]
+// 192.168.1.1:1234 : idc -> idc2, region -> region1, checkpoint -> OB_BASE_SCN_TS_NS, type -> F, leader
+// 192.168.1.2:1234 : idc -> idc1, region -> region2, checkpoint -> OB_BASE_SCN_TS_NS, type -> F
+// 192.168.1.3:1234 : idc -> idc1, region -> region2, checkpoint -> OB_BASE_SCN_TS_NS, type -> F
+// output addr:192.168.1.1:1234
+TEST_F(TestChooseMigrationSourcePolicy, region_mode_region_leader)
+{
+  MockLsMetaInfo ls_meta;
+  EXPECT_CALL(storage_rpc_, post_ls_meta_info_request(_, _, _, _))
+      .WillRepeatedly(Invoke(&ls_meta, &MockLsMetaInfo::post_ls_meta_info_request_succ));
+  MockMemberList member_list;
+  EXPECT_CALL(member_helper_, get_ls_member_list_and_learner_list_(_, _, _, _, _, _))
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::get_ls_member_list_for_region_mode_region_leader));
+  EXPECT_CALL(member_helper_, get_ls_leader(_, _, _))
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::get_ls_leader_succ));
+  EXPECT_CALL(member_helper_, check_tenant_primary())
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::check_tenant_primary_true));
+  const uint64_t tenant_id = 1001;
+  const share::ObLSID ls_id(1);
+  share::SCN local_ls_checkpoint_scn;
+  local_ls_checkpoint_scn.set_min();
+  ObMigrationOpArg mock_arg;
+  EXPECT_EQ(OB_SUCCESS, mock_migrate_arg_for_location(mock_arg));
+  ObStorageHASrcProvider::ChooseSourcePolicy policy;
+  EXPECT_EQ(OB_SUCCESS, get_region_policy(mock_arg, tenant_id, policy));
+  ObStorageHASrcInfo src_info;
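+  // init() instantiates the provider implementation that matches the policy; REGION, like IDC,
+  // is served by ObMigrationSrcByLocationProvider, which prefers a same-region follower and
+  // only falls back to the leader when no same-region follower qualifies.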
+  EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(tenant_id, ls_id, local_ls_checkpoint_scn, mock_arg, policy, &storage_rpc_, &member_helper_));
+  EXPECT_EQ(OB_SUCCESS, mock_locality_manager(MOCKLOCALITY::REGION_MODE_REGION_LEADER, locality_manager_));
+  EXPECT_EQ(ObStorageHASrcProvider::ChooseSourcePolicy::REGION, choose_src_helper_.get_provider()->get_policy_type());
+  static_cast<ObMigrationSrcByLocationProvider *>(choose_src_helper_.get_provider())->set_locality_manager_(&locality_manager_);
+  EXPECT_EQ(OB_SUCCESS, choose_src_helper_.get_available_src(mock_arg, src_info));
+  common::ObAddr expect_addr;
+  EXPECT_EQ(OB_SUCCESS, mock_addr("192.168.1.1:1234", expect_addr));
+  EXPECT_EQ(expect_addr, src_info.src_addr_);
+}
+// test region policy
+// candidate addr: ["192.168.1.1:1234", "192.168.1.2:1234"]
+// 192.168.1.1:1234 : idc -> idc1, region -> region2, checkpoint -> OB_BASE_SCN_TS_NS, type -> F, leader
+// 192.168.1.2:1234 : idc -> idc1, region -> region2, checkpoint -> OB_BASE_SCN_TS_NS, type -> F
+// output addr:192.168.1.2:1234
+TEST_F(TestChooseMigrationSourcePolicy, region_mode_diff_region_follower)
+{
+  MockLsMetaInfo ls_meta;
+  EXPECT_CALL(storage_rpc_, post_ls_meta_info_request(_, _, _, _))
+      .WillRepeatedly(Invoke(&ls_meta, &MockLsMetaInfo::post_ls_meta_info_request_succ));
+  MockMemberList member_list;
+  EXPECT_CALL(member_helper_, get_ls_member_list_and_learner_list_(_, _, _, _, _, _))
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::get_ls_member_list_for_region_mode_diff_region_follower));
+  EXPECT_CALL(member_helper_, get_ls_leader(_, _, _))
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::get_ls_leader_succ));
+  EXPECT_CALL(member_helper_, check_tenant_primary())
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::check_tenant_primary_true));
+  const uint64_t tenant_id = 1001;
+  const share::ObLSID ls_id(1);
+  share::SCN local_ls_checkpoint_scn;
+  local_ls_checkpoint_scn.set_min();
+  ObMigrationOpArg mock_arg;
+  EXPECT_EQ(OB_SUCCESS, mock_migrate_arg_for_location(mock_arg));
+  ObStorageHASrcProvider::ChooseSourcePolicy policy;
+  EXPECT_EQ(OB_SUCCESS, get_region_policy(mock_arg, tenant_id, policy));
+  ObStorageHASrcInfo src_info;
+  EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(tenant_id, ls_id, local_ls_checkpoint_scn, mock_arg, policy, &storage_rpc_, &member_helper_));
+  EXPECT_EQ(OB_SUCCESS, mock_locality_manager(MOCKLOCALITY::REGION_MODE_DIFF_REGION_FOLLOWER, locality_manager_));
+  EXPECT_EQ(ObStorageHASrcProvider::ChooseSourcePolicy::REGION, choose_src_helper_.get_provider()->get_policy_type());
+  static_cast<ObMigrationSrcByLocationProvider *>(choose_src_helper_.get_provider())->set_locality_manager_(&locality_manager_);
+  EXPECT_EQ(OB_SUCCESS, choose_src_helper_.get_available_src(mock_arg, src_info));
+  common::ObAddr expect_addr;
+  EXPECT_EQ(OB_SUCCESS, mock_addr("192.168.1.2:1234", expect_addr));
+  EXPECT_EQ(expect_addr, src_info.src_addr_);
+}
+// test region policy
+// candidate addr: ["192.168.1.1:1234"]
+// 192.168.1.1:1234 : idc -> idc1, region -> region2, checkpoint -> OB_BASE_SCN_TS_NS, type -> F, leader
+// output addr:192.168.1.1:1234
+TEST_F(TestChooseMigrationSourcePolicy, region_mode_diff_region_leader)
+{
+  MockLsMetaInfo ls_meta;
+  EXPECT_CALL(storage_rpc_, post_ls_meta_info_request(_, _, _, _))
+      .WillRepeatedly(Invoke(&ls_meta, &MockLsMetaInfo::post_ls_meta_info_request_succ));
+  MockMemberList member_list;
+  EXPECT_CALL(member_helper_, get_ls_member_list_and_learner_list_(_, _, _, _, _, _))
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::get_ls_member_list_for_region_mode_diff_region_leader));
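+  // Scenario: the only candidate sits in a different region from the destination, so the
+  // provider has no same-region choice and must settle for the leader.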
+  EXPECT_CALL(member_helper_, get_ls_leader(_, _, _))
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::get_ls_leader_succ));
+  EXPECT_CALL(member_helper_, check_tenant_primary())
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::check_tenant_primary_true));
+  const uint64_t tenant_id = 1001;
+  const share::ObLSID ls_id(1);
+  share::SCN local_ls_checkpoint_scn;
+  local_ls_checkpoint_scn.set_min();
+  ObMigrationOpArg mock_arg;
+  EXPECT_EQ(OB_SUCCESS, mock_migrate_arg_for_location(mock_arg));
+  ObStorageHASrcProvider::ChooseSourcePolicy policy;
+  EXPECT_EQ(OB_SUCCESS, get_region_policy(mock_arg, tenant_id, policy));
+  ObStorageHASrcInfo src_info;
+  EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(tenant_id, ls_id, local_ls_checkpoint_scn, mock_arg, policy, &storage_rpc_, &member_helper_));
+  EXPECT_EQ(OB_SUCCESS, mock_locality_manager(MOCKLOCALITY::REGION_MODE_DIFF_REGION_LEADER, locality_manager_));
+  EXPECT_EQ(ObStorageHASrcProvider::ChooseSourcePolicy::REGION, choose_src_helper_.get_provider()->get_policy_type());
+  static_cast<ObMigrationSrcByLocationProvider *>(choose_src_helper_.get_provider())->set_locality_manager_(&locality_manager_);
+  EXPECT_EQ(OB_SUCCESS, choose_src_helper_.get_available_src(mock_arg, src_info));
+  common::ObAddr expect_addr;
+  EXPECT_EQ(OB_SUCCESS, mock_addr("192.168.1.1:1234", expect_addr));
+  EXPECT_EQ(expect_addr, src_info.src_addr_);
+}
+// test rebuild policy
+// candidate addr: ["192.168.1.1:1234", "192.168.1.2:1234", "192.168.1.3:1234", "192.168.1.4:1234", "192.168.1.5:1234"]
+// 192.168.1.1:1234 : idc -> idc1, region -> region1, checkpoint -> OB_BASE_SCN_TS_NS, type -> F, leader
+// 192.168.1.2:1234 : idc -> idc2, region -> region1, checkpoint -> OB_BASE_SCN_TS_NS, type -> F
+// 192.168.1.3:1234 : idc -> idc2, region -> region1, checkpoint -> OB_BASE_SCN_TS_NS, type -> F
+// 192.168.1.4:1234 : idc -> idc1, region -> region2, checkpoint -> OB_BASE_SCN_TS_NS, type -> F
+// 192.168.1.5:1234 : idc -> idc1, region -> region2, checkpoint -> OB_BASE_SCN_TS_NS, type -> F
+// output addr:192.168.1.1:1234
+TEST_F(TestChooseMigrationSourcePolicy, get_available_src_with_rebuild)
+{
+  MockLsMetaInfo ls_meta;
+  EXPECT_CALL(storage_rpc_, post_ls_meta_info_request(_, _, _, _))
+      .WillRepeatedly(Invoke(&ls_meta, &MockLsMetaInfo::post_ls_meta_info_request_succ));
+  MockMemberList member_list;
+  EXPECT_CALL(member_helper_, get_ls_member_list_and_learner_list_(_, _, _, _, _, _))
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::get_ls_member_list_for_rebuild_mode));
+  EXPECT_CALL(member_helper_, get_ls_leader(_, _, _))
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::get_ls_leader_succ));
+  EXPECT_CALL(member_helper_, check_tenant_primary())
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::check_tenant_primary_true));
+  const uint64_t tenant_id = 1001;
+  const share::ObLSID ls_id(1);
+  share::SCN local_ls_checkpoint_scn;
+  local_ls_checkpoint_scn.set_min();
+  ObMigrationOpArg mock_arg;
+  EXPECT_EQ(OB_SUCCESS, mock_migrate_arg_for_rebuild(mock_arg));
+  ObStorageHASrcProvider::ChooseSourcePolicy policy;
+  EXPECT_EQ(OB_SUCCESS, get_idc_policy(mock_arg, tenant_id, policy));
+  ObStorageHASrcInfo src_info;
+  EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(tenant_id, ls_id, local_ls_checkpoint_scn, mock_arg, policy, &storage_rpc_, &member_helper_));
+  EXPECT_EQ(OB_SUCCESS, mock_locality_manager(MOCKLOCALITY::IDC_MODE_IDC_LEADER, locality_manager_));
+  EXPECT_EQ(ObStorageHASrcProvider::ChooseSourcePolicy::IDC, choose_src_helper_.get_provider()->get_policy_type());
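+  // Rebuild reuses the location-based selection; note that is_valid() only enforces
+  // src != dst for MIGRATE_LS_OP, so sourcing from the replica's own server stays legal
+  // for REBUILD_LS_OP.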
+// test rebuild policy
+// candidate addr: ["192.168.1.1:1234", "192.168.1.2:1234", "192.168.1.3:1234", "192.168.1.4:1234", "192.168.1.5:1234"]
+// 192.168.1.1:1234 : idc -> idc1, region -> region1, checkpoint -> OB_BASE_SCN_TS_NS, type -> F, leader
+// 192.168.1.2:1234 : idc -> idc2, region -> region1, checkpoint -> OB_BASE_SCN_TS_NS, type -> F
+// 192.168.1.3:1234 : idc -> idc2, region -> region1, checkpoint -> OB_BASE_SCN_TS_NS, type -> F
+// 192.168.1.4:1234 : idc -> idc1, region -> region2, checkpoint -> OB_BASE_SCN_TS_NS, type -> F
+// 192.168.1.5:1234 : idc -> idc1, region -> region2, checkpoint -> OB_BASE_SCN_TS_NS, type -> F
+// output addr: 192.168.1.1:1234
+TEST_F(TestChooseMigrationSourcePolicy, get_available_src_with_rebuild)
+{
+  MockLsMetaInfo ls_meta;
+  EXPECT_CALL(storage_rpc_, post_ls_meta_info_request(_, _, _, _))
+      .WillRepeatedly(Invoke(&ls_meta, &MockLsMetaInfo::post_ls_meta_info_request_succ));
+  MockMemberList member_list;
+  EXPECT_CALL(member_helper_, get_ls_member_list_and_learner_list_(_, _, _, _, _, _))
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::get_ls_member_list_for_rebuild_mode));
+  EXPECT_CALL(member_helper_, get_ls_leader(_, _, _))
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::get_ls_leader_succ));
+  EXPECT_CALL(member_helper_, check_tenant_primary())
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::check_tenant_primary_true));
+  const uint64_t tenant_id = 1001;
+  const share::ObLSID ls_id(1);
+  share::SCN local_ls_checkpoint_scn;
+  local_ls_checkpoint_scn.set_min();
+  ObMigrationOpArg mock_arg;
+  EXPECT_EQ(OB_SUCCESS, mock_migrate_arg_for_rebuild(mock_arg));
+  ObStorageHASrcProvider::ChooseSourcePolicy policy;
+  EXPECT_EQ(OB_SUCCESS, get_idc_policy(mock_arg, tenant_id, policy));
+  ObStorageHASrcInfo src_info;
+  EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(tenant_id, ls_id, local_ls_checkpoint_scn, mock_arg, policy, &storage_rpc_, &member_helper_));
+  EXPECT_EQ(OB_SUCCESS, mock_locality_manager(MOCKLOCALITY::IDC_MODE_IDC_LEADER, locality_manager_));
+  EXPECT_EQ(ObStorageHASrcProvider::ChooseSourcePolicy::IDC, choose_src_helper_.get_provider()->get_policy_type());
+  static_cast<ObMigrationSrcByLocationProvider *>(choose_src_helper_.get_provider())->set_locality_manager_(&locality_manager_);
+  EXPECT_EQ(OB_SUCCESS, choose_src_helper_.get_available_src(mock_arg, src_info));
+  common::ObAddr expect_addr;
+  EXPECT_EQ(OB_SUCCESS, mock_addr("192.168.1.1:1234", expect_addr));
+  EXPECT_EQ(expect_addr, src_info.src_addr_);
+}
+
+TEST_F(TestChooseMigrationSourcePolicy, member_helper_get_member_list)
+{
+  MockMemberList member_list;
+  EXPECT_CALL(member_helper_, get_ls_member_list_and_learner_list_(_, _, _, _, _, _))
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::get_ls_member_list_for_member_helper));
+  EXPECT_CALL(member_helper_, get_ls_leader(_, _, _))
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::get_ls_leader_succ));
+  const uint64_t tenant_id = 1001;
+  const share::ObLSID ls_id(1);
+  common::ObArray<common::ObAddr> addr_list;
+  EXPECT_EQ(OB_SUCCESS, member_helper_.get_ls_member_list(tenant_id, ls_id, addr_list));
+}
+// test ObMigrationSrcByLocationProvider init fail
+TEST_F(TestChooseMigrationSourcePolicy, src_provider_init_idc_fail)
+{
+  const uint64_t tenant_id = 1001;
+  const share::ObLSID ls_id(1);
+  share::SCN local_ls_checkpoint_scn;
+  local_ls_checkpoint_scn.set_min();
+  ObMigrationOpArg mock_arg;
+  EXPECT_EQ(OB_SUCCESS, mock_migrate_arg_init_fail(mock_arg));
+  ObStorageHASrcProvider::ChooseSourcePolicy policy;
+  EXPECT_EQ(OB_SUCCESS, get_idc_policy(mock_arg, tenant_id, policy));
+  ObStorageHASrcInfo src_info;
+  EXPECT_EQ(OB_INVALID_ARGUMENT, choose_src_helper_.init(tenant_id, ls_id, local_ls_checkpoint_scn, mock_arg, policy, &storage_rpc_, &member_helper_));
+}
+// test ObMigrationSrcByLocationProvider init fail
+TEST_F(TestChooseMigrationSourcePolicy, src_provider_init_region_fail)
+{
+  const uint64_t tenant_id = 1001;
+  const share::ObLSID ls_id(1);
+  share::SCN local_ls_checkpoint_scn;
+  local_ls_checkpoint_scn.set_min();
+  ObMigrationOpArg mock_arg;
+  EXPECT_EQ(OB_SUCCESS, mock_migrate_arg_init_fail(mock_arg));
+  ObStorageHASrcProvider::ChooseSourcePolicy policy;
+  EXPECT_EQ(OB_SUCCESS, get_region_policy(mock_arg, tenant_id, policy));
+  ObStorageHASrcInfo src_info;
+  EXPECT_EQ(OB_INVALID_ARGUMENT, choose_src_helper_.init(tenant_id, ls_id, local_ls_checkpoint_scn, mock_arg, policy, &storage_rpc_, &member_helper_));
+}
+// test ObRSRecommendSrcProvider init fail
+TEST_F(TestChooseMigrationSourcePolicy, src_provider_init_recommand_fail)
+{
+  const uint64_t tenant_id = 1001;
+  const share::ObLSID ls_id(1);
+  share::SCN local_ls_checkpoint_scn;
+  local_ls_checkpoint_scn.set_min();
+  ObMigrationOpArg mock_arg;
+  EXPECT_EQ(OB_SUCCESS, mock_migrate_arg_init_fail(mock_arg));
+  ObStorageHASrcProvider::ChooseSourcePolicy policy;
+  EXPECT_EQ(OB_SUCCESS, get_recommand_policy(mock_arg, tenant_id, policy));
+  ObStorageHASrcInfo src_info;
+  EXPECT_EQ(OB_INVALID_ARGUMENT, choose_src_helper_.init(tenant_id, ls_id, local_ls_checkpoint_scn, mock_arg, policy, &storage_rpc_, &member_helper_));
+}
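+// The init-failure cases above and below all rely on mock_migrate_arg_init_fail()
+// (see test_migration.h below), which sets arg.type_ to ObMigrationOpType::MAX_LS_OP,
+// so provider init is expected to reject the argument with OB_INVALID_ARGUMENT.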
+// test ObMigrationSrcByCheckpointProvider init fail
+TEST_F(TestChooseMigrationSourcePolicy, src_provider_init_checkpoint_fail)
+{
+  const uint64_t tenant_id = 1001;
+  const share::ObLSID ls_id(1);
+  share::SCN local_ls_checkpoint_scn;
+  local_ls_checkpoint_scn.set_min();
+  ObMigrationOpArg mock_arg;
+  EXPECT_EQ(OB_SUCCESS, mock_migrate_arg_init_fail(mock_arg));
+  ObStorageHASrcProvider::ChooseSourcePolicy policy;
+  EXPECT_EQ(OB_SUCCESS, get_checkpoint_policy(mock_arg, tenant_id, policy));
+  ObStorageHASrcInfo src_info;
+  EXPECT_EQ(OB_INVALID_ARGUMENT, choose_src_helper_.init(tenant_id, ls_id,
+      local_ls_checkpoint_scn, mock_arg, policy, &storage_rpc_, &member_helper_));
+}
+// test that candidates failing the checkpoint/replica-type checks are skipped
+// candidate addr: ["192.168.1.1:1234", "192.168.1.2:1234", "192.168.1.3:1234", "192.168.1.4:1234", "192.168.1.5:1234"]
+// local checkpoint -> OB_BASE_SCN_TS_NS
+// parent checkpoint -> OB_BASE_SCN_TS_NS + 1
+// dst type -> F
+// 192.168.1.1:1234 : checkpoint -> OB_BASE_SCN_TS_NS, type -> F, leader
+// 192.168.1.2:1234 : checkpoint -> OB_MIN_SCN_TS_NS, type -> F
+// 192.168.1.3:1234 : checkpoint -> OB_BASE_SCN_TS_NS, type -> F
+// 192.168.1.4:1234 : checkpoint -> OB_BASE_SCN_TS_NS + 2, type -> F
+// 192.168.1.5:1234 : checkpoint -> OB_MAX_SCN_TS_NS, type -> R
+// output addr: 192.168.1.3:1234
+TEST_F(TestChooseMigrationSourcePolicy, get_available_src_condition_fail)
+{
+  MockLsMetaInfo ls_meta;
+  EXPECT_CALL(storage_rpc_, post_ls_meta_info_request(_, _, _, _))
+      .WillOnce(Invoke(&ls_meta, &MockLsMetaInfo::post_ls_meta_info_request_parent_checkpoint))
+      .WillOnce(Invoke(&ls_meta, &MockLsMetaInfo::post_ls_meta_info_request_min_checkpoint))
+      .WillOnce(Invoke(&ls_meta, &MockLsMetaInfo::post_ls_meta_info_request_base_checkpoint))
+      .WillOnce(Invoke(&ls_meta, &MockLsMetaInfo::post_ls_meta_info_request_large_checkpoint))
+      .WillOnce(Invoke(&ls_meta, &MockLsMetaInfo::post_ls_meta_info_request_invalid_type_checkpoint))
+      .WillRepeatedly(Invoke(&ls_meta, &MockLsMetaInfo::post_ls_meta_info_request_min_checkpoint));
+  MockMemberList member_list;
+  EXPECT_CALL(member_helper_, get_ls_member_list_and_learner_list_(_, _, _, _, _, _))
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::get_ls_member_list_for_replica_type_failed));
+  EXPECT_CALL(member_helper_, get_ls_leader(_, _, _))
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::get_ls_leader_succ));
+  EXPECT_CALL(member_helper_, get_ls(_, _))
+      .WillOnce(Invoke(&member_list, &MockMemberList::get_ls_fail))
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::get_ls_succ));
+  EXPECT_CALL(member_helper_, check_tenant_primary())
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::check_tenant_primary_true));
+  const uint64_t tenant_id = 1001;
+  const share::ObLSID ls_id(1);
+  share::SCN local_ls_checkpoint_scn;
+  local_ls_checkpoint_scn.set_base();
+  ObMigrationOpArg mock_arg;
+  EXPECT_EQ(OB_SUCCESS, mock_migrate_arg_for_checkpoint(mock_arg));
+  ObStorageHASrcProvider::ChooseSourcePolicy policy;
+  EXPECT_EQ(OB_SUCCESS, get_checkpoint_policy(mock_arg, tenant_id, policy));
+  ObStorageHASrcInfo src_info;
+  EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(tenant_id, ls_id, local_ls_checkpoint_scn, mock_arg, policy, &storage_rpc_, &member_helper_));
+  EXPECT_EQ(OB_SUCCESS, choose_src_helper_.get_available_src(mock_arg, src_info));
+  common::ObAddr expect_addr;
+  EXPECT_EQ(OB_SUCCESS, mock_addr("192.168.1.3:1234", expect_addr));
+  EXPECT_EQ(expect_addr, src_info.src_addr_);
+}
+// test check replica valid fail
+// candidate addr: ["192.168.1.1:1234", "192.168.1.2:1234", "192.168.1.3:1234", "192.168.1.4:1234", "192.168.1.5:1234"]
+// local checkpoint -> OB_BASE_SCN_TS_NS
+// parent checkpoint -> OB_BASE_SCN_TS_NS + 1
+// dst type -> F
+// 192.168.1.1:1234 : idc -> idc1, region -> region1, checkpoint -> OB_MIN_SCN_TS_NS, type -> F, leader
+// 192.168.1.2:1234 : idc -> idc2, region -> region1, checkpoint -> OB_BASE_SCN_TS_NS, type -> F
+// 192.168.1.3:1234 : idc -> idc2, region -> region1, checkpoint -> OB_BASE_SCN_TS_NS, type -> F
+// 192.168.1.4:1234 : idc -> idc1, region -> region2, checkpoint -> OB_BASE_SCN_TS_NS + 2, type -> F
+// 192.168.1.5:1234 : idc -> idc1, region -> region2, checkpoint -> OB_MIN_SCN_TS_NS, type -> F
+// output addr: 192.168.1.4:1234
+TEST_F(TestChooseMigrationSourcePolicy, idc_mode_check_replica_fail)
+{
+  MockLsMetaInfo ls_meta;
+  EXPECT_CALL(storage_rpc_, post_ls_meta_info_request(_, _, _, _))
+      .WillOnce(Invoke(&ls_meta, &MockLsMetaInfo::post_ls_meta_info_request_parent_checkpoint))
+      .WillOnce(Invoke(&ls_meta, &MockLsMetaInfo::post_ls_meta_info_request_min_checkpoint))
+      .WillOnce(Invoke(&ls_meta, &MockLsMetaInfo::post_ls_meta_info_request_base_checkpoint))
+      .WillOnce(Invoke(&ls_meta, &MockLsMetaInfo::post_ls_meta_info_request_base_checkpoint))
+      .WillOnce(Invoke(&ls_meta, &MockLsMetaInfo::post_ls_meta_info_request_large_checkpoint))
+      .WillRepeatedly(Invoke(&ls_meta, &MockLsMetaInfo::post_ls_meta_info_request_min_checkpoint));
+  MockMemberList member_list;
+  EXPECT_CALL(member_helper_, get_ls_member_list_and_learner_list_(_, _, _, _, _, _))
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::get_ls_member_list_for_idc_mode_idc_leader));
+  EXPECT_CALL(member_helper_, get_ls_leader(_, _, _))
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::get_ls_leader_succ));
+  EXPECT_CALL(member_helper_, get_ls(_, _))
+      .WillOnce(Invoke(&member_list, &MockMemberList::get_ls_fail))
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::get_ls_succ));
+  EXPECT_CALL(member_helper_, check_tenant_primary())
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::check_tenant_primary_true));
+  const uint64_t tenant_id = 1001;
+  const share::ObLSID ls_id(1);
+  share::SCN local_ls_checkpoint_scn;
+  local_ls_checkpoint_scn.set_base();
+  ObMigrationOpArg mock_arg;
+  EXPECT_EQ(OB_SUCCESS, mock_migrate_arg_for_location(mock_arg));
+  ObStorageHASrcProvider::ChooseSourcePolicy policy;
+  EXPECT_EQ(OB_SUCCESS, get_idc_policy(mock_arg, tenant_id, policy));
+  ObStorageHASrcInfo src_info;
+  EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(tenant_id, ls_id, local_ls_checkpoint_scn, mock_arg, policy, &storage_rpc_, &member_helper_));
+  EXPECT_EQ(OB_SUCCESS, mock_locality_manager(MOCKLOCALITY::IDC_MODE_IDC_LEADER, locality_manager_));
+  EXPECT_EQ(ObStorageHASrcProvider::ChooseSourcePolicy::IDC, choose_src_helper_.get_provider()->get_policy_type());
+  static_cast<ObMigrationSrcByLocationProvider *>(choose_src_helper_.get_provider())->set_locality_manager_(&locality_manager_);
+  EXPECT_EQ(OB_SUCCESS, choose_src_helper_.get_available_src(mock_arg, src_info));
+  common::ObAddr expect_addr;
+  EXPECT_EQ(OB_SUCCESS, mock_addr("192.168.1.4:1234", expect_addr));
+  EXPECT_EQ(expect_addr, src_info.src_addr_);
+}
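+// Note: the sequenced WillOnce expectations on post_ls_meta_info_request are
+// assumed to be consumed in member-list order, pairing each candidate with the
+// checkpoint given in the scenario comments.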
+// test idc policy when the destination is an R replica
+// candidate addr: ["192.168.1.1:1234", "192.168.1.2:1234", "192.168.1.3:1234", "192.168.1.4:1234", "192.168.1.5:1234"]
+// local checkpoint -> OB_BASE_SCN_TS_NS
+// parent checkpoint -> OB_BASE_SCN_TS_NS + 1
+// dst type -> R
+// 192.168.1.1:1234 : idc -> idc1, region -> region1, checkpoint -> OB_MIN_SCN_TS_NS, type -> F, leader
+// 192.168.1.2:1234 : idc -> idc2, region -> region1, checkpoint -> OB_BASE_SCN_TS_NS, type -> F
+// 192.168.1.3:1234 : idc -> idc2, region -> region1, checkpoint -> OB_BASE_SCN_TS_NS, type -> F
+// 192.168.1.4:1234 : idc -> idc1, region -> region2, checkpoint -> OB_BASE_SCN_TS_NS + 2, type -> F
+// 192.168.1.5:1234 : idc -> idc1, region -> region2, checkpoint -> OB_MIN_SCN_TS_NS, type -> F
+// output addr: 192.168.1.4:1234
+TEST_F(TestChooseMigrationSourcePolicy, idc_mode_r_replica_init)
+{
+  MockLsMetaInfo ls_meta;
+  EXPECT_CALL(storage_rpc_, post_ls_meta_info_request(_, _, _, _))
+      .WillOnce(Invoke(&ls_meta, &MockLsMetaInfo::post_ls_meta_info_request_parent_checkpoint))
+      .WillOnce(Invoke(&ls_meta, &MockLsMetaInfo::post_ls_meta_info_request_min_checkpoint))
+      .WillOnce(Invoke(&ls_meta, &MockLsMetaInfo::post_ls_meta_info_request_base_checkpoint))
+      .WillOnce(Invoke(&ls_meta, &MockLsMetaInfo::post_ls_meta_info_request_base_checkpoint))
+      .WillOnce(Invoke(&ls_meta, &MockLsMetaInfo::post_ls_meta_info_request_large_checkpoint))
+      .WillRepeatedly(Invoke(&ls_meta, &MockLsMetaInfo::post_ls_meta_info_request_min_checkpoint));
+  MockMemberList member_list;
+  EXPECT_CALL(member_helper_, get_ls_member_list_and_learner_list_(_, _, _, _, _, _))
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::get_ls_member_list_for_idc_mode_idc_leader));
+  EXPECT_CALL(member_helper_, get_ls_leader(_, _, _))
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::get_ls_leader_succ));
+  EXPECT_CALL(member_helper_, get_ls(_, _))
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::get_ls_succ_with_palf));
+  EXPECT_CALL(member_helper_, check_tenant_primary())
+      .WillRepeatedly(Invoke(&member_list, &MockMemberList::check_tenant_primary_true));
+  const uint64_t tenant_id = 1001;
+  const share::ObLSID ls_id(1);
+  share::SCN local_ls_checkpoint_scn;
+  local_ls_checkpoint_scn.set_base();
+  ObMigrationOpArg mock_arg;
+  EXPECT_EQ(OB_SUCCESS, mock_migrate_arg_for_r_type(mock_arg));
+  ObStorageHASrcProvider::ChooseSourcePolicy policy;
+  EXPECT_EQ(OB_SUCCESS, get_idc_policy(mock_arg, tenant_id, policy));
+  ObStorageHASrcInfo src_info;
+  EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(tenant_id, ls_id, local_ls_checkpoint_scn, mock_arg, policy, &storage_rpc_, &member_helper_));
+  EXPECT_EQ(OB_SUCCESS, mock_locality_manager(MOCKLOCALITY::IDC_MODE_IDC_LEADER, locality_manager_));
+  EXPECT_EQ(ObStorageHASrcProvider::ChooseSourcePolicy::IDC, choose_src_helper_.get_provider()->get_policy_type());
+  static_cast<ObMigrationSrcByLocationProvider *>(choose_src_helper_.get_provider())->set_locality_manager_(&locality_manager_);
+  EXPECT_EQ(OB_SUCCESS, choose_src_helper_.get_available_src(mock_arg, src_info));
+  common::ObAddr expect_addr;
+  EXPECT_EQ(OB_SUCCESS, mock_addr("192.168.1.4:1234", expect_addr));
+  EXPECT_EQ(expect_addr, src_info.src_addr_);
+}
+
+}
+}
+
+int main(int argc, char **argv)
+{
+  system("rm -f test_choose_migration_source_policy.log");
+  ObLogger &logger = ObLogger::get_logger();
+  logger.set_file_name("test_choose_migration_source_policy.log", true);
+  logger.set_log_level("info");
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/unittest/storage/migration/test_migration.h b/unittest/storage/migration/test_migration.h
new file mode 100644
index 0000000000..1f23f8d12b
--- /dev/null
+++ b/unittest/storage/migration/test_migration.h
@@ -0,0 +1,575 @@
+/**
+ * Copyright (c) 2021 OceanBase
+ * OceanBase CE is licensed under Mulan PubL v2.
+ * You can use this software according to the terms and conditions of the Mulan PubL v2.
+ * You may obtain a copy of Mulan PubL v2 at:
+ *          http://license.coscl.org.cn/MulanPubL-2.0
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PubL v2 for more details.
+ */
+
+#define private public
+#define protected public
+
+#include "lib/ob_errno.h"
+#include "lib/net/ob_addr.h"
+#include "common/ob_member.h"
+#include "common/ob_learner_list.h"
+#include "storage/ob_locality_manager.h"
+namespace oceanbase
+{
+namespace storage
+{
+enum MOCKLOCALITY : int8_t
+{
+  IDC_MODE_IDC_LEADER = 0,
+  IDC_MODE_IDC_FOLLOWER = 1,
+  IDC_MODE_REGION_LEADER = 2,
+  IDC_MODE_REGION_FOLLOWER = 3,
+  IDC_MODE_DIFF_REGION_LEADER = 4,
+  IDC_MODE_DIFF_REGION_FOLLOWER = 5,
+  REGION_MODE_REGION_FOLLOWER = 6,
+  REGION_MODE_REGION_LEADER = 7,
+  REGION_MODE_DIFF_REGION_FOLLOWER = 8,
+  REGION_MODE_DIFF_REGION_LEADER = 9,
+  MAX_LOCALITY_MANAGER
+};
+
+class MockLocalityManager : public ObLocalityManager
+{
+public:
+  MockLocalityManager() : ObLocalityManager() {}
+  virtual ~MockLocalityManager() {}
+  int init_manager(const common::ObAddr &self)
+  {
+    int ret = OB_SUCCESS;
+    if (OB_UNLIKELY(is_inited_)) {
+      ret = OB_INIT_TWICE;
+      LOG_WARN("ObLocalityManager init twice", K(ret));
+    } else if (!self.is_valid()) {
+      ret = OB_INVALID_ARGUMENT;
+      LOG_WARN("invalid argument", K(ret), K(self));
+    } else if (OB_FAIL(server_locality_cache_.init())) {
+      LOG_WARN("server_locality_cache_ init failed", K(ret), K(self));
+    } else {
+      self_ = self;
+      is_inited_ = true;
+    }
+    return ret;
+  }
+};
+
+static int mock_replica_member(const common::ObAddr &addr, const common::ObRegion &region,
+    const common::ObReplicaType type, common::ObReplicaMember &replica)
+{
+  int ret = OB_SUCCESS;
+  replica.reset();
+  replica.replica_type_ = type;
+  replica.region_ = region;
+  replica.memstore_percent_ = 0;
+  replica.server_ = addr;
+  replica.timestamp_ = 0;
+  replica.flag_ = 0;
+  return ret;
+}
+
+static int mock_addr(const char *ipport, common::ObAddr &addr)
+{
+  int ret = OB_SUCCESS;
+  addr.reset();
+  if (OB_ISNULL(ipport)) {
+    ret = OB_INVALID_ARGUMENT;
+    LOG_WARN("ipport is nullptr", K(ret));
+  } else if (OB_FAIL(addr.parse_from_cstring(ipport))) {
+    LOG_WARN("failed to parse from cstring", K(ret));
+  }
+  return ret;
+}
+
+static int mock_learner_list(const common::ObAddr &addr, common::GlobalLearnerList &learner_list)
+{
+  int ret = OB_SUCCESS;
+  learner_list.reset();
+  if (OB_FAIL(learner_list.add_server(addr))) {
+    LOG_WARN("failed to add server", K(ret), K(addr));
+  } else if (!learner_list.contains(addr)) {
+    ret = OB_ERR_UNEXPECTED;
+    LOG_WARN("unexpected error", K(ret), K(learner_list), K(addr));
+  }
+  return ret;
+}
+
+static int mock_addr_list(const int addr_count, common::ObIArray<common::ObAddr> &addr_list)
+{
+  int ret = OB_SUCCESS;
+  addr_list.reset();
+  const int64_t addr_num = 6;
+  const char *addr_array[addr_num] = {"192.168.1.1:1234", "192.168.1.2:1234", "192.168.1.3:1234", "192.168.1.4:1234", "192.168.1.5:1234", "192.168.1.6:1234"};
+  if (addr_count < 0 || addr_count > addr_num) {
+    ret = OB_INVALID_ARGUMENT;
+    LOG_WARN("invalid argument", K(ret), K(addr_count));
+  } else {
+    common::ObAddr addr;
+    for (int i = 0; i < addr_count && OB_SUCC(ret); i++) {
+      if (OB_FAIL(mock_addr(addr_array[i], addr))) {
+        LOG_WARN("failed to mock addr", K(ret));
+      } else if (OB_FAIL(addr_list.push_back(addr))) {
+        LOG_WARN("failed to add addr", K(ret), K(addr));
+      }
+    }
+  }
+  return ret;
+}
+
+static int mock_dst_addr(common::ObAddr &addr)
+{
+  int ret = OB_SUCCESS;
+  if (OB_FAIL(mock_addr("192.168.1.7:1234", addr))) {
+    LOG_WARN("failed to mock dst addr", K(ret));
+  }
+  return ret;
+}
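+// Address plan shared by the helpers below: 192.168.1.1-6 are candidate source
+// replicas (see mock_addr_list) and 192.168.1.7 is the migration destination
+// (see mock_dst_addr); mock_locality_manager() assigns idc/region labels per
+// MOCKLOCALITY mode and always records the destination as idc1/region1.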
+static int mock_locality_manager(const MOCKLOCALITY mode, MockLocalityManager &locality_manager)
+{
+  int ret = OB_SUCCESS;
+  common::ObArray<common::ObAddr> addr_list;
+  common::ObAddr addr;
+  switch (mode) {
+  case MOCKLOCALITY::IDC_MODE_IDC_LEADER: {
+    if (OB_FAIL(mock_addr_list(5/*addr_count*/, addr_list))) {
+      LOG_WARN("failed to mock addr list", K(ret));
+    } else if (OB_FAIL(locality_manager.record_server_idc(addr_list.at(0), "idc1"))) {
+      LOG_WARN("failed to record server idc", K(ret), K(addr_list.at(0)));
+    } else if (OB_FAIL(locality_manager.record_server_region(addr_list.at(0), "region1"))) {
+      LOG_WARN("failed to record server region", K(ret), K(addr_list.at(0)));
+    } else if (OB_FAIL(locality_manager.record_server_idc(addr_list.at(1), "idc2"))) {
+      LOG_WARN("failed to record server idc", K(ret), K(addr_list.at(1)));
+    } else if (OB_FAIL(locality_manager.record_server_region(addr_list.at(1), "region1"))) {
+      LOG_WARN("failed to record server region", K(ret), K(addr_list.at(1)));
+    } else if (OB_FAIL(locality_manager.record_server_idc(addr_list.at(2), "idc2"))) {
+      LOG_WARN("failed to record server idc", K(ret), K(addr_list.at(2)));
+    } else if (OB_FAIL(locality_manager.record_server_region(addr_list.at(2), "region1"))) {
+      LOG_WARN("failed to record server region", K(ret), K(addr_list.at(2)));
+    } else if (OB_FAIL(locality_manager.record_server_idc(addr_list.at(3), "idc1"))) {
+      LOG_WARN("failed to record server idc", K(ret), K(addr_list.at(3)));
+    } else if (OB_FAIL(locality_manager.record_server_region(addr_list.at(3), "region2"))) {
+      LOG_WARN("failed to record server region", K(ret), K(addr_list.at(3)));
+    } else if (OB_FAIL(locality_manager.record_server_idc(addr_list.at(4), "idc1"))) {
+      LOG_WARN("failed to record server idc", K(ret), K(addr_list.at(4)));
+    } else if (OB_FAIL(locality_manager.record_server_region(addr_list.at(4), "region2"))) {
+      LOG_WARN("failed to record server region", K(ret), K(addr_list.at(4)));
+    }
+    break;
+  }
+  case MOCKLOCALITY::IDC_MODE_IDC_FOLLOWER: {
+    if (OB_FAIL(mock_addr_list(5/*addr_count*/, addr_list))) {
+      LOG_WARN("failed to mock addr list", K(ret));
+    } else if (OB_FAIL(locality_manager.record_server_idc(addr_list.at(0), "idc1"))) {
+      LOG_WARN("failed to record server idc", K(ret), K(addr_list.at(0)));
+    } else if (OB_FAIL(locality_manager.record_server_region(addr_list.at(0), "region1"))) {
+      LOG_WARN("failed to record server region", K(ret), K(addr_list.at(0)));
+    } else if (OB_FAIL(locality_manager.record_server_idc(addr_list.at(1), "idc1"))) {
+      LOG_WARN("failed to record server idc", K(ret), K(addr_list.at(1)));
+    } else if (OB_FAIL(locality_manager.record_server_region(addr_list.at(1), "region1"))) {
+      LOG_WARN("failed to record server region", K(ret), K(addr_list.at(1)));
+    } else if (OB_FAIL(locality_manager.record_server_idc(addr_list.at(2), "idc2"))) {
+      LOG_WARN("failed to record server idc", K(ret), K(addr_list.at(2)));
+    } else if (OB_FAIL(locality_manager.record_server_region(addr_list.at(2), "region1"))) {
+      LOG_WARN("failed to record server region", K(ret), K(addr_list.at(2)));
+    } else if (OB_FAIL(locality_manager.record_server_idc(addr_list.at(3), "idc2"))) {
+      LOG_WARN("failed to record server idc", K(ret), K(addr_list.at(3)));
+    } else if (OB_FAIL(locality_manager.record_server_region(addr_list.at(3), "region1"))) {
+      LOG_WARN("failed to record server region", K(ret), K(addr_list.at(3)));
+    } else if (OB_FAIL(locality_manager.record_server_idc(addr_list.at(4), "idc1"))) {
+      LOG_WARN("failed to record server idc", K(ret), K(addr_list.at(4)));
+    } else if (OB_FAIL(locality_manager.record_server_region(addr_list.at(4), "region2"))) {
+      LOG_WARN("failed to record server region", K(ret), K(addr_list.at(4)));
+    }
+    break;
+  }
LOG_WARN("failed to record server region", K(ret), K(addr_list.at(4))); + } + break; + } + case MOCKLOCALITY::IDC_MODE_REGION_LEADER: { + if (OB_FAIL(mock_addr_list(3/*addr_count*/, addr_list))) { + LOG_WARN("failed to mock addr list", K(ret)); + } else if (OB_FAIL(locality_manager.record_server_idc(addr_list.at(0), "idc2"))) { + LOG_WARN("failed to record server region", K(ret), K(addr_list.at(0))); + } else if (OB_FAIL(locality_manager.record_server_region(addr_list.at(0), "region1"))) { + LOG_WARN("failed to record server region", K(ret), K(addr_list.at(0))); + } else if (OB_FAIL(locality_manager.record_server_idc(addr_list.at(1), "idc1"))) { + LOG_WARN("failed to record server region", K(ret), K(addr_list.at(1))); + } else if (OB_FAIL(locality_manager.record_server_region(addr_list.at(1), "region2"))) { + LOG_WARN("failed to record server region", K(ret), K(addr_list.at(1))); + } else if (OB_FAIL(locality_manager.record_server_idc(addr_list.at(2), "idc1"))) { + LOG_WARN("failed to record server region", K(ret), K(addr_list.at(2))); + } else if (OB_FAIL(locality_manager.record_server_region(addr_list.at(2), "region2"))) { + LOG_WARN("failed to record server region", K(ret), K(addr_list.at(2))); + } + break; + } + case MOCKLOCALITY::IDC_MODE_REGION_FOLLOWER: { + if (OB_FAIL(mock_addr_list(3/*addr_count*/, addr_list))) { + LOG_WARN("failed to mock addr list", K(ret)); + } else if (OB_FAIL(locality_manager.record_server_idc(addr_list.at(0), "idc2"))) { + LOG_WARN("failed to record server region", K(ret), K(addr_list.at(0))); + } else if (OB_FAIL(locality_manager.record_server_region(addr_list.at(0), "region1"))) { + LOG_WARN("failed to record server region", K(ret), K(addr_list.at(0))); + } else if (OB_FAIL(locality_manager.record_server_idc(addr_list.at(1), "idc2"))) { + LOG_WARN("failed to record server region", K(ret), K(addr_list.at(1))); + } else if (OB_FAIL(locality_manager.record_server_region(addr_list.at(1), "region1"))) { + LOG_WARN("failed to record server region", K(ret), K(addr_list.at(1))); + } else if (OB_FAIL(locality_manager.record_server_idc(addr_list.at(2), "idc1"))) { + LOG_WARN("failed to record server region", K(ret), K(addr_list.at(2))); + } else if (OB_FAIL(locality_manager.record_server_region(addr_list.at(2), "region2"))) { + LOG_WARN("failed to record server region", K(ret), K(addr_list.at(2))); + } + break; + } + case MOCKLOCALITY::IDC_MODE_DIFF_REGION_LEADER: { + if (OB_FAIL(mock_addr_list(1/*addr_count*/, addr_list))) { + LOG_WARN("failed to mock addr list", K(ret)); + } else if (OB_FAIL(locality_manager.record_server_idc(addr_list.at(0), "idc1"))) { + LOG_WARN("failed to record server region", K(ret), K(addr_list.at(0))); + } else if (OB_FAIL(locality_manager.record_server_region(addr_list.at(0), "region2"))) { + LOG_WARN("failed to record server region", K(ret), K(addr_list.at(0))); + } + break; + } + case MOCKLOCALITY::IDC_MODE_DIFF_REGION_FOLLOWER: { + if (OB_FAIL(mock_addr_list(2/*addr_count*/, addr_list))) { + LOG_WARN("failed to mock addr list", K(ret)); + } else if (OB_FAIL(locality_manager.record_server_idc(addr_list.at(0), "idc1"))) { + LOG_WARN("failed to record server region", K(ret), K(addr_list.at(0))); + } else if (OB_FAIL(locality_manager.record_server_region(addr_list.at(0), "region2"))) { + LOG_WARN("failed to record server region", K(ret), K(addr_list.at(0))); + } else if (OB_FAIL(locality_manager.record_server_idc(addr_list.at(1), "idc2"))) { + LOG_WARN("failed to record server region", K(ret), K(addr_list.at(1))); + } else if 
+  case MOCKLOCALITY::REGION_MODE_REGION_FOLLOWER: {
+    if (OB_FAIL(mock_addr_list(4/*addr_count*/, addr_list))) {
+      LOG_WARN("failed to mock addr list", K(ret));
+    } else if (OB_FAIL(locality_manager.record_server_idc(addr_list.at(0), "idc1"))) {
+      LOG_WARN("failed to record server idc", K(ret), K(addr_list.at(0)));
+    } else if (OB_FAIL(locality_manager.record_server_region(addr_list.at(0), "region1"))) {
+      LOG_WARN("failed to record server region", K(ret), K(addr_list.at(0)));
+    } else if (OB_FAIL(locality_manager.record_server_idc(addr_list.at(1), "idc2"))) {
+      LOG_WARN("failed to record server idc", K(ret), K(addr_list.at(1)));
+    } else if (OB_FAIL(locality_manager.record_server_region(addr_list.at(1), "region1"))) {
+      LOG_WARN("failed to record server region", K(ret), K(addr_list.at(1)));
+    } else if (OB_FAIL(locality_manager.record_server_idc(addr_list.at(2), "idc1"))) {
+      LOG_WARN("failed to record server idc", K(ret), K(addr_list.at(2)));
+    } else if (OB_FAIL(locality_manager.record_server_region(addr_list.at(2), "region2"))) {
+      LOG_WARN("failed to record server region", K(ret), K(addr_list.at(2)));
+    } else if (OB_FAIL(locality_manager.record_server_idc(addr_list.at(3), "idc2"))) {
+      LOG_WARN("failed to record server idc", K(ret), K(addr_list.at(3)));
+    } else if (OB_FAIL(locality_manager.record_server_region(addr_list.at(3), "region2"))) {
+      LOG_WARN("failed to record server region", K(ret), K(addr_list.at(3)));
+    }
+    break;
+  }
+  case MOCKLOCALITY::REGION_MODE_REGION_LEADER: {
+    if (OB_FAIL(mock_addr_list(3/*addr_count*/, addr_list))) {
+      LOG_WARN("failed to mock addr list", K(ret));
+    } else if (OB_FAIL(locality_manager.record_server_idc(addr_list.at(0), "idc1"))) {
+      LOG_WARN("failed to record server idc", K(ret), K(addr_list.at(0)));
+    } else if (OB_FAIL(locality_manager.record_server_region(addr_list.at(0), "region1"))) {
+      LOG_WARN("failed to record server region", K(ret), K(addr_list.at(0)));
+    } else if (OB_FAIL(locality_manager.record_server_idc(addr_list.at(1), "idc1"))) {
+      LOG_WARN("failed to record server idc", K(ret), K(addr_list.at(1)));
+    } else if (OB_FAIL(locality_manager.record_server_region(addr_list.at(1), "region2"))) {
+      LOG_WARN("failed to record server region", K(ret), K(addr_list.at(1)));
+    } else if (OB_FAIL(locality_manager.record_server_idc(addr_list.at(2), "idc1"))) {
+      LOG_WARN("failed to record server idc", K(ret), K(addr_list.at(2)));
+    } else if (OB_FAIL(locality_manager.record_server_region(addr_list.at(2), "region2"))) {
+      LOG_WARN("failed to record server region", K(ret), K(addr_list.at(2)));
+    }
+    break;
+  }
+  case MOCKLOCALITY::REGION_MODE_DIFF_REGION_LEADER: {
+    if (OB_FAIL(mock_addr_list(1/*addr_count*/, addr_list))) {
+      LOG_WARN("failed to mock addr list", K(ret));
+    } else if (OB_FAIL(locality_manager.record_server_idc(addr_list.at(0), "idc1"))) {
+      LOG_WARN("failed to record server idc", K(ret), K(addr_list.at(0)));
+    } else if (OB_FAIL(locality_manager.record_server_region(addr_list.at(0), "region2"))) {
+      LOG_WARN("failed to record server region", K(ret), K(addr_list.at(0)));
+    }
+    break;
+  }
+  case MOCKLOCALITY::REGION_MODE_DIFF_REGION_FOLLOWER: {
+    if (OB_FAIL(mock_addr_list(2/*addr_count*/, addr_list))) {
+      LOG_WARN("failed to mock addr list", K(ret));
+    } else if (OB_FAIL(locality_manager.record_server_idc(addr_list.at(0), "idc1"))) {
+      LOG_WARN("failed to record server idc", K(ret), K(addr_list.at(0)));
+    } else if (OB_FAIL(locality_manager.record_server_region(addr_list.at(0), "region2"))) {
+      LOG_WARN("failed to record server region", K(ret), K(addr_list.at(0)));
+    } else if (OB_FAIL(locality_manager.record_server_idc(addr_list.at(1), "idc2"))) {
+      LOG_WARN("failed to record server idc", K(ret), K(addr_list.at(1)));
+    } else if (OB_FAIL(locality_manager.record_server_region(addr_list.at(1), "region2"))) {
+      LOG_WARN("failed to record server region", K(ret), K(addr_list.at(1)));
+    }
+    break;
+  }
LOG_WARN("failed to record server region", K(ret), K(addr_list.at(0))); + } else if (OB_FAIL(locality_manager.record_server_region(addr_list.at(0), "region2"))) { + LOG_WARN("failed to record server region", K(ret), K(addr_list.at(0))); + } else if (OB_FAIL(locality_manager.record_server_idc(addr_list.at(1), "idc2"))) { + LOG_WARN("failed to record server region", K(ret), K(addr_list.at(1))); + } else if (OB_FAIL(locality_manager.record_server_region(addr_list.at(1), "region2"))) { + LOG_WARN("failed to record server region", K(ret), K(addr_list.at(1))); + } + break; + } + default: + break; + } + + if (OB_FAIL(ret)) { + // do nothing + } else if (OB_FAIL(mock_dst_addr(addr))) { + LOG_WARN("failed to mock dst addr", K(ret)); + } else if (OB_FAIL(locality_manager.record_server_idc(addr, "idc1"))) { + LOG_WARN("failed to record server idc", K(ret)); + } else if (OB_FAIL(locality_manager.record_server_region(addr, "region1"))) { + LOG_WARN("failed to record server region", K(ret)); + } + return ret; +} + +static int mock_leader_addr(common::ObAddr &addr) +{ + int ret = OB_SUCCESS; + if (OB_FAIL(mock_addr("192.168.1.1:1234", addr))) { + LOG_WARN("failed to mock leader addr", K(ret)); + } + return ret; +} + +static int mock_rs_recommand_addr(common::ObAddr &addr) +{ + int ret = OB_SUCCESS; + if (OB_FAIL(mock_addr("192.168.1.4:1234", addr))) { + LOG_WARN("failed to mock recommand addr", K(ret)); + } + return ret; +} + +static int mock_migrate_arg(const common::ObReplicaMember &replica, ObMigrationOpArg &mock_arg) +{ + int ret = OB_SUCCESS; + mock_arg.reset(); + const share::ObLSID ls_id(1); + mock_arg.ls_id_ = ls_id; + mock_arg.type_ = ObMigrationOpType::TYPE::MIGRATE_LS_OP; + mock_arg.cluster_id_ = 0; + mock_arg.priority_ = ObMigrationOpPriority::PRIO_HIGH; + mock_arg.src_ = replica; + common::ObReplicaMember dst_replica; + common::ObAddr addr; + common::ObReplicaType type = common::ObReplicaType::REPLICA_TYPE_FULL; + common::ObRegion region("shanghai"); + if (OB_FAIL(mock_dst_addr(addr))) { + LOG_WARN("failed to mock addr", K(ret)); + } else if (OB_FAIL(mock_replica_member(addr, region, type, dst_replica))) { + LOG_WARN("failed to mock replica member", K(ret)); + } + mock_arg.dst_ = dst_replica; + mock_arg.paxos_replica_number_ = 3; + return ret; +} + +static int mock_valid_ls_meta(obrpc::ObFetchLSMetaInfoResp &res) +{ + int ret = OB_SUCCESS; + const share::ObLSID ls_id(1); + res.version_ = CLUSTER_CURRENT_VERSION; + res.has_transfer_table_ = false; + res.ls_meta_package_.ls_meta_.tenant_id_ = 1001; + res.ls_meta_package_.ls_meta_.ls_id_ = ls_id; + res.ls_meta_package_.ls_meta_.gc_state_ = logservice::LSGCState::NORMAL; + res.ls_meta_package_.ls_meta_.rebuild_seq_ = 0; + res.ls_meta_package_.ls_meta_.clog_checkpoint_scn_.set_base(); + res.ls_meta_package_.ls_meta_.migration_status_ = ObMigrationStatus::OB_MIGRATION_STATUS_NONE; + res.ls_meta_package_.ls_meta_.restore_status_ = share::ObLSRestoreStatus::NONE; + res.ls_meta_package_.dup_ls_meta_.ls_id_ = ls_id; + const palf::LSN lsn(184467440737095516); + res.ls_meta_package_.palf_meta_.prev_log_info_.lsn_ = lsn; + res.ls_meta_package_.palf_meta_.curr_lsn_ = lsn; + return ret; +} + +static int mock_migrate_arg_for_checkpoint(ObMigrationOpArg &mock_arg) +{ + int ret = OB_SUCCESS; + mock_arg.reset(); + common::ObReplicaMember src_replica; + common::ObReplicaType type = common::ObReplicaType::REPLICA_TYPE_FULL; + common::ObRegion region("shanghai"); + common::ObAddr src_addr; + if (OB_FAIL(mock_addr("192.168.1.1:1234", src_addr))) { + 
LOG_WARN("failed to mock ", K(ret)); + } else if (OB_FAIL(mock_replica_member(src_addr, region, type, src_replica))) { + LOG_WARN("failed to mock replica member", K(ret)); + } else if (OB_FAIL(mock_migrate_arg(src_replica, mock_arg))) { + LOG_WARN("failed to mock migrate arg", K(ret)); + } + return ret; +} + +static int mock_migrate_arg_for_rs_recommand(ObMigrationOpArg &mock_arg) +{ + int ret = OB_SUCCESS; + mock_arg.reset(); + common::ObReplicaMember src_replica; + common::ObReplicaType type = common::ObReplicaType::REPLICA_TYPE_READONLY; + common::ObRegion region("shanghai"); + common::ObAddr src_addr; + if (OB_FAIL(mock_rs_recommand_addr(src_addr))) { + LOG_WARN("failed to mock ", K(ret)); + } else if (OB_FAIL(mock_replica_member(src_addr, region, type, src_replica))) { + LOG_WARN("failed to mock replica member", K(ret)); + } else if (OB_FAIL(mock_migrate_arg(src_replica, mock_arg))) { + LOG_WARN("failed to mock migrate arg", K(ret)); + } else { + mock_arg.dst_.replica_type_ = common::ObReplicaType::REPLICA_TYPE_READONLY; + mock_arg.data_src_ = src_replica; + } + return ret; +} + +static int mock_migrate_arg_for_location(ObMigrationOpArg &mock_arg) +{ + int ret = OB_SUCCESS; + mock_arg.reset(); + common::ObReplicaMember src_replica; + common::ObReplicaType type = common::ObReplicaType::REPLICA_TYPE_READONLY; + common::ObRegion region("shanghai"); + common::ObAddr src_addr; + if (OB_FAIL(mock_addr("192.168.1.4:1234", src_addr))) { + LOG_WARN("failed to mock ", K(ret)); + } else if (OB_FAIL(mock_replica_member(src_addr, region, type, src_replica))) { + LOG_WARN("failed to mock replica member", K(ret)); + } else if (OB_FAIL(mock_migrate_arg(src_replica, mock_arg))) { + LOG_WARN("failed to mock migrate arg", K(ret)); + } + return ret; +} + +static int mock_migrate_arg_for_r_type(ObMigrationOpArg &mock_arg) +{ + int ret = OB_SUCCESS; + mock_arg.reset(); + + if (OB_FAIL(mock_migrate_arg_for_location(mock_arg))) { + LOG_WARN("failed to mock ", K(ret), K(mock_arg)); + } else { + mock_arg.dst_.replica_type_ = common::ObReplicaType::REPLICA_TYPE_READONLY; + } + return ret; +} + +static share::SCN mock_ckpt_inc(share::SCN &local_ls_checkpoint_scn) +{ + share::SCN result; + result = share::SCN::scn_inc(local_ls_checkpoint_scn); + return result; +} + +static int get_checkpoint_policy(const ObMigrationOpArg &arg, const uint64_t tenant_id, + ObStorageHASrcProvider::ChooseSourcePolicy &policy) +{ + int ret = OB_SUCCESS; + bool enable_choose_source_policy = false; + const char *str = "idc"; + if (OB_FAIL(ObStorageHAChooseSrcHelper::get_policy_type(arg, tenant_id, + enable_choose_source_policy, str, policy))) { + LOG_WARN("failed to get policy type", K(ret), K(arg), K(tenant_id)); + } + return ret; +} + + +static int get_recommand_policy(const ObMigrationOpArg &arg, const uint64_t tenant_id, + ObStorageHASrcProvider::ChooseSourcePolicy &policy) +{ + int ret = OB_SUCCESS; + bool enable_choose_source_policy = true; + const char *str = "idc"; + if (OB_FAIL(ObStorageHAChooseSrcHelper::get_policy_type(arg, tenant_id, + enable_choose_source_policy, str, policy))) { + LOG_WARN("failed to get policy type", K(ret), K(arg), K(tenant_id)); + } + return ret; +} + +static int get_idc_policy(const ObMigrationOpArg &arg, const uint64_t tenant_id, + ObStorageHASrcProvider::ChooseSourcePolicy &policy) +{ + int ret = OB_SUCCESS; + bool enable_choose_source_policy = true; + const char *str = "idc"; + if (OB_FAIL(ObStorageHAChooseSrcHelper::get_policy_type(arg, tenant_id, + enable_choose_source_policy, str, policy))) { + 
LOG_WARN("failed to get policy type", K(ret), K(arg), K(tenant_id)); + } + return ret; +} + +static int get_region_policy(const ObMigrationOpArg &arg, const uint64_t tenant_id, + ObStorageHASrcProvider::ChooseSourcePolicy &policy) +{ + int ret = OB_SUCCESS; + bool enable_choose_source_policy = true; + const char *str = "region"; + if (OB_FAIL(ObStorageHAChooseSrcHelper::get_policy_type(arg, tenant_id, + enable_choose_source_policy, str, policy))) { + LOG_WARN("failed to get policy type", K(ret), K(arg), K(tenant_id)); + } + return ret; +} + +static int mock_migrate_arg_for_rebuild(ObMigrationOpArg &mock_arg) +{ + int ret = OB_SUCCESS; + mock_arg.reset(); + common::ObReplicaMember src_replica; + common::ObReplicaType type = common::ObReplicaType::REPLICA_TYPE_FULL; + common::ObRegion region("shanghai"); + common::ObAddr src_addr; + if (OB_FAIL(mock_addr("192.168.1.4:1234", src_addr))) { + LOG_WARN("failed to mock ", K(ret)); + } else if (OB_FAIL(mock_replica_member(src_addr, region, type, src_replica))) { + LOG_WARN("failed to mock replica member", K(ret)); + } else if (OB_FAIL(mock_migrate_arg(src_replica, mock_arg))) { + LOG_WARN("failed to mock migrate arg", K(ret)); + } else { + mock_arg.type_ = ObMigrationOpType::TYPE::REBUILD_LS_OP; + } + return ret; +} + +static int mock_migrate_arg_init_fail(ObMigrationOpArg &mock_arg) +{ + int ret = OB_SUCCESS; + mock_arg.reset(); + common::ObReplicaMember src_replica; + common::ObReplicaType type = common::ObReplicaType::REPLICA_TYPE_FULL; + common::ObRegion region("shanghai"); + common::ObAddr src_addr; + if (OB_FAIL(mock_addr("192.168.1.4:1234", src_addr))) { + LOG_WARN("failed to mock ", K(ret)); + } else if (OB_FAIL(mock_replica_member(src_addr, region, type, src_replica))) { + LOG_WARN("failed to mock replica member", K(ret)); + } else if (OB_FAIL(mock_migrate_arg(src_replica, mock_arg))) { + LOG_WARN("failed to mock migrate arg", K(ret)); + } else { + mock_arg.type_ = ObMigrationOpType::TYPE::MAX_LS_OP; + } + return ret; +} + +static int mock_check_replica_type_addr(common::ObAddr &addr) +{ + int ret = OB_SUCCESS; + if (OB_FAIL(mock_addr("192.168.1.4:1234", addr))) { + LOG_WARN("failed to mock leader addr", K(ret)); + } + return ret; +} + +} +} diff --git a/unittest/storage/mock_ob_log_handler.h b/unittest/storage/mock_ob_log_handler.h index 6aa0f6b555..1715d3189a 100644 --- a/unittest/storage/mock_ob_log_handler.h +++ b/unittest/storage/mock_ob_log_handler.h @@ -507,6 +507,11 @@ public: UNUSED(addr); return OB_SUCCESS; } + int get_parent(common::ObAddr &parent) const + { + UNUSED(parent); + return OB_SUCCESS; + } int register_rebuild_cb(palf::PalfRebuildCb *rebuild_cb) { UNUSED(rebuild_cb);