[FEAT MERGE] Parallel migration of log stream replicas
Co-authored-by: JunkoPF <polyethylenego@gmail.com>
This commit is contained in:
parent
6e0dd8ead7
commit
ed93a2410b
@@ -103,6 +103,7 @@ ob_unittest_observer(test_arbitration_service_table_operator test_arbitration_se
ob_unittest_observer(test_add_remove_replace_arbitration_service test_add_remove_replace_arbitration_service.cpp)
ob_unittest_observer(test_create_tenant_with_arbitration_service test_create_tenant_with_arbitration_service.cpp)
ob_unittest_observer(test_arbitration_service_replica_task_table_operator test_arbitration_service_replica_task_table_operator.cpp)
ob_unittest_observer(test_ob_parallel_migration_mode test_ob_parallel_migration_mode.cpp)
ob_unittest_observer(test_change_arb_service_status test_change_arb_service_status.cpp)
ob_unittest_observer(test_big_tx_data test_big_tx_data.cpp)
ob_unittest_observer(test_fast_commit_report fast_commit_report.cpp)
mittest/simple_server/test_ob_parallel_migration_mode.cpp (new file, 183 lines)
@@ -0,0 +1,183 @@
/**
 * Copyright (c) 2021 OceanBase
 * OceanBase CE is licensed under Mulan PubL v2.
 * You can use this software according to the terms and conditions of the Mulan PubL v2.
 * You may obtain a copy of Mulan PubL v2 at:
 * http://license.coscl.org.cn/MulanPubL-2.0
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 * See the Mulan PubL v2 for more details.
 */

#define USING_LOG_PREFIX SHARE

#include <gtest/gtest.h>
#include <gmock/gmock.h>
#include "env/ob_simple_cluster_test_base.h"
#include "lib/ob_errno.h"
#include "rootserver/ob_disaster_recovery_task_mgr.h"
#include "rootserver/ob_disaster_recovery_task.h"

namespace oceanbase
{
using namespace unittest;
namespace share
{
using ::testing::_;
using ::testing::Invoke;
using ::testing::Return;

using namespace schema;
using namespace common;

class TestParallelMigrationMode : public unittest::ObSimpleClusterTestBase
{
public:
  TestParallelMigrationMode() : unittest::ObSimpleClusterTestBase("test_ob_parallel_migration_mode") {}
};

TEST_F(TestParallelMigrationMode, test_parse_from_string)
{
  int ret = OB_SUCCESS;

  rootserver::ObParallelMigrationMode mode;
  ObString auto_mode("auto");
  ObString on_mode("on");
  ObString off_mode("off");
  ObString not_valid_mode("onoff");
  ret = mode.parse_from_string(auto_mode);
  ASSERT_EQ(true, mode.is_auto_mode());
  ASSERT_EQ(OB_SUCCESS, ret);

  mode.reset();
  ret = mode.parse_from_string(on_mode);
  ASSERT_EQ(true, mode.is_on_mode());
  ASSERT_EQ(OB_SUCCESS, ret);

  mode.reset();
  ret = mode.parse_from_string(off_mode);
  ASSERT_EQ(true, mode.is_off_mode());
  ASSERT_EQ(OB_SUCCESS, ret);

  mode.reset();
  ret = mode.parse_from_string(not_valid_mode);
  ASSERT_EQ(OB_INVALID_ARGUMENT, ret);
}

TEST_F(TestParallelMigrationMode, test_get_mode_str)
{
  int ret = OB_SUCCESS;
  rootserver::ObParallelMigrationMode auto_mode(rootserver::ObParallelMigrationMode::ParallelMigrationMode::AUTO);
  rootserver::ObParallelMigrationMode on_mode(rootserver::ObParallelMigrationMode::ParallelMigrationMode::ON);
  rootserver::ObParallelMigrationMode off_mode(rootserver::ObParallelMigrationMode::ParallelMigrationMode::OFF);
  rootserver::ObParallelMigrationMode max_mode(rootserver::ObParallelMigrationMode::ParallelMigrationMode::MAX);
  ObString auto_str(auto_mode.get_mode_str());
  ASSERT_EQ("AUTO", std::string(auto_str.ptr(), auto_str.length()));

  ObString on_str(on_mode.get_mode_str());
  ASSERT_EQ("ON", std::string(on_str.ptr(), on_str.length()));

  ObString off_str(off_mode.get_mode_str());
  ASSERT_EQ("OFF", std::string(off_str.ptr(), off_str.length()));

  ObString max_str(max_mode.get_mode_str());
  ASSERT_EQ("", std::string(max_str.ptr(), max_str.length()));
}

TEST_F(TestParallelMigrationMode, test_to_string)
{
  int ret = OB_SUCCESS;
  rootserver::ObParallelMigrationMode auto_mode(rootserver::ObParallelMigrationMode::ParallelMigrationMode::AUTO);
  rootserver::ObParallelMigrationMode on_mode(rootserver::ObParallelMigrationMode::ParallelMigrationMode::ON);
  rootserver::ObParallelMigrationMode off_mode(rootserver::ObParallelMigrationMode::ParallelMigrationMode::OFF);
  rootserver::ObParallelMigrationMode max_mode(rootserver::ObParallelMigrationMode::ParallelMigrationMode::MAX);

  char auto_buf[100];
  int auto_len = sizeof(auto_buf);
  int pos1 = auto_mode.to_string(auto_buf, auto_len);
  ASSERT_EQ("{mode:0, mode:\"AUTO\"}", std::string(auto_buf, pos1));

  char on_buf[100];
  int on_len = sizeof(on_buf);
  int pos2 = on_mode.to_string(on_buf, on_len);
  ASSERT_EQ("{mode:1, mode:\"ON\"}", std::string(on_buf, pos2));

  char off_buf[100];
  int off_len = sizeof(off_buf);
  int pos3 = off_mode.to_string(off_buf, off_len);
  ASSERT_EQ("{mode:2, mode:\"OFF\"}", std::string(off_buf, pos3));

  char max_buf[100];
  int max_len = sizeof(max_buf);
  int pos4 = max_mode.to_string(max_buf, max_len);
  ASSERT_EQ("{mode:3, mode:null}", std::string(max_buf, pos4));
}

TEST_F(TestParallelMigrationMode, test_task_type)
{
  int ret = OB_SUCCESS;
  ObString migrate("MIGRATE REPLICA");
  ObString add("ADD REPLICA");
  ObString build("BUILD ONLY IN MEMBER LIST");
  ObString transform("TYPE TRANSFORM");
  ObString remove_paxos("REMOVE PAXOS REPLICA");
  ObString remove_non_paxos("REMOVE NON PAXOS REPLICA");
  ObString modify("MODIFY PAXOS REPLICA NUMBER");
  ObString max("MAX_TYPE");
  ObString no_valid("NOT_VALID_TYPE");
  rootserver::ObDRTaskType task_type = rootserver::ObDRTaskType::MAX_TYPE;
  ret = parse_disaster_recovery_task_type_from_string(migrate, task_type);
  ASSERT_EQ(rootserver::ObDRTaskType::LS_MIGRATE_REPLICA, task_type);
  ASSERT_EQ(OB_SUCCESS, ret);
  task_type = rootserver::ObDRTaskType::MAX_TYPE;

  ret = parse_disaster_recovery_task_type_from_string(add, task_type);
  ASSERT_EQ(rootserver::ObDRTaskType::LS_ADD_REPLICA, task_type);
  ASSERT_EQ(OB_SUCCESS, ret);
  task_type = rootserver::ObDRTaskType::MAX_TYPE;

  ret = parse_disaster_recovery_task_type_from_string(build, task_type);
  ASSERT_EQ(rootserver::ObDRTaskType::LS_BUILD_ONLY_IN_MEMBER_LIST, task_type);
  ASSERT_EQ(OB_SUCCESS, ret);
  task_type = rootserver::ObDRTaskType::MAX_TYPE;

  ret = parse_disaster_recovery_task_type_from_string(transform, task_type);
  ASSERT_EQ(rootserver::ObDRTaskType::LS_TYPE_TRANSFORM, task_type);
  ASSERT_EQ(OB_SUCCESS, ret);
  task_type = rootserver::ObDRTaskType::MAX_TYPE;

  ret = parse_disaster_recovery_task_type_from_string(remove_paxos, task_type);
  ASSERT_EQ(rootserver::ObDRTaskType::LS_REMOVE_PAXOS_REPLICA, task_type);
  ASSERT_EQ(OB_SUCCESS, ret);
  task_type = rootserver::ObDRTaskType::MAX_TYPE;

  ret = parse_disaster_recovery_task_type_from_string(remove_non_paxos, task_type);
  ASSERT_EQ(rootserver::ObDRTaskType::LS_REMOVE_NON_PAXOS_REPLICA, task_type);
  ASSERT_EQ(OB_SUCCESS, ret);
  task_type = rootserver::ObDRTaskType::MAX_TYPE;

  ret = parse_disaster_recovery_task_type_from_string(modify, task_type);
  ASSERT_EQ(rootserver::ObDRTaskType::LS_MODIFY_PAXOS_REPLICA_NUMBER, task_type);
  ASSERT_EQ(OB_SUCCESS, ret);
  task_type = rootserver::ObDRTaskType::MAX_TYPE;

  ret = parse_disaster_recovery_task_type_from_string(max, task_type);
  ASSERT_EQ(rootserver::ObDRTaskType::MAX_TYPE, task_type);
  ASSERT_EQ(OB_SUCCESS, ret);
  task_type = rootserver::ObDRTaskType::MAX_TYPE;

  ret = parse_disaster_recovery_task_type_from_string(no_valid, task_type);
  ASSERT_EQ(OB_INVALID_ARGUMENT, ret);
}

} // namespace share
} // namespace oceanbase

int main(int argc, char **argv)
{
  oceanbase::unittest::init_log_and_gtest(argc, argv);
  OB_LOGGER.set_log_level("INFO");
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
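Aside for readers skimming the diff: the class under test follows OceanBase's usual enum-plus-string-table pattern. Below is a self-contained, std-only sketch of the same round-trip (illustrative names such as PMMode and kModeStrs are not from the patch; the real code uses ObString::case_compare and returns OB_INVALID_ARGUMENT on unknown input):

#include <array>
#include <strings.h>  // strcasecmp (POSIX)

enum class PMMode : int64_t { AUTO = 0, ON, OFF, MAX };
static const std::array<const char *, 3> kModeStrs = {"AUTO", "ON", "OFF"};

// get_mode_str analogue: nullptr for MAX, which test_get_mode_str observes as "".
const char *mode_str(PMMode m) {
  const int64_t i = static_cast<int64_t>(m);
  return (i >= 0 && i < static_cast<int64_t>(PMMode::MAX)) ? kModeStrs[i] : nullptr;
}

// parse_from_string analogue: case-insensitive, rejects unknown strings.
bool parse_mode(const char *s, PMMode &out) {
  for (size_t i = 0; i < kModeStrs.size(); ++i) {
    if (0 == strcasecmp(s, kModeStrs[i])) {
      out = static_cast<PMMode>(i);
      return true;  // e.g. "auto" -> PMMode::AUTO
    }
  }
  return false;  // corresponds to OB_INVALID_ARGUMENT in the real code
}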
@@ -2008,6 +2008,10 @@ int ObService::do_migrate_ls_replica(const obrpc::ObLSMigrateReplicaArg &arg)
    migration_op_arg.paxos_replica_number_ = arg.paxos_replica_number_;
    migration_op_arg.src_ = arg.src_;
    migration_op_arg.type_ = ObMigrationOpType::MIGRATE_LS_OP;
    migration_op_arg.prioritize_same_zone_src_ = arg.prioritize_same_zone_src_;
#ifdef ERRSIM
    migration_op_arg.prioritize_same_zone_src_ = GCONF.enable_parallel_migration;
#endif
    if (OB_FAIL(ls_service->create_ls_for_ha(arg.task_id_, migration_op_arg))) {
      LOG_WARN("failed to create ls for ha", KR(ret), K(arg), K(migration_op_arg));
    }
(File diff suppressed because it is too large.)
@@ -127,9 +127,9 @@ enum ObDRTaskRetComment

class ObDRTaskQueue;
const char *ob_disaster_recovery_task_type_strs(const rootserver::ObDRTaskType type);
int parse_disaster_recovery_task_type_from_string(const ObString &task_type_str, rootserver::ObDRTaskType& task_type);
const char *ob_disaster_recovery_task_priority_strs(const rootserver::ObDRTaskPriority task_priority);
const char* ob_disaster_recovery_task_ret_comment_strs(const rootserver::ObDRTaskRetComment ret_comment);
const char *ob_replica_type_strs(const ObReplicaType type);
bool is_manual_dr_task_data_version_match(uint64_t tenant_data_version);
int build_execute_result(
    const int ret_code,
@@ -179,50 +179,6 @@ private:
};

class ObDRTaskMgr;
enum class ObDRTaskKeyType : int64_t
{
  INVALID = -1,
  FORMAL_DR_KEY = 0,
};

class ObDRTaskKey
{
public:
  ObDRTaskKey() : key_1_(-1),
                  key_2_(-1),
                  key_3_(-1),
                  key_4_(-1),
                  key_type_(ObDRTaskKeyType::INVALID),
                  hash_value_(0) {}
  virtual ~ObDRTaskKey() {}
public:
  bool is_valid() const;
  bool operator==(const ObDRTaskKey &that) const;
  ObDRTaskKey &operator=(const ObDRTaskKey &that);
  uint64_t hash() const;
  int hash(uint64_t &hash_val) const { hash_val = hash(); return OB_SUCCESS; }
  int init(const uint64_t key_1,
           const uint64_t key_2,
           const uint64_t key_3,
           const uint64_t key_4,
           const ObDRTaskKeyType key_type);
  int init(const ObDRTaskKey &that);
  ObDRTaskKeyType get_key_type() const { return key_type_; }
  TO_STRING_KV(K_(key_1),
               K_(key_2),
               K_(key_3),
               K_(key_4),
               K_(key_type));
private:
  uint64_t inner_hash() const;
private:
  uint64_t key_1_;
  uint64_t key_2_;
  uint64_t key_3_;
  uint64_t key_4_;
  ObDRTaskKeyType key_type_;
  uint64_t hash_value_;
};

enum class ObDRTaskType : int64_t
{
@@ -236,6 +192,40 @@ enum class ObDRTaskType : int64_t
  MAX_TYPE,
};

class ObDRTaskKey
{
public:
  ObDRTaskKey() : tenant_id_(OB_INVALID_TENANT_ID),
                  ls_id_(),
                  task_execute_zone_(),
                  task_type_(ObDRTaskType::MAX_TYPE) {}
  virtual ~ObDRTaskKey() {}
public:
  void reset();
  bool is_valid() const;
  bool operator==(const ObDRTaskKey &that) const;
  int init(const uint64_t tenant_id,
           const share::ObLSID &ls_id,
           const common::ObZone &task_execute_zone,
           const ObDRTaskType &task_type);
  int assign(const ObDRTaskKey &that);
  ObDRTaskKey& operator=(const ObDRTaskKey&) = delete;
  TO_STRING_KV(K_(tenant_id),
               K_(ls_id),
               K_(task_execute_zone),
               K_(task_type));

  uint64_t get_tenant_id() const { return tenant_id_; }
  const share::ObLSID &get_ls_id() const { return ls_id_; }
  const common::ObZone &get_zone() const { return task_execute_zone_; }
  const ObDRTaskType &get_task_type() const { return task_type_; }
private:
  uint64_t tenant_id_;
  share::ObLSID ls_id_;
  common::ObZone task_execute_zone_;
  ObDRTaskType task_type_;
};

enum class ObDRTaskPriority : int64_t
{
  HIGH_PRI = 0,
@@ -255,7 +245,6 @@ public:
      ls_id_(),
      cluster_id_(-1),
      transmit_data_size_(0),
      sibling_in_schedule_(false),
      invoked_source_(obrpc::ObAdminClearDRTaskArg::TaskType::AUTO),
      generate_time_(common::ObTimeUtility::current_time()),
      priority_(ObDRTaskPriority::MAX_PRI),
@@ -332,7 +321,6 @@ public:
               K_(ls_id),
               K_(cluster_id),
               K_(transmit_data_size),
               K_(sibling_in_schedule),
               K_(invoked_source),
               K_(generate_time),
               K_(priority),
@@ -346,12 +334,6 @@ public:
  const ObDRTaskKey &get_task_key() const { return task_key_; }
  int set_task_key(
      const ObDRTaskKey &task_key);
  int set_task_key(
      const uint64_t key_1,
      const uint64_t key_2,
      const uint64_t key_3,
      const uint64_t key_4,
      const ObDRTaskKeyType key_type);
  // operations of tenant_id
  uint64_t get_tenant_id() const { return tenant_id_; }
  void set_tenant_id(const uint64_t tenant_id) { tenant_id_ = tenant_id; }
@@ -364,9 +346,6 @@ public:
  // operations of transmit_data_size
  int64_t get_transmit_data_size() const { return transmit_data_size_; }
  void set_transmit_data_size(const int64_t size) { transmit_data_size_ = size; }
  // operations of sibling_in_schedule
  bool is_sibling_in_schedule() const { return sibling_in_schedule_; }
  void set_sibling_in_schedule(bool is_schedule) { sibling_in_schedule_ = is_schedule; }
  // operations of invoked_source_
  obrpc::ObAdminClearDRTaskArg::TaskType get_invoked_source() const { return invoked_source_; }
  void set_invoked_source(obrpc::ObAdminClearDRTaskArg::TaskType t) { invoked_source_ = t; }
@@ -412,7 +391,6 @@ protected:
   * transmitted, so the tranmit_data_size_ is set to zero.
   */
  int64_t transmit_data_size_;
  bool sibling_in_schedule_;
  obrpc::ObAdminClearDRTaskArg::TaskType invoked_source_;
  int64_t generate_time_;
  ObDRTaskPriority priority_;
@@ -430,7 +408,8 @@ public:
      src_member_(),
      data_src_member_(),
      force_data_src_member_(),
      paxos_replica_number_(0) {}
      paxos_replica_number_(0),
      prioritize_same_zone_src_(false) {}
  virtual ~ObMigrateLSReplicaTask() {}
public:
  int build(
@@ -449,8 +428,7 @@ public:
      const common::ObReplicaMember &src_member,
      const common::ObReplicaMember &data_src_member,
      const common::ObReplicaMember &force_data_src_member,
      const int64_t paxos_replica_number
      );
      const int64_t paxos_replica_number);

  // only use some necessary information build a ObMigrateLSReplicaTask
  // Specifically, this method is only used when manually executing operation and maintenance commands
@@ -480,7 +458,8 @@ public:
               K(src_member_),
               K(data_src_member_),
               K(force_data_src_member_),
               K(paxos_replica_number_));
               K(paxos_replica_number_),
               K(prioritize_same_zone_src_));

  virtual int get_execute_transmit_size(
      int64_t &execute_transmit_size) const override;
@@ -514,6 +493,8 @@ public:
      void *input_ptr,
      ObDRTask *&output_task) const override;
public:
  bool get_prioritize_same_zone_src() const { return prioritize_same_zone_src_; };
  void set_prioritize_same_zone_src(bool p) { prioritize_same_zone_src_ = p; };
  // operations of dst_replica_
  int set_dst_replica(
      const ObDstReplica &that);
@@ -548,6 +529,7 @@ private:
  common::ObReplicaMember data_src_member_;
  common::ObReplicaMember force_data_src_member_;
  int64_t paxos_replica_number_;
  bool prioritize_same_zone_src_;
};

class ObAddLSReplicaTask : public ObDRTask
@@ -48,12 +48,59 @@ using namespace share;

namespace rootserver
{

static const char* ls_replica_parallel_migration_mode[] = {
  "AUTO",
  "ON",
  "OFF"
};

const char* ObParallelMigrationMode::get_mode_str() const {
  STATIC_ASSERT(ARRAYSIZEOF(ls_replica_parallel_migration_mode) == (int64_t)MAX,
                "ls_replica_parallel_migration_mode string array size mismatch enum ParallelMigrationMode count");
  const char *str = NULL;
  if (mode_ >= AUTO && mode_ < MAX) {
    str = ls_replica_parallel_migration_mode[static_cast<int64_t>(mode_)];
  } else {
    LOG_WARN_RET(OB_ERR_UNEXPECTED, "invalid ParallelMigrationMode", K_(mode));
  }
  return str;
}

int64_t ObParallelMigrationMode::to_string(char *buf, const int64_t buf_len) const
{
  int64_t pos = 0;
  J_OBJ_START();
  J_KV(K_(mode), "mode", get_mode_str());
  J_OBJ_END();
  return pos;
}

int ObParallelMigrationMode::parse_from_string(const ObString &mode)
{
  int ret = OB_SUCCESS;
  bool found = false;
  STATIC_ASSERT(ARRAYSIZEOF(ls_replica_parallel_migration_mode) == (int64_t)MAX,
                "ls_replica_parallel_migration_mode string array size mismatch enum ParallelMigrationMode count");
  for (int64_t i = 0; i < ARRAYSIZEOF(ls_replica_parallel_migration_mode) && !found; i++) {
    if (0 == mode.case_compare(ls_replica_parallel_migration_mode[i])) {
      mode_ = static_cast<ParallelMigrationMode>(i);
      found = true;
      break;
    }
  }
  if (!found) {
    ret = OB_INVALID_ARGUMENT;
    LOG_WARN("fail to parse type from string", KR(ret), K(mode), K_(mode));
  }
  return ret;
}

ObDRTaskQueue::ObDRTaskQueue() : inited_(false),
                                 config_(nullptr),
                                 task_alloc_(),
                                 wait_list_(),
                                 schedule_list_(),
                                 task_map_(),
                                 rpc_proxy_(nullptr),
                                 priority_(ObDRTaskPriority::MAX_PRI)
{
@@ -69,20 +116,18 @@ void ObDRTaskQueue::reuse()
{
  while (!wait_list_.is_empty()) {
    ObDRTask *t = wait_list_.remove_first();
    remove_task_from_map_and_free_it_(t);
    free_task_(t);
  }
  while (!schedule_list_.is_empty()) {
    ObDRTask *t = schedule_list_.remove_first();
    remove_task_from_map_and_free_it_(t);
    free_task_(t);
  }
  task_map_.clear();
}

void ObDRTaskQueue::reset()
{
  wait_list_.reset();
  schedule_list_.reset();
  task_map_.clear();
}

void ObDRTaskQueue::free_task_(ObDRTask *&task)
@@ -94,17 +139,8 @@ void ObDRTaskQueue::free_task_(ObDRTask *&task)
  }
}

void ObDRTaskQueue::remove_task_from_map_and_free_it_(ObDRTask *&task)
{
  if (OB_NOT_NULL(task)) {
    task_map_.erase_refactored(task->get_task_key());
    free_task_(task);
  }
}

int ObDRTaskQueue::init(
    common::ObServerConfig &config,
    const int64_t bucket_num,
    obrpc::ObSrvRpcProxy *rpc_proxy,
    ObDRTaskPriority priority)
{
@@ -112,13 +148,10 @@ int ObDRTaskQueue::init(
  if (OB_UNLIKELY(inited_)) {
    ret = OB_INIT_TWICE;
    LOG_WARN("init twice", KR(ret));
  } else if (OB_UNLIKELY(bucket_num <= 0)
             || OB_ISNULL(rpc_proxy)
             || (ObDRTaskPriority::LOW_PRI != priority && ObDRTaskPriority::HIGH_PRI != priority)) {
  } else if (OB_ISNULL(rpc_proxy)
             || (ObDRTaskPriority::LOW_PRI != priority && ObDRTaskPriority::HIGH_PRI != priority)) {
    ret = OB_INVALID_ARGUMENT;
    LOG_WARN("invalid argument", KR(ret), K(bucket_num), KP(rpc_proxy), K(priority));
  } else if (OB_FAIL(task_map_.create(bucket_num, "DRTaskMap"))) {
    LOG_WARN("fail to create task map", KR(ret), K(bucket_num));
    LOG_WARN("invalid argument", KR(ret), KP(rpc_proxy), K(priority));
  } else if (OB_FAIL(task_alloc_.init(
      ObMallocAllocator::get_instance(), OB_MALLOC_MIDDLE_BLOCK_SIZE,
      ObMemAttr(common::OB_SERVER_TENANT_ID, "DRTaskAlloc")))) {
@@ -132,100 +165,11 @@ int ObDRTaskQueue::init(
  return ret;
}

int ObDRTaskQueue::check_task_in_scheduling(
    const ObDRTaskKey &task_key,
    bool &task_in_scheduling) const
{
  int ret = OB_SUCCESS;
  if (OB_UNLIKELY(!inited_)) {
    ret = OB_NOT_INIT;
    LOG_WARN("not init", KR(ret));
  } else if (OB_UNLIKELY(!task_key.is_valid())) {
    ret = OB_INVALID_ARGUMENT;
    LOG_WARN("invalid argument", KR(ret), K(task_key));
  } else {
    ObDRTask *task = nullptr;
    int tmp_ret = task_map_.get_refactored(task_key, task);
    if (OB_SUCCESS == tmp_ret) {
      if (OB_ISNULL(task)) {
        ret = OB_ERR_UNEXPECTED;
        LOG_WARN("a null task ptr getted from task_map", KR(ret), K(task_key));
      } else {
        task_in_scheduling = task->in_schedule();
      }
    } else if (OB_HASH_NOT_EXIST == tmp_ret) {
      // task not exist means task not executing
      task_in_scheduling = false;
    } else {
      ret = tmp_ret;
      LOG_WARN("fail to get from map", KR(ret), K(task_key));
    }
  }
  return ret;
}

int ObDRTaskQueue::check_task_exist(
    const ObDRTaskKey &task_key,
    bool &task_exist)
{
  int ret = OB_SUCCESS;
  if (OB_UNLIKELY(!inited_)) {
    ret = OB_NOT_INIT;
    LOG_WARN("not init", KR(ret));
  } else if (OB_UNLIKELY(!task_key.is_valid())) {
    ret = OB_INVALID_ARGUMENT;
    LOG_WARN("invalid argument", KR(ret), K(task_key));
  } else {
    ObDRTask *task = nullptr;
    int tmp_ret = task_map_.get_refactored(task_key, task);
    if (OB_SUCCESS == tmp_ret) {
      task_exist = true;
    } else if (OB_HASH_NOT_EXIST == tmp_ret) {
      task_exist = false;
    } else {
      ret = OB_ERR_UNEXPECTED;
      LOG_WARN("fail to get from task_map", KR(ret), K(task_key));
    }
  }
  return ret;
}

int ObDRTaskQueue::push_task_in_wait_list(
    ObDRTaskMgr &task_mgr,
    const ObDRTaskQueue &sibling_queue,
    const ObDRTask &task,
    bool &has_task_in_schedule)
{
  int ret = OB_SUCCESS;
  has_task_in_schedule = false;
  if (OB_UNLIKELY(!inited_)) {
    ret = OB_NOT_INIT;
    LOG_WARN("not init", KR(ret));
  } else {
    const ObDRTaskKey &task_key = task.get_task_key();
    ObDRTask *task_ptr = nullptr;
    int tmp_ret = task_map_.get_refactored(task_key, task_ptr);
    if (OB_HASH_NOT_EXIST == tmp_ret) {
      if (OB_FAIL(do_push_task_in_wait_list_(task_mgr, sibling_queue, task, has_task_in_schedule))) {
        LOG_WARN("fail to push back", KR(ret));
      }
    } else if (OB_SUCCESS == tmp_ret) {
      ret = OB_ENTRY_EXIST;
      LOG_INFO("disaster recovery task exist", KR(ret), K(task_key), K(task), KP(this));
    } else {
      ret = tmp_ret;
      LOG_WARN("fail to check task exist", KR(ret), K(task_key));
    }
  }
  return ret;
}

int ObDRTaskQueue::push_task_in_schedule_list(
    const ObDRTask &task)
{
  // STEP 1: push task into schedule list
  // STEP 2: push task into task_map
  // STEP 3: set task in schedule
  // STEP 2: set task in schedule
  int ret = OB_SUCCESS;
  void *raw_ptr = nullptr;
  ObDRTask *new_task = nullptr;
@@ -241,8 +185,6 @@ int ObDRTaskQueue::push_task_in_schedule_list(
  } else if (OB_ISNULL(new_task)) {
    ret = OB_ERR_UNEXPECTED;
    LOG_WARN("new_task is nullptr", KR(ret));
  } else if (OB_FAIL(task_map_.set_refactored(new_task->get_task_key(), new_task))) {
    LOG_WARN("fail to set map", KR(ret), "task_key", new_task->get_task_key());
  } else {
    // set schedule_time for this task
    new_task->set_schedule();
@@ -256,7 +198,7 @@ int ObDRTaskQueue::push_task_in_schedule_list(

  if (OB_FAIL(ret)) {
    if (OB_NOT_NULL(new_task)) {
      remove_task_from_map_and_free_it_(new_task);
      free_task_(new_task);
    } else if (OB_NOT_NULL(raw_ptr)) {
      task_alloc_.free(raw_ptr);
      raw_ptr = nullptr;
@@ -265,11 +207,9 @@
  return ret;
}

int ObDRTaskQueue::do_push_task_in_wait_list_(
int ObDRTaskQueue::do_push_task_in_wait_list(
    ObDRTaskMgr &task_mgr,
    const ObDRTaskQueue &sibling_queue,
    const ObDRTask &task,
    bool &has_task_in_schedule)
    const ObDRTask &task)
{
  int ret = OB_SUCCESS;
  void *raw_ptr = nullptr;
@@ -277,11 +217,7 @@ int ObDRTaskQueue::do_push_task_in_wait_list_(
  if (OB_UNLIKELY(!inited_)) {
    ret = OB_NOT_INIT;
    LOG_WARN("not init", KR(ret));
  } else if (OB_ISNULL(config_)) {
    ret = OB_ERR_UNEXPECTED;
    LOG_WARN("config_ ptr is null", KR(ret), KP(config_));
  } else if (OB_ISNULL(raw_ptr = task_alloc_.alloc(
      task.get_clone_size()))) {
  } else if (OB_ISNULL(raw_ptr = task_alloc_.alloc(task.get_clone_size()))) {
    ret = OB_ALLOCATE_MEMORY_FAILED;
    LOG_WARN("fail to alloc task", KR(ret), "size", task.get_clone_size());
  } else if (OB_FAIL(task.clone(raw_ptr, new_task))) {
@@ -293,36 +229,12 @@ int ObDRTaskQueue::do_push_task_in_wait_list_(
    ret = OB_ERR_UNEXPECTED;
    LOG_WARN("fail to add new task to wait list", KR(ret), "task", *new_task);
  } else {
    has_task_in_schedule = false;
    bool sibling_in_schedule = false;
    if (OB_FAIL(sibling_queue.check_task_in_scheduling(
            new_task->get_task_key(), sibling_in_schedule))) {
      LOG_WARN("fail to check has in schedule task", KR(ret),
               "task_key", new_task->get_task_key());
    } else if (OB_FAIL(task_map_.set_refactored(
            new_task->get_task_key(), new_task))) {
      LOG_WARN("fail to set map", KR(ret), "task_key", new_task->get_task_key());
    } else if (task_mgr.get_reach_concurrency_limit() && !sibling_in_schedule) {
      task_mgr.clear_reach_concurrency_limit();
    }
    if (OB_SUCC(ret)) {
      has_task_in_schedule = sibling_in_schedule;
      if (OB_FAIL(set_sibling_in_schedule(new_task->get_task_key(), sibling_in_schedule))) {
        LOG_WARN("fail to set sibling in schedule", KR(ret),
                 "task_key", new_task->get_task_key(), K(sibling_in_schedule));
      }
    }

    if (OB_FAIL(ret)) {
      wait_list_.remove(new_task);
    } else {
      LOG_INFO("success to push a task in waiting list", K(task), K(has_task_in_schedule));
    }
    LOG_INFO("success to push a task in waiting list", K(task));
  }

  if (OB_FAIL(ret)) {
    if (OB_NOT_NULL(new_task)) {
      remove_task_from_map_and_free_it_(new_task);
      free_task_(new_task);
    } else if (OB_NOT_NULL(raw_ptr)) {
      task_alloc_.free(raw_ptr);
      raw_ptr = nullptr;
@@ -344,13 +256,8 @@ int ObDRTaskQueue::pop_task(
    LOG_WARN("config_ ptr is null", KR(ret), KP(config_));
  } else {
    DLIST_FOREACH(t, wait_list_) {
      if (t->is_sibling_in_schedule()) {
        // task can not pop
        LOG_INFO("can not pop this task because a sibling task already in schedule", KPC(t));
      } else {
        task = t;
        break;
      }
      task = t; // any task can be pop, no other task in double queue is conflict with it.
      break;
    }
    if (OB_NOT_NULL(task)) {
      // when task not empty, we move it from wait to schedule list,
@@ -365,39 +272,56 @@ int ObDRTaskQueue::pop_task(
      }
      // if fail to add to schedule list, clean it directly
      if (OB_FAIL(ret)) {
        remove_task_from_map_and_free_it_(task);
        free_task_(task);
      }
    }
  }
  return ret;
}

int ObDRTaskQueue::get_task(
int ObDRTaskQueue::get_task_by_task_id(
    const share::ObTaskId &task_id,
    const ObDRTaskKey &task_key,
    ObDRTask *&task)
{
  int ret = OB_SUCCESS;
  task = nullptr;
  if (OB_UNLIKELY(!inited_)) {
    ret = OB_NOT_INIT;
    LOG_WARN("not init", KR(ret));
  } else if (OB_UNLIKELY(!task_id.is_valid())) {
    ret = OB_INVALID_ARGUMENT;
    LOG_WARN("invalid argument", KR(ret), K(task_id));
  } else if (OB_FAIL(get_task_by_task_id_in_list_(task_id, wait_list_, task))) {
    LOG_WARN("check task exist in wait list failed", KR(ret), K(task_id), K(wait_list_));
  } else if (OB_NOT_NULL(task)) {
  } else if (OB_FAIL(get_task_by_task_id_in_list_(task_id, schedule_list_, task))) {
    LOG_WARN("check task exist in schedule list failed", KR(ret), K(task_id), K(schedule_list_));
  }
  return ret;
}

int ObDRTaskQueue::get_task_by_task_id_in_list_(
    const share::ObTaskId &task_id,
    TaskList &list,
    ObDRTask *&task)
{
  int ret = OB_SUCCESS;
  task = nullptr;
  if (OB_UNLIKELY(!inited_)) {
    ret = OB_NOT_INIT;
    LOG_WARN("not init", KR(ret));
  } else if (OB_UNLIKELY(!task_id.is_valid())) {
    ret = OB_INVALID_ARGUMENT;
    LOG_WARN("invalid argument", KR(ret), K(task_id));
  } else {
    ObDRTask *my_task = nullptr;
    int tmp_ret = task_map_.get_refactored(task_key, my_task);
    if (OB_HASH_NOT_EXIST == tmp_ret) {
      task = nullptr;
    } else if (OB_SUCCESS == tmp_ret) {
      if (OB_ISNULL(my_task)) {
    DLIST_FOREACH(task_in_list, list) {
      if (OB_ISNULL(task_in_list)) {
        ret = OB_ERR_UNEXPECTED;
        LOG_WARN("my_task ptr is null", KR(ret), KP(my_task));
      } else if (my_task->get_task_id() == task_id) {
        task = my_task;
      } else {
        task = nullptr;
        LOG_WARN("task_in_list is null ptr", KR(ret), K(task_id), K(list));
      } else if (task_in_list->get_task_id() == task_id) {
        task = task_in_list;
        break;
      }
    } else {
      ret = tmp_ret;
      LOG_WARN("fail to get task from map", KR(ret), K(task_key), K(task_id));
    }
  }
  return ret;
@@ -528,7 +452,7 @@ int ObDRTaskQueue::try_clean_and_cancel_task(
        FLOG_INFO("need cancel migrate task in wait list", K(*t));
        // remove from wait_list_, need use DLIST_FOREACH_REMOVESAFE
        wait_list_.remove(t);
        remove_task_from_map_and_free_it_(t);
        free_task_(t);
      }
    }
  }
@@ -552,45 +476,19 @@ int ObDRTaskQueue::finish_schedule(
    // remove from schedule_list_
    schedule_list_.remove(task);
    FLOG_INFO("[DRTASK_NOTICE] success to finish schedule task", KR(ret), KPC(task));
    remove_task_from_map_and_free_it_(task);
    free_task_(task);
  }
  return ret;
}

int ObDRTaskQueue::set_sibling_in_schedule(
    const ObDRTask &task,
    const bool in_schedule)
{
  int ret = OB_SUCCESS;
  if (OB_UNLIKELY(!inited_)) {
    ret = OB_NOT_INIT;
    LOG_WARN("not init", KR(ret));
  } else {
    const ObDRTaskKey &task_key = task.get_task_key();
    ObDRTask *my_task = nullptr;
    int tmp_ret = task_map_.get_refactored(task_key, my_task);
    if (OB_HASH_NOT_EXIST == tmp_ret) {
      // bypass
    } else if (OB_SUCCESS == tmp_ret) {
      if (OB_ISNULL(my_task)) {
        ret = OB_ERR_UNEXPECTED;
        LOG_WARN("my_task ptr is null", KR(ret), K(task));
      } else {
        my_task->set_sibling_in_schedule(in_schedule);
      }
    } else {
      ret = tmp_ret;
      LOG_WARN("fail to get task from map", KR(ret), K(task_key));
    }
  }
  return ret;
}

int ObDRTaskQueue::set_sibling_in_schedule(
int ObDRTaskQueue::check_whether_task_conflict_in_list_(
    const ObDRTaskKey &task_key,
    const bool in_schedule)
    const TaskList &list,
    const bool enable_parallel_migration,
    bool &is_conflict) const
{
  int ret = OB_SUCCESS;
  is_conflict = false;
  if (OB_UNLIKELY(!inited_)) {
    ret = OB_NOT_INIT;
    LOG_WARN("not init", KR(ret));
@@ -598,25 +496,63 @@ int ObDRTaskQueue::set_sibling_in_schedule(
    ret = OB_INVALID_ARGUMENT;
    LOG_WARN("invalid argument", KR(ret), K(task_key));
  } else {
    ObDRTask *my_task = nullptr;
    int tmp_ret = task_map_.get_refactored(task_key, my_task);
    if (OB_HASH_NOT_EXIST == tmp_ret) {
      // bypass
    } else if (OB_SUCCESS == tmp_ret) {
      if (OB_ISNULL(my_task)) {
    DLIST_FOREACH(task_in_list, list) {
      if (OB_ISNULL(task_in_list)) {
        ret = OB_ERR_UNEXPECTED;
        LOG_WARN("my_task ptr is null", KR(ret), K(task_key), KP(my_task));
      } else {
        my_task->set_sibling_in_schedule(in_schedule);
      }
    } else {
      ret = tmp_ret;
      LOG_WARN("fail to get task from map", KR(ret), K(task_key));
        LOG_WARN("task_in_list is null ptr", KR(ret), K(task_key), K(list));
      } else if (task_in_list->get_task_key().get_tenant_id() == task_key.get_tenant_id()
                 && task_in_list->get_task_key().get_ls_id() == task_key.get_ls_id()) {
        // tenant_id + ls_id is same
        if (!enable_parallel_migration) {
          LOG_INFO("enable_parallel_migration is false, tenant_id + ls_id is the same, prohibit parallel",
                   KR(ret), K(enable_parallel_migration), K(task_key));
          // if tenant_id + ls_id is the same, then task conflict
          is_conflict = true;
        } else if (ObDRTaskType::LS_MIGRATE_REPLICA != task_key.get_task_type()
                   || ObDRTaskType::LS_MIGRATE_REPLICA != task_in_list->get_disaster_recovery_task_type()) {
          // if one of the two task is not a migration task, the task is conflict.
          LOG_INFO("there is a task is not migration task, prohibit parallel", KR(ret),
                   K(task_key), K(task_in_list->get_disaster_recovery_task_type()));
          is_conflict = true;
        } else if (task_in_list->get_task_key().get_zone() == task_key.get_zone()) {
          LOG_INFO("two migrate task conflict, task execution zone is the same, prohibit parallel",
                   KR(ret), K(task_key), K(task_in_list->get_task_key()));
          is_conflict = true;
        }
        if (is_conflict) {
          break;
        }
      } // end with tenant_id + ls_id is same
    }
  }
  return ret;
}

int ObDRTaskQueue::check_whether_task_conflict(
    const ObDRTaskKey &task_key,
    const bool enable_parallel_migration,
    bool &is_conflict)
{
  int ret = OB_SUCCESS;
  is_conflict = false;
  if (OB_UNLIKELY(!inited_)) {
    ret = OB_NOT_INIT;
    LOG_WARN("not init", KR(ret));
  } else if (OB_UNLIKELY(!task_key.is_valid())) {
    ret = OB_INVALID_ARGUMENT;
    LOG_WARN("invalid argument", KR(ret), K(task_key));
  } else if (OB_FAIL(check_whether_task_conflict_in_list_(task_key, wait_list_, enable_parallel_migration, is_conflict))) {
    LOG_WARN("check task conflict in wait list failed", KR(ret), K(task_key), K(wait_list_), K(enable_parallel_migration));
  } else if (is_conflict) {
    LOG_INFO("task conflict in wait list", KR(ret), K(task_key), K(enable_parallel_migration), K(priority_));
  } else if (OB_FAIL(check_whether_task_conflict_in_list_(task_key, schedule_list_, enable_parallel_migration, is_conflict))) {
    LOG_WARN("check task conflict in schedule list failed", KR(ret), K(task_key), K(schedule_list_), K(enable_parallel_migration));
  } else if (is_conflict) {
    LOG_INFO("task conflict in schedule list", KR(ret), K(task_key), K(enable_parallel_migration), K(priority_));
  }
  return ret;
}
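In plain terms, the conflict rule this commit introduces is: two DR tasks conflict when they target the same tenant and log stream, unless parallel migration is enabled for the tenant, both tasks are LS_MIGRATE_REPLICA, and they execute in different zones. A self-contained restatement of that predicate, under the assumption that the comments above describe the intended semantics (TaskKey/TaskType here are illustrative stand-ins, not the repo's types; the real check walks wait_list_ and schedule_list_ as shown):

#include <cstdint>
#include <string>

enum class TaskType { MigrateReplica, Other };

struct TaskKey {
  uint64_t tenant_id;
  int64_t ls_id;
  std::string zone;  // zone the task executes in
  TaskType type;
};

// Mirrors check_whether_task_conflict_in_list_: true means the two tasks
// may not coexist in the queues.
bool conflicts(const TaskKey &a, const TaskKey &b, bool enable_parallel_migration) {
  if (a.tenant_id != b.tenant_id || a.ls_id != b.ls_id) {
    return false;  // different log stream: never a conflict
  }
  if (!enable_parallel_migration) {
    return true;   // same LS: at most one task at a time
  }
  if (a.type != TaskType::MigrateReplica || b.type != TaskType::MigrateReplica) {
    return true;   // parallelism applies only to migration tasks
  }
  return a.zone == b.zone;  // parallel only across different execution zones
}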

int ObDRTaskQueue::dump_statistic() const
{
  int ret = OB_SUCCESS;
@@ -668,10 +604,10 @@ int ObDRTaskMgr::init(
  sql_proxy_ = sql_proxy;
  schema_service_ = schema_service;
  if (OB_FAIL(high_task_queue_.init(
      config, TASK_QUEUE_LIMIT, rpc_proxy_, ObDRTaskPriority::HIGH_PRI))) {
      config, rpc_proxy_, ObDRTaskPriority::HIGH_PRI))) {
    LOG_WARN("fail to init high priority task queue", KR(ret));
  } else if (OB_FAIL(low_task_queue_.init(
      config, TASK_QUEUE_LIMIT, rpc_proxy_, ObDRTaskPriority::LOW_PRI))) {
      config, rpc_proxy_, ObDRTaskPriority::LOW_PRI))) {
    LOG_WARN("fail to init low priority task queue", KR(ret));
  } else if (OB_FAIL(disaster_recovery_task_table_updater_.init(sql_proxy, this))) {
    LOG_WARN("fail to init a ObDRTaskTableUpdater", KR(ret));
@@ -800,55 +736,7 @@ void ObDRTaskMgr::run3()
  FLOG_INFO("disaster task mgr exits");
}

int ObDRTaskMgr::check_task_in_executing(
    const ObDRTaskKey &task_key,
    const ObDRTaskPriority priority,
    bool &task_in_executing)
{
  int ret = OB_SUCCESS;
  if (OB_FAIL(check_inner_stat_())) {
    LOG_WARN("fail to check inner stat", KR(ret), K_(inited), K_(stopped), K_(loaded));
  } else if (ObDRTaskPriority::HIGH_PRI != priority
             && ObDRTaskPriority::LOW_PRI != priority) {
    ret = OB_INVALID_ARGUMENT;
    LOG_WARN("invalid argument", KR(ret), K(priority));
  } else {
    ObThreadCondGuard guard(cond_);
    ObDRTaskQueue &queue = ObDRTaskPriority::LOW_PRI == priority
                           ? low_task_queue_
                           : high_task_queue_;
    if (OB_FAIL(queue.check_task_in_scheduling(task_key, task_in_executing))) {
      LOG_WARN("fail to check task exist", KR(ret), K(task_key));
    }
  }
  return ret;
}

int ObDRTaskMgr::check_task_exist(
    const ObDRTaskKey &task_key,
    const ObDRTaskPriority priority,
    bool &task_exist)
{
  int ret = OB_SUCCESS;
  if (OB_FAIL(check_inner_stat_())) {
    LOG_WARN("fail to check inner stat", KR(ret), K_(inited), K_(stopped), K_(loaded));
  } else if (ObDRTaskPriority::HIGH_PRI != priority
             && ObDRTaskPriority::LOW_PRI != priority) {
    ret = OB_INVALID_ARGUMENT;
    LOG_WARN("invalid argument", KR(ret), K(priority));
  } else {
    ObThreadCondGuard guard(cond_);
    ObDRTaskQueue &queue = ObDRTaskPriority::LOW_PRI == priority
                           ? low_task_queue_
                           : high_task_queue_;
    if (OB_FAIL(queue.check_task_exist(task_key, task_exist))) {
      LOG_WARN("fail to check task exist", KR(ret), K(task_key));
    }
  }
  return ret;
}

int ObDRTaskMgr::add_task_in_queue_and_execute(const ObDRTask &task)
int ObDRTaskMgr::add_task_in_queue_and_execute(ObDRTask &task)
{
  int ret = OB_SUCCESS;
  int tmp_ret = OB_SUCCESS;
@@ -859,31 +747,19 @@ int ObDRTaskMgr::add_task_in_queue_and_execute(const ObDRTask &task)
    LOG_WARN("invalid dr task", KR(ret), K(task));
  } else {
    ObThreadCondGuard guard(cond_);
    bool task_exist = false;
    bool sibling_in_schedule = false;
    bool is_conflict = false;
    ObDRTaskQueue &queue = task.is_high_priority_task() ? high_task_queue_ : low_task_queue_;
    ObDRTaskQueue &sibling_queue = task.is_high_priority_task() ? low_task_queue_ : high_task_queue_;
    if (OB_UNLIKELY(queue.task_cnt() >= TASK_QUEUE_LIMIT)) {
      ret = OB_SIZE_OVERFLOW;
      LOG_WARN("disaster recovery task queue is full", KR(ret), "task_cnt", queue.task_cnt());
    } else if (OB_FAIL(queue.check_task_exist(task.get_task_key(), task_exist))) {
      LOG_WARN("fail to check task in scheduling", KR(ret), K(task));
    } else if (task_exist) {
    } else if (OB_FAIL(check_whether_task_conflict_(task, is_conflict))) {
      LOG_WARN("fail to check whether task conflict", KR(ret), K(task));
    } else if (is_conflict) {
      // task conflict return error code, report an error to user
      ret = OB_ENTRY_EXIST;
      LOG_WARN("ls disaster recovery task has existed in queue", KR(ret), K(task), K(task_exist));
    } else if (OB_FAIL(sibling_queue.check_task_in_scheduling(task.get_task_key(), sibling_in_schedule))) {
      LOG_WARN("fail to check task in scheduling", KR(ret), K(task));
    } else if (sibling_in_schedule) {
      ret = OB_ENTRY_EXIST;
      LOG_WARN("ls disaster recovery task has existed in sibling_queue",
               KR(ret), K(task), K(sibling_in_schedule));
      LOG_WARN("ls disaster recovery task has existed in queue", KR(ret), K(task), K(is_conflict));
    } else if (OB_FAIL(queue.push_task_in_schedule_list(task))) {
      LOG_WARN("fail to add task to schedule list", KR(ret), K(task));
    } else if (OB_FAIL(set_sibling_in_schedule(task, true/*in_schedule*/))) {
      //after successfully adding the scheduling queue,
      //need mark in both queues that the task has been scheduled in the queue.
      //if there is a task in the waiting queue of another queue, need update its mark
      LOG_WARN("set sibling in schedule failed", KR(ret), K(task));
    } else {
      if (OB_SUCCESS != (tmp_ret = task.log_execute_start())) {
        LOG_WARN("fail to log task start", KR(tmp_ret), K(task));
@@ -898,45 +774,30 @@ int ObDRTaskMgr::add_task_in_queue_and_execute(const ObDRTask &task)
}

int ObDRTaskMgr::add_task(
    const ObDRTask &task)
    ObDRTask &task)
{
  int ret = OB_SUCCESS;
  if (OB_FAIL(check_inner_stat_())) {
    ret = OB_NOT_INIT;
    LOG_WARN("fail to check inner stat", KR(ret), K_(inited), K_(loaded), K_(stopped));
  } else if (OB_UNLIKELY(!task.is_valid())) {
    ret = OB_INVALID_ARGUMENT;
    LOG_WARN("invalid dr task", KR(ret), K(task));
  } else {
    ObThreadCondGuard guard(cond_);
    ObDRTaskQueue &queue = task.is_high_priority_task()
                           ? high_task_queue_
                           : low_task_queue_;
    ObDRTaskQueue &sibling_queue = task.is_high_priority_task()
                                   ? low_task_queue_
                                   : high_task_queue_;
    bool has_task_in_schedule = false;
    ObDRTaskQueue &queue = task.is_high_priority_task() ? high_task_queue_ : low_task_queue_;
    bool is_conflict = false;
    if (OB_UNLIKELY(queue.task_cnt() >= TASK_QUEUE_LIMIT)) {
      ret = OB_SIZE_OVERFLOW;
      LOG_WARN("disaster recovery task queue is full", KR(ret), "task_cnt", queue.task_cnt());
    } else if (OB_FAIL(queue.push_task_in_wait_list(*this, sibling_queue, task, has_task_in_schedule))) {
      if (OB_ENTRY_EXIST != ret) {
        LOG_WARN("fail to push task", KR(ret), K(task));
      } else {
        ret = OB_SUCCESS;
        LOG_INFO("task already exist in queue", K(task));
      }
    } else if (OB_FAIL(check_whether_task_conflict_(task, is_conflict))) {
      LOG_WARN("fail to check whether task conflict", KR(ret), K(task));
    } else if (is_conflict) {
      ret = OB_ENTRY_EXIST;
      LOG_WARN("ls disaster recovery task has existed in queue", KR(ret), K(task), K(is_conflict));
    } else if (OB_FAIL(queue.do_push_task_in_wait_list(*this, task))) {
      LOG_WARN("fail to push task", KR(ret), K(task));
    } else {
      int64_t wait_cnt = 0;
      int64_t schedule_cnt = 0;
      if (OB_FAIL(inner_get_task_cnt_(wait_cnt, schedule_cnt))) {
        LOG_WARN("fail to get task cnt", KR(ret));
      } else if (!has_task_in_schedule
                 && 0 == get_reach_concurrency_limit()) {
        cond_.broadcast();
        LOG_INFO("success to broad cast cond_", K(wait_cnt), K(schedule_cnt));
      }
      clear_reach_concurrency_limit();
      cond_.broadcast();
      LOG_INFO("[DRTASK_NOTICE] add task to disaster recovery task mgr finish", KR(ret), K(task));
    }
  }
@@ -947,22 +808,13 @@ int ObDRTaskMgr::deal_with_task_reply(
    const ObDRTaskReplyResult &reply)
{
  int ret = OB_SUCCESS;
  ObDRTaskKey task_key;
  if (OB_FAIL(check_inner_stat_())) {
    ret = OB_NOT_INIT;
    LOG_WARN("fail to check inner stat", KR(ret), K_(inited), K_(loaded), K_(stopped));
  } else if (OB_FAIL(task_key.init(
      reply.tenant_id_,
      reply.ls_id_.id(),
      0, /* set to 0 */
      0, /* set to 0 */
      ObDRTaskKeyType::FORMAL_DR_KEY))) {
    LOG_WARN("fail to init task key", KR(ret), K(reply));
  } else {
    int tmp_ret = OB_SUCCESS;
    ObDRTask *task = nullptr;
    ObThreadCondGuard guard(cond_);
    if (OB_SUCCESS != (tmp_ret = get_task_by_id_(reply.task_id_, task_key, task))) {
    if (OB_SUCCESS != (tmp_ret = get_task_by_id_(reply.task_id_, task))) {
      if (OB_ENTRY_NOT_EXIST == tmp_ret) {
        // task not exist, try record this reply result
        ROOTSERVICE_EVENT_ADD("disaster_recovery", "finish_disaster_recovery_task",
@@ -972,7 +824,7 @@ int ObDRTaskMgr::deal_with_task_reply(
            "execute_result", reply.result_,
            "ret_comment", ob_disaster_recovery_task_ret_comment_strs(ObDRTaskRetComment::RECEIVE_FROM_STORAGE_RPC));
      } else {
        LOG_WARN("fail to get task from task manager", KR(tmp_ret), K(reply), K(task_key));
        LOG_WARN("fail to get task from task manager", KR(tmp_ret), K(reply));
      }
    } else if (OB_SUCCESS != (tmp_ret = task->log_execute_result(reply.result_, ObDRTaskRetComment::RECEIVE_FROM_STORAGE_RPC))){
      LOG_WARN("fail to log execute result", KR(tmp_ret), K(reply));
@@ -980,7 +832,7 @@ int ObDRTaskMgr::deal_with_task_reply(

    if (OB_FAIL(async_add_cleaning_task_to_updater(
        reply.task_id_,
        task_key,
        task->get_task_key(),
        reply.result_,
        false,/*need_record_event*/
        ObDRTaskRetComment::RECEIVE_FROM_STORAGE_RPC,
@@ -1003,12 +855,12 @@ int ObDRTaskMgr::async_add_cleaning_task_to_updater(
  ObDRTask *task = nullptr;
  if (OB_FAIL(check_inner_stat_())) {
    LOG_WARN("fail to check inner stat", KR(ret), K_(inited), K_(stopped), K_(loaded));
  } else if (OB_FAIL(get_task_by_id_(task_id, task_key, task))) {
  } else if (OB_FAIL(get_task_by_id_(task_id, task))) {
    if (OB_ENTRY_NOT_EXIST == ret) {
      LOG_WARN("fail to get task, task may be cleaned earlier", KR(ret), K(task_id), K(task_key));
      LOG_WARN("fail to get task, task may be cleaned earlier", KR(ret), K(task_id));
      ret = OB_SUCCESS;
    } else {
      LOG_WARN("fail to get task from task manager", KR(ret), K(task_id), K(task_key));
      LOG_WARN("fail to get task from task manager", KR(ret), K(task_id));
    }
  }
  if (OB_SUCC(ret)
@@ -1031,7 +883,6 @@ int ObDRTaskMgr::async_add_cleaning_task_to_updater(

int ObDRTaskMgr::do_cleaning(
    const share::ObTaskId &task_id,
    const ObDRTaskKey &task_key,
    const int ret_code,
    const bool need_clear_server_data_in_limit,
    const bool need_record_event,
@@ -1046,7 +897,7 @@ int ObDRTaskMgr::do_cleaning(
  ObDRTask *task = nullptr;
  common::ObAddr dst_server;
  for (int64_t i = 0; OB_SUCC(ret) && i < ARRAYSIZEOF(queues_); ++i) {
    if (OB_FAIL(queues_[i].get_task(task_id, task_key, task))) {
    if (OB_FAIL(queues_[i].get_task_by_task_id(task_id, task))) {
      LOG_WARN("fail to get schedule task from queue", KR(ret), "priority", queues_[i].get_priority_str());
    } else if (OB_NOT_NULL(task)) {
      task_queue = &queues_[i];
@@ -1057,21 +908,18 @@ int ObDRTaskMgr::do_cleaning(
  if (OB_SUCC(ret)) {
    if (OB_ISNULL(task)) {
      LOG_INFO("in schedule taks not found, maybe not sync because of network traffic",
               K(task_id), K(task_key), K(ret_code));
               K(task_id), K(ret_code));
    } else {
      if (need_record_event) {
        (void)log_task_result(*task, ret_code, ret_comment);
      }
      dst_server = task->get_dst_server();
      if (OB_FAIL(set_sibling_in_schedule(*task, false/* not in schedule*/))) {
        LOG_WARN("fail to set sibling in schedule", KR(ret), KPC(task));
      } else if (OB_ISNULL(task_queue)) {
      if (OB_ISNULL(task_queue)) {
        LOG_INFO("task_queue is null"); // by pass
      } else if (OB_FAIL(task_queue->finish_schedule(task))) {
        LOG_WARN("fail to finish scheduling task", KR(ret), KPC(task));
      }
    }
    clear_reach_concurrency_limit();
  }
}
  return ret;
@@ -1085,7 +933,6 @@ int ObDRTaskMgr::get_all_task_count(
{
  int ret = OB_SUCCESS;
  if (OB_FAIL(check_inner_stat_())) {
    ret = OB_NOT_INIT;
    LOG_WARN("not init", KR(ret), K_(inited), K_(loaded), K_(stopped));
  } else {
    ObThreadCondGuard guard(cond_);
@@ -1111,14 +958,13 @@ int ObDRTaskMgr::log_task_result(

int ObDRTaskMgr::get_task_by_id_(
    const share::ObTaskId &task_id,
    const ObDRTaskKey &task_key,
    ObDRTask *&task)
{
  int ret = OB_SUCCESS;
  ObDRTask *task_to_get = nullptr;
  void *raw_ptr = nullptr;
  for (int64_t i = 0; OB_SUCC(ret) && i < ARRAYSIZEOF(queues_); ++i) {
    if (OB_FAIL(queues_[i].get_task(task_id, task_key, task_to_get))) {
    if (OB_FAIL(queues_[i].get_task_by_task_id(task_id, task_to_get))) {
      LOG_WARN("fail to get schedule task from queue", KR(ret), "priority", queues_[i].get_priority_str());
    } else if (OB_NOT_NULL(task_to_get)) {
      break;
@@ -1127,7 +973,7 @@ int ObDRTaskMgr::get_task_by_id_(
  if (OB_SUCC(ret) && OB_ISNULL(task_to_get)) {
    task = nullptr;
    ret = OB_ENTRY_NOT_EXIST;
    LOG_WARN("task not exist, maybe cleaned earier", KR(ret), K(task_id), K(task_key));
    LOG_WARN("task not exist, maybe cleaned earier", KR(ret), K(task_id));
  } else {
    task = task_to_get;
  }
@@ -1164,7 +1010,6 @@ int ObDRTaskMgr::load_task_to_schedule_list_()
  for (int64_t i = 0; i < static_cast<int64_t>(ObDRTaskPriority::MAX_PRI); ++i) {
    queues_[i].reuse();
  }
  clear_reach_concurrency_limit();
  for (int64_t i = 0; OB_SUCC(ret) && i < tenant_id_array.count(); ++i) {
    // load this tenant's task info into schedule_list
    // TODO@jingyu.cr: need to isolate different tenant
@@ -1449,8 +1294,7 @@ int ObDRTaskMgr::try_pop_task(
    LOG_WARN("fail to check inner stat", KR(ret), K_(inited), K_(stopped), K_(loaded));
  } else if (OB_FAIL(inner_get_task_cnt_(wait_cnt, in_schedule_cnt))) {
    LOG_WARN("fail to get task cnt", KR(ret));
  } else if (wait_cnt > 0
             && 0 == concurrency_limited_ts_) {
  } else if (wait_cnt > 0) {
    if (OB_FAIL(pop_task(my_task))) {
      LOG_WARN("fail to pop task", KR(ret));
    } else if (OB_ISNULL(my_task)) {
@@ -1478,13 +1322,9 @@ int ObDRTaskMgr::try_pop_task(
  } else {
    int64_t now = ObTimeUtility::current_time();
    cond_.wait(get_schedule_interval());
    if (get_reach_concurrency_limit() + CONCURRENCY_LIMIT_INTERVAL < now) {
      clear_reach_concurrency_limit();
      LOG_TRACE("success to clear concurrency limit");
    }
  }
  if (OB_SUCC(ret) && OB_NOT_NULL(task)) {
    LOG_INFO("[DRTASK_NOTICE] success to pop a task", KPC(task), K_(concurrency_limited_ts),
    LOG_INFO("[DRTASK_NOTICE] success to pop a task", KPC(task),
             K(in_schedule_cnt));
  }
  return ret;
@@ -1509,18 +1349,6 @@ int ObDRTaskMgr::pop_task(
      }
    }
  }
  if (OB_SUCC(ret)) {
    if (OB_ISNULL(task)) {
      if (wait_cnt > 0) {
        set_reach_concurrency_limit();
      }
    } else {
      const bool in_schedule = true;
      if (OB_FAIL(set_sibling_in_schedule(*task, in_schedule))) {
        LOG_WARN("set sibling in schedule failed", KR(ret), KPC(task));
      }
    }
  }
  }
  return ret;
}
@@ -1595,22 +1423,112 @@ int ObDRTaskMgr::execute_task(
  return ret;
}

int ObDRTaskMgr::set_sibling_in_schedule(
    const ObDRTask &task,
    const bool in_schedule)
int ObDRTaskMgr::check_whether_task_conflict_(
    ObDRTask &task,
    bool &is_conflict)
{
  int ret = OB_SUCCESS;
  is_conflict = false;
  bool enable_parallel_migration = false;
  if (OB_FAIL(check_inner_stat_())) {
    LOG_WARN("fail to check inner stat", KR(ret), K_(inited), K_(stopped), K_(loaded));
  } else if (OB_UNLIKELY(!task.is_valid())) {
    ret = OB_INVALID_ARGUMENT;
    LOG_WARN("invalid argument", KR(ret), K(task));
  } else if (OB_FAIL(check_tenant_enable_parallel_migration_(task.get_tenant_id(), enable_parallel_migration))) {
    LOG_WARN("check tenant enable parallel migration failed", KR(ret), K(task), K(enable_parallel_migration));
  } else if (OB_FAIL(high_task_queue_.check_whether_task_conflict(task.get_task_key(), enable_parallel_migration, is_conflict))) {
    LOG_WARN("fail to check task conflict", KR(ret), K(task), K(enable_parallel_migration));
  } else if (is_conflict) {
    LOG_INFO("task conflict in high_task_queue", K(task.get_task_key()), K(enable_parallel_migration));
  } else if (OB_FAIL(low_task_queue_.check_whether_task_conflict(task.get_task_key(), enable_parallel_migration, is_conflict))) {
    LOG_WARN("fail to check task conflict", KR(ret), K(task), K(enable_parallel_migration));
  } else if (is_conflict) {
    LOG_INFO("task conflict in low_task_queue", K(task.get_task_key()), K(enable_parallel_migration));
  } else if (enable_parallel_migration && OB_FAIL(set_migrate_task_prioritize_src_(enable_parallel_migration, task))) {
    // migrate task need set prioritize_same_zone_src_
    LOG_WARN("failed to set enable_parallel_migration", KR(ret), K(enable_parallel_migration), K(task));
  }
  return ret;
}

int ObDRTaskMgr::check_tenant_enable_parallel_migration_(
    const uint64_t &tenant_id,
    bool &enable_parallel_migration)
{
  int ret = OB_SUCCESS;
  const char *str = "auto";
  ObParallelMigrationMode mode;
  enable_parallel_migration = false;
  uint64_t tenant_data_version = 0;
  if (OB_FAIL(check_inner_stat_())) {
    LOG_WARN("fail to check inner stat", KR(ret), K_(inited), K_(loaded), K_(stopped));
  } else if (OB_UNLIKELY(!is_valid_tenant_id(tenant_id))) {
    ret = OB_INVALID_ARGUMENT;
    LOG_WARN("invalid argument", KR(ret), K(tenant_id));
  } else {
    omt::ObTenantConfigGuard tenant_config(TENANT_CONF(tenant_id));
    share::ObTenantRole tenant_role;
    if (OB_FAIL(GET_MIN_DATA_VERSION(gen_meta_tenant_id(tenant_id), tenant_data_version))) {
      LOG_WARN("fail to get min data version", KR(ret), K(tenant_id));
    } else if (!((tenant_data_version >= DATA_VERSION_4_3_5_0)
                 || (tenant_data_version >= MOCK_DATA_VERSION_4_2_5_0 && tenant_data_version < DATA_VERSION_4_3_0_0))) {
      enable_parallel_migration = false;
    } else if (OB_UNLIKELY(!tenant_config.is_valid())) {
      ret = OB_ERR_UNEXPECTED;
      LOG_WARN("tenant config is invalid", KR(ret), K(tenant_id));
    } else if (FALSE_IT(str = tenant_config->replica_parallel_migration_mode.str())) {
    } else if (OB_FAIL(mode.parse_from_string(str))) {
      LOG_WARN("mode parse failed", KR(ret), K(str));
    } else if (!mode.is_valid()) {
      ret = OB_ERR_UNEXPECTED;
      LOG_WARN("parallel migration mode is invalid", KR(ret), K(mode));
    } else if (mode.is_on_mode()) {
      enable_parallel_migration = true;
    } else if (mode.is_off_mode()) {
      enable_parallel_migration = false;
    } else if (mode.is_auto_mode()) {
      if (!is_user_tenant(tenant_id)) {
        // sys and meta tenant is primary tenant
        enable_parallel_migration = false;
      } else if (OB_FAIL(ObAllTenantInfoProxy::get_tenant_role(GCTX.sql_proxy_, tenant_id, tenant_role))) {
        LOG_WARN("fail to get tenant_role", KR(ret), K(tenant_id));
      } else if (!tenant_role.is_valid()) {
        ret = OB_ERR_UNEXPECTED;
        LOG_WARN("tenant_role is invalid", KR(ret), K(tenant_role));
      } else if (tenant_role.is_primary()) {
        enable_parallel_migration = false;
      } else {
        enable_parallel_migration = true;
        // in auto mode, other tenant(clone restore standby) enable_parallel_migration is true
      }
    } else {
      ret = OB_ERR_UNEXPECTED;
      LOG_WARN("parallel migration mode is invalid", KR(ret), K(mode));
    }
    LOG_INFO("check tenant enable_parallel_migration over", KR(ret),
             K(tenant_id), K(enable_parallel_migration), K(tenant_role), K(mode));
  }
  return ret;
}
|
||||
|
||||
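Review note: the decision above reduces to a small truth table. A minimal standalone sketch (hypothetical helper, not part of this patch), assuming the mode and the tenant role have already been resolved:

  // "on" always enables parallel migration, "off" always disables it;
  // "auto" enables it only for non-primary user tenants (standby/restore/clone),
  // while sys and meta tenants are treated like primary tenants.
  static bool decide_parallel_migration(const ObParallelMigrationMode &mode,
                                        const bool is_user_tenant,
                                        const bool is_primary)
  {
    bool enabled = false;
    if (mode.is_on_mode()) {
      enabled = true;
    } else if (mode.is_auto_mode()) {
      enabled = is_user_tenant && !is_primary;
    }
    return enabled;  // off mode and invalid mode fall through to false
  }
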
int ObDRTaskMgr::set_migrate_task_prioritize_src_(
    const bool enable_parallel_migration,
    ObDRTask &task)
{
  int ret = OB_SUCCESS;
  if (OB_FAIL(check_inner_stat_())) {
    LOG_WARN("fail to check inner stat", KR(ret), K_(inited), K_(stopped), K_(loaded));
  } else {
    for (int64_t i = 0; OB_SUCC(ret) && i < ARRAYSIZEOF(queues_); ++i) {
      if (OB_FAIL(queues_[i].set_sibling_in_schedule(task, in_schedule))) {
        if (i == 0) {
          LOG_WARN("fail to set sibling in schedule in high priority queue", KR(ret), K(task));
        } else {
          LOG_WARN("fail to set sibling in schedule in low priority queue", KR(ret), K(task));
        }
      }
    }
    LOG_WARN("fail to check inner stat", KR(ret), K_(inited), K_(loaded), K_(stopped));
  } else if (OB_UNLIKELY(!task.is_valid())) {
    ret = OB_INVALID_ARGUMENT;
    LOG_WARN("invalid dr task", KR(ret), K(task));
  } else if (ObDRTaskType::LS_MIGRATE_REPLICA == task.get_task_key().get_task_type()) {
    ObMigrateLSReplicaTask *migrate_task = static_cast<ObMigrateLSReplicaTask*>(&task);
    if (OB_ISNULL(migrate_task)) {
      ret = OB_ERR_UNEXPECTED;
      LOG_WARN("migrate_task is nullptr", KR(ret), K(task));
    } else {
      migrate_task->set_prioritize_same_zone_src(enable_parallel_migration);
    }
  }
  return ret;
}

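Review note: `migrate_task` is produced by `static_cast` on `&task`, which can never yield a null pointer, so the OB_ISNULL branch above is purely defensive; the real guard is the preceding task-type check. If RTTI were available, a `dynamic_cast` variant (sketch, not in this patch) would make the null check meaningful:

  ObMigrateLSReplicaTask *migrate_task = dynamic_cast<ObMigrateLSReplicaTask *>(&task);
  if (OB_ISNULL(migrate_task)) {
    // reachable here: task was not actually a migrate task
    ret = OB_ERR_UNEXPECTED;
  }
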
@ -35,13 +35,42 @@ namespace rootserver
class ObDRTaskExecutor;
class ObDRTaskMgr;

class ObParallelMigrationMode
{
  OB_UNIS_VERSION(1);
public:
  enum ParallelMigrationMode
  {
    AUTO = 0,
    ON,
    OFF,
    MAX
  };
public:
  ObParallelMigrationMode() : mode_(MAX) {}
  ObParallelMigrationMode(ParallelMigrationMode mode) : mode_(mode) {}
  ObParallelMigrationMode &operator=(const ParallelMigrationMode mode) { mode_ = mode; return *this; }
  ObParallelMigrationMode &operator=(const ObParallelMigrationMode &other) { mode_ = other.mode_; return *this; }
  bool operator==(const ObParallelMigrationMode &other) const { return other.mode_ == mode_; }
  bool operator!=(const ObParallelMigrationMode &other) const { return other.mode_ != mode_; }
  void reset() { mode_ = MAX; }
  void assign(const ObParallelMigrationMode &other) { mode_ = other.mode_; }
  bool is_auto_mode() const { return AUTO == mode_; }
  bool is_on_mode() const { return ON == mode_; }
  bool is_off_mode() const { return OFF == mode_; }
  bool is_valid() const { return MAX != mode_; }
  const ParallelMigrationMode &get_mode() const { return mode_; }
  int parse_from_string(const ObString &mode);
  int64_t to_string(char *buf, const int64_t buf_len) const;
  const char* get_mode_str() const;
private:
  ParallelMigrationMode mode_;
};

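Review note: a minimal usage sketch of the new mode class (not part of the patch), assuming "auto"/"on"/"off" are the only accepted spellings:

  rootserver::ObParallelMigrationMode mode;
  int ret = mode.parse_from_string(ObString("on"));
  // ret == OB_SUCCESS, mode.is_on_mode() is true
  mode.reset();
  // mode.is_valid() is now false (mode_ == MAX);
  // parse_from_string(ObString("onoff")) would return OB_INVALID_ARGUMENT
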
class ObDRTaskQueue
{
public:
  typedef common::ObDList<ObDRTask> TaskList;
  typedef common::hash::ObHashMap<ObDRTaskKey,
                                  ObDRTask *,
                                  common::hash::NoPthreadDefendMode> TaskMap;
public:
  ObDRTaskQueue();
  virtual ~ObDRTaskQueue();
@ -50,39 +79,35 @@ public:
  void reset();
  // init an ObDRTaskQueue
  // @param [in] config, server config
  // @param [in] bucket_num, the size of task_map
  // @param [in] rpc_proxy, to send rpc
  int init(
      common::ObServerConfig &config,
      const int64_t bucket_num,
      obrpc::ObSrvRpcProxy *rpc_proxy,
      ObDRTaskPriority priority);

public:
  // check whether a task in task_map is executing
  // @param [in] task_key, task's tenant_id and ls_id
  // @param [out] task_in_scheduling, whether this task is in scheduling
  int check_task_in_scheduling(
  // check whether a task conflicts with any task in the queue
  // @param [in] task_key, task's tenant_id, ls_id and zone
  // @param [in] enable_parallel_migration, whether parallel migration is enabled
  // @param [out] is_conflict, whether this task conflicts with any task in the queue
  int check_whether_task_conflict(
      const ObDRTaskKey &task_key,
      bool &task_in_scheduling) const;
      const bool enable_parallel_migration,
      bool &is_conflict);

  // check whether a task exists in task_map
  // @param [in] task_key, task's tenant_id and ls_id
  // @param [out] task_exist, whether the task exists in task_map
  int check_task_exist(
      const ObDRTaskKey &task_key,
      bool &task_exist);

  // push a task into this queue's wait_list
  // do push a task in wait list
  // @param [in] task_mgr, to deal with concurrency_limit
  // @param [in] sibling_queue, to check whether another task is scheduling in sibling_queue
  // @param [in] task, the task to push in
  // @param [out] has_task_in_schedule, whether another task is scheduling in sibling_queue
  int push_task_in_wait_list(
  int do_push_task_in_wait_list(
      ObDRTaskMgr &task_mgr,
      const ObDRTaskQueue &sibling_queue,
      const ObDRTask &task,
      bool &has_task_in_schedule);
      const ObDRTask &task);

  // get a certain task from the queue by task_id
  // @param [in] task_id, the unique id identifying a task
  // @param [out] task, the task obtained
  int get_task_by_task_id(
      const share::ObTaskId &task_id,
      ObDRTask *&task);

  // push a task into this queue's schedule_list
  // @param [in] task, the task to push in
@ -94,15 +119,6 @@ public:
  int pop_task(
      ObDRTask *&task);

  // get a certain task from task_map
  // @param [in] task_id, the unique id identifying a task
  // @param [in] task_key, tenant_id and ls_id to locate the task quickly in task_map
  // @param [out] task, the task obtained
  int get_task(
      const share::ObTaskId &task_id,
      const ObDRTaskKey &task_key,
      ObDRTask *&task);

  // to deal with those not-running tasks in schedule_list
  // @param [in] task_mgr, to execute over a task
  int try_clean_and_cancel_task(
@ -113,23 +129,9 @@ public:
  int finish_schedule(
      ObDRTask *task);

  // set task's sibling_in_schedule
  // @param [in] task, to locate which task to modify
  // @param [in] in_schedule, whether another task is scheduling in the sibling queue
  int set_sibling_in_schedule(
      const ObDRTask &task,
      const bool in_schedule);

  // set task's sibling_in_schedule
  // @param [in] task_key, to locate which task to modify quickly
  // @param [in] in_schedule, whether another task is scheduling in the sibling queue
  int set_sibling_in_schedule(
      const ObDRTaskKey &task_key,
      const bool in_schedule);

  int64_t wait_task_cnt() const { return wait_list_.get_size(); }
  int64_t in_schedule_task_cnt() const { return schedule_list_.get_size(); }
  int64_t task_cnt() const { return task_map_.size(); }
  int64_t task_cnt() const { return wait_list_.get_size() + schedule_list_.get_size(); }
  int dump_statistic() const;

  const char* get_priority_str() const {
@ -153,17 +155,25 @@ public:
  TaskList &get_schedule_list() { return schedule_list_; }

private:
  // do push a task in wait list
  // @param [in] task_mgr, to deal with concurrency limit
  // @param [in] sibling_queue, another queue
  // @param [in] task, the task to push in
  // @param [out] has_task_in_schedule, whether the task is scheduling in the sibling queue
  int do_push_task_in_wait_list_(
      ObDRTaskMgr &task_mgr,
      const ObDRTaskQueue &sibling_queue,
      const ObDRTask &task,
      bool &has_task_in_schedule);
  // check whether a task conflicts with any task in a list
  // @param [in] task_key, task's tenant_id, ls_id and zone
  // @param [in] list, target list to check
  // @param [in] enable_parallel_migration, whether parallel migration is enabled
  // @param [out] is_conflict, whether this task conflicts with any task in the list
  int check_whether_task_conflict_in_list_(
      const ObDRTaskKey &task_key,
      const TaskList &list,
      const bool enable_parallel_migration,
      bool &is_conflict) const;

  // get a certain task from a list by task_id
  // @param [in] task_id, the unique id identifying a task
  // @param [in] list, target list to search
  // @param [out] task, the task obtained
  int get_task_by_task_id_in_list_(
      const share::ObTaskId &task_id,
      TaskList &list,
      ObDRTask *&task);
  // check whether to clean this task
  // @param [in] task, the task to check
  // @param [out] need_cleaning, whether to clean this task
@ -176,16 +186,12 @@ private:
  // @param [in] task, task to free
  void free_task_(ObDRTask *&task);

  // remove task from task_map and free it
  // @param [in] task, task to clean
  void remove_task_from_map_and_free_it_(ObDRTask *&task);
private:
  bool inited_;
  common::ObServerConfig *config_;
  common::ObFIFOAllocator task_alloc_;
  TaskList wait_list_;
  TaskList schedule_list_;
  TaskMap task_map_;
  obrpc::ObSrvRpcProxy *rpc_proxy_;
  ObDRTaskPriority priority_;
private:
@ -207,7 +213,6 @@ public:
      stopped_(true),
      loaded_(false),
      config_(nullptr),
      concurrency_limited_ts_(0),
      cond_(),
      queues_(),
      high_task_queue_(queues_[0]),
@ -255,32 +260,14 @@ public:
  int send_rpc_to_cancel_migrate_task(
      const ObDRTask &task);

  // check whether a task is in schedule
  // @param [in] task_key, the task to check
  // @param [in] priority, which queue to check
  // @param [out] task_in_executing, whether the task is scheduling
  virtual int check_task_in_executing(
      const ObDRTaskKey &task_key,
      const ObDRTaskPriority priority,
      bool &task_in_executing);

  // check whether a task exists
  // @param [in] task_key, the task to check
  // @param [in] priority, which queue to check
  // @param [out] task_exist, whether the task exists
  virtual int check_task_exist(
      const ObDRTaskKey &task_key,
      const ObDRTaskPriority priority,
      bool &task_exist);

  // add task in schedule list and execute task
  // @param [in] task, target task
  virtual int add_task_in_queue_and_execute(
      const ObDRTask &task);
      ObDRTask &task);
  // add a task into queue
  // @param [in] task, the task to push in
  virtual int add_task(
      const ObDRTask &task);
      ObDRTask &task);

  // to do something after receiving a task reply
  // @param [in] reply, the execute result of this task
@ -302,27 +289,16 @@ public:

  // finish scheduling this task and clean it
  // @param [in] task_id, to identify a task
  // @param [in] task_key, to locate a task quickly
  // @param [in] ret_code, execute result of this task
  // @param [in] need_clear_server_data_in_limit, whether to clear data_in_limit
  // @param [in] ret_comment, ret comment
  int do_cleaning(
      const share::ObTaskId &task_id,
      const ObDRTaskKey &task_key,
      const int ret_code,
      const bool need_clear_server_data_in_limit,
      const bool need_record_event,
      const ObDRTaskRetComment &ret_comment);

  // operations of reach_concurrency_limit
  void set_reach_concurrency_limit() {
    concurrency_limited_ts_ = common::ObTimeUtility::current_time();
  }
  void clear_reach_concurrency_limit() {
    concurrency_limited_ts_ = 0;
  }
  int64_t get_reach_concurrency_limit() const {
    return concurrency_limited_ts_;
  }
  int64_t get_schedule_interval() const {
    return 1000L; // 1s
  }
@ -355,18 +331,34 @@ private:
      const uint64_t tenant_id,
      const common::ObAddr &server_addr,
      bool &has_unit);
  // check whether a task conflicts with any task in the double queue
  // @param [in] task, the task to check
  // @param [out] is_conflict, whether the task conflicts
  int check_whether_task_conflict_(
      ObDRTask &task,
      bool &is_conflict);
  // check whether the tenant enables parallel migration
  // @param [in] tenant_id, tenant_id to check
  // @param [out] enable_parallel_migration, whether parallel migration is enabled
  int check_tenant_enable_parallel_migration_(
      const uint64_t &tenant_id,
      bool &enable_parallel_migration);
  // set a migrate task's prioritize_same_zone_src field
  // @param [in] enable_parallel_migration, whether parallel migration is enabled
  // @param [out] task, target task to set
  int set_migrate_task_prioritize_src_(
      const bool enable_parallel_migration,
      ObDRTask &task);
  ObDRTaskQueue &get_high_priority_queue_() { return high_task_queue_; }
  ObDRTaskQueue &get_low_priority_queue_() { return low_task_queue_; }
  int check_inner_stat_() const;

  // get a task by task id
  // @param [in] task_id, to identify a certain task
  // @param [in] task_key, to quickly locate a task
  // @param [out] task, the task to get
  // ATTENTION: need to lock task memory before using this function
  int get_task_by_id_(
      const share::ObTaskId &task_id,
      const ObDRTaskKey &task_key,
      ObDRTask *&task);

  // free a task
@ -428,13 +420,6 @@ private:
  int execute_manual_task_(
      const ObDRTask &task);

  // set sibling in schedule
  // @param [in] task, which task to deal with
  // @param [in] in_schedule, whether in schedule
  int set_sibling_in_schedule(
      const ObDRTask &task,
      const bool in_schedule);

private:
  bool inited_;
  bool stopped_;
@ -443,7 +428,6 @@ private:
  /* has waiting task but cannot be scheduled,
   * since mgr reaches server_data_copy_[in/out]_concurrency
   */
  volatile int64_t concurrency_limited_ts_;
  mutable common::ObThreadCond cond_;
  ObDRTaskQueue queues_[static_cast<int64_t>(ObDRTaskPriority::MAX_PRI)];
  ObDRTaskQueue &high_task_queue_; // queues_[0]

@ -46,12 +46,13 @@ int ObDRTaskTableUpdateTask::init(
    ret = OB_INVALID_ARGUMENT;
    LOG_WARN("task init failed", KR(ret), K(tenant_id), K(ls_id),
             K(task_type), K(task_id), K(add_timestamp));
  } else if (OB_FAIL(task_key_.assign(task_key))) {
    LOG_WARN("failed to assign task_key", KR(ret), K(task_key));
  } else {
    tenant_id_ = tenant_id;
    ls_id_ = ls_id;
    task_type_ = task_type;
    task_id_ = task_id;
    task_key_ = task_key;
    ret_code_ = ret_code;
    need_clear_server_data_in_limit_ = need_clear_server_data_in_limit;
    need_record_event_ = need_record_event;
@ -65,16 +66,19 @@ int ObDRTaskTableUpdateTask::assign(const ObDRTaskTableUpdateTask &other)
{
  int ret = OB_SUCCESS;
  if (this != &other) {
    tenant_id_ = other.tenant_id_;
    ls_id_ = other.ls_id_;
    task_type_ = other.task_type_;
    task_id_ = other.task_id_;
    task_key_ = other.task_key_;
    ret_code_ = other.ret_code_;
    need_clear_server_data_in_limit_ = other.need_clear_server_data_in_limit_;
    need_record_event_ = other.need_record_event_;
    ret_comment_ = other.ret_comment_;
    add_timestamp_ = other.add_timestamp_;
    if (OB_FAIL(task_key_.assign(other.task_key_))) {
      LOG_WARN("failed to assign task_key", KR(ret), K(other));
    } else {
      tenant_id_ = other.tenant_id_;
      ls_id_ = other.ls_id_;
      task_type_ = other.task_type_;
      task_id_ = other.task_id_;
      ret_code_ = other.ret_code_;
      need_clear_server_data_in_limit_ = other.need_clear_server_data_in_limit_;
      need_record_event_ = other.need_record_event_;
      ret_comment_ = other.ret_comment_;
      add_timestamp_ = other.add_timestamp_;
    }
  }
  return ret;
}
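Review note: the reworked assign() copies the fallible member first, so a failed deep copy of task_key_ leaves the trivially copyable fields untouched instead of producing a half-assigned object. The same pattern in miniature (hypothetical type, for illustration only):

  int MyTask::assign(const MyTask &other)
  {
    int ret = OB_SUCCESS;
    if (this != &other) {
      if (OB_FAIL(key_.assign(other.key_))) {  // fallible deep copy first
        LOG_WARN("failed to assign key", KR(ret));
      } else {                                 // plain fields only after it succeeded
        id_ = other.id_;
      }
    }
    return ret;
  }
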
@ -314,7 +318,6 @@ int ObDRTaskTableUpdater::process_task_(
  // must be committed successfully; if memory cleaning fails, rely on detection to clean up the memory.
  if (FAILEDx(task_mgr_->do_cleaning(
          task.get_task_id(),
          task.get_task_key(),
          task.get_ret_code(),
          task.get_need_clear_server_data_in_limit(),
          task.get_need_record_event(),

@ -35,27 +35,6 @@ public:
      need_record_event_(true),
      ret_comment_(ObDRTaskRetComment::MAX),
      add_timestamp_(OB_INVALID_TIMESTAMP) {}
  explicit ObDRTaskTableUpdateTask(
      const uint64_t tenant_id,
      const share::ObLSID &ls_id,
      const ObDRTaskType &task_type,
      const share::ObTaskId &task_id,
      const ObDRTaskKey &task_key,
      const int ret_code,
      const bool need_clear_server_data_in_limit,
      const bool need_record_event,
      const ObDRTaskRetComment &ret_comment,
      const int64_t add_timestamp)
    : tenant_id_(tenant_id),
      ls_id_(ls_id),
      task_type_(task_type),
      task_id_(task_id),
      task_key_(),
      ret_code_(ret_code),
      need_clear_server_data_in_limit_(need_clear_server_data_in_limit),
      need_record_event_(need_record_event),
      ret_comment_(ret_comment),
      add_timestamp_(add_timestamp) { task_key_ = task_key; }
  virtual ~ObDRTaskTableUpdateTask() {}
  int init(
      const uint64_t tenant_id,

@ -1496,8 +1496,8 @@ int ObDRWorker::LocalityAlignment::try_get_normal_locality_alignment_task(
      LOG_WARN("this task ptr is null", KR(ret));
    } else {
      switch (this_task->get_task_type()) {
      case RemovePaxos:
      case RemoveNonPaxos:
      case ObDRTaskType::LS_REMOVE_PAXOS_REPLICA:
      case ObDRTaskType::LS_REMOVE_NON_PAXOS_REPLICA:
        if (OB_FAIL(try_review_remove_replica_task(
                unit_provider,
                this_task,
@ -1508,7 +1508,7 @@ int ObDRWorker::LocalityAlignment::try_get_normal_locality_alignment_task(
          LOG_INFO("success to try review remove replica task", KR(ret), KPC(this_task), K(found));
        }
        break;
      case AddReplica:
      case ObDRTaskType::LS_ADD_REPLICA:
        if (OB_FAIL(try_review_add_replica_task(
                unit_provider,
                this_task,
@ -1519,7 +1519,7 @@ int ObDRWorker::LocalityAlignment::try_get_normal_locality_alignment_task(
          LOG_INFO("success to try review add replica task", KR(ret), KPC(this_task), K(found));
        }
        break;
      case TypeTransform:
      case ObDRTaskType::LS_TYPE_TRANSFORM:
        if (OB_FAIL(try_review_type_transform_task(
                unit_provider,
                this_task,
@ -1530,7 +1530,7 @@ int ObDRWorker::LocalityAlignment::try_get_normal_locality_alignment_task(
          LOG_INFO("success to try review type transform task", KR(ret), KPC(this_task), K(found));
        }
        break;
      case ModifyPaxosReplicaNumber:
      case ObDRTaskType::LS_MODIFY_PAXOS_REPLICA_NUMBER:
        if (OB_FAIL(try_review_modify_paxos_replica_number_task(
                unit_provider,
                this_task,
@ -2470,7 +2470,38 @@ int ObDRWorker::do_cancel_ls_replica_task(
  return ret;
}

int ObDRWorker::add_task_in_queue_and_execute_(const ObDRTask &task)
int ObDRWorker::add_task_to_task_mgr_(
    ObDRTask &task,
    int64_t &acc_dr_task)
{
  int ret = OB_SUCCESS;
  if (OB_UNLIKELY(!inited_)) {
    ret = OB_NOT_INIT;
    LOG_WARN("DRWorker not init", KR(ret));
  } else if (OB_UNLIKELY(!task.is_valid())) {
    ret = OB_INVALID_ARGUMENT;
    LOG_WARN("invalid argument", KR(ret), K(task));
  } else if (OB_ISNULL(disaster_recovery_task_mgr_)) {
    ret = OB_ERR_UNEXPECTED;
    LOG_WARN("disaster_recovery_task_mgr_ null", KR(ret), KP(disaster_recovery_task_mgr_));
  } else if (OB_FAIL(disaster_recovery_task_mgr_->add_task(task))) {
    if (OB_ENTRY_EXIST == ret) {
      ret = OB_SUCCESS;
      acc_dr_task++;
      // if this task conflicts, all subsequent tasks in this round conflict as well,
      // so bump acc_dr_task and stop checking further
      LOG_INFO("task has conflict in task mgr", KR(ret), K(task));
    } else {
      LOG_WARN("fail to add task", KR(ret), K(task));
    }
  } else {
    acc_dr_task++;
    LOG_INFO("success to add a task to task manager", KR(ret), K(task));
  }
  return ret;
}

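Review note: downgrading OB_ENTRY_EXIST to OB_SUCCESS while still bumping acc_dr_task is deliberate: the accumulator is what throttles further task generation for this LS in the current round, so a conflicting add must count exactly like a successfully queued one. The observable contract (sketch, hypothetical caller):

  int64_t acc_dr_task = 0;
  ret = add_task_to_task_mgr_(migrate_task, acc_dr_task);
  // queued          -> OB_SUCCESS, acc_dr_task == 1
  // conflict in mgr -> OB_SUCCESS (downgraded from OB_ENTRY_EXIST), acc_dr_task == 1
  // other error     -> ret != OB_SUCCESS, acc_dr_task unchanged
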
int ObDRWorker::add_task_in_queue_and_execute_(ObDRTask &task)
{
  int ret = OB_SUCCESS;
  FLOG_INFO("add task in schedule list and execute", K(task));
@ -3313,26 +3344,27 @@ int ObDRWorker::check_other_inactive_server_count_(

int ObDRWorker::generate_task_key(
    const DRLSInfo &dr_ls_info,
    const common::ObAddr &task_exe_server,
    const ObDRTaskType &task_type,
    ObDRTaskKey &task_key) const
{
  int ret = OB_SUCCESS;
  uint64_t tenant_id = OB_INVALID_TENANT_ID;
  ObLSID ls_id;
  common::ObZone zone;
  task_key.reset();
  if (OB_UNLIKELY(!inited_)) {
    ret = OB_NOT_INIT;
    LOG_WARN("not init", KR(ret));
  } else {
    uint64_t tenant_id = OB_INVALID_ID;
    ObLSID ls_id;
    if (OB_FAIL(dr_ls_info.get_ls_id(
            tenant_id, ls_id))) {
      LOG_WARN("fail to get log stream id", KR(ret));
    } else if (OB_FAIL(task_key.init(
            tenant_id,
            ls_id.id(),
            0/* set to 0 */,
            0/* set to 0 */,
            ObDRTaskKeyType::FORMAL_DR_KEY))) {
      LOG_WARN("fail to init task key", KR(ret), K(tenant_id), K(ls_id));
    }
  } else if (OB_UNLIKELY(ObDRTaskType::MAX_TYPE == task_type || !task_exe_server.is_valid())) {
    ret = OB_INVALID_ARGUMENT;
    LOG_WARN("invalid argument", KR(ret), K(task_type), K(task_exe_server));
  } else if (OB_FAIL(dr_ls_info.get_ls_id(tenant_id, ls_id))) {
    LOG_WARN("fail to get log stream id", KR(ret), K(dr_ls_info));
  } else if (OB_FAIL(SVR_TRACER.get_server_zone(task_exe_server, zone))) {
    LOG_WARN("get server zone failed", KR(ret), K(task_exe_server));
  } else if (OB_FAIL(task_key.init(tenant_id, ls_id, zone, task_type))) {
    LOG_WARN("fail to init task key", KR(ret), K(tenant_id), K(ls_id), K(zone), K(task_type));
  }
  return ret;
}
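Review note: the task key is now derived from the executing server's zone and the task type instead of the old fixed (0, 0, FORMAL_DR_KEY) tuple; this is what allows several migrations of one LS to coexist, since keys only collide within the same zone. Assuming the init() parameter order shown above, the key shape is:

  ObDRTaskKey task_key;
  task_key.init(tenant_id, ls_id, zone, task_type);
  // same tenant + ls but different zone or task type -> distinct keys -> no conflict
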
@ -3443,81 +3475,37 @@ int ObDRWorker::check_has_leader_while_remove_replica(
  return ret;
}

int ObDRWorker::check_task_already_exist(
    const ObDRTaskKey &task_key,
    const DRLSInfo &dr_ls_info,
    const int64_t &priority,
    bool &task_exist)
{
  int ret = OB_SUCCESS;
  task_exist = true;
  bool sibling_task_executing = false;
  ObDRTaskPriority task_priority = priority == 0 ? ObDRTaskPriority::HIGH_PRI : ObDRTaskPriority::LOW_PRI;
  ObDRTaskPriority sibling_priority = priority == 0
                                    ? ObDRTaskPriority::LOW_PRI
                                    : ObDRTaskPriority::HIGH_PRI;
  if (OB_UNLIKELY(!inited_)) {
    ret = OB_NOT_INIT;
    LOG_WARN("not init", KR(ret));
  } else if (OB_ISNULL(disaster_recovery_task_mgr_)) {
    ret = OB_ERR_UNEXPECTED;
    LOG_WARN("disaster recovery task mgr ptr is null", KR(ret));
  } else if (OB_FAIL(disaster_recovery_task_mgr_->check_task_exist(
          task_key, task_priority, task_exist))) {
    LOG_WARN("fail to check task exist", KR(ret), K(task_key));
  } else if (task_exist) {
    FLOG_INFO("high prio task exist for this ls", K(task_key));
  } else if (OB_FAIL(disaster_recovery_task_mgr_->check_task_in_executing(
          task_key, sibling_priority, sibling_task_executing))) {
    LOG_WARN("fail to check task in executing", KR(ret), K(task_key));
  } else if (sibling_task_executing) {
    task_exist = true;
    FLOG_INFO("has sibling task in executing for this ls", K(task_key));
  }
  return ret;
}

int ObDRWorker::check_can_generate_task(
    const int64_t acc_dr_task,
    const bool need_check_has_leader_while_remove_replica,
    const bool is_high_priority_task,
    const ObAddr &server_addr,
    DRLSInfo &dr_ls_info,
    ObDRTaskKey &task_key,
    const ObDRTaskType &task_type,
    bool &can_generate)
{
  int ret = OB_SUCCESS;
  bool task_exist = false;
  bool has_leader_while_remove_replica = false;
  int64_t task_pri = is_high_priority_task ? 0 : 1;
  can_generate = false;
  can_generate = true;
  if (OB_UNLIKELY(!inited_)) {
    ret = OB_NOT_INIT;
    LOG_WARN("not init", KR(ret));
  } else if (acc_dr_task != 0) {
  } else if (ObDRTaskType::LS_MIGRATE_REPLICA != task_type && acc_dr_task != 0) {
    // if a disaster recovery task was generated in this round, or a conflict with a task currently
    // executing in the mgr queue was detected, acc_dr_task++ prohibits subsequent task generation.
    // to implement parallel migration, generation must still continue for the next replica's migration
    // task even after the first replica's task has been generated or its conflict has been detected.
    can_generate = false;
    LOG_INFO("can not generate task because another task is generated", K(dr_ls_info), K(is_high_priority_task));
  } else if (OB_FAIL(generate_task_key(dr_ls_info, task_key))) {
    LOG_WARN("fail to generate task key", KR(ret), K(dr_ls_info));
  } else if (OB_FAIL(check_task_already_exist(task_key, dr_ls_info, task_pri, task_exist))) {
    LOG_WARN("task exist in task manager", KR(ret), K(task_key), K(dr_ls_info), K(is_high_priority_task));
  } else if (task_exist) {
    can_generate = false;
    LOG_INFO("can not generate task because already exist", K(dr_ls_info), K(is_high_priority_task));
    LOG_INFO("can not generate task because another task is generated", K(dr_ls_info), K(task_type), K(acc_dr_task));
  } else if (need_check_has_leader_while_remove_replica) {
    if (OB_FAIL(check_has_leader_while_remove_replica(
            server_addr,
            dr_ls_info,
            has_leader_while_remove_replica))) {
      LOG_WARN("fail to check has leader while member change", KR(ret), K(dr_ls_info), K(server_addr));
    } else if (has_leader_while_remove_replica) {
      can_generate = true;
    } else {
    } else if (!has_leader_while_remove_replica) {
      can_generate = false;
      LOG_INFO("can not generate task because has no leader while remove replica", K(dr_ls_info), K(is_high_priority_task));
      LOG_INFO("can not generate task because has no leader", K(dr_ls_info), K(task_type), K(acc_dr_task), K(has_leader_while_remove_replica));
    }
  } else {
    can_generate = true;
  }
  return ret;
}
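Review note: the throttle above condenses to a single predicate (sketch): only non-migrate task types stop after the first task of a round, while LS_MIGRATE_REPLICA keeps generating so every replica of the LS can be migrated in parallel.

  const bool blocked = (ObDRTaskType::LS_MIGRATE_REPLICA != task_type) && (0 != acc_dr_task);
  // blocked -> can_generate = false for the rest of this round
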
@ -3590,10 +3578,8 @@ int ObDRWorker::generate_remove_permanent_offline_replicas_and_push_into_task_ma
      replica_type))) {
    LOG_WARN("fail to build remove member task", KR(ret), K(task_key), K(tenant_id), K(ls_id), K(leader_addr),
             K(remove_member), K(old_paxos_replica_number), K(new_paxos_replica_number), K(replica_type));
  } else if (OB_FAIL(disaster_recovery_task_mgr_->add_task(remove_replica_task))) {
  } else if (OB_FAIL(add_task_to_task_mgr_(remove_replica_task, acc_dr_task))) {
    LOG_WARN("fail to add task", KR(ret), K(remove_replica_task));
  } else {
    acc_dr_task++;
  }
  return ret;
}
@ -3696,7 +3682,6 @@ int ObDRWorker::do_single_replica_permanent_offline_(
  common::ObAddr leader_addr;
  const common::ObAddr source_server; // not useful
  const bool need_check_has_leader_while_remove_replica = ObReplicaTypeCheck::is_paxos_replica_V2(replica_type);
  const bool is_high_priority_task = true;
  const int64_t memstore_percent = 100;
  ObDRTaskKey task_key;
  bool can_generate = false;
@ -3740,17 +3725,18 @@ int ObDRWorker::do_single_replica_permanent_offline_(
    } else {
      LOG_INFO("success to add display info", KR(ret), K(display_info));
    }
  } else if (OB_FAIL(generate_task_key(dr_ls_info, leader_addr, task_type, task_key))) {
    LOG_WARN("fail to generate task key", KR(ret), K(dr_ls_info), K(leader_addr), K(task_type));
  } else if (OB_FAIL(check_can_generate_task(
          acc_dr_task,
          need_check_has_leader_while_remove_replica,
          is_high_priority_task,
          member_to_remove.get_server(),
          dr_ls_info,
          task_key,
          task_type,
          can_generate))) {
    LOG_WARN("fail to check can generate remove permanent offline task", KR(ret), K(acc_dr_task),
             K(need_check_has_leader_while_remove_replica), K(is_high_priority_task), K(member_to_remove),
             K(dr_ls_info), K(task_key), K(can_generate));
             K(need_check_has_leader_while_remove_replica), K(member_to_remove),
             K(dr_ls_info), K(can_generate));
  } else if (can_generate) {
    if (OB_FAIL(generate_remove_permanent_offline_replicas_and_push_into_task_manager(
            task_key,
@ -3888,10 +3874,8 @@ int ObDRWorker::generate_replicate_to_unit_and_push_into_task_manager(
      ObReplicaMember()/*empty force_data_source*/,
      old_paxos_replica_number))) {
    LOG_WARN("fail to build migrate task", KR(ret));
  } else if (OB_FAIL(disaster_recovery_task_mgr_->add_task(migrate_task))) {
  } else if (OB_FAIL(add_task_to_task_mgr_(migrate_task, acc_dr_task))) {
    LOG_WARN("fail to add task", KR(ret), K(migrate_task));
  } else {
    ++acc_dr_task;
  }
  return ret;
}
@ -3977,7 +3961,6 @@ int ObDRWorker::generate_migrate_ls_task(
  ObDstReplica dst_replica;
  int64_t old_paxos_replica_number = 0;
  const bool need_check_has_leader_while_remove_replica = false;
  const bool is_high_priority_task = true;
  bool can_generate = false;
  ObReplicaMember src_member(
      ls_replica.get_server(), ls_replica.get_member_time_us(),
@ -4007,10 +3990,16 @@ int ObDRWorker::generate_migrate_ls_task(
    } else {
      LOG_INFO("success to add display info", KR(ret), K(display_info));
    }
  } else if (OB_FAIL(generate_task_key(dr_ls_info, dst_replica.get_server(), ObDRTaskType::LS_MIGRATE_REPLICA, task_key))) {
    LOG_WARN("fail to generate task key", KR(ret), K(dr_ls_info), K(dst_replica));
  } else if (OB_FAIL(check_can_generate_task(
          acc_dr_task, need_check_has_leader_while_remove_replica,
          is_high_priority_task, server_stat_info.get_server(), dr_ls_info,
          task_key, can_generate))) {
          /*when shrink unit num, both dup log stream and normal log stream are here*/
          acc_dr_task,
          need_check_has_leader_while_remove_replica,
          server_stat_info.get_server(),
          dr_ls_info,
          ObDRTaskType::LS_MIGRATE_REPLICA,
          can_generate))) {
    LOG_WARN("fail to check can generate replicate to unit task", KR(ret));
  } else if (can_generate) {
    if (OB_FAIL(generate_replicate_to_unit_and_push_into_task_manager(
@ -4026,7 +4015,6 @@ int ObDRWorker::generate_migrate_ls_task(

int ObDRWorker::try_generate_remove_replica_locality_alignment_task(
    DRLSInfo &dr_ls_info,
    const ObDRTaskKey &task_key,
    const LATask *task,
    int64_t &acc_dr_task)
{
@ -4035,12 +4023,13 @@ int ObDRWorker::try_generate_remove_replica_locality_alignment_task(
  bool sibling_task_executing = false;
  uint64_t tenant_id = OB_INVALID_ID;
  share::ObLSID ls_id;
  if (OB_UNLIKELY(!task_key.is_valid()) || OB_ISNULL(task)) {
  if (OB_ISNULL(task)) {
    ret = OB_INVALID_ARGUMENT;
    LOG_WARN("invalid argument", KR(ret), K(task_key), KP(task));
    LOG_WARN("invalid argument", KR(ret), KP(task));
  } else if (OB_FAIL(dr_ls_info.get_ls_id(tenant_id, ls_id))) {
    LOG_WARN("fail to get ls id", KR(ret));
  } else {
    ObDRTaskKey task_key;
    const RemoveReplicaLATask *my_task = reinterpret_cast<const RemoveReplicaLATask *>(task);
    ObReplicaMember remove_member(my_task->remove_server_,
                                  my_task->member_time_us_,
@ -4070,6 +4059,8 @@ int ObDRWorker::try_generate_remove_replica_locality_alignment_task(
      LOG_INFO("may has no leader while member change", K(dr_ls_info));
    } else if (OB_FAIL(dr_ls_info.get_leader(leader_addr))) {
      LOG_WARN("fail to get leader", KR(ret));
    } else if (OB_FAIL(generate_task_key(dr_ls_info, leader_addr, task->get_task_type(), task_key))) {
      LOG_WARN("fail to generate task key", KR(ret));
    } else if (OB_FAIL(remove_paxos_task.build(
            task_key,
            tenant_id,
@ -4089,11 +4080,8 @@ int ObDRWorker::try_generate_remove_replica_locality_alignment_task(
            my_task->replica_type_))) {
      LOG_WARN("fail to build task", KR(ret), K(task_key), K(tenant_id), K(ls_id), K(task_id),
               K(leader_addr), K(remove_member), KPC(my_task));
    } else if (OB_FAIL(disaster_recovery_task_mgr_->add_task(remove_paxos_task))) {
      LOG_WARN("fail to add task", KR(ret));
    } else {
      LOG_INFO("success to add a ObRemoveLSReplicaTask to task manager", KR(ret), K(remove_paxos_task));
      acc_dr_task++;
    } else if (OB_FAIL(add_task_to_task_mgr_(remove_paxos_task, acc_dr_task))) {
      LOG_WARN("fail to add task", KR(ret), K(remove_paxos_task));
    }
  }
  return ret;
@ -4101,7 +4089,6 @@ int ObDRWorker::try_generate_remove_replica_locality_alignment_task(

int ObDRWorker::try_generate_add_replica_locality_alignment_task(
    DRLSInfo &dr_ls_info,
    const ObDRTaskKey &task_key,
    const LATask *task,
    int64_t &acc_dr_task)
{
@ -4110,12 +4097,13 @@ int ObDRWorker::try_generate_add_replica_locality_alignment_task(
  bool sibling_task_executing = false;
  uint64_t tenant_id = OB_INVALID_ID;
  share::ObLSID ls_id;
  if (OB_UNLIKELY(!task_key.is_valid() || nullptr == task)) {
  if (OB_ISNULL(task)) {
    ret = OB_INVALID_ARGUMENT;
    LOG_WARN("invalid argument", KR(ret), K(task_key), KP(task));
    LOG_WARN("invalid argument", KR(ret), KP(task));
  } else if (OB_FAIL(dr_ls_info.get_ls_id(tenant_id, ls_id))) {
    LOG_WARN("fail to get ls id", KR(ret));
  } else {
    ObDRTaskKey task_key;
    const AddReplicaLATask *my_task = reinterpret_cast<const AddReplicaLATask *>(task);
    int64_t data_size = 0;
    ObReplicaMember data_source;
@ -4145,6 +4133,8 @@ int ObDRWorker::try_generate_add_replica_locality_alignment_task(
      LOG_WARN("fail to assign dst replica", KR(ret));
    } else if (OB_FAIL(dr_ls_info.get_default_data_source(data_source, data_size))) {
      LOG_WARN("fail to get data_size", KR(ret), K(dr_ls_info));
    } else if (OB_FAIL(generate_task_key(dr_ls_info, my_task->dst_server_, task->get_task_type(), task_key))) {
      LOG_WARN("fail to generate task key", KR(ret));
    } else if (OB_FAIL(add_replica_task.build(
            task_key,
            tenant_id,
@ -4163,11 +4153,8 @@ int ObDRWorker::try_generate_add_replica_locality_alignment_task(
            my_task->orig_paxos_replica_number_,
            my_task->paxos_replica_number_))) {
      LOG_WARN("fail to build add replica task", KR(ret));
    } else if (OB_FAIL(disaster_recovery_task_mgr_->add_task(add_replica_task))) {
      LOG_WARN("fail to add task", KR(ret));
    } else {
      LOG_INFO("success to add a ObAddLSReplicaTask to task manager", KR(ret), K(add_replica_task));
      acc_dr_task++;
    } else if (OB_FAIL(add_task_to_task_mgr_(add_replica_task, acc_dr_task))) {
      LOG_WARN("fail to add task", KR(ret), K(add_replica_task));
    }
  }
  return ret;
@ -4175,7 +4162,6 @@ int ObDRWorker::try_generate_add_replica_locality_alignment_task(

int ObDRWorker::try_generate_type_transform_locality_alignment_task(
    DRLSInfo &dr_ls_info,
    const ObDRTaskKey &task_key,
    const LATask *task,
    int64_t &acc_dr_task)
{
@ -4184,12 +4170,13 @@ int ObDRWorker::try_generate_type_transform_locality_alignment_task(
  bool sibling_task_executing = false;
  uint64_t tenant_id = OB_INVALID_ID;
  share::ObLSID ls_id;
  if (OB_UNLIKELY(!task_key.is_valid() || nullptr == task)) {
  if (OB_ISNULL(task)) {
    ret = OB_INVALID_ARGUMENT;
    LOG_WARN("invalid argument", KR(ret), K(task_key), KP(task));
    LOG_WARN("invalid argument", KR(ret), KP(task));
  } else if (OB_FAIL(dr_ls_info.get_ls_id(tenant_id, ls_id))) {
    LOG_WARN("fail to get ls id", KR(ret));
  } else {
    ObDRTaskKey task_key;
    const TypeTransformLATask *my_task = reinterpret_cast<const TypeTransformLATask *>(task);
    bool has_leader = false;
    int64_t data_size = 0;
@ -4222,6 +4209,8 @@ int ObDRWorker::try_generate_type_transform_locality_alignment_task(
      LOG_WARN("fail to assign dst replica", KR(ret));
    } else if (OB_FAIL(dr_ls_info.get_default_data_source(data_source, data_size))) {
      LOG_WARN("fail to get data_size", KR(ret), K(dr_ls_info));
    } else if (OB_FAIL(generate_task_key(dr_ls_info, my_task->dst_server_, task->get_task_type(), task_key))) {
      LOG_WARN("fail to generate task key", KR(ret));
    } else if (OB_FAIL(type_transform_task.build(
            task_key,
            tenant_id,
@ -4240,11 +4229,8 @@ int ObDRWorker::try_generate_type_transform_locality_alignment_task(
            my_task->orig_paxos_replica_number_,
            my_task->paxos_replica_number_))) {
      LOG_WARN("fail to build type transform task", KR(ret));
    } else if (OB_FAIL(disaster_recovery_task_mgr_->add_task(type_transform_task))) {
      LOG_WARN("fail to add task", KR(ret));
    } else {
      LOG_INFO("success to add a ObLSTypeTransformTask to task manager", KR(ret), K(type_transform_task));
      acc_dr_task++;
    } else if (OB_FAIL(add_task_to_task_mgr_(type_transform_task, acc_dr_task))) {
      LOG_WARN("fail to add task", KR(ret), K(type_transform_task));
    }
  }
  return ret;
@ -4252,7 +4238,6 @@ int ObDRWorker::try_generate_type_transform_locality_alignment_task(

int ObDRWorker::try_generate_modify_paxos_replica_number_locality_alignment_task(
    DRLSInfo &dr_ls_info,
    const ObDRTaskKey &task_key,
    const LATask *task,
    int64_t &acc_dr_task_cnt)
{
@ -4262,12 +4247,13 @@ int ObDRWorker::try_generate_modify_paxos_replica_number_locality_alignment_task
  uint64_t tenant_id = OB_INVALID_ID;
  share::ObLSID ls_id;
  GlobalLearnerList learner_list;
  if (OB_UNLIKELY(!task_key.is_valid() || nullptr == task)) {
  if (OB_ISNULL(task)) {
    ret = OB_INVALID_ARGUMENT;
    LOG_WARN("invalid argument", KR(ret), K(task_key), KP(task));
    LOG_WARN("invalid argument", KR(ret), KP(task));
  } else if (OB_FAIL(dr_ls_info.get_ls_id(tenant_id, ls_id))) {
    LOG_WARN("fail to get ls id", KR(ret));
  } else {
    ObDRTaskKey task_key;
    const ModifyPaxosReplicaNumberLATask *my_task = reinterpret_cast<const ModifyPaxosReplicaNumberLATask *>(task);
    common::ObAddr leader_addr;
    common::ObMemberList member_list;
@ -4280,6 +4266,8 @@ int ObDRWorker::try_generate_modify_paxos_replica_number_locality_alignment_task
            member_list,
            learner_list))) {
      LOG_WARN("fail to get leader", KR(ret));
    } else if (OB_FAIL(generate_task_key(dr_ls_info, leader_addr, task->get_task_type(), task_key))) {
      LOG_WARN("fail to generate task key", KR(ret));
    } else if (OB_FAIL(modify_paxos_replica_number_task.build(
            task_key,
            tenant_id,
@ -4297,11 +4285,8 @@ int ObDRWorker::try_generate_modify_paxos_replica_number_locality_alignment_task
            my_task->paxos_replica_number_,
            member_list))) {
      LOG_WARN("fail to build a modify paxos replica number task", KR(ret));
    } else if (OB_FAIL(disaster_recovery_task_mgr_->add_task(modify_paxos_replica_number_task))) {
      LOG_WARN("fail to add task", KR(ret));
    } else {
      LOG_INFO("success to add a ObLSModifyPaxosReplicaNumberTask to task manager", KR(ret), K(modify_paxos_replica_number_task), K(member_list));
      acc_dr_task_cnt++;
    } else if (OB_FAIL(add_task_to_task_mgr_(modify_paxos_replica_number_task, acc_dr_task_cnt))) {
      LOG_WARN("fail to add task", KR(ret), K(modify_paxos_replica_number_task));
    }
  }
  return ret;
@ -4313,56 +4298,49 @@ int ObDRWorker::try_generate_locality_alignment_task(
    int64_t &acc_dr_task_cnt)
{
  int ret = OB_SUCCESS;
  ObDRTaskKey task_key;
  if (OB_ISNULL(task)) {
    ret = OB_INVALID_ARGUMENT;
    LOG_WARN("invalid argument", KR(ret), KP(task));
  } else if (OB_FAIL(generate_task_key(dr_ls_info, task_key))) {
    LOG_WARN("fail to generate task key", KR(ret));
  } else {
    switch (task->get_task_type()) {
    case RemovePaxos:
    case RemoveNonPaxos: {
    case ObDRTaskType::LS_REMOVE_PAXOS_REPLICA:
    case ObDRTaskType::LS_REMOVE_NON_PAXOS_REPLICA: {
      if (OB_FAIL(try_generate_remove_replica_locality_alignment_task(
              dr_ls_info,
              task_key,
              task,
              acc_dr_task_cnt))) {
        LOG_WARN("fail to try generate remove replica task",
                 KR(ret), K(task_key), KPC(task));
                 KR(ret), KPC(task));
      }
      break;
    }
    case AddReplica: {
    case ObDRTaskType::LS_ADD_REPLICA: {
      if (OB_FAIL(try_generate_add_replica_locality_alignment_task(
              dr_ls_info,
              task_key,
              task,
              acc_dr_task_cnt))) {
        LOG_WARN("fail to try generate add replica paxos task",
                 KR(ret), K(task_key), KPC(task));
                 KR(ret), KPC(task));
      }
      break;
    }
    case TypeTransform: {
    case ObDRTaskType::LS_TYPE_TRANSFORM: {
      if (OB_FAIL(try_generate_type_transform_locality_alignment_task(
              dr_ls_info,
              task_key,
              task,
              acc_dr_task_cnt))) {
        LOG_WARN("fail to try generate type transform paxos task",
                 KR(ret), K(task_key), KPC(task));
                 KR(ret), KPC(task));
      }
      break;
    }
    case ModifyPaxosReplicaNumber: {
    case ObDRTaskType::LS_MODIFY_PAXOS_REPLICA_NUMBER: {
      if (OB_FAIL(try_generate_modify_paxos_replica_number_locality_alignment_task(
              dr_ls_info,
              task_key,
              task,
              acc_dr_task_cnt))) {
        LOG_WARN("fail to try generate modify paxos replica number task",
                 KR(ret), K(task_key), KPC(task));
                 KR(ret), KPC(task));
      }
      break;
    }
@ -4409,10 +4387,10 @@ int ObDRWorker::record_task_plan_for_locality_alignment(
  } else {
    ObLSReplicaTaskDisplayInfo display_info;
    switch (task->get_task_type()) {
    case RemovePaxos:
    case RemoveNonPaxos: {
    case ObDRTaskType::LS_REMOVE_PAXOS_REPLICA:
    case ObDRTaskType::LS_REMOVE_NON_PAXOS_REPLICA: {
      const RemoveReplicaLATask *my_task = reinterpret_cast<const RemoveReplicaLATask *>(task);
      task_type = RemovePaxos == task->get_task_type() ? ObDRTaskType::LS_REMOVE_PAXOS_REPLICA : ObDRTaskType::LS_REMOVE_NON_PAXOS_REPLICA;
      task_type = task->get_task_type();
      source_replica_type = REPLICA_TYPE_INVALID;
      target_replica_type = my_task->replica_type_;
      task_priority = task_type == ObDRTaskType::LS_REMOVE_PAXOS_REPLICA ? ObDRTaskPriority::HIGH_PRI : ObDRTaskPriority::LOW_PRI;
@ -4427,7 +4405,7 @@ int ObDRWorker::record_task_plan_for_locality_alignment(
      }
      break;
    }
    case AddReplica: {
    case ObDRTaskType::LS_ADD_REPLICA: {
      const AddReplicaLATask *my_task = reinterpret_cast<const AddReplicaLATask *>(task);
      if (OB_FAIL(dr_ls_info.get_default_data_source(data_source, data_size))) {
        LOG_WARN("fail to get data_size", KR(ret), K(dr_ls_info));
@ -4449,7 +4427,7 @@ int ObDRWorker::record_task_plan_for_locality_alignment(
      }
      break;
    }
    case TypeTransform: {
    case ObDRTaskType::LS_TYPE_TRANSFORM: {
      const TypeTransformLATask *my_task = reinterpret_cast<const TypeTransformLATask *>(task);
      if (OB_FAIL(dr_ls_info.get_default_data_source(data_source, data_size))) {
        LOG_WARN("fail to get data_size", KR(ret), K(dr_ls_info));
@ -4467,7 +4445,7 @@ int ObDRWorker::record_task_plan_for_locality_alignment(
      }
      break;
    }
    case ModifyPaxosReplicaNumber: {
    case ObDRTaskType::LS_MODIFY_PAXOS_REPLICA_NUMBER: {
      const ModifyPaxosReplicaNumberLATask *my_task = reinterpret_cast<const ModifyPaxosReplicaNumberLATask *>(task);
      task_type = ObDRTaskType::LS_MODIFY_PAXOS_REPLICA_NUMBER;
      source_replica_type = REPLICA_TYPE_FULL;
@ -4542,9 +4520,7 @@ int ObDRWorker::try_locality_alignment(
  while (OB_SUCC(ret) && OB_SUCC(locality_alignment.get_next_locality_alignment_task(task))) {
    bool can_generate = false;
    const bool need_check_has_leader_while_remove_replica = false;
    const bool is_high_priority_task = task->get_task_type() != LATaskType::RemoveNonPaxos;
    ObAddr server_addr; //useless
    ObDRTaskKey task_key;
    if (OB_ISNULL(task)) {
      // bypass, there is no task to generate
    } else if (only_for_display) {
@ -4556,10 +4532,9 @@ int ObDRWorker::try_locality_alignment(
    } else if (OB_FAIL(check_can_generate_task(
            acc_dr_task,
            need_check_has_leader_while_remove_replica,
            is_high_priority_task,
            server_addr,
            dr_ls_info,
            task_key,
            task->get_task_type(),
            can_generate))) {
      LOG_WARN("fail to check can generate locality alignment task", KR(ret));
    } else if (!can_generate) {
@ -4787,17 +4762,18 @@ int ObDRWorker::try_remove_non_paxos_replica_for_deleting_unit_(
    } else {
      LOG_INFO("success to add display info", KR(ret), K(display_info));
    }
  } else if (OB_FAIL(generate_task_key(dr_ls_info, leader_addr, task_type, task_key))) {
    LOG_WARN("fail to generate task key", KR(ret), K(dr_ls_info), K(leader_addr), K(task_type));
  } else if (OB_FAIL(check_can_generate_task(
          acc_dr_task,
          need_check_has_leader_while_remove_replica,
          is_high_priority_task,
          ls_replica.get_server(),
          dr_ls_info,
          task_key,
          task_type,
          can_generate))) {
    LOG_WARN("fail to check can generate remove permanent offline task", KR(ret), K(acc_dr_task),
             K(need_check_has_leader_while_remove_replica), K(is_high_priority_task), K(ls_replica),
             K(dr_ls_info), K(task_key), K(can_generate));
             K(need_check_has_leader_while_remove_replica), K(ls_replica),
             K(dr_ls_info), K(can_generate));
  } else if (can_generate) {
    ObRemoveLSReplicaTask remove_replica_task;
    if (OB_FAIL(remove_replica_task.build(
@ -4819,10 +4795,8 @@ int ObDRWorker::try_remove_non_paxos_replica_for_deleting_unit_(
            ls_replica.get_replica_type()))) {
      LOG_WARN("fail to build remove member task", KR(ret), K(task_key), K(tenant_id), K(ls_id), K(leader_addr),
               K(remove_learner), K(old_paxos_replica_number), K(new_paxos_replica_number));
    } else if (OB_FAIL(disaster_recovery_task_mgr_->add_task(remove_replica_task))) {
    } else if (OB_FAIL(add_task_to_task_mgr_(remove_replica_task, acc_dr_task))) {
      LOG_WARN("fail to add task", KR(ret), K(remove_replica_task));
    } else {
      acc_dr_task++;
    }
  }
}
@ -4970,13 +4944,14 @@ int ObDRWorker::try_type_transform_for_deleting_unit_(
  } else {
    LOG_INFO("success to add display info", KR(ret), K(display_info));
  }
} else if (OB_FAIL(generate_task_key(dr_ls_info, target_replica.get_server(), ObDRTaskType::LS_TYPE_TRANSFORM, task_key))) {
  LOG_WARN("fail to generate task key", KR(ret), K(dr_ls_info), K(target_replica));
} else if (OB_FAIL(check_can_generate_task(
        acc_dr_task,
        false/*need_check_has_leader_while_remove_replica*/,
        true/*is_high_priority_task*/,
        target_replica.get_server(),
        dr_ls_info,
        task_key,
        ObDRTaskType::LS_TYPE_TRANSFORM,
        can_generate))) {
  LOG_WARN("fail to check whether can generate task", KR(ret), K(acc_dr_task),
           K(target_replica), K(dr_ls_info));
@ -5163,11 +5138,8 @@ int ObDRWorker::generate_type_transform_task_(
    LOG_WARN("fail to build type transform task", KR(ret), K(task_key), K(tenant_id), K(ls_id),
             K(data_size), K(dst_replica), K(src_member), K(data_source), K(old_paxos_replica_number),
             K(new_paxos_replica_number));
  } else if (OB_FAIL(disaster_recovery_task_mgr_->add_task(type_transform_task))) {
  } else if (OB_FAIL(add_task_to_task_mgr_(type_transform_task, acc_dr_task))) {
    LOG_WARN("fail to add task", KR(ret), K(type_transform_task));
  } else {
    LOG_INFO("success to add a ObLSTypeTransformTask to task manager", KR(ret), K(type_transform_task));
    acc_dr_task++;
  }
  return ret;
}
@ -5301,10 +5273,8 @@ int ObDRWorker::generate_cancel_unit_migration_task(
      new_paxos_replica_number,
      replica_type))) {
    LOG_WARN("fail to build remove member task", KR(ret), K(task_key), K(task_id));
  } else if (OB_FAIL(disaster_recovery_task_mgr_->add_task(remove_member_task))) {
  } else if (OB_FAIL(add_task_to_task_mgr_(remove_member_task, acc_dr_task))) {
    LOG_WARN("fail to add task", KR(ret), K(remove_member_task));
  } else {
    ++acc_dr_task;
  }
  return ret;
}
@ -5329,8 +5299,6 @@ int ObDRWorker::try_cancel_unit_migration(
    LOG_WARN("config_ ptr is null", KR(ret), KP(config_));
  } else if (!config_->is_rereplication_enabled()) {
    // bypass
  } else if (OB_FAIL(generate_task_key(dr_ls_info, task_key))) {
    LOG_WARN("fail to generate task key", KR(ret));
  } else if (!dr_ls_info.has_leader()) {
    LOG_WARN("has no leader, maybe not report yet",
             KR(ret), K(dr_ls_info));
@ -5418,13 +5386,14 @@ int ObDRWorker::try_cancel_unit_migration(
  } else {
    LOG_INFO("success to add display info", KR(ret), K(display_info));
  }
} else if (OB_FAIL(generate_task_key(dr_ls_info, leader_addr, task_type, task_key))) {
  LOG_WARN("fail to generate task key", KR(ret), K(dr_ls_info), K(leader_addr), K(task_type));
} else if (OB_FAIL(check_can_generate_task(
        acc_dr_task,
        need_check_has_leader_while_remove_replica,
        is_paxos_replica_related,
        ls_replica->get_server(),
        dr_ls_info,
        task_key,
        task_type,
        can_generate))) {
  LOG_WARN("fail to check can generate cancel unit migration task", KR(ret));
} else if (can_generate) {
@ -5585,10 +5554,8 @@ int ObDRWorker::generate_migrate_to_unit_task(
      ObReplicaMember()/*empty force_data_source*/,
      old_paxos_replica_number))) {
    LOG_WARN("fail to build migrate task", KR(ret));
  } else if (OB_FAIL(disaster_recovery_task_mgr_->add_task(migrate_task))) {
  } else if (OB_FAIL(add_task_to_task_mgr_(migrate_task, acc_dr_task))) {
    LOG_WARN("fail to add task", KR(ret), K(migrate_task));
  } else {
    ++acc_dr_task;
  }
  return ret;
}
@ -5599,8 +5566,6 @@ int ObDRWorker::try_migrate_to_unit(
    int64_t &acc_dr_task)
{
  int ret = OB_SUCCESS;

  ObDRTaskKey task_key;
  bool task_exist = false;
  bool sibling_task_executing = false;
  int64_t replica_cnt = 0;
@ -5612,8 +5577,6 @@ int ObDRWorker::try_migrate_to_unit(
    LOG_WARN("config_ ptr is null", KR(ret), KP(config_));
  } else if (!config_->is_rereplication_enabled()) {
    // bypass
  } else if (OB_FAIL(generate_task_key(dr_ls_info, task_key))) {
    LOG_WARN("fail to generate task key", KR(ret));
  } else if (!dr_ls_info.has_leader()) {
    LOG_WARN("has no leader, maybe not report yet",
             KR(ret), K(dr_ls_info));
@ -5649,8 +5612,8 @@ int ObDRWorker::try_migrate_to_unit(
        ObDstReplica dst_replica;
        int64_t old_paxos_replica_number = 0;
        bool can_generate = false;
        ObDRTaskKey task_key;
        const bool need_check_has_leader_while_remove_replica = false;
        const bool is_high_priority_task = false;
        ObReplicaMember src_member(ls_replica->get_server(),
                                   ls_replica->get_member_time_us(),
                                   ls_replica->get_replica_type(),
@ -5711,14 +5674,15 @@ int ObDRWorker::try_migrate_to_unit(
        } else {
          LOG_INFO("success to add display info", KR(ret), K(display_info));
        }
      } else if (OB_FAIL(generate_task_key(dr_ls_info, dst_member.get_server(), ObDRTaskType::LS_MIGRATE_REPLICA, task_key))) {
        LOG_WARN("fail to generate task key", KR(ret), K(dr_ls_info), K(dst_member));
      } else if (OB_FAIL(check_can_generate_task(
              acc_dr_task,
              need_check_has_leader_while_remove_replica,
              is_high_priority_task,
              ls_replica->get_server(),
              dr_ls_info,
              task_key,
              can_generate))) {
              acc_dr_task,
              need_check_has_leader_while_remove_replica,
              ls_replica->get_server(),
              dr_ls_info,
              ObDRTaskType::LS_MIGRATE_REPLICA,
              can_generate))) {
        LOG_WARN("fail to check can generate migrate to unit task", KR(ret));
      } else if (can_generate) {
        if (OB_FAIL(generate_migrate_to_unit_task(
@ -5736,8 +5700,8 @@ int ObDRWorker::try_migrate_to_unit(
          LOG_WARN("fail to generate migrate to unit task", KR(ret));
        }
      }
    }
  }
      } // end need_generate
    } // end for
  }
  // no need to print task key, since the previous log contains that
  LOG_INFO("finish try migrate to unit", KR(ret), K(acc_dr_task));

@ -135,10 +135,15 @@ public:
      common::ObSArray<ObLSReplicaTaskDisplayInfo> &task_plan);

private:

  // add a task to the queue of task mgr
  // @param [out] task, the task to execute
  // @param [out] acc_dr_task, accumulated count of generated dr tasks
  int add_task_to_task_mgr_(
      ObDRTask &task,
      int64_t &acc_dr_task);
  // add task in queue in mgr and execute task
  // @param [in] task, the task to execute
  int add_task_in_queue_and_execute_(const ObDRTask &task);
  int add_task_in_queue_and_execute_(ObDRTask &task);
  // check ls exist and init dr_ls_info
  // @param [in] arg, task info
  // @param [out] dr_ls_info, target dr_ls_info to init
@ -294,14 +299,6 @@ private:
      const MemberChangeType member_change_type,
      int64_t &new_paxos_replica_number,
      bool &found);
  enum LATaskType
  {
    RemovePaxos = 0,
    RemoveNonPaxos,
    AddReplica,
    TypeTransform,
    ModifyPaxosReplicaNumber,
  };

  enum class LATaskPrio : int64_t
  {
@ -324,7 +321,7 @@ private:
    LATask() {}
    virtual ~LATask() {}
  public:
    virtual LATaskType get_task_type() const = 0;
    virtual ObDRTaskType get_task_type() const = 0;
    virtual LATaskPrio get_task_priority() const = 0;
    virtual int64_t to_string(char *buf, const int64_t buf_len) const = 0;
  };
@ -355,9 +352,9 @@ private:
|
||||
paxos_replica_number_(0) {}
|
||||
virtual ~RemoveReplicaLATask() {}
|
||||
public:
|
||||
virtual LATaskType get_task_type() const override { return ObReplicaTypeCheck::is_paxos_replica_V2(replica_type_)
|
||||
? RemovePaxos
|
||||
: RemoveNonPaxos; }
|
||||
virtual ObDRTaskType get_task_type() const override { return ObReplicaTypeCheck::is_paxos_replica_V2(replica_type_)
|
||||
? ObDRTaskType::LS_REMOVE_PAXOS_REPLICA
|
||||
: ObDRTaskType::LS_REMOVE_NON_PAXOS_REPLICA; }
|
||||
virtual LATaskPrio get_task_priority() const override {
|
||||
LATaskPrio priority = ObReplicaTypeCheck::is_paxos_replica_V2(replica_type_)
|
||||
? LATaskPrio::LA_P_REMOVE_PAXOS
|
||||
@ -396,7 +393,7 @@ private:
|
||||
paxos_replica_number_(0) {}
|
||||
virtual ~AddReplicaLATask() {}
|
||||
public:
|
||||
virtual LATaskType get_task_type() const override{ return AddReplica; }
|
||||
virtual ObDRTaskType get_task_type() const override { return ObDRTaskType::LS_ADD_REPLICA; }
|
||||
virtual LATaskPrio get_task_priority() const override {
|
||||
LATaskPrio priority = LATaskPrio::LA_P_MAX;
|
||||
if (common::REPLICA_TYPE_FULL == replica_type_) {
|
||||
@ -452,7 +449,7 @@ private:
|
||||
paxos_replica_number_(0) {}
|
||||
virtual ~TypeTransformLATask() {}
|
||||
public:
|
||||
virtual LATaskType get_task_type() const override { return TypeTransform; }
|
||||
virtual ObDRTaskType get_task_type() const override { return ObDRTaskType::LS_TYPE_TRANSFORM; }
|
||||
virtual LATaskPrio get_task_priority() const override {
|
||||
LATaskPrio priority = LATaskPrio::LA_P_MAX;
|
||||
if (common::REPLICA_TYPE_FULL == dst_replica_type_
|
||||
@ -503,7 +500,7 @@ private:
|
||||
paxos_replica_number_(0) {}
|
||||
virtual ~ModifyPaxosReplicaNumberLATask() {}
|
||||
public:
|
||||
virtual LATaskType get_task_type() const override { return ModifyPaxosReplicaNumber; }
|
||||
virtual ObDRTaskType get_task_type() const override { return ObDRTaskType::LS_MODIFY_PAXOS_REPLICA_NUMBER; }
|
||||
virtual LATaskPrio get_task_priority() const override {
|
||||
LATaskPrio priority = LATaskPrio::LA_P_MODIFY_PAXOS_REPLICA_NUMBER;
|
||||
return priority;
|
||||
@ -794,12 +791,6 @@ private:
|
||||
private:
|
||||
void reset_task_plans_() { display_tasks_.reset(); }
|
||||
|
||||
int check_task_already_exist(
|
||||
const ObDRTaskKey &task_key,
|
||||
const DRLSInfo &dr_ls_info,
|
||||
const int64_t &priority,
|
||||
bool &task_exist);
|
||||
|
||||
int check_whether_the_tenant_role_can_exec_dr_(const uint64_t tenant_id);
|
||||
|
||||
int try_remove_permanent_offline_replicas(
|
||||
@ -822,10 +813,9 @@ private:
|
||||
int check_can_generate_task(
|
||||
const int64_t acc_dr_task,
|
||||
const bool need_check_has_leader_while_remove_replica,
|
||||
const bool is_high_priority_task,
|
||||
const ObAddr &server_addr,
|
||||
DRLSInfo &dr_ls_info,
|
||||
ObDRTaskKey &task_key,
|
||||
const ObDRTaskType &task_type,
|
||||
bool &can_generate);
|
||||
|
||||
int construct_extra_infos_to_build_remove_replica_task(
|
||||
@ -991,6 +981,8 @@ private:
|
||||
|
||||
int generate_task_key(
|
||||
const DRLSInfo &dr_ls_info,
|
||||
const common::ObAddr &task_exe_server,
|
||||
const ObDRTaskType &task_type,
|
||||
ObDRTaskKey &task_key) const;
|
||||
|
||||
int add_display_info(const ObLSReplicaTaskDisplayInfo &display_info);
|
||||
@ -1006,25 +998,21 @@ private:
|
||||
|
||||
int try_generate_remove_replica_locality_alignment_task(
|
||||
DRLSInfo &dr_ls_info,
|
||||
const ObDRTaskKey &task_key,
|
||||
const LATask *task,
|
||||
int64_t &acc_dr_task);
|
||||
|
||||
int try_generate_add_replica_locality_alignment_task(
|
||||
DRLSInfo &dr_ls_info,
|
||||
const ObDRTaskKey &task_key,
|
||||
const LATask *task,
|
||||
int64_t &acc_dr_task);
|
||||
|
||||
int try_generate_type_transform_locality_alignment_task(
|
||||
DRLSInfo &dr_ls_info,
|
||||
const ObDRTaskKey &task_key,
|
||||
const LATask *task,
|
||||
int64_t &acc_dr_task);
|
||||
|
||||
int try_generate_modify_paxos_replica_number_locality_alignment_task(
|
||||
DRLSInfo &dr_ls_info,
|
||||
const ObDRTaskKey &task_key,
|
||||
const LATask *task,
|
||||
int64_t &acc_dr_task);
|
||||
|
||||
|
@ -1476,6 +1476,14 @@ bool ObConfigRegexpEngineChecker::check(const ObConfigItem &t) const
return valid;
}

bool ObConfigReplicaParallelMigrationChecker::check(const ObConfigItem &t) const
{
ObString v_str(t.str());
return 0 == v_str.case_compare("auto")
|| 0 == v_str.case_compare("on")
|| 0 == v_str.case_compare("off");
}

bool ObConfigS3URLEncodeTypeChecker::check(const ObConfigItem &t) const
{
// When compliantRfc3986Encoding is set to true:

@ -963,6 +963,16 @@ private:
DISALLOW_COPY_AND_ASSIGN(ObConfigS3URLEncodeTypeChecker);
};

class ObConfigReplicaParallelMigrationChecker : public ObConfigChecker
{
public:
ObConfigReplicaParallelMigrationChecker() {}
virtual ~ObConfigReplicaParallelMigrationChecker() {}
bool check(const ObConfigItem &t) const;
private:
DISALLOW_COPY_AND_ASSIGN(ObConfigReplicaParallelMigrationChecker);
};

class ObConfigDegradationPolicyChecker : public ObConfigChecker
{
public:

@ -4465,7 +4465,8 @@ int ObLSMigrateReplicaArg::init(
const common::ObReplicaMember &discarded_data_source,
const int64_t paxos_replica_number,
const bool skip_change_member_list,
const common::ObReplicaMember &force_data_source)
const common::ObReplicaMember &force_data_source,
const bool prioritize_same_zone_src)
{
int ret = OB_SUCCESS;
task_id_ = task_id;
@ -4477,6 +4478,7 @@ int ObLSMigrateReplicaArg::init(
paxos_replica_number_ = paxos_replica_number;
skip_change_member_list_ = skip_change_member_list;
force_data_source_ = force_data_source;
prioritize_same_zone_src_ = prioritize_same_zone_src;
return ret;
}

@ -4873,7 +4873,8 @@ public:
const common::ObReplicaMember &discarded_data_source,
const int64_t paxos_replica_number,
const bool skip_change_member_list,
const common::ObReplicaMember &force_data_source);
const common::ObReplicaMember &force_data_source,
const bool prioritize_same_zone_src);

TO_STRING_KV(K_(task_id),
K_(tenant_id),

@ -842,6 +842,15 @@ DEF_BOOL(enable_sys_unit_standalone, OB_CLUSTER_PARAMETER, "False",
"Value: True:turned on False: turned off",
ObParameterAttr(Section::LOAD_BALANCE, Source::DEFAULT, EditLevel::DYNAMIC_EFFECTIVE));

DEF_STR_WITH_CHECKER(replica_parallel_migration_mode, OB_TENANT_PARAMETER, "auto",
common::ObConfigReplicaParallelMigrationChecker,
"specify the strategy for parallel migration of LS replicas. "
"'auto' means to allow parallel migration of LS replica of standby tenant "
"and prohibit the parallel migration of LS replica of primary tenant. "
"'on' means to allow parallel migration of LS replica of primary tenant and standby tenant. "
"'off' means to prohibit parallel migration of LS replica of primary tenant and standby tenant",
ObParameterAttr(Section::TENANT, Source::DEFAULT, EditLevel::DYNAMIC_EFFECTIVE));

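The three accepted values of replica_parallel_migration_mode reduce to a simple gate: 'on' permits parallel LS replica migration for every tenant, 'auto' permits it only for standby tenants, and 'off' forbids it everywhere. A minimal standalone sketch of that decision (the helpers below are illustrative, not code added by this commit):

#include <cctype>

// Case-insensitive string equality, standing in for ObString::case_compare.
static bool iequals(const char *a, const char *b)
{
  for (; *a && *b; ++a, ++b) {
    if (std::tolower(static_cast<unsigned char>(*a)) !=
        std::tolower(static_cast<unsigned char>(*b))) {
      return false;
    }
  }
  return *a == *b;
}

// Illustrative only: maps the documented 'auto'/'on'/'off' semantics of
// replica_parallel_migration_mode onto an allow/deny decision.
static bool allow_parallel_migration(const char *mode, const bool is_primary_tenant)
{
  if (iequals(mode, "on")) {
    return true;                // primary and standby tenants both allowed
  } else if (iequals(mode, "auto")) {
    return !is_primary_tenant;  // standby tenants only
  }
  return false;                 // 'off', or any value the checker would reject
}
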
//// daily merge config
// set to disable if don't want major freeze launch auto
DEF_MOMENT(major_freeze_duty_time, OB_TENANT_PARAMETER, "02:00",
@ -2287,6 +2296,11 @@ ERRSIM_DEF_INT(errsim_rebuild_ls_id, OB_CLUSTER_PARAMETER, "0", "[0,)",
ERRSIM_DEF_STR(errsim_rebuild_addr, OB_CLUSTER_PARAMETER, "",
"rebuild addr (ip:port)",
ObParameterAttr(Section::OBSERVER, Source::DEFAULT, EditLevel::DYNAMIC_EFFECTIVE));

ERRSIM_DEF_BOOL(enable_parallel_migration, OB_CLUSTER_PARAMETER, "False",
"turn on parallel migration, the observer preferentially chooses a src in the same zone",
ObParameterAttr(Section::OBSERVER, Source::DEFAULT, EditLevel::DYNAMIC_EFFECTIVE));

DEF_BOOL(_enable_adaptive_auto_dop, OB_CLUSTER_PARAMETER, "False",
"Enable or disable adaptive auto dop feature.",
ObParameterAttr(Section::OBSERVER, Source::DEFAULT, EditLevel::DYNAMIC_EFFECTIVE));

@ -1223,6 +1223,7 @@ int ObStartMigrationTask::choose_src_()
omt::ObTenantConfigGuard tenant_config(TENANT_CONF(tenant_id));
ObLS* ls = nullptr;
ObLSHandle ls_handle;
bool use_c_replica_policy = false;
if (OB_FAIL(get_local_ls_checkpoint_scn_(local_clog_checkpoint_scn))) {
LOG_WARN("failed to get local ls checkpoint ts", K(ret));
} else if (!tenant_config.is_valid()) {
@ -1240,12 +1241,11 @@ int ObStartMigrationTask::choose_src_()
ctx_->arg_.dst_, param.info_, param.is_first_c_replica_))) {
LOG_WARN("failed to get member list.", K(ret), K(tenant_id), "ls_id", ctx_->arg_.ls_id_, "dst", ctx_->arg_.dst_);
} else if (OB_FAIL(ObStorageHAChooseSrcHelper::get_policy_type(ctx_->arg_, tenant_id,
enable_choose_source_policy, str, param.info_.learner_list_, policy))) {
enable_choose_source_policy, str, param.info_.learner_list_, param.policy_, param.use_c_replica_policy_))) {
LOG_WARN("failed to get policy type", K(ret), K(ctx_->arg_), K(tenant_id),
K(enable_choose_source_policy), K(str));
} else if (OB_FAIL(choose_src_helper.init(param, policy,
storage_rpc_, &member_helper))) {
LOG_WARN("failed to init src provider.", K(ret), K(param), K(policy), KP(storage_rpc_));
K(enable_choose_source_policy), K(str), K(param));
} else if (OB_FAIL(choose_src_helper.init(param, storage_rpc_, &member_helper))) {
LOG_WARN("failed to init src provider.", K(ret), K(param), KP(storage_rpc_));
} else if (OB_FAIL(choose_src_helper.get_available_src(ctx_->arg_, src_info))) {
LOG_WARN("failed to choose ob src", K(ret), K(tenant_id), K(ls_id), K(local_clog_checkpoint_scn), K(ctx_->arg_));
} else if (OB_FAIL(fetch_ls_info_(tenant_id, ls_id, src_info.src_addr_, ls_info))) {

@ -1147,6 +1147,7 @@ int ObLSRebuildMgr::generate_rebuild_task_()
arg.priority_ = ObMigrationOpPriority::PRIO_MID;
arg.src_ = src_replica_member;
arg.type_ = ObMigrationOpType::REBUILD_LS_OP;
arg.prioritize_same_zone_src_ = false;

if (OB_FAIL(ls->get_ls_migration_handler()->add_ls_migration_task(rebuild_ctx_.task_id_, arg))) {
LOG_WARN("failed to add ls migration task", K(ret), K(arg), KPC(ls));

760
src/storage/high_availability/ob_storage_ha_src_provider.cpp
Normal file → Executable file
File diff suppressed because it is too large
222
src/storage/high_availability/ob_storage_ha_src_provider.h
Normal file → Executable file
@ -21,6 +21,7 @@

namespace oceanbase {
namespace storage {
struct ObMigrationChooseSrcHelperInitParam;

class ObStorageHAGetMemberHelper
{
@ -68,51 +69,91 @@ class ObStorageHASrcProvider {
public:
enum ChooseSourcePolicy : uint8_t
{
IDC = 0,
REGION = 1,
CHECKPOINT = 2,
RECOMMEND = 3,
C_TYPE_REPLICA = 4,
ZONE = 0,
IDC = 1,
REGION = 2,
DIFFERENT_REGION = 3, // cannot set manually
// above policies are used for choosing source by location
CHECKPOINT = 4,
RECOMMEND = 5,
MAX_POLICY
};

static const int64_t LOCATION_POLICY_COUNT = DIFFERENT_REGION + 1;

struct ChooseSourcePolicyDetailedInfo
{
public:
ChooseSourcePolicyDetailedInfo():
policy_type_(ChooseSourcePolicy::MAX_POLICY),
chosen_policy_type_(ChooseSourcePolicy::MAX_POLICY),
use_c_replica_policy_(false),
is_first_c_replica_(false)
{}
~ChooseSourcePolicyDetailedInfo() {}

TO_STRING_KV(
"policy_type", get_policy_str(policy_type_),
"chosen_policy_type_", get_policy_str(chosen_policy_type_),
K_(use_c_replica_policy),
K_(is_first_c_replica));
ChooseSourcePolicy policy_type_;
ChooseSourcePolicy chosen_policy_type_;
bool use_c_replica_policy_;
bool is_first_c_replica_;
};

ObStorageHASrcProvider();
virtual ~ObStorageHASrcProvider();
int init(const ObMigrationChooseSrcHelperInitParam &param,
const ChooseSourcePolicy policy_type,
storage::ObStorageRpc *storage_rpc,
ObStorageHAGetMemberHelper *member_helper);
virtual int choose_ob_src(
const ObMigrationOpArg &arg, common::ObAddr &chosen_src_addr) = 0;
const ObMigrationOpArg &arg, common::ObAddr &chosen_src_addr);

inline uint64_t get_tenant_id() const { return tenant_id_; }
const share::ObLSID &get_ls_id() const { return ls_id_; }
ObMigrationOpType::TYPE get_type() const { return type_; }
ObMigrationOpType::TYPE get_migration_op_type() const { return type_; }
ChooseSourcePolicy get_policy_type() const { return policy_type_; }
storage::ObStorageRpc *get_storage_rpc() const { return storage_rpc_; }
const share::SCN &get_local_clog_checkpoint_scn() const { return local_clog_checkpoint_scn_; }
const share::SCN &get_palf_parent_checkpoint_scn() const { return palf_parent_checkpoint_scn_; }
ChooseSourcePolicy get_policy_type() const { return policy_type_; }
bool is_first_c_replica() const { return is_first_c_replica_; }

const static char *ObChooseSourcePolicyStr[static_cast<int64_t>(ChooseSourcePolicy::MAX_POLICY)];
const static char *get_policy_str(const ChooseSourcePolicy policy_type);
int get_policy_detailed_info_str(char *buf, const int64_t buf_len);
int check_tenant_primary(bool &is_primary);

protected:
// The validity assessment of replicas includes:
// server_version: dest server_version >= src server_version
// restore_status: if restore_status of ls is fail, migration needs to wait.
// migration_status: OB_MIGRATION_STATUS_NONE
// replica type: F replica could serve as the source of F replica and R replica,
// while R replica could only serve as the source of R replica
// source checkpoint scn must be greater than or equal to palf_parent_checkpoint_scn_ and local_clog_checkpoint_scn_
/*
* The validity assessment of replicas includes:
* server_version: dest server_version >= src server_version
* restore_status: if restore_status of ls is fail, migration needs to wait.
* migration_status: OB_MIGRATION_STATUS_NONE
* replica type:
* 1. F replica could serve as the source of F/R/C replica
* 2. R replica could only serve as the source of R/C
* 3. C replica could only serve as the source of C replica
* clog_checkpoint: source checkpoint scn must be greater than or equal to palf_parent_checkpoint_scn_ and local_clog_checkpoint_scn_
*
* If must_choose_c_replica is true, the source must be a C replica.
*/
int check_replica_validity(
const common::ObAddr &addr, const common::ObReplicaMember &dst,
const common::GlobalLearnerList &learner_list, obrpc::ObFetchLSMetaInfoResp &ls_info);
const common::GlobalLearnerList &learner_list, const bool &must_choose_c_replica,
obrpc::ObFetchLSMetaInfoResp &ls_info);

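The replica-type rule in the comment above is a small compatibility matrix: an F source can serve any destination, an R source can serve R and C destinations, and a C source can serve only C destinations. A standalone sketch of that predicate (the enum is a stand-in for the real replica-type codes; the real check_replica_validity additionally verifies version, status, and checkpoint SCNs):

// Illustrative replica-kind compatibility check mirroring the comment above.
enum class ReplicaKind { F, R, C };  // full / read-only / C-type (assumed stand-ins)

static bool src_can_serve_dst(const ReplicaKind src, const ReplicaKind dst)
{
  switch (src) {
    case ReplicaKind::F: return true;                   // F serves F/R/C
    case ReplicaKind::R: return ReplicaKind::F != dst;  // R serves R/C
    case ReplicaKind::C: return ReplicaKind::C == dst;  // C serves C only
  }
  return false;  // unreachable; keeps compilers quiet
}
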
virtual int inner_choose_ob_src(
const common::ObAddr &leader_addr, const common::GlobalLearnerList &learner_list,
const common::ObIArray<common::ObAddr> &addr_list, const ObMigrationOpArg &arg, const bool &must_choose_c_replica,
common::ObAddr &chosen_src_addr) = 0;
protected:
bool is_inited_;
ObLSMemberListInfo member_list_info_;
bool is_first_c_replica_;
ChooseSourcePolicy chosen_policy_type_; // the policy type finally chosen after checking
bool use_c_replica_policy_; // true if the dst is a C replica and a C replica already exists in the cluster
private:
int fetch_ls_meta_info_(const uint64_t tenant_id, const share::ObLSID &ls_id, const common::ObAddr &member_addr,
obrpc::ObFetchLSMetaInfoResp &ls_meta_info);
@ -120,6 +161,7 @@ private:
const common::ObAddr &addr,
const common::ObReplicaMember &dst,
const common::GlobalLearnerList &learner_list,
const bool &must_choose_c_replica,
bool &is_replica_type_valid);
int init_palf_parent_checkpoint_scn_(const uint64_t tenant_id, const share::ObLSID &ls_id,
const share::SCN &local_clog_checkpoint_scn, const common::ObReplicaType replica_type);
@ -147,7 +189,7 @@ private:
share::SCN palf_parent_checkpoint_scn_;
ObStorageHAGetMemberHelper *member_helper_;
storage::ObStorageRpc *storage_rpc_;
ChooseSourcePolicy policy_type_;
ChooseSourcePolicy policy_type_; // the policy type chosen by the user
DISALLOW_COPY_AND_ASSIGN(ObStorageHASrcProvider);
};

@ -156,36 +198,49 @@ class ObMigrationSrcByLocationProvider : public ObStorageHASrcProvider
public:
ObMigrationSrcByLocationProvider();
virtual ~ObMigrationSrcByLocationProvider();
int init(const ObMigrationChooseSrcHelperInitParam &param,
const ChooseSourcePolicy policy_type,
int init(
const ObMigrationChooseSrcHelperInitParam &param,
storage::ObStorageRpc *storage_rpc,
ObStorageHAGetMemberHelper *member_helper);
virtual int choose_ob_src(
const ObMigrationOpArg &arg, common::ObAddr &chosen_src_addr) override;

protected:
int inner_choose_ob_src(
virtual int inner_choose_ob_src(
const common::ObAddr &leader_addr, const common::GlobalLearnerList &learner_list,
const common::ObIArray<common::ObAddr> &addr_list, const ObMigrationOpArg &arg,
common::ObAddr &choosen_src_addr);
const common::ObIArray<common::ObAddr> &addr_list, const ObMigrationOpArg &arg, const bool &must_choose_c_replica,
common::ObAddr &chosen_src_addr) override;
private:
void set_locality_manager_(ObLocalityManager *locality_manager);
int get_server_geography_info_(const common::ObAddr &addr, common::ObRegion &region, common::ObIDC &idc, common::ObZone &zone);
/*
 * Divide (sort) the addr_list by zone, idc, region, different region.
 * The layout of the sorted_addr_list is as follows:
 * |<---- same zone ----->|<------ same idc ------>|<----- same region ---->|<-- different region -->|
 * {addr[0], ..., addr[p], addr[p+1], ..., addr[q], addr[q+1], ..., addr[s], addr[s+1], ..., addr[t]}
 *                      |                        |                        |
 *               zone_end_index            idc_end_index          region_end_index
 */
int divide_addr_list(
const common::ObIArray<common::ObAddr> &addr_list,
const common::ObReplicaMember &dst,
common::ObIArray<common::ObAddr> &sorted_addr_list,
int64_t &zone_end_index,
int64_t &idc_end_index,
int64_t &region_end_index);
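Given the sorted layout sketched above, source search can escalate range by range: same zone first, then same idc, same region, and finally different region. A standalone sketch of that index bookkeeping (illustrative; the real search also validates every candidate via check_replica_validity):

#include <cstdint>
#include <utility>
#include <vector>

// Illustrative only: yields the inclusive [start, end] index ranges of the
// sorted address list in escalation order (zone -> idc -> region ->
// different region). An end index below the current start marks an empty range.
static std::vector<std::pair<int64_t, int64_t>> candidate_ranges(
    const int64_t zone_end, const int64_t idc_end,
    const int64_t region_end, const int64_t last_index)
{
  std::vector<std::pair<int64_t, int64_t>> ranges;
  const int64_t ends[] = {zone_end, idc_end, region_end, last_index};
  int64_t start = 0;
  for (const int64_t end : ends) {
    if (end >= start) {
      ranges.emplace_back(start, end);  // one preference tier of the list
      start = end + 1;
    }
  }
  return ranges;
}
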
int find_src(
/*
 * Find source from the sorted addr list.
 * Will only choose the src in [start_index, end_index].
 * The chosen source must be valid (see ObStorageHASrcProvider::check_replica_validity).
 */
int find_src_in_sorted_addr_list_(
const common::ObIArray<common::ObAddr> &sorted_addr_list,
const int64_t start_index,
const int64_t end_index,
const common::GlobalLearnerList &learner_list,
const common::ObAddr &leader_addr,
const common::ObReplicaMember &dst,
common::ObAddr &choosen_src_addr);
private:
void set_locality_manager_(ObLocalityManager *locality_manager);
int get_server_region_and_idc_(
const common::ObAddr &addr, common::ObRegion &region, common::ObIDC &idc);
const bool &must_choose_c_replica,
common::ObAddr &chosen_src_addr);

private:
ObLocalityManager *locality_manager_;

@ -197,18 +252,15 @@ class ObMigrationSrcByCheckpointProvider : public ObStorageHASrcProvider
public:
ObMigrationSrcByCheckpointProvider();
virtual ~ObMigrationSrcByCheckpointProvider();
int init(const ObMigrationChooseSrcHelperInitParam &param,
const ChooseSourcePolicy policy_type,
int init(
const ObMigrationChooseSrcHelperInitParam &param,
storage::ObStorageRpc *storage_rpc,
ObStorageHAGetMemberHelper *member_helper);
virtual int choose_ob_src(
const ObMigrationOpArg &arg, common::ObAddr &chosen_src_addr) override;

private:
int inner_choose_ob_src_(
protected:
virtual int inner_choose_ob_src(
const common::ObAddr &leader_addr, const common::GlobalLearnerList &learner_list,
const common::ObIArray<common::ObAddr> &addr_list, const ObMigrationOpArg &arg,
common::ObAddr &choosen_src_addr);
const common::ObIArray<common::ObAddr> &addr_list, const ObMigrationOpArg &arg, const bool &must_choose_c_replica,
common::ObAddr &chosen_src_addr) override;

DISALLOW_COPY_AND_ASSIGN(ObMigrationSrcByCheckpointProvider);
};
@ -218,71 +270,59 @@ class ObRSRecommendSrcProvider : public ObStorageHASrcProvider
public:
ObRSRecommendSrcProvider();
virtual ~ObRSRecommendSrcProvider();
int init(const ObMigrationChooseSrcHelperInitParam &param,
const ChooseSourcePolicy policy_type,
int init(
const ObMigrationChooseSrcHelperInitParam &param,
storage::ObStorageRpc *storage_rpc,
ObStorageHAGetMemberHelper *member_helper);
int choose_ob_src(
const ObMigrationOpArg &arg, common::ObAddr &chosen_src_addr);

// overrides choose_ob_src: with the RS-recommended policy, inner_choose_ob_src only needs to be called once (no retry when is_first_c_replica)
virtual int choose_ob_src(
const ObMigrationOpArg &arg, common::ObAddr &chosen_src_addr) override;
protected:
virtual int inner_choose_ob_src(
const common::ObAddr &leader_addr, const common::GlobalLearnerList &learner_list,
const common::ObIArray<common::ObAddr> &addr_list, const ObMigrationOpArg &arg, const bool &must_choose_c_replica,
common::ObAddr &chosen_src_addr) override;
private:
int check_replica_validity_(const int64_t cluster_id, const common::ObIArray<common::ObAddr> &addr_list,
const common::ObAddr &addr, const common::ObReplicaMember &dst,
const common::GlobalLearnerList &learner_list);
const common::GlobalLearnerList &learner_list, const bool &must_choose_c_replica);
DISALLOW_COPY_AND_ASSIGN(ObRSRecommendSrcProvider);
};

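The override comment above implies the base provider's shape: a strict pass that insists on a C-replica source when that policy applies, followed by a relaxed retry; the RS-recommend provider skips the retry because its source is fixed. A hedged standalone sketch of that retry shape (assumed from the comment, not the literal implementation):

// Illustrative retry skeleton assumed from the comment above: one strict
// pass (must_choose_c_replica = true), then a single relaxed fallback pass.
template <typename ChooseFn>
static int choose_with_c_replica_fallback(const bool prefer_c_replica, ChooseFn &&choose)
{
  int ret = choose(/*must_choose_c_replica=*/prefer_c_replica);
  if (0 != ret && prefer_c_replica) {
    ret = choose(/*must_choose_c_replica=*/false);  // relax and retry once
  }
  return ret;
}
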
class ObCTypeReplicaSrcProvider : public ObStorageHASrcProvider
{
public:
ObCTypeReplicaSrcProvider();
virtual ~ObCTypeReplicaSrcProvider();
int init(const ObMigrationChooseSrcHelperInitParam &param,
const ChooseSourcePolicy policy_type,
storage::ObStorageRpc *storage_rpc,
ObStorageHAGetMemberHelper *member_helper);
int choose_ob_src(
const ObMigrationOpArg &arg, common::ObAddr &chosen_src_addr);

private:
int inner_choose_ob_src_(
const common::ObIArray<common::ObAddr> &addr_list,
const common::GlobalLearnerList &learner_list,
const ObMigrationOpArg &arg, common::ObAddr &choosen_src_addr);
int get_c_replica_(
const common::ObAddr &addr,
const common::GlobalLearnerList &learner_list,
ObIArray<common::ObAddr> &c_replica_list) const;
DISALLOW_COPY_AND_ASSIGN(ObCTypeReplicaSrcProvider);
};

class ObStorageHAChooseSrcHelper final
{
public:
ObStorageHAChooseSrcHelper();
~ObStorageHAChooseSrcHelper();
int init(const ObMigrationChooseSrcHelperInitParam &param, const ObStorageHASrcProvider::ChooseSourcePolicy policy,
storage::ObStorageRpc *storage_rpc, ObStorageHAGetMemberHelper *member_helper);
int init(
const ObMigrationChooseSrcHelperInitParam &param,
storage::ObStorageRpc *storage_rpc,
ObStorageHAGetMemberHelper *member_helper);
int get_available_src(const ObMigrationOpArg &arg, ObStorageHASrcInfo &src_info);
static int get_policy_type(const ObMigrationOpArg &arg, const uint64_t tenant_id,
bool enable_choose_source_policy, const char *policy_str,
static int get_policy_type(
const ObMigrationOpArg &arg,
const uint64_t tenant_id,
const bool enable_choose_source_policy,
const char *policy_str,
const common::GlobalLearnerList &learner_list,
ObStorageHASrcProvider::ChooseSourcePolicy &policy);
ObStorageHASrcProvider::ChooseSourcePolicy &policy,
bool &use_c_replica_policy);

private:
int init_rs_recommend_source_provider_(const ObMigrationChooseSrcHelperInitParam &param,
int init_rs_recommend_source_provider_(
const ObMigrationChooseSrcHelperInitParam &param,
storage::ObStorageRpc *storage_rpc,
ObStorageHAGetMemberHelper *member_helper);
int init_choose_source_by_location_provider_(
const ObMigrationChooseSrcHelperInitParam &param,
const ObStorageHASrcProvider::ChooseSourcePolicy policy, storage::ObStorageRpc *storage_rpc,
storage::ObStorageRpc *storage_rpc,
ObStorageHAGetMemberHelper *member_helper);
int init_choose_source_by_checkpoint_provider_(
const ObMigrationChooseSrcHelperInitParam &param,
storage::ObStorageRpc *storage_rpc,
ObStorageHAGetMemberHelper *member_helper);
int init_c_type_replica_source_provider_(
const ObMigrationChooseSrcHelperInitParam &param,
storage::ObStorageRpc *storage_rpc, ObStorageHAGetMemberHelper *member_helper);

void errsim_test_(const ObMigrationOpArg &arg, ObStorageHASrcInfo &src_info);
ObStorageHASrcProvider * get_provider() const { return provider_; }
static int check_c_replica_migration_policy_(const uint64_t tenant_id, const share::ObLSID &ls_id,
@ -297,6 +337,28 @@ private:
DISALLOW_COPY_AND_ASSIGN(ObStorageHAChooseSrcHelper);
};

struct ObMigrationChooseSrcHelperInitParam final
{
public:
ObMigrationChooseSrcHelperInitParam();
~ObMigrationChooseSrcHelperInitParam() = default;
void reset();
bool is_valid() const;
int assign(const ObMigrationChooseSrcHelperInitParam &param);

TO_STRING_KV(
K_(tenant_id), K_(ls_id), K_(local_clog_checkpoint_scn), K_(arg), K_(info), K_(policy), K_(use_c_replica_policy), K_(is_first_c_replica));
uint64_t tenant_id_;
share::ObLSID ls_id_;
share::SCN local_clog_checkpoint_scn_;
ObMigrationOpArg arg_;
ObLSMemberListInfo info_;
ObStorageHASrcProvider::ChooseSourcePolicy policy_;
bool use_c_replica_policy_;
bool is_first_c_replica_;
private:
DISALLOW_COPY_AND_ASSIGN(ObMigrationChooseSrcHelperInitParam);
};
} // namespace storage
} // namespace oceanbase
#endif

@ -1101,7 +1101,8 @@ ObMigrationOpArg::ObMigrationOpArg()
src_(),
dst_(),
data_src_(),
paxos_replica_number_(0)
paxos_replica_number_(0),
prioritize_same_zone_src_(false)
{
}

@ -1127,6 +1128,7 @@ void ObMigrationOpArg::reset()
dst_.reset();
data_src_.reset();
paxos_replica_number_ = 0;
prioritize_same_zone_src_ = false;
}

/******************ObTabletsTransferArg*********************/
@ -2499,53 +2501,6 @@ int ObLSMemberListInfo::assign(const ObLSMemberListInfo &info)
return ret;
}

ObMigrationChooseSrcHelperInitParam::ObMigrationChooseSrcHelperInitParam()
: tenant_id_(OB_INVALID_ID),
ls_id_(),
local_clog_checkpoint_scn_(),
arg_(),
info_(),
is_first_c_replica_(false)
{
}

void ObMigrationChooseSrcHelperInitParam::reset()
{
tenant_id_ = OB_INVALID_ID;
ls_id_.reset();
local_clog_checkpoint_scn_.reset();
arg_.reset();
info_.reset();
is_first_c_replica_ = false;
}

bool ObMigrationChooseSrcHelperInitParam::is_valid() const
{
return OB_INVALID_ID != tenant_id_
&& ls_id_.is_valid()
&& local_clog_checkpoint_scn_.is_valid()
&& arg_.is_valid()
&& info_.is_valid();
}

int ObMigrationChooseSrcHelperInitParam::assign(const ObMigrationChooseSrcHelperInitParam &param)
{
int ret = OB_SUCCESS;
if (!param.is_valid()) {
ret = OB_INVALID_ARGUMENT;
LOG_WARN("invalid argument!", K(ret), K(param));
} else if (OB_FAIL(info_.assign(param.info_))) {
LOG_WARN("failed to assign param", K(ret), K(param));
} else {
tenant_id_ = param.tenant_id_;
ls_id_ = param.ls_id_;
local_clog_checkpoint_scn_ = param.local_clog_checkpoint_scn_;
arg_ = param.arg_;
is_first_c_replica_ = param.is_first_c_replica_;
}
return ret;
}

int ObMacroBlockReuseMgr::get_macro_block_reuse_info(
const ObITable::TableKey &table_key,
const blocksstable::ObLogicMacroBlockId &logic_id,

@ -187,6 +187,7 @@ struct ObMigrationOpArg
common::ObReplicaMember dst_;
common::ObReplicaMember data_src_;
int64_t paxos_replica_number_;
bool prioritize_same_zone_src_;
};

struct ObTabletsTransferArg
@ -723,26 +724,6 @@ private:
DISALLOW_COPY_AND_ASSIGN(ObLSMemberListInfo);
};

struct ObMigrationChooseSrcHelperInitParam final
{
public:
ObMigrationChooseSrcHelperInitParam();
~ObMigrationChooseSrcHelperInitParam() = default;
void reset();
bool is_valid() const;
int assign(const ObMigrationChooseSrcHelperInitParam &param);

TO_STRING_KV(K_(tenant_id), K_(ls_id), K_(local_clog_checkpoint_scn), K_(arg), K_(info), K_(is_first_c_replica));
uint64_t tenant_id_;
share::ObLSID ls_id_;
share::SCN local_clog_checkpoint_scn_;
ObMigrationOpArg arg_;
ObLSMemberListInfo info_;
bool is_first_c_replica_; // whether the dst is the first C replica in the learner list
private:
DISALLOW_COPY_AND_ASSIGN(ObMigrationChooseSrcHelperInitParam);
};

}
}
#endif

@ -221,6 +221,7 @@ recover_table_concurrency
recover_table_dop
recyclebin_object_expire_time
redundancy_level
replica_parallel_migration_mode
resource_hard_limit
rootservice_async_task_queue_size
rootservice_async_task_thread_count

@ -560,10 +560,9 @@ TEST_F(TestChooseMigrationSourcePolicy, get_available_src_with_checkpoint_policy
EXPECT_EQ(OB_SUCCESS, mock_migrate_choose_helper_param(tenant_id, ls_id, local_ls_checkpoint_scn, mock_arg, param));
EXPECT_EQ(OB_SUCCESS, member_helper_.get_member_list_by_replica_type(tenant_id, ls_id,
mock_arg.dst_, param.info_, param.is_first_c_replica_));
ObStorageHASrcProvider::ChooseSourcePolicy policy;
EXPECT_EQ(OB_SUCCESS, get_checkpoint_policy(mock_arg, tenant_id, param.info_.learner_list_, policy));
EXPECT_EQ(OB_SUCCESS, get_checkpoint_policy(mock_arg, tenant_id, param.info_.learner_list_, param.policy_, param.use_c_replica_policy_));
ObStorageHASrcInfo src_info;
EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(param, policy, &storage_rpc_, &member_helper_));
EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(param, &storage_rpc_, &member_helper_));
EXPECT_EQ(OB_SUCCESS, choose_src_helper_.get_available_src(mock_arg, src_info));
common::ObAddr expect_addr;
EXPECT_EQ(OB_SUCCESS, mock_addr("192.168.1.1:1234", expect_addr));
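Every test below follows the same reworked wiring: the get_*_policy helpers now write the chosen policy and the use_c_replica_policy flag into the init param, and the helper's init no longer takes a separate policy argument. The flag itself keeps the meaning documented in the header earlier in this diff, which a standalone sketch captures:

// Illustrative: mirrors the documented meaning of use_c_replica_policy_,
// i.e. the destination is a C replica and a C replica already exists.
static bool compute_use_c_replica_policy(const bool dst_is_c_replica,
                                         const long existing_c_replica_count)
{
  return dst_is_c_replica && existing_c_replica_count > 0;
}
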
@ -597,11 +596,9 @@ TEST_F(TestChooseMigrationSourcePolicy, get_available_src_with_rs_recommend)
ObMigrationChooseSrcHelperInitParam param;
EXPECT_EQ(OB_SUCCESS, mock_migrate_choose_helper_param(tenant_id, ls_id, local_ls_checkpoint_scn, mock_arg, param));
EXPECT_EQ(OB_SUCCESS, member_helper_.get_member_list_by_replica_type(tenant_id, ls_id,
mock_arg.dst_, param.info_, param.is_first_c_replica_));
ObStorageHASrcProvider::ChooseSourcePolicy policy;
EXPECT_EQ(OB_SUCCESS, get_recommand_policy(mock_arg, tenant_id, param.info_.learner_list_, policy));
mock_arg.dst_, param.info_, param.is_first_c_replica_));
EXPECT_EQ(OB_SUCCESS, get_recommand_policy(mock_arg, tenant_id, param.info_.learner_list_, param.policy_, param.use_c_replica_policy_));
ObStorageHASrcInfo src_info;
EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(param, policy, &storage_rpc_, &member_helper_));
EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(param, &storage_rpc_, &member_helper_));
EXPECT_EQ(OB_SUCCESS, choose_src_helper_.get_available_src(mock_arg, src_info));
common::ObAddr expect_addr;
EXPECT_EQ(OB_SUCCESS, mock_addr("192.168.1.4:1234", expect_addr));
@ -636,11 +633,9 @@ TEST_F(TestChooseMigrationSourcePolicy, idc_mode_idc_leader)
ObMigrationChooseSrcHelperInitParam param;
EXPECT_EQ(OB_SUCCESS, mock_migrate_choose_helper_param(tenant_id, ls_id, local_ls_checkpoint_scn, mock_arg, param));
EXPECT_EQ(OB_SUCCESS, member_helper_.get_member_list_by_replica_type(tenant_id, ls_id,
mock_arg.dst_, param.info_, param.is_first_c_replica_));
ObStorageHASrcProvider::ChooseSourcePolicy policy;
EXPECT_EQ(OB_SUCCESS, get_idc_policy(mock_arg, tenant_id, param.info_.learner_list_, policy));
mock_arg.dst_, param.info_, param.is_first_c_replica_));
EXPECT_EQ(OB_SUCCESS, get_idc_policy(mock_arg, tenant_id, param.info_.learner_list_, param.policy_, param.use_c_replica_policy_));
ObStorageHASrcInfo src_info;
EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(param, policy, &storage_rpc_, &member_helper_));
EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(param, &storage_rpc_, &member_helper_));
EXPECT_EQ(OB_SUCCESS, mock_locality_manager(MOCKLOCALITY::IDC_MODE_IDC_LEADER, locality_manager_));
EXPECT_EQ(ObStorageHASrcProvider::ChooseSourcePolicy::IDC, choose_src_helper_.get_provider()->get_policy_type());
static_cast<ObMigrationSrcByLocationProvider *>(choose_src_helper_.get_provider())->set_locality_manager_(&locality_manager_);
@ -678,11 +673,9 @@ TEST_F(TestChooseMigrationSourcePolicy, idc_mode_idc_follower)
ObMigrationChooseSrcHelperInitParam param;
EXPECT_EQ(OB_SUCCESS, mock_migrate_choose_helper_param(tenant_id, ls_id, local_ls_checkpoint_scn, mock_arg, param));
EXPECT_EQ(OB_SUCCESS, member_helper_.get_member_list_by_replica_type(tenant_id, ls_id,
mock_arg.dst_, param.info_, param.is_first_c_replica_));
ObStorageHASrcProvider::ChooseSourcePolicy policy;
EXPECT_EQ(OB_SUCCESS, get_idc_policy(mock_arg, tenant_id, param.info_.learner_list_, policy));
mock_arg.dst_, param.info_, param.is_first_c_replica_));
EXPECT_EQ(OB_SUCCESS, get_idc_policy(mock_arg, tenant_id, param.info_.learner_list_, param.policy_, param.use_c_replica_policy_));
ObStorageHASrcInfo src_info;
EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(param, policy, &storage_rpc_, &member_helper_));
EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(param, &storage_rpc_, &member_helper_));
EXPECT_EQ(OB_SUCCESS, mock_locality_manager(MOCKLOCALITY::IDC_MODE_IDC_FOLLOWER, locality_manager_));
EXPECT_EQ(ObStorageHASrcProvider::ChooseSourcePolicy::IDC, choose_src_helper_.get_provider()->get_policy_type());
static_cast<ObMigrationSrcByLocationProvider *>(choose_src_helper_.get_provider())->set_locality_manager_(&locality_manager_);
@ -718,11 +711,9 @@ TEST_F(TestChooseMigrationSourcePolicy, idc_mode_region_leader)
ObMigrationChooseSrcHelperInitParam param;
EXPECT_EQ(OB_SUCCESS, mock_migrate_choose_helper_param(tenant_id, ls_id, local_ls_checkpoint_scn, mock_arg, param));
EXPECT_EQ(OB_SUCCESS, member_helper_.get_member_list_by_replica_type(tenant_id, ls_id,
mock_arg.dst_, param.info_, param.is_first_c_replica_));
ObStorageHASrcProvider::ChooseSourcePolicy policy;
EXPECT_EQ(OB_SUCCESS, get_idc_policy(mock_arg, tenant_id, param.info_.learner_list_, policy));
mock_arg.dst_, param.info_, param.is_first_c_replica_));
EXPECT_EQ(OB_SUCCESS, get_idc_policy(mock_arg, tenant_id, param.info_.learner_list_, param.policy_, param.use_c_replica_policy_));
ObStorageHASrcInfo src_info;
EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(param, policy, &storage_rpc_, &member_helper_));
EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(param, &storage_rpc_, &member_helper_));
EXPECT_EQ(OB_SUCCESS, mock_locality_manager(MOCKLOCALITY::IDC_MODE_REGION_LEADER, locality_manager_));
EXPECT_EQ(ObStorageHASrcProvider::ChooseSourcePolicy::IDC, choose_src_helper_.get_provider()->get_policy_type());
static_cast<ObMigrationSrcByLocationProvider *>(choose_src_helper_.get_provider())->set_locality_manager_(&locality_manager_);
@ -758,11 +749,9 @@ TEST_F(TestChooseMigrationSourcePolicy, idc_mode_region_follower)
ObMigrationChooseSrcHelperInitParam param;
EXPECT_EQ(OB_SUCCESS, mock_migrate_choose_helper_param(tenant_id, ls_id, local_ls_checkpoint_scn, mock_arg, param));
EXPECT_EQ(OB_SUCCESS, member_helper_.get_member_list_by_replica_type(tenant_id, ls_id,
mock_arg.dst_, param.info_, param.is_first_c_replica_));
ObStorageHASrcProvider::ChooseSourcePolicy policy;
EXPECT_EQ(OB_SUCCESS, get_idc_policy(mock_arg, tenant_id, param.info_.learner_list_, policy));
mock_arg.dst_, param.info_, param.is_first_c_replica_));
EXPECT_EQ(OB_SUCCESS, get_idc_policy(mock_arg, tenant_id, param.info_.learner_list_, param.policy_, param.use_c_replica_policy_));
ObStorageHASrcInfo src_info;
EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(param, policy, &storage_rpc_, &member_helper_));
EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(param, &storage_rpc_, &member_helper_));
EXPECT_EQ(OB_SUCCESS, mock_locality_manager(MOCKLOCALITY::IDC_MODE_REGION_FOLLOWER, locality_manager_));
EXPECT_EQ(ObStorageHASrcProvider::ChooseSourcePolicy::IDC, choose_src_helper_.get_provider()->get_policy_type());
static_cast<ObMigrationSrcByLocationProvider *>(choose_src_helper_.get_provider())->set_locality_manager_(&locality_manager_);
@ -796,11 +785,9 @@ TEST_F(TestChooseMigrationSourcePolicy, idc_mode_diff_region_leader)
ObMigrationChooseSrcHelperInitParam param;
EXPECT_EQ(OB_SUCCESS, mock_migrate_choose_helper_param(tenant_id, ls_id, local_ls_checkpoint_scn, mock_arg, param));
EXPECT_EQ(OB_SUCCESS, member_helper_.get_member_list_by_replica_type(tenant_id, ls_id,
mock_arg.dst_, param.info_, param.is_first_c_replica_));
ObStorageHASrcProvider::ChooseSourcePolicy policy;
EXPECT_EQ(OB_SUCCESS, get_idc_policy(mock_arg, tenant_id, param.info_.learner_list_, policy));
mock_arg.dst_, param.info_, param.is_first_c_replica_));
EXPECT_EQ(OB_SUCCESS, get_idc_policy(mock_arg, tenant_id, param.info_.learner_list_, param.policy_, param.use_c_replica_policy_));
ObStorageHASrcInfo src_info;
EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(param, policy, &storage_rpc_, &member_helper_));
EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(param, &storage_rpc_, &member_helper_));
EXPECT_EQ(OB_SUCCESS, mock_locality_manager(MOCKLOCALITY::IDC_MODE_DIFF_REGION_LEADER, locality_manager_));
EXPECT_EQ(ObStorageHASrcProvider::ChooseSourcePolicy::IDC, choose_src_helper_.get_provider()->get_policy_type());
static_cast<ObMigrationSrcByLocationProvider *>(choose_src_helper_.get_provider())->set_locality_manager_(&locality_manager_);
@ -835,11 +822,9 @@ TEST_F(TestChooseMigrationSourcePolicy, idc_mode_diff_region_follower)
ObMigrationChooseSrcHelperInitParam param;
EXPECT_EQ(OB_SUCCESS, mock_migrate_choose_helper_param(tenant_id, ls_id, local_ls_checkpoint_scn, mock_arg, param));
EXPECT_EQ(OB_SUCCESS, member_helper_.get_member_list_by_replica_type(tenant_id, ls_id,
mock_arg.dst_, param.info_, param.is_first_c_replica_));
ObStorageHASrcProvider::ChooseSourcePolicy policy;
EXPECT_EQ(OB_SUCCESS, get_idc_policy(mock_arg, tenant_id, param.info_.learner_list_, policy));
mock_arg.dst_, param.info_, param.is_first_c_replica_));
EXPECT_EQ(OB_SUCCESS, get_idc_policy(mock_arg, tenant_id, param.info_.learner_list_, param.policy_, param.use_c_replica_policy_));
ObStorageHASrcInfo src_info;
EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(param, policy, &storage_rpc_, &member_helper_));
EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(param, &storage_rpc_, &member_helper_));
EXPECT_EQ(OB_SUCCESS, mock_locality_manager(MOCKLOCALITY::IDC_MODE_DIFF_REGION_FOLLOWER, locality_manager_));
EXPECT_EQ(ObStorageHASrcProvider::ChooseSourcePolicy::IDC, choose_src_helper_.get_provider()->get_policy_type());
static_cast<ObMigrationSrcByLocationProvider *>(choose_src_helper_.get_provider())->set_locality_manager_(&locality_manager_);
@ -875,11 +860,9 @@ TEST_F(TestChooseMigrationSourcePolicy, region_mode_region_follower)
ObMigrationChooseSrcHelperInitParam param;
EXPECT_EQ(OB_SUCCESS, mock_migrate_choose_helper_param(tenant_id, ls_id, local_ls_checkpoint_scn, mock_arg, param));
EXPECT_EQ(OB_SUCCESS, member_helper_.get_member_list_by_replica_type(tenant_id, ls_id,
mock_arg.dst_, param.info_, param.is_first_c_replica_));
ObStorageHASrcProvider::ChooseSourcePolicy policy;
EXPECT_EQ(OB_SUCCESS, get_region_policy(mock_arg, tenant_id, param.info_.learner_list_, policy));
mock_arg.dst_, param.info_, param.is_first_c_replica_));
EXPECT_EQ(OB_SUCCESS, get_region_policy(mock_arg, tenant_id, param.info_.learner_list_, param.policy_, param.use_c_replica_policy_));
ObStorageHASrcInfo src_info;
EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(param, policy, &storage_rpc_, &member_helper_));
EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(param, &storage_rpc_, &member_helper_));
EXPECT_EQ(OB_SUCCESS, mock_locality_manager(MOCKLOCALITY::REGION_MODE_REGION_FOLLOWER, locality_manager_));
EXPECT_EQ(ObStorageHASrcProvider::ChooseSourcePolicy::REGION, choose_src_helper_.get_provider()->get_policy_type());
static_cast<ObMigrationSrcByLocationProvider *>(choose_src_helper_.get_provider())->set_locality_manager_(&locality_manager_);
@ -916,10 +899,9 @@ TEST_F(TestChooseMigrationSourcePolicy, region_mode_region_leader)
EXPECT_EQ(OB_SUCCESS, mock_migrate_choose_helper_param(tenant_id, ls_id, local_ls_checkpoint_scn, mock_arg, param));
EXPECT_EQ(OB_SUCCESS, member_helper_.get_member_list_by_replica_type(tenant_id, ls_id,
mock_arg.dst_, param.info_, param.is_first_c_replica_));
ObStorageHASrcProvider::ChooseSourcePolicy policy;
EXPECT_EQ(OB_SUCCESS, get_region_policy(mock_arg, tenant_id, param.info_.learner_list_, policy));
EXPECT_EQ(OB_SUCCESS, get_region_policy(mock_arg, tenant_id, param.info_.learner_list_, param.policy_, param.use_c_replica_policy_));
ObStorageHASrcInfo src_info;
EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(param, policy, &storage_rpc_, &member_helper_));
EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(param, &storage_rpc_, &member_helper_));
EXPECT_EQ(OB_SUCCESS, mock_locality_manager(MOCKLOCALITY::REGION_MODE_REGION_LEADER, locality_manager_));
EXPECT_EQ(ObStorageHASrcProvider::ChooseSourcePolicy::REGION, choose_src_helper_.get_provider()->get_policy_type());
static_cast<ObMigrationSrcByLocationProvider *>(choose_src_helper_.get_provider())->set_locality_manager_(&locality_manager_);
@ -955,10 +937,9 @@ TEST_F(TestChooseMigrationSourcePolicy, region_mode_diff_region_follower)
EXPECT_EQ(OB_SUCCESS, mock_migrate_choose_helper_param(tenant_id, ls_id, local_ls_checkpoint_scn, mock_arg, param));
EXPECT_EQ(OB_SUCCESS, member_helper_.get_member_list_by_replica_type(tenant_id, ls_id,
mock_arg.dst_, param.info_, param.is_first_c_replica_));
ObStorageHASrcProvider::ChooseSourcePolicy policy;
EXPECT_EQ(OB_SUCCESS, get_region_policy(mock_arg, tenant_id, param.info_.learner_list_, policy));
EXPECT_EQ(OB_SUCCESS, get_region_policy(mock_arg, tenant_id, param.info_.learner_list_, param.policy_, param.use_c_replica_policy_));
ObStorageHASrcInfo src_info;
EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(param, policy, &storage_rpc_, &member_helper_));
EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(param, &storage_rpc_, &member_helper_));
EXPECT_EQ(OB_SUCCESS, mock_locality_manager(MOCKLOCALITY::REGION_MODE_DIFF_REGION_FOLLOWER, locality_manager_));
EXPECT_EQ(ObStorageHASrcProvider::ChooseSourcePolicy::REGION, choose_src_helper_.get_provider()->get_policy_type());
static_cast<ObMigrationSrcByLocationProvider *>(choose_src_helper_.get_provider())->set_locality_manager_(&locality_manager_);
@ -993,10 +974,9 @@ TEST_F(TestChooseMigrationSourcePolicy, region_mode_diff_region_leader)
EXPECT_EQ(OB_SUCCESS, mock_migrate_choose_helper_param(tenant_id, ls_id, local_ls_checkpoint_scn, mock_arg, param));
EXPECT_EQ(OB_SUCCESS, member_helper_.get_member_list_by_replica_type(tenant_id, ls_id,
mock_arg.dst_, param.info_, param.is_first_c_replica_));
ObStorageHASrcProvider::ChooseSourcePolicy policy;
EXPECT_EQ(OB_SUCCESS, get_region_policy(mock_arg, tenant_id, param.info_.learner_list_, policy));
EXPECT_EQ(OB_SUCCESS, get_region_policy(mock_arg, tenant_id, param.info_.learner_list_, param.policy_, param.use_c_replica_policy_));
ObStorageHASrcInfo src_info;
EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(param, policy, &storage_rpc_, &member_helper_));
EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(param, &storage_rpc_, &member_helper_));
EXPECT_EQ(OB_SUCCESS, mock_locality_manager(MOCKLOCALITY::REGION_MODE_DIFF_REGION_LEADER, locality_manager_));
EXPECT_EQ(ObStorageHASrcProvider::ChooseSourcePolicy::REGION, choose_src_helper_.get_provider()->get_policy_type());
static_cast<ObMigrationSrcByLocationProvider *>(choose_src_helper_.get_provider())->set_locality_manager_(&locality_manager_);
@ -1035,10 +1015,9 @@ TEST_F(TestChooseMigrationSourcePolicy, get_available_src_with_rebuild)
EXPECT_EQ(OB_SUCCESS, mock_migrate_choose_helper_param(tenant_id, ls_id, local_ls_checkpoint_scn, mock_arg, param));
EXPECT_EQ(OB_SUCCESS, member_helper_.get_member_list_by_replica_type(tenant_id, ls_id,
mock_arg.dst_, param.info_, param.is_first_c_replica_));
ObStorageHASrcProvider::ChooseSourcePolicy policy;
EXPECT_EQ(OB_SUCCESS, get_idc_policy(mock_arg, tenant_id, param.info_.learner_list_, policy));
EXPECT_EQ(OB_SUCCESS, get_idc_policy(mock_arg, tenant_id, param.info_.learner_list_, param.policy_, param.use_c_replica_policy_));
ObStorageHASrcInfo src_info;
EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(param, policy, &storage_rpc_, &member_helper_));
EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(param, &storage_rpc_, &member_helper_));
EXPECT_EQ(OB_SUCCESS, mock_locality_manager(MOCKLOCALITY::IDC_MODE_IDC_LEADER, locality_manager_));
EXPECT_EQ(ObStorageHASrcProvider::ChooseSourcePolicy::IDC, choose_src_helper_.get_provider()->get_policy_type());
static_cast<ObMigrationSrcByLocationProvider *>(choose_src_helper_.get_provider())->set_locality_manager_(&locality_manager_);
@ -1084,10 +1063,9 @@ TEST_F(TestChooseMigrationSourcePolicy, src_provider_init_idc_fail)
param.arg_ = mock_arg;
EXPECT_EQ(OB_SUCCESS, member_helper_.get_member_list_by_replica_type(tenant_id, ls_id,
mock_arg.dst_, param.info_, param.is_first_c_replica_));
ObStorageHASrcProvider::ChooseSourcePolicy policy;
EXPECT_EQ(OB_INVALID_ARGUMENT, get_idc_policy(mock_arg, tenant_id, param.info_.learner_list_, policy));
EXPECT_EQ(OB_INVALID_ARGUMENT, get_idc_policy(mock_arg, tenant_id, param.info_.learner_list_, param.policy_, param.use_c_replica_policy_));
ObStorageHASrcInfo src_info;
EXPECT_EQ(OB_INVALID_ARGUMENT, choose_src_helper_.init(param, policy, &storage_rpc_, &member_helper_));
EXPECT_EQ(OB_INVALID_ARGUMENT, choose_src_helper_.init(param, &storage_rpc_, &member_helper_));
}
// test ObMigrationSrcByLocationProvider init fail
TEST_F(TestChooseMigrationSourcePolicy, src_provider_init_region_fail)
@ -1112,10 +1090,9 @@ TEST_F(TestChooseMigrationSourcePolicy, src_provider_init_region_fail)
param.arg_ = mock_arg;
EXPECT_EQ(OB_SUCCESS, member_helper_.get_member_list_by_replica_type(tenant_id, ls_id,
mock_arg.dst_, param.info_, param.is_first_c_replica_));
ObStorageHASrcProvider::ChooseSourcePolicy policy;
EXPECT_EQ(OB_INVALID_ARGUMENT, get_region_policy(mock_arg, tenant_id, param.info_.learner_list_, policy));
EXPECT_EQ(OB_INVALID_ARGUMENT, get_region_policy(mock_arg, tenant_id, param.info_.learner_list_, param.policy_, param.use_c_replica_policy_));
ObStorageHASrcInfo src_info;
EXPECT_EQ(OB_INVALID_ARGUMENT, choose_src_helper_.init(param, policy, &storage_rpc_, &member_helper_));
EXPECT_EQ(OB_INVALID_ARGUMENT, choose_src_helper_.init(param, &storage_rpc_, &member_helper_));
}
// test ObRSRecommendSrcProvider init fail
TEST_F(TestChooseMigrationSourcePolicy, src_provider_init_recommand_fail)
@ -1140,10 +1117,9 @@ TEST_F(TestChooseMigrationSourcePolicy, src_provider_init_recommand_fail)
param.arg_ = mock_arg;
EXPECT_EQ(OB_SUCCESS, member_helper_.get_member_list_by_replica_type(tenant_id, ls_id,
mock_arg.dst_, param.info_, param.is_first_c_replica_));
ObStorageHASrcProvider::ChooseSourcePolicy policy;
EXPECT_EQ(OB_INVALID_ARGUMENT, get_recommand_policy(mock_arg, tenant_id, param.info_.learner_list_, policy));
EXPECT_EQ(OB_INVALID_ARGUMENT, get_recommand_policy(mock_arg, tenant_id, param.info_.learner_list_, param.policy_, param.use_c_replica_policy_));
ObStorageHASrcInfo src_info;
EXPECT_EQ(OB_INVALID_ARGUMENT, choose_src_helper_.init(param, policy, &storage_rpc_, &member_helper_));
EXPECT_EQ(OB_INVALID_ARGUMENT, choose_src_helper_.init(param, &storage_rpc_, &member_helper_));
}
// test ObMigrationSrcByCheckpointProvider init fail
TEST_F(TestChooseMigrationSourcePolicy, src_provider_init_checkpoint_fail)
@ -1168,10 +1144,9 @@ TEST_F(TestChooseMigrationSourcePolicy, src_provider_init_checkpoint_fail)
|
||||
param.arg_ = mock_arg;
|
||||
EXPECT_EQ(OB_SUCCESS, member_helper_.get_member_list_by_replica_type(tenant_id, ls_id,
|
||||
mock_arg.dst_, param.info_, param.is_first_c_replica_));
|
||||
ObStorageHASrcProvider::ChooseSourcePolicy policy;
|
||||
EXPECT_EQ(OB_INVALID_ARGUMENT, get_checkpoint_policy(mock_arg, tenant_id, param.info_.learner_list_, policy));
|
||||
EXPECT_EQ(OB_INVALID_ARGUMENT, get_checkpoint_policy(mock_arg, tenant_id, param.info_.learner_list_, param.policy_, param.use_c_replica_policy_));
|
||||
ObStorageHASrcInfo src_info;
|
||||
EXPECT_EQ(OB_INVALID_ARGUMENT, choose_src_helper_.init(param, policy, &storage_rpc_, &member_helper_));
|
||||
EXPECT_EQ(OB_INVALID_ARGUMENT, choose_src_helper_.init(param, &storage_rpc_, &member_helper_));
|
||||
}
|
||||
// test check replica valid fail
|
||||
// candidate addr: ["192.168.1.1:1234", "192.168.1.2:1234", "192.168.1.3:1234", "192.168.1.4:1234", "192.168.1.5:1234"]
|
||||
@ -1214,10 +1189,9 @@ TEST_F(TestChooseMigrationSourcePolicy, get_available_src_condition_fail)
|
||||
EXPECT_EQ(OB_SUCCESS, mock_migrate_choose_helper_param(tenant_id, ls_id, local_ls_checkpoint_scn, mock_arg, param));
|
||||
EXPECT_EQ(OB_SUCCESS, member_helper_.get_member_list_by_replica_type(tenant_id, ls_id,
|
||||
mock_arg.dst_, param.info_, param.is_first_c_replica_));
|
||||
ObStorageHASrcProvider::ChooseSourcePolicy policy;
|
||||
EXPECT_EQ(OB_SUCCESS, get_checkpoint_policy(mock_arg, tenant_id, param.info_.learner_list_, policy));
|
||||
EXPECT_EQ(OB_SUCCESS, get_checkpoint_policy(mock_arg, tenant_id, param.info_.learner_list_, param.policy_, param.use_c_replica_policy_));
|
||||
ObStorageHASrcInfo src_info;
|
||||
EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(param, policy, &storage_rpc_, &member_helper_));
|
||||
EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(param, &storage_rpc_, &member_helper_));
|
||||
EXPECT_EQ(OB_SUCCESS, choose_src_helper_.get_available_src(mock_arg, src_info));
|
||||
common::ObAddr expect_addr;
|
||||
EXPECT_EQ(OB_SUCCESS, mock_addr("192.168.1.4:1234", expect_addr));
|
||||
@ -1264,10 +1238,9 @@ TEST_F(TestChooseMigrationSourcePolicy, idc_mode_check_replica_fail)
|
||||
EXPECT_EQ(OB_SUCCESS, mock_migrate_choose_helper_param(tenant_id, ls_id, local_ls_checkpoint_scn, mock_arg, param));
|
||||
EXPECT_EQ(OB_SUCCESS, member_helper_.get_member_list_by_replica_type(tenant_id, ls_id,
|
||||
mock_arg.dst_, param.info_, param.is_first_c_replica_));
|
||||
ObStorageHASrcProvider::ChooseSourcePolicy policy;
|
||||
EXPECT_EQ(OB_SUCCESS, get_idc_policy(mock_arg, tenant_id, param.info_.learner_list_, policy));
|
||||
EXPECT_EQ(OB_SUCCESS, get_idc_policy(mock_arg, tenant_id, param.info_.learner_list_, param.policy_, param.use_c_replica_policy_));
|
||||
ObStorageHASrcInfo src_info;
|
||||
EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(param, policy, &storage_rpc_, &member_helper_));
|
||||
EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(param, &storage_rpc_, &member_helper_));
|
||||
EXPECT_EQ(OB_SUCCESS, mock_locality_manager(MOCKLOCALITY::IDC_MODE_IDC_LEADER, locality_manager_));
|
||||
EXPECT_EQ(ObStorageHASrcProvider::ChooseSourcePolicy::IDC, choose_src_helper_.get_provider()->get_policy_type());
|
||||
static_cast<ObMigrationSrcByLocationProvider *>(choose_src_helper_.get_provider())->set_locality_manager_(&locality_manager_);
|
||||
@ -1316,10 +1289,9 @@ TEST_F(TestChooseMigrationSourcePolicy, idc_mode_r_replica_init)
|
||||
EXPECT_EQ(OB_SUCCESS, mock_migrate_choose_helper_param(tenant_id, ls_id, local_ls_checkpoint_scn, mock_arg, param));
|
||||
EXPECT_EQ(OB_SUCCESS, member_helper_.get_member_list_by_replica_type(tenant_id, ls_id,
|
||||
mock_arg.dst_, param.info_, param.is_first_c_replica_));
|
||||
ObStorageHASrcProvider::ChooseSourcePolicy policy;
|
||||
EXPECT_EQ(OB_SUCCESS, get_idc_policy(mock_arg, tenant_id, param.info_.learner_list_, policy));
|
||||
EXPECT_EQ(OB_SUCCESS, get_idc_policy(mock_arg, tenant_id, param.info_.learner_list_, param.policy_, param.use_c_replica_policy_));
|
||||
ObStorageHASrcInfo src_info;
|
||||
EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(param, policy, &storage_rpc_, &member_helper_));
|
||||
EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(param, &storage_rpc_, &member_helper_));
|
||||
EXPECT_EQ(OB_SUCCESS, mock_locality_manager(MOCKLOCALITY::IDC_MODE_IDC_LEADER, locality_manager_));
|
||||
EXPECT_EQ(ObStorageHASrcProvider::ChooseSourcePolicy::IDC, choose_src_helper_.get_provider()->get_policy_type());
|
||||
static_cast<ObMigrationSrcByLocationProvider *>(choose_src_helper_.get_provider())->set_locality_manager_(&locality_manager_);
|
||||
@ -1369,10 +1341,9 @@ TEST_F(TestChooseMigrationSourcePolicy, c_replica_no_other)
  EXPECT_EQ(OB_SUCCESS, mock_migrate_choose_helper_param(tenant_id, ls_id, local_ls_checkpoint_scn, mock_arg, param));
  EXPECT_EQ(OB_SUCCESS, member_helper_.get_member_list_by_replica_type(tenant_id, ls_id,
      mock_arg.dst_, param.info_, param.is_first_c_replica_));
  ObStorageHASrcProvider::ChooseSourcePolicy policy;
  EXPECT_EQ(OB_SUCCESS, get_idc_policy(mock_arg, tenant_id, param.info_.learner_list_, policy));
  EXPECT_EQ(OB_SUCCESS, get_idc_policy(mock_arg, tenant_id, param.info_.learner_list_, param.policy_, param.use_c_replica_policy_));
  ObStorageHASrcInfo src_info;
  EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(param, policy, &storage_rpc_, &member_helper_));
  EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(param, &storage_rpc_, &member_helper_));
  EXPECT_EQ(OB_SUCCESS, mock_locality_manager(MOCKLOCALITY::IDC_MODE_IDC_LEADER, locality_manager_));
  EXPECT_EQ(ObStorageHASrcProvider::ChooseSourcePolicy::IDC, choose_src_helper_.get_provider()->get_policy_type());
  static_cast<ObMigrationSrcByLocationProvider *>(choose_src_helper_.get_provider())->set_locality_manager_(&locality_manager_);
@ -1396,9 +1367,10 @@ TEST_F(TestChooseMigrationSourcePolicy, c_replica_no_other)
TEST_F(TestChooseMigrationSourcePolicy, c_replica_have_other)
{
  MockLsMetaInfo ls_meta;
  // when replica type is F, won't fetch ls_info
  EXPECT_CALL(storage_rpc_, post_ls_meta_info_request(_, _, _, _))
      .WillOnce(Invoke(&ls_meta, &MockLsMetaInfo::post_ls_meta_info_request_parent_checkpoint))
      .WillOnce(Invoke(&ls_meta, &MockLsMetaInfo::post_ls_meta_info_request_min_checkpoint))
      // .WillOnce(Invoke(&ls_meta, &MockLsMetaInfo::post_ls_meta_info_request_min_checkpoint))
      .WillOnce(Invoke(&ls_meta, &MockLsMetaInfo::post_ls_meta_info_request_min_checkpoint))
      .WillOnce(Invoke(&ls_meta, &MockLsMetaInfo::post_ls_meta_info_request_base_checkpoint))
      .WillOnce(Invoke(&ls_meta, &MockLsMetaInfo::post_ls_meta_info_request_large_checkpoint))
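The EXPECT_CALL chain above scripts which checkpoint each mocked replica reports, one .WillOnce(...) per expected RPC, which is how the test steers which source wins. A small self-contained gmock sketch of the same sequencing idiom (the interface and values here are illustrative, not the OceanBase mock):

    #include <gmock/gmock.h>
    #include <gtest/gtest.h>

    using ::testing::Return;

    struct CheckpointSource {
      virtual ~CheckpointSource() = default;
      virtual int fetch_checkpoint() = 0;
    };

    struct MockCheckpointSource : CheckpointSource {
      MOCK_METHOD(int, fetch_checkpoint, (), (override));
    };

    TEST(WillOnceChain, RepliesInScriptedOrder) {
      MockCheckpointSource src;
      // Each WillOnce() answers exactly one call, in order, mirroring the
      // parent/min/base/large checkpoint hand-out in the diff above.
      EXPECT_CALL(src, fetch_checkpoint())
          .WillOnce(Return(100))   // parent checkpoint
          .WillOnce(Return(1))     // min checkpoint
          .WillOnce(Return(50))    // base checkpoint
          .WillOnce(Return(500));  // large checkpoint
      EXPECT_EQ(100, src.fetch_checkpoint());
      EXPECT_EQ(1, src.fetch_checkpoint());
      EXPECT_EQ(50, src.fetch_checkpoint());
      EXPECT_EQ(500, src.fetch_checkpoint());
    }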
@ -1422,11 +1394,11 @@ TEST_F(TestChooseMigrationSourcePolicy, c_replica_have_other)
  EXPECT_EQ(OB_SUCCESS, mock_migrate_choose_helper_param(tenant_id, ls_id, local_ls_checkpoint_scn, mock_arg, param));
  EXPECT_EQ(OB_SUCCESS, member_helper_.get_member_list_by_replica_type(tenant_id, ls_id,
      mock_arg.dst_, param.info_, param.is_first_c_replica_));
  ObStorageHASrcProvider::ChooseSourcePolicy policy;
  EXPECT_EQ(OB_SUCCESS, get_idc_policy(mock_arg, tenant_id, param.info_.learner_list_, policy));
  EXPECT_EQ(OB_SUCCESS, get_idc_policy(mock_arg, tenant_id, param.info_.learner_list_, param.policy_, param.use_c_replica_policy_));
  ObStorageHASrcInfo src_info;
  EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(param, policy, &storage_rpc_, &member_helper_));
  EXPECT_EQ(ObStorageHASrcProvider::ChooseSourcePolicy::C_TYPE_REPLICA, choose_src_helper_.get_provider()->get_policy_type());
  EXPECT_EQ(OB_SUCCESS, choose_src_helper_.init(param, &storage_rpc_, &member_helper_));
  EXPECT_EQ(true, param.use_c_replica_policy_);
  EXPECT_EQ(OB_SUCCESS, mock_locality_manager(MOCKLOCALITY::IDC_MODE_IDC_LEADER, locality_manager_));
  static_cast<ObMigrationSrcByLocationProvider *>(choose_src_helper_.get_provider())->set_locality_manager_(&locality_manager_);
  EXPECT_EQ(OB_SUCCESS, choose_src_helper_.get_available_src(mock_arg, src_info));
  common::ObAddr expect_addr;
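c_replica_have_other is the one test whose expected provider type is C_TYPE_REPLICA, and it additionally asserts that policy selection set param.use_c_replica_policy_ to true. Read together with c_replica_no_other (which stays on IDC), the selection appears to choose the C-replica policy only when the destination is a C replica and another C-replica source is available. A toy model of that branch, with hypothetical names, since the real decision lives in ObStorageHAChooseSrcHelper::get_policy_type():

    enum class Policy { IDC, C_TYPE_REPLICA };

    // Toy model: pick the C-replica policy only if the destination is a C replica
    // and some other C-replica source exists; otherwise fall back to the IDC policy.
    Policy pick_policy(const bool dst_is_c_replica, const bool has_other_c_replica,
                       bool &use_c_replica_policy) {
      use_c_replica_policy = dst_is_c_replica && has_other_c_replica;
      return use_c_replica_policy ? Policy::C_TYPE_REPLICA : Policy::IDC;
    }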
@ -511,58 +511,70 @@ static share::SCN mock_ckpt_inc(share::SCN &local_ls_checkpoint_scn)
  return result;
}

static int get_checkpoint_policy(const ObMigrationOpArg &arg, const uint64_t tenant_id,
static int get_checkpoint_policy(
    const ObMigrationOpArg &arg,
    const uint64_t tenant_id,
    const common::GlobalLearnerList &learner_list,
    ObStorageHASrcProvider::ChooseSourcePolicy &policy)
    ObStorageHASrcProvider::ChooseSourcePolicy &policy,
    bool &use_c_replica_policy)
{
  int ret = OB_SUCCESS;
  bool enable_choose_source_policy = false;
  const char *str = "idc";
  if (OB_FAIL(ObStorageHAChooseSrcHelper::get_policy_type(arg, tenant_id,
      enable_choose_source_policy, str, learner_list, policy))) {
      enable_choose_source_policy, str, learner_list, policy, use_c_replica_policy))) {
    LOG_WARN("failed to get policy type", K(ret), K(arg), K(tenant_id), K(learner_list));
  }
  return ret;
}

static int get_recommand_policy(const ObMigrationOpArg &arg, const uint64_t tenant_id,
static int get_recommand_policy(
    const ObMigrationOpArg &arg,
    const uint64_t tenant_id,
    const common::GlobalLearnerList &learner_list,
    ObStorageHASrcProvider::ChooseSourcePolicy &policy)
    ObStorageHASrcProvider::ChooseSourcePolicy &policy,
    bool &use_c_replica_policy)
{
  int ret = OB_SUCCESS;
  bool enable_choose_source_policy = true;
  const char *str = "idc";
  if (OB_FAIL(ObStorageHAChooseSrcHelper::get_policy_type(arg, tenant_id,
      enable_choose_source_policy, str, learner_list, policy))) {
      enable_choose_source_policy, str, learner_list, policy, use_c_replica_policy))) {
    LOG_WARN("failed to get policy type", K(ret), K(arg), K(tenant_id), K(learner_list));
  }
  return ret;
}

static int get_idc_policy(const ObMigrationOpArg &arg, const uint64_t tenant_id,
static int get_idc_policy(
    const ObMigrationOpArg &arg,
    const uint64_t tenant_id,
    const common::GlobalLearnerList &learner_list,
    ObStorageHASrcProvider::ChooseSourcePolicy &policy)
    ObStorageHASrcProvider::ChooseSourcePolicy &policy,
    bool &use_c_replica_policy)
{
  int ret = OB_SUCCESS;
  bool enable_choose_source_policy = true;
  const char *str = "idc";
  if (OB_FAIL(ObStorageHAChooseSrcHelper::get_policy_type(arg, tenant_id,
      enable_choose_source_policy, str, learner_list, policy))) {
      enable_choose_source_policy, str, learner_list, policy, use_c_replica_policy))) {
    LOG_WARN("failed to get policy type", K(ret), K(arg), K(tenant_id), K(learner_list));
  }
  return ret;
}

static int get_region_policy(const ObMigrationOpArg &arg, const uint64_t tenant_id,
static int get_region_policy(
    const ObMigrationOpArg &arg,
    const uint64_t tenant_id,
    const common::GlobalLearnerList &learner_list,
    ObStorageHASrcProvider::ChooseSourcePolicy &policy)
    ObStorageHASrcProvider::ChooseSourcePolicy &policy,
    bool &use_c_replica_policy)
{
  int ret = OB_SUCCESS;
  bool enable_choose_source_policy = true;
  const char *str = "region";
  if (OB_FAIL(ObStorageHAChooseSrcHelper::get_policy_type(arg, tenant_id,
      enable_choose_source_policy, str, learner_list, policy))) {
      enable_choose_source_policy, str, learner_list, policy, use_c_replica_policy))) {
    LOG_WARN("failed to get policy type", K(ret), K(arg), K(tenant_id), K(learner_list));
  }
  return ret;
}
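The four helpers above (checkpoint/recommand/idc/region) differ only in the policy string and the enable flag, and this commit threads the new use_c_replica_policy out-parameter through each of them identically. If more variants accumulate, a single parameterized helper would keep the call sites in step. A sketch built from the signatures visible in this hunk (get_policy_for is a hypothetical name):

    static int get_policy_for(
        const ObMigrationOpArg &arg,
        const uint64_t tenant_id,
        const common::GlobalLearnerList &learner_list,
        const char *policy_str,                  // "idc" or "region"
        const bool enable_choose_source_policy,
        ObStorageHASrcProvider::ChooseSourcePolicy &policy,
        bool &use_c_replica_policy)
    {
      int ret = OB_SUCCESS;
      if (OB_FAIL(ObStorageHAChooseSrcHelper::get_policy_type(arg, tenant_id,
          enable_choose_source_policy, policy_str, learner_list, policy, use_c_replica_policy))) {
        LOG_WARN("failed to get policy type", K(ret), K(arg), K(tenant_id), K(learner_list));
      }
      return ret;
    }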