fix ttl issue 42742997 & 42787173 & 42954103 & 42920788 & 42876827 & 42869248 & 42868078 & 42867515 & 42767501
parent dce23aca87, commit 81ddf03308
@@ -32,7 +32,7 @@ int ObHColumnDescriptor::from_string(const common::ObString &str)
   } else if (OB_FAIL(json_parser.init(&allocator))) {
     LOG_WARN("failed to init json parser", K(ret));
   } else if (OB_FAIL(json_parser.parse(str.ptr(), str.length(), ast))) {
-    LOG_WARN("failed to parse", K(ret), K(str));
+    LOG_DEBUG("failed to parse", K(ret), K(str));
+    ret = OB_SUCCESS;
   } else if (NULL != ast
              && ast->get_type() == json::JT_OBJECT

@@ -1736,7 +1736,7 @@ int ObTableService::fill_query_table_param(uint64_t table_id,
   } else if (OB_FAIL(schema_guard.get_table_schema(table_id, table_schema))) {
     LOG_WARN("get table schema failed", K(table_id), K(ret));
   } else if (OB_ISNULL(table_schema)) {
-    ret = OB_ERR_UNEXPECTED;
+    ret = OB_TABLE_NOT_EXIST;
     LOG_ERROR("NULL ptr", K(ret), K(table_schema));
   } else if (OB_FAIL(get_index_id_by_name(schema_guard, table_id, index_name, index_id,
                                           rowkey_columns_type, index_schema))) {
File diff suppressed because it is too large
@@ -35,7 +35,9 @@ public :
        task_end_time_(OB_INVALID_ID),
        failure_times_(0),
        rsp_time_(OB_INVALID_ID),
-       is_dirty_(false) {}
+       is_dirty_(false),
+       is_moved_(false),
+       need_refresh_(true) {}
   bool is_valid()
   {
     return task_info_.is_valid() && ttl_para_.is_valid();

@@ -43,10 +45,9 @@ public :

   TO_STRING_KV(K_(task_info), K_(task_status), K_(ttl_para), K_(task_start_time),
                K_(last_modify_time), K_(task_end_time), K_(failure_times),
-               K_(rsp_time), K_(is_dirty));
+               K_(rsp_time), K_(is_dirty), K_(is_moved), K_(need_refresh));

 public:
   ObTTLTaskInfo task_info_;
   common::ObTTLTaskStatus task_status_;
   ObTTLPara ttl_para_;

@@ -57,7 +58,9 @@ public:

   int64_t rsp_time_;
   bool is_invalid_;
-  bool is_dirty_;
+  bool is_dirty_; // should sync sys table for tasks
+  bool is_moved_;
+  bool need_refresh_; // should refresh task from task table
 };

 class OBTTLTimerPeriodicTask : public common::ObTimerTask {

@@ -80,7 +83,7 @@ public:
               bool is_usr_trigger, obrpc::ObTTLRequestArg::TTLRequestType cmd);
   int report_task_status(ObTTLTaskInfo& task_info,
                          ObTTLPara& task_para, bool& is_stop);
-  void on_leader_active(storage::ObIPartitionGroup* partition);
+  void on_leader_active(const ObPartitionKey& pkey);
   void on_schema_changed(uint64_t schema_changed_tenant_id);

   /*timer handle function*/
@@ -105,26 +108,40 @@ private:
         cmd_type_(obrpc::ObTTLRequestArg::TTL_INVALID_TYPE),
         rsp_time_(OB_INVALID_ID),
         state_(common::ObTTLTaskStatus::OB_TTL_TASK_INVALID),
-        is_droped_(false) {}
-    PartTasksMap part_task_map_;
-    common::ObArenaAllocator allocator_;
-    uint64_t tenant_id_;
-    uint64_t task_id_;
-    bool is_usr_trigger_;
-    bool need_check_; /*need scan partition & check*/
-    bool is_dirty_; /*need check the current ctx task*/
-    bool ttl_continue_;
-    obrpc::ObTTLRequestArg::TTLRequestType cmd_type_;
-    int64_t rsp_time_;
-    common::ObTTLTaskStatus state_;
-    bool is_droped_;
-
-  public:
+        is_droped_(false),
+        is_finished_(false)
+    {}
+    void destory()
+    {
+      part_task_map_.destroy();
+      allocator_.reset();
+    }
+    TO_STRING_KV(K_(tenant_id),
+                 K_(task_id),
+                 K_(is_usr_trigger),
+                 K_(need_check),
+                 K_(is_dirty),
+                 K_(ttl_continue),
+                 K_(cmd_type),
+                 K_(rsp_time),
+                 K_(state),
+                 K_(is_droped),
+                 K_(is_finished));
+
+  public:
+    PartTasksMap part_task_map_;
+    common::ObArenaAllocator allocator_;
+    uint64_t tenant_id_;
+    uint64_t task_id_;
+    bool is_usr_trigger_;
+    bool need_check_; /*need scan partition & check*/
+    bool is_dirty_; /*need check the current ctx task*/
+    bool ttl_continue_;
+    obrpc::ObTTLRequestArg::TTLRequestType cmd_type_;
+    int64_t rsp_time_; // OB_INVALID_ID means no need response
+    common::ObTTLTaskStatus state_;
+    bool is_droped_; // tenant is droped
+    bool is_finished_; // all delete task is finished (or canceled)
   };

   typedef common::hash::ObHashMap<int64_t, ObTTLTenantInfo*> TenantPartsMap;
@@ -142,7 +159,7 @@ private:
   int generate_one_partition_task(ObTTLTaskInfo& task_info, ObTTLPara& para);
   int get_ttl_para_from_schema(const share::schema::ObTableSchema *table_schema,
                                ObTTLPara& para, bool& is_tableapi_schema);
-  int check_partition_can_gen_ttl(storage::ObIPartitionGroup *partition,
+  int check_partition_can_gen_ttl(const ObPartitionKey& pkey,
                                   ObTTLPara &para, bool& can_ttl);
   int check_and_do_rsp(uint64_t tenant_id);
   void mark_tenant_need_check(uint64_t tenant_id);

@@ -163,20 +180,22 @@ private:
   int sync_sys_table(ObPartitionKey& pkey);
   int construct_sys_table_record(ObTTLTaskCtx* ctx, common::ObTTLStatus& ttl_record);
   int try_schedule_task(ObTTLTenantInfo* tenant_info, ObTTLTaskCtx* ctx);
-  int try_schedule_remaining_tasks(ObTTLTenantInfo* tenant_info);
+  int try_schedule_remaining_tasks(ObTTLTenantInfo* tenant_info, const ObTTLTaskCtx *current_ctx);
   bool can_schedule_tenant(const ObTTLTenantInfo &tenant_info);
   bool can_schedule_task(const ObTTLTaskCtx &ttl_task);
-  int check_cmd_state_valid(common::ObTTLTaskStatus current_state, common::ObTTLTaskStatus incoming_state);
-  int deep_copy_all_tenant_ctxs(common::ObSArray<ObTTLTaskCtx>& ctx_array, common::ObArenaAllocator& allocator,
-                                uint64_t tenant_id);
-  int from_ttl_record(ObPartitionKey& pkey, common::ObTTLStatus& record,
-                      common::ObArenaAllocator& allocator);
+  int check_cmd_state_valid(const common::ObTTLTaskStatus current_state,
+                            const common::ObTTLTaskStatus incoming_state);
+  int copy_all_tenant_ctxs(common::ObSArray<ObTTLTaskCtx *>& ctx_array, uint64_t tenant_id);
+  int from_ttl_record(ObPartitionKey& pkey, common::ObTTLStatus& record, bool with_status = true, bool with_err_code = true);
   void mark_ttl_ctx_dirty(ObTTLTenantInfo* tenant_info, ObTTLTaskCtx* ctx);
   void check_ttl_tenant_state(uint64_t tenant_id);
   int transform_cmd_to_state(const obrpc::ObTTLRequestArg::TTLRequestType& cmd, common::ObTTLTaskStatus& state);
   int try_schedule_prepare_task(ObPartitionKey& pkey);
   void mark_tenant_checked(uint64_t tenant_id);
+  int mark_tenant_droped(const uint64_t& tenant_id);
+  int check_and_reset_droped_tenant();
   obrpc::ObTTLRequestArg::TTLRequestType transform_state_to_cmd(const int64_t state);
+  int refresh_partition_task(ObTTLTaskCtx &ttl_task, bool refresh_status, bool refresh_retcode = false);

 private:
   static const int64_t DEFAULT_TTL_BUCKET_NUM = 100;

@@ -192,6 +211,7 @@ private:
   common::ObTimer ttl_timer_;
   OBTTLTimerPeriodicTask periodic_task_;
   common::ObSpinLock lock_;
+  bool is_first_cmd_; // recovery tenant info after restart

 private:
   void stop();

@@ -203,7 +223,8 @@ private:
       periodic_delay_(TTL_PERIODIC_DELAY),
       ttl_timer_(),
       periodic_task_(),
-      lock_()
+      lock_(),
+      is_first_cmd_(true)
   {}
   ~ObTTLManager() {}
 };
@@ -55,10 +55,10 @@ int ObTableTTLDeleteTask::init(const ObTTLPara &ttl_para, ObTTLTaskInfo &ttl_inf
   ObRowkey rowkey;
   int64_t pos = 0;
   if (OB_FAIL(rowkey.deserialize(allocator_, ttl_info.row_key_.ptr(), ttl_info.row_key_.length(), pos))) {
-    LOG_WARN("fail to deserialize rowkey", K(ret));
+    LOG_WARN("fail to deserialize rowkey", K(ret), K(ttl_info.row_key_));
   } else if (rowkey.get_obj_cnt() != 2) {
     ret = OB_INVALID_ARGUMENT;
-    LOG_WARN("invalid argument", K(ret), K(rowkey));
+    LOG_WARN("invalid argument", K(ret), K(rowkey), K(ttl_info.row_key_));
   } else {
     first_key_ = rowkey.get_obj_ptr()[0].get_string();
     second_key_ = rowkey.get_obj_ptr()[1].get_string();

@@ -326,7 +326,7 @@ int ObTableTTLDeleteTask::process_one()
   char *buf = static_cast<char *>(allocator_.alloc(buf_len));
   int64_t pos = 0;
   if (OB_FAIL(row_key.serialize(buf, buf_len, pos))) {
-    LOG_WARN("fail to deserialize", K(ret));
+    LOG_WARN("fail to serialize", K(ret));
   } else {
     result_key.assign_ptr(buf, buf_len);
   }
@@ -23,16 +23,6 @@ namespace oceanbase
 namespace rootserver
 {

-#define OB_TTL_RESPONSE_MASK (1 << 5)
-#define OB_TTL_STATUS_MASK (OB_TTL_RESPONSE_MASK - 1)
-
-#define SET_TASK_PURE_STATUS(status, state) ((status) = ((state) & OB_TTL_STATUS_MASK) + ((status & OB_TTL_RESPONSE_MASK)))
-#define SET_TASK_RESPONSE(status, state) ((status) |= (((state) & 1) << 5))
-#define SET_TASK_STATUS(status, pure_status, is_responsed) { SET_TASK_PURE_STATUS(status, pure_status), SET_TASK_RESPONSE(status, is_responsed); }
-
-#define EVAL_TASK_RESPONSE(status) (((status) & OB_TTL_RESPONSE_MASK) >> 5)
-#define EVAL_TASK_PURE_STATUS(status) (static_cast<ObTTLTaskStatus>((status) & OB_TTL_STATUS_MASK))
-
 ObClearTTLStatusHistoryTask::ObClearTTLStatusHistoryTask(ObRootService& rs)
   : root_service_(rs)
 {
@@ -158,6 +148,8 @@ int ObTTLTenantTaskMgr::add_tenant(uint64_t tenant_id)
     tenant_task.tenant_id_ = tenant_id;
     if (OB_FAIL(ten_task_arr_.push_back(tenant_task))) {
       LOG_WARN("fail to store tenant task", K(ret));
+    } else {
+      LOG_INFO("add tenant to tenant task array", K(tenant_id), K(tenant_task));
     }
   }
   return ret;

@@ -170,6 +162,7 @@ void ObTTLTenantTaskMgr::delete_tenant(uint64_t tenant_id)
     if (ten_task.tenant_id_ == tenant_id) {
       ten_task.reset();
       ten_task_arr_.remove(i);
+      LOG_INFO("remove tennat task in tenant task array", K(tenant_id), K(ten_task_arr_));
       break;
     }
   }

@@ -306,7 +299,7 @@ int ObTTLTenantTaskMgr::add_ttl_task(uint64_t tenant_id,
   }

   if (curr_state != next_state) {
-    if (OB_FAIL(update_task_status(tenant_id, rs_task.ttl_status_.task_id_, static_cast<int64_t>(next_state)))) {
+    if (OB_FAIL(update_task_status(tenant_id, rs_task.ttl_status_.task_id_, static_cast<int64_t>(next_state), *GCTX.sql_proxy_))) {
       LOG_WARN("fail to update ttl tasks", K(ret));
     } else {
       // update memory status only

@@ -348,7 +341,7 @@ int ObTTLTenantTaskMgr::add_ttl_task(uint64_t tenant_id,
   } else if (next_state != curr_state) {
     // update status
     if (OB_FAIL(update_task_status(tenant_id, rs_task.ttl_status_.task_id_,
-                                   static_cast<int64_t>(next_state)))) {
+                                   static_cast<int64_t>(next_state), *GCTX.sql_proxy_))) {
       LOG_WARN("fail to update ttl tasks", K(ret));
     } else {
       rs_task.all_responsed_ = false;
@@ -382,6 +375,12 @@ int ObTTLTenantTaskMgr::add_ttl_task(uint64_t tenant_id,
     }
   }

+  if (ret == OB_EAGAIN) {
+    // it's a success cmd, cannot return OB_EAGAIN to user
+    LOG_INFO("reset OB_EAGAIN to OB_SUCCESS, because ttl scheduler will resend later");
+    ret = OB_SUCCESS;
+  }
+
   return ret;
 }
@@ -398,7 +397,7 @@ int ObTTLTenantTaskMgr::add_ttl_task_internal(uint64_t tenant_id,
   if (OB_FAIL(get_tenant_ptr(tenant_id, tenant_ptr))) {
     LOG_WARN("fail to get tenant task ptr", K(ret));
   } else if (OB_FAIL(in_active_time(tenant_id, is_active_time))) {
-    LOG_WARN("fail to eval active time", K(ret));
+    LOG_WARN("fail to eval active time", K(ret));
   } else {
     bool enable_ttl = is_enable_ttl(tenant_id);
     ObTTLTenantTask& tenant_ref = *tenant_ptr;
@@ -437,7 +436,7 @@ bool ObTTLTenantTaskMgr::need_retry_task(RsTenantTask& rs_task)
 {
   bool bool_ret = false;
   int64_t cur_time = ObTimeUtility::current_time();
-  bool_ret = (cur_time - rs_task.ttl_status_.task_update_time_ < OB_TTL_TASK_RETRY_INTERVAL) ||
+  bool_ret = (cur_time - rs_task.ttl_status_.task_update_time_ >= OB_TTL_TASK_RETRY_INTERVAL) ||
              (rs_task.server_infos_.count() == 0);
   return bool_ret;
 }
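The hunk above flips the retry gate: an RS-side task is now re-dispatched only when its last status update is at least OB_TTL_TASK_RETRY_INTERVAL old (or when no server info has been recorded yet), instead of within that window. A minimal stand-alone sketch of the corrected predicate, using plain scalar parameters as a hypothetical stand-in for RsTenantTask:

    #include <cstdint>

    // Hypothetical simplification of need_retry_task(): retry only when the
    // last status update is older than the retry interval, or when no server
    // has been assigned to the task yet. Times are in microseconds, as in the diff.
    static const int64_t OB_TTL_TASK_RETRY_INTERVAL = 15 * 1000 * 1000; // 15s

    bool need_retry_task(int64_t cur_time_us, int64_t task_update_time_us, int64_t server_info_count)
    {
      return (cur_time_us - task_update_time_us >= OB_TTL_TASK_RETRY_INTERVAL) ||
             (server_info_count == 0);
    }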
@@ -485,7 +484,7 @@ int ObTTLTenantTaskMgr::process_tenant_tasks(uint64_t tenant_id)
   ObTTLTaskType ttl_task_type = ObTTLTaskType::OB_TTL_INVALID;

   LOG_INFO("process_tenant_tasks begin", K(tenant_id), K(task_count), K(status_responsed),
-           K(curr_state), K(task_count));
+           K(curr_state), K(task_count));

   if (task_count > 1) {
     if (!(curr_state == ObTTLTaskStatus::OB_RS_TTL_TASK_CANCEL ||

@@ -510,8 +509,8 @@ int ObTTLTenantTaskMgr::process_tenant_tasks(uint64_t tenant_id)
       }
     }
   } else {
-    LOG_INFO("process_tenant_tasks begin", K(tenant_id), K(task_count),
-             K(curr_state), K(task_count));
+    LOG_INFO("process_tenant_tasks begin", K(tenant_id), K(task_count), K(status_responsed),
+             K(curr_state), K(task_count));
     // task_count == 1
     if (status_responsed) {
       next_state = next_status(curr_state);

@@ -551,7 +550,7 @@ int ObTTLTenantTaskMgr::process_tenant_tasks(uint64_t tenant_id)

   if (OB_SUCC(ret) && curr_state != next_state) {
     if (OB_FAIL(update_task_status(tenant_id, cur_task.ttl_status_.task_id_,
-                                   static_cast<int64_t>(next_state)))) {
+                                   static_cast<int64_t>(next_state), *GCTX.sql_proxy_))) {
       LOG_WARN("fail to update ttl tasks", K(ret));
     } else {
       cur_task.all_responsed_ = false;
@@ -589,7 +588,8 @@ int ObTTLTenantTaskMgr::insert_tenant_task(ObTTLStatus& ttl_task)

 int ObTTLTenantTaskMgr::update_task_status(uint64_t tenant_id,
                                            uint64_t task_id,
-                                           int64_t status)
+                                           int64_t status,
+                                           common::ObISQLClient& proxy)
 {
   int ret = OB_SUCCESS;
   ObTTLStatusKey key(tenant_id, OB_INVALID_ID, OB_INVALID_ID, task_id);

@@ -611,7 +611,7 @@ int ObTTLTenantTaskMgr::update_task_status(uint64_t tenant_id,
   } else {
     if (OB_FAIL(ObTTLUtil::update_ttl_task(tenant_id,
                                            share::OB_ALL_KV_TTL_TASK_TNAME,
-                                           *GCTX.sql_proxy_,
+                                           proxy,
                                            key,
                                            update_fields))) {
       LOG_WARN("fail to update ttl task status.", K(ret), K(tenant_id), K(task_id), K(status));

@@ -627,12 +627,13 @@ int ObTTLTenantTaskMgr::delete_task(uint64_t tenant_id, uint64_t task_id)
 {
   int ret = OB_SUCCESS;
   ObTTLStatusKey key(tenant_id, OB_INVALID_ID, OB_INVALID_ID, task_id);
+  int64_t affected_rows = 0;
   if (OB_FAIL(ObTTLUtil::delete_ttl_task(tenant_id,
                                          share::OB_ALL_KV_TTL_TASK_TNAME,
-                                         *GCTX.sql_proxy_, key))) {
+                                         *GCTX.sql_proxy_, key, affected_rows))) {
     LOG_WARN("fail to delete ttl tasks status", K(ret), K(tenant_id), K(task_id));
   } else {
-    LOG_DEBUG("success to delete ttl tasks status", K(ret), K(tenant_id), K(task_id));
+    LOG_DEBUG("success to delete ttl tasks status", K(ret), K(tenant_id), K(task_id), K(affected_rows));
   }

   return ret;
@@ -724,6 +725,8 @@ int ObTTLTenantTaskMgr::refresh_tenant(uint64_t tenant_id)
     } else if (OB_FAIL(update_tenant_tasks(tenant_id, ttl_tasks))) {
       LOG_WARN("fail to update tenant tasks", K(ret), K(tenant_id));
     }
+
+    LOG_INFO("refresh tenant task from system table", K(tenant_id));
   }
   return ret;
 }
@@ -731,25 +734,19 @@ int ObTTLTenantTaskMgr::refresh_tenant(uint64_t tenant_id)
 int ObTTLTenantTaskMgr::refresh_all()
 {
   int ret = OB_SUCCESS;
-  if (!need_refresh_) {
-    ObArray<uint64_t> tenant_ids;
-
-    if (OB_FAIL(get_tenant_ids(tenant_ids))) {
-      LOG_WARN("fail to get tenant ids", K(ret));
-    } else {
-      for (size_t i = 0; OB_SUCC(ret) && i < tenant_ids.count(); ++i) {
-        uint64_t tenant_id = tenant_ids.at(i);
-        if (OB_FAIL(refresh_tenant(tenant_id))) {
-          LOG_WARN("fail to refresh tenant", K(ret), K(tenant_id));
-        }
-      }
-    }
-
-    if (OB_SUCC(ret) && tenant_ids.count() > 0) {
-      need_refresh_ = false;
-    }
+  ObArray<uint64_t> tenant_ids;
+  if (OB_FAIL(get_tenant_ids(tenant_ids))) {
+    LOG_WARN("fail to get tenant ids", K(ret));
+  } else {
+    LOG_INFO("get all tenant ids", K(tenant_ids));
+    for (size_t i = 0; OB_SUCC(ret) && i < tenant_ids.count(); ++i) {
+      uint64_t tenant_id = tenant_ids.at(i);
+      if (OB_FAIL(refresh_tenant(tenant_id))) {
+        LOG_WARN("fail to refresh tenant", K(ret), K(tenant_id));
+      }
+    }
   }

   return ret;
 }
@@ -803,8 +800,7 @@ int ObTTLTenantTaskMgr::alter_status_and_add_ttl_task(uint64_t tenant_id)
   if (OB_FAIL(in_active_time(tenant_id, is_active_time))) {
     LOG_WARN("fail to eval active time", K(ret));
   } else if (OB_FAIL(get_tenant_ptr(tenant_id, tenant_ptr))) {
-    need_refresh_ = true;
-    LOG_WARN("fail to get tenant task ptr", K(tenant_id), K(ret));
+    LOG_WARN("fail to get tenant task ptr, need refresh", K(tenant_id), K(ret));
   } else {
     ObTTLTenantTask& tenant_ref = *tenant_ptr;
     size_t task_count = tenant_ref.tasks_.count();

@@ -824,7 +820,7 @@ int ObTTLTenantTaskMgr::alter_status_and_add_ttl_task(uint64_t tenant_id)
     if (!status_responsed) {
       int64_t tmp_status = 0;
       SET_TASK_STATUS(tmp_status, cur_state, 1);
-      if (OB_FAIL(update_task_status(tenant_id, rs_task.ttl_status_.task_id_, tmp_status))) {
+      if (OB_FAIL(update_task_status(tenant_id, rs_task.ttl_status_.task_id_, tmp_status, *GCTX.sql_proxy_))) {
        LOG_WARN("fail to update ttl tasks", K(ret));
       } else {
         rs_task.set_servers_not_responsed();

@@ -862,7 +858,7 @@ int ObTTLTenantTaskMgr::alter_status_and_add_ttl_task(uint64_t tenant_id)
        * send move to servers, update status
        */
       LOG_INFO("alter status and add ttl task", K(next_state));
-      if (OB_FAIL(update_task_status(tenant_id, rs_task.ttl_status_.task_id_, next_state))) {
+      if (OB_FAIL(update_task_status(tenant_id, rs_task.ttl_status_.task_id_, next_state, *GCTX.sql_proxy_))) {
        LOG_WARN("fail to update ttl tasks", K(ret));
       } else {
         LOG_INFO("alter status and add ttl task", K(next_state));
@@ -1098,7 +1094,7 @@ int ObTTLTenantTaskMgr::dispatch_ttl_request(const TTLServerInfos& server_infos,
     }
   }

-  LOG_INFO("send ttl server ttl request", K(ret), K(arg), K(send_cnt), K(server_infos.count()));
+  LOG_INFO("send ttl server ttl request", K(ret), K(arg), K(send_cnt), K(server_infos.count()), K(server_infos));
   return ret;
 }

@@ -1210,8 +1206,12 @@ void ObTTLTenantTaskMgr::refresh_deleted_tenants()
       exist = (del_ten_arr_.at(k) == tenant_id);
     }

-    if (!exist && OB_FAIL(del_ten_arr_.push_back(tenant_id))) {
-      LOG_WARN("fail to store deleted tenant id", K(ret));
+    if (!exist) {
+      if (OB_FAIL(del_ten_arr_.push_back(tenant_id))) {
+        LOG_WARN("fail to store deleted tenant id", K(ret));
+      } else {
+        LOG_INFO("add tennat id to del tenant array", K(ret), K(tenant_id), K(del_ten_arr_));
+      }
     }
   }
 }
@@ -1248,6 +1248,7 @@ int ObTTLTenantTaskMgr::process_tenant_task_rsp(uint64_t tenant_id,
                                                 const ObAddr& server_addr)
 {
   int ret = OB_SUCCESS;
+  TTLMGR.refresh_all();
   lib::ObMutexGuard guard(mutex_);
   RsTenantTask* rs_task_ptr = NULL;

@@ -1255,8 +1256,10 @@ int ObTTLTenantTaskMgr::process_tenant_task_rsp(uint64_t tenant_id,
     LOG_WARN("fail to get tasks ptr", K(ret), K(tenant_id), K(task_id));
   } else {
     RsTenantTask& rs_task = *rs_task_ptr;

     if (OB_FAIL(rsp_task_status(static_cast<ObTTLTaskType>(task_type), EVAL_TASK_PURE_STATUS(rs_task.ttl_status_.status_)))) {
-      LOG_WARN("response task type incorrect", K(ret), K(tenant_id), K(task_id), K(task_type));
+      LOG_WARN("response task type incorrect",
+               K(ret), K(tenant_id), K(task_id), K(task_type), K(EVAL_TASK_PURE_STATUS(rs_task.ttl_status_.status_)));
     } else if (OB_FAIL(rs_task.set_server_responsed(server_addr))) {
       LOG_WARN("fail to set server responsed", K(ret), K(tenant_id), K(task_id));
     } else if (!EVAL_TASK_RESPONSE(rs_task.ttl_status_.status_) &&
@@ -1282,22 +1285,49 @@ int ObTTLTenantTaskMgr::update_task_on_all_responsed(RsTenantTask& task)
   ObTTLTaskStatus next_state, cur_state;
   cur_state = EVAL_TASK_PURE_STATUS(task.ttl_status_.status_);
   next_state = next_status(cur_state);
+  bool is_move = false;

   int64_t task_status = static_cast<int64_t>(next_state);
   if (next_state == cur_state) {
     // move or suspend
     SET_TASK_RESPONSE(task_status, 1);
+    if (cur_state == OB_RS_TTL_TASK_MOVE) {
+      is_move = true;
+    }
   }

   if (task.ttl_status_.status_ == task_status) {
     // SUSPEND or MOVED
-  } else if (OB_FAIL(update_task_status(tenant_id, task.ttl_status_.task_id_, task_status))) {
-    LOG_WARN("fail to update ttl tasks", K(ret), K(task.ttl_status_.task_id_), K(cur_state), K(next_state), K(task_status));
   } else {
-    // update stauts and update time
-    task.ttl_status_.status_ = task_status;
-    task.all_responsed_ = false;
-    task.set_servers_not_responsed();
+    ObMySQLTransaction trans;
+    uint64_t task_id = task.ttl_status_.task_id_;
+    if (OB_ISNULL(GCTX.sql_proxy_)) {
+      ret = OB_ERR_UNEXPECTED;
+      LOG_WARN("unexpected null GCTX.sql_proxy_", K(ret));
+    } else if (OB_FAIL(trans.start(GCTX.sql_proxy_, tenant_id))) {
+      LOG_WARN("fail to start transation", K(ret), K(tenant_id));
+    } else if (OB_FAIL(update_task_status(tenant_id, task_id, task_status, *GCTX.sql_proxy_))) {
+      LOG_WARN("fail to update ttl tasks", K(ret), K(task_id), K(cur_state), K(next_state), K(task_status));
+    } else if (is_move && OB_FAIL(ObTTLUtil::remove_all_task_to_history_table(tenant_id, task_id, trans))) {
+      // NOTE: if parition was removed and observer restart, task won't be moved by observer itself
+      LOG_WARN("fail to move task to history table", K(tenant_id), K(task_id));
+    } else {}
+
+    if (trans.is_started()) {
+      bool commit = (OB_SUCCESS == ret);
+      int tmp_ret = ret;
+      if (OB_FAIL(trans.end(commit))) {
+        LOG_WARN("faile to end trans", "commit", commit, K(ret));
+      }
+      ret = tmp_ret == OB_SUCCESS ? ret : tmp_ret;
+    }
+
+    if (OB_SUCC(ret)) {
+      // update stauts and update time
+      task.ttl_status_.status_ = task_status;
+      task.all_responsed_ = false;
+      task.set_servers_not_responsed();
+    }
   }

   return ret;
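The rewritten branch above wraps the status update and the move-to-history step in one transaction, so that on the MOVE path either both survive or neither does. A minimal sketch of that commit-or-rollback pattern, with a hypothetical Transaction type and do_update/do_move callbacks standing in for ObMySQLTransaction, update_task_status and remove_all_task_to_history_table:

    // Hypothetical Transaction interface; the calls mirror the ones in the diff
    // (start / end(commit) / is_started), but this is only an illustrative stub.
    struct Transaction {
      bool started = false;
      int start() { started = true; return 0; }            // begin the transaction
      int end(bool commit) { (void)commit; started = false; return 0; } // commit or roll back
      bool is_started() const { return started; }
    };

    // Run both steps inside one transaction: if either fails, nothing is committed.
    int update_and_move(Transaction &trans, int (*do_update)(), int (*do_move)(), bool is_move)
    {
      int ret = 0;
      if (0 != (ret = trans.start())) {
        // failed to open the transaction
      } else if (0 != (ret = do_update())) {
        // status update failed; the transaction is rolled back below
      } else if (is_move && 0 != (ret = do_move())) {
        // copying rows to the history table failed; roll back the update too
      }
      if (trans.is_started()) {
        const bool commit = (0 == ret);
        const int tmp_ret = trans.end(commit);  // commit only when every step succeeded
        ret = (0 == ret) ? tmp_ret : ret;       // keep the first error
      }
      return ret;
    }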
@@ -1389,17 +1419,23 @@ void ObTTLScheduler::runTimerTask()
 int RsTenantTask::set_server_responsed(const ObAddr& server_addr)
 {
   int ret = OB_SUCCESS;
+  bool find_server = false;
   TTLServerInfos& server_infos = server_infos_;
   if (OB_UNLIKELY(!server_addr.is_valid())) {
     ret = OB_INVALID_ARGUMENT;
     LOG_WARN("invalid arguments", K(ret), K(server_addr));
   } else {
-    for (int64_t i = 0; i < server_infos.count(); ++i) {
+    for (int64_t i = 0; i < server_infos.count() && !find_server; ++i) {
       if (server_addr == server_infos.at(i).addr_) {
         server_infos.at(i).is_responsed_ = true;
-        break;
+        find_server = true;
       }
     }
+
+    if (!find_server) {
+      ret = OB_INVALID_ARGUMENT;
+      LOG_WARN("cannot find addr in sever infos", K(ret), K(server_addr), K(server_infos));
+    }
   }
   return ret;
 }
@@ -163,7 +163,6 @@ private:
     : mutex_(),
       ten_task_arr_(),
       del_ten_arr_(),
-      need_refresh_(true),
       is_inited_(false) {}

   int update_task_on_all_responsed(RsTenantTask& task);

@@ -181,7 +180,8 @@ private:

   virtual int update_task_status(uint64_t tenant_id,
                                  uint64_t task_id,
-                                 int64_t rs_new_status);
+                                 int64_t rs_new_status,
+                                 common::ObISQLClient& proxy);


   bool tenant_exist(uint64_t tenant_id);

@@ -226,11 +226,9 @@ private:
   lib::ObMutex mutex_; // lib::ObMutexGuard guard(mutex_);
   ObArray<ObTTLTenantTask> ten_task_arr_;
   ObArray<uint64_t> del_ten_arr_;
-  bool need_refresh_;
   bool is_inited_;


-  const int64_t OB_TTL_TASK_RETRY_INTERVAL = 60*1000*1000; // 3min
+  const int64_t OB_TTL_TASK_RETRY_INTERVAL = 15*1000*1000; // 15s
 };

 #define TTLMGR ObTTLTenantTaskMgr::get_instance()
@@ -29,10 +29,11 @@ bool ObTTLTime::is_same_day(int64_t ttl_time1, int64_t ttl_time2)
   time_t param1 = static_cast<time_t>(ttl_time1 / 1000000l);
   time_t param2 = static_cast<time_t>(ttl_time2 / 1000000l);

-  struct tm *t1 = localtime(&param1);
-  struct tm *t2 = localtime(&param1);
+  struct tm tm1, tm2;
+  ::localtime_r(&param1, &tm1);
+  ::localtime_r(&param2, &tm2);

-  return (t1 && t2 && t1->tm_mday == t2->tm_mday);
+  return (tm1.tm_yday == tm2.tm_yday);
 }

 bool ObTTLUtil::extract_val(const char* ptr, uint64_t len, int& val)
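The old code called localtime() twice, which returns a pointer into shared static storage, and additionally passed &param1 both times, so the two results could never differ. The new code uses the reentrant localtime_r() with separate caller-owned struct tm buffers and compares tm_yday. A small stand-alone sketch of the same conversion, assuming microsecond timestamps as in the diff:

    #include <cstdint>
    #include <ctime>

    // localtime_r() writes into caller-provided struct tm buffers, so two
    // conversions cannot clobber each other the way two localtime() calls can.
    bool is_same_day(int64_t ttl_time1_us, int64_t ttl_time2_us)
    {
      time_t t1 = static_cast<time_t>(ttl_time1_us / 1000000l);
      time_t t2 = static_cast<time_t>(ttl_time2_us / 1000000l);
      struct tm tm1, tm2;
      ::localtime_r(&t1, &tm1);
      ::localtime_r(&t2, &tm2);
      return tm1.tm_yday == tm2.tm_yday;  // day-of-year comparison, as in the new code
    }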
@@ -135,12 +136,13 @@ int ObTTLUtil::insert_ttl_task(uint64_t tenant_id,
                              " VALUE "
                              "(now(), now(), %ld, %ld, %ld,"
                              " %ld, %ld, %ld, %ld, %ld, "
-                             " %ld, %ld, %ld,'%s', '%s')", // 12
+                             " %ld, %ld, %ld,'%.*s', '%.*s')", // 12
                              tname, // 0
                              tenant_id, task.table_id_, task.partition_id_,
                              task.task_id_, task.task_start_time_, task.task_update_time_, task.trigger_type_, task.status_,
                              task.ttl_del_cnt_, task.max_version_del_cnt_,
-                             task.scan_cnt_, task.row_key_.ptr(), task.ret_code_.ptr()))) {
+                             task.scan_cnt_, task.row_key_.length(), task.row_key_.ptr(),
+                             task.ret_code_.length(), task.ret_code_.ptr()))) {
     LOG_WARN("sql assign fmt failed", K(ret));
   } else if (OB_FAIL(proxy.write(tenant_id, sql.ptr(), affect_rows))) {
     LOG_WARN("fail to execute sql", K(ret), K(sql));
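Formatting row_key_ and ret_code_ with plain %s assumes a NUL-terminated buffer, which an ObString does not guarantee; %.*s instead takes an explicit (length, pointer) pair, which is why each .ptr() argument now gains a matching .length(). A minimal sketch of the difference, using an ordinary char buffer in place of ObString:

    #include <cstdio>

    int main()
    {
      // A buffer that is not NUL-terminated at the length we care about.
      const char row_key[] = {'k', '1', 'X', 'X', 'X'};
      const int len = 2;  // only the first two bytes belong to the key

      char sql[128];
      // "%.*s" consumes an int length followed by the pointer, so exactly
      // `len` bytes are written; "%s" would keep reading past the intended end.
      std::snprintf(sql, sizeof(sql), "... row_key = '%.*s' ...", len, row_key);
      std::puts(sql);  // prints: ... row_key = 'k1' ...
      return 0;
    }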
@@ -167,12 +169,13 @@ int ObTTLUtil::update_ttl_task_all_fields(uint64_t tenant_id,

   if (OB_FAIL(sql.assign_fmt("UPDATE %s SET "
                              "task_start_time = %ld, task_update_time = %ld, trigger_type = %ld, status = %ld,"
-                             " ttl_del_cnt = %ld, max_version_del_cnt = %ld, scan_cnt = %ld, row_key = '%s', ret_code = '%s'"
-                             " WHERE "
-                             "tenant_id = %ld AND table_id = %ld AND partition_id = %ld AND task_id = %ld ",
+                             " ttl_del_cnt = %ld, max_version_del_cnt = %ld, scan_cnt = %ld, row_key = '%*.s', ret_code = '%*.s'"
+                             " WHERE tenant_id = %ld AND table_id = %ld AND partition_id = %ld AND task_id = %ld ",
                              tname, // 0
                              task.task_start_time_, task.task_update_time_, task.trigger_type_, task.status_,
-                             task.ttl_del_cnt_, task.max_version_del_cnt_, task.scan_cnt_, task.row_key_.ptr(), task.ret_code_.ptr(),
+                             task.ttl_del_cnt_, task.max_version_del_cnt_, task.scan_cnt_,
+                             task.row_key_.length(), task.row_key_.ptr(),
+                             task.ret_code_.length(), task.ret_code_.ptr(),
                              tenant_id, task.table_id_, key.partition_id_, key.task_id_))) {
     LOG_WARN("sql assign fmt failed", K(ret));
   } else if (OB_FAIL(proxy.write(tenant_id, sql.ptr(), affect_rows))) {

@@ -260,12 +263,13 @@ int ObTTLUtil::update_ttl_task_all_fields(uint64_t tenant_id,

   if (OB_FAIL(sql.assign_fmt("UPDATE %s SET "
                              "task_start_time = %ld, task_update_time = %ld, trigger_type = %ld, status = %ld,"
-                             " ttl_del_cnt = %ld, max_version_del_cnt = %ld, scan_cnt = %ld, row_key = '%s', ret_code = '%s'"
+                             " ttl_del_cnt = %ld, max_version_del_cnt = %ld, scan_cnt = %ld, row_key = '%*.s', ret_code = '%*.s'"
                              " WHERE "
                              "tenant_id = %ld AND table_id = %ld AND partition_id = %ld AND task_id = %ld ",
                              tname, // 0
                              task.task_start_time_, task.task_update_time_, task.trigger_type_, task.status_,
-                             task.ttl_del_cnt_, task.max_version_del_cnt_, task.scan_cnt_, task.row_key_.ptr(), task.ret_code_.ptr(),
+                             task.ttl_del_cnt_, task.max_version_del_cnt_, task.scan_cnt_,
+                             task.row_key_.length(), task.row_key_.ptr(), task.ret_code_.length(), task.ret_code_.ptr(),
                              tenant_id, task.table_id_, task.partition_id_, task.task_id_))) {
     LOG_WARN("sql assign fmt failed", K(ret));
   } else if (OB_FAIL(proxy.write(tenant_id, sql.ptr(), affect_rows))) {
@@ -280,11 +284,11 @@ int ObTTLUtil::update_ttl_task_all_fields(uint64_t tenant_id,
 int ObTTLUtil::delete_ttl_task(uint64_t tenant_id,
                                const char* tname,
                                common::ObISQLClient& proxy,
-                               ObTTLStatusKey& key)
+                               ObTTLStatusKey& key,
+                               int64_t &affect_rows)
 {
   int ret = OB_SUCCESS;
   ObSqlString sql;
-  int64_t affect_rows = 0;

   if (OB_FAIL(sql.assign_fmt("DELETE FROM %s WHERE "
                              "tenant_id = %ld AND table_id = %ld "
@@ -396,11 +400,13 @@ int ObTTLUtil::read_ttl_tasks(uint64_t tenant_id,
             ObString rowkey;
             char *rowkey_buf = nullptr;
             EXTRACT_VARCHAR_FIELD_MYSQL(*result, "row_key", rowkey);
-            if (OB_ISNULL(rowkey_buf = static_cast<char *>(allocator->alloc(rowkey.length())))) {
-              LOG_WARN("failt to allocate memory", K(ret));
-            } else {
-              MEMCPY(rowkey_buf, rowkey.ptr(), rowkey.length());
-              result_arr.at(idx).row_key_.assign(rowkey_buf, rowkey.length());
+            if (OB_SUCC(ret) && !rowkey.empty()) {
+              if (OB_ISNULL(rowkey_buf = static_cast<char *>(allocator->alloc(rowkey.length())))) {
+                LOG_WARN("failt to allocate memory", K(ret), K(rowkey));
+              } else {
+                MEMCPY(rowkey_buf, rowkey.ptr(), rowkey.length());
+                result_arr.at(idx).row_key_.assign(rowkey_buf, rowkey.length());
+              }
             }
           }

@@ -408,13 +414,15 @@ int ObTTLUtil::read_ttl_tasks(uint64_t tenant_id,
             ObString err_msg;
             char *err_buf = nullptr;
             EXTRACT_VARCHAR_FIELD_MYSQL(*result, "ret_code", err_msg);
-            if (OB_ISNULL(err_buf = static_cast<char *>(allocator->alloc(err_msg.length())))) {
-              LOG_WARN("failt to allocate memory", K(ret), K(err_msg.length()));
-            } else {
-              MEMCPY(err_buf, err_msg.ptr(), err_msg.length());
-              result_arr.at(idx).ret_code_.assign(err_buf, err_msg.length());
+            if (OB_SUCC(ret) && !err_msg.empty()) {
+              if (OB_ISNULL(err_buf = static_cast<char *>(allocator->alloc(err_msg.length())))) {
+                LOG_WARN("failt to allocate memory", K(ret), K(err_msg));
+              } else {
+                MEMCPY(err_buf, err_msg.ptr(), err_msg.length());
+                result_arr.at(idx).ret_code_.assign(err_buf, err_msg.length());
+              }
             }
           }
         }
       }
     }
   }
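The reads above now skip empty row_key / ret_code values: with the old code an empty ObString made allocator->alloc(0) return NULL and the branch logged a spurious allocation failure. A stand-alone sketch of the guarded copy, with hypothetical Allocator and Str types standing in for ObIAllocator and ObString:

    #include <cstdlib>
    #include <cstring>

    // Hypothetical stand-ins used only for illustration; memory ownership stays
    // with the allocator, as with the arena allocator in the real code.
    struct Allocator { void *alloc(size_t n) { return n == 0 ? nullptr : std::malloc(n); } };
    struct Str { const char *ptr = nullptr; size_t len = 0; bool empty() const { return len == 0; } };

    // Copy `src` into allocator-owned memory, but only when there is something
    // to copy; an empty value is left unset instead of being treated as a failure.
    bool copy_if_not_empty(Allocator &alloc, const Str &src, Str &dst)
    {
      if (src.empty()) {
        return true;  // nothing to copy, not an error
      }
      char *buf = static_cast<char *>(alloc.alloc(src.len));
      if (buf == nullptr) {
        return false;  // genuine allocation failure
      }
      std::memcpy(buf, src.ptr, src.len);
      dst.ptr = buf;
      dst.len = src.len;
      return true;
    }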
@@ -457,5 +465,58 @@ bool ObTTLUtil::check_can_process_tenant_tasks(uint64_t tenant_id)
   return bret;
 }

+int ObTTLUtil::remove_all_task_to_history_table(uint64_t tenant_id, uint64_t task_id, common::ObISQLClient& proxy)
+{
+  int ret = OB_SUCCESS;
+  ObSqlString sql;
+  int64_t affect_rows = 0;
+  if (OB_FAIL(sql.assign_fmt("insert into %s select * from %s "
+                             " where task_id = %ld and partition_id != -1 and table_id != -1",
+                             share::OB_ALL_KV_TTL_TASK_HISTORY_TNAME,
+                             share::OB_ALL_KV_TTL_TASK_TNAME,
+                             task_id))) {
+    LOG_WARN("sql assign fmt failed", K(ret));
+  } else if (OB_FAIL(proxy.write(tenant_id, sql.ptr(), affect_rows))) {
+    LOG_WARN("fail to execute sql", K(ret), K(sql), K(tenant_id));
+  } else {
+    LOG_INFO("success to execute sql", K(ret), K(tenant_id), K(sql), K(affect_rows));
+  }
+
+  return ret;
+}
+
+int ObTTLUtil::replace_ttl_task(uint64_t tenant_id,
+                                const char* tname,
+                                common::ObISQLClient& proxy,
+                                ObTTLStatus& task)
+{
+  int ret = OB_SUCCESS;
+  ObSqlString sql;
+  int64_t affect_rows = 0;
+
+  if (OB_FAIL(sql.assign_fmt("REPLACE INTO %s "
+                             "(gmt_create, gmt_modified, tenant_id, table_id, partition_id, "
+                             "task_id, task_start_time, task_update_time, trigger_type, status,"
+                             " ttl_del_cnt, max_version_del_cnt, scan_cnt, row_key, ret_code)"
+                             " VALUE "
+                             "(now(), now(), %ld, %ld, %ld,"
+                             " %ld, %ld, %ld, %ld, %ld, "
+                             " %ld, %ld, %ld,'%.*s', '%.*s')", // 12
+                             tname, // 0
+                             tenant_id, task.table_id_, task.partition_id_,
+                             task.task_id_, task.task_start_time_, task.task_update_time_, task.trigger_type_, task.status_,
+                             task.ttl_del_cnt_, task.max_version_del_cnt_,
+                             task.scan_cnt_, task.row_key_.length(), task.row_key_.ptr(),
+                             task.ret_code_.length(), task.ret_code_.ptr()))) {
+    LOG_WARN("sql assign fmt failed", K(ret));
+  } else if (OB_FAIL(proxy.write(tenant_id, sql.ptr(), affect_rows))) {
+    LOG_WARN("fail to execute sql", K(ret), K(sql));
+  } else {
+    LOG_INFO("success to execute sql", K(ret), K(sql));
+  }
+
+  return ret;
+}
+
 } // end namespace rootserver
 } // end namespace oceanbase
@@ -24,6 +24,17 @@ namespace oceanbase
 namespace common
 {

+#define OB_TTL_RESPONSE_MASK (1 << 5)
+#define OB_TTL_STATUS_MASK (OB_TTL_RESPONSE_MASK - 1)
+
+#define SET_TASK_PURE_STATUS(status, state) ((status) = ((state) & OB_TTL_STATUS_MASK) + ((status & OB_TTL_RESPONSE_MASK)))
+#define SET_TASK_RESPONSE(status, state) ((status) |= (((state) & 1) << 5))
+#define SET_TASK_STATUS(status, pure_status, is_responsed) { SET_TASK_PURE_STATUS(status, pure_status), SET_TASK_RESPONSE(status, is_responsed); }
+
+#define EVAL_TASK_RESPONSE(status) (((status) & OB_TTL_RESPONSE_MASK) >> 5)
+#define EVAL_TASK_PURE_STATUS(status) (static_cast<ObTTLTaskStatus>((status) & OB_TTL_STATUS_MASK))
+
+
 enum TRIGGER_TYPE
 {
   PERIODIC_TRIGGER = 0,
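These macros, moved here from the rootserver source so both sides can share them, pack a task's persisted status value as five low bits of pure state plus one "responsed" flag at bit 5. A minimal sketch of the same packing with plain functions (hypothetical names, same bit layout as the macros):

    #include <cstdint>
    #include <cstdio>

    // Same layout as the macros: bit 5 is the "responsed" flag,
    // bits 0..4 hold the pure task status.
    constexpr int64_t kResponseMask = 1 << 5;            // OB_TTL_RESPONSE_MASK
    constexpr int64_t kStatusMask   = kResponseMask - 1; // OB_TTL_STATUS_MASK

    int64_t pack_status(int64_t pure_status, bool responsed)
    {
      return (pure_status & kStatusMask) | ((responsed ? 1 : 0) << 5);
    }

    int64_t pure_status(int64_t status)  { return status & kStatusMask; }          // EVAL_TASK_PURE_STATUS
    bool    is_responsed(int64_t status) { return (status & kResponseMask) != 0; } // EVAL_TASK_RESPONSE

    int main()
    {
      const int64_t status = pack_status(3, true);  // pure status 3, already responsed
      std::printf("raw=%lld pure=%lld responsed=%d\n",
                  (long long)status, (long long)pure_status(status), (int)is_responsed(status));
      return 0;
    }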
@@ -106,6 +117,7 @@ typedef struct ObTTLStatus {
               K_(ttl_del_cnt),
               K_(max_version_del_cnt),
               K_(scan_cnt),
+              K_(row_key),
               K_(ret_code));
 } ObTTLStatus;

@@ -210,6 +222,11 @@ public:
                              common::ObISQLClient& proxy,
                              ObTTLStatus& task);

+  static int replace_ttl_task(uint64_t tenant_id,
+                              const char* tname,
+                              common::ObISQLClient& proxy,
+                              ObTTLStatus& task);
+
   static int update_ttl_task(uint64_t tenant_id,
                              const char* tname,
                              common::ObISQLClient& proxy,

@@ -230,7 +247,8 @@ public:
   static int delete_ttl_task(uint64_t tenant_id,
                              const char* tname,
                              common::ObISQLClient& proxy,
-                             ObTTLStatusKey& key);
+                             ObTTLStatusKey& key,
+                             int64_t &affect_rows);

   static int read_ttl_tasks(uint64_t tenant_id,
                             const char* tname,

@@ -240,6 +258,8 @@ public:
                             bool for_update = false,
                             common::ObIAllocator *allocator = NULL);

+  static int remove_all_task_to_history_table(uint64_t tenant_id, uint64_t task_id, common::ObISQLClient& proxy);
+
   static bool check_can_do_work();
   static bool check_can_process_tenant_tasks(uint64_t tenant_id);
@@ -10369,12 +10369,9 @@ int ObPartitionService::internal_leader_active(const ObCbTask& active_task)
       const bool is_normal_pg = !(guard.get_partition_group()->get_pg_storage().is_restore());
       if ((OB_SYS_TENANT_ID != pkey.get_tenant_id()) && is_normal_pg) {
         (void)clog_mgr_->add_pg_archive_task(partition);
+        observer::ObTTLManager::get_instance().on_leader_active(pkey);
       }
     }
-
-    if (OB_SUCC(ret)) {
-      observer::ObTTLManager::get_instance().on_leader_active(partition);
-    }
   }

   if (OB_FAIL(ret) && OB_NOT_NULL(partition)) {