Make IOPS in resource manager not depend on cgroup

renju96
2023-08-22 06:40:37 +00:00
committed by ob-robot
parent 843a90817a
commit 538a4eca82
3 changed files with 10 additions and 10 deletions


@@ -1626,9 +1626,7 @@ void ObTenant::check_resource_manager_plan()
   ObResourceColMappingRuleManager &col_rule_mgr = G_RES_MGR.get_col_mapping_rule_mgr();
   char data[OB_MAX_RESOURCE_PLAN_NAME_LENGTH];
   ObDataBuffer allocator(data, OB_MAX_RESOURCE_PLAN_NAME_LENGTH);
-  if (!cgroup_ctrl_.is_valid()) {
-    // The cgroup is not initialized successfully, no need to refresh the resource manager plan
-  } else if (OB_SYS_TENANT_ID != id_ && OB_MAX_RESERVED_TENANT_ID >= id_) {
+  if (OB_SYS_TENANT_ID != id_ && OB_MAX_RESERVED_TENANT_ID >= id_) {
     // Except for system rental outside, internal tenants do not use resource plan for internal isolation
   } else if (OB_FAIL(ObSchemaUtils::get_tenant_varchar_variable(
              id_,


@@ -42,7 +42,7 @@ int ObMClock::init(const int64_t min_iops, const int64_t max_iops, const int64_t
   if (OB_UNLIKELY(is_inited_)) {
     ret = OB_INIT_TWICE;
     LOG_WARN("init twice", K(ret), K(is_inited_));
-  } else if (OB_UNLIKELY(min_iops <= 0 || max_iops < min_iops || weight < 0)) {
+  } else if (OB_UNLIKELY(min_iops < 0 || max_iops < min_iops || weight < 0)) {
     ret = OB_INVALID_ARGUMENT;
     LOG_WARN("invalid argument", K(ret), K(min_iops), K(max_iops), K(weight));
   } else {
@@ -64,7 +64,7 @@ int ObMClock::update(const int64_t min_iops, const int64_t max_iops, const int64
   if (OB_UNLIKELY(!is_inited_)) {
     ret = OB_NOT_INIT;
     LOG_WARN("not init", K(ret), K(is_inited_));
-  } else if (OB_UNLIKELY(min_iops <= 0 || max_iops < min_iops || weight < 0)) {
+  } else if (OB_UNLIKELY(min_iops < 0 || max_iops < min_iops || weight < 0)) {
     ret = OB_INVALID_ARGUMENT;
     LOG_WARN("invalid argument", K(ret), K(min_iops), K(max_iops), K(weight));
   } else {
@@ -139,7 +139,7 @@ int ObMClock::dial_back_reservation_clock(const double iops_scale)
     ret = OB_INVALID_ARGUMENT;
     LOG_WARN("invalid argument", K(ret), K(iops_scale));
   } else {
-    const int64_t delta_ns = 1000L * 1000L * 1000L / (reservation_clock_.iops_ * iops_scale);
+    const int64_t delta_ns = reservation_clock_.iops_ == 0 ? 1000L * 1000L * 1000L / INT64_MAX : 1000L * 1000L * 1000L / (reservation_clock_.iops_ * iops_scale);
     ATOMIC_SAF(&reservation_clock_.last_ns_, delta_ns);
   }
   return ret;
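
With min_iops now allowed to be 0 in ObMClock::init and ObMClock::update (the two relaxed checks above), reservation_clock_.iops_ can legitimately be 0, which is why the dial-back division in the last hunk gains the INT64_MAX guard. A minimal standalone sketch of that arithmetic, assuming a hypothetical free function compute_delta_ns that mirrors the patched expression (it is not part of the patch):

#include <cstdint>
#include <iostream>

// Hypothetical helper mirroring the guarded arithmetic in
// ObMClock::dial_back_reservation_clock.
int64_t compute_delta_ns(int64_t iops, double iops_scale)
{
  if (iops == 0) {
    // Integer division 1e9 / INT64_MAX truncates to 0:
    // a group with no reservation never dials its clock back.
    return 1000L * 1000L * 1000L / INT64_MAX;
  }
  // Nanoseconds between two reserved requests at iops * iops_scale req/s.
  return static_cast<int64_t>(1000L * 1000L * 1000L / (iops * iops_scale));
}

int main()
{
  std::cout << compute_delta_ns(0, 1.0) << "\n";      // 0
  std::cout << compute_delta_ns(10000, 1.0) << "\n";  // 100000
  return 0;
}
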


@@ -1387,9 +1387,9 @@ void ObTenantDagWorker::resume()
 int ObTenantDagWorker::set_dag_resource(const uint64_t group_id)
 {
   int ret = OB_SUCCESS;
-  if (nullptr == GCTX.cgroup_ctrl_ || OB_UNLIKELY(!GCTX.cgroup_ctrl_->is_valid())) {
-    //invalid cgroup, cannot bind thread and control resource
+  if (OB_ISNULL(GCTX.cgroup_ctrl_)) {
+    //cgroup not init, cannot bind thread and control resource
   } else {
     uint64_t consumer_group_id = 0;
     if (group_id != 0) {
       //user level
@@ -1399,9 +1399,11 @@ int ObTenantDagWorker::set_dag_resource(const uint64_t group_id)
       LOG_WARN("fail to get group id by function", K(ret), K(MTL_ID()), K(function_type_), K(consumer_group_id));
     }
     if (OB_SUCC(ret) && consumer_group_id != group_id_) {
-      if (OB_FAIL(GCTX.cgroup_ctrl_->add_self_to_group(MTL_ID(), consumer_group_id))) {
+      // for CPU isolation, depend on cgroup
+      if (GCTX.cgroup_ctrl_->is_valid() && OB_FAIL(GCTX.cgroup_ctrl_->add_self_to_group(MTL_ID(), consumer_group_id))) {
         LOG_WARN("bind back thread to group failed", K(ret), K(GETTID()), K(MTL_ID()), K(group_id));
       } else {
+        // for IOPS isolation, only depend on consumer_group_id
         ATOMIC_SET(&group_id_, consumer_group_id);
         THIS_WORKER.set_group_id(static_cast<int32_t>(consumer_group_id));
       }
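
A condensed sketch of the control flow this hunk ends up with, using hypothetical stand-ins (CgroupCtrl, a stubbed add_self_to_group, a plain std::atomic) for the real GCTX.cgroup_ctrl_ and ATOMIC_SET plumbing: the cgroup bind for CPU isolation is attempted only when cgroup control is valid, while the consumer group id that drives IOPS isolation is recorded either way.

#include <atomic>
#include <cstdint>

// Hypothetical stand-in for ObCgroupCtrl; is_valid() is false when the
// cgroup hierarchy was not set up on this host.
struct CgroupCtrl {
  bool is_valid() const { return valid_; }
  int add_self_to_group(uint64_t /*tenant_id*/, uint64_t /*group_id*/) { return 0; }
  bool valid_ = false;
};

std::atomic<uint64_t> g_group_id{0};  // stands in for ObTenantDagWorker::group_id_

// Sketch of the patched flow: CPU isolation still needs a working cgroup,
// but the group id used for IOPS isolation is updated regardless.
int set_dag_resource(CgroupCtrl *ctrl, uint64_t tenant_id, uint64_t consumer_group_id)
{
  int ret = 0;
  if (ctrl == nullptr) {
    // cgroup control not created: nothing to bind, nothing to record
  } else {
    if (ctrl->is_valid()) {
      ret = ctrl->add_self_to_group(tenant_id, consumer_group_id);  // CPU isolation
    }
    if (ret == 0) {
      g_group_id.store(consumer_group_id);  // IOPS isolation keys off this id only
    }
  }
  return ret;
}

int main()
{
  CgroupCtrl ctrl;  // valid_ == false, e.g. cgroup fs not mounted
  set_dag_resource(&ctrl, 1001, 10005);
  return g_group_id.load() == 10005 ? 0 : 1;  // id recorded even without cgroup
}
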