[branch-2.1] Pick IO limit/workload group usage table (#39839)

Author: wangbo
Date: 2024-08-23 18:51:47 +08:00
Committed by: GitHub
Parent: e716658fba
Commit: 6ceb574aa0
37 changed files with 753 additions and 32 deletions

@@ -81,7 +81,10 @@ public enum SchemaTableType {
SCH_FILE_CACHE_STATISTICS("FILE_CACHE_STATISTICS", "FILE_CACHE_STATISTICS",
TSchemaTableType.SCH_FILE_CACHE_STATISTICS),
SCH_WORKLOAD_GROUP_PRIVILEGES("WORKLOAD_GROUP_PRIVILEGES",
"WORKLOAD_GROUP_PRIVILEGES", TSchemaTableType.SCH_WORKLOAD_GROUP_PRIVILEGES);
"WORKLOAD_GROUP_PRIVILEGES", TSchemaTableType.SCH_WORKLOAD_GROUP_PRIVILEGES),
SCH_WORKLOAD_GROUP_RESOURCE_USAGE("WORKLOAD_GROUP_RESOURCE_USAGE",
"WORKLOAD_GROUP_RESOURCE_USAGE", TSchemaTableType.SCH_WORKLOAD_GROUP_RESOURCE_USAGE);
private static final String dbName = "INFORMATION_SCHEMA";
private static SelectList fullSelectLists;

@@ -488,6 +488,8 @@ public class SchemaTable extends Table {
.column("SPILL_THRESHOLD_LOW_WATERMARK", ScalarType.createVarchar(256))
.column("SPILL_THRESHOLD_HIGH_WATERMARK", ScalarType.createVarchar(256))
.column("TAG", ScalarType.createVarchar(256))
.column("READ_BYTES_PER_SECOND", ScalarType.createType(PrimitiveType.BIGINT))
.column("REMOTE_READ_BYTES_PER_SECOND", ScalarType.createType(PrimitiveType.BIGINT))
.build()))
.put("processlist",
new SchemaTable(SystemIdGenerator.getNextId(), "processlist", TableType.SCHEMA,
@@ -532,6 +534,16 @@ public class SchemaTable extends Table {
.column("IS_GRANTABLE", ScalarType.createVarchar(IS_GRANTABLE_LEN))
.build())
)
.put("workload_group_resource_usage",
new SchemaTable(SystemIdGenerator.getNextId(), "workload_group_resource_usage", TableType.SCHEMA,
builder().column("BE_ID", ScalarType.createType(PrimitiveType.BIGINT))
.column("WORKLOAD_GROUP_ID", ScalarType.createType(PrimitiveType.BIGINT))
.column("MEMORY_USAGE_BYTES", ScalarType.createType(PrimitiveType.BIGINT))
.column("CPU_USAGE_PERCENT", ScalarType.createType(PrimitiveType.DOUBLE))
.column("LOCAL_SCAN_BYTES_PER_SECOND", ScalarType.createType(PrimitiveType.BIGINT))
.column("REMOTE_SCAN_BYTES_PER_SECOND", ScalarType.createType(PrimitiveType.BIGINT))
.build())
)
.build();
private boolean fetchAllFe = false;
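
The new workload_group_resource_usage table exposes one row per backend and workload group with memory, CPU, and scan-throughput metrics. A minimal usage sketch over the MySQL protocol, assuming a JDBC driver on the classpath; the FE address, user, and password below are placeholders, not part of this change:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class WorkloadGroupUsageQuery {
    public static void main(String[] args) throws Exception {
        // Placeholder FE address and credentials; adjust for the target cluster.
        String url = "jdbc:mysql://127.0.0.1:9030/information_schema";
        // Column names match the schema table definition added above.
        try (Connection conn = DriverManager.getConnection(url, "root", "");
                Statement stmt = conn.createStatement();
                ResultSet rs = stmt.executeQuery(
                        "SELECT BE_ID, WORKLOAD_GROUP_ID, MEMORY_USAGE_BYTES, CPU_USAGE_PERCENT, "
                        + "LOCAL_SCAN_BYTES_PER_SECOND, REMOTE_SCAN_BYTES_PER_SECOND "
                        + "FROM workload_group_resource_usage")) {
            while (rs.next()) {
                System.out.printf("be=%d wg=%d mem=%d cpu=%.2f%% local=%d remote=%d%n",
                        rs.getLong("BE_ID"), rs.getLong("WORKLOAD_GROUP_ID"),
                        rs.getLong("MEMORY_USAGE_BYTES"), rs.getDouble("CPU_USAGE_PERCENT"),
                        rs.getLong("LOCAL_SCAN_BYTES_PER_SECOND"),
                        rs.getLong("REMOTE_SCAN_BYTES_PER_SECOND"));
            }
        }
    }
}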

@@ -65,6 +65,7 @@ public class BackendPartitionedSchemaScanNode extends SchemaScanNode {
BEACKEND_ID_COLUMN_SET.add("backend_id");
BACKEND_TABLE.add("backend_active_tasks");
BACKEND_TABLE.add("workload_group_resource_usage");
BEACKEND_ID_COLUMN_SET.add("be_id");
BACKEND_TABLE.add("file_cache_statistics");

@@ -75,6 +75,10 @@ public class WorkloadGroup implements Writable, GsonPostProcessable {
public static final String TAG = "tag";
public static final String READ_BYTES_PER_SECOND = "read_bytes_per_second";
public static final String REMOTE_READ_BYTES_PER_SECOND = "remote_read_bytes_per_second";
// NOTE(wb): none of these properties are required; for some, the default value is set in the BE.
// Defaults are as follows:
// cpu_share=1024, memory_limit=0% (0 means no limit), enable_memory_overcommit=true
@@ -83,7 +87,7 @@ public class WorkloadGroup implements Writable, GsonPostProcessable {
.add(MAX_QUEUE_SIZE).add(QUEUE_TIMEOUT).add(CPU_HARD_LIMIT).add(SCAN_THREAD_NUM)
.add(MAX_REMOTE_SCAN_THREAD_NUM).add(MIN_REMOTE_SCAN_THREAD_NUM)
.add(SPILL_THRESHOLD_LOW_WATERMARK).add(SPILL_THRESHOLD_HIGH_WATERMARK)
.add(TAG).build();
.add(TAG).add(READ_BYTES_PER_SECOND).add(REMOTE_READ_BYTES_PER_SECOND).build();
public static final int SPILL_LOW_WATERMARK_DEFAULT_VALUE = 50;
public static final int SPILL_HIGH_WATERMARK_DEFAULT_VALUE = 80;
@@ -394,6 +398,35 @@ public class WorkloadGroup implements Writable, GsonPostProcessable {
}
}
if (properties.containsKey(READ_BYTES_PER_SECOND)) {
String readBytesVal = properties.get(READ_BYTES_PER_SECOND);
try {
long longVal = Long.parseLong(readBytesVal);
boolean isValidValue = longVal == -1 || longVal > 0;
if (!isValidValue) {
throw new NumberFormatException();
}
} catch (NumberFormatException e) {
throw new DdlException(
READ_BYTES_PER_SECOND + " should be -1 or an integer value bigger than 0, but input value is "
+ readBytesVal);
}
}
if (properties.containsKey(REMOTE_READ_BYTES_PER_SECOND)) {
String readBytesVal = properties.get(REMOTE_READ_BYTES_PER_SECOND);
try {
long longVal = Long.parseLong(readBytesVal);
boolean isValidValue = longVal == -1 || longVal > 0;
if (!isValidValue) {
throw new NumberFormatException();
}
} catch (NumberFormatException e) {
throw new DdlException(REMOTE_READ_BYTES_PER_SECOND
+ " should be -1 or an integer value bigger than 0, but input value is " + readBytesVal);
}
}
}
public long getId() {
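
Both properties accept -1 (no limit) or a positive long, and the two validation blocks above differ only in the property name. A possible shared helper, shown purely as a refactor sketch and not part of the commit; Map and DdlException are assumed to come from the surrounding class context:

// Hypothetical helper; the commit keeps the two checks inline.
private static void checkBytesPerSecondProperty(Map<String, String> properties, String key)
        throws DdlException {
    if (!properties.containsKey(key)) {
        return;
    }
    String value = properties.get(key);
    try {
        long longVal = Long.parseLong(value);
        // -1 means "no limit"; any other value must be strictly positive.
        if (longVal != -1 && longVal <= 0) {
            throw new NumberFormatException();
        }
    } catch (NumberFormatException e) {
        throw new DdlException(key + " should be -1 or an integer value bigger than 0,"
                + " but input value is " + value);
    }
}

// Usage inside the property check:
// checkBytesPerSecondProperty(properties, READ_BYTES_PER_SECOND);
// checkBytesPerSecondProperty(properties, REMOTE_READ_BYTES_PER_SECOND);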
@@ -484,6 +517,13 @@ public class WorkloadGroup implements Writable, GsonPostProcessable {
} else {
row.add(val);
}
} else if (READ_BYTES_PER_SECOND.equals(key) || REMOTE_READ_BYTES_PER_SECOND.equals(key)) {
String val = properties.get(key);
if (StringUtils.isEmpty(val)) {
row.add("-1");
} else {
row.add(val);
}
} else {
row.add(properties.get(key));
}
@@ -571,6 +611,16 @@ public class WorkloadGroup implements Writable, GsonPostProcessable {
tWorkloadGroupInfo.setTag(tagStr);
}
String readBytesPerSecStr = properties.get(READ_BYTES_PER_SECOND);
if (readBytesPerSecStr != null) {
tWorkloadGroupInfo.setReadBytesPerSecond(Long.valueOf(readBytesPerSecStr));
}
String remoteReadBytesPerSecStr = properties.get(REMOTE_READ_BYTES_PER_SECOND);
if (remoteReadBytesPerSecStr != null) {
tWorkloadGroupInfo.setRemoteReadBytesPerSecond(Long.valueOf(remoteReadBytesPerSecStr));
}
TopicInfo topicInfo = new TopicInfo();
topicInfo.setWorkloadGroupInfo(tWorkloadGroupInfo);
return topicInfo;
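
With the limits carried in TWorkloadGroupInfo, backends receive them through the workload group topic publish path. On the SQL side the new keys are set like any other workload group property; a hedged JDBC example, assuming the usual ALTER WORKLOAD GROUP ... PROPERTIES syntax, with the group name g1, FE address, and 100 MB/s values as placeholders:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class SetWorkloadGroupIoLimit {
    public static void main(String[] args) throws Exception {
        String url = "jdbc:mysql://127.0.0.1:9030/"; // placeholder FE address
        try (Connection conn = DriverManager.getConnection(url, "root", "");
                Statement stmt = conn.createStatement()) {
            // 104857600 bytes/s = 100 MB/s; "-1" would mean no limit.
            stmt.execute("ALTER WORKLOAD GROUP g1 PROPERTIES ("
                    + "\"read_bytes_per_second\" = \"104857600\", "
                    + "\"remote_read_bytes_per_second\" = \"104857600\")");
        }
    }
}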

@@ -80,6 +80,7 @@ public class WorkloadGroupMgr extends MasterDaemon implements Writable, GsonPost
.add(WorkloadGroup.MIN_REMOTE_SCAN_THREAD_NUM)
.add(WorkloadGroup.SPILL_THRESHOLD_LOW_WATERMARK).add(WorkloadGroup.SPILL_THRESHOLD_HIGH_WATERMARK)
.add(WorkloadGroup.TAG)
.add(WorkloadGroup.READ_BYTES_PER_SECOND).add(WorkloadGroup.REMOTE_READ_BYTES_PER_SECOND)
.add(QueryQueue.RUNNING_QUERY_NUM).add(QueryQueue.WAITING_QUERY_NUM)
.build();

View File

@@ -491,8 +491,11 @@ public class MetadataGenerator {
trow.addToColumnValue(new TCell().setStringVal(rGroupsInfo.get(12))); // spill low watermark
trow.addToColumnValue(new TCell().setStringVal(rGroupsInfo.get(13))); // spill high watermark
trow.addToColumnValue(new TCell().setStringVal(rGroupsInfo.get(14))); // tag
trow.addToColumnValue(new TCell().setStringVal(rGroupsInfo.get(15))); // running query num
trow.addToColumnValue(new TCell().setStringVal(rGroupsInfo.get(16))); // waiting query num
trow.addToColumnValue(new TCell().setLongVal(Long.valueOf(rGroupsInfo.get(15)))); // read bytes per second
trow.addToColumnValue(
new TCell().setLongVal(Long.valueOf(rGroupsInfo.get(16)))); // remote read bytes per second
trow.addToColumnValue(new TCell().setStringVal(rGroupsInfo.get(17))); // running query num
trow.addToColumnValue(new TCell().setStringVal(rGroupsInfo.get(18))); // waiting query num
dataBatch.add(trow);
}
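
Because the two new BIGINT values are inserted before the queue counters, the row indexes shift: positions 15 and 16 now hold the read rates, and the running/waiting query counts move to 17 and 18. This ordering must stay aligned with the column list in SchemaTable and the row built by WorkloadGroupMgr. An illustrative alternative, not the actual Doris code, that names the offsets instead of using literals:

// Hypothetical constants for the workload_groups row layout after this change.
public final class WorkloadGroupRowIndex {
    public static final int SPILL_LOW_WATERMARK = 12;
    public static final int SPILL_HIGH_WATERMARK = 13;
    public static final int TAG = 14;
    public static final int READ_BYTES_PER_SECOND = 15;
    public static final int REMOTE_READ_BYTES_PER_SECOND = 16;
    public static final int RUNNING_QUERY_NUM = 17;
    public static final int WAITING_QUERY_NUM = 18;

    private WorkloadGroupRowIndex() {
    }
}

// e.g. trow.addToColumnValue(new TCell().setLongVal(
//         Long.valueOf(rGroupsInfo.get(WorkloadGroupRowIndex.READ_BYTES_PER_SECOND))));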