@@ -44,6 +44,7 @@
#include "exec/schema_scanner/schema_user_privileges_scanner.h"
#include "exec/schema_scanner/schema_variables_scanner.h"
#include "exec/schema_scanner/schema_views_scanner.h"
#include "exec/schema_scanner/schema_workload_groups_scanner.h"
#include "olap/hll.h"
#include "runtime/define_primitive_type.h"
#include "util/string_util.h"
@@ -155,6 +156,8 @@ std::unique_ptr<SchemaScanner> SchemaScanner::create(TSchemaTableType::type type
        return SchemaBackendActiveTasksScanner::create_unique();
    case TSchemaTableType::SCH_ACTIVE_QUERIES:
        return SchemaActiveQueriesScanner::create_unique();
    case TSchemaTableType::SCH_WORKLOAD_GROUPS:
        return SchemaWorkloadGroupsScanner::create_unique();
    default:
        return SchemaDummyScanner::create_unique();
        break;

@@ -50,20 +50,17 @@ Status SchemaActiveQueriesScanner::start(RuntimeState* state) {
Status SchemaActiveQueriesScanner::_get_active_queries_block_from_fe() {
    TNetworkAddress master_addr = ExecEnv::GetInstance()->master_info()->network_address;

    TQueriesMetadataParams tqueries_meta_params;
    tqueries_meta_params.__set_relay_to_other_fe(true);

    TMetadataTableRequestParams metadata_table_params;
    metadata_table_params.__set_metadata_type(TMetadataType::QUERIES);
    metadata_table_params.__set_queries_metadata_params(tqueries_meta_params);
    TSchemaTableRequestParams schema_table_params;
    for (int i = 0; i < _s_tbls_columns.size(); i++) {
        metadata_table_params.__isset.columns_name = true;
        metadata_table_params.columns_name.emplace_back(_s_tbls_columns[i].name);
        schema_table_params.__isset.columns_name = true;
        schema_table_params.columns_name.emplace_back(_s_tbls_columns[i].name);
    }
    schema_table_params.replay_to_other_fe = true;
    schema_table_params.__isset.replay_to_other_fe = true;

    TFetchSchemaTableDataRequest request;
    request.__set_schema_table_name(TSchemaTableName::SCHEMA_TABLE);
    request.__set_metada_table_params(metadata_table_params);
    request.__set_schema_table_name(TSchemaTableName::ACTIVE_QUERIES);
    request.__set_schema_table_params(schema_table_params);

    TFetchSchemaTableDataResult result;

be/src/exec/schema_scanner/schema_workload_groups_scanner.cpp (new file, 166 lines)
@@ -0,0 +1,166 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#include "exec/schema_scanner/schema_workload_groups_scanner.h"

#include "runtime/client_cache.h"
#include "runtime/exec_env.h"
#include "runtime/runtime_state.h"
#include "util/thrift_rpc_helper.h"
#include "vec/common/string_ref.h"
#include "vec/core/block.h"
#include "vec/data_types/data_type_factory.hpp"

namespace doris {
std::vector<SchemaScanner::ColumnDesc> SchemaWorkloadGroupsScanner::_s_tbls_columns = {
        {"ID", TYPE_BIGINT, sizeof(int64_t), true},
        {"NAME", TYPE_VARCHAR, sizeof(StringRef), true},
        {"CPU_SHARE", TYPE_BIGINT, sizeof(int64_t), true},
        {"MEMORY_LIMIT", TYPE_VARCHAR, sizeof(StringRef), true},
        {"ENABLE_MEMORY_OVERCOMMIT", TYPE_VARCHAR, sizeof(StringRef), true},
        {"MAX_CONCURRENCY", TYPE_BIGINT, sizeof(int64_t), true},
        {"MAX_QUEUE_SIZE", TYPE_BIGINT, sizeof(int64_t), true},
        {"QUEUE_TIMEOUT", TYPE_BIGINT, sizeof(int64_t), true},
        {"CPU_HARD_LIMIT", TYPE_STRING, sizeof(StringRef), true},
        {"SCAN_THREAD_NUM", TYPE_BIGINT, sizeof(int64_t), true},
        {"MAX_REMOTE_SCAN_THREAD_NUM", TYPE_BIGINT, sizeof(int64_t), true},
        {"MIN_REMOTE_SCAN_THREAD_NUM", TYPE_BIGINT, sizeof(int64_t), true}};

SchemaWorkloadGroupsScanner::SchemaWorkloadGroupsScanner()
        : SchemaScanner(_s_tbls_columns, TSchemaTableType::SCH_WORKLOAD_GROUPS) {}

SchemaWorkloadGroupsScanner::~SchemaWorkloadGroupsScanner() {}

Status SchemaWorkloadGroupsScanner::start(RuntimeState* state) {
    _block_rows_limit = state->batch_size();
    _rpc_timeout = state->execution_timeout() * 1000;
    return Status::OK();
}

Status SchemaWorkloadGroupsScanner::_get_workload_groups_block_from_fe() {
    TNetworkAddress master_addr = ExecEnv::GetInstance()->master_info()->network_address;

    TSchemaTableRequestParams schema_table_request_params;
    for (int i = 0; i < _s_tbls_columns.size(); i++) {
        schema_table_request_params.__isset.columns_name = true;
        schema_table_request_params.columns_name.emplace_back(_s_tbls_columns[i].name);
    }
    schema_table_request_params.__set_current_user_ident(*_param->common_param->current_user_ident);

    TFetchSchemaTableDataRequest request;
    request.__set_schema_table_name(TSchemaTableName::WORKLOAD_GROUPS);
    request.__set_schema_table_params(schema_table_request_params);

    TFetchSchemaTableDataResult result;

    RETURN_IF_ERROR(ThriftRpcHelper::rpc<FrontendServiceClient>(
            master_addr.hostname, master_addr.port,
            [&request, &result](FrontendServiceConnection& client) {
                client->fetchSchemaTableData(result, request);
            },
            _rpc_timeout));

    Status status(Status::create(result.status));
    if (!status.ok()) {
        LOG(WARNING) << "fetch workload groups from FE failed, errmsg=" << status;
        return status;
    }
    std::vector<TRow> result_data = result.data_batch;

    _workload_groups_block = vectorized::Block::create_unique();
    for (int i = 0; i < _s_tbls_columns.size(); ++i) {
        TypeDescriptor descriptor(_s_tbls_columns[i].type);
        auto data_type = vectorized::DataTypeFactory::instance().create_data_type(descriptor, true);
        _workload_groups_block->insert(vectorized::ColumnWithTypeAndName(
                data_type->create_column(), data_type, _s_tbls_columns[i].name));
    }

    _workload_groups_block->reserve(_block_rows_limit);

    if (result_data.size() > 0) {
        int col_size = result_data[0].column_value.size();
        if (col_size != _s_tbls_columns.size()) {
            return Status::InternalError<false>(
                    "workload groups schema is not match for FE and BE");
        }
    }

    // todo(wb) reuse this callback function
    auto insert_string_value = [&](int col_index, std::string str_val, vectorized::Block* block) {
        vectorized::MutableColumnPtr mutable_col_ptr;
        mutable_col_ptr = std::move(*block->get_by_position(col_index).column).assume_mutable();
        auto* nullable_column =
                reinterpret_cast<vectorized::ColumnNullable*>(mutable_col_ptr.get());
        vectorized::IColumn* col_ptr = &nullable_column->get_nested_column();
        reinterpret_cast<vectorized::ColumnString*>(col_ptr)->insert_data(str_val.data(),
                                                                          str_val.size());
        nullable_column->get_null_map_data().emplace_back(0);
    };
    auto insert_int_value = [&](int col_index, int64_t int_val, vectorized::Block* block) {
        vectorized::MutableColumnPtr mutable_col_ptr;
        mutable_col_ptr = std::move(*block->get_by_position(col_index).column).assume_mutable();
        auto* nullable_column =
                reinterpret_cast<vectorized::ColumnNullable*>(mutable_col_ptr.get());
        vectorized::IColumn* col_ptr = &nullable_column->get_nested_column();
        reinterpret_cast<vectorized::ColumnVector<vectorized::Int64>*>(col_ptr)->insert_value(
                int_val);
        nullable_column->get_null_map_data().emplace_back(0);
    };

    for (int i = 0; i < result_data.size(); i++) {
        TRow row = result_data[i];

        for (int j = 0; j < _s_tbls_columns.size(); j++) {
            if (_s_tbls_columns[j].type == TYPE_BIGINT) {
                insert_int_value(j, row.column_value[j].longVal, _workload_groups_block.get());
            } else {
                insert_string_value(j, row.column_value[j].stringVal, _workload_groups_block.get());
            }
        }
    }
    return Status::OK();
}

Status SchemaWorkloadGroupsScanner::get_next_block(vectorized::Block* block, bool* eos) {
    if (!_is_init) {
        return Status::InternalError("Used before initialized.");
    }

    if (nullptr == block || nullptr == eos) {
        return Status::InternalError("input pointer is nullptr.");
    }

    if (_workload_groups_block == nullptr) {
        RETURN_IF_ERROR(_get_workload_groups_block_from_fe());
        _total_rows = _workload_groups_block->rows();
    }

    if (_row_idx == _total_rows) {
        *eos = true;
        return Status::OK();
    }

    int current_batch_rows = std::min(_block_rows_limit, _total_rows - _row_idx);
    vectorized::MutableBlock mblock = vectorized::MutableBlock::build_mutable_block(block);
    mblock.add_rows(_workload_groups_block.get(), _row_idx, current_batch_rows);
    _row_idx += current_batch_rows;

    *eos = _row_idx == _total_rows;
    return Status::OK();
}

} // namespace doris

be/src/exec/schema_scanner/schema_workload_groups_scanner.h (new file, 52 lines)
@@ -0,0 +1,52 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <vector>

#include "common/status.h"
#include "exec/schema_scanner.h"

namespace doris {
class RuntimeState;
namespace vectorized {
class Block;
} // namespace vectorized

class SchemaWorkloadGroupsScanner : public SchemaScanner {
    ENABLE_FACTORY_CREATOR(SchemaWorkloadGroupsScanner);

public:
    SchemaWorkloadGroupsScanner();
    ~SchemaWorkloadGroupsScanner() override;

    Status start(RuntimeState* state) override;
    Status get_next_block(vectorized::Block* block, bool* eos) override;

    static std::vector<SchemaScanner::ColumnDesc> _s_tbls_columns;

private:
    Status _get_workload_groups_block_from_fe();

    int _block_rows_limit = 4096;
    int _row_idx = 0;
    int _total_rows = 0;
    std::unique_ptr<vectorized::Block> _workload_groups_block = nullptr;
    int _rpc_timeout = 3000;
};
}; // namespace doris

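Once the factory case in the first hunk wires this scanner up, the new table can be exercised end to end with an ordinary query against INFORMATION_SCHEMA. A minimal smoke test, assuming a running cluster where at least the built-in group 'normal' exists (column names are the ones declared in _s_tbls_columns above):

    select name, cpu_share, memory_limit, enable_memory_overcommit, max_concurrency
    from information_schema.workload_groups
    order by name;
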
@@ -235,9 +235,6 @@ Status VMetaScanner::_fetch_metadata(const TMetaScanRange& meta_scan_range) {
    case TMetadataType::FRONTENDS_DISKS:
        RETURN_IF_ERROR(_build_frontends_disks_metadata_request(meta_scan_range, &request));
        break;
    case TMetadataType::WORKLOAD_GROUPS:
        RETURN_IF_ERROR(_build_workload_groups_metadata_request(meta_scan_range, &request));
        break;
    case TMetadataType::WORKLOAD_SCHED_POLICY:
        RETURN_IF_ERROR(_build_workload_sched_policy_metadata_request(meta_scan_range, &request));
        break;
@@ -253,9 +250,6 @@ Status VMetaScanner::_fetch_metadata(const TMetaScanRange& meta_scan_range) {
    case TMetadataType::TASKS:
        RETURN_IF_ERROR(_build_tasks_metadata_request(meta_scan_range, &request));
        break;
    case TMetadataType::QUERIES:
        RETURN_IF_ERROR(_build_queries_metadata_request(meta_scan_range, &request));
        break;
    default:
        _meta_eos = true;
        return Status::OK();
@@ -365,23 +359,6 @@ Status VMetaScanner::_build_frontends_disks_metadata_request(
    return Status::OK();
}

Status VMetaScanner::_build_workload_groups_metadata_request(
        const TMetaScanRange& meta_scan_range, TFetchSchemaTableDataRequest* request) {
    VLOG_CRITICAL << "VMetaScanner::_build_workload_groups_metadata_request";

    // create request
    request->__set_cluster_name("");
    request->__set_schema_table_name(TSchemaTableName::METADATA_TABLE);

    // create TMetadataTableRequestParams
    TMetadataTableRequestParams metadata_table_params;
    metadata_table_params.__set_metadata_type(TMetadataType::WORKLOAD_GROUPS);
    metadata_table_params.__set_current_user_ident(_user_identity);

    request->__set_metada_table_params(metadata_table_params);
    return Status::OK();
}

Status VMetaScanner::_build_workload_sched_policy_metadata_request(
        const TMetaScanRange& meta_scan_range, TFetchSchemaTableDataRequest* request) {
    VLOG_CRITICAL << "VMetaScanner::_build_workload_sched_policy_metadata_request";
@@ -473,25 +450,6 @@ Status VMetaScanner::_build_tasks_metadata_request(const TMetaScanRange& meta_sc
    return Status::OK();
}

Status VMetaScanner::_build_queries_metadata_request(const TMetaScanRange& meta_scan_range,
                                                     TFetchSchemaTableDataRequest* request) {
    VLOG_CRITICAL << "VMetaScanner::_build_queries_metadata_request";
    if (!meta_scan_range.__isset.queries_params) {
        return Status::InternalError("Can not find TQueriesMetadataParams from meta_scan_range.");
    }
    // create request
    request->__set_cluster_name("");
    request->__set_schema_table_name(TSchemaTableName::METADATA_TABLE);

    // create TMetadataTableRequestParams
    TMetadataTableRequestParams metadata_table_params;
    metadata_table_params.__set_metadata_type(TMetadataType::QUERIES);
    metadata_table_params.__set_queries_metadata_params(meta_scan_range.queries_params);

    request->__set_metada_table_params(metadata_table_params);
    return Status::OK();
}

Status VMetaScanner::close(RuntimeState* state) {
    VLOG_CRITICAL << "VMetaScanner::close";
    RETURN_IF_ERROR(VScanner::close(state));

@@ -71,7 +71,8 @@ public enum SchemaTableType {
    SCH_METADATA_NAME_IDS("METADATA_NAME_IDS", "METADATA_NAME_IDS", TSchemaTableType.SCH_METADATA_NAME_IDS),
    SCH_PROFILING("PROFILING", "PROFILING", TSchemaTableType.SCH_PROFILING),
    SCH_BACKEND_ACTIVE_TASKS("BACKEND_ACTIVE_TASKS", "BACKEND_ACTIVE_TASKS", TSchemaTableType.SCH_BACKEND_ACTIVE_TASKS),
    SCH_ACTIVE_QUERIES("ACTIVE_QUERIES", "ACTIVE_QUERIES", TSchemaTableType.SCH_ACTIVE_QUERIES);
    SCH_ACTIVE_QUERIES("ACTIVE_QUERIES", "ACTIVE_QUERIES", TSchemaTableType.SCH_ACTIVE_QUERIES),
    SCH_WORKLOAD_GROUPS("WORKLOAD_GROUPS", "WORKLOAD_GROUPS", TSchemaTableType.SCH_WORKLOAD_GROUPS);
    private static final String dbName = "INFORMATION_SCHEMA";
    private static SelectList fullSelectLists;

@@ -31,7 +31,6 @@ import org.apache.doris.nereids.trees.expressions.functions.table.MvInfos;
import org.apache.doris.nereids.trees.expressions.functions.table.Numbers;
import org.apache.doris.nereids.trees.expressions.functions.table.S3;
import org.apache.doris.nereids.trees.expressions.functions.table.Tasks;
import org.apache.doris.nereids.trees.expressions.functions.table.WorkloadGroups;

import com.google.common.collect.ImmutableList;

@@ -56,8 +55,7 @@ public class BuiltinTableValuedFunctions implements FunctionHelper {
            tableValued(S3.class, "s3"),
            tableValued(MvInfos.class, "mv_infos"),
            tableValued(Jobs.class, "jobs"),
            tableValued(Tasks.class, "tasks"),
            tableValued(WorkloadGroups.class, "workload_groups")
            tableValued(Tasks.class, "tasks")
    );

    public static final BuiltinTableValuedFunctions INSTANCE = new BuiltinTableValuedFunctions();

@@ -467,6 +467,20 @@ public class SchemaTable extends Table {
                    .column("FRONTEND_INSTANCE", ScalarType.createVarchar(256))
                    .column("SQL", ScalarType.createStringType())
                    .build()))
            .put("workload_groups", new SchemaTable(SystemIdGenerator.getNextId(), "workload_groups", TableType.SCHEMA,
                    builder().column("ID", ScalarType.createType(PrimitiveType.BIGINT))
                            .column("NAME", ScalarType.createVarchar(256))
                            .column("CPU_SHARE", ScalarType.createType(PrimitiveType.BIGINT))
                            .column("MEMORY_LIMIT", ScalarType.createVarchar(256))
                            .column("ENABLE_MEMORY_OVERCOMMIT", ScalarType.createVarchar(256))
                            .column("MAX_CONCURRENCY", ScalarType.createType(PrimitiveType.BIGINT))
                            .column("MAX_QUEUE_SIZE", ScalarType.createType(PrimitiveType.BIGINT))
                            .column("QUEUE_TIMEOUT", ScalarType.createType(PrimitiveType.BIGINT))
                            .column("CPU_HARD_LIMIT", ScalarType.createStringType())
                            .column("SCAN_THREAD_NUM", ScalarType.createType(PrimitiveType.BIGINT))
                            .column("MAX_REMOTE_SCAN_THREAD_NUM", ScalarType.createType(PrimitiveType.BIGINT))
                            .column("MIN_REMOTE_SCAN_THREAD_NUM", ScalarType.createType(PrimitiveType.BIGINT))
                            .build()))
            .build();

    protected SchemaTable(long id, String name, TableType type, List<Column> baseSchema) {

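With the FE-side schema table registered above, a quick way to confirm both FE and BE agree on the layout is to describe the new table. This is illustrative only; the expected column list is simply the one declared in the builder above:

    describe information_schema.workload_groups;
    -- ID, NAME, CPU_SHARE, MEMORY_LIMIT, ENABLE_MEMORY_OVERCOMMIT, MAX_CONCURRENCY,
    -- MAX_QUEUE_SIZE, QUEUE_TIMEOUT, CPU_HARD_LIMIT, SCAN_THREAD_NUM,
    -- MAX_REMOTE_SCAN_THREAD_NUM, MIN_REMOTE_SCAN_THREAD_NUM
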
@@ -1,56 +0,0 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package org.apache.doris.nereids.trees.expressions.functions.table;

import org.apache.doris.catalog.FunctionSignature;
import org.apache.doris.nereids.exceptions.AnalysisException;
import org.apache.doris.nereids.trees.expressions.Properties;
import org.apache.doris.nereids.trees.expressions.visitor.ExpressionVisitor;
import org.apache.doris.nereids.types.coercion.AnyDataType;
import org.apache.doris.tablefunction.TableValuedFunctionIf;
import org.apache.doris.tablefunction.WorkloadGroupsTableValuedFunction;

import java.util.Map;

/** workload_groups */
public class WorkloadGroups extends TableValuedFunction {
    public WorkloadGroups(Properties properties) {
        super("workload_groups", properties);
    }

    @Override
    public FunctionSignature customSignature() {
        return FunctionSignature.of(AnyDataType.INSTANCE_WITHOUT_INDEX, getArgumentsTypes());
    }

    @Override
    protected TableValuedFunctionIf toCatalogFunction() {
        try {
            Map<String, String> arguments = getTVFProperties().getMap();
            return new WorkloadGroupsTableValuedFunction(arguments);
        } catch (Throwable t) {
            throw new AnalysisException("Can not build WorkloadGroupsTableValuedFunction by "
                    + this + ": " + t.getMessage(), t);
        }
    }

    @Override
    public <R, C> R accept(ExpressionVisitor<R, C> visitor, C context) {
        return visitor.visitWorkloadGroups(this, context);
    }
}

@@ -32,7 +32,6 @@ import org.apache.doris.nereids.trees.expressions.functions.table.Numbers;
import org.apache.doris.nereids.trees.expressions.functions.table.S3;
import org.apache.doris.nereids.trees.expressions.functions.table.TableValuedFunction;
import org.apache.doris.nereids.trees.expressions.functions.table.Tasks;
import org.apache.doris.nereids.trees.expressions.functions.table.WorkloadGroups;

/** TableValuedFunctionVisitor */
public interface TableValuedFunctionVisitor<R, C> {
@@ -93,8 +92,4 @@ public interface TableValuedFunctionVisitor<R, C> {
    default R visitS3(S3 s3, C context) {
        return visitTableValuedFunction(s3, context);
    }

    default R visitWorkloadGroups(WorkloadGroups workloadGroups, C context) {
        return visitTableValuedFunction(workloadGroups, context);
    }
}

@@ -209,6 +209,7 @@ import org.apache.doris.thrift.TRestoreSnapshotRequest;
import org.apache.doris.thrift.TRestoreSnapshotResult;
import org.apache.doris.thrift.TRollbackTxnRequest;
import org.apache.doris.thrift.TRollbackTxnResult;
import org.apache.doris.thrift.TSchemaTableName;
import org.apache.doris.thrift.TShowProcessListRequest;
import org.apache.doris.thrift.TShowProcessListResult;
import org.apache.doris.thrift.TShowVariableRequest;
@@ -2289,15 +2290,16 @@ public class FrontendServiceImpl implements FrontendService.Iface {

    @Override
    public TFetchSchemaTableDataResult fetchSchemaTableData(TFetchSchemaTableDataRequest request) throws TException {
        switch (request.getSchemaTableName()) {
            case METADATA_TABLE:
                return MetadataGenerator.getMetadataTable(request);
            case SCHEMA_TABLE:
                return MetadataGenerator.getSchemaTableData(request);
            default:
                break;
        if (!request.isSetSchemaTableName()) {
            return MetadataGenerator.errorResult("Fetch schema table name is not set");
        }
        // tvf queries
        if (request.getSchemaTableName() == TSchemaTableName.METADATA_TABLE) {
            return MetadataGenerator.getMetadataTable(request);
        } else {
            // database information_schema's tables
            return MetadataGenerator.getSchemaTableData(request);
        }
        return MetadataGenerator.errorResult("Fetch schema table name is not set");
    }

    private TNetworkAddress getClientAddr() {

@@ -1,91 +0,0 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package org.apache.doris.tablefunction;

import org.apache.doris.catalog.Column;
import org.apache.doris.catalog.PrimitiveType;
import org.apache.doris.catalog.ScalarType;
import org.apache.doris.common.AnalysisException;
import org.apache.doris.thrift.TMetaScanRange;
import org.apache.doris.thrift.TMetadataType;
import org.apache.doris.thrift.TQueriesMetadataParams;

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;

import java.util.List;
import java.util.Map;

public class ActiveQueriesTableValuedFunction extends MetadataTableValuedFunction {
    public static final String NAME = "active_queries";

    private static final ImmutableList<Column> SCHEMA = ImmutableList.of(
            new Column("QueryId", ScalarType.createStringType()),
            new Column("StartTime", ScalarType.createStringType()),
            new Column("QueryTimeMs", PrimitiveType.BIGINT),
            new Column("WorkloadGroupId", PrimitiveType.BIGINT),
            new Column("Database", ScalarType.createStringType()),
            new Column("FrontendInstance", ScalarType.createStringType()),
            new Column("Sql", ScalarType.createStringType()));

    private static final ImmutableMap<String, Integer> COLUMN_TO_INDEX;

    static {
        ImmutableMap.Builder<String, Integer> builder = new ImmutableMap.Builder();
        for (int i = 0; i < SCHEMA.size(); i++) {
            builder.put(SCHEMA.get(i).getName().toLowerCase(), i);
        }
        COLUMN_TO_INDEX = builder.build();
    }

    public static Integer getColumnIndexFromColumnName(String columnName) {
        return COLUMN_TO_INDEX.get(columnName.toLowerCase());
    }

    public ActiveQueriesTableValuedFunction(Map<String, String> params) throws AnalysisException {
        if (params.size() != 0) {
            throw new AnalysisException("ActiveQueries table-valued-function does not support any params");
        }
    }

    @Override
    public TMetadataType getMetadataType() {
        return TMetadataType.QUERIES;
    }

    @Override
    public TMetaScanRange getMetaScanRange() {
        TMetaScanRange metaScanRange = new TMetaScanRange();
        metaScanRange.setMetadataType(TMetadataType.QUERIES);
        TQueriesMetadataParams queriesMetadataParams = new TQueriesMetadataParams();
        queriesMetadataParams.setClusterName("");
        queriesMetadataParams.setRelayToOtherFe(true);
        metaScanRange.setQueriesParams(queriesMetadataParams);
        return metaScanRange;
    }

    @Override
    public String getTableName() {
        return "ActiveQueriesTableValuedFunction";
    }

    @Override
    public List<Column> getTableColumns() throws AnalysisException {
        return SCHEMA;
    }
}

@@ -57,9 +57,9 @@ import org.apache.doris.thrift.TMetadataTableRequestParams;
import org.apache.doris.thrift.TMetadataType;
import org.apache.doris.thrift.TNetworkAddress;
import org.apache.doris.thrift.TPipelineWorkloadGroup;
import org.apache.doris.thrift.TQueriesMetadataParams;
import org.apache.doris.thrift.TQueryStatistics;
import org.apache.doris.thrift.TRow;
import org.apache.doris.thrift.TSchemaTableRequestParams;
import org.apache.doris.thrift.TStatus;
import org.apache.doris.thrift.TStatusCode;
import org.apache.doris.thrift.TTasksMetadataParams;
@@ -100,12 +100,35 @@ public class MetadataGenerator {

    private static final ImmutableMap<String, Integer> ACTIVE_QUERIES_COLUMN_TO_INDEX;

    private static final ImmutableList<Column> WORKLOAD_GROUPS_SCHEMA = ImmutableList.of(
            new Column("ID", ScalarType.BIGINT),
            new Column("NAME", ScalarType.createStringType()),
            new Column("CPU_SHARE", PrimitiveType.BIGINT),
            new Column("MEMORY_LIMIT", ScalarType.createStringType()),
            new Column("ENABLE_MEMORY_OVERCOMMIT", ScalarType.createStringType()),
            new Column("MAX_CONCURRENCY", PrimitiveType.BIGINT),
            new Column("MAX_QUEUE_SIZE", PrimitiveType.BIGINT),
            new Column("QUEUE_TIMEOUT", PrimitiveType.BIGINT),
            new Column("CPU_HARD_LIMIT", PrimitiveType.BIGINT),
            new Column("SCAN_THREAD_NUM", PrimitiveType.BIGINT),
            new Column("MAX_REMOTE_SCAN_THREAD_NUM", PrimitiveType.BIGINT),
            new Column("MIN_REMOTE_SCAN_THREAD_NUM", PrimitiveType.BIGINT));

    private static final ImmutableMap<String, Integer> WORKLOAD_GROUPS_COLUMN_TO_INDEX;

    static {
        ImmutableMap.Builder<String, Integer> builder = new ImmutableMap.Builder();
        ImmutableMap.Builder<String, Integer> activeQueriesbuilder = new ImmutableMap.Builder();
        for (int i = 0; i < ACTIVE_QUERIES_SCHEMA.size(); i++) {
            builder.put(ACTIVE_QUERIES_SCHEMA.get(i).getName().toLowerCase(), i);
            activeQueriesbuilder.put(ACTIVE_QUERIES_SCHEMA.get(i).getName().toLowerCase(), i);
        }
        ACTIVE_QUERIES_COLUMN_TO_INDEX = builder.build();
        ACTIVE_QUERIES_COLUMN_TO_INDEX = activeQueriesbuilder.build();

        ImmutableMap.Builder<String, Integer> workloadGroupsBuilder = new ImmutableMap.Builder();
        for (int i = 0; i < WORKLOAD_GROUPS_SCHEMA.size(); i++) {
            workloadGroupsBuilder.put(WORKLOAD_GROUPS_SCHEMA.get(i).getName().toLowerCase(), i);
        }
        WORKLOAD_GROUPS_COLUMN_TO_INDEX = workloadGroupsBuilder.build();
    }

    public static TFetchSchemaTableDataResult getMetadataTable(TFetchSchemaTableDataRequest request) throws TException {
@@ -127,9 +150,6 @@ public class MetadataGenerator {
            case FRONTENDS_DISKS:
                result = frontendsDisksMetadataResult(params);
                break;
            case WORKLOAD_GROUPS:
                result = workloadGroupsMetadataResult(params);
                break;
            case CATALOGS:
                result = catalogsMetadataResult(params);
                break;
@@ -156,23 +176,26 @@

    public static TFetchSchemaTableDataResult getSchemaTableData(TFetchSchemaTableDataRequest request)
            throws TException {
        if (!request.isSetMetadaTableParams() || !request.getMetadaTableParams().isSetMetadataType()) {
            return errorResult("Metadata table params is not set. ");
        if (!request.isSetSchemaTableParams()) {
            return errorResult("schema table params is not set.");
        }
        TFetchSchemaTableDataResult result;
        TMetadataTableRequestParams params = request.getMetadaTableParams();
        TSchemaTableRequestParams schemaTableParams = request.getSchemaTableParams();
        ImmutableMap<String, Integer> columnIndex;
        // todo(wb) move workload group/workload scheduler policy here
        switch (request.getMetadaTableParams().getMetadataType()) {
            case QUERIES:
                result = queriesMetadataResult(params, request);
        switch (request.getSchemaTableName()) {
            case ACTIVE_QUERIES:
                result = queriesMetadataResult(schemaTableParams, request);
                columnIndex = ACTIVE_QUERIES_COLUMN_TO_INDEX;
                break;
            case WORKLOAD_GROUPS:
                result = workloadGroupsMetadataResult(schemaTableParams);
                columnIndex = WORKLOAD_GROUPS_COLUMN_TO_INDEX;
                break;
            default:
                return errorResult("schema table params is not set.");
                return errorResult("invalid schema table name.");
        }
        if (result.getStatus().getStatusCode() == TStatusCode.OK) {
            filterColumns(result, params.getColumnsName(), columnIndex);
        if (schemaTableParams.isSetColumnsName() && result.getStatus().getStatusCode() == TStatusCode.OK) {
            filterColumns(result, schemaTableParams.getColumnsName(), columnIndex);
        }
        return result;
    }
@@ -406,7 +429,7 @@
        return result;
    }

    private static TFetchSchemaTableDataResult workloadGroupsMetadataResult(TMetadataTableRequestParams params) {
    private static TFetchSchemaTableDataResult workloadGroupsMetadataResult(TSchemaTableRequestParams params) {
        if (!params.isSetCurrentUserIdent()) {
            return errorResult("current user ident is not set.");
        }
@@ -427,13 +450,11 @@
            trow.addToColumnValue(new TCell().setLongVal(Long.valueOf(rGroupsInfo.get(6)))); // max queue size
            trow.addToColumnValue(new TCell().setLongVal(Long.valueOf(rGroupsInfo.get(7)))); // queue timeout
            trow.addToColumnValue(new TCell().setStringVal(rGroupsInfo.get(8))); // cpu hard limit
            trow.addToColumnValue(new TCell().setIntVal(Integer.parseInt(rGroupsInfo.get(9)))); // scan thread num
            trow.addToColumnValue(new TCell().setLongVal(Long.valueOf(rGroupsInfo.get(9)))); // scan thread num
            // max remote scan thread num
            trow.addToColumnValue(new TCell().setIntVal(Integer.parseInt(rGroupsInfo.get(10))));
            trow.addToColumnValue(new TCell().setLongVal(Long.valueOf(rGroupsInfo.get(10))));
            // min remote scan thread num
            trow.addToColumnValue(new TCell().setIntVal(Integer.parseInt(rGroupsInfo.get(11))));
            trow.addToColumnValue(new TCell().setLongVal(Long.valueOf(rGroupsInfo.get(12)))); // running query num
            trow.addToColumnValue(new TCell().setLongVal(Long.valueOf(rGroupsInfo.get(13)))); // waiting query num
            trow.addToColumnValue(new TCell().setLongVal(Long.valueOf(rGroupsInfo.get(11))));
            dataBatch.add(trow);
        }

@@ -516,13 +537,8 @@
        return trow;
    }

    private static TFetchSchemaTableDataResult queriesMetadataResult(TMetadataTableRequestParams params,
    private static TFetchSchemaTableDataResult queriesMetadataResult(TSchemaTableRequestParams tSchemaTableParams,
            TFetchSchemaTableDataRequest parentRequest) {
        if (!params.isSetQueriesMetadataParams()) {
            return errorResult("queries metadata param is not set.");
        }

        TQueriesMetadataParams queriesMetadataParams = params.getQueriesMetadataParams();
        TFetchSchemaTableDataResult result = new TFetchSchemaTableDataResult();

        String selfNode = Env.getCurrentEnv().getSelfNode().getHost();
@@ -563,16 +579,14 @@
        }

        /* Get the query results from other FE also */
        if (queriesMetadataParams.isRelayToOtherFe()) {
            TFetchSchemaTableDataRequest relayRequest = new TFetchSchemaTableDataRequest(parentRequest);
            TMetadataTableRequestParams relayParams = new TMetadataTableRequestParams(params);
            TQueriesMetadataParams relayQueryParams = new TQueriesMetadataParams(queriesMetadataParams);
        if (tSchemaTableParams.isReplayToOtherFe()) {
            TSchemaTableRequestParams replaySchemaTableParams = new TSchemaTableRequestParams(tSchemaTableParams);
            replaySchemaTableParams.setReplayToOtherFe(false);

            relayQueryParams.setRelayToOtherFe(false);
            relayParams.setQueriesMetadataParams(relayQueryParams);
            relayRequest.setMetadaTableParams(relayParams);
            TFetchSchemaTableDataRequest replayFetchSchemaTableReq = new TFetchSchemaTableDataRequest(parentRequest);
            replayFetchSchemaTableReq.setSchemaTableParams(replaySchemaTableParams);

            List<TFetchSchemaTableDataResult> relayResults = forwardToOtherFrontends(relayRequest);
            List<TFetchSchemaTableDataResult> relayResults = forwardToOtherFrontends(replayFetchSchemaTableReq);
            relayResults
                    .forEach(rs -> rs.getDataBatch()
                            .forEach(row -> dataBatch.add(row)));

@@ -40,8 +40,6 @@ public abstract class MetadataTableValuedFunction extends TableValuedFunctionIf
                return FrontendsDisksTableValuedFunction.getColumnIndexFromColumnName(columnName);
            case ICEBERG:
                return IcebergTableValuedFunction.getColumnIndexFromColumnName(columnName);
            case WORKLOAD_GROUPS:
                return WorkloadGroupsTableValuedFunction.getColumnIndexFromColumnName(columnName);
            case CATALOGS:
                return CatalogsTableValuedFunction.getColumnIndexFromColumnName(columnName);
            case MATERIALIZED_VIEWS:

@@ -62,8 +62,6 @@ public abstract class TableValuedFunctionIf {
                return new FrontendsTableValuedFunction(params);
            case FrontendsDisksTableValuedFunction.NAME:
                return new FrontendsDisksTableValuedFunction(params);
            case WorkloadGroupsTableValuedFunction.NAME:
                return new WorkloadGroupsTableValuedFunction(params);
            case CatalogsTableValuedFunction.NAME:
                return new CatalogsTableValuedFunction(params);
            case MvInfosTableValuedFunction.NAME:

@@ -1,99 +0,0 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package org.apache.doris.tablefunction;

import org.apache.doris.catalog.Column;
import org.apache.doris.catalog.PrimitiveType;
import org.apache.doris.catalog.ScalarType;
import org.apache.doris.nereids.exceptions.AnalysisException;
import org.apache.doris.resource.workloadgroup.QueryQueue;
import org.apache.doris.resource.workloadgroup.WorkloadGroup;
import org.apache.doris.thrift.TMetaScanRange;
import org.apache.doris.thrift.TMetadataType;

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;

import java.util.List;
import java.util.Map;

/**
 * The Implement of table valued function
 * workload_groups().
 */
public class WorkloadGroupsTableValuedFunction extends MetadataTableValuedFunction {
    public static final String NAME = "workload_groups";

    private static final ImmutableList<Column> SCHEMA = ImmutableList.of(
            new Column("Id", ScalarType.createType(PrimitiveType.BIGINT)),
            new Column("Name", ScalarType.createStringType()),
            new Column(WorkloadGroup.CPU_SHARE, ScalarType.createType(PrimitiveType.BIGINT)),
            new Column(WorkloadGroup.MEMORY_LIMIT, ScalarType.createStringType()),
            new Column(WorkloadGroup.ENABLE_MEMORY_OVERCOMMIT, ScalarType.createStringType()),
            new Column(WorkloadGroup.MAX_CONCURRENCY, ScalarType.createType(PrimitiveType.BIGINT)),
            new Column(WorkloadGroup.MAX_QUEUE_SIZE, ScalarType.createType(PrimitiveType.BIGINT)),
            new Column(WorkloadGroup.QUEUE_TIMEOUT, ScalarType.createType(PrimitiveType.BIGINT)),
            new Column(WorkloadGroup.CPU_HARD_LIMIT, ScalarType.createStringType()),
            new Column(WorkloadGroup.SCAN_THREAD_NUM, ScalarType.createType(PrimitiveType.INT)),
            new Column(WorkloadGroup.MAX_REMOTE_SCAN_THREAD_NUM, ScalarType.createType(PrimitiveType.INT)),
            new Column(WorkloadGroup.MIN_REMOTE_SCAN_THREAD_NUM, ScalarType.createType(PrimitiveType.INT)),
            new Column(QueryQueue.RUNNING_QUERY_NUM, ScalarType.createType(PrimitiveType.BIGINT)),
            new Column(QueryQueue.WAITING_QUERY_NUM, ScalarType.createType(PrimitiveType.BIGINT)));

    private static final ImmutableMap<String, Integer> COLUMN_TO_INDEX;

    static {
        ImmutableMap.Builder<String, Integer> builder = new ImmutableMap.Builder();
        for (int i = 0; i < SCHEMA.size(); i++) {
            builder.put(SCHEMA.get(i).getName().toLowerCase(), i);
        }
        COLUMN_TO_INDEX = builder.build();
    }

    public static Integer getColumnIndexFromColumnName(String columnName) {
        return COLUMN_TO_INDEX.get(columnName.toLowerCase());
    }

    public WorkloadGroupsTableValuedFunction(Map<String, String> params) throws AnalysisException {
        if (params.size() != 0) {
            throw new AnalysisException("workload groups table-valued-function does not support any params");
        }
    }

    @Override
    public TMetadataType getMetadataType() {
        return TMetadataType.WORKLOAD_GROUPS;
    }

    @Override
    public TMetaScanRange getMetaScanRange() {
        TMetaScanRange metaScanRange = new TMetaScanRange();
        metaScanRange.setMetadataType(TMetadataType.WORKLOAD_GROUPS);
        return metaScanRange;
    }

    @Override
    public String getTableName() {
        return "WorkloadGroupsTableValuedFunction";
    }

    @Override
    public List<Column> getTableColumns() throws AnalysisException {
        return SCHEMA;
    }
}

@@ -87,7 +87,7 @@ public class RefreshCatalogTest extends TestWithFeService {
        List<String> dbNames2 = test1.getDbNames();
        Assertions.assertEquals(4, dbNames2.size());
        ExternalInfoSchemaDatabase infoDb = (ExternalInfoSchemaDatabase) test1.getDb(InfoSchemaDb.DATABASE_NAME).get();
        Assertions.assertEquals(29, infoDb.getTables().size());
        Assertions.assertEquals(30, infoDb.getTables().size());
        TestExternalDatabase testDb = (TestExternalDatabase) test1.getDb("db1").get();
        Assertions.assertEquals(2, testDb.getTables().size());

@@ -96,7 +96,7 @@ public class RefreshCatalogTest extends TestWithFeService {
        CatalogMgr mgr2 = GsonUtils.GSON.fromJson(json, CatalogMgr.class);
        test1 = mgr2.getCatalog("test1");
        infoDb = (ExternalInfoSchemaDatabase) test1.getDb(InfoSchemaDb.DATABASE_NAME).get();
        Assertions.assertEquals(29, infoDb.getTables().size());
        Assertions.assertEquals(30, infoDb.getTables().size());
        testDb = (TestExternalDatabase) test1.getDb("db1").get();
        Assertions.assertEquals(2, testDb.getTables().size());
    }

@@ -127,7 +127,8 @@ enum TSchemaTableType {
    SCH_METADATA_NAME_IDS,
    SCH_PROFILING,
    SCH_BACKEND_ACTIVE_TASKS,
    SCH_ACTIVE_QUERIES;
    SCH_ACTIVE_QUERIES,
    SCH_WORKLOAD_GROUPS;
}

enum THdfsCompression {

@@ -901,7 +901,8 @@ struct TInitExternalCtlMetaResult {
enum TSchemaTableName {
    // BACKENDS = 0,
    METADATA_TABLE = 1, // tvf
    SCHEMA_TABLE = 2, // db information_schema's table
    ACTIVE_QUERIES = 2, // db information_schema's table
    WORKLOAD_GROUPS = 3, // db information_schema's table
}

struct TMetadataTableRequestParams {
@@ -917,10 +918,17 @@ struct TMetadataTableRequestParams {
    10: optional PlanNodes.TTasksMetadataParams tasks_metadata_params
}

struct TSchemaTableRequestParams {
    1: optional list<string> columns_name
    2: optional Types.TUserIdentity current_user_ident
    3: optional bool replay_to_other_fe
}

struct TFetchSchemaTableDataRequest {
    1: optional string cluster_name
    2: optional TSchemaTableName schema_table_name
    3: optional TMetadataTableRequestParams metada_table_params
    3: optional TMetadataTableRequestParams metada_table_params // used for tvf
    4: optional TSchemaTableRequestParams schema_table_params // used for request db information_schema's table
}

struct TFetchSchemaTableDataResult {

@@ -703,14 +703,12 @@ enum TSortType {
enum TMetadataType {
    ICEBERG,
    BACKENDS,
    WORKLOAD_GROUPS,
    FRONTENDS,
    CATALOGS,
    FRONTENDS_DISKS,
    MATERIALIZED_VIEWS,
    JOBS,
    TASKS,
    QUERIES,
    WORKLOAD_SCHED_POLICY
}

@@ -52,6 +52,7 @@ tables
triggers
user_privileges
views
workload_groups

-- !auto_default_t --
0

@@ -221,6 +221,7 @@ tables
triggers
user_privileges
views
workload_groups

-- !dt --
2023-06-17T10:00 2023-06-17T10:00:01.100 2023-06-17T10:00:02.220 2023-06-17T10:00:03.333 2023-06-17T10:00:04.444400 2023-06-17T10:00:05.555550 2023-06-17T10:00:06.666666

@@ -189,6 +189,7 @@ tables
triggers
user_privileges
views
workload_groups

-- !test_insert1 --
doris1 18

@@ -221,6 +221,7 @@ tables
triggers
user_privileges
views
workload_groups

-- !dt --
2023-06-17T10:00 2023-06-17T10:00:01 2023-06-17T10:00:02 2023-06-17T10:00:03 2023-06-17T10:00:04 2023-06-17T10:00:05 2023-06-17T10:00:06

@@ -117,7 +117,7 @@ suite("test_crud_wlg") {
    ");"
    sql "set workload_group=test_group;"

    qt_show_1 "select name,cpu_share,memory_limit,enable_memory_overcommit,max_concurrency,max_queue_size,queue_timeout,cpu_hard_limit,scan_thread_num from workload_groups() where name in ('normal','test_group') order by name;"
    qt_show_1 "select name,cpu_share,memory_limit,enable_memory_overcommit,max_concurrency,max_queue_size,queue_timeout,cpu_hard_limit,scan_thread_num from information_schema.workload_groups where name in ('normal','test_group') order by name;"

    // test memory_limit
    test {
@@ -128,7 +128,7 @@ suite("test_crud_wlg") {

    sql "alter workload group test_group properties ( 'memory_limit'='11%' );"
    qt_mem_limit_1 """ select count(1) from ${table_name} """
    qt_mem_limit_2 "select name,cpu_share,memory_limit,enable_memory_overcommit,max_concurrency,max_queue_size,queue_timeout,cpu_hard_limit,scan_thread_num from workload_groups() where name in ('normal','test_group') order by name;"
    qt_mem_limit_2 "select name,cpu_share,memory_limit,enable_memory_overcommit,max_concurrency,max_queue_size,queue_timeout,cpu_hard_limit,scan_thread_num from information_schema.workload_groups where name in ('normal','test_group') order by name;"

    // test enable_memory_overcommit
    test {
@@ -141,7 +141,7 @@ suite("test_crud_wlg") {
    qt_mem_overcommit_1 """ select count(1) from ${table_name} """
    sql "alter workload group test_group properties ( 'enable_memory_overcommit'='false' );"
    qt_mem_overcommit_2 """ select count(1) from ${table_name} """
    qt_mem_overcommit_3 "select name,cpu_share,memory_limit,enable_memory_overcommit,max_concurrency,max_queue_size,queue_timeout,cpu_hard_limit,scan_thread_num from workload_groups() where name in ('normal','test_group') order by name;"
    qt_mem_overcommit_3 "select name,cpu_share,memory_limit,enable_memory_overcommit,max_concurrency,max_queue_size,queue_timeout,cpu_hard_limit,scan_thread_num from information_schema.workload_groups where name in ('normal','test_group') order by name;"

    // test cpu_hard_limit
    test {
@@ -160,7 +160,7 @@ suite("test_crud_wlg") {

    sql "alter workload group test_group properties ( 'cpu_hard_limit'='20%' );"
    qt_cpu_hard_limit_1 """ select count(1) from ${table_name} """
    qt_cpu_hard_limit_2 "select name,cpu_share,memory_limit,enable_memory_overcommit,max_concurrency,max_queue_size,queue_timeout,cpu_hard_limit,scan_thread_num from workload_groups() where name in ('normal','test_group') order by name;"
    qt_cpu_hard_limit_2 "select name,cpu_share,memory_limit,enable_memory_overcommit,max_concurrency,max_queue_size,queue_timeout,cpu_hard_limit,scan_thread_num from information_schema.workload_groups where name in ('normal','test_group') order by name;"

    // test query queue
    test {
@@ -183,7 +183,7 @@ suite("test_crud_wlg") {

    sql "alter workload group test_group properties ( 'max_concurrency'='100' );"
    qt_queue_1 """ select count(1) from ${table_name} """
    qt_show_queue "select name,cpu_share,memory_limit,enable_memory_overcommit,max_concurrency,max_queue_size,queue_timeout,cpu_hard_limit,scan_thread_num from workload_groups() where name in ('normal','test_group') order by name;"
    qt_show_queue "select name,cpu_share,memory_limit,enable_memory_overcommit,max_concurrency,max_queue_size,queue_timeout,cpu_hard_limit,scan_thread_num from information_schema.workload_groups where name in ('normal','test_group') order by name;"

    // test create group failed
    // failed for cpu_share
@@ -261,7 +261,7 @@ suite("test_crud_wlg") {
    }

    // test show workload groups
    qt_select_tvf_1 "select name,cpu_share,memory_limit,enable_memory_overcommit,max_concurrency,max_queue_size,queue_timeout,cpu_hard_limit,scan_thread_num from workload_groups() where name in ('normal','test_group') order by name;"
    qt_select_tvf_1 "select name,cpu_share,memory_limit,enable_memory_overcommit,max_concurrency,max_queue_size,queue_timeout,cpu_hard_limit,scan_thread_num from information_schema.workload_groups where name in ('normal','test_group') order by name;"

    // test auth
    sql """drop user if exists test_wlg_user"""