From 759f1da32e223e4f71ea5f271be7eaf43af58fff Mon Sep 17 00:00:00 2001
From: Tiewei Fang <43782773+BePPPower@users.noreply.github.com>
Date: Fri, 7 Apr 2023 08:30:42 +0800
Subject: [PATCH] [Enhancement](Backends) add `HostName` field in backends table and delete backends table in information_schema (#18156)

1. Add `HostName` field for `show backends` statement and `backends()` tvf.
2. Delete the `backends` table in the `information_schema` database
---
 be/src/exec/CMakeLists.txt | 1 -
 be/src/exec/schema_scanner.cpp | 3 -
 be/src/exec/schema_scanner.h | 1 -
 .../schema_backends_scanner.cpp | 187 ------------------
 .../schema_scanner/schema_backends_scanner.h | 43 ----
 be/src/vec/exec/vschema_scan_node.cpp | 5 -
 .../admin-manual/system-table/backends.md | 87 --------
 .../sql-functions/table-functions/backends.md | 122 ++++++++++++
 .../Manipulation/EXPORT.md | 2 +-
 docs/sidebars.json | 6 +-
 .../admin-manual/system-table/backends.md | 89 ---------
 .../sql-functions/table-functions/backends.md | 122 ++++++++++++
 .../Manipulation/EXPORT.md | 4 +-
 .../Data-Manipulation-Statements/OUTFILE.md | 4 +-
 .../doris/analysis/SchemaTableType.java | 3 +-
 .../doris/analysis/ShowBackendsStmt.java | 4 -
 .../doris/analysis/ShowFrontendsStmt.java | 4 -
 .../org/apache/doris/catalog/SchemaTable.java | 47 -----
 .../doris/common/proc/FrontendsProcNode.java | 2 -
 .../apache/doris/planner/SchemaScanNode.java | 2 -
 .../org/apache/doris/qe/ShowExecutor.java | 7 -
 .../doris/service/FrontendServiceImpl.java | 2 -
 .../BackendsTableValuedFunction.java | 25 +--
 .../tablefunction/MetadataGenerator.java | 106 +---------
 gensrc/thrift/Descriptors.thrift | 1 -
 gensrc/thrift/FrontendService.thrift | 2 +-
 gensrc/thrift/PlanNodes.thrift | 9 +-
 .../system/test_query_sys_tables.out | 3 -
 .../query_p0/system/test_query_sys_tables.out | 3 -
 .../test_compaction_agg_keys.groovy | 2 +-
 ...est_compaction_agg_keys_with_delete.groovy | 2 +-
 .../test_compaction_dup_keys.groovy | 2 +-
 ...est_compaction_dup_keys_with_delete.groovy | 2 +-
 .../test_compaction_uniq_keys.groovy | 2 +-
 ...test_compaction_uniq_keys_row_store.groovy | 2 +-
 ...st_compaction_uniq_keys_with_delete.groovy | 2 +-
 .../test_vertical_compaction_agg_keys.groovy | 6 +-
 .../test_vertical_compaction_dup_keys.groovy | 6 +-
 .../test_vertical_compaction_uniq_keys.groovy | 6 +-
 .../test_backends_tvf.groovy} | 8 +-
 .../suites/demo_p0/httpTest_action.groovy | 4 +-
 .../test_map_load_and_compaction.groovy | 4 +-
 .../system/test_query_sys_tables.groovy | 4 -
 .../information_schema.groovy | 4 +-
 .../system/test_query_sys_tables.groovy | 4 -
 .../schema_change/test_number_overflow.groovy | 2 +-
 .../test_agg_keys_schema_change_datev2.groovy | 2 +-
 .../test_dup_keys_schema_change_datev2.groovy | 2 +-
 ...st_agg_keys_schema_change_decimalv3.groovy | 2 +-
 .../test_agg_keys_schema_change.groovy | 2 +-
 .../test_agg_mv_schema_change.groovy | 2 +-
 .../test_agg_rollup_schema_change.groovy | 2 +-
 .../test_agg_vals_schema_change.groovy | 2 +-
 .../test_dup_keys_schema_change.groovy | 2 +-
 .../test_dup_mv_schema_change.groovy | 2 +-
 .../test_dup_rollup_schema_change.groovy | 2 +-
 .../test_dup_vals_schema_change.groovy | 2 +-
 .../test_uniq_keys_schema_change.groovy | 2 +-
 .../test_uniq_mv_schema_change.groovy | 2 +-
 .../test_uniq_rollup_schema_change.groovy | 2 +-
 .../test_uniq_vals_schema_change.groovy | 2 +-
 .../test_varchar_schema_change.groovy | 2 +-
 .../test_segcompaction_agg_keys.groovy | 2 +-
 .../test_segcompaction_agg_keys_index.groovy | 2 +-
 .../test_segcompaction_dup_keys.groovy | 2 +-
.../test_segcompaction_dup_keys_index.groovy | 2 +- .../test_segcompaction_unique_keys.groovy | 2 +- ...est_segcompaction_unique_keys_index.groovy | 2 +- .../test_segcompaction_unique_keys_mow.groovy | 2 +- ...segcompaction_unique_keys_mow_index.groovy | 2 +- 70 files changed, 324 insertions(+), 682 deletions(-) delete mode 100644 be/src/exec/schema_scanner/schema_backends_scanner.cpp delete mode 100644 be/src/exec/schema_scanner/schema_backends_scanner.h delete mode 100644 docs/en/docs/admin-manual/system-table/backends.md create mode 100644 docs/en/docs/sql-manual/sql-functions/table-functions/backends.md delete mode 100644 docs/zh-CN/docs/admin-manual/system-table/backends.md create mode 100644 docs/zh-CN/docs/sql-manual/sql-functions/table-functions/backends.md rename regression-test/suites/{correctness/test_backends_table.groovy => correctness_p0/table_valued_function/test_backends_tvf.groovy} (78%) diff --git a/be/src/exec/CMakeLists.txt b/be/src/exec/CMakeLists.txt index 5057a24ed8..2ce6e15697 100644 --- a/be/src/exec/CMakeLists.txt +++ b/be/src/exec/CMakeLists.txt @@ -51,7 +51,6 @@ set(EXEC_FILES schema_scanner/schema_files_scanner.cpp schema_scanner/schema_partitions_scanner.cpp schema_scanner/schema_rowsets_scanner.cpp - schema_scanner/schema_backends_scanner.cpp scan_node.cpp odbc_connector.cpp table_connector.cpp diff --git a/be/src/exec/schema_scanner.cpp b/be/src/exec/schema_scanner.cpp index 81b0824a97..ebca8591a5 100644 --- a/be/src/exec/schema_scanner.cpp +++ b/be/src/exec/schema_scanner.cpp @@ -19,7 +19,6 @@ #include -#include "exec/schema_scanner/schema_backends_scanner.h" #include "exec/schema_scanner/schema_charsets_scanner.h" #include "exec/schema_scanner/schema_collations_scanner.h" #include "exec/schema_scanner/schema_columns_scanner.h" @@ -128,8 +127,6 @@ SchemaScanner* SchemaScanner::create(TSchemaTableType::type type) { return new (std::nothrow) SchemaPartitionsScanner(); case TSchemaTableType::SCH_ROWSETS: return new (std::nothrow) SchemaRowsetsScanner(); - case TSchemaTableType::SCH_BACKENDS: - return new (std::nothrow) SchemaBackendsScanner(); default: return new (std::nothrow) SchemaDummyScanner(); break; diff --git a/be/src/exec/schema_scanner.h b/be/src/exec/schema_scanner.h index 85ff0093d7..3602defbd0 100644 --- a/be/src/exec/schema_scanner.h +++ b/be/src/exec/schema_scanner.h @@ -47,7 +47,6 @@ struct SchemaScannerParam { const std::string* ip; // frontend ip int32_t port; // frontend thrift port int64_t thread_id; - const std::vector* table_structure; const std::string* catalog; std::unique_ptr profile; diff --git a/be/src/exec/schema_scanner/schema_backends_scanner.cpp b/be/src/exec/schema_scanner/schema_backends_scanner.cpp deleted file mode 100644 index e66b656245..0000000000 --- a/be/src/exec/schema_scanner/schema_backends_scanner.cpp +++ /dev/null @@ -1,187 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -#include "exec/schema_scanner/schema_backends_scanner.h" - -#include -#include -#include - -#include "common/status.h" -#include "exec/schema_scanner.h" -#include "gen_cpp/FrontendService.h" -#include "runtime/client_cache.h" -#include "runtime/define_primitive_type.h" -#include "runtime/exec_env.h" -#include "runtime/primitive_type.h" -#include "util/thrift_rpc_helper.h" -#include "vec/common/string_ref.h" - -namespace doris { - -std::vector SchemaBackendsScanner::_s_tbls_columns = { - // name, type, size - {"BackendId", TYPE_BIGINT, sizeof(StringRef), false}, - {"TabletNum", TYPE_BIGINT, sizeof(StringRef), false}, - {"HeartbeatPort", TYPE_INT, sizeof(int), false}, - {"BePort", TYPE_INT, sizeof(int), false}, - {"HttpPort", TYPE_INT, sizeof(int), false}, - {"BrpcPort", TYPE_INT, sizeof(int), false}, - {"Cluster", TYPE_VARCHAR, sizeof(StringRef), false}, - {"IP", TYPE_VARCHAR, sizeof(StringRef), false}, - {"LastStartTime", TYPE_VARCHAR, sizeof(StringRef), false}, - {"LastHeartbeat", TYPE_VARCHAR, sizeof(StringRef), false}, - {"Alive", TYPE_VARCHAR, sizeof(StringRef), false}, - {"SystemDecommissioned", TYPE_VARCHAR, sizeof(StringRef), false}, - {"ClusterDecommissioned", TYPE_VARCHAR, sizeof(StringRef), false}, - {"DataUsedCapacity", TYPE_BIGINT, sizeof(int64_t), false}, - {"AvailCapacity", TYPE_BIGINT, sizeof(int64_t), false}, - {"TotalCapacity", TYPE_BIGINT, sizeof(int64_t), false}, - {"UsedPct", TYPE_DOUBLE, sizeof(double), false}, - {"MaxDiskUsedPct", TYPE_DOUBLE, sizeof(double), false}, - {"RemoteUsedCapacity", TYPE_BIGINT, sizeof(int64_t), false}, - {"Tag", TYPE_VARCHAR, sizeof(StringRef), false}, - {"ErrMsg", TYPE_VARCHAR, sizeof(StringRef), false}, - {"Version", TYPE_VARCHAR, sizeof(StringRef), false}, - {"Status", TYPE_VARCHAR, sizeof(StringRef), false}, -}; - -SchemaBackendsScanner::SchemaBackendsScanner() - : SchemaScanner(_s_tbls_columns, TSchemaTableType::SCH_BACKENDS) {} - -Status SchemaBackendsScanner::start(RuntimeState* state) { - if (!_is_init) { - return Status::InternalError("used before initialized."); - } - RETURN_IF_ERROR(_fetch_backends_info()); - RETURN_IF_ERROR(_set_col_name_to_type()); - return Status::OK(); -} - -Status SchemaBackendsScanner::get_next_block(vectorized::Block* block, bool* eos) { - if (!_is_init) { - return Status::InternalError("Used before initialized."); - } - if (nullptr == block || nullptr == eos) { - return Status::InternalError("input pointer is nullptr."); - } - *eos = true; - return _fill_block_impl(block); -} - -Status SchemaBackendsScanner::_fill_block_impl(vectorized::Block* block) { - SCOPED_TIMER(_fill_block_timer); - auto row_num = _batch_data.size(); - std::vector null_datas(row_num, nullptr); - std::vector datas(row_num); - - for (size_t col_idx = 0; col_idx < _columns.size(); ++col_idx) { - auto it = _col_name_to_type.find(_columns[col_idx].name); - if (it == _col_name_to_type.end()) { - if (_columns[col_idx].is_null) { - fill_dest_column_for_range(block, col_idx, null_datas); - } else { - return Status::InternalError( - "column {} is not found in BE, and {} is not nullable.", - _columns[col_idx].name, 
_columns[col_idx].name); - } - } else if (it->second == TYPE_BIGINT) { - for (int row_idx = 0; row_idx < row_num; ++row_idx) { - datas[row_idx] = &_batch_data[row_idx].column_value[col_idx].longVal; - } - fill_dest_column_for_range(block, col_idx, datas); - } else if (it->second == TYPE_INT) { - for (int row_idx = 0; row_idx < row_num; ++row_idx) { - datas[row_idx] = &_batch_data[row_idx].column_value[col_idx].intVal; - } - fill_dest_column_for_range(block, col_idx, datas); - } else if (it->second == TYPE_VARCHAR) { - for (int row_idx = 0; row_idx < row_num; ++row_idx) { - datas[row_idx] = &_batch_data[row_idx].column_value[col_idx].stringVal; - } - fill_dest_column_for_range(block, col_idx, datas); - } else if (it->second == TYPE_DOUBLE) { - for (int row_idx = 0; row_idx < row_num; ++row_idx) { - datas[row_idx] = &_batch_data[row_idx].column_value[col_idx].doubleVal; - } - fill_dest_column_for_range(block, col_idx, datas); - } else { - // other type - } - } - return Status::OK(); -} - -Status SchemaBackendsScanner::_fetch_backends_info() { - TFetchSchemaTableDataRequest request; - request.cluster_name = ""; - request.__isset.cluster_name = true; - request.schema_table_name = TSchemaTableName::BACKENDS; - request.__isset.schema_table_name = true; - TNetworkAddress master_addr = ExecEnv::GetInstance()->master_info()->network_address; - // TODO(ftw): if result will too large? - TFetchSchemaTableDataResult result; - - RETURN_IF_ERROR(ThriftRpcHelper::rpc( - master_addr.hostname, master_addr.port, - [&request, &result](FrontendServiceConnection& client) { - client->fetchSchemaTableData(result, request); - }, - config::txn_commit_rpc_timeout_ms)); - - Status status(result.status); - if (!status.ok()) { - LOG(WARNING) << "fetch schema table data from master failed, errmsg=" << status; - return status; - } - _batch_data = std::move(result.data_batch); - return Status::OK(); -} - -Status SchemaBackendsScanner::_set_col_name_to_type() { - _col_name_to_type.emplace("BackendId", TYPE_BIGINT); - _col_name_to_type.emplace("TabletNum", TYPE_BIGINT); - - _col_name_to_type.emplace("HeartbeatPort", TYPE_INT); - _col_name_to_type.emplace("BePort", TYPE_INT); - _col_name_to_type.emplace("HttpPort", TYPE_INT); - _col_name_to_type.emplace("BrpcPort", TYPE_INT); - - _col_name_to_type.emplace("Cluster", TYPE_VARCHAR); - _col_name_to_type.emplace("IP", TYPE_VARCHAR); - _col_name_to_type.emplace("LastStartTime", TYPE_VARCHAR); - _col_name_to_type.emplace("LastHeartbeat", TYPE_VARCHAR); - _col_name_to_type.emplace("Alive", TYPE_VARCHAR); - _col_name_to_type.emplace("SystemDecommissioned", TYPE_VARCHAR); - _col_name_to_type.emplace("ClusterDecommissioned", TYPE_VARCHAR); - - _col_name_to_type.emplace("DataUsedCapacity", TYPE_BIGINT); - _col_name_to_type.emplace("AvailCapacity", TYPE_BIGINT); - _col_name_to_type.emplace("TotalCapacity", TYPE_BIGINT); - - _col_name_to_type.emplace("UsedPct", TYPE_DOUBLE); - _col_name_to_type.emplace("MaxDiskUsedPct", TYPE_DOUBLE); - - _col_name_to_type.emplace("RemoteUsedCapacity", TYPE_BIGINT); - - _col_name_to_type.emplace("Tag", TYPE_VARCHAR); - _col_name_to_type.emplace("ErrMsg", TYPE_VARCHAR); - _col_name_to_type.emplace("Version", TYPE_VARCHAR); - _col_name_to_type.emplace("Status", TYPE_VARCHAR); - return Status::OK(); -} -} // namespace doris diff --git a/be/src/exec/schema_scanner/schema_backends_scanner.h b/be/src/exec/schema_scanner/schema_backends_scanner.h deleted file mode 100644 index 82ca121561..0000000000 --- a/be/src/exec/schema_scanner/schema_backends_scanner.h +++ 
/dev/null @@ -1,43 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -#pragma once - -#include "common/status.h" -#include "exec/schema_scanner.h" -namespace doris { - -class SchemaBackendsScanner : public SchemaScanner { -public: - SchemaBackendsScanner(); - ~SchemaBackendsScanner() override = default; - - Status start(RuntimeState* state) override; - Status get_next_block(vectorized::Block* block, bool* eos) override; - -private: - Status _fill_block_impl(vectorized::Block* block); - Status _fetch_backends_info(); - Status _set_col_name_to_type(); - - // column_name -> type, set by _set_col_name_to_type() - std::unordered_map _col_name_to_type; - static std::vector _s_tbls_columns; - - std::vector _batch_data; -}; -} // namespace doris diff --git a/be/src/vec/exec/vschema_scan_node.cpp b/be/src/vec/exec/vschema_scan_node.cpp index ad3f7f3310..65dcd8f17e 100644 --- a/be/src/vec/exec/vschema_scan_node.cpp +++ b/be/src/vec/exec/vschema_scan_node.cpp @@ -84,11 +84,6 @@ Status VSchemaScanNode::init(const TPlanNode& tnode, RuntimeState* state) { _scanner_param.thread_id = tnode.schema_scan_node.thread_id; } - if (tnode.schema_scan_node.__isset.table_structure) { - _scanner_param.table_structure = _pool->add( - new std::vector(tnode.schema_scan_node.table_structure)); - } - if (tnode.schema_scan_node.__isset.catalog) { _scanner_param.catalog = _pool->add(new std::string(tnode.schema_scan_node.catalog)); } diff --git a/docs/en/docs/admin-manual/system-table/backends.md b/docs/en/docs/admin-manual/system-table/backends.md deleted file mode 100644 index 669a064b31..0000000000 --- a/docs/en/docs/admin-manual/system-table/backends.md +++ /dev/null @@ -1,87 +0,0 @@ ---- -{ - "title": "backends", - "language": "en" -} ---- - - - -## backends - -### Name - -backends - -### description - -`backends` is a built-in system table of doris, which is stored under the information_schema database. You can view the `BE` node information through the `backends` system table. 
- -The `backends` table schema is: -```sql -MySQL [information_schema]> desc information_schema.backends; -+-----------------------+-------------+------+-------+---------+-------+ -| Field | Type | Null | Key | Default | Extra | -+-----------------------+-------------+------+-------+---------+-------+ -| BackendId | BIGINT | Yes | false | NULL | | -| Cluster | VARCHAR(40) | Yes | false | NULL | | -| IP | VARCHAR(40) | Yes | false | NULL | | -| HeartbeatPort | INT | Yes | false | NULL | | -| BePort | INT | Yes | false | NULL | | -| HttpPort | INT | Yes | false | NULL | | -| BrpcPort | INT | Yes | false | NULL | | -| LastStartTime | VARCHAR(40) | Yes | false | NULL | | -| LastHeartbeat | VARCHAR(40) | Yes | false | NULL | | -| Alive | VARCHAR(40) | Yes | false | NULL | | -| SystemDecommissioned | VARCHAR(40) | Yes | false | NULL | | -| ClusterDecommissioned | VARCHAR(40) | Yes | false | NULL | | -| TabletNum | BIGINT | Yes | false | NULL | | -| DataUsedCapacity | BIGINT | Yes | false | NULL | | -| AvailCapacity | BIGINT | Yes | false | NULL | | -| TotalCapacity | BIGINT | Yes | false | NULL | | -| UsedPct | DOUBLE | Yes | false | NULL | | -| MaxDiskUsedPct | DOUBLE | Yes | false | NULL | | -| RemoteUsedCapacity | BIGINT | Yes | false | NULL | | -| Tag | VARCHAR(40) | Yes | false | NULL | | -| ErrMsg | VARCHAR(40) | Yes | false | NULL | | -| Version | VARCHAR(40) | Yes | false | NULL | | -| Status | VARCHAR(40) | Yes | false | NULL | | -+-----------------------+-------------+------+-------+---------+-------+ -``` -backends 系统表展示出来的信息基本与 `show backends` 语句展示出的信息一致。但是backends系统表的各个字段类型更加明确,且可以利用backends 系统表去做过滤、join等操作。 - -The information displayed by the `backends` system table is basically consistent with the information displayed by the `show backends` statement. However, the types of each field in the `backends` system table are more specific, and you can use the `backends` system table to perform operations such as filtering and joining. 
-
-### Example
-
-```sql
-MySQL [information_schema]> select * from information_schema.backends;
-+-----------+-----------------+-----------+---------------+--------+----------+----------+---------------------+---------------------+-------+----------------------+-----------------------+-----------+------------------+---------------+---------------+--------------------+------------------+--------------------+--------------------------+--------+-----------------------------+-------------------------------------------------------------------------------------------------------------------------------+
-| BackendId | Cluster         | IP        | HeartbeatPort | BePort | HttpPort | BrpcPort | LastStartTime       | LastHeartbeat       | Alive | SystemDecommissioned | ClusterDecommissioned | TabletNum | DataUsedCapacity | AvailCapacity | TotalCapacity | UsedPct            | MaxDiskUsedPct   | RemoteUsedCapacity | Tag                      | ErrMsg | Version                     | Status                                                                                                                          |
-+-----------+-----------------+-----------+---------------+--------+----------+----------+---------------------+---------------------+-------+----------------------+-----------------------+-----------+------------------+---------------+---------------+--------------------+------------------+--------------------+--------------------------+--------+-----------------------------+-------------------------------------------------------------------------------------------------------------------------------+
-| 10757     | default_cluster | 127.0.0.1 | 9159          | 9169   | 8149     | 8169     | 2022-11-24 11:16:31 | 2022-11-24 12:02:57 | true  | false                | false                 | 14        | 0                | 941359747073  | 3170529116160 | 70.309064746482065 | 70.3090647465136 | 0                  | {"location" : "default"} |        | doris-0.0.0-trunk-cc9545359 | {"lastSuccessReportTabletsTime":"2022-11-24 12:02:06","lastStreamLoadTime":-1,"isQueryDisabled":false,"isLoadDisabled":false}   |
-+-----------+-----------------+-----------+---------------+--------+----------+----------+---------------------+---------------------+-------+----------------------+-----------------------+-----------+------------------+---------------+---------------+--------------------+------------------+--------------------+--------------------------+--------+-----------------------------+-------------------------------------------------------------------------------------------------------------------------------+
-```
-
-### KeyWords
-
-    backends, information_schema
-
-### Best Practice
\ No newline at end of file
diff --git a/docs/en/docs/sql-manual/sql-functions/table-functions/backends.md b/docs/en/docs/sql-manual/sql-functions/table-functions/backends.md
new file mode 100644
index 0000000000..a0c872a7e1
--- /dev/null
+++ b/docs/en/docs/sql-manual/sql-functions/table-functions/backends.md
@@ -0,0 +1,122 @@
+---
+{
+    "title": "backends",
+    "language": "en"
+}
+---
+
+
+
+## `backends`
+
+### Name
+
+
+
+backends
+
+
+
+### description
+
+Table-valued function that generates a temporary table named `backends`. This TVF is used to view the information of the BE nodes in the current Doris cluster.
+
+This function is used in the `FROM` clause.
+
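For example, a minimal usage sketch (an illustration, not part of the patched docs; it assumes only the columns listed in the schema below, and that `Alive` is a TEXT column holding `'true'`/`'false'`):

```sql
-- List live BE nodes with their host names and a derived disk-usage figure,
-- using backends() directly in the FROM clause like a regular table.
SELECT BackendId,
       HostName,
       TotalCapacity - AvailCapacity AS UsedBytes
FROM backends()
WHERE Alive = 'true'
ORDER BY UsedBytes DESC;
```

Because the TVF returns typed columns (`BIGINT`, `DOUBLE`, `TEXT`), the subtraction and ordering above need no casts; this is the filtering and joining advantage over `show backends` that the paragraph after the schema describes.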
+syntax:
+
+```
+backends();
+```
+
+The table schema of the `backends()` tvf:
+```
+mysql> desc function backends();
++-------------------------+--------+------+-------+---------+-------+
+| Field                   | Type   | Null | Key   | Default | Extra |
++-------------------------+--------+------+-------+---------+-------+
+| BackendId               | BIGINT | No   | false | NULL    | NONE  |
+| Cluster                 | TEXT   | No   | false | NULL    | NONE  |
+| IP                      | TEXT   | No   | false | NULL    | NONE  |
+| HostName                | TEXT   | No   | false | NULL    | NONE  |
+| HeartbeatPort           | INT    | No   | false | NULL    | NONE  |
+| BePort                  | INT    | No   | false | NULL    | NONE  |
+| HttpPort                | INT    | No   | false | NULL    | NONE  |
+| BrpcPort                | INT    | No   | false | NULL    | NONE  |
+| LastStartTime           | TEXT   | No   | false | NULL    | NONE  |
+| LastHeartbeat           | TEXT   | No   | false | NULL    | NONE  |
+| Alive                   | TEXT   | No   | false | NULL    | NONE  |
+| SystemDecommissioned    | TEXT   | No   | false | NULL    | NONE  |
+| ClusterDecommissioned   | TEXT   | No   | false | NULL    | NONE  |
+| TabletNum               | BIGINT | No   | false | NULL    | NONE  |
+| DataUsedCapacity        | BIGINT | No   | false | NULL    | NONE  |
+| AvailCapacity           | BIGINT | No   | false | NULL    | NONE  |
+| TotalCapacity           | BIGINT | No   | false | NULL    | NONE  |
+| UsedPct                 | DOUBLE | No   | false | NULL    | NONE  |
+| MaxDiskUsedPct          | DOUBLE | No   | false | NULL    | NONE  |
+| RemoteUsedCapacity      | BIGINT | No   | false | NULL    | NONE  |
+| Tag                     | TEXT   | No   | false | NULL    | NONE  |
+| ErrMsg                  | TEXT   | No   | false | NULL    | NONE  |
+| Version                 | TEXT   | No   | false | NULL    | NONE  |
+| Status                  | TEXT   | No   | false | NULL    | NONE  |
+| HeartbeatFailureCounter | INT    | No   | false | NULL    | NONE  |
+| NodeRole                | TEXT   | No   | false | NULL    | NONE  |
++-------------------------+--------+------+-------+---------+-------+
+26 rows in set (0.04 sec)
+```
+
+The information displayed by the `backends` tvf is basically consistent with the information displayed by the `show backends` statement. However, the types of each field in the `backends` tvf are more specific, and you can use the `backends` tvf to perform operations such as filtering and joining.
+
+### example
+```
+mysql> select * from backends()\G
+*************************** 1. row ***************************
+              BackendId: 10022
+                Cluster: default_cluster
+                     IP: 10.16.10.14
+               HostName: 10.16.10.14
+          HeartbeatPort: 9159
+                 BePort: 9169
+               HttpPort: 8149
+               BrpcPort: 8169
+          LastStartTime: 2023-03-24 14:37:00
+          LastHeartbeat: 2023-03-27 20:25:35
+                  Alive: true
+   SystemDecommissioned: false
+  ClusterDecommissioned: false
+              TabletNum: 21
+       DataUsedCapacity: 0
+          AvailCapacity: 787460558849
+          TotalCapacity: 3169589592064
+                UsedPct: 75.155756416520319
+         MaxDiskUsedPct: 75.155756416551881
+     RemoteUsedCapacity: 0
+                    Tag: {"location" : "default"}
+                 ErrMsg:
+                Version: doris-0.0.0-trunk-8de51f96f3
+                 Status: {"lastSuccessReportTabletsTime":"2023-03-27 20:24:55","lastStreamLoadTime":-1,"isQueryDisabled":false,"isLoadDisabled":false}
+HeartbeatFailureCounter: 0
+               NodeRole: mix
+1 row in set (0.03 sec)
+```
+
+### keywords
+
+    backends
\ No newline at end of file
diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Manipulation/EXPORT.md b/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Manipulation/EXPORT.md
index d1e507baf4..7610722e6f 100644
--- a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Manipulation/EXPORT.md
+++ b/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Manipulation/EXPORT.md
@@ -72,7 +72,7 @@ illustrate:
   - `column_separator`: Specifies the exported column separator, default is \t.
Only single byte is supported. - `line_delimiter`: Specifies the line delimiter for export, the default is \n. Only single byte is supported. - `exec_mem_limit`: Export the upper limit of the memory usage of a single BE node, the default is 2GB, and the unit is bytes. - - `timeout`: The timeout period of the import job, the default is 2 hours, the unit is seconds. + - `timeout`: The timeout period of the export job, the default is 2 hours, the unit is seconds. - `tablet_num_per_task`: The maximum number of tablets each subtask can allocate to scan. - `WITH BROKER` diff --git a/docs/sidebars.json b/docs/sidebars.json index 50b1bad219..4715285688 100644 --- a/docs/sidebars.json +++ b/docs/sidebars.json @@ -684,7 +684,8 @@ "sql-manual/sql-functions/table-functions/explode-numbers", "sql-manual/sql-functions/table-functions/s3", "sql-manual/sql-functions/table-functions/hdfs", - "sql-manual/sql-functions/table-functions/iceberg_meta" + "sql-manual/sql-functions/table-functions/iceberg_meta", + "sql-manual/sql-functions/table-functions/backends" ] }, { @@ -1105,7 +1106,6 @@ "type": "category", "label": "System Table", "items": [ - "admin-manual/system-table/backends", "admin-manual/system-table/rowsets" ] }, @@ -1222,4 +1222,4 @@ ] } ] -} +} \ No newline at end of file diff --git a/docs/zh-CN/docs/admin-manual/system-table/backends.md b/docs/zh-CN/docs/admin-manual/system-table/backends.md deleted file mode 100644 index c80a5cae6e..0000000000 --- a/docs/zh-CN/docs/admin-manual/system-table/backends.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -{ - "title": "backends", - "language": "zh-CN" -} ---- - - - -## backends - -### Name - - - -backends - - - -### description - -`backends` 是doris内置的一张系统表,存放在`information_schema`数据库下。通过`backends`系统表可以查看当前doris集群中的 `BE` 节点信息。 - -`backends` 表结构为: -```sql -MySQL [information_schema]> desc information_schema.backends; -+-----------------------+-------------+------+-------+---------+-------+ -| Field | Type | Null | Key | Default | Extra | -+-----------------------+-------------+------+-------+---------+-------+ -| BackendId | BIGINT | Yes | false | NULL | | -| Cluster | VARCHAR(40) | Yes | false | NULL | | -| IP | VARCHAR(40) | Yes | false | NULL | | -| HeartbeatPort | INT | Yes | false | NULL | | -| BePort | INT | Yes | false | NULL | | -| HttpPort | INT | Yes | false | NULL | | -| BrpcPort | INT | Yes | false | NULL | | -| LastStartTime | VARCHAR(40) | Yes | false | NULL | | -| LastHeartbeat | VARCHAR(40) | Yes | false | NULL | | -| Alive | VARCHAR(40) | Yes | false | NULL | | -| SystemDecommissioned | VARCHAR(40) | Yes | false | NULL | | -| ClusterDecommissioned | VARCHAR(40) | Yes | false | NULL | | -| TabletNum | BIGINT | Yes | false | NULL | | -| DataUsedCapacity | BIGINT | Yes | false | NULL | | -| AvailCapacity | BIGINT | Yes | false | NULL | | -| TotalCapacity | BIGINT | Yes | false | NULL | | -| UsedPct | DOUBLE | Yes | false | NULL | | -| MaxDiskUsedPct | DOUBLE | Yes | false | NULL | | -| RemoteUsedCapacity | BIGINT | Yes | false | NULL | | -| Tag | VARCHAR(40) | Yes | false | NULL | | -| ErrMsg | VARCHAR(40) | Yes | false | NULL | | -| Version | VARCHAR(40) | Yes | false | NULL | | -| Status | VARCHAR(40) | Yes | false | NULL | | -+-----------------------+-------------+------+-------+---------+-------+ -``` -`backends` 系统表展示出来的信息基本与 `show backends` 语句展示出的信息一致。但是`backends`系统表的各个字段类型更加明确,且可以利用 `backends` 系统表去做过滤、join等操作。 - -### Example - -```sql -MySQL [information_schema]> select * from information_schema.backends; 
-+-----------+-----------------+-----------+---------------+--------+----------+----------+---------------------+---------------------+-------+----------------------+-----------------------+-----------+------------------+---------------+---------------+--------------------+------------------+--------------------+--------------------------+--------+-----------------------------+-------------------------------------------------------------------------------------------------------------------------------+ -| BackendId | Cluster | IP | HeartbeatPort | BePort | HttpPort | BrpcPort | LastStartTime | LastHeartbeat | Alive | SystemDecommissioned | ClusterDecommissioned | TabletNum | DataUsedCapacity | AvailCapacity | TotalCapacity | UsedPct | MaxDiskUsedPct | RemoteUsedCapacity | Tag | ErrMsg | Version | Status | -+-----------+-----------------+-----------+---------------+--------+----------+----------+---------------------+---------------------+-------+----------------------+-----------------------+-----------+------------------+---------------+---------------+--------------------+------------------+--------------------+--------------------------+--------+-----------------------------+-------------------------------------------------------------------------------------------------------------------------------+ -| 10757 | default_cluster | 127.0.0.1 | 9159 | 9169 | 8149 | 8169 | 2022-11-24 11:16:31 | 2022-11-24 12:02:57 | true | false | false | 14 | 0 | 941359747073 | 3170529116160 | 70.309064746482065 | 70.3090647465136 | 0 | {"location" : "default"} | | doris-0.0.0-trunk-cc9545359 | {"lastSuccessReportTabletsTime":"2022-11-24 12:02:06","lastStreamLoadTime":-1,"isQueryDisabled":false,"isLoadDisabled":false} | -+-----------+-----------------+-----------+---------------+--------+----------+----------+---------------------+---------------------+-------+----------------------+-----------------------+-----------+------------------+---------------+---------------+--------------------+------------------+--------------------+--------------------------+--------+-----------------------------+-------------------------------------------------------------------------------------------------------------------------------+ -``` - -### KeyWords - - backends, information_schema - -### Best Practice diff --git a/docs/zh-CN/docs/sql-manual/sql-functions/table-functions/backends.md b/docs/zh-CN/docs/sql-manual/sql-functions/table-functions/backends.md new file mode 100644 index 0000000000..53a3fbf5dc --- /dev/null +++ b/docs/zh-CN/docs/sql-manual/sql-functions/table-functions/backends.md @@ -0,0 +1,122 @@ +--- +{ + "title": "backends", + "language": "zh-CN" +} +--- + + + +## `backends` + +### Name + + + +backends + + + +### description + +表函数,生成backends临时表,可以查看当前doris集群中的 BE 节点信息。 + +该函数用于from子句中。 + +语法: + +``` +backends(); +``` + +backends()表结构: +``` +mysql> desc function backends(); ++-------------------------+--------+------+-------+---------+-------+ +| Field | Type | Null | Key | Default | Extra | ++-------------------------+--------+------+-------+---------+-------+ +| BackendId | BIGINT | No | false | NULL | NONE | +| Cluster | TEXT | No | false | NULL | NONE | +| IP | TEXT | No | false | NULL | NONE | +| HostName | TEXT | No | false | NULL | NONE | +| HeartbeatPort | INT | No | false | NULL | NONE | +| BePort | INT | No | false | NULL | NONE | +| HttpPort | INT | No | false | NULL | NONE | +| BrpcPort | INT | No | false | NULL | NONE | +| LastStartTime | TEXT | No | false | NULL | NONE | +| LastHeartbeat | 
TEXT | No | false | NULL | NONE | +| Alive | TEXT | No | false | NULL | NONE | +| SystemDecommissioned | TEXT | No | false | NULL | NONE | +| ClusterDecommissioned | TEXT | No | false | NULL | NONE | +| TabletNum | BIGINT | No | false | NULL | NONE | +| DataUsedCapacity | BIGINT | No | false | NULL | NONE | +| AvailCapacity | BIGINT | No | false | NULL | NONE | +| TotalCapacity | BIGINT | No | false | NULL | NONE | +| UsedPct | DOUBLE | No | false | NULL | NONE | +| MaxDiskUsedPct | DOUBLE | No | false | NULL | NONE | +| RemoteUsedCapacity | BIGINT | No | false | NULL | NONE | +| Tag | TEXT | No | false | NULL | NONE | +| ErrMsg | TEXT | No | false | NULL | NONE | +| Version | TEXT | No | false | NULL | NONE | +| Status | TEXT | No | false | NULL | NONE | +| HeartbeatFailureCounter | INT | No | false | NULL | NONE | +| NodeRole | TEXT | No | false | NULL | NONE | ++-------------------------+--------+------+-------+---------+-------+ +26 rows in set (0.04 sec) +``` + +`backends()` tvf展示出来的信息基本与 `show backends` 语句展示出的信息一致,但是`backends()` tvf的各个字段类型更加明确,且可以利用tvf生成的表去做过滤、join等操作。 + +### example +``` +mysql> select * from backends()\G +*************************** 1. row *************************** + BackendId: 10022 + Cluster: default_cluster + IP: 10.16.10.14 + HostName: 10.16.10.14 + HeartbeatPort: 9159 + BePort: 9169 + HttpPort: 8149 + BrpcPort: 8169 + LastStartTime: 2023-03-24 14:37:00 + LastHeartbeat: 2023-03-27 20:25:35 + Alive: true + SystemDecommissioned: false + ClusterDecommissioned: false + TabletNum: 21 + DataUsedCapacity: 0 + AvailCapacity: 787460558849 + TotalCapacity: 3169589592064 + UsedPct: 75.155756416520319 + MaxDiskUsedPct: 75.155756416551881 + RemoteUsedCapacity: 0 + Tag: {"location" : "default"} + ErrMsg: + Version: doris-0.0.0-trunk-8de51f96f3 + Status: {"lastSuccessReportTabletsTime":"2023-03-27 20:24:55","lastStreamLoadTime":-1,"isQueryDisabled":false,"isLoadDisabled":false} +HeartbeatFailureCounter: 0 + NodeRole: mix +1 row in set (0.03 sec) +``` + +### keywords + + backends \ No newline at end of file diff --git a/docs/zh-CN/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Manipulation/EXPORT.md b/docs/zh-CN/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Manipulation/EXPORT.md index 6fee0a5c5e..4f14e79889 100644 --- a/docs/zh-CN/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Manipulation/EXPORT.md +++ b/docs/zh-CN/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Manipulation/EXPORT.md @@ -72,7 +72,7 @@ WITH BROKER - `column_separator`:指定导出的列分隔符,默认为\t。仅支持单字节。 - `line_delimiter`:指定导出的行分隔符,默认为\n。仅支持单字节。 - `exec_mem_limit`:导出在单个 BE 节点的内存使用上限,默认为 2GB,单位为字节。 - - `timeout`:导入作业的超时时间,默认为2小时,单位是秒。 + - `timeout`:导出作业的超时时间,默认为2小时,单位是秒。 - `tablet_num_per_task`:每个子任务能分配扫描的最大 Tablet 数量。 - `WITH BROKER` @@ -234,4 +234,4 @@ PROPERTIES ( - 如果 Export 作业运行成功,在远端存储中产生的 `__doris_export_tmp_xxx` 目录,根据远端存储的文件系统语义,可能会保留,也可能会被清除。比如在S3对象存储中,通过 rename 操作将一个目录中的最后一个文件移走后,该目录也会被删除。如果该目录没有被清除,用户可以手动清除。 - Export 作业只会导出 Base 表的数据,不会导出物化视图的数据。 - Export 作业会扫描数据,占用 IO 资源,可能会影响系统的查询延迟。 -- 一个集群内同时运行的 Export 作业最大个数为 5。之后提交的只作业将会排队。 +- 一个集群内同时运行的 Export 作业最大个数为 5。之后提交的作业将会排队。 diff --git a/docs/zh-CN/docs/sql-manual/sql-reference/Data-Manipulation-Statements/OUTFILE.md b/docs/zh-CN/docs/sql-manual/sql-reference/Data-Manipulation-Statements/OUTFILE.md index ef7bf286e3..55e805ea9e 100644 --- a/docs/zh-CN/docs/sql-manual/sql-reference/Data-Manipulation-Statements/OUTFILE.md +++ b/docs/zh-CN/docs/sql-manual/sql-reference/Data-Manipulation-Statements/OUTFILE.md @@ 
-326,10 +326,10 @@ INTO OUTFILE "file_path" 3. 导出到本地文件 - 导出到本地文件的功能不适用于公有云用户,仅适用于私有化部署的用户。并且默认用户对集群节点有完全的控制权限。Doris 对于用户填写的导出路径不会做合法性检查。如果 Doris 的进程用户对该路径无写权限,或路径不存在,则会报错。同时处于安全性考虑,如果该路径已存在同名的文件,则也会导出失败。 + 导出到本地文件的功能不适用于公有云用户,仅适用于私有化部署的用户。并且默认用户对集群节点有完全的控制权限。Doris 对于用户填写的导出路径不会做合法性检查。如果 Doris 的进程用户对该路径无写权限,或路径不存在,则会报错。同时出于安全性考虑,如果该路径已存在同名的文件,也会导出失败。 Doris 不会管理导出到本地的文件,也不会检查磁盘空间等。这些文件需要用户自行管理,如清理等。 4. 结果完整性保证 - 该命令是一个同步命令,因此有可能在执行过程中任务连接断开了,从而无法活着导出的数据是否正常结束,或是否完整。此时可以使用 `success_file_name` 参数要求任务成功后,在目录下生成一个成功文件标识。用户可以通过这个文件,来判断导出是否正常结束。 + 该命令是一个同步命令,因此有可能在执行过程中任务连接断开了,从而无法获知导出任务是否正常结束及导出数据是否完整。此时可以使用 `success_file_name` 参数要求任务成功后,在目录下生成一个成功文件标识。用户可以通过这个文件,来判断导出是否正常结束。 diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/SchemaTableType.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/SchemaTableType.java index 81fc050239..ca282015fa 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/SchemaTableType.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/SchemaTableType.java @@ -67,8 +67,7 @@ public enum SchemaTableType { SCH_VIEWS("VIEWS", "VIEWS", TSchemaTableType.SCH_VIEWS), SCH_CREATE_TABLE("CREATE_TABLE", "CREATE_TABLE", TSchemaTableType.SCH_CREATE_TABLE), SCH_INVALID("NULL", "NULL", TSchemaTableType.SCH_INVALID), - SCH_ROWSETS("ROWSETS", "ROWSETS", TSchemaTableType.SCH_ROWSETS), - SCH_BACKENDS("BACKENDS", "BACKENDS", TSchemaTableType.SCH_BACKENDS); + SCH_ROWSETS("ROWSETS", "ROWSETS", TSchemaTableType.SCH_ROWSETS); private static final String dbName = "INFORMATION_SCHEMA"; private static SelectList fullSelectLists; diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowBackendsStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowBackendsStmt.java index 30c069954c..69e2708d3e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowBackendsStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowBackendsStmt.java @@ -46,10 +46,6 @@ public class ShowBackendsStmt extends ShowStmt { public ShowResultSetMetaData getMetaData() { ShowResultSetMetaData.Builder builder = ShowResultSetMetaData.builder(); for (String title : BackendsProcDir.TITLE_NAMES) { - // hide hostname for SHOW BACKENDS stmt - if (title.equals("HostName")) { - continue; - } builder.addColumn(new Column(title, ScalarType.createVarchar(30))); } return builder.build(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowFrontendsStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowFrontendsStmt.java index 0856a9df02..35e0e032f5 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowFrontendsStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowFrontendsStmt.java @@ -46,10 +46,6 @@ public class ShowFrontendsStmt extends ShowStmt { public ShowResultSetMetaData getMetaData() { ShowResultSetMetaData.Builder builder = ShowResultSetMetaData.builder(); for (String title : FrontendsProcNode.TITLE_NAMES) { - // hide hostname for SHOW FRONTENDS stmt - if (title.equals("HostName")) { - continue; - } builder.addColumn(new Column(title, ScalarType.createVarchar(30))); } return builder.build(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/SchemaTable.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/SchemaTable.java index af8cc101b2..160c6169bc 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/SchemaTable.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/SchemaTable.java @@ -20,7 +20,6 @@ package org.apache.doris.catalog; import 
org.apache.doris.analysis.SchemaTableType; import org.apache.doris.common.SystemIdGenerator; import org.apache.doris.thrift.TSchemaTable; -import org.apache.doris.thrift.TSchemaTableStructure; import org.apache.doris.thrift.TTableDescriptor; import org.apache.doris.thrift.TTableType; @@ -393,54 +392,8 @@ public class SchemaTable extends Table { .column("CREATION_TIME", ScalarType.createType(PrimitiveType.BIGINT)) .column("NEWEST_WRITE_TIMESTAMP", ScalarType.createType(PrimitiveType.BIGINT)) .build())) - .put("backends", new SchemaTable(SystemIdGenerator.getNextId(), "backends", TableType.SCHEMA, - builder().column("BackendId", ScalarType.createType(PrimitiveType.BIGINT)) - .column("Cluster", ScalarType.createVarchar(64)) - .column("IP", ScalarType.createVarchar(16)) - .column("HeartbeatPort", ScalarType.createType(PrimitiveType.INT)) - .column("BePort", ScalarType.createType(PrimitiveType.INT)) - .column("HttpPort", ScalarType.createType(PrimitiveType.INT)) - .column("BrpcPort", ScalarType.createType(PrimitiveType.INT)) - .column("LastStartTime", ScalarType.createVarchar(32)) - .column("LastHeartbeat", ScalarType.createVarchar(32)) - .column("Alive", ScalarType.createVarchar(8)) - .column("SystemDecommissioned", ScalarType.createVarchar(8)) - .column("ClusterDecommissioned", ScalarType.createVarchar(8)) - .column("TabletNum", ScalarType.createType(PrimitiveType.BIGINT)) - .column("DataUsedCapacity", ScalarType.createType(PrimitiveType.BIGINT)) - .column("AvailCapacity", ScalarType.createType(PrimitiveType.BIGINT)) - .column("TotalCapacity", ScalarType.createType(PrimitiveType.BIGINT)) - .column("UsedPct", ScalarType.createType(PrimitiveType.DOUBLE)) - .column("MaxDiskUsedPct", ScalarType.createType(PrimitiveType.DOUBLE)) - .column("RemoteUsedCapacity", ScalarType.createType(PrimitiveType.BIGINT)) - .column("Tag", ScalarType.createVarchar(128)) - .column("ErrMsg", ScalarType.createVarchar(2048)) - .column("Version", ScalarType.createVarchar(64)) - .column("Status", ScalarType.createVarchar(1024)) - .build())) .build(); - public static List getTableStructure(String tableName) { - List tSchemaTableStructureList = Lists.newArrayList(); - switch (tableName) { - case "backends": { - Table table = TABLE_MAP.get(tableName); - for (Column column : table.getFullSchema()) { - TSchemaTableStructure tSchemaTableStructure = new TSchemaTableStructure(); - tSchemaTableStructure.setColumnName(column.getName()); - tSchemaTableStructure.setType(column.getDataType().toThrift()); - tSchemaTableStructure.setLen(column.getDataType().getSlotSize()); - tSchemaTableStructure.setIsNull(column.isAllowNull()); - tSchemaTableStructureList.add(tSchemaTableStructure); - } - break; - } - default: - break; - } - return tSchemaTableStructureList; - } - protected SchemaTable(long id, String name, TableType type, List baseSchema) { super(id, name, type, baseSchema); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/FrontendsProcNode.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/FrontendsProcNode.java index c9d9a8377e..0161074628 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/FrontendsProcNode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/FrontendsProcNode.java @@ -50,8 +50,6 @@ public class FrontendsProcNode implements ProcNodeInterface { .add("CurrentConnected") .build(); - public static final int HOSTNAME_INDEX = 2; - private Env env; public FrontendsProcNode(Env env) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/SchemaScanNode.java 
b/fe/fe-core/src/main/java/org/apache/doris/planner/SchemaScanNode.java index 10eda04f9b..9115677555 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/SchemaScanNode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/SchemaScanNode.java @@ -122,8 +122,6 @@ public class SchemaScanNode extends ScanNode { TUserIdentity tCurrentUser = ConnectContext.get().getCurrentUserIdentity().toThrift(); msg.schema_scan_node.setCurrentUserIdent(tCurrentUser); - - msg.schema_scan_node.setTableStructure(SchemaTable.getTableStructure(tableName)); } /** diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/ShowExecutor.java b/fe/fe-core/src/main/java/org/apache/doris/qe/ShowExecutor.java index cd6b9c98ff..124bd13e1c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/ShowExecutor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/ShowExecutor.java @@ -1820,9 +1820,6 @@ public class ShowExecutor { final ShowBackendsStmt showStmt = (ShowBackendsStmt) stmt; List> backendInfos = BackendsProcDir.getClusterBackendInfos(showStmt.getClusterName()); - for (List row : backendInfos) { - row.remove(BackendsProcDir.HOSTNAME_INDEX); - } backendInfos.sort(new Comparator>() { @Override public int compare(List o1, List o2) { @@ -1838,10 +1835,6 @@ public class ShowExecutor { List> infos = Lists.newArrayList(); FrontendsProcNode.getFrontendsInfo(Env.getCurrentEnv(), infos); - for (List row : infos) { - row.remove(FrontendsProcNode.HOSTNAME_INDEX); - } - resultSet = new ShowResultSet(showStmt.getMetaData(), infos); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/service/FrontendServiceImpl.java b/fe/fe-core/src/main/java/org/apache/doris/service/FrontendServiceImpl.java index 4b9bd2cc2d..276a48ea4c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/service/FrontendServiceImpl.java +++ b/fe/fe-core/src/main/java/org/apache/doris/service/FrontendServiceImpl.java @@ -1340,8 +1340,6 @@ public class FrontendServiceImpl implements FrontendService.Iface { @Override public TFetchSchemaTableDataResult fetchSchemaTableData(TFetchSchemaTableDataRequest request) throws TException { switch (request.getSchemaTableName()) { - case BACKENDS: - return MetadataGenerator.getBackendsSchemaTable(request); case METADATA_TABLE: return MetadataGenerator.getMetadataTable(request); default: diff --git a/fe/fe-core/src/main/java/org/apache/doris/tablefunction/BackendsTableValuedFunction.java b/fe/fe-core/src/main/java/org/apache/doris/tablefunction/BackendsTableValuedFunction.java index b140415f8a..864fcc8f80 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/tablefunction/BackendsTableValuedFunction.java +++ b/fe/fe-core/src/main/java/org/apache/doris/tablefunction/BackendsTableValuedFunction.java @@ -67,17 +67,18 @@ public class BackendsTableValuedFunction extends MetadataTableValuedFunction { public List getTableColumns() throws AnalysisException { List resColumns = Lists.newArrayList(); resColumns.add(new Column("BackendId", ScalarType.createType(PrimitiveType.BIGINT))); - resColumns.add(new Column("Cluster", ScalarType.createVarchar(64))); - resColumns.add(new Column("IP", ScalarType.createVarchar(16))); + resColumns.add(new Column("Cluster", ScalarType.createStringType())); + resColumns.add(new Column("IP", ScalarType.createStringType())); + resColumns.add(new Column("HostName", ScalarType.createStringType())); resColumns.add(new Column("HeartbeatPort", ScalarType.createType(PrimitiveType.INT))); resColumns.add(new Column("BePort", ScalarType.createType(PrimitiveType.INT))); resColumns.add(new 
Column("HttpPort", ScalarType.createType(PrimitiveType.INT))); resColumns.add(new Column("BrpcPort", ScalarType.createType(PrimitiveType.INT))); - resColumns.add(new Column("LastStartTime", ScalarType.createVarchar(32))); - resColumns.add(new Column("LastHeartbeat", ScalarType.createVarchar(32))); - resColumns.add(new Column("Alive", ScalarType.createVarchar(8))); - resColumns.add(new Column("SystemDecommissioned", ScalarType.createVarchar(8))); - resColumns.add(new Column("ClusterDecommissioned", ScalarType.createVarchar(8))); + resColumns.add(new Column("LastStartTime", ScalarType.createStringType())); + resColumns.add(new Column("LastHeartbeat", ScalarType.createStringType())); + resColumns.add(new Column("Alive", ScalarType.createStringType())); + resColumns.add(new Column("SystemDecommissioned", ScalarType.createStringType())); + resColumns.add(new Column("ClusterDecommissioned", ScalarType.createStringType())); resColumns.add(new Column("TabletNum", ScalarType.createType(PrimitiveType.BIGINT))); resColumns.add(new Column("DataUsedCapacity", ScalarType.createType(PrimitiveType.BIGINT))); resColumns.add(new Column("AvailCapacity", ScalarType.createType(PrimitiveType.BIGINT))); @@ -85,12 +86,12 @@ public class BackendsTableValuedFunction extends MetadataTableValuedFunction { resColumns.add(new Column("UsedPct", ScalarType.createType(PrimitiveType.DOUBLE))); resColumns.add(new Column("MaxDiskUsedPct", ScalarType.createType(PrimitiveType.DOUBLE))); resColumns.add(new Column("RemoteUsedCapacity", ScalarType.createType(PrimitiveType.BIGINT))); - resColumns.add(new Column("Tag", ScalarType.createVarchar(128))); - resColumns.add(new Column("ErrMsg", ScalarType.createVarchar(2048))); - resColumns.add(new Column("Version", ScalarType.createVarchar(64))); - resColumns.add(new Column("Status", ScalarType.createVarchar(1024))); + resColumns.add(new Column("Tag", ScalarType.createStringType())); + resColumns.add(new Column("ErrMsg", ScalarType.createStringType())); + resColumns.add(new Column("Version", ScalarType.createStringType())); + resColumns.add(new Column("Status", ScalarType.createStringType())); resColumns.add(new Column("HeartbeatFailureCounter", ScalarType.createType(PrimitiveType.INT))); - resColumns.add(new Column("NodeRole", ScalarType.createVarchar(64))); + resColumns.add(new Column("NodeRole", ScalarType.createStringType())); return resColumns; } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/tablefunction/MetadataGenerator.java b/fe/fe-core/src/main/java/org/apache/doris/tablefunction/MetadataGenerator.java index 03a0cc58ff..903c34225e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/tablefunction/MetadataGenerator.java +++ b/fe/fe-core/src/main/java/org/apache/doris/tablefunction/MetadataGenerator.java @@ -74,107 +74,6 @@ public class MetadataGenerator { return errorResult("Metadata table params is not set. 
"); } - // deprecated - public static TFetchSchemaTableDataResult getBackendsSchemaTable(TFetchSchemaTableDataRequest request) { - final SystemInfoService clusterInfoService = Env.getCurrentSystemInfo(); - List backendIds = null; - if (!Strings.isNullOrEmpty(request.cluster_name)) { - final Cluster cluster = Env.getCurrentEnv().getCluster(request.cluster_name); - // root not in any cluster - if (null == cluster) { - return errorResult("Cluster is not existed."); - } - backendIds = cluster.getBackendIdList(); - } else { - backendIds = clusterInfoService.getBackendIds(false); - } - - TFetchSchemaTableDataResult result = new TFetchSchemaTableDataResult(); - long start = System.currentTimeMillis(); - Stopwatch watch = Stopwatch.createUnstarted(); - - List dataBatch = Lists.newArrayList(); - for (long backendId : backendIds) { - Backend backend = clusterInfoService.getBackend(backendId); - if (backend == null) { - continue; - } - - watch.start(); - Integer tabletNum = Env.getCurrentInvertedIndex().getTabletNumByBackendId(backendId); - watch.stop(); - - TRow trow = new TRow(); - trow.addToColumnValue(new TCell().setLongVal(backendId)); - trow.addToColumnValue(new TCell().setStringVal(backend.getOwnerClusterName())); - trow.addToColumnValue(new TCell().setStringVal(backend.getIp())); - if (Strings.isNullOrEmpty(request.cluster_name)) { - trow.addToColumnValue(new TCell().setIntVal(backend.getHeartbeatPort())); - trow.addToColumnValue(new TCell().setIntVal(backend.getBePort())); - trow.addToColumnValue(new TCell().setIntVal(backend.getHttpPort())); - trow.addToColumnValue(new TCell().setIntVal(backend.getBrpcPort())); - } - trow.addToColumnValue(new TCell().setStringVal(TimeUtils.longToTimeString(backend.getLastStartTime()))); - trow.addToColumnValue(new TCell().setStringVal(TimeUtils.longToTimeString(backend.getLastUpdateMs()))); - trow.addToColumnValue(new TCell().setStringVal(String.valueOf(backend.isAlive()))); - if (backend.isDecommissioned() && backend.getDecommissionType() == DecommissionType.ClusterDecommission) { - trow.addToColumnValue(new TCell().setStringVal("false")); - trow.addToColumnValue(new TCell().setStringVal("true")); - } else if (backend.isDecommissioned() - && backend.getDecommissionType() == DecommissionType.SystemDecommission) { - trow.addToColumnValue(new TCell().setStringVal("true")); - trow.addToColumnValue(new TCell().setStringVal("false")); - } else { - trow.addToColumnValue(new TCell().setStringVal("false")); - trow.addToColumnValue(new TCell().setStringVal("false")); - } - trow.addToColumnValue(new TCell().setLongVal(tabletNum)); - - // capacity - // data used - trow.addToColumnValue(new TCell().setLongVal(backend.getDataUsedCapacityB())); - - // available - long availB = backend.getAvailableCapacityB(); - trow.addToColumnValue(new TCell().setLongVal(availB)); - - // total - long totalB = backend.getTotalCapacityB(); - trow.addToColumnValue(new TCell().setLongVal(totalB)); - - // used percent - double used = 0.0; - if (totalB <= 0) { - used = 0.0; - } else { - used = (double) (totalB - availB) * 100 / totalB; - } - trow.addToColumnValue(new TCell().setDoubleVal(used)); - trow.addToColumnValue(new TCell().setDoubleVal(backend.getMaxDiskUsedPct() * 100)); - - // remote used capacity - trow.addToColumnValue(new TCell().setLongVal(backend.getRemoteUsedCapacityB())); - - // tags - trow.addToColumnValue(new TCell().setStringVal(backend.getTagMapString())); - // err msg - trow.addToColumnValue(new TCell().setStringVal(backend.getHeartbeatErrMsg())); - // version - 
trow.addToColumnValue(new TCell().setStringVal(backend.getVersion())); - // status - trow.addToColumnValue(new TCell().setStringVal(new Gson().toJson(backend.getBackendStatus()))); - dataBatch.add(trow); - } - - // backends proc node get result too slow, add log to observer. - LOG.debug("backends proc get tablet num cost: {}, total cost: {}", - watch.elapsed(TimeUnit.MILLISECONDS), (System.currentTimeMillis() - start)); - - result.setDataBatch(dataBatch); - result.setStatus(new TStatus(TStatusCode.OK)); - return result; - } - @NotNull public static TFetchSchemaTableDataResult errorResult(String msg) { TFetchSchemaTableDataResult result = new TFetchSchemaTableDataResult(); @@ -266,6 +165,11 @@ public class MetadataGenerator { trow.addToColumnValue(new TCell().setLongVal(backendId)); trow.addToColumnValue(new TCell().setStringVal(backend.getOwnerClusterName())); trow.addToColumnValue(new TCell().setStringVal(backend.getIp())); + if (backend.getHostName() != null) { + trow.addToColumnValue(new TCell().setStringVal(backend.getHostName())); + } else { + trow.addToColumnValue(new TCell().setStringVal(backend.getIp())); + } if (Strings.isNullOrEmpty(backendsParam.cluster_name)) { trow.addToColumnValue(new TCell().setIntVal(backend.getHeartbeatPort())); trow.addToColumnValue(new TCell().setIntVal(backend.getBePort())); diff --git a/gensrc/thrift/Descriptors.thrift b/gensrc/thrift/Descriptors.thrift index 9a5043972c..388b169778 100644 --- a/gensrc/thrift/Descriptors.thrift +++ b/gensrc/thrift/Descriptors.thrift @@ -111,7 +111,6 @@ enum TSchemaTableType { SCH_VIEWS, SCH_INVALID, SCH_ROWSETS, - SCH_BACKENDS, SCH_COLUMN_STATISTICS } diff --git a/gensrc/thrift/FrontendService.thrift b/gensrc/thrift/FrontendService.thrift index f1cc722723..fc4ac1f663 100644 --- a/gensrc/thrift/FrontendService.thrift +++ b/gensrc/thrift/FrontendService.thrift @@ -719,7 +719,7 @@ struct TInitExternalCtlMetaResult { } enum TSchemaTableName { - BACKENDS = 0, + // BACKENDS = 0, METADATA_TABLE = 1, } diff --git a/gensrc/thrift/PlanNodes.thrift b/gensrc/thrift/PlanNodes.thrift index e5ef30f977..b5dda4e6d9 100644 --- a/gensrc/thrift/PlanNodes.thrift +++ b/gensrc/thrift/PlanNodes.thrift @@ -512,13 +512,6 @@ struct TCsvScanNode { 10:optional map column_function_mapping } -struct TSchemaTableStructure { - 1: optional string column_name - 2: optional Types.TPrimitiveType type - 3: optional i64 len - 4: optional bool is_null; -} - struct TSchemaScanNode { 1: required Types.TTupleId tuple_id @@ -533,7 +526,7 @@ struct TSchemaScanNode { 10: optional string user_ip // deprecated 11: optional Types.TUserIdentity current_user_ident // to replace the user and user_ip 12: optional bool show_hidden_cloumns = false - 13: optional list table_structure + // 13: optional list table_structure // deprecated 14: optional string catalog } diff --git a/regression-test/data/nereids_p0/system/test_query_sys_tables.out b/regression-test/data/nereids_p0/system/test_query_sys_tables.out index 6972d35ff6..5e6e5e527d 100644 --- a/regression-test/data/nereids_p0/system/test_query_sys_tables.out +++ b/regression-test/data/nereids_p0/system/test_query_sys_tables.out @@ -1,7 +1,4 @@ -- This file is automatically generated. 
You should know what you did if you want to edit this --- !backends -- -true - -- !charsets -- true diff --git a/regression-test/data/query_p0/system/test_query_sys_tables.out b/regression-test/data/query_p0/system/test_query_sys_tables.out index 81aa1eae9d..5ccb57f74e 100644 --- a/regression-test/data/query_p0/system/test_query_sys_tables.out +++ b/regression-test/data/query_p0/system/test_query_sys_tables.out @@ -1,7 +1,4 @@ -- This file is automatically generated. You should know what you did if you want to edit this --- !backends -- -true - -- !charsets -- true diff --git a/regression-test/suites/compaction/test_compaction_agg_keys.groovy b/regression-test/suites/compaction/test_compaction_agg_keys.groovy index c29675f7fa..3bbe8fb7af 100644 --- a/regression-test/suites/compaction/test_compaction_agg_keys.groovy +++ b/regression-test/suites/compaction/test_compaction_agg_keys.groovy @@ -29,7 +29,7 @@ suite("test_compaction_agg_keys") { def backendId_to_backendHttpPort = [:] for (String[] backend in backends) { backendId_to_backendIP.put(backend[0], backend[2]) - backendId_to_backendHttpPort.put(backend[0], backend[5]) + backendId_to_backendHttpPort.put(backend[0], backend[6]) } backend_id = backendId_to_backendIP.keySet()[0] diff --git a/regression-test/suites/compaction/test_compaction_agg_keys_with_delete.groovy b/regression-test/suites/compaction/test_compaction_agg_keys_with_delete.groovy index c641fa4cbe..3dc24551de 100644 --- a/regression-test/suites/compaction/test_compaction_agg_keys_with_delete.groovy +++ b/regression-test/suites/compaction/test_compaction_agg_keys_with_delete.groovy @@ -29,7 +29,7 @@ suite("test_compaction_agg_keys_with_delete") { def backendId_to_backendHttpPort = [:] for (String[] backend in backends) { backendId_to_backendIP.put(backend[0], backend[2]) - backendId_to_backendHttpPort.put(backend[0], backend[5]) + backendId_to_backendHttpPort.put(backend[0], backend[6]) } backend_id = backendId_to_backendIP.keySet()[0] diff --git a/regression-test/suites/compaction/test_compaction_dup_keys.groovy b/regression-test/suites/compaction/test_compaction_dup_keys.groovy index 374153c224..4f365b628f 100644 --- a/regression-test/suites/compaction/test_compaction_dup_keys.groovy +++ b/regression-test/suites/compaction/test_compaction_dup_keys.groovy @@ -29,7 +29,7 @@ suite("test_compaction_dup_keys") { def backendId_to_backendHttpPort = [:] for (String[] backend in backends) { backendId_to_backendIP.put(backend[0], backend[2]) - backendId_to_backendHttpPort.put(backend[0], backend[5]) + backendId_to_backendHttpPort.put(backend[0], backend[6]) } backend_id = backendId_to_backendIP.keySet()[0] diff --git a/regression-test/suites/compaction/test_compaction_dup_keys_with_delete.groovy b/regression-test/suites/compaction/test_compaction_dup_keys_with_delete.groovy index f57229072d..c292399399 100644 --- a/regression-test/suites/compaction/test_compaction_dup_keys_with_delete.groovy +++ b/regression-test/suites/compaction/test_compaction_dup_keys_with_delete.groovy @@ -29,7 +29,7 @@ suite("test_compaction_dup_keys_with_delete") { def backendId_to_backendHttpPort = [:] for (String[] backend in backends) { backendId_to_backendIP.put(backend[0], backend[2]) - backendId_to_backendHttpPort.put(backend[0], backend[5]) + backendId_to_backendHttpPort.put(backend[0], backend[6]) } backend_id = backendId_to_backendIP.keySet()[0] diff --git a/regression-test/suites/compaction/test_compaction_uniq_keys.groovy b/regression-test/suites/compaction/test_compaction_uniq_keys.groovy index 
diff --git a/regression-test/suites/compaction/test_compaction_agg_keys_with_delete.groovy b/regression-test/suites/compaction/test_compaction_agg_keys_with_delete.groovy
index c641fa4cbe..3dc24551de 100644
--- a/regression-test/suites/compaction/test_compaction_agg_keys_with_delete.groovy
+++ b/regression-test/suites/compaction/test_compaction_agg_keys_with_delete.groovy
@@ -29,7 +29,7 @@ suite("test_compaction_agg_keys_with_delete") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
 
     backend_id = backendId_to_backendIP.keySet()[0]
diff --git a/regression-test/suites/compaction/test_compaction_dup_keys.groovy b/regression-test/suites/compaction/test_compaction_dup_keys.groovy
index 374153c224..4f365b628f 100644
--- a/regression-test/suites/compaction/test_compaction_dup_keys.groovy
+++ b/regression-test/suites/compaction/test_compaction_dup_keys.groovy
@@ -29,7 +29,7 @@ suite("test_compaction_dup_keys") {
    def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
 
     backend_id = backendId_to_backendIP.keySet()[0]
diff --git a/regression-test/suites/compaction/test_compaction_dup_keys_with_delete.groovy b/regression-test/suites/compaction/test_compaction_dup_keys_with_delete.groovy
index f57229072d..c292399399 100644
--- a/regression-test/suites/compaction/test_compaction_dup_keys_with_delete.groovy
+++ b/regression-test/suites/compaction/test_compaction_dup_keys_with_delete.groovy
@@ -29,7 +29,7 @@ suite("test_compaction_dup_keys_with_delete") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
 
     backend_id = backendId_to_backendIP.keySet()[0]
diff --git a/regression-test/suites/compaction/test_compaction_uniq_keys.groovy b/regression-test/suites/compaction/test_compaction_uniq_keys.groovy
index 2ff2a7137d..02d30c560a 100644
--- a/regression-test/suites/compaction/test_compaction_uniq_keys.groovy
+++ b/regression-test/suites/compaction/test_compaction_uniq_keys.groovy
@@ -29,7 +29,7 @@ suite("test_compaction_uniq_keys") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
 
     backend_id = backendId_to_backendIP.keySet()[0]
diff --git a/regression-test/suites/compaction/test_compaction_uniq_keys_row_store.groovy b/regression-test/suites/compaction/test_compaction_uniq_keys_row_store.groovy
index 7ef5f419b6..ff79af35f4 100644
--- a/regression-test/suites/compaction/test_compaction_uniq_keys_row_store.groovy
+++ b/regression-test/suites/compaction/test_compaction_uniq_keys_row_store.groovy
@@ -42,7 +42,7 @@ suite("test_compaction_uniq_keys_row_store") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
 
     backend_id = backendId_to_backendIP.keySet()[0]
diff --git a/regression-test/suites/compaction/test_compaction_uniq_keys_with_delete.groovy b/regression-test/suites/compaction/test_compaction_uniq_keys_with_delete.groovy
index caffb945d5..8dab92aa29 100644
--- a/regression-test/suites/compaction/test_compaction_uniq_keys_with_delete.groovy
+++ b/regression-test/suites/compaction/test_compaction_uniq_keys_with_delete.groovy
@@ -29,7 +29,7 @@ suite("test_compaction_uniq_keys_with_delete") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
 
     backend_id = backendId_to_backendIP.keySet()[0]
diff --git a/regression-test/suites/compaction/test_vertical_compaction_agg_keys.groovy b/regression-test/suites/compaction/test_vertical_compaction_agg_keys.groovy
index a68b6c3911..4f9aa94a28 100644
--- a/regression-test/suites/compaction/test_vertical_compaction_agg_keys.groovy
+++ b/regression-test/suites/compaction/test_vertical_compaction_agg_keys.groovy
@@ -28,7 +28,7 @@ suite("test_vertical_compaction_agg_keys") {
         setConfigCommand.append("curl -X POST http://")
         setConfigCommand.append(backend[2])
         setConfigCommand.append(":")
-        setConfigCommand.append(backend[5])
+        setConfigCommand.append(backend[6])
         setConfigCommand.append("/api/update_config?")
         String command1 = setConfigCommand.toString() + "enable_vertical_compaction=true"
         logger.info(command1)
@@ -45,7 +45,7 @@ suite("test_vertical_compaction_agg_keys") {
         setConfigCommand.append("curl -X POST http://")
         setConfigCommand.append(backend[2])
         setConfigCommand.append(":")
-        setConfigCommand.append(backend[5])
+        setConfigCommand.append(backend[6])
         setConfigCommand.append("/api/update_config?")
         String command1 = setConfigCommand.toString() + "enable_vertical_compaction=false"
         logger.info(command1)
@@ -64,7 +64,7 @@ suite("test_vertical_compaction_agg_keys") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
 
     backend_id = backendId_to_backendIP.keySet()[0]
diff --git a/regression-test/suites/compaction/test_vertical_compaction_dup_keys.groovy b/regression-test/suites/compaction/test_vertical_compaction_dup_keys.groovy
index ac91094ed9..263ea3dcaf 100644
--- a/regression-test/suites/compaction/test_vertical_compaction_dup_keys.groovy
+++ b/regression-test/suites/compaction/test_vertical_compaction_dup_keys.groovy
@@ -28,7 +28,7 @@ suite("test_vertical_compaction_dup_keys") {
         setConfigCommand.append("curl -X POST http://")
         setConfigCommand.append(backend[2])
         setConfigCommand.append(":")
-        setConfigCommand.append(backend[5])
+        setConfigCommand.append(backend[6])
         setConfigCommand.append("/api/update_config?")
         String command1 = setConfigCommand.toString() + "enable_vertical_compaction=true"
         logger.info(command1)
@@ -45,7 +45,7 @@ suite("test_vertical_compaction_dup_keys") {
         setConfigCommand.append("curl -X POST http://")
         setConfigCommand.append(backend[2])
         setConfigCommand.append(":")
-        setConfigCommand.append(backend[5])
+        setConfigCommand.append(backend[6])
         setConfigCommand.append("/api/update_config?")
         String command1 = setConfigCommand.toString() + "enable_vertical_compaction=false"
         logger.info(command1)
@@ -64,7 +64,7 @@ suite("test_vertical_compaction_dup_keys") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
 
     backend_id = backendId_to_backendIP.keySet()[0]
diff --git a/regression-test/suites/compaction/test_vertical_compaction_uniq_keys.groovy b/regression-test/suites/compaction/test_vertical_compaction_uniq_keys.groovy
index 02ec99aefd..08612f36bf 100644
--- a/regression-test/suites/compaction/test_vertical_compaction_uniq_keys.groovy
+++ b/regression-test/suites/compaction/test_vertical_compaction_uniq_keys.groovy
@@ -28,7 +28,7 @@ suite("test_vertical_compaction_uniq_keys") {
         setConfigCommand.append("curl -X POST http://")
         setConfigCommand.append(backend[2])
         setConfigCommand.append(":")
-        setConfigCommand.append(backend[5])
+        setConfigCommand.append(backend[6])
         setConfigCommand.append("/api/update_config?")
         String command1 = setConfigCommand.toString() + "enable_vertical_compaction=true"
         logger.info(command1)
@@ -45,7 +45,7 @@ suite("test_vertical_compaction_uniq_keys") {
         setConfigCommand.append("curl -X POST http://")
         setConfigCommand.append(backend[2])
         setConfigCommand.append(":")
-        setConfigCommand.append(backend[5])
+        setConfigCommand.append(backend[6])
         setConfigCommand.append("/api/update_config?")
         String command1 = setConfigCommand.toString() + "enable_vertical_compaction=false"
         logger.info(command1)
@@ -64,7 +64,7 @@ suite("test_vertical_compaction_uniq_keys") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
 
     backend_id = backendId_to_backendIP.keySet()[0]
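The three vertical-compaction suites above toggle a BE flag over HTTP by assembling a curl command from the backend's IP and its (now-shifted) HTTP port. A condensed sketch of that call, assuming the same `/api/update_config` endpoint the suites already use; `setBeConfig` is a hypothetical helper name:

```groovy
// Sketch: toggle a BE config flag the way the suites above do, via the
// backend's HTTP port (backend[6] after the HostName column shift).
def setBeConfig = { String[] backend, String kv ->
    def cmd = "curl -X POST http://" + backend[2] + ":" + backend[6] + "/api/update_config?" + kv
    logger.info(cmd)
    cmd.execute().waitFor()
}
// e.g. setBeConfig(backend, "enable_vertical_compaction=true")
```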
diff --git a/regression-test/suites/correctness/test_backends_table.groovy b/regression-test/suites/correctness_p0/table_valued_function/test_backends_tvf.groovy
similarity index 78%
rename from regression-test/suites/correctness/test_backends_table.groovy
rename to regression-test/suites/correctness_p0/table_valued_function/test_backends_tvf.groovy
index 3b4771d825..19f524ee9c 100644
--- a/regression-test/suites/correctness/test_backends_table.groovy
+++ b/regression-test/suites/correctness_p0/table_valued_function/test_backends_tvf.groovy
@@ -15,9 +15,9 @@
 // specific language governing permissions and limitations
 // under the License.
 
-// This suit test the `backends` information_schema table
-suite("test_backends_table") {
-    List<List<Object>> table = sql """ select * from information_schema.backends; """
+// This suite tests the `backends` tvf
+suite("test_backends_tvf") {
+    List<List<Object>> table = sql """ select * from backends(); """
     assertTrue(table.size() > 0) // row should > 0
-    assertTrue(table[0].size == 23) // column should be 23
+    assertTrue(table[0].size == 26) // column count should be 26
 }
\ No newline at end of file
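For anyone updating suites that still read `information_schema.backends`: the replacement is the `backends()` table-valued function, queried like an ordinary table. The renamed test above asserts 26 columns; a minimal usage sketch follows (exact column names live in the backends.md doc added by this patch and are not repeated here):

```groovy
// Query the backends() TVF the same way the renamed suite does.
List<List<Object>> rows = sql """ select * from backends(); """
assertTrue(rows.size() > 0)     // at least one backend registered
assertTrue(rows[0].size == 26)  // 26 columns after adding HostName
```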
suite("information_schema") { - List> table = sql """ select * from information_schema.backends; """ + List> table = sql """ select * from backends(); """ assertTrue(table.size() > 0) // row should > 0 - assertTrue(table[0].size == 23) // column should be 23 + assertTrue(table[0].size == 26) // column should be 26 sql "SELECT DATABASE();" sql "select USER();" diff --git a/regression-test/suites/query_p0/system/test_query_sys_tables.groovy b/regression-test/suites/query_p0/system/test_query_sys_tables.groovy index cc552b6397..190c2c7475 100644 --- a/regression-test/suites/query_p0/system/test_query_sys_tables.groovy +++ b/regression-test/suites/query_p0/system/test_query_sys_tables.groovy @@ -26,10 +26,6 @@ suite("test_query_sys_tables", "query,p0") { sql("drop database IF EXISTS ${dbName2}") sql("drop database IF EXISTS ${dbName3}") - // test backends - sql("use information_schema") - qt_backends("select count(*) >= 1 from backends") - // test charsets sql("use information_schema") qt_charsets("select count(*) >= 1 from character_sets") diff --git a/regression-test/suites/schema_change/test_number_overflow.groovy b/regression-test/suites/schema_change/test_number_overflow.groovy index 6e3d93c010..a193f57ecd 100644 --- a/regression-test/suites/schema_change/test_number_overflow.groovy +++ b/regression-test/suites/schema_change/test_number_overflow.groovy @@ -32,7 +32,7 @@ suite ("test_number_overflow") { def backendId_to_backendHttpPort = [:] for (String[] backend in backends) { backendId_to_backendIP.put(backend[0], backend[2]) - backendId_to_backendHttpPort.put(backend[0], backend[5]) + backendId_to_backendHttpPort.put(backend[0], backend[6]) } backend_id = backendId_to_backendIP.keySet()[0] diff --git a/regression-test/suites/schema_change_p0/datev2/test_agg_keys_schema_change_datev2.groovy b/regression-test/suites/schema_change_p0/datev2/test_agg_keys_schema_change_datev2.groovy index a890791287..f47d467386 100644 --- a/regression-test/suites/schema_change_p0/datev2/test_agg_keys_schema_change_datev2.groovy +++ b/regression-test/suites/schema_change_p0/datev2/test_agg_keys_schema_change_datev2.groovy @@ -31,7 +31,7 @@ suite("test_agg_keys_schema_change_datev2") { def backendId_to_backendHttpPort = [:] for (String[] backend in backends) { backendId_to_backendIP.put(backend[0], backend[2]) - backendId_to_backendHttpPort.put(backend[0], backend[5]) + backendId_to_backendHttpPort.put(backend[0], backend[6]) } backend_id = backendId_to_backendIP.keySet()[0] diff --git a/regression-test/suites/schema_change_p0/datev2/test_dup_keys_schema_change_datev2.groovy b/regression-test/suites/schema_change_p0/datev2/test_dup_keys_schema_change_datev2.groovy index 0af2d1f396..662e275ee4 100644 --- a/regression-test/suites/schema_change_p0/datev2/test_dup_keys_schema_change_datev2.groovy +++ b/regression-test/suites/schema_change_p0/datev2/test_dup_keys_schema_change_datev2.groovy @@ -31,7 +31,7 @@ suite("test_dup_keys_schema_change_datev2") { def backendId_to_backendHttpPort = [:] for (String[] backend in backends) { backendId_to_backendIP.put(backend[0], backend[2]) - backendId_to_backendHttpPort.put(backend[0], backend[5]) + backendId_to_backendHttpPort.put(backend[0], backend[6]) } backend_id = backendId_to_backendIP.keySet()[0] diff --git a/regression-test/suites/schema_change_p0/decimalv3/test_agg_keys_schema_change_decimalv3.groovy b/regression-test/suites/schema_change_p0/decimalv3/test_agg_keys_schema_change_decimalv3.groovy index 3290141987..321b1cd1a2 100644 --- 
diff --git a/regression-test/suites/schema_change_p0/decimalv3/test_agg_keys_schema_change_decimalv3.groovy b/regression-test/suites/schema_change_p0/decimalv3/test_agg_keys_schema_change_decimalv3.groovy
index 3290141987..321b1cd1a2 100644
--- a/regression-test/suites/schema_change_p0/decimalv3/test_agg_keys_schema_change_decimalv3.groovy
+++ b/regression-test/suites/schema_change_p0/decimalv3/test_agg_keys_schema_change_decimalv3.groovy
@@ -31,7 +31,7 @@ suite("test_agg_keys_schema_change_decimalv3") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
 
     backend_id = backendId_to_backendIP.keySet()[0]
diff --git a/regression-test/suites/schema_change_p0/test_agg_keys_schema_change.groovy b/regression-test/suites/schema_change_p0/test_agg_keys_schema_change.groovy
index aa4e6d6b6c..fccff4e1c5 100644
--- a/regression-test/suites/schema_change_p0/test_agg_keys_schema_change.groovy
+++ b/regression-test/suites/schema_change_p0/test_agg_keys_schema_change.groovy
@@ -34,7 +34,7 @@ suite ("test_agg_keys_schema_change") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
 
     backend_id = backendId_to_backendIP.keySet()[0]
diff --git a/regression-test/suites/schema_change_p0/test_agg_mv_schema_change.groovy b/regression-test/suites/schema_change_p0/test_agg_mv_schema_change.groovy
index 3e4ab7e280..e5876bf91b 100644
--- a/regression-test/suites/schema_change_p0/test_agg_mv_schema_change.groovy
+++ b/regression-test/suites/schema_change_p0/test_agg_mv_schema_change.groovy
@@ -37,7 +37,7 @@ suite ("test_agg_mv_schema_change") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
 
     backend_id = backendId_to_backendIP.keySet()[0]
diff --git a/regression-test/suites/schema_change_p0/test_agg_rollup_schema_change.groovy b/regression-test/suites/schema_change_p0/test_agg_rollup_schema_change.groovy
index 158f8bd4d2..2dcb385908 100644
--- a/regression-test/suites/schema_change_p0/test_agg_rollup_schema_change.groovy
+++ b/regression-test/suites/schema_change_p0/test_agg_rollup_schema_change.groovy
@@ -38,7 +38,7 @@ suite ("test_agg_rollup_schema_change") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
 
     backend_id = backendId_to_backendIP.keySet()[0]
diff --git a/regression-test/suites/schema_change_p0/test_agg_vals_schema_change.groovy b/regression-test/suites/schema_change_p0/test_agg_vals_schema_change.groovy
index 9fc89a599f..23f7c888bd 100644
--- a/regression-test/suites/schema_change_p0/test_agg_vals_schema_change.groovy
+++ b/regression-test/suites/schema_change_p0/test_agg_vals_schema_change.groovy
@@ -29,7 +29,7 @@ suite ("test_agg_vals_schema_change") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
 
     backend_id = backendId_to_backendIP.keySet()[0]
diff --git a/regression-test/suites/schema_change_p0/test_dup_keys_schema_change.groovy b/regression-test/suites/schema_change_p0/test_dup_keys_schema_change.groovy
index 207a19928e..f299118144 100644
--- a/regression-test/suites/schema_change_p0/test_dup_keys_schema_change.groovy
+++ b/regression-test/suites/schema_change_p0/test_dup_keys_schema_change.groovy
@@ -32,7 +32,7 @@ suite ("test_dup_keys_schema_change") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
 
     backend_id = backendId_to_backendIP.keySet()[0]
diff --git a/regression-test/suites/schema_change_p0/test_dup_mv_schema_change.groovy b/regression-test/suites/schema_change_p0/test_dup_mv_schema_change.groovy
index c14b7a378d..012496ac7a 100644
--- a/regression-test/suites/schema_change_p0/test_dup_mv_schema_change.groovy
+++ b/regression-test/suites/schema_change_p0/test_dup_mv_schema_change.groovy
@@ -36,7 +36,7 @@ suite ("test_dup_mv_schema_change") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
 
     backend_id = backendId_to_backendIP.keySet()[0]
diff --git a/regression-test/suites/schema_change_p0/test_dup_rollup_schema_change.groovy b/regression-test/suites/schema_change_p0/test_dup_rollup_schema_change.groovy
index fb809f4f05..3e58cb9adb 100644
--- a/regression-test/suites/schema_change_p0/test_dup_rollup_schema_change.groovy
+++ b/regression-test/suites/schema_change_p0/test_dup_rollup_schema_change.groovy
@@ -36,7 +36,7 @@ suite ("test_dup_rollup_schema_change") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
 
     backend_id = backendId_to_backendIP.keySet()[0]
diff --git a/regression-test/suites/schema_change_p0/test_dup_vals_schema_change.groovy b/regression-test/suites/schema_change_p0/test_dup_vals_schema_change.groovy
index 544fffb173..0bccf610bd 100644
--- a/regression-test/suites/schema_change_p0/test_dup_vals_schema_change.groovy
+++ b/regression-test/suites/schema_change_p0/test_dup_vals_schema_change.groovy
@@ -28,7 +28,7 @@ suite ("test_dup_vals_schema_change") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
 
     backend_id = backendId_to_backendIP.keySet()[0]
diff --git a/regression-test/suites/schema_change_p0/test_uniq_keys_schema_change.groovy b/regression-test/suites/schema_change_p0/test_uniq_keys_schema_change.groovy
index 81632ab2f5..b21fcf31e8 100644
--- a/regression-test/suites/schema_change_p0/test_uniq_keys_schema_change.groovy
+++ b/regression-test/suites/schema_change_p0/test_uniq_keys_schema_change.groovy
@@ -28,7 +28,7 @@ suite ("test_uniq_keys_schema_change") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
 
     backend_id = backendId_to_backendIP.keySet()[0]
diff --git a/regression-test/suites/schema_change_p0/test_uniq_mv_schema_change.groovy b/regression-test/suites/schema_change_p0/test_uniq_mv_schema_change.groovy
index df365ad01a..8df54b968a 100644
--- a/regression-test/suites/schema_change_p0/test_uniq_mv_schema_change.groovy
+++ b/regression-test/suites/schema_change_p0/test_uniq_mv_schema_change.groovy
@@ -32,7 +32,7 @@ suite ("test_uniq_mv_schema_change") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
 
     backend_id = backendId_to_backendIP.keySet()[0]
diff --git a/regression-test/suites/schema_change_p0/test_uniq_rollup_schema_change.groovy b/regression-test/suites/schema_change_p0/test_uniq_rollup_schema_change.groovy
index 0ea05d2316..773b29d101 100644
--- a/regression-test/suites/schema_change_p0/test_uniq_rollup_schema_change.groovy
+++ b/regression-test/suites/schema_change_p0/test_uniq_rollup_schema_change.groovy
@@ -36,7 +36,7 @@ suite ("test_uniq_rollup_schema_change") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
 
     backend_id = backendId_to_backendIP.keySet()[0]
diff --git a/regression-test/suites/schema_change_p0/test_uniq_vals_schema_change.groovy b/regression-test/suites/schema_change_p0/test_uniq_vals_schema_change.groovy
index 20610a93d3..1e33841c71 100644
--- a/regression-test/suites/schema_change_p0/test_uniq_vals_schema_change.groovy
+++ b/regression-test/suites/schema_change_p0/test_uniq_vals_schema_change.groovy
@@ -28,7 +28,7 @@ suite ("test_uniq_vals_schema_change") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
 
     backend_id = backendId_to_backendIP.keySet()[0]
diff --git a/regression-test/suites/schema_change_p0/test_varchar_schema_change.groovy b/regression-test/suites/schema_change_p0/test_varchar_schema_change.groovy
index a3a66c68fd..261b928a53 100644
--- a/regression-test/suites/schema_change_p0/test_varchar_schema_change.groovy
+++ b/regression-test/suites/schema_change_p0/test_varchar_schema_change.groovy
@@ -34,7 +34,7 @@ suite ("test_varchar_schema_change") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
 
     backend_id = backendId_to_backendIP.keySet()[0]
diff --git a/regression-test/suites/segcompaction_p1/test_segcompaction_agg_keys.groovy b/regression-test/suites/segcompaction_p1/test_segcompaction_agg_keys.groovy
index d54a9ed66b..af4ea90ff1 100644
--- a/regression-test/suites/segcompaction_p1/test_segcompaction_agg_keys.groovy
+++ b/regression-test/suites/segcompaction_p1/test_segcompaction_agg_keys.groovy
@@ -35,7 +35,7 @@ suite("test_segcompaction_agg_keys") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
 
     backend_id = backendId_to_backendIP.keySet()[0]
diff --git a/regression-test/suites/segcompaction_p1/test_segcompaction_agg_keys_index.groovy b/regression-test/suites/segcompaction_p1/test_segcompaction_agg_keys_index.groovy
index b639482e4e..94d2160e73 100644
--- a/regression-test/suites/segcompaction_p1/test_segcompaction_agg_keys_index.groovy
+++ b/regression-test/suites/segcompaction_p1/test_segcompaction_agg_keys_index.groovy
@@ -35,7 +35,7 @@ suite("test_segcompaction_agg_keys_index") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
 
     backend_id = backendId_to_backendIP.keySet()[0]
diff --git a/regression-test/suites/segcompaction_p1/test_segcompaction_dup_keys.groovy b/regression-test/suites/segcompaction_p1/test_segcompaction_dup_keys.groovy
index 4213695c50..2eed88e0cf 100644
--- a/regression-test/suites/segcompaction_p1/test_segcompaction_dup_keys.groovy
+++ b/regression-test/suites/segcompaction_p1/test_segcompaction_dup_keys.groovy
@@ -35,7 +35,7 @@ suite("test_segcompaction_dup_keys") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
 
     backend_id = backendId_to_backendIP.keySet()[0]
diff --git a/regression-test/suites/segcompaction_p1/test_segcompaction_dup_keys_index.groovy b/regression-test/suites/segcompaction_p1/test_segcompaction_dup_keys_index.groovy
index f47baadbd0..0b0897a7d8 100644
--- a/regression-test/suites/segcompaction_p1/test_segcompaction_dup_keys_index.groovy
+++ b/regression-test/suites/segcompaction_p1/test_segcompaction_dup_keys_index.groovy
@@ -35,7 +35,7 @@ suite("test_segcompaction_dup_keys_index") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
 
     backend_id = backendId_to_backendIP.keySet()[0]
diff --git a/regression-test/suites/segcompaction_p1/test_segcompaction_unique_keys.groovy b/regression-test/suites/segcompaction_p1/test_segcompaction_unique_keys.groovy
index d7a9d53fa6..1eef65e939 100644
--- a/regression-test/suites/segcompaction_p1/test_segcompaction_unique_keys.groovy
+++ b/regression-test/suites/segcompaction_p1/test_segcompaction_unique_keys.groovy
@@ -35,7 +35,7 @@ suite("test_segcompaction_unique_keys") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
 
     backend_id = backendId_to_backendIP.keySet()[0]
diff --git a/regression-test/suites/segcompaction_p1/test_segcompaction_unique_keys_index.groovy b/regression-test/suites/segcompaction_p1/test_segcompaction_unique_keys_index.groovy
index 94ef12961e..c53d73ae4e 100644
--- a/regression-test/suites/segcompaction_p1/test_segcompaction_unique_keys_index.groovy
+++ b/regression-test/suites/segcompaction_p1/test_segcompaction_unique_keys_index.groovy
@@ -35,7 +35,7 @@ suite("test_segcompaction_unique_keys_index") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
 
     backend_id = backendId_to_backendIP.keySet()[0]
diff --git a/regression-test/suites/segcompaction_p1/test_segcompaction_unique_keys_mow.groovy b/regression-test/suites/segcompaction_p1/test_segcompaction_unique_keys_mow.groovy
index 0165b0d2ec..d3c2c05963 100644
--- a/regression-test/suites/segcompaction_p1/test_segcompaction_unique_keys_mow.groovy
+++ b/regression-test/suites/segcompaction_p1/test_segcompaction_unique_keys_mow.groovy
@@ -35,7 +35,7 @@ suite("test_segcompaction_unique_keys_mow") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
 
     backend_id = backendId_to_backendIP.keySet()[0]
diff --git a/regression-test/suites/segcompaction_p1/test_segcompaction_unique_keys_mow_index.groovy b/regression-test/suites/segcompaction_p1/test_segcompaction_unique_keys_mow_index.groovy
index acdbbdf834..2e2bed246b 100644
--- a/regression-test/suites/segcompaction_p1/test_segcompaction_unique_keys_mow_index.groovy
+++ b/regression-test/suites/segcompaction_p1/test_segcompaction_unique_keys_mow_index.groovy
@@ -35,7 +35,7 @@ suite("test_segcompaction_unique_keys_mow_index") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
    }
 
     backend_id = backendId_to_backendIP.keySet()[0]
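Since the identical id-to-IP / id-to-HttpPort loop is repeated in every suite touched above, a shared helper would have reduced this index shift to a one-line change. A sketch of what that could look like; this helper is hypothetical and not part of this patch:

```groovy
// Hypothetical shared helper: build the backend maps once so a future
// column shift in `show backends` is fixed in a single place.
def getBackendMaps = {
    def ipMap = [:], httpPortMap = [:]
    for (String[] backend in sql(""" show backends; """)) {
        ipMap.put(backend[0], backend[2])
        httpPortMap.put(backend[0], backend[6])
    }
    [ipMap, httpPortMap]
}
```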