[Enhancement](Backends) add HostName field in backends table and delete backends table in information_schema (#18156)

1. Add `HostName` field for the `show backends` statement and the `backends()` tvf.
2. Delete the `backends` table in the `information_schema` database.
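A minimal sketch of how the change surfaces at the SQL layer (the queries are illustrative, not taken from this patch):

```sql
-- `HostName` now appears in both outputs:
show backends;
select BackendId, IP, HostName from backends();

-- The information_schema table is removed, so this no longer works:
-- select * from information_schema.backends;
```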
Tiewei Fang
2023-04-07 08:30:42 +08:00
committed by GitHub
parent 22deeecbe1
commit 759f1da32e
70 changed files with 324 additions and 682 deletions

View File

@ -51,7 +51,6 @@ set(EXEC_FILES
schema_scanner/schema_files_scanner.cpp
schema_scanner/schema_partitions_scanner.cpp
schema_scanner/schema_rowsets_scanner.cpp
schema_scanner/schema_backends_scanner.cpp
scan_node.cpp
odbc_connector.cpp
table_connector.cpp

View File

@ -19,7 +19,6 @@
#include <cstddef>
#include "exec/schema_scanner/schema_backends_scanner.h"
#include "exec/schema_scanner/schema_charsets_scanner.h"
#include "exec/schema_scanner/schema_collations_scanner.h"
#include "exec/schema_scanner/schema_columns_scanner.h"
@ -128,8 +127,6 @@ SchemaScanner* SchemaScanner::create(TSchemaTableType::type type) {
return new (std::nothrow) SchemaPartitionsScanner();
case TSchemaTableType::SCH_ROWSETS:
return new (std::nothrow) SchemaRowsetsScanner();
case TSchemaTableType::SCH_BACKENDS:
return new (std::nothrow) SchemaBackendsScanner();
default:
return new (std::nothrow) SchemaDummyScanner();
break;

View File

@ -47,7 +47,6 @@ struct SchemaScannerParam {
const std::string* ip; // frontend ip
int32_t port; // frontend thrift port
int64_t thread_id;
const std::vector<TSchemaTableStructure>* table_structure;
const std::string* catalog;
std::unique_ptr<RuntimeProfile> profile;

View File

@ -1,187 +0,0 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "exec/schema_scanner/schema_backends_scanner.h"
#include <gen_cpp/Descriptors_types.h>
#include <gen_cpp/FrontendService_types.h>
#include <gen_cpp/HeartbeatService_types.h>
#include "common/status.h"
#include "exec/schema_scanner.h"
#include "gen_cpp/FrontendService.h"
#include "runtime/client_cache.h"
#include "runtime/define_primitive_type.h"
#include "runtime/exec_env.h"
#include "runtime/primitive_type.h"
#include "util/thrift_rpc_helper.h"
#include "vec/common/string_ref.h"
namespace doris {
std::vector<SchemaScanner::ColumnDesc> SchemaBackendsScanner::_s_tbls_columns = {
// name, type, size
{"BackendId", TYPE_BIGINT, sizeof(StringRef), false},
{"TabletNum", TYPE_BIGINT, sizeof(StringRef), false},
{"HeartbeatPort", TYPE_INT, sizeof(int), false},
{"BePort", TYPE_INT, sizeof(int), false},
{"HttpPort", TYPE_INT, sizeof(int), false},
{"BrpcPort", TYPE_INT, sizeof(int), false},
{"Cluster", TYPE_VARCHAR, sizeof(StringRef), false},
{"IP", TYPE_VARCHAR, sizeof(StringRef), false},
{"LastStartTime", TYPE_VARCHAR, sizeof(StringRef), false},
{"LastHeartbeat", TYPE_VARCHAR, sizeof(StringRef), false},
{"Alive", TYPE_VARCHAR, sizeof(StringRef), false},
{"SystemDecommissioned", TYPE_VARCHAR, sizeof(StringRef), false},
{"ClusterDecommissioned", TYPE_VARCHAR, sizeof(StringRef), false},
{"DataUsedCapacity", TYPE_BIGINT, sizeof(int64_t), false},
{"AvailCapacity", TYPE_BIGINT, sizeof(int64_t), false},
{"TotalCapacity", TYPE_BIGINT, sizeof(int64_t), false},
{"UsedPct", TYPE_DOUBLE, sizeof(double), false},
{"MaxDiskUsedPct", TYPE_DOUBLE, sizeof(double), false},
{"RemoteUsedCapacity", TYPE_BIGINT, sizeof(int64_t), false},
{"Tag", TYPE_VARCHAR, sizeof(StringRef), false},
{"ErrMsg", TYPE_VARCHAR, sizeof(StringRef), false},
{"Version", TYPE_VARCHAR, sizeof(StringRef), false},
{"Status", TYPE_VARCHAR, sizeof(StringRef), false},
};
SchemaBackendsScanner::SchemaBackendsScanner()
: SchemaScanner(_s_tbls_columns, TSchemaTableType::SCH_BACKENDS) {}
Status SchemaBackendsScanner::start(RuntimeState* state) {
if (!_is_init) {
return Status::InternalError("used before initialized.");
}
RETURN_IF_ERROR(_fetch_backends_info());
RETURN_IF_ERROR(_set_col_name_to_type());
return Status::OK();
}
Status SchemaBackendsScanner::get_next_block(vectorized::Block* block, bool* eos) {
if (!_is_init) {
return Status::InternalError("Used before initialized.");
}
if (nullptr == block || nullptr == eos) {
return Status::InternalError("input pointer is nullptr.");
}
*eos = true;
return _fill_block_impl(block);
}
Status SchemaBackendsScanner::_fill_block_impl(vectorized::Block* block) {
SCOPED_TIMER(_fill_block_timer);
auto row_num = _batch_data.size();
std::vector<void*> null_datas(row_num, nullptr);
std::vector<void*> datas(row_num);
for (size_t col_idx = 0; col_idx < _columns.size(); ++col_idx) {
auto it = _col_name_to_type.find(_columns[col_idx].name);
if (it == _col_name_to_type.end()) {
if (_columns[col_idx].is_null) {
fill_dest_column_for_range(block, col_idx, null_datas);
} else {
return Status::InternalError(
"column {} is not found in BE, and {} is not nullable.",
_columns[col_idx].name, _columns[col_idx].name);
}
} else if (it->second == TYPE_BIGINT) {
for (int row_idx = 0; row_idx < row_num; ++row_idx) {
datas[row_idx] = &_batch_data[row_idx].column_value[col_idx].longVal;
}
fill_dest_column_for_range(block, col_idx, datas);
} else if (it->second == TYPE_INT) {
for (int row_idx = 0; row_idx < row_num; ++row_idx) {
datas[row_idx] = &_batch_data[row_idx].column_value[col_idx].intVal;
}
fill_dest_column_for_range(block, col_idx, datas);
} else if (it->second == TYPE_VARCHAR) {
for (int row_idx = 0; row_idx < row_num; ++row_idx) {
datas[row_idx] = &_batch_data[row_idx].column_value[col_idx].stringVal;
}
fill_dest_column_for_range(block, col_idx, datas);
} else if (it->second == TYPE_DOUBLE) {
for (int row_idx = 0; row_idx < row_num; ++row_idx) {
datas[row_idx] = &_batch_data[row_idx].column_value[col_idx].doubleVal;
}
fill_dest_column_for_range(block, col_idx, datas);
} else {
// other type
}
}
return Status::OK();
}
Status SchemaBackendsScanner::_fetch_backends_info() {
TFetchSchemaTableDataRequest request;
request.cluster_name = "";
request.__isset.cluster_name = true;
request.schema_table_name = TSchemaTableName::BACKENDS;
request.__isset.schema_table_name = true;
TNetworkAddress master_addr = ExecEnv::GetInstance()->master_info()->network_address;
// TODO(ftw): if result will too large?
TFetchSchemaTableDataResult result;
RETURN_IF_ERROR(ThriftRpcHelper::rpc<FrontendServiceClient>(
master_addr.hostname, master_addr.port,
[&request, &result](FrontendServiceConnection& client) {
client->fetchSchemaTableData(result, request);
},
config::txn_commit_rpc_timeout_ms));
Status status(result.status);
if (!status.ok()) {
LOG(WARNING) << "fetch schema table data from master failed, errmsg=" << status;
return status;
}
_batch_data = std::move(result.data_batch);
return Status::OK();
}
Status SchemaBackendsScanner::_set_col_name_to_type() {
_col_name_to_type.emplace("BackendId", TYPE_BIGINT);
_col_name_to_type.emplace("TabletNum", TYPE_BIGINT);
_col_name_to_type.emplace("HeartbeatPort", TYPE_INT);
_col_name_to_type.emplace("BePort", TYPE_INT);
_col_name_to_type.emplace("HttpPort", TYPE_INT);
_col_name_to_type.emplace("BrpcPort", TYPE_INT);
_col_name_to_type.emplace("Cluster", TYPE_VARCHAR);
_col_name_to_type.emplace("IP", TYPE_VARCHAR);
_col_name_to_type.emplace("LastStartTime", TYPE_VARCHAR);
_col_name_to_type.emplace("LastHeartbeat", TYPE_VARCHAR);
_col_name_to_type.emplace("Alive", TYPE_VARCHAR);
_col_name_to_type.emplace("SystemDecommissioned", TYPE_VARCHAR);
_col_name_to_type.emplace("ClusterDecommissioned", TYPE_VARCHAR);
_col_name_to_type.emplace("DataUsedCapacity", TYPE_BIGINT);
_col_name_to_type.emplace("AvailCapacity", TYPE_BIGINT);
_col_name_to_type.emplace("TotalCapacity", TYPE_BIGINT);
_col_name_to_type.emplace("UsedPct", TYPE_DOUBLE);
_col_name_to_type.emplace("MaxDiskUsedPct", TYPE_DOUBLE);
_col_name_to_type.emplace("RemoteUsedCapacity", TYPE_BIGINT);
_col_name_to_type.emplace("Tag", TYPE_VARCHAR);
_col_name_to_type.emplace("ErrMsg", TYPE_VARCHAR);
_col_name_to_type.emplace("Version", TYPE_VARCHAR);
_col_name_to_type.emplace("Status", TYPE_VARCHAR);
return Status::OK();
}
} // namespace doris

View File

@ -1,43 +0,0 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#pragma once
#include "common/status.h"
#include "exec/schema_scanner.h"
namespace doris {
class SchemaBackendsScanner : public SchemaScanner {
public:
SchemaBackendsScanner();
~SchemaBackendsScanner() override = default;
Status start(RuntimeState* state) override;
Status get_next_block(vectorized::Block* block, bool* eos) override;
private:
Status _fill_block_impl(vectorized::Block* block);
Status _fetch_backends_info();
Status _set_col_name_to_type();
// column_name -> type, set by _set_col_name_to_type()
std::unordered_map<std::string, PrimitiveType> _col_name_to_type;
static std::vector<SchemaScanner::ColumnDesc> _s_tbls_columns;
std::vector<TRow> _batch_data;
};
} // namespace doris

View File

@ -84,11 +84,6 @@ Status VSchemaScanNode::init(const TPlanNode& tnode, RuntimeState* state) {
_scanner_param.thread_id = tnode.schema_scan_node.thread_id;
}
if (tnode.schema_scan_node.__isset.table_structure) {
_scanner_param.table_structure = _pool->add(
new std::vector<TSchemaTableStructure>(tnode.schema_scan_node.table_structure));
}
if (tnode.schema_scan_node.__isset.catalog) {
_scanner_param.catalog = _pool->add(new std::string(tnode.schema_scan_node.catalog));
}

View File

@ -1,87 +0,0 @@
---
{
"title": "backends",
"language": "en"
}
---
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
## backends
### Name
backends
### description
`backends` is a built-in system table of Doris, stored in the `information_schema` database. You can view the `BE` node information through the `backends` system table.
The `backends` table schema is:
```sql
MySQL [information_schema]> desc information_schema.backends;
+-----------------------+-------------+------+-------+---------+-------+
| Field | Type | Null | Key | Default | Extra |
+-----------------------+-------------+------+-------+---------+-------+
| BackendId | BIGINT | Yes | false | NULL | |
| Cluster | VARCHAR(40) | Yes | false | NULL | |
| IP | VARCHAR(40) | Yes | false | NULL | |
| HeartbeatPort | INT | Yes | false | NULL | |
| BePort | INT | Yes | false | NULL | |
| HttpPort | INT | Yes | false | NULL | |
| BrpcPort | INT | Yes | false | NULL | |
| LastStartTime | VARCHAR(40) | Yes | false | NULL | |
| LastHeartbeat | VARCHAR(40) | Yes | false | NULL | |
| Alive | VARCHAR(40) | Yes | false | NULL | |
| SystemDecommissioned | VARCHAR(40) | Yes | false | NULL | |
| ClusterDecommissioned | VARCHAR(40) | Yes | false | NULL | |
| TabletNum | BIGINT | Yes | false | NULL | |
| DataUsedCapacity | BIGINT | Yes | false | NULL | |
| AvailCapacity | BIGINT | Yes | false | NULL | |
| TotalCapacity | BIGINT | Yes | false | NULL | |
| UsedPct | DOUBLE | Yes | false | NULL | |
| MaxDiskUsedPct | DOUBLE | Yes | false | NULL | |
| RemoteUsedCapacity | BIGINT | Yes | false | NULL | |
| Tag | VARCHAR(40) | Yes | false | NULL | |
| ErrMsg | VARCHAR(40) | Yes | false | NULL | |
| Version | VARCHAR(40) | Yes | false | NULL | |
| Status | VARCHAR(40) | Yes | false | NULL | |
+-----------------------+-------------+------+-------+---------+-------+
```
The information displayed by the `backends` system table is basically consistent with the information displayed by the `show backends` statement. However, the types of each field in the `backends` system table are more specific, and you can use the `backends` system table to perform operations such as filtering and joining.
### Example
```sql
MySQL [information_schema]> select * from information_schema.backends;
+-----------+-----------------+-----------+---------------+--------+----------+----------+---------------------+---------------------+-------+----------------------+-----------------------+-----------+------------------+---------------+---------------+--------------------+------------------+--------------------+--------------------------+--------+-----------------------------+-------------------------------------------------------------------------------------------------------------------------------+
| BackendId | Cluster | IP | HeartbeatPort | BePort | HttpPort | BrpcPort | LastStartTime | LastHeartbeat | Alive | SystemDecommissioned | ClusterDecommissioned | TabletNum | DataUsedCapacity | AvailCapacity | TotalCapacity | UsedPct | MaxDiskUsedPct | RemoteUsedCapacity | Tag | ErrMsg | Version | Status |
+-----------+-----------------+-----------+---------------+--------+----------+----------+---------------------+---------------------+-------+----------------------+-----------------------+-----------+------------------+---------------+---------------+--------------------+------------------+--------------------+--------------------------+--------+-----------------------------+-------------------------------------------------------------------------------------------------------------------------------+
| 10757 | default_cluster | 127.0.0.1 | 9159 | 9169 | 8149 | 8169 | 2022-11-24 11:16:31 | 2022-11-24 12:02:57 | true | false | false | 14 | 0 | 941359747073 | 3170529116160 | 70.309064746482065 | 70.3090647465136 | 0 | {"location" : "default"} | | doris-0.0.0-trunk-cc9545359 | {"lastSuccessReportTabletsTime":"2022-11-24 12:02:06","lastStreamLoadTime":-1,"isQueryDisabled":false,"isLoadDisabled":false} |
+-----------+-----------------+-----------+---------------+--------+----------+----------+---------------------+---------------------+-------+----------------------+-----------------------+-----------+------------------+---------------+---------------+--------------------+------------------+--------------------+--------------------------+--------+-----------------------------+-------------------------------------------------------------------------------------------------------------------------------+
```
### KeyWords
backends, information_schema
### Best Practice

View File

@ -0,0 +1,122 @@
---
{
"title": "backends",
"language": "en"
}
---
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
## `backends`
### Name
<version since="dev">
backends
</version>
### description
A table-valued function that generates a temporary table named `backends`. This tvf is used to view the information of BE nodes in the Doris cluster.
This function is used in the `FROM` clause.
grammar:
```
backends();
```
The table schema of the `backends()` tvf:
```
mysql> desc function backends();
+-------------------------+--------+------+-------+---------+-------+
| Field | Type | Null | Key | Default | Extra |
+-------------------------+--------+------+-------+---------+-------+
| BackendId | BIGINT | No | false | NULL | NONE |
| Cluster | TEXT | No | false | NULL | NONE |
| IP | TEXT | No | false | NULL | NONE |
| HostName | TEXT | No | false | NULL | NONE |
| HeartbeatPort | INT | No | false | NULL | NONE |
| BePort | INT | No | false | NULL | NONE |
| HttpPort | INT | No | false | NULL | NONE |
| BrpcPort | INT | No | false | NULL | NONE |
| LastStartTime | TEXT | No | false | NULL | NONE |
| LastHeartbeat | TEXT | No | false | NULL | NONE |
| Alive | TEXT | No | false | NULL | NONE |
| SystemDecommissioned | TEXT | No | false | NULL | NONE |
| ClusterDecommissioned | TEXT | No | false | NULL | NONE |
| TabletNum | BIGINT | No | false | NULL | NONE |
| DataUsedCapacity | BIGINT | No | false | NULL | NONE |
| AvailCapacity | BIGINT | No | false | NULL | NONE |
| TotalCapacity | BIGINT | No | false | NULL | NONE |
| UsedPct | DOUBLE | No | false | NULL | NONE |
| MaxDiskUsedPct | DOUBLE | No | false | NULL | NONE |
| RemoteUsedCapacity | BIGINT | No | false | NULL | NONE |
| Tag | TEXT | No | false | NULL | NONE |
| ErrMsg | TEXT | No | false | NULL | NONE |
| Version | TEXT | No | false | NULL | NONE |
| Status | TEXT | No | false | NULL | NONE |
| HeartbeatFailureCounter | INT | No | false | NULL | NONE |
| NodeRole | TEXT | No | false | NULL | NONE |
+-------------------------+--------+------+-------+---------+-------+
26 rows in set (0.04 sec)
```
The information displayed by the `backends` tvf is basically consistent with the information displayed by the `show backends` statement. However, the types of each field in the `backends` tvf are more specific, and you can use the `backends` tvf to perform operations such as filtering and joining.
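Because the columns are typed, predicates can be applied to them directly; a minimal sketch of such a filter (the threshold is hypothetical):

```
select BackendId, HostName, UsedPct
from backends()
where Alive = 'true' and UsedPct > 70.0;
```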
### example
```
mysql> select * from backends()\G
*************************** 1. row ***************************
BackendId: 10022
Cluster: default_cluster
IP: 10.16.10.14
HostName: 10.16.10.14
HeartbeatPort: 9159
BePort: 9169
HttpPort: 8149
BrpcPort: 8169
LastStartTime: 2023-03-24 14:37:00
LastHeartbeat: 2023-03-27 20:25:35
Alive: true
SystemDecommissioned: false
ClusterDecommissioned: false
TabletNum: 21
DataUsedCapacity: 0
AvailCapacity: 787460558849
TotalCapacity: 3169589592064
UsedPct: 75.155756416520319
MaxDiskUsedPct: 75.155756416551881
RemoteUsedCapacity: 0
Tag: {"location" : "default"}
ErrMsg:
Version: doris-0.0.0-trunk-8de51f96f3
Status: {"lastSuccessReportTabletsTime":"2023-03-27 20:24:55","lastStreamLoadTime":-1,"isQueryDisabled":false,"isLoadDisabled":false}
HeartbeatFailureCounter: 0
NodeRole: mix
1 row in set (0.03 sec)
```
### keywords
backends

View File

@ -72,7 +72,7 @@ illustrate:
- `column_separator`: Specifies the exported column separator, default is \t. Only single byte is supported.
- `line_delimiter`: Specifies the line delimiter for export, the default is \n. Only single byte is supported.
- `exec_mem_limit`: Export the upper limit of the memory usage of a single BE node, the default is 2GB, and the unit is bytes.
- `timeout`: The timeout period of the import job, the default is 2 hours, the unit is seconds.
- `timeout`: The timeout period of the export job, the default is 2 hours, the unit is seconds.
- `tablet_num_per_task`: The maximum number of tablets each subtask can allocate to scan.
- `WITH BROKER`

View File

@ -684,7 +684,8 @@
"sql-manual/sql-functions/table-functions/explode-numbers",
"sql-manual/sql-functions/table-functions/s3",
"sql-manual/sql-functions/table-functions/hdfs",
"sql-manual/sql-functions/table-functions/iceberg_meta"
"sql-manual/sql-functions/table-functions/iceberg_meta",
"sql-manual/sql-functions/table-functions/backends"
]
},
{
@ -1105,7 +1106,6 @@
"type": "category",
"label": "System Table",
"items": [
"admin-manual/system-table/backends",
"admin-manual/system-table/rowsets"
]
},
@ -1222,4 +1222,4 @@
]
}
]
}
}

View File

@ -1,89 +0,0 @@
---
{
"title": "backends",
"language": "zh-CN"
}
---
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
## backends
### Name
<version since="1.2">
backends
</version>
### description
`backends` is a built-in system table of Doris, stored in the `information_schema` database. You can view the `BE` node information of the current Doris cluster through the `backends` system table.
The `backends` table schema is:
```sql
MySQL [information_schema]> desc information_schema.backends;
+-----------------------+-------------+------+-------+---------+-------+
| Field | Type | Null | Key | Default | Extra |
+-----------------------+-------------+------+-------+---------+-------+
| BackendId | BIGINT | Yes | false | NULL | |
| Cluster | VARCHAR(40) | Yes | false | NULL | |
| IP | VARCHAR(40) | Yes | false | NULL | |
| HeartbeatPort | INT | Yes | false | NULL | |
| BePort | INT | Yes | false | NULL | |
| HttpPort | INT | Yes | false | NULL | |
| BrpcPort | INT | Yes | false | NULL | |
| LastStartTime | VARCHAR(40) | Yes | false | NULL | |
| LastHeartbeat | VARCHAR(40) | Yes | false | NULL | |
| Alive | VARCHAR(40) | Yes | false | NULL | |
| SystemDecommissioned | VARCHAR(40) | Yes | false | NULL | |
| ClusterDecommissioned | VARCHAR(40) | Yes | false | NULL | |
| TabletNum | BIGINT | Yes | false | NULL | |
| DataUsedCapacity | BIGINT | Yes | false | NULL | |
| AvailCapacity | BIGINT | Yes | false | NULL | |
| TotalCapacity | BIGINT | Yes | false | NULL | |
| UsedPct | DOUBLE | Yes | false | NULL | |
| MaxDiskUsedPct | DOUBLE | Yes | false | NULL | |
| RemoteUsedCapacity | BIGINT | Yes | false | NULL | |
| Tag | VARCHAR(40) | Yes | false | NULL | |
| ErrMsg | VARCHAR(40) | Yes | false | NULL | |
| Version | VARCHAR(40) | Yes | false | NULL | |
| Status | VARCHAR(40) | Yes | false | NULL | |
+-----------------------+-------------+------+-------+---------+-------+
```
The information displayed by the `backends` system table is basically consistent with the information displayed by the `show backends` statement. However, the field types of the `backends` system table are more specific, and you can use the `backends` system table to perform operations such as filtering and joining.
### Example
```sql
MySQL [information_schema]> select * from information_schema.backends;
+-----------+-----------------+-----------+---------------+--------+----------+----------+---------------------+---------------------+-------+----------------------+-----------------------+-----------+------------------+---------------+---------------+--------------------+------------------+--------------------+--------------------------+--------+-----------------------------+-------------------------------------------------------------------------------------------------------------------------------+
| BackendId | Cluster | IP | HeartbeatPort | BePort | HttpPort | BrpcPort | LastStartTime | LastHeartbeat | Alive | SystemDecommissioned | ClusterDecommissioned | TabletNum | DataUsedCapacity | AvailCapacity | TotalCapacity | UsedPct | MaxDiskUsedPct | RemoteUsedCapacity | Tag | ErrMsg | Version | Status |
+-----------+-----------------+-----------+---------------+--------+----------+----------+---------------------+---------------------+-------+----------------------+-----------------------+-----------+------------------+---------------+---------------+--------------------+------------------+--------------------+--------------------------+--------+-----------------------------+-------------------------------------------------------------------------------------------------------------------------------+
| 10757 | default_cluster | 127.0.0.1 | 9159 | 9169 | 8149 | 8169 | 2022-11-24 11:16:31 | 2022-11-24 12:02:57 | true | false | false | 14 | 0 | 941359747073 | 3170529116160 | 70.309064746482065 | 70.3090647465136 | 0 | {"location" : "default"} | | doris-0.0.0-trunk-cc9545359 | {"lastSuccessReportTabletsTime":"2022-11-24 12:02:06","lastStreamLoadTime":-1,"isQueryDisabled":false,"isLoadDisabled":false} |
+-----------+-----------------+-----------+---------------+--------+----------+----------+---------------------+---------------------+-------+----------------------+-----------------------+-----------+------------------+---------------+---------------+--------------------+------------------+--------------------+--------------------------+--------+-----------------------------+-------------------------------------------------------------------------------------------------------------------------------+
```
### KeyWords
backends, information_schema
### Best Practice

View File

@ -0,0 +1,122 @@
---
{
"title": "backends",
"language": "zh-CN"
}
---
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
## `backends`
### Name
<version since="dev">
backends
</version>
### description
A table-valued function that generates a temporary `backends` table, through which you can view the information of BE nodes in the current Doris cluster.
This function is used in the `FROM` clause.
grammar:
```
backends();
```
The table schema of the `backends()` tvf:
```
mysql> desc function backends();
+-------------------------+--------+------+-------+---------+-------+
| Field | Type | Null | Key | Default | Extra |
+-------------------------+--------+------+-------+---------+-------+
| BackendId | BIGINT | No | false | NULL | NONE |
| Cluster | TEXT | No | false | NULL | NONE |
| IP | TEXT | No | false | NULL | NONE |
| HostName | TEXT | No | false | NULL | NONE |
| HeartbeatPort | INT | No | false | NULL | NONE |
| BePort | INT | No | false | NULL | NONE |
| HttpPort | INT | No | false | NULL | NONE |
| BrpcPort | INT | No | false | NULL | NONE |
| LastStartTime | TEXT | No | false | NULL | NONE |
| LastHeartbeat | TEXT | No | false | NULL | NONE |
| Alive | TEXT | No | false | NULL | NONE |
| SystemDecommissioned | TEXT | No | false | NULL | NONE |
| ClusterDecommissioned | TEXT | No | false | NULL | NONE |
| TabletNum | BIGINT | No | false | NULL | NONE |
| DataUsedCapacity | BIGINT | No | false | NULL | NONE |
| AvailCapacity | BIGINT | No | false | NULL | NONE |
| TotalCapacity | BIGINT | No | false | NULL | NONE |
| UsedPct | DOUBLE | No | false | NULL | NONE |
| MaxDiskUsedPct | DOUBLE | No | false | NULL | NONE |
| RemoteUsedCapacity | BIGINT | No | false | NULL | NONE |
| Tag | TEXT | No | false | NULL | NONE |
| ErrMsg | TEXT | No | false | NULL | NONE |
| Version | TEXT | No | false | NULL | NONE |
| Status | TEXT | No | false | NULL | NONE |
| HeartbeatFailureCounter | INT | No | false | NULL | NONE |
| NodeRole | TEXT | No | false | NULL | NONE |
+-------------------------+--------+------+-------+---------+-------+
26 rows in set (0.04 sec)
```
The information displayed by the `backends()` tvf is basically consistent with the information displayed by the `show backends` statement, but the field types of the `backends()` tvf are more specific, and the table it generates can be used for operations such as filtering and joining.
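As a sketch of using the generated table in a larger query, an aggregation over the typed columns (the aliases are illustrative):

```
select NodeRole, count(*) as be_num, sum(TabletNum) as tablet_num
from backends()
group by NodeRole;
```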
### example
```
mysql> select * from backends()\G
*************************** 1. row ***************************
BackendId: 10022
Cluster: default_cluster
IP: 10.16.10.14
HostName: 10.16.10.14
HeartbeatPort: 9159
BePort: 9169
HttpPort: 8149
BrpcPort: 8169
LastStartTime: 2023-03-24 14:37:00
LastHeartbeat: 2023-03-27 20:25:35
Alive: true
SystemDecommissioned: false
ClusterDecommissioned: false
TabletNum: 21
DataUsedCapacity: 0
AvailCapacity: 787460558849
TotalCapacity: 3169589592064
UsedPct: 75.155756416520319
MaxDiskUsedPct: 75.155756416551881
RemoteUsedCapacity: 0
Tag: {"location" : "default"}
ErrMsg:
Version: doris-0.0.0-trunk-8de51f96f3
Status: {"lastSuccessReportTabletsTime":"2023-03-27 20:24:55","lastStreamLoadTime":-1,"isQueryDisabled":false,"isLoadDisabled":false}
HeartbeatFailureCounter: 0
NodeRole: mix
1 row in set (0.03 sec)
```
### keywords
backends

View File

@ -72,7 +72,7 @@ WITH BROKER
- `column_separator`: Specifies the exported column separator, default is \t. Only single byte is supported.
- `line_delimiter`: Specifies the exported line delimiter, default is \n. Only single byte is supported.
- `exec_mem_limit`: The upper limit of memory usage for the export on a single BE node, default is 2GB, in bytes.
- `timeout`: The timeout period of the import job, the default is 2 hours, the unit is seconds.
- `timeout`: The timeout period of the export job, the default is 2 hours, the unit is seconds.
- `tablet_num_per_task`: The maximum number of tablets each subtask can be allocated to scan.
- `WITH BROKER`
@ -234,4 +234,4 @@ PROPERTIES (
- If the Export job runs successfully, the `__doris_export_tmp_xxx` directory created in remote storage may be retained or cleared, depending on the file-system semantics of the remote storage. For example, in S3 object storage, after the last file in a directory is moved away by a rename operation, the directory is deleted as well. If the directory is not cleared, users can clear it manually.
- The Export job only exports data of the Base table, not data of materialized views.
- The Export job scans data and occupies IO resources, which may affect the query latency of the system.
- The maximum number of Export jobs running simultaneously in a cluster is 5. Jobs submitted after that will be queued.
- The maximum number of Export jobs running simultaneously in a cluster is 5. Jobs submitted after that will be queued.

View File

@ -326,10 +326,10 @@ INTO OUTFILE "file_path"
3. Export to a local file
The ability to export to a local file is not available to public-cloud users; it only applies to privately deployed clusters, where the user by default has full control over the cluster nodes. Doris does not validate the export path filled in by the user: if the Doris process user has no write permission on the path, or the path does not exist, an error is reported. For security reasons, if a file with the same name already exists in the path, the export also fails.
The ability to export to a local file is not available to public-cloud users; it only applies to privately deployed clusters, where the user by default has full control over the cluster nodes. Doris does not validate the export path filled in by the user: if the Doris process user has no write permission on the path, or the path does not exist, an error is reported. For security reasons, if a file with the same name already exists in the path, the export also fails.
Doris does not manage files exported to the local file system, nor does it check disk space and the like. Users need to manage these files themselves, e.g., clean them up.
4. Guarantee of result integrity
This command is synchronous, so the connection may be dropped while it is running, making it impossible to tell whether the exported data finished normally or is complete. In that case, the `success_file_name` parameter can be used to require that a success-marker file be generated in the directory after the task succeeds; users can use this file to determine whether the export finished normally.
This command is synchronous, so the connection may be dropped while it is running, making it impossible to know whether the export task finished normally and whether the exported data is complete. In that case, the `success_file_name` parameter can be used to require that a success-marker file be generated in the directory after the task succeeds; users can use this file to determine whether the export finished normally.

View File

@ -67,8 +67,7 @@ public enum SchemaTableType {
SCH_VIEWS("VIEWS", "VIEWS", TSchemaTableType.SCH_VIEWS),
SCH_CREATE_TABLE("CREATE_TABLE", "CREATE_TABLE", TSchemaTableType.SCH_CREATE_TABLE),
SCH_INVALID("NULL", "NULL", TSchemaTableType.SCH_INVALID),
SCH_ROWSETS("ROWSETS", "ROWSETS", TSchemaTableType.SCH_ROWSETS),
SCH_BACKENDS("BACKENDS", "BACKENDS", TSchemaTableType.SCH_BACKENDS);
SCH_ROWSETS("ROWSETS", "ROWSETS", TSchemaTableType.SCH_ROWSETS);
private static final String dbName = "INFORMATION_SCHEMA";
private static SelectList fullSelectLists;

View File

@ -46,10 +46,6 @@ public class ShowBackendsStmt extends ShowStmt {
public ShowResultSetMetaData getMetaData() {
ShowResultSetMetaData.Builder builder = ShowResultSetMetaData.builder();
for (String title : BackendsProcDir.TITLE_NAMES) {
// hide hostname for SHOW BACKENDS stmt
if (title.equals("HostName")) {
continue;
}
builder.addColumn(new Column(title, ScalarType.createVarchar(30)));
}
return builder.build();

View File

@ -46,10 +46,6 @@ public class ShowFrontendsStmt extends ShowStmt {
public ShowResultSetMetaData getMetaData() {
ShowResultSetMetaData.Builder builder = ShowResultSetMetaData.builder();
for (String title : FrontendsProcNode.TITLE_NAMES) {
// hide hostname for SHOW FRONTENDS stmt
if (title.equals("HostName")) {
continue;
}
builder.addColumn(new Column(title, ScalarType.createVarchar(30)));
}
return builder.build();

View File

@ -20,7 +20,6 @@ package org.apache.doris.catalog;
import org.apache.doris.analysis.SchemaTableType;
import org.apache.doris.common.SystemIdGenerator;
import org.apache.doris.thrift.TSchemaTable;
import org.apache.doris.thrift.TSchemaTableStructure;
import org.apache.doris.thrift.TTableDescriptor;
import org.apache.doris.thrift.TTableType;
@ -393,54 +392,8 @@ public class SchemaTable extends Table {
.column("CREATION_TIME", ScalarType.createType(PrimitiveType.BIGINT))
.column("NEWEST_WRITE_TIMESTAMP", ScalarType.createType(PrimitiveType.BIGINT))
.build()))
.put("backends", new SchemaTable(SystemIdGenerator.getNextId(), "backends", TableType.SCHEMA,
builder().column("BackendId", ScalarType.createType(PrimitiveType.BIGINT))
.column("Cluster", ScalarType.createVarchar(64))
.column("IP", ScalarType.createVarchar(16))
.column("HeartbeatPort", ScalarType.createType(PrimitiveType.INT))
.column("BePort", ScalarType.createType(PrimitiveType.INT))
.column("HttpPort", ScalarType.createType(PrimitiveType.INT))
.column("BrpcPort", ScalarType.createType(PrimitiveType.INT))
.column("LastStartTime", ScalarType.createVarchar(32))
.column("LastHeartbeat", ScalarType.createVarchar(32))
.column("Alive", ScalarType.createVarchar(8))
.column("SystemDecommissioned", ScalarType.createVarchar(8))
.column("ClusterDecommissioned", ScalarType.createVarchar(8))
.column("TabletNum", ScalarType.createType(PrimitiveType.BIGINT))
.column("DataUsedCapacity", ScalarType.createType(PrimitiveType.BIGINT))
.column("AvailCapacity", ScalarType.createType(PrimitiveType.BIGINT))
.column("TotalCapacity", ScalarType.createType(PrimitiveType.BIGINT))
.column("UsedPct", ScalarType.createType(PrimitiveType.DOUBLE))
.column("MaxDiskUsedPct", ScalarType.createType(PrimitiveType.DOUBLE))
.column("RemoteUsedCapacity", ScalarType.createType(PrimitiveType.BIGINT))
.column("Tag", ScalarType.createVarchar(128))
.column("ErrMsg", ScalarType.createVarchar(2048))
.column("Version", ScalarType.createVarchar(64))
.column("Status", ScalarType.createVarchar(1024))
.build()))
.build();
public static List<TSchemaTableStructure> getTableStructure(String tableName) {
List<TSchemaTableStructure> tSchemaTableStructureList = Lists.newArrayList();
switch (tableName) {
case "backends": {
Table table = TABLE_MAP.get(tableName);
for (Column column : table.getFullSchema()) {
TSchemaTableStructure tSchemaTableStructure = new TSchemaTableStructure();
tSchemaTableStructure.setColumnName(column.getName());
tSchemaTableStructure.setType(column.getDataType().toThrift());
tSchemaTableStructure.setLen(column.getDataType().getSlotSize());
tSchemaTableStructure.setIsNull(column.isAllowNull());
tSchemaTableStructureList.add(tSchemaTableStructure);
}
break;
}
default:
break;
}
return tSchemaTableStructureList;
}
protected SchemaTable(long id, String name, TableType type, List<Column> baseSchema) {
super(id, name, type, baseSchema);
}

View File

@ -50,8 +50,6 @@ public class FrontendsProcNode implements ProcNodeInterface {
.add("CurrentConnected")
.build();
public static final int HOSTNAME_INDEX = 2;
private Env env;
public FrontendsProcNode(Env env) {

View File

@ -122,8 +122,6 @@ public class SchemaScanNode extends ScanNode {
TUserIdentity tCurrentUser = ConnectContext.get().getCurrentUserIdentity().toThrift();
msg.schema_scan_node.setCurrentUserIdent(tCurrentUser);
msg.schema_scan_node.setTableStructure(SchemaTable.getTableStructure(tableName));
}
/**

View File

@ -1820,9 +1820,6 @@ public class ShowExecutor {
final ShowBackendsStmt showStmt = (ShowBackendsStmt) stmt;
List<List<String>> backendInfos = BackendsProcDir.getClusterBackendInfos(showStmt.getClusterName());
for (List<String> row : backendInfos) {
row.remove(BackendsProcDir.HOSTNAME_INDEX);
}
backendInfos.sort(new Comparator<List<String>>() {
@Override
public int compare(List<String> o1, List<String> o2) {
@ -1838,10 +1835,6 @@ public class ShowExecutor {
List<List<String>> infos = Lists.newArrayList();
FrontendsProcNode.getFrontendsInfo(Env.getCurrentEnv(), infos);
for (List<String> row : infos) {
row.remove(FrontendsProcNode.HOSTNAME_INDEX);
}
resultSet = new ShowResultSet(showStmt.getMetaData(), infos);
}

View File

@ -1340,8 +1340,6 @@ public class FrontendServiceImpl implements FrontendService.Iface {
@Override
public TFetchSchemaTableDataResult fetchSchemaTableData(TFetchSchemaTableDataRequest request) throws TException {
switch (request.getSchemaTableName()) {
case BACKENDS:
return MetadataGenerator.getBackendsSchemaTable(request);
case METADATA_TABLE:
return MetadataGenerator.getMetadataTable(request);
default:

View File

@ -67,17 +67,18 @@ public class BackendsTableValuedFunction extends MetadataTableValuedFunction {
public List<Column> getTableColumns() throws AnalysisException {
List<Column> resColumns = Lists.newArrayList();
resColumns.add(new Column("BackendId", ScalarType.createType(PrimitiveType.BIGINT)));
resColumns.add(new Column("Cluster", ScalarType.createVarchar(64)));
resColumns.add(new Column("IP", ScalarType.createVarchar(16)));
resColumns.add(new Column("Cluster", ScalarType.createStringType()));
resColumns.add(new Column("IP", ScalarType.createStringType()));
resColumns.add(new Column("HostName", ScalarType.createStringType()));
resColumns.add(new Column("HeartbeatPort", ScalarType.createType(PrimitiveType.INT)));
resColumns.add(new Column("BePort", ScalarType.createType(PrimitiveType.INT)));
resColumns.add(new Column("HttpPort", ScalarType.createType(PrimitiveType.INT)));
resColumns.add(new Column("BrpcPort", ScalarType.createType(PrimitiveType.INT)));
resColumns.add(new Column("LastStartTime", ScalarType.createVarchar(32)));
resColumns.add(new Column("LastHeartbeat", ScalarType.createVarchar(32)));
resColumns.add(new Column("Alive", ScalarType.createVarchar(8)));
resColumns.add(new Column("SystemDecommissioned", ScalarType.createVarchar(8)));
resColumns.add(new Column("ClusterDecommissioned", ScalarType.createVarchar(8)));
resColumns.add(new Column("LastStartTime", ScalarType.createStringType()));
resColumns.add(new Column("LastHeartbeat", ScalarType.createStringType()));
resColumns.add(new Column("Alive", ScalarType.createStringType()));
resColumns.add(new Column("SystemDecommissioned", ScalarType.createStringType()));
resColumns.add(new Column("ClusterDecommissioned", ScalarType.createStringType()));
resColumns.add(new Column("TabletNum", ScalarType.createType(PrimitiveType.BIGINT)));
resColumns.add(new Column("DataUsedCapacity", ScalarType.createType(PrimitiveType.BIGINT)));
resColumns.add(new Column("AvailCapacity", ScalarType.createType(PrimitiveType.BIGINT)));
@ -85,12 +86,12 @@ public class BackendsTableValuedFunction extends MetadataTableValuedFunction {
resColumns.add(new Column("UsedPct", ScalarType.createType(PrimitiveType.DOUBLE)));
resColumns.add(new Column("MaxDiskUsedPct", ScalarType.createType(PrimitiveType.DOUBLE)));
resColumns.add(new Column("RemoteUsedCapacity", ScalarType.createType(PrimitiveType.BIGINT)));
resColumns.add(new Column("Tag", ScalarType.createVarchar(128)));
resColumns.add(new Column("ErrMsg", ScalarType.createVarchar(2048)));
resColumns.add(new Column("Version", ScalarType.createVarchar(64)));
resColumns.add(new Column("Status", ScalarType.createVarchar(1024)));
resColumns.add(new Column("Tag", ScalarType.createStringType()));
resColumns.add(new Column("ErrMsg", ScalarType.createStringType()));
resColumns.add(new Column("Version", ScalarType.createStringType()));
resColumns.add(new Column("Status", ScalarType.createStringType()));
resColumns.add(new Column("HeartbeatFailureCounter", ScalarType.createType(PrimitiveType.INT)));
resColumns.add(new Column("NodeRole", ScalarType.createVarchar(64)));
resColumns.add(new Column("NodeRole", ScalarType.createStringType()));
return resColumns;
}
}

View File

@ -74,107 +74,6 @@ public class MetadataGenerator {
return errorResult("Metadata table params is not set. ");
}
// deprecated
public static TFetchSchemaTableDataResult getBackendsSchemaTable(TFetchSchemaTableDataRequest request) {
final SystemInfoService clusterInfoService = Env.getCurrentSystemInfo();
List<Long> backendIds = null;
if (!Strings.isNullOrEmpty(request.cluster_name)) {
final Cluster cluster = Env.getCurrentEnv().getCluster(request.cluster_name);
// root not in any cluster
if (null == cluster) {
return errorResult("Cluster is not existed.");
}
backendIds = cluster.getBackendIdList();
} else {
backendIds = clusterInfoService.getBackendIds(false);
}
TFetchSchemaTableDataResult result = new TFetchSchemaTableDataResult();
long start = System.currentTimeMillis();
Stopwatch watch = Stopwatch.createUnstarted();
List<TRow> dataBatch = Lists.newArrayList();
for (long backendId : backendIds) {
Backend backend = clusterInfoService.getBackend(backendId);
if (backend == null) {
continue;
}
watch.start();
Integer tabletNum = Env.getCurrentInvertedIndex().getTabletNumByBackendId(backendId);
watch.stop();
TRow trow = new TRow();
trow.addToColumnValue(new TCell().setLongVal(backendId));
trow.addToColumnValue(new TCell().setStringVal(backend.getOwnerClusterName()));
trow.addToColumnValue(new TCell().setStringVal(backend.getIp()));
if (Strings.isNullOrEmpty(request.cluster_name)) {
trow.addToColumnValue(new TCell().setIntVal(backend.getHeartbeatPort()));
trow.addToColumnValue(new TCell().setIntVal(backend.getBePort()));
trow.addToColumnValue(new TCell().setIntVal(backend.getHttpPort()));
trow.addToColumnValue(new TCell().setIntVal(backend.getBrpcPort()));
}
trow.addToColumnValue(new TCell().setStringVal(TimeUtils.longToTimeString(backend.getLastStartTime())));
trow.addToColumnValue(new TCell().setStringVal(TimeUtils.longToTimeString(backend.getLastUpdateMs())));
trow.addToColumnValue(new TCell().setStringVal(String.valueOf(backend.isAlive())));
if (backend.isDecommissioned() && backend.getDecommissionType() == DecommissionType.ClusterDecommission) {
trow.addToColumnValue(new TCell().setStringVal("false"));
trow.addToColumnValue(new TCell().setStringVal("true"));
} else if (backend.isDecommissioned()
&& backend.getDecommissionType() == DecommissionType.SystemDecommission) {
trow.addToColumnValue(new TCell().setStringVal("true"));
trow.addToColumnValue(new TCell().setStringVal("false"));
} else {
trow.addToColumnValue(new TCell().setStringVal("false"));
trow.addToColumnValue(new TCell().setStringVal("false"));
}
trow.addToColumnValue(new TCell().setLongVal(tabletNum));
// capacity
// data used
trow.addToColumnValue(new TCell().setLongVal(backend.getDataUsedCapacityB()));
// available
long availB = backend.getAvailableCapacityB();
trow.addToColumnValue(new TCell().setLongVal(availB));
// total
long totalB = backend.getTotalCapacityB();
trow.addToColumnValue(new TCell().setLongVal(totalB));
// used percent
double used = 0.0;
if (totalB <= 0) {
used = 0.0;
} else {
used = (double) (totalB - availB) * 100 / totalB;
}
trow.addToColumnValue(new TCell().setDoubleVal(used));
trow.addToColumnValue(new TCell().setDoubleVal(backend.getMaxDiskUsedPct() * 100));
// remote used capacity
trow.addToColumnValue(new TCell().setLongVal(backend.getRemoteUsedCapacityB()));
// tags
trow.addToColumnValue(new TCell().setStringVal(backend.getTagMapString()));
// err msg
trow.addToColumnValue(new TCell().setStringVal(backend.getHeartbeatErrMsg()));
// version
trow.addToColumnValue(new TCell().setStringVal(backend.getVersion()));
// status
trow.addToColumnValue(new TCell().setStringVal(new Gson().toJson(backend.getBackendStatus())));
dataBatch.add(trow);
}
// backends proc node get result too slow, add log to observer.
LOG.debug("backends proc get tablet num cost: {}, total cost: {}",
watch.elapsed(TimeUnit.MILLISECONDS), (System.currentTimeMillis() - start));
result.setDataBatch(dataBatch);
result.setStatus(new TStatus(TStatusCode.OK));
return result;
}
@NotNull
public static TFetchSchemaTableDataResult errorResult(String msg) {
TFetchSchemaTableDataResult result = new TFetchSchemaTableDataResult();
@ -266,6 +165,11 @@ public class MetadataGenerator {
trow.addToColumnValue(new TCell().setLongVal(backendId));
trow.addToColumnValue(new TCell().setStringVal(backend.getOwnerClusterName()));
trow.addToColumnValue(new TCell().setStringVal(backend.getIp()));
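// HostName column: use the recorded hostname, falling back to the IP when none is set.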
if (backend.getHostName() != null) {
trow.addToColumnValue(new TCell().setStringVal(backend.getHostName()));
} else {
trow.addToColumnValue(new TCell().setStringVal(backend.getIp()));
}
if (Strings.isNullOrEmpty(backendsParam.cluster_name)) {
trow.addToColumnValue(new TCell().setIntVal(backend.getHeartbeatPort()));
trow.addToColumnValue(new TCell().setIntVal(backend.getBePort()));

View File

@ -111,7 +111,6 @@ enum TSchemaTableType {
SCH_VIEWS,
SCH_INVALID,
SCH_ROWSETS,
SCH_BACKENDS,
SCH_COLUMN_STATISTICS
}

View File

@ -719,7 +719,7 @@ struct TInitExternalCtlMetaResult {
}
enum TSchemaTableName {
BACKENDS = 0,
// BACKENDS = 0,
METADATA_TABLE = 1,
}

View File

@ -512,13 +512,6 @@ struct TCsvScanNode {
10:optional map<string, TMiniLoadEtlFunction> column_function_mapping
}
struct TSchemaTableStructure {
1: optional string column_name
2: optional Types.TPrimitiveType type
3: optional i64 len
4: optional bool is_null;
}
struct TSchemaScanNode {
1: required Types.TTupleId tuple_id
@ -533,7 +526,7 @@ struct TSchemaScanNode {
10: optional string user_ip // deprecated
11: optional Types.TUserIdentity current_user_ident // to replace the user and user_ip
12: optional bool show_hidden_cloumns = false
13: optional list<TSchemaTableStructure> table_structure
// 13: optional list<TSchemaTableStructure> table_structure // deprecated
14: optional string catalog
}

View File

@ -1,7 +1,4 @@
-- This file is automatically generated. You should know what you did if you want to edit this
-- !backends --
true
-- !charsets --
true

View File

@ -1,7 +1,4 @@
-- This file is automatically generated. You should know what you did if you want to edit this
-- !backends --
true
-- !charsets --
true

View File

@ -29,7 +29,7 @@ suite("test_compaction_agg_keys") {
def backendId_to_backendHttpPort = [:]
for (String[] backend in backends) {
backendId_to_backendIP.put(backend[0], backend[2])
backendId_to_backendHttpPort.put(backend[0], backend[5])
backendId_to_backendHttpPort.put(backend[0], backend[6])
}
backend_id = backendId_to_backendIP.keySet()[0]

View File

@ -29,7 +29,7 @@ suite("test_compaction_agg_keys_with_delete") {
def backendId_to_backendHttpPort = [:]
for (String[] backend in backends) {
backendId_to_backendIP.put(backend[0], backend[2])
backendId_to_backendHttpPort.put(backend[0], backend[5])
backendId_to_backendHttpPort.put(backend[0], backend[6])
}
backend_id = backendId_to_backendIP.keySet()[0]

View File

@ -29,7 +29,7 @@ suite("test_compaction_dup_keys") {
def backendId_to_backendHttpPort = [:]
for (String[] backend in backends) {
backendId_to_backendIP.put(backend[0], backend[2])
backendId_to_backendHttpPort.put(backend[0], backend[5])
backendId_to_backendHttpPort.put(backend[0], backend[6])
}
backend_id = backendId_to_backendIP.keySet()[0]

View File

@ -29,7 +29,7 @@ suite("test_compaction_dup_keys_with_delete") {
def backendId_to_backendHttpPort = [:]
for (String[] backend in backends) {
backendId_to_backendIP.put(backend[0], backend[2])
backendId_to_backendHttpPort.put(backend[0], backend[5])
backendId_to_backendHttpPort.put(backend[0], backend[6])
}
backend_id = backendId_to_backendIP.keySet()[0]

View File

@ -29,7 +29,7 @@ suite("test_compaction_uniq_keys") {
def backendId_to_backendHttpPort = [:]
for (String[] backend in backends) {
backendId_to_backendIP.put(backend[0], backend[2])
backendId_to_backendHttpPort.put(backend[0], backend[5])
backendId_to_backendHttpPort.put(backend[0], backend[6])
}
backend_id = backendId_to_backendIP.keySet()[0]

View File

@ -42,7 +42,7 @@ suite("test_compaction_uniq_keys_row_store") {
def backendId_to_backendHttpPort = [:]
for (String[] backend in backends) {
backendId_to_backendIP.put(backend[0], backend[2])
backendId_to_backendHttpPort.put(backend[0], backend[5])
backendId_to_backendHttpPort.put(backend[0], backend[6])
}
backend_id = backendId_to_backendIP.keySet()[0]

View File

@ -29,7 +29,7 @@ suite("test_compaction_uniq_keys_with_delete") {
def backendId_to_backendHttpPort = [:]
for (String[] backend in backends) {
backendId_to_backendIP.put(backend[0], backend[2])
backendId_to_backendHttpPort.put(backend[0], backend[5])
backendId_to_backendHttpPort.put(backend[0], backend[6])
}
backend_id = backendId_to_backendIP.keySet()[0]

View File

@ -28,7 +28,7 @@ suite("test_vertical_compaction_agg_keys") {
setConfigCommand.append("curl -X POST http://")
setConfigCommand.append(backend[2])
setConfigCommand.append(":")
setConfigCommand.append(backend[5])
setConfigCommand.append(backend[6])
setConfigCommand.append("/api/update_config?")
String command1 = setConfigCommand.toString() + "enable_vertical_compaction=true"
logger.info(command1)
@ -45,7 +45,7 @@ suite("test_vertical_compaction_agg_keys") {
setConfigCommand.append("curl -X POST http://")
setConfigCommand.append(backend[2])
setConfigCommand.append(":")
setConfigCommand.append(backend[5])
setConfigCommand.append(backend[6])
setConfigCommand.append("/api/update_config?")
String command1 = setConfigCommand.toString() + "enable_vertical_compaction=false"
logger.info(command1)
@ -64,7 +64,7 @@ suite("test_vertical_compaction_agg_keys") {
def backendId_to_backendHttpPort = [:]
for (String[] backend in backends) {
backendId_to_backendIP.put(backend[0], backend[2])
backendId_to_backendHttpPort.put(backend[0], backend[5])
backendId_to_backendHttpPort.put(backend[0], backend[6])
}
backend_id = backendId_to_backendIP.keySet()[0]

View File

@ -28,7 +28,7 @@ suite("test_vertical_compaction_dup_keys") {
setConfigCommand.append("curl -X POST http://")
setConfigCommand.append(backend[2])
setConfigCommand.append(":")
setConfigCommand.append(backend[5])
setConfigCommand.append(backend[6])
setConfigCommand.append("/api/update_config?")
String command1 = setConfigCommand.toString() + "enable_vertical_compaction=true"
logger.info(command1)
@ -45,7 +45,7 @@ suite("test_vertical_compaction_dup_keys") {
setConfigCommand.append("curl -X POST http://")
setConfigCommand.append(backend[2])
setConfigCommand.append(":")
setConfigCommand.append(backend[5])
setConfigCommand.append(backend[6])
setConfigCommand.append("/api/update_config?")
String command1 = setConfigCommand.toString() + "enable_vertical_compaction=false"
logger.info(command1)
@ -64,7 +64,7 @@ suite("test_vertical_compaction_dup_keys") {
def backendId_to_backendHttpPort = [:]
for (String[] backend in backends) {
backendId_to_backendIP.put(backend[0], backend[2])
backendId_to_backendHttpPort.put(backend[0], backend[5])
backendId_to_backendHttpPort.put(backend[0], backend[6])
}
backend_id = backendId_to_backendIP.keySet()[0]

View File

@ -28,7 +28,7 @@ suite("test_vertical_compaction_uniq_keys") {
setConfigCommand.append("curl -X POST http://")
setConfigCommand.append(backend[2])
setConfigCommand.append(":")
setConfigCommand.append(backend[5])
setConfigCommand.append(backend[6])
setConfigCommand.append("/api/update_config?")
String command1 = setConfigCommand.toString() + "enable_vertical_compaction=true"
logger.info(command1)
@ -45,7 +45,7 @@ suite("test_vertical_compaction_uniq_keys") {
setConfigCommand.append("curl -X POST http://")
setConfigCommand.append(backend[2])
setConfigCommand.append(":")
setConfigCommand.append(backend[5])
setConfigCommand.append(backend[6])
setConfigCommand.append("/api/update_config?")
String command1 = setConfigCommand.toString() + "enable_vertical_compaction=false"
logger.info(command1)
@ -64,7 +64,7 @@ suite("test_vertical_compaction_uniq_keys") {
def backendId_to_backendHttpPort = [:]
for (String[] backend in backends) {
backendId_to_backendIP.put(backend[0], backend[2])
backendId_to_backendHttpPort.put(backend[0], backend[5])
backendId_to_backendHttpPort.put(backend[0], backend[6])
}
backend_id = backendId_to_backendIP.keySet()[0]

View File

@ -15,9 +15,9 @@
// specific language governing permissions and limitations
// under the License.
// This suite tests the `backends` information_schema table
suite("test_backends_table") {
List<List<Object>> table = sql """ select * from information_schema.backends; """
// This suite tests the `backends` tvf
suite("test_backends_tvf") {
List<List<Object>> table = sql """ select * from backends(); """
assertTrue(table.size() > 0) // row should > 0
assertTrue(table[0].size == 23) // column should be 23
assertTrue(table[0].size == 26) // column should be 26
}

View File

@ -24,9 +24,9 @@ suite("http_test_action") {
def backendIdToBackendIP = [:]
def backendIdToBackendBrpcPort = [:]
for (String[] backend in backends) {
if (backend[9].equals("true")) {
if (backend[10].equals("true")) {
backendIdToBackendIP.put(backend[0], backend[2])
backendIdToBackendBrpcPort.put(backend[0], backend[6])
backendIdToBackendBrpcPort.put(backend[0], backend[7])
}
}

View File

@ -115,7 +115,7 @@ suite("test_map_load_and_compaction", "p0") {
def backendId_to_backendHttpPort = [:]
for (String[] backend in backends) {
backendId_to_backendIP.put(backend[0], backend[2])
backendId_to_backendHttpPort.put(backend[0], backend[5])
backendId_to_backendHttpPort.put(backend[0], backend[6])
}
String tablet_id = tablet[0]
backend_id = tablet[2]
@ -168,7 +168,7 @@ suite("test_map_load_and_compaction", "p0") {
backends = sql """ show backends; """
assertTrue(backends.size() > 0)
for (String[] b : backends) {
assertEquals("true", b[9])
assertEquals("true", b[10])
}
} finally {
try_sql("DROP TABLE IF EXISTS ${testTable}")

View File

@ -28,10 +28,6 @@ suite("test_query_sys_tables", "query,p0") {
sql("drop database IF EXISTS ${dbName2}")
sql("drop database IF EXISTS ${dbName3}")
// test backends
sql("use information_schema")
qt_backends("select count(*) >= 1 from backends")
// test charsets
sql("use information_schema")
qt_charsets("select count(*) >= 1 from character_sets")

View File

@ -16,9 +16,9 @@
// under the License.
suite("information_schema") {
List<List<Object>> table = sql """ select * from information_schema.backends; """
List<List<Object>> table = sql """ select * from backends(); """
assertTrue(table.size() > 0) // row should > 0
assertTrue(table[0].size == 23) // column should be 23
assertTrue(table[0].size == 26) // column should be 26
sql "SELECT DATABASE();"
sql "select USER();"

View File

@ -26,10 +26,6 @@ suite("test_query_sys_tables", "query,p0") {
sql("drop database IF EXISTS ${dbName2}")
sql("drop database IF EXISTS ${dbName3}")
// test backends
sql("use information_schema")
qt_backends("select count(*) >= 1 from backends")
// test charsets
sql("use information_schema")
qt_charsets("select count(*) >= 1 from character_sets")

View File

@ -32,7 +32,7 @@ suite ("test_number_overflow") {
def backendId_to_backendHttpPort = [:]
for (String[] backend in backends) {
backendId_to_backendIP.put(backend[0], backend[2])
backendId_to_backendHttpPort.put(backend[0], backend[5])
backendId_to_backendHttpPort.put(backend[0], backend[6])
}
backend_id = backendId_to_backendIP.keySet()[0]

View File

@ -31,7 +31,7 @@ suite("test_agg_keys_schema_change_datev2") {
def backendId_to_backendHttpPort = [:]
for (String[] backend in backends) {
backendId_to_backendIP.put(backend[0], backend[2])
backendId_to_backendHttpPort.put(backend[0], backend[5])
backendId_to_backendHttpPort.put(backend[0], backend[6])
}
backend_id = backendId_to_backendIP.keySet()[0]

View File

@ -31,7 +31,7 @@ suite("test_dup_keys_schema_change_datev2") {
def backendId_to_backendHttpPort = [:]
for (String[] backend in backends) {
backendId_to_backendIP.put(backend[0], backend[2])
backendId_to_backendHttpPort.put(backend[0], backend[5])
backendId_to_backendHttpPort.put(backend[0], backend[6])
}
backend_id = backendId_to_backendIP.keySet()[0]

View File

@ -31,7 +31,7 @@ suite("test_agg_keys_schema_change_decimalv3") {
def backendId_to_backendHttpPort = [:]
for (String[] backend in backends) {
backendId_to_backendIP.put(backend[0], backend[2])
backendId_to_backendHttpPort.put(backend[0], backend[5])
backendId_to_backendHttpPort.put(backend[0], backend[6])
}
backend_id = backendId_to_backendIP.keySet()[0]

View File

@@ -34,7 +34,7 @@ suite ("test_agg_keys_schema_change") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
     backend_id = backendId_to_backendIP.keySet()[0]

View File

@@ -37,7 +37,7 @@ suite ("test_agg_mv_schema_change") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
     backend_id = backendId_to_backendIP.keySet()[0]

View File

@@ -38,7 +38,7 @@ suite ("test_agg_rollup_schema_change") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
     backend_id = backendId_to_backendIP.keySet()[0]

View File

@@ -29,7 +29,7 @@ suite ("test_agg_vals_schema_change") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
     backend_id = backendId_to_backendIP.keySet()[0]

View File

@@ -32,7 +32,7 @@ suite ("test_dup_keys_schema_change") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
     backend_id = backendId_to_backendIP.keySet()[0]

View File

@@ -36,7 +36,7 @@ suite ("test_dup_mv_schema_change") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
     backend_id = backendId_to_backendIP.keySet()[0]

View File

@@ -36,7 +36,7 @@ suite ("test_dup_rollup_schema_change") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
     backend_id = backendId_to_backendIP.keySet()[0]

View File

@@ -28,7 +28,7 @@ suite ("test_dup_vals_schema_change") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
     backend_id = backendId_to_backendIP.keySet()[0]

View File

@@ -28,7 +28,7 @@ suite ("test_uniq_keys_schema_change") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
     backend_id = backendId_to_backendIP.keySet()[0]

View File

@@ -32,7 +32,7 @@ suite ("test_uniq_mv_schema_change") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
     backend_id = backendId_to_backendIP.keySet()[0]

View File

@@ -36,7 +36,7 @@ suite ("test_uniq_rollup_schema_change") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
     backend_id = backendId_to_backendIP.keySet()[0]

View File

@@ -28,7 +28,7 @@ suite ("test_uniq_vals_schema_change") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
     backend_id = backendId_to_backendIP.keySet()[0]

View File

@@ -34,7 +34,7 @@ suite ("test_varchar_schema_change") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
     backend_id = backendId_to_backendIP.keySet()[0]

View File

@@ -35,7 +35,7 @@ suite("test_segcompaction_agg_keys") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
     backend_id = backendId_to_backendIP.keySet()[0]

View File

@@ -35,7 +35,7 @@ suite("test_segcompaction_agg_keys_index") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
     backend_id = backendId_to_backendIP.keySet()[0]

View File

@@ -35,7 +35,7 @@ suite("test_segcompaction_dup_keys") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
     backend_id = backendId_to_backendIP.keySet()[0]

View File

@@ -35,7 +35,7 @@ suite("test_segcompaction_dup_keys_index") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
     backend_id = backendId_to_backendIP.keySet()[0]

View File

@@ -35,7 +35,7 @@ suite("test_segcompaction_unique_keys") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
     backend_id = backendId_to_backendIP.keySet()[0]

View File

@@ -35,7 +35,7 @@ suite("test_segcompaction_unique_keys_index") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
     backend_id = backendId_to_backendIP.keySet()[0]

View File

@@ -35,7 +35,7 @@ suite("test_segcompaction_unique_keys_mow") {
     def backendId_to_backendHttpPort = [:]
     for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
     backend_id = backendId_to_backendIP.keySet()[0]

View File

@@ -35,7 +35,7 @@ suite("test_segcompaction_unique_keys_mow_index") {
     def backendId_to_backendHttpPort = [:]
    	for (String[] backend in backends) {
         backendId_to_backendIP.put(backend[0], backend[2])
-        backendId_to_backendHttpPort.put(backend[0], backend[5])
+        backendId_to_backendHttpPort.put(backend[0], backend[6])
     }
     backend_id = backendId_to_backendIP.keySet()[0]