From 21223e65c59c23cfcb9e8ab610ea321168bcb75a Mon Sep 17 00:00:00 2001 From: yongjinhou <109586248+yongjinhou@users.noreply.github.com> Date: Thu, 12 Oct 2023 20:24:45 +0800 Subject: [PATCH] [Enhancement](show-backends-disks) Add show backends disks (#24229) * Add statement to query disk information corresponding to data directory of BE node [msyql]->'show backends disks;' +-----------+-------------+------------------------------+---------+----------+---------------+-------------+-------------------+---------+ | BackendId | Host | RootPath | DirType | DiskState| TotalCapacity | UsedCapacity| AvailableCapacity | UsedPct | +-----------+-------------+------------------------------+---------+----------+---------------+-------------+-------------------+---------+ | 10002 | 10.xx.xx.90 | /home/work/output/be/storage | STORAGE | ONLINE | 7.049 TB | 2.478 TB | 4.571 TB | 35.16 % | | 10002 | 10.xx.xx.90 | /home/work/output/be | DEPLOY | ONLINE | 7.049 TB | 2.478 TB | 4.571 TB | 35.16 % | | 10002 | 10.xx.xx.90 | /home/work/output/be/log | LOG | ONLINE | 7.049 TB | 2.478 TB | 4.571 TB | 35.16 % | +-----------+-------------+------------------------------+---------+----------+---------------+-------------+-------------------+---------+ --- be/src/agent/task_worker_pool.cpp | 21 ++++- be/src/agent/task_worker_pool.h | 2 + be/src/olap/olap_common.h | 9 +++ be/src/olap/special_dir.cpp | 67 ++++++++++++++++ be/src/olap/special_dir.h | 57 +++++++++++++ be/src/olap/storage_engine.cpp | 22 +++++- be/src/olap/storage_engine.h | 5 ++ .../Show-Statements/SHOW-BACKENDS-DISKS.md | 66 ++++++++++++++++ .../Show-Statements/SHOW-FRONTENDS-DISKS.md | 2 +- .../Show-Statements/SHOW-BACKENDS-DISKS.md | 66 ++++++++++++++++ .../Show-Statements/SHOW-FRONTENDS-DISKS.md | 2 +- fe/fe-core/src/main/cup/sql_parser.cup | 4 + .../doris/analysis/ShowBackendsStmt.java | 24 +++++- .../org/apache/doris/catalog/DiskInfo.java | 13 ++- .../doris/common/proc/BackendsProcDir.java | 79 +++++++++++++++++++ 
.../org/apache/doris/qe/ShowExecutor.java | 2 +- .../java/org/apache/doris/system/Backend.java | 50 +++++++++--- .../doris/system/SystemInfoService.java | 8 +- .../org/apache/doris/catalog/BackendTest.java | 4 + .../apache/doris/clone/DecommissionTest.java | 3 + .../clone/TabletRepairAndBalanceTest.java | 3 + .../doris/clone/TabletReplicaTooSlowTest.java | 3 + .../common/util/AutoBucketUtilsTest.java | 2 + .../doris/planner/ResourceTagQueryTest.java | 3 + .../doris/utframe/DemoMultiBackendsTest.java | 3 + gensrc/thrift/MasterService.thrift | 1 + gensrc/thrift/Types.thrift | 6 ++ 27 files changed, 507 insertions(+), 20 deletions(-) create mode 100644 be/src/olap/special_dir.cpp create mode 100644 be/src/olap/special_dir.h create mode 100644 docs/en/docs/sql-manual/sql-reference/Show-Statements/SHOW-BACKENDS-DISKS.md create mode 100644 docs/zh-CN/docs/sql-manual/sql-reference/Show-Statements/SHOW-BACKENDS-DISKS.md diff --git a/be/src/agent/task_worker_pool.cpp b/be/src/agent/task_worker_pool.cpp index d80a49bdbb..898fa584ba 100644 --- a/be/src/agent/task_worker_pool.cpp +++ b/be/src/agent/task_worker_pool.cpp @@ -20,7 +20,6 @@ #include #include #include -#include #include #include #include @@ -55,6 +54,7 @@ #include "olap/olap_common.h" #include "olap/rowset/rowset_meta.h" #include "olap/snapshot_manager.h" +#include "olap/special_dir.h" #include "olap/storage_engine.h" #include "olap/storage_policy.h" #include "olap/tablet.h" @@ -677,8 +677,13 @@ void TaskWorkerPool::_report_disk_state_worker_thread_callback() { disk.__set_disk_available_capacity(root_path_info.available); disk.__set_trash_used_capacity(root_path_info.trash_used_capacity); disk.__set_used(root_path_info.is_used); + disk.__set_dir_type(TDiskType::STORAGE); request.disks[root_path_info.path] = disk; } + + _set_disk_infos(request, TDiskType::LOG); + _set_disk_infos(request, TDiskType::DEPLOY); + request.__set_num_cores(CpuInfo::num_cores()); 
request.__set_pipeline_executor_size(config::pipeline_executor_size > 0 ? config::pipeline_executor_size @@ -1096,6 +1101,20 @@ void TaskWorkerPool::_handle_report(const TReportRequest& request, ReportType ty } } +void TaskWorkerPool::_set_disk_infos(TReportRequest& request, TDiskType::type type) { + SpecialDirInfo dir_info; + StorageEngine::instance()->get_special_dir_info(&dir_info, type); + + TDisk special_disk; + special_disk.__set_root_path(dir_info.path); + special_disk.__set_data_used_capacity(0); + special_disk.__set_disk_total_capacity(dir_info.capacity); + special_disk.__set_disk_available_capacity(dir_info.available); + special_disk.__set_used(dir_info.is_used); + special_disk.__set_dir_type(type); + request.disks[dir_info.path] = special_disk; +} + void TaskWorkerPool::_random_sleep(int second) { Random rnd(UnixMillis()); sleep(rnd.Uniform(second) + 1); diff --git a/be/src/agent/task_worker_pool.h b/be/src/agent/task_worker_pool.h index 50c8842166..ca90edac51 100644 --- a/be/src/agent/task_worker_pool.h +++ b/be/src/agent/task_worker_pool.h @@ -19,6 +19,7 @@ #include #include +#include #include #include @@ -211,6 +212,7 @@ protected: TFinishTaskRequest* finish_task_request); void _handle_report(const TReportRequest& request, ReportType type); + void _set_disk_infos(TReportRequest& request, TDiskType::type type); Status _get_tablet_info(const TTabletId tablet_id, const TSchemaHash schema_hash, int64_t signature, TTabletInfo* tablet_info); diff --git a/be/src/olap/olap_common.h b/be/src/olap/olap_common.h index 130d65e7ef..e5ca13bec9 100644 --- a/be/src/olap/olap_common.h +++ b/be/src/olap/olap_common.h @@ -61,11 +61,20 @@ struct DataDirInfo { bool is_used = false; // whether available mark TStorageMedium::type storage_medium = TStorageMedium::HDD; // Storage medium type: SSD|HDD }; + +struct SpecialDirInfo { + std::string path; + int64_t capacity = 1; // actual disk capacity + int64_t available = 0; // available space, in bytes unit + bool is_used = 
false; +}; + struct PredicateFilterInfo { int type = 0; uint64_t input_row = 0; uint64_t filtered_row = 0; }; + // Sort DataDirInfo by available space. struct DataDirInfoLessAvailability { bool operator()(const DataDirInfo& left, const DataDirInfo& right) const { diff --git a/be/src/olap/special_dir.cpp b/be/src/olap/special_dir.cpp new file mode 100644 index 0000000000..8e8e038476 --- /dev/null +++ b/be/src/olap/special_dir.cpp @@ -0,0 +1,67 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#include "olap/special_dir.h" + +#include + +#include "io/fs/file_reader_writer_fwd.h" +#include "io/fs/file_writer.h" +#include "io/fs/local_file_system.h" +#include "io/fs/path.h" +#include "olap/utils.h" // for check_dir_existed + +namespace doris { +using namespace ErrorCode; + +static const char* const kTestFilePath = ".testfile"; + +SpecialDir::SpecialDir(const std::string& path) + : _path(path), + _fs(io::LocalFileSystem::create(path)), + _available_bytes(0), + _capacity_bytes(0), + _is_used(true) {} + +SpecialDir::~SpecialDir() = default; + +Status SpecialDir::update_capacity() { + RETURN_IF_ERROR(io::global_local_filesystem()->get_space_info(_path, &_capacity_bytes, + &_available_bytes)); + LOG(INFO) << "path: " << _path << " total capacity: " << _capacity_bytes + << ", available capacity: " << _available_bytes; + + return Status::OK(); +} + +void SpecialDir::health_check() { + _is_used = true; + // check disk + Status res = _read_and_write_test_file(); + if (!res) { + LOG(WARNING) << "log read/write test file occur IO Error. path=" << _path + << ", err: " << res; + _is_used = !res.is(); + } +} + +Status SpecialDir::_read_and_write_test_file() { + auto test_file = fmt::format("{}/{}", _path, kTestFilePath); + return read_write_test_file(test_file); +} + +} // namespace doris diff --git a/be/src/olap/special_dir.h b/be/src/olap/special_dir.h new file mode 100644 index 0000000000..4768a6bf66 --- /dev/null +++ b/be/src/olap/special_dir.h @@ -0,0 +1,57 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +#include "common/status.h" +#include "io/fs/file_system.h" +#include "olap/olap_common.h" + +namespace doris { + +class SpecialDir { +public: + SpecialDir(const std::string& path); + ~SpecialDir(); + + void get_dir_info(SpecialDirInfo* special_dir_info) { + special_dir_info->path = _path; + special_dir_info->capacity = _capacity_bytes; + special_dir_info->available = _available_bytes; + special_dir_info->is_used = _is_used; + return; + } + + Status update_capacity(); + + void health_check(); + +private: + Status _read_and_write_test_file(); + + std::string _path; + io::FileSystemSPtr _fs; + // the actual available capacity of the disk of this data dir + size_t _available_bytes; + // the actual capacity of the disk of this data dir + size_t _capacity_bytes; + bool _is_used; +}; + +} // namespace doris diff --git a/be/src/olap/storage_engine.cpp b/be/src/olap/storage_engine.cpp index f17a6de841..062a5c9d32 100644 --- a/be/src/olap/storage_engine.cpp +++ b/be/src/olap/storage_engine.cpp @@ -125,7 +125,9 @@ StorageEngine::StorageEngine(const EngineOptions& options) _calc_delete_bitmap_executor(nullptr), _default_rowset_type(BETA_ROWSET), _heartbeat_flags(nullptr), - _stream_load_recorder(nullptr) { + _stream_load_recorder(nullptr), + _log_dir(new SpecialDir(config::sys_log_dir)), + _deploy_dir(new SpecialDir(std::string(std::getenv("DORIS_HOME")))) { REGISTER_HOOK_METRIC(unused_rowsets_count, [this]() { // std::lock_guard lock(_gc_mutex); return _unused_rowsets.size(); @@ -360,6 +362,24 @@ Status 
StorageEngine::get_all_data_dir_info(std::vector* data_dir_i return res; } +void StorageEngine::get_special_dir_info(SpecialDirInfo* special_dir_infos, TDiskType::type type) { + switch (type) { + case TDiskType::LOG: + _log_dir->health_check(); + static_cast(_log_dir->update_capacity()); + _log_dir->get_dir_info(special_dir_infos); + break; + case TDiskType::DEPLOY: + _deploy_dir->health_check(); + static_cast(_deploy_dir->update_capacity()); + _deploy_dir->get_dir_info(special_dir_infos); + break; + default: + break; + } + return; +} + int64_t StorageEngine::get_file_or_directory_size(const std::string& file_path) { if (!std::filesystem::exists(file_path)) { return 0; diff --git a/be/src/olap/storage_engine.h b/be/src/olap/storage_engine.h index 6017354ef4..aafd5951a2 100644 --- a/be/src/olap/storage_engine.h +++ b/be/src/olap/storage_engine.h @@ -45,6 +45,7 @@ #include "olap/rowset/rowset.h" #include "olap/rowset/rowset_id_generator.h" #include "olap/rowset/segment_v2/segment.h" +#include "olap/special_dir.h" #include "olap/tablet.h" #include "olap/task/index_builder.h" #include "runtime/exec_env.h" @@ -54,6 +55,7 @@ namespace doris { class DataDir; +class SpecialDir; class EngineTask; class MemTableFlushExecutor; class SegcompactionWorker; @@ -101,6 +103,7 @@ public: // get all info of root_path Status get_all_data_dir_info(std::vector* data_dir_infos, bool need_update); + void get_special_dir_info(SpecialDirInfo* dir_infos, TDiskType::type type); int64_t get_file_or_directory_size(const std::string& file_path); @@ -471,6 +474,8 @@ private: std::condition_variable _compaction_producer_sleep_cv; std::shared_ptr _stream_load_recorder; + std::unique_ptr _log_dir; + std::unique_ptr _deploy_dir; // we use unordered_map to store all cumulative compaction policy sharded ptr std::unordered_map> diff --git a/docs/en/docs/sql-manual/sql-reference/Show-Statements/SHOW-BACKENDS-DISKS.md b/docs/en/docs/sql-manual/sql-reference/Show-Statements/SHOW-BACKENDS-DISKS.md new file 
mode 100644 index 0000000000..0d8b851421 --- /dev/null +++ b/docs/en/docs/sql-manual/sql-reference/Show-Statements/SHOW-BACKENDS-DISKS.md @@ -0,0 +1,66 @@ +--- +{ + "title": "SHOW-BACKENDS-DISKS", + "language": "en" +} +--- + + + +## SHOW-BACKENDS-DISKS + +### Name + +SHOW BACKENDS DISKS + +### Description + + This statement is used to query disk information corresponding to data directory of BE node. + + Syntax: + +```sql +SHOW BACKENDS DISKS; +``` + +Note: +1. BackendId indicates id of BE node. +2. Host indicates ip of BE node. +3. RootPath indicates data directory of BE node. +4. DirType indicates the type of directory. +5. DiskState indicates state of disk. +6. TotalCapacity indicates total capacity of the disk. +7. UsedCapacity indicates used space of the disk. +8. AvailableCapacity indicates available space of the disk. +9. UsedPct indicates percentage of the disk used. + +### Example +` +mysql> show backends disks; ++-----------+-------------+------------------------------+---------+----------+---------------+-------------+-------------------+---------+ +| BackendId | Host | RootPath | DirType | DiskState| TotalCapacity | UsedCapacity| AvailableCapacity | UsedPct | ++-----------+-------------+------------------------------+---------+----------+---------------+-------------+-------------------+---------+ +| 10002 | 10.xx.xx.90 | /home/work/output/be/storage | STORAGE | ONLINE | 7.049 TB | 2.478 TB | 4.571 TB | 35.16 % | +| 10002 | 10.xx.xx.90 | /home/work/output/be | DEPLOY | ONLINE | 7.049 TB | 2.478 TB | 4.571 TB | 35.16 % | +| 10002 | 10.xx.xx.90 | /home/work/output/be/log | LOG | ONLINE | 7.049 TB | 2.478 TB | 4.571 TB | 35.16 % | ++-----------+-------------+------------------------------+---------+----------+---------------+-------------+-------------------+---------+ +3 rows in set (0.00 sec) +` +### Keywords + + SHOW, BACKENDS, DISK, DISKS \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-reference/Show-Statements/SHOW-FRONTENDS-DISKS.md 
b/docs/en/docs/sql-manual/sql-reference/Show-Statements/SHOW-FRONTENDS-DISKS.md index fad9e33dd4..2180b99c95 100644 --- a/docs/en/docs/sql-manual/sql-reference/Show-Statements/SHOW-FRONTENDS-DISKS.md +++ b/docs/en/docs/sql-manual/sql-reference/Show-Statements/SHOW-FRONTENDS-DISKS.md @@ -70,7 +70,7 @@ mysql> show frontends disks; ### Keywords - SHOW, FRONTENDS + SHOW, FRONTENDS, DISK, DISKS ### Best Practice diff --git a/docs/zh-CN/docs/sql-manual/sql-reference/Show-Statements/SHOW-BACKENDS-DISKS.md b/docs/zh-CN/docs/sql-manual/sql-reference/Show-Statements/SHOW-BACKENDS-DISKS.md new file mode 100644 index 0000000000..dcb35ef42e --- /dev/null +++ b/docs/zh-CN/docs/sql-manual/sql-reference/Show-Statements/SHOW-BACKENDS-DISKS.md @@ -0,0 +1,66 @@ +--- +{ + "title": "SHOW-BACKENDS-DISKS", + "language": "zh-CN" +} +--- + + + +## SHOW-BACKENDS-DISKS + +### Name + +SHOW BACKENDS DISKS + +### Description + + 该语句用于查看 BE 节点的数据目录对应的磁盘信息。 + + 语法: + +```sql +SHOW BACKENDS DISKS; +``` + +说明: +1. BackendId 表示该 BE 节点的 ID。 +2. Host 表示该 BE 节点的 IP。 +3. RootPath 表示该 BE 节点的数据目录。 +4. DirType 表示目录类型。 +5. DiskState 表示磁盘状态。 +6. TotalCapacity 表示数据目录对应磁盘的总容量。 +7. UsedCapacity 表示磁盘的已使用空间。 +8. AvailableCapacity 表示磁盘的可使用空间。 +9. 
UsedPct 表示磁盘的使用百分比。 + +### Example +` +mysql> show backends disks; ++-----------+-------------+------------------------------+---------+----------+---------------+-------------+-------------------+---------+ +| BackendId | Host | RootPath | DirType | DiskState| TotalCapacity | UsedCapacity| AvailableCapacity | UsedPct | ++-----------+-------------+------------------------------+---------+----------+---------------+-------------+-------------------+---------+ +| 10002 | 10.xx.xx.90 | /home/work/output/be/storage | STORAGE | ONLINE | 7.049 TB | 2.478 TB | 4.571 TB | 35.16 % | +| 10002 | 10.xx.xx.90 | /home/work/output/be | DEPLOY | ONLINE | 7.049 TB | 2.478 TB | 4.571 TB | 35.16 % | +| 10002 | 10.xx.xx.90 | /home/work/output/be/log | LOG | ONLINE | 7.049 TB | 2.478 TB | 4.571 TB | 35.16 % | ++-----------+-------------+------------------------------+---------+----------+---------------+-------------+-------------------+---------+ +3 rows in set (0.00 sec) +` +### Keywords + + SHOW, BACKENDS, DISK, DISKS \ No newline at end of file diff --git a/docs/zh-CN/docs/sql-manual/sql-reference/Show-Statements/SHOW-FRONTENDS-DISKS.md b/docs/zh-CN/docs/sql-manual/sql-reference/Show-Statements/SHOW-FRONTENDS-DISKS.md index 3570ee8302..4531688464 100644 --- a/docs/zh-CN/docs/sql-manual/sql-reference/Show-Statements/SHOW-FRONTENDS-DISKS.md +++ b/docs/zh-CN/docs/sql-manual/sql-reference/Show-Statements/SHOW-FRONTENDS-DISKS.md @@ -69,7 +69,7 @@ mysql> show frontends disks; ### Keywords - SHOW, FRONTENDS + SHOW, FRONTENDS, DISK, DISKS ### Best Practice diff --git a/fe/fe-core/src/main/cup/sql_parser.cup b/fe/fe-core/src/main/cup/sql_parser.cup index 5675e11b49..e2d7a2ca5d 100644 --- a/fe/fe-core/src/main/cup/sql_parser.cup +++ b/fe/fe-core/src/main/cup/sql_parser.cup @@ -4150,6 +4150,10 @@ show_param ::= {: RESULT = new ShowBackendsStmt(); :} + | KW_BACKENDS ident:name + {: + RESULT = new ShowBackendsStmt(name); + :} | KW_TRASH KW_ON STRING_LITERAL:backend {: RESULT = new 
ShowTrashDiskStmt(backend); diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowBackendsStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowBackendsStmt.java index 46009bd1c0..ce1ca8d26b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowBackendsStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowBackendsStmt.java @@ -20,6 +20,7 @@ package org.apache.doris.analysis; import org.apache.doris.catalog.Column; import org.apache.doris.catalog.Env; import org.apache.doris.catalog.ScalarType; +import org.apache.doris.common.AnalysisException; import org.apache.doris.common.ErrorCode; import org.apache.doris.common.ErrorReport; import org.apache.doris.common.UserException; @@ -28,11 +29,22 @@ import org.apache.doris.mysql.privilege.PrivPredicate; import org.apache.doris.qe.ConnectContext; import org.apache.doris.qe.ShowResultSetMetaData; +import com.google.common.collect.ImmutableList; + public class ShowBackendsStmt extends ShowStmt { + private String type; public ShowBackendsStmt() { } + public ShowBackendsStmt(String type) { + this.type = type; + } + + public String getType() { + return type; + } + @Override public void analyze(Analyzer analyzer) throws UserException { super.analyze(analyzer); @@ -42,12 +54,22 @@ public class ShowBackendsStmt extends ShowStmt { PrivPredicate.OPERATOR)) { ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, "ADMIN/OPERATOR"); } + + if (type != null && !type.equals("disks")) { + throw new AnalysisException("Show backends with extra info only support show backends disks"); + } } @Override public ShowResultSetMetaData getMetaData() { ShowResultSetMetaData.Builder builder = ShowResultSetMetaData.builder(); - for (String title : BackendsProcDir.TITLE_NAMES) { + + ImmutableList titles = BackendsProcDir.TITLE_NAMES; + if (type != null && type.equals("disks")) { + titles = BackendsProcDir.DISK_TITLE_NAMES; + } + + for (String title : titles) { 
builder.addColumn(new Column(title, ScalarType.createVarchar(30))); } return builder.build(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/DiskInfo.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/DiskInfo.java index b49acb2ff8..c54afda834 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/DiskInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/DiskInfo.java @@ -55,6 +55,8 @@ public class DiskInfo implements Writable { private long diskAvailableCapacityB; @SerializedName("state") private DiskState state; + @SerializedName("dirType") + private String dirType; // path hash and storage medium are reported from Backend and no need to persist private long pathHash = 0; private TStorageMedium storageMedium; @@ -70,6 +72,7 @@ public class DiskInfo implements Writable { this.trashUsedCapacityB = 0; this.diskAvailableCapacityB = DEFAULT_CAPACITY_B; this.state = DiskState.ONLINE; + this.dirType = "STORAGE"; this.pathHash = 0; this.storageMedium = TStorageMedium.HDD; } @@ -130,6 +133,10 @@ public class DiskInfo implements Writable { return state; } + public String getDirType() { + return dirType; + } + // return true if changed public boolean setState(DiskState state) { if (this.state != state) { @@ -139,6 +146,10 @@ public class DiskInfo implements Writable { return false; } + public void setDirType(String dirType) { + this.dirType = dirType; + } + public long getPathHash() { return pathHash; } @@ -185,7 +196,7 @@ public class DiskInfo implements Writable { return "DiskInfo [rootPath=" + rootPath + "(" + pathHash + "), totalCapacityB=" + totalCapacityB + ", dataUsedCapacityB=" + dataUsedCapacityB + ", trashUsedCapacityB=" + trashUsedCapacityB + ", diskAvailableCapacityB=" + diskAvailableCapacityB + ", state=" + state - + ", medium: " + storageMedium + "]"; + + ", dirType=" + dirType + ", medium: " + storageMedium + "]"; } @Override diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/BackendsProcDir.java 
b/fe/fe-core/src/main/java/org/apache/doris/common/proc/BackendsProcDir.java index 647e4caf57..b9d4db8cb9 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/BackendsProcDir.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/BackendsProcDir.java @@ -17,6 +17,7 @@ package org.apache.doris.common.proc; +import org.apache.doris.catalog.DiskInfo; import org.apache.doris.catalog.Env; import org.apache.doris.common.AnalysisException; import org.apache.doris.common.Pair; @@ -30,6 +31,7 @@ import com.google.common.base.Preconditions; import com.google.common.base.Stopwatch; import com.google.common.base.Strings; import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; import com.google.gson.Gson; import org.apache.logging.log4j.LogManager; @@ -51,6 +53,11 @@ public class BackendsProcDir implements ProcDirInterface { .add("HeartbeatFailureCounter").add("NodeRole") .build(); + public static final ImmutableList DISK_TITLE_NAMES = new ImmutableList.Builder() + .add("BackendId").add("Host").add("RootPath").add("DirType").add("DiskState") + .add("TotalCapacity").add("UsedCapacity").add("AvailableCapacity").add("UsedPct") + .build(); + public static final int HOSTNAME_INDEX = 3; private SystemInfoService systemInfoService; @@ -75,6 +82,17 @@ public class BackendsProcDir implements ProcDirInterface { return result; } + public static List> getBackendInfos(String type) { + List> backendInfos = new LinkedList<>(); + + if (type == null) { + backendInfos = getBackendInfos(); + } else if (type.equals("disks")) { + backendInfos = getBackendsDiskInfos(); + } + return backendInfos; + } + /** * get backends info * @@ -185,6 +203,67 @@ public class BackendsProcDir implements ProcDirInterface { return backendInfos; } + /** + * get backends disk info + * + * @return + */ + public static List> getBackendsDiskInfos() { + final SystemInfoService systemInfoService = 
Env.getCurrentSystemInfo(); + List> backendsDiskInfos = new LinkedList<>(); + List backendIds = systemInfoService.getAllBackendIds(false); + if (backendIds == null) { + return backendsDiskInfos; + } + + List> comparableBackendsDiskInfos = new LinkedList<>(); + for (long backendId : backendIds) { + Backend backend = systemInfoService.getBackend(backendId); + if (backend == null) { + continue; + } + + ImmutableMap disksRef = backend.getAllDisks(); + for (DiskInfo disk : disksRef.values()) { + List backendsDiskInfo = Lists.newArrayList(); + backendsDiskInfo.add(String.valueOf(backendId)); + backendsDiskInfo.add(backend.getHost()); + // add disk info to backendsDiskInfo + backendsDiskInfo.add(disk.getRootPath()); + backendsDiskInfo.add(disk.getDirType()); + backendsDiskInfo.add(disk.getState()); + long totalCapacityB = disk.getTotalCapacityB(); + Pair totalCapacity = DebugUtil.getByteUint(totalCapacityB); + backendsDiskInfo.add(DebugUtil.DECIMAL_FORMAT_SCALE_3.format( + totalCapacity.first) + " " + totalCapacity.second); + long diskUsedCapacityB = disk.getDiskUsedCapacityB(); + Pair diskUsedCapacity = DebugUtil.getByteUint(diskUsedCapacityB); + backendsDiskInfo.add(DebugUtil.DECIMAL_FORMAT_SCALE_3.format( + diskUsedCapacity.first) + " " + diskUsedCapacity.second); + long availableCapacityB = disk.getAvailableCapacityB(); + Pair availableCapacity = DebugUtil.getByteUint(availableCapacityB); + backendsDiskInfo.add(DebugUtil.DECIMAL_FORMAT_SCALE_3.format( + availableCapacity.first) + " " + availableCapacity.second); + backendsDiskInfo.add(String.format("%.2f", disk.getUsedPct() * 100) + " %"); + comparableBackendsDiskInfos.add(backendsDiskInfo); + } + } + + // sort by host name + ListComparator> comparator = new ListComparator>(1); + comparableBackendsDiskInfos.sort(comparator); + + for (List backendsDiskInfo : comparableBackendsDiskInfos) { + List oneInfo = new ArrayList(backendsDiskInfo.size()); + for (Comparable element : backendsDiskInfo) { + 
oneInfo.add(element.toString()); + } + backendsDiskInfos.add(oneInfo); + } + + return backendsDiskInfos; + } + @Override public boolean register(String name, ProcNodeInterface node) { return false; diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/ShowExecutor.java b/fe/fe-core/src/main/java/org/apache/doris/qe/ShowExecutor.java index 64a5d8a0e9..b8b55c57c1 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/ShowExecutor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/ShowExecutor.java @@ -1977,7 +1977,7 @@ public class ShowExecutor { private void handleShowBackends() { final ShowBackendsStmt showStmt = (ShowBackendsStmt) stmt; - List> backendInfos = BackendsProcDir.getBackendInfos(); + List> backendInfos = BackendsProcDir.getBackendInfos(showStmt.getType()); backendInfos.sort(new Comparator>() { @Override diff --git a/fe/fe-core/src/main/java/org/apache/doris/system/Backend.java b/fe/fe-core/src/main/java/org/apache/doris/system/Backend.java index fcb5e63e83..582567ae15 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/system/Backend.java +++ b/fe/fe-core/src/main/java/org/apache/doris/system/Backend.java @@ -59,6 +59,7 @@ public class Backend implements Writable { // Represent a meaningless IP public static final String DUMMY_IP = "0.0.0.0"; + public static final String DATA_DIR_TYPE = "STORAGE"; @SerializedName("id") private long id; @@ -374,22 +375,44 @@ public class Backend implements Writable { } public ImmutableMap getDisks() { + Map disks = Maps.newHashMap(); + for (Map.Entry entry : disksRef.entrySet()) { + if (entry.getValue().getDirType().equals(DATA_DIR_TYPE)) { + disks.put(entry.getKey(), entry.getValue()); + } + } + return ImmutableMap.copyOf(disks); + } + + public ImmutableMap getAllDisks() { return this.disksRef; } public boolean hasPathHash() { - return disksRef.values().stream().allMatch(DiskInfo::hasPathHash); + Map disks = Maps.newHashMap(); + for (Map.Entry entry : disksRef.entrySet()) { + if 
(entry.getValue().getDirType().equals(DATA_DIR_TYPE)) { + disks.put(entry.getKey(), entry.getValue()); + } + } + return disks.values().stream().allMatch(DiskInfo::hasPathHash); } public boolean hasSpecifiedStorageMedium(TStorageMedium storageMedium) { - return disksRef.values().stream().anyMatch(d -> d.isStorageMediumMatch(storageMedium)); + Map disks = Maps.newHashMap(); + for (Map.Entry entry : disksRef.entrySet()) { + if (entry.getValue().getDirType().equals(DATA_DIR_TYPE)) { + disks.put(entry.getKey(), entry.getValue()); + } + } + return disks.values().stream().anyMatch(d -> d.isStorageMediumMatch(storageMedium)); } public long getTotalCapacityB() { ImmutableMap disks = disksRef; long totalCapacityB = 0L; for (DiskInfo diskInfo : disks.values()) { - if (diskInfo.getState() == DiskState.ONLINE) { + if (diskInfo.getState() == DiskState.ONLINE && diskInfo.getDirType().equals(DATA_DIR_TYPE)) { totalCapacityB += diskInfo.getTotalCapacityB(); } } @@ -401,7 +424,7 @@ public class Backend implements Writable { ImmutableMap disks = disksRef; long availableCapacityB = 1L; for (DiskInfo diskInfo : disks.values()) { - if (diskInfo.getState() == DiskState.ONLINE) { + if (diskInfo.getState() == DiskState.ONLINE && diskInfo.getDirType().equals(DATA_DIR_TYPE)) { availableCapacityB += diskInfo.getAvailableCapacityB(); } } @@ -412,7 +435,7 @@ public class Backend implements Writable { ImmutableMap disks = disksRef; long dataUsedCapacityB = 0L; for (DiskInfo diskInfo : disks.values()) { - if (diskInfo.getState() == DiskState.ONLINE) { + if (diskInfo.getState() == DiskState.ONLINE && diskInfo.getDirType().equals(DATA_DIR_TYPE)) { dataUsedCapacityB += diskInfo.getDataUsedCapacityB(); } } @@ -423,7 +446,7 @@ public class Backend implements Writable { ImmutableMap disks = disksRef; long trashUsedCapacityB = 0L; for (DiskInfo diskInfo : disks.values()) { - if (diskInfo.getState() == DiskState.ONLINE) { + if (diskInfo.getState() == DiskState.ONLINE && 
diskInfo.getDirType().equals(DATA_DIR_TYPE)) { trashUsedCapacityB += diskInfo.getTrashUsedCapacityB(); } } @@ -434,7 +457,7 @@ public class Backend implements Writable { ImmutableMap disks = disksRef; long totalRemoteUsedCapacityB = 0L; for (DiskInfo diskInfo : disks.values()) { - if (diskInfo.getState() == DiskState.ONLINE) { + if (diskInfo.getState() == DiskState.ONLINE && diskInfo.getDirType().equals(DATA_DIR_TYPE)) { totalRemoteUsedCapacityB += diskInfo.getRemoteUsedCapacity(); } } @@ -445,7 +468,7 @@ public class Backend implements Writable { ImmutableMap disks = disksRef; double maxPct = 0.0; for (DiskInfo diskInfo : disks.values()) { - if (diskInfo.getState() == DiskState.ONLINE) { + if (diskInfo.getState() == DiskState.ONLINE && diskInfo.getDirType().equals(DATA_DIR_TYPE)) { double percent = diskInfo.getUsedPct(); if (percent > maxPct) { maxPct = percent; @@ -463,7 +486,7 @@ public class Backend implements Writable { boolean exceedLimit = true; for (DiskInfo diskInfo : diskInfos.values()) { if (diskInfo.getState() == DiskState.ONLINE && diskInfo.getStorageMedium() - == storageMedium && !diskInfo.exceedLimit(true)) { + == storageMedium && !diskInfo.exceedLimit(true) && diskInfo.getDirType().equals(DATA_DIR_TYPE)) { exceedLimit = false; break; } @@ -478,7 +501,8 @@ public class Backend implements Writable { ImmutableMap diskInfos = disksRef; boolean exceedLimit = true; for (DiskInfo diskInfo : diskInfos.values()) { - if (diskInfo.getState() == DiskState.ONLINE && !diskInfo.exceedLimit(true)) { + if (diskInfo.getState() == DiskState.ONLINE && diskInfo.getDirType().equals(DATA_DIR_TYPE) + && !diskInfo.exceedLimit(true)) { exceedLimit = false; break; } @@ -492,7 +516,7 @@ public class Backend implements Writable { if (!initPathInfo) { boolean allPathHashUpdated = true; for (DiskInfo diskInfo : disks.values()) { - if (diskInfo.getPathHash() == 0) { + if (diskInfo.getDirType().equals(DATA_DIR_TYPE) && diskInfo.getPathHash() == 0) { allPathHashUpdated = false; 
break; } @@ -519,6 +543,7 @@ public class Backend implements Writable { long trashUsedCapacityB = tDisk.getTrashUsedCapacity(); long diskAvailableCapacityB = tDisk.getDiskAvailableCapacity(); boolean isUsed = tDisk.isUsed(); + String dirType = tDisk.getDirType().toString(); DiskInfo diskInfo = disks.get(rootPath); if (diskInfo == null) { diskInfo = new DiskInfo(rootPath); @@ -532,6 +557,8 @@ public class Backend implements Writable { diskInfo.setDataUsedCapacityB(dataUsedCapacityB); diskInfo.setTrashUsedCapacityB(trashUsedCapacityB); diskInfo.setAvailableCapacityB(diskAvailableCapacityB); + diskInfo.setDirType(dirType); + if (tDisk.isSetRemoteUsedCapacity()) { diskInfo.setRemoteUsedCapacity(tDisk.getRemoteUsedCapacity()); } @@ -553,6 +580,7 @@ public class Backend implements Writable { isChanged = true; } } + LOG.debug("update disk info. backendId: {}, diskInfo: {}", id, diskInfo.toString()); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/system/SystemInfoService.java b/fe/fe-core/src/main/java/org/apache/doris/system/SystemInfoService.java index 69af28dffd..28c3a6a7ba 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/system/SystemInfoService.java +++ b/fe/fe-core/src/main/java/org/apache/doris/system/SystemInfoService.java @@ -868,10 +868,14 @@ public class SystemInfoService { public void updatePathInfo(List addedDisks, List removedDisks) { Map copiedPathInfos = Maps.newHashMap(pathHashToDiskInfoRef); for (DiskInfo diskInfo : addedDisks) { - copiedPathInfos.put(diskInfo.getPathHash(), diskInfo); + if (diskInfo.getDirType().equals("STORAGE")) { + copiedPathInfos.put(diskInfo.getPathHash(), diskInfo); + } } for (DiskInfo diskInfo : removedDisks) { - copiedPathInfos.remove(diskInfo.getPathHash()); + if (diskInfo.getDirType().equals("STORAGE")) { + copiedPathInfos.remove(diskInfo.getPathHash()); + } } ImmutableMap newPathInfos = ImmutableMap.copyOf(copiedPathInfos); pathHashToDiskInfoRef = newPathInfos; diff --git 
a/fe/fe-core/src/test/java/org/apache/doris/catalog/BackendTest.java b/fe/fe-core/src/test/java/org/apache/doris/catalog/BackendTest.java index 86d95482fa..2aa5d59ffd 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/catalog/BackendTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/catalog/BackendTest.java @@ -22,6 +22,7 @@ import org.apache.doris.common.FeConstants; import org.apache.doris.resource.Tag; import org.apache.doris.system.Backend; import org.apache.doris.thrift.TDisk; +import org.apache.doris.thrift.TDiskType; import org.apache.doris.thrift.TStorageMedium; import com.google.common.collect.ImmutableMap; @@ -92,8 +93,11 @@ public class BackendTest { Map diskInfos = new HashMap(); TDisk disk1 = new TDisk("/data1/", 1000, 800, true); + disk1.setDirType(TDiskType.STORAGE); TDisk disk2 = new TDisk("/data2/", 2000, 700, true); + disk2.setDirType(TDiskType.STORAGE); TDisk disk3 = new TDisk("/data3/", 3000, 600, false); + disk3.setDirType(TDiskType.STORAGE); diskInfos.put(disk1.getRootPath(), disk1); diskInfos.put(disk2.getRootPath(), disk2); diff --git a/fe/fe-core/src/test/java/org/apache/doris/clone/DecommissionTest.java b/fe/fe-core/src/test/java/org/apache/doris/clone/DecommissionTest.java index 1764573d0f..f74dd3a3f2 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/clone/DecommissionTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/clone/DecommissionTest.java @@ -29,6 +29,7 @@ import org.apache.doris.qe.ConnectContext; import org.apache.doris.system.Backend; import org.apache.doris.system.SystemInfoService; import org.apache.doris.thrift.TDisk; +import org.apache.doris.thrift.TDiskType; import org.apache.doris.thrift.TStorageMedium; import org.apache.doris.utframe.UtFrameUtils; @@ -91,6 +92,7 @@ public class DecommissionTest { tDisk1.setDiskAvailableCapacity(tDisk1.disk_total_capacity - tDisk1.data_used_capacity); tDisk1.setPathHash(random.nextLong()); tDisk1.setStorageMedium(TStorageMedium.HDD); + 
tDisk1.setDirType(TDiskType.STORAGE); backendDisks.put(tDisk1.getRootPath(), tDisk1); TDisk tDisk2 = new TDisk(); @@ -101,6 +103,7 @@ public class DecommissionTest { tDisk2.setDiskAvailableCapacity(tDisk2.disk_total_capacity - tDisk2.data_used_capacity); tDisk2.setPathHash(random.nextLong()); tDisk2.setStorageMedium(TStorageMedium.HDD); + tDisk2.setDirType(TDiskType.STORAGE); backendDisks.put(tDisk2.getRootPath(), tDisk2); be.updateDisks(backendDisks); diff --git a/fe/fe-core/src/test/java/org/apache/doris/clone/TabletRepairAndBalanceTest.java b/fe/fe-core/src/test/java/org/apache/doris/clone/TabletRepairAndBalanceTest.java index 3e178ef090..2c98decef9 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/clone/TabletRepairAndBalanceTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/clone/TabletRepairAndBalanceTest.java @@ -49,6 +49,7 @@ import org.apache.doris.resource.Tag; import org.apache.doris.system.Backend; import org.apache.doris.system.SystemInfoService; import org.apache.doris.thrift.TDisk; +import org.apache.doris.thrift.TDiskType; import org.apache.doris.thrift.TStorageMedium; import org.apache.doris.utframe.UtFrameUtils; @@ -134,6 +135,7 @@ public class TabletRepairAndBalanceTest { tDisk1.setDiskAvailableCapacity(tDisk1.disk_total_capacity - tDisk1.data_used_capacity); tDisk1.setPathHash(random.nextLong()); tDisk1.setStorageMedium(TStorageMedium.HDD); + tDisk1.setDirType(TDiskType.STORAGE); backendDisks.put(tDisk1.getRootPath(), tDisk1); TDisk tDisk2 = new TDisk(); @@ -144,6 +146,7 @@ public class TabletRepairAndBalanceTest { tDisk2.setDiskAvailableCapacity(tDisk2.disk_total_capacity - tDisk2.data_used_capacity); tDisk2.setPathHash(random.nextLong()); tDisk2.setStorageMedium(TStorageMedium.SSD); + tDisk2.setDirType(TDiskType.STORAGE); backendDisks.put(tDisk2.getRootPath(), tDisk2); be.updateDisks(backendDisks); diff --git a/fe/fe-core/src/test/java/org/apache/doris/clone/TabletReplicaTooSlowTest.java 
b/fe/fe-core/src/test/java/org/apache/doris/clone/TabletReplicaTooSlowTest.java index 6a38985b73..cb980b4051 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/clone/TabletReplicaTooSlowTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/clone/TabletReplicaTooSlowTest.java @@ -32,6 +32,7 @@ import org.apache.doris.system.Backend; import org.apache.doris.system.Diagnoser; import org.apache.doris.system.SystemInfoService; import org.apache.doris.thrift.TDisk; +import org.apache.doris.thrift.TDiskType; import org.apache.doris.thrift.TStorageMedium; import org.apache.doris.utframe.UtFrameUtils; @@ -103,6 +104,7 @@ public class TabletReplicaTooSlowTest { tDisk1.setDiskAvailableCapacity(tDisk1.disk_total_capacity - tDisk1.data_used_capacity); tDisk1.setPathHash(random.nextLong()); tDisk1.setStorageMedium(TStorageMedium.HDD); + tDisk1.setDirType(TDiskType.STORAGE); backendDisks.put(tDisk1.getRootPath(), tDisk1); TDisk tDisk2 = new TDisk(); @@ -113,6 +115,7 @@ public class TabletReplicaTooSlowTest { tDisk2.setDiskAvailableCapacity(tDisk2.disk_total_capacity - tDisk2.data_used_capacity); tDisk2.setPathHash(random.nextLong()); tDisk2.setStorageMedium(TStorageMedium.SSD); + tDisk2.setDirType(TDiskType.STORAGE); backendDisks.put(tDisk2.getRootPath(), tDisk2); be.updateDisks(backendDisks); diff --git a/fe/fe-core/src/test/java/org/apache/doris/common/util/AutoBucketUtilsTest.java b/fe/fe-core/src/test/java/org/apache/doris/common/util/AutoBucketUtilsTest.java index 3a23928734..3801877520 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/common/util/AutoBucketUtilsTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/common/util/AutoBucketUtilsTest.java @@ -27,6 +27,7 @@ import org.apache.doris.qe.ShowResultSet; import org.apache.doris.system.Backend; import org.apache.doris.system.SystemInfoService; import org.apache.doris.thrift.TDisk; +import org.apache.doris.thrift.TDiskType; import org.apache.doris.thrift.TStorageMedium; import 
org.apache.doris.utframe.UtFrameUtils; @@ -95,6 +96,7 @@ public class AutoBucketUtilsTest { disk.setDiskAvailableCapacity(disk.disk_total_capacity - disk.data_used_capacity); disk.setPathHash(random.nextLong()); disk.setStorageMedium(TStorageMedium.HDD); + disk.setDirType(TDiskType.STORAGE); backendDisks.put(disk.getRootPath(), disk); } be.updateDisks(backendDisks); diff --git a/fe/fe-core/src/test/java/org/apache/doris/planner/ResourceTagQueryTest.java b/fe/fe-core/src/test/java/org/apache/doris/planner/ResourceTagQueryTest.java index 29aa558860..5c4edc31c4 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/planner/ResourceTagQueryTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/planner/ResourceTagQueryTest.java @@ -41,6 +41,7 @@ import org.apache.doris.qe.DdlExecutor; import org.apache.doris.resource.Tag; import org.apache.doris.system.Backend; import org.apache.doris.thrift.TDisk; +import org.apache.doris.thrift.TDiskType; import org.apache.doris.thrift.TStorageMedium; import org.apache.doris.utframe.UtFrameUtils; @@ -118,6 +119,7 @@ public class ResourceTagQueryTest { tDisk1.setDiskAvailableCapacity(tDisk1.disk_total_capacity - tDisk1.data_used_capacity); tDisk1.setPathHash(random.nextLong()); tDisk1.setStorageMedium(TStorageMedium.HDD); + tDisk1.setDirType(TDiskType.STORAGE); backendDisks.put(tDisk1.getRootPath(), tDisk1); TDisk tDisk2 = new TDisk(); @@ -128,6 +130,7 @@ public class ResourceTagQueryTest { tDisk2.setDiskAvailableCapacity(tDisk2.disk_total_capacity - tDisk2.data_used_capacity); tDisk2.setPathHash(random.nextLong()); tDisk2.setStorageMedium(TStorageMedium.SSD); + tDisk2.setDirType(TDiskType.STORAGE); backendDisks.put(tDisk2.getRootPath(), tDisk2); be.updateDisks(backendDisks); diff --git a/fe/fe-core/src/test/java/org/apache/doris/utframe/DemoMultiBackendsTest.java b/fe/fe-core/src/test/java/org/apache/doris/utframe/DemoMultiBackendsTest.java index af9b98f126..dac84521e4 100644 --- 
a/fe/fe-core/src/test/java/org/apache/doris/utframe/DemoMultiBackendsTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/utframe/DemoMultiBackendsTest.java @@ -41,6 +41,7 @@ import org.apache.doris.qe.StmtExecutor; import org.apache.doris.resource.Tag; import org.apache.doris.system.Backend; import org.apache.doris.thrift.TDisk; +import org.apache.doris.thrift.TDiskType; import org.apache.doris.thrift.TStorageMedium; import org.apache.doris.utframe.MockedFrontend.EnvVarNotSetException; import org.apache.doris.utframe.MockedFrontend.FeStartException; @@ -100,6 +101,7 @@ public class DemoMultiBackendsTest { tDisk1.setDiskAvailableCapacity(tDisk1.disk_total_capacity - tDisk1.data_used_capacity); tDisk1.setPathHash(random.nextLong()); tDisk1.setStorageMedium(TStorageMedium.HDD); + tDisk1.setDirType(TDiskType.STORAGE); backendDisks.put(tDisk1.getRootPath(), tDisk1); TDisk tDisk2 = new TDisk(); @@ -110,6 +112,7 @@ public class DemoMultiBackendsTest { tDisk2.setDiskAvailableCapacity(tDisk2.disk_total_capacity - tDisk2.data_used_capacity); tDisk2.setPathHash(random.nextLong()); tDisk2.setStorageMedium(TStorageMedium.SSD); + tDisk2.setDirType(TDiskType.STORAGE); backendDisks.put(tDisk2.getRootPath(), tDisk2); be.updateDisks(backendDisks); diff --git a/gensrc/thrift/MasterService.thrift b/gensrc/thrift/MasterService.thrift index 9acd3f85f7..088d01dad6 100644 --- a/gensrc/thrift/MasterService.thrift +++ b/gensrc/thrift/MasterService.thrift @@ -84,6 +84,7 @@ struct TDisk { 7: optional Types.TStorageMedium storage_medium 8: optional Types.TSize remote_used_capacity 9: optional Types.TSize trash_used_capacity + 10: optional Types.TDiskType dir_type } struct TPluginInfo { diff --git a/gensrc/thrift/Types.thrift b/gensrc/thrift/Types.thrift index f6a138976c..15840e0c91 100644 --- a/gensrc/thrift/Types.thrift +++ b/gensrc/thrift/Types.thrift @@ -51,6 +51,12 @@ enum TStorageMedium { REMOTE_CACHE, } +enum TDiskType { + STORAGE, + LOG, + DEPLOY +} + enum TVarType { SESSION, 
GLOBAL