From abc73ac1eb5a822e9c245e8b88010d67a2915e98 Mon Sep 17 00:00:00 2001 From: Mingyu Chen Date: Sun, 7 May 2023 18:44:42 +0800 Subject: [PATCH] [refactor](cluster)(step-1) remove cluster related stmt (#19355) * [refactor](cluster)(step-1) remove cluster stmt --- .../java/org/apache/doris/common/Config.java | 11 - fe/fe-core/src/main/cup/sql_parser.cup | 102 +-- .../org/apache/doris/alter/SystemHandler.java | 16 +- .../doris/analysis/AddBackendClause.java | 34 +- .../doris/analysis/AlterClusterClause.java | 76 -- .../doris/analysis/AlterClusterStmt.java | 92 --- .../doris/analysis/CreateClusterStmt.java | 125 --- .../doris/analysis/DropClusterStmt.java | 90 -- .../org/apache/doris/analysis/EnterStmt.java | 71 -- .../org/apache/doris/analysis/LinkDbStmt.java | 96 --- .../apache/doris/analysis/MigrateDbStmt.java | 90 -- .../doris/analysis/ShowClusterStmt.java | 73 -- .../doris/analysis/ShowMigrationsStmt.java | 67 -- .../java/org/apache/doris/catalog/Env.java | 154 +--- .../apache/doris/catalog/InfoSchemaDb.java | 6 +- .../org/apache/doris/cluster/BaseParam.java | 112 --- .../org/apache/doris/cluster/Cluster.java | 197 +---- .../DummyCluster.java} | 16 +- .../doris/common/proc/BackendsProcDir.java | 43 +- .../apache/doris/datasource/CatalogMgr.java | 2 +- .../doris/datasource/HMSExternalCatalog.java | 2 +- .../doris/datasource/InternalCatalog.java | 770 +----------------- .../doris/httpv2/rest/manager/NodeAction.java | 2 +- .../doris/httpv2/restv2/MetaInfoActionV2.java | 8 +- .../apache/doris/journal/JournalEntity.java | 42 - .../org/apache/doris/mysql/MysqlProto.java | 30 +- .../persist/DropLinkDbAndUpdateDbInfo.java | 81 -- .../org/apache/doris/persist/EditLog.java | 79 -- .../org/apache/doris/persist/LinkDbInfo.java | 81 -- .../apache/doris/persist/meta/MetaReader.java | 8 +- .../persist/meta/PersistMetaModules.java | 6 +- .../java/org/apache/doris/qe/DdlExecutor.java | 18 +- .../org/apache/doris/qe/ShowExecutor.java | 44 +- .../org/apache/doris/qe/StmtExecutor.java 
| 17 - .../doris/system/SystemInfoService.java | 54 +- .../tablefunction/MetadataGenerator.java | 13 +- .../apache/doris/analysis/AccessTestUtil.java | 12 - .../AdminCancelRebalanceDiskStmtTest.java | 2 - .../analysis/AdminRebalanceDiskStmtTest.java | 2 - .../doris/analysis/AlterClusterStmtTest.java | 79 -- .../doris/analysis/BackendStmtTest.java | 2 +- .../doris/analysis/CreateClusterStmtTest.java | 74 -- .../doris/analysis/DropClusterStmtTest.java | 78 -- .../apache/doris/analysis/LinkDbStmtTest.java | 68 -- .../doris/analysis/MigrateDbStmtTest.java | 68 -- .../apache/doris/catalog/CatalogTestUtil.java | 1 - .../doris/cluster/SystemInfoServiceTest.java | 4 - .../apache/doris/http/DorisHttpTestCase.java | 7 - .../org/apache/doris/qe/ShowExecutorTest.java | 8 - 49 files changed, 93 insertions(+), 3040 deletions(-) delete mode 100644 fe/fe-core/src/main/java/org/apache/doris/analysis/AlterClusterClause.java delete mode 100644 fe/fe-core/src/main/java/org/apache/doris/analysis/AlterClusterStmt.java delete mode 100644 fe/fe-core/src/main/java/org/apache/doris/analysis/CreateClusterStmt.java delete mode 100644 fe/fe-core/src/main/java/org/apache/doris/analysis/DropClusterStmt.java delete mode 100644 fe/fe-core/src/main/java/org/apache/doris/analysis/EnterStmt.java delete mode 100644 fe/fe-core/src/main/java/org/apache/doris/analysis/LinkDbStmt.java delete mode 100644 fe/fe-core/src/main/java/org/apache/doris/analysis/MigrateDbStmt.java delete mode 100644 fe/fe-core/src/main/java/org/apache/doris/analysis/ShowClusterStmt.java delete mode 100644 fe/fe-core/src/main/java/org/apache/doris/analysis/ShowMigrationsStmt.java delete mode 100644 fe/fe-core/src/main/java/org/apache/doris/cluster/BaseParam.java rename fe/fe-core/src/main/java/org/apache/doris/{analysis/AlterClusterType.java => cluster/DummyCluster.java} (78%) delete mode 100644 fe/fe-core/src/main/java/org/apache/doris/persist/DropLinkDbAndUpdateDbInfo.java delete mode 100644 
fe/fe-core/src/main/java/org/apache/doris/persist/LinkDbInfo.java delete mode 100644 fe/fe-core/src/test/java/org/apache/doris/analysis/AlterClusterStmtTest.java delete mode 100644 fe/fe-core/src/test/java/org/apache/doris/analysis/CreateClusterStmtTest.java delete mode 100644 fe/fe-core/src/test/java/org/apache/doris/analysis/DropClusterStmtTest.java delete mode 100644 fe/fe-core/src/test/java/org/apache/doris/analysis/LinkDbStmtTest.java delete mode 100644 fe/fe-core/src/test/java/org/apache/doris/analysis/MigrateDbStmtTest.java diff --git a/fe/fe-common/src/main/java/org/apache/doris/common/Config.java b/fe/fe-common/src/main/java/org/apache/doris/common/Config.java index 6b06bd2b21..3b3ad137a7 100644 --- a/fe/fe-common/src/main/java/org/apache/doris/common/Config.java +++ b/fe/fe-common/src/main/java/org/apache/doris/common/Config.java @@ -1034,17 +1034,6 @@ public class Config extends ConfigBase { @ConfField(mutable = true, masterOnly = true) public static boolean force_do_metadata_checkpoint = false; - /** - * The multi cluster feature will be deprecated in version 0.12 - * set this config to true will disable all operations related to cluster feature, include: - * create/drop cluster - * add free backend/add backend to cluster/decommission cluster balance - * change the backends num of cluster - * link/migration db - */ - @ConfField(mutable = true) - public static boolean disable_cluster_feature = true; - /** * Decide how often to check dynamic partition */ diff --git a/fe/fe-core/src/main/cup/sql_parser.cup b/fe/fe-core/src/main/cup/sql_parser.cup index 64ef0feefc..cfdb614bc4 100644 --- a/fe/fe-core/src/main/cup/sql_parser.cup +++ b/fe/fe-core/src/main/cup/sql_parser.cup @@ -653,7 +653,7 @@ nonterminal StatementBase stmt, show_stmt, show_param, help_stmt, load_stmt, show_routine_load_stmt, show_routine_load_task_stmt, show_create_routine_load_stmt, show_create_load_stmt, show_create_reporitory_stmt, describe_stmt, alter_stmt, use_stmt, kill_stmt, drop_stmt, 
recover_stmt, grant_stmt, revoke_stmt, create_stmt, set_stmt, sync_stmt, cancel_stmt, cancel_param, delete_stmt, - link_stmt, migrate_stmt, switch_stmt, enter_stmt, transaction_stmt, unsupported_stmt, export_stmt, admin_stmt, truncate_stmt, + switch_stmt, transaction_stmt, unsupported_stmt, export_stmt, admin_stmt, truncate_stmt, import_columns_stmt, import_delete_on_stmt, import_sequence_stmt, import_where_stmt, install_plugin_stmt, uninstall_plugin_stmt, import_preceding_filter_stmt, unlock_tables_stmt, lock_tables_stmt, refresh_stmt, clean_stmt, analyze_stmt, show_mtmv_stmt, kill_analysis_job_stmt; @@ -721,8 +721,6 @@ nonterminal PartitionNames opt_partition_names, partition_names; nonterminal ArrayList opt_tablet_list, tablet_list; nonterminal TableSample opt_table_sample, table_sample; nonterminal TableSnapshot opt_table_snapshot, table_snapshot; -nonterminal ClusterName cluster_name; -nonterminal ClusterName des_cluster_name; nonterminal TableName table_name, opt_table_name; nonterminal FunctionName function_name; nonterminal EncryptKeyName encryptkey_name; @@ -966,7 +964,7 @@ precedence right LBRACE; precedence left KW_ENGINE; // unused -// nonterminal Expr where_clause_without_null, List col_list, opt_charset_name, AlterClause alter_cluster_clause; +// nonterminal Expr where_clause_without_null, List col_list, opt_charset_name; start with stmts; @@ -1065,14 +1063,8 @@ stmt ::= {: RESULT = stmt; :} | create_stmt:query {: RESULT = query; :} - | link_stmt:query - {: RESULT = query; :} - | migrate_stmt:query - {: RESULT = query; :} | switch_stmt:stmt {: RESULT = stmt; :} - | enter_stmt:enter - {: RESULT = enter; :} | query_stmt:query {: RESULT = query; :} | drop_stmt:stmt @@ -1173,28 +1165,6 @@ stmt ::= :} ; -cluster_name ::= - ident:cluster - {: - RESULT = new ClusterName(cluster, ""); - :} - | ident:cluster DOT ident:db - {: - RESULT = new ClusterName(cluster, db); - :} - ; - -des_cluster_name ::= - ident:cluster - {: - RESULT = new ClusterName(cluster, ""); 
- :} - | ident:cluster DOT ident:db - {: - RESULT = new ClusterName(cluster, db); - :} - ; - refresh_stmt ::= KW_REFRESH KW_TABLE table_name:tbl {: @@ -1252,22 +1222,6 @@ uninstall_plugin_stmt ::= :} ; -// link statement -link_stmt ::= - KW_LINK KW_DATABASE cluster_name:src_name des_cluster_name:des_name - {: - RESULT = new LinkDbStmt(src_name, des_name); - :} - ; - -// migrate statement -migrate_stmt ::= - KW_MIGRATE KW_DATABASE cluster_name:src_name des_cluster_name:des_name - {: - RESULT = new MigrateDbStmt(src_name, des_name); - :} - ; - // Alter Statement alter_stmt ::= KW_ALTER KW_TABLE table_name:tbl @@ -1292,10 +1246,6 @@ alter_stmt ::= {: RESULT = new AlterSystemStmt(clause); :} - | KW_ALTER KW_CLUSTER ident:name opt_properties:properties - {: - RESULT = new AlterClusterStmt(name, properties); - :} | KW_ALTER KW_DATABASE ident:dbName KW_SET KW_DATA KW_QUOTA quantity:quota_quantity {: RESULT = new AlterDatabaseQuotaStmt(dbName, QuotaType.DATA, quota_quantity); @@ -1615,15 +1565,7 @@ opt_enable_feature_properties ::= alter_system_clause ::= KW_ADD KW_BACKEND string_list:hostPorts opt_properties:properties {: - RESULT = new AddBackendClause(hostPorts, false, properties); - :} - | KW_ADD KW_FREE KW_BACKEND string_list:hostPorts - {: - RESULT = new AddBackendClause(hostPorts, true, Maps.newHashMap()); - :} - | KW_ADD KW_BACKEND KW_TO ident:clusterName string_list:hostPorts - {: - RESULT = new AddBackendClause(hostPorts, clusterName); + RESULT = new AddBackendClause(hostPorts, properties); :} | KW_DROP KW_BACKEND string_list:hostPorts {: @@ -1814,11 +1756,6 @@ create_stmt ::= {: RESULT = new CreateCatalogStmt(ifNotExists, catalogName, resourceName, properties, comment); :} - /* cluster */ - /* KW_CREATE KW_CLUSTER ident:name opt_properties:properties KW_IDENTIFIED KW_BY STRING_LITERAL:password - {: - RESULT = new CreateClusterStmt(name, properties, password); - :}*/ /* Function */ | KW_CREATE opt_var_type:type opt_aggregate:isAggregate KW_FUNCTION 
opt_if_not_exists:ifNotExists function_name:functionName LPAREN func_args_def:args RPAREN KW_RETURNS type_def:returnType opt_intermediate_type:intermediateType opt_properties:properties @@ -1929,11 +1866,6 @@ create_stmt ::= {: RESULT = new CreateViewStmt(ifNotExists, viewName, columns, comment, view_def); :} - /* cluster */ - | KW_CREATE KW_CLUSTER ident:name opt_properties:properties KW_IDENTIFIED KW_BY STRING_LITERAL:password - {: - RESULT = new CreateClusterStmt(name, properties, password); - :} | KW_CREATE opt_read_only:isReadOnly KW_REPOSITORY ident:repoName KW_WITH storage_backend:storage {: RESULT = new CreateRepositoryStmt(isReadOnly, repoName, storage); @@ -2972,11 +2904,6 @@ drop_stmt ::= {: RESULT = new DropCatalogStmt(ifExists, catalogName); :} - /* cluster */ - | KW_DROP KW_CLUSTER opt_if_exists:ifExists ident:cluster - {: - RESULT = new DropClusterStmt(ifExists, cluster); - :} /* Function */ | KW_DROP opt_var_type:type KW_FUNCTION opt_if_exists:ifExists function_name:functionName LPAREN func_args_def:args RPAREN {: @@ -3744,15 +3671,6 @@ show_param ::= {: RESULT = new ShowCreateFunctionStmt(type, dbName, functionName, args); :} - /* Cluster */ - | KW_CLUSTERS - {: - RESULT = new ShowClusterStmt(); - :} - | KW_MIGRATIONS - {: - RESULT = new ShowMigrationsStmt(); - :} /* Database */ | KW_DATABASES opt_wild_where {: @@ -4540,13 +4458,6 @@ switch_stmt ::= :} ; -// Change cluster -enter_stmt ::= - KW_ENTER ident:cluster - {: - RESULT = new EnterStmt(cluster); - :} - ; // Change database use_stmt ::= KW_USE ident:db @@ -7623,13 +7534,6 @@ time_unit ::= // {: RESULT = e; :} // ; // -// alter_cluster_clause ::= -// KW_MODIFY opt_properties:properties -// {: -// RESULT = new AlterClusterClause(AlterClusterType.ALTER_CLUSTER_PROPERTIES, properties); -// :} -// ; -// // col_list ::= // KW_COLUMNS LPAREN ident_list:colList RPAREN // {: diff --git a/fe/fe-core/src/main/java/org/apache/doris/alter/SystemHandler.java 
b/fe/fe-core/src/main/java/org/apache/doris/alter/SystemHandler.java index 7df48f4a3a..79952d8a66 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/alter/SystemHandler.java +++ b/fe/fe-core/src/main/java/org/apache/doris/alter/SystemHandler.java @@ -37,8 +37,6 @@ import org.apache.doris.catalog.OlapTable; import org.apache.doris.catalog.TabletInvertedIndex; import org.apache.doris.common.Config; import org.apache.doris.common.DdlException; -import org.apache.doris.common.ErrorCode; -import org.apache.doris.common.ErrorReport; import org.apache.doris.common.UserException; import org.apache.doris.ha.FrontendNodeType; import org.apache.doris.system.Backend; @@ -46,7 +44,6 @@ import org.apache.doris.system.SystemInfoService; import org.apache.doris.system.SystemInfoService.HostInfo; import com.google.common.base.Preconditions; -import com.google.common.base.Strings; import com.google.common.collect.Lists; import org.apache.commons.lang3.NotImplementedException; import org.apache.logging.log4j.LogManager; @@ -116,18 +113,7 @@ public class SystemHandler extends AlterHandler { if (alterClause instanceof AddBackendClause) { // add backend AddBackendClause addBackendClause = (AddBackendClause) alterClause; - final String destClusterName = addBackendClause.getDestCluster(); - - if ((!Strings.isNullOrEmpty(destClusterName) || addBackendClause.isFree()) - && Config.disable_cluster_feature) { - ErrorReport.reportAnalysisException(ErrorCode.ERR_INVALID_OPERATION, "ADD BACKEND TO CLUSTER"); - } - - if (!Strings.isNullOrEmpty(destClusterName) && Env.getCurrentEnv().getCluster(destClusterName) == null) { - throw new DdlException("Cluster: " + destClusterName + " does not exist."); - } - Env.getCurrentSystemInfo().addBackends(addBackendClause.getHostInfos(), addBackendClause.isFree(), - addBackendClause.getDestCluster(), addBackendClause.getTagMap()); + Env.getCurrentSystemInfo().addBackends(addBackendClause.getHostInfos(), addBackendClause.getTagMap()); } else if (alterClause 
instanceof DropBackendClause) { // drop backend DropBackendClause dropBackendClause = (DropBackendClause) alterClause; diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AddBackendClause.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AddBackendClause.java index 7ccf574f7d..a3f57d700d 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AddBackendClause.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AddBackendClause.java @@ -22,42 +22,27 @@ import org.apache.doris.common.Config; import org.apache.doris.common.util.PropertyAnalyzer; import org.apache.doris.resource.Tag; -import com.google.common.base.Strings; import com.google.common.collect.Maps; import java.util.List; import java.util.Map; public class AddBackendClause extends BackendClause { - // be in free state is not owned by any cluster - protected boolean isFree; - // cluster that backend will be added to - protected String destCluster; protected Map properties = Maps.newHashMap(); private Map tagMap; public AddBackendClause(List hostPorts) { super(hostPorts); - this.isFree = true; - this.destCluster = ""; } - public AddBackendClause(List hostPorts, boolean isFree, Map properties) { + public AddBackendClause(List hostPorts, Map properties) { super(hostPorts); - this.isFree = isFree; - this.destCluster = ""; this.properties = properties; if (this.properties == null) { this.properties = Maps.newHashMap(); } } - public AddBackendClause(List hostPorts, String destCluster) { - super(hostPorts); - this.isFree = false; - this.destCluster = destCluster; - } - public Map getTagMap() { return tagMap; } @@ -83,15 +68,7 @@ public class AddBackendClause extends BackendClause { public String toSql() { StringBuilder sb = new StringBuilder(); sb.append("ADD "); - if (isFree) { - sb.append("FREE "); - } sb.append("BACKEND "); - - if (!Strings.isNullOrEmpty(destCluster)) { - sb.append("to").append(destCluster); - } - for (int i = 0; i < hostPorts.size(); i++) { 
sb.append("\"").append(hostPorts.get(i)).append("\""); if (i != hostPorts.size() - 1) { @@ -100,13 +77,4 @@ public class AddBackendClause extends BackendClause { } return sb.toString(); } - - public boolean isFree() { - return this.isFree; - } - - public String getDestCluster() { - return destCluster; - } - } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterClusterClause.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterClusterClause.java deleted file mode 100644 index eb92be8d33..0000000000 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterClusterClause.java +++ /dev/null @@ -1,76 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package org.apache.doris.analysis; - -import org.apache.doris.alter.AlterOpType; -import org.apache.doris.common.AnalysisException; -import org.apache.doris.common.Config; -import org.apache.doris.common.ErrorCode; -import org.apache.doris.common.ErrorReport; - -import org.apache.commons.lang3.NotImplementedException; - -import java.util.Map; - -@Deprecated -public class AlterClusterClause extends AlterClause { - private AlterClusterType type; - private Map properties; - private int instanceNum; - private String password; - - public AlterClusterClause(AlterClusterType type, Map properties) { - super(AlterOpType.ALTER_OTHER); - this.type = type; - this.properties = properties; - instanceNum = 0; - password = ""; - } - - @Override - public void analyze(Analyzer analyzer) throws AnalysisException { - if (Config.disable_cluster_feature) { - ErrorReport.reportAnalysisException(ErrorCode.ERR_INVALID_OPERATION, "ALTER CLUSTER"); - } - - if (properties == null || properties.size() == 0) { - ErrorReport.reportAnalysisException(ErrorCode.ERR_CLUSTER_NO_PARAMETER); - } - - if (!properties.containsKey(CreateClusterStmt.CLUSTER_INSTANCE_NUM)) { - ErrorReport.reportAnalysisException(ErrorCode.ERR_CLUSTER_NO_INSTANCE_NUM); - } - - instanceNum = Integer.valueOf(properties.get(CreateClusterStmt.CLUSTER_INSTANCE_NUM)); - password = properties.get(CreateClusterStmt.CLUSTER_SUPERMAN_PASSWORD); - } - - @Override - public String toSql() { - // TODO Auto-generated method stub - throw new NotImplementedException("toSql is not implemented"); - } - - public int getInstanceNum() { - return instanceNum; - } - - public String getPassword() { - return password; - } -} diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterClusterStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterClusterStmt.java deleted file mode 100644 index 459f7c591d..0000000000 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterClusterStmt.java +++ /dev/null @@ -1,92 +0,0 @@ -// 
Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package org.apache.doris.analysis; - -import org.apache.doris.catalog.Env; -import org.apache.doris.common.AnalysisException; -import org.apache.doris.common.Config; -import org.apache.doris.common.ErrorCode; -import org.apache.doris.common.ErrorReport; -import org.apache.doris.mysql.privilege.PrivPredicate; -import org.apache.doris.qe.ConnectContext; - -import java.util.Map; - -@Deprecated -public class AlterClusterStmt extends DdlStmt { - - private Map properties; - private String alterClusterName; - private String clusterName; - private int instanceNum; - - public AlterClusterStmt(String clusterName, Map properties) { - this.alterClusterName = clusterName; - this.properties = properties; - } - - @Override - public void analyze(Analyzer analyzer) throws AnalysisException { - if (Config.disable_cluster_feature) { - ErrorReport.reportAnalysisException(ErrorCode.ERR_INVALID_OPERATION, "ALTER CLUSTER"); - } - - if (!Env.getCurrentEnv().getAccessManager().checkGlobalPriv(ConnectContext.get(), PrivPredicate.OPERATOR)) { - ErrorReport.reportAnalysisException(ErrorCode.ERR_CLUSTER_NO_AUTHORITY, "NODE"); - } - - if (properties == null || properties.size() == 0 - || 
!properties.containsKey(CreateClusterStmt.CLUSTER_INSTANCE_NUM)) { - ErrorReport.reportAnalysisException(ErrorCode.ERR_CLUSTER_NO_PARAMETER); - } - try { - instanceNum = Integer.valueOf(properties.get(CreateClusterStmt.CLUSTER_INSTANCE_NUM)); - } catch (NumberFormatException e) { - ErrorReport.reportAnalysisException(ErrorCode.ERR_CLUSTER_NO_PARAMETER); - } - - if (instanceNum < 0) { - ErrorReport.reportAnalysisException(ErrorCode.ERR_CLUSTER_CREATE_ISTANCE_NUM_ERROR); - } - } - - @Override - public String toSql() { - return "ALTER CLUSTER " + alterClusterName + " PROPERTIES(\"instance_num\"=" + "\"" + instanceNum + "\")"; - } - - public int getInstanceNum() { - return instanceNum; - } - - public String getAlterClusterName() { - return alterClusterName; - } - - public void setAlterClusterName(String alterClusterName) { - this.alterClusterName = alterClusterName; - } - - public String getClusterName() { - return this.clusterName; - } - - public void setInstanceNum(int instanceNum) { - this.instanceNum = instanceNum; - } -} diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateClusterStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateClusterStmt.java deleted file mode 100644 index 668a1bc3f1..0000000000 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateClusterStmt.java +++ /dev/null @@ -1,125 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package org.apache.doris.analysis; - -import org.apache.doris.catalog.Env; -import org.apache.doris.common.Config; -import org.apache.doris.common.ErrorCode; -import org.apache.doris.common.ErrorReport; -import org.apache.doris.common.FeNameFormat; -import org.apache.doris.common.UserException; -import org.apache.doris.mysql.MysqlPassword; -import org.apache.doris.mysql.privilege.PrivPredicate; -import org.apache.doris.qe.ConnectContext; - -import com.google.common.base.Strings; - -import java.util.Map; - -@Deprecated -public class CreateClusterStmt extends DdlStmt { - public static String CLUSTER_INSTANCE_NUM = "instance_num"; - public static String CLUSTER_SUPERMAN_PASSWORD = "password"; - public static String CLUSTER_SUPERUSER_NAME = "superuser"; - - private String clusterName; - private boolean ifNotExists; - private int instanceNum; - private Map properties; - private byte[] scramblePassword; - private String passwd; - - public CreateClusterStmt() { - - } - - public CreateClusterStmt(String clusterName, Map properties, String passwd) { - this.clusterName = clusterName; - this.properties = properties; - this.passwd = passwd; - } - - public String getClusterName() { - return clusterName; - } - - public void setClusterName(String clusterName) { - this.clusterName = clusterName; - } - - public boolean isSetIfNotExists() { - return ifNotExists; - } - - @Override - public void analyze(Analyzer analyzer) throws UserException { - if (Config.disable_cluster_feature) { - ErrorReport.reportAnalysisException(ErrorCode.ERR_INVALID_OPERATION, "CREATE 
CLUSTER"); - } - FeNameFormat.checkDbName(clusterName); - if (!Env.getCurrentEnv().getAccessManager().checkGlobalPriv(ConnectContext.get(), PrivPredicate.OPERATOR)) { - ErrorReport.reportAnalysisException(ErrorCode.ERR_CLUSTER_NO_AUTHORITY, analyzer.getQualifiedUser()); - } - - if (properties == null || properties.size() == 0 || !properties.containsKey(CLUSTER_INSTANCE_NUM)) { - ErrorReport.reportAnalysisException(ErrorCode.ERR_CLUSTER_NO_PARAMETER); - } - - try { - instanceNum = Integer.valueOf(properties.get(CLUSTER_INSTANCE_NUM)); - } catch (NumberFormatException e) { - ErrorReport.reportAnalysisException(ErrorCode.ERR_CLUSTER_NO_PARAMETER); - } - - if (instanceNum < 0) { - ErrorReport.reportAnalysisException(ErrorCode.ERR_CLUSTER_CREATE_ISTANCE_NUM_ERROR); - } - - final String password = passwd; - if (!Strings.isNullOrEmpty(password)) { - scramblePassword = MysqlPassword.makeScrambledPassword(password); - } else { - scramblePassword = new byte[0]; - } - } - - @Override - public String toSql() { - final String sql = "CREATE CLUSTER " + clusterName + " PROPERTIES(\"instance_num\"=" + "\"" + instanceNum - + "\")" + "IDENTIFIED BY '" + passwd + "'"; - return sql; - } - - @Override - public String toString() { - return toSql(); - } - - public int getInstanceNum() { - return instanceNum; - } - - public void setInstanceNum(int instanceNum) { - this.instanceNum = instanceNum; - } - - public byte[] getPassword() { - return scramblePassword; - } - -} diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/DropClusterStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/DropClusterStmt.java deleted file mode 100644 index 155d9d7f53..0000000000 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/DropClusterStmt.java +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. 
See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package org.apache.doris.analysis; - -import org.apache.doris.catalog.Env; -import org.apache.doris.common.AnalysisException; -import org.apache.doris.common.Config; -import org.apache.doris.common.ErrorCode; -import org.apache.doris.common.ErrorReport; -import org.apache.doris.common.UserException; -import org.apache.doris.mysql.privilege.PrivPredicate; -import org.apache.doris.qe.ConnectContext; -import org.apache.doris.system.SystemInfoService; - -import com.google.common.base.Strings; - -@Deprecated -public class DropClusterStmt extends DdlStmt { - private boolean ifExists; - private String name; - - public DropClusterStmt(boolean ifExists, String name) { - this.ifExists = ifExists; - this.name = name; - } - - @Override - public void analyze(Analyzer analyzer) throws AnalysisException, UserException { - if (Config.disable_cluster_feature) { - ErrorReport.reportAnalysisException(ErrorCode.ERR_INVALID_OPERATION, "DROP CLUSTER"); - } - - if (Strings.isNullOrEmpty(name)) { - ErrorReport.reportAnalysisException(ErrorCode.ERR_CLUSTER_NAME_NULL); - } - - if (name.equalsIgnoreCase(SystemInfoService.DEFAULT_CLUSTER)) { - throw new AnalysisException("Can not drop " + SystemInfoService.DEFAULT_CLUSTER); - } - - if 
(!Env.getCurrentEnv().getAccessManager().checkGlobalPriv(ConnectContext.get(), PrivPredicate.OPERATOR)) { - ErrorReport.reportAnalysisException(ErrorCode.ERR_CLUSTER_NO_PERMISSIONS); - } - } - - @Override - public String toSql() { - return "DROP CLUSTER " + name; - } - - public String getClusterName() { - return name; - } - - public void setClusterName(String clusterName) { - this.name = clusterName; - } - - public boolean isIfExists() { - return ifExists; - } - - public void setIfExists(boolean ifExists) { - this.ifExists = ifExists; - } - - public String getName() { - return name; - } - - public void setName(String name) { - this.name = name; - } - -} diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/EnterStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/EnterStmt.java deleted file mode 100644 index b0d5fc0871..0000000000 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/EnterStmt.java +++ /dev/null @@ -1,71 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package org.apache.doris.analysis; - -import org.apache.doris.common.AnalysisException; -import org.apache.doris.common.ErrorCode; -import org.apache.doris.common.ErrorReport; -import org.apache.doris.common.UserException; - -import com.google.common.base.Strings; - -public class EnterStmt extends DdlStmt { - - private String name; - - public EnterStmt(String name) { - this.name = name; - } - - @Override - public void analyze(Analyzer analyzer) throws AnalysisException, UserException { - if (Strings.isNullOrEmpty(name)) { - ErrorReport.reportAnalysisException(ErrorCode.ERR_CLUSTER_NAME_NULL); - } - - if (analyzer.getEnv().getCluster(name) == null) { - ErrorReport.reportAnalysisException(ErrorCode.ERR_CLUSTER_UNKNOWN_ERROR, name); - } - } - - @Override - public String toSql() { - // TODO Auto-generated method stub - return super.toSql(); - } - - @Override - public String toString() { - // TODO Auto-generated method stub - return super.toString(); - } - - public String getClusterName() { - return name; - } - - public void setClusterName(String name) { - this.name = name; - } - - @Override - public RedirectStatus getRedirectStatus() { - return RedirectStatus.NO_FORWARD; - } - -} diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/LinkDbStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/LinkDbStmt.java deleted file mode 100644 index 684b0692eb..0000000000 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/LinkDbStmt.java +++ /dev/null @@ -1,96 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package org.apache.doris.analysis; - -import org.apache.doris.catalog.Env; -import org.apache.doris.cluster.ClusterNamespace; -import org.apache.doris.common.AnalysisException; -import org.apache.doris.common.Config; -import org.apache.doris.common.ErrorCode; -import org.apache.doris.common.ErrorReport; -import org.apache.doris.common.UserException; -import org.apache.doris.mysql.privilege.PrivPredicate; -import org.apache.doris.qe.ConnectContext; - -import com.google.common.base.Strings; - -@Deprecated -public class LinkDbStmt extends DdlStmt { - - private ClusterName src; - private ClusterName dest; - private String srcCluster; - private String destCluster; - private String srcDb; - private String destDb; - - LinkDbStmt(ClusterName src, ClusterName dest) { - this.src = src; - this.dest = dest; - } - - public String getSrcCluster() { - return srcCluster; - } - - public String getDestCluster() { - return destCluster; - } - - public String getSrcDb() { - return srcDb; - } - - public String getDestDb() { - return destDb; - } - - @Override - public void analyze(Analyzer analyzer) throws AnalysisException, UserException { - if (Config.disable_cluster_feature) { - ErrorReport.reportAnalysisException(ErrorCode.ERR_INVALID_OPERATION, "LINK DATABASE"); - } - - src.analyze(analyzer); - dest.analyze(analyzer); - - if (!Env.getCurrentEnv().getAccessManager().checkGlobalPriv(ConnectContext.get(), PrivPredicate.ADMIN)) { - ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, - "ADMIN"); - } - - if 
(Strings.isNullOrEmpty(src.getCluster()) || Strings.isNullOrEmpty(dest.getCluster()) - || Strings.isNullOrEmpty(src.getDb()) || Strings.isNullOrEmpty(dest.getDb())) { - ErrorReport.reportAnalysisException(ErrorCode.ERR_CLUSTER_NO_PARAMETER); - } - srcCluster = src.getCluster(); - srcDb = ClusterNamespace.getFullName(srcCluster, src.getDb()); - destCluster = dest.getCluster(); - destDb = ClusterNamespace.getFullName(destCluster, dest.getDb()); - } - - @Override - public String toSql() { - return "LINK DATABASE " + srcCluster + "." + srcDb + " " + destCluster + "." + destDb; - } - - @Override - public String toString() { - return toSql(); - } -} diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/MigrateDbStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/MigrateDbStmt.java deleted file mode 100644 index c0ed24ecc8..0000000000 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/MigrateDbStmt.java +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package org.apache.doris.analysis; - -import org.apache.doris.catalog.Env; -import org.apache.doris.cluster.ClusterNamespace; -import org.apache.doris.common.AnalysisException; -import org.apache.doris.common.Config; -import org.apache.doris.common.ErrorCode; -import org.apache.doris.common.ErrorReport; -import org.apache.doris.common.UserException; -import org.apache.doris.mysql.privilege.PrivPredicate; -import org.apache.doris.qe.ConnectContext; - -@Deprecated -public class MigrateDbStmt extends DdlStmt { - - private ClusterName src; - private ClusterName dest; - private String srcCluster; - private String destCluster; - private String srcDb; - private String destDb; - - MigrateDbStmt(ClusterName src, ClusterName dest) { - this.src = src; - this.dest = dest; - } - - public String getSrcCluster() { - return srcCluster; - } - - public String getDestCluster() { - return destCluster; - } - - public String getSrcDb() { - return srcDb; - } - - public String getDestDb() { - return destDb; - } - - @Override - public void analyze(Analyzer analyzer) throws AnalysisException, UserException { - if (Config.disable_cluster_feature) { - ErrorReport.reportAnalysisException(ErrorCode.ERR_INVALID_OPERATION, "MIGRATION CLUSTER"); - } - - src.analyze(analyzer); - dest.analyze(analyzer); - - if (!Env.getCurrentEnv().getAccessManager().checkGlobalPriv(ConnectContext.get(), PrivPredicate.ADMIN)) { - ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, - "ADMIN"); - } - - srcCluster = src.getCluster(); - srcDb = ClusterNamespace.getFullName(srcCluster, src.getDb()); - destCluster = dest.getCluster(); - destDb = ClusterNamespace.getFullName(destCluster, dest.getDb()); - } - - @Override - public String toSql() { - return "MIGRATE DATABASE " + srcCluster + "." + srcDb + " " + destCluster + "." 
+ destDb; - } - - @Override - public String toString() { - return toSql(); - } -} diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowClusterStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowClusterStmt.java deleted file mode 100644 index 151eb8f57b..0000000000 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowClusterStmt.java +++ /dev/null @@ -1,73 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package org.apache.doris.analysis; - -import org.apache.doris.analysis.CompoundPredicate.Operator; -import org.apache.doris.catalog.Column; -import org.apache.doris.catalog.Env; -import org.apache.doris.catalog.ScalarType; -import org.apache.doris.common.AnalysisException; -import org.apache.doris.common.ErrorCode; -import org.apache.doris.common.ErrorReport; -import org.apache.doris.mysql.privilege.PrivBitSet; -import org.apache.doris.mysql.privilege.PrivPredicate; -import org.apache.doris.mysql.privilege.Privilege; -import org.apache.doris.qe.ConnectContext; -import org.apache.doris.qe.ShowResultSetMetaData; - -import com.google.common.collect.ImmutableList; - -public class ShowClusterStmt extends ShowStmt { - public static final ImmutableList TITLE_NAMES = new ImmutableList.Builder().add("cluster").build(); - - public ShowClusterStmt() { - } - - @Override - public ShowResultSetMetaData getMetaData() { - ShowResultSetMetaData.Builder builder = ShowResultSetMetaData.builder(); - - ImmutableList titleNames = null; - titleNames = TITLE_NAMES; - - for (String title : titleNames) { - builder.addColumn(new Column(title, ScalarType.createVarchar(30))); - } - return builder.build(); - } - - @Override - public SelectStmt toSelectStmt(Analyzer analyzer) throws AnalysisException { - return null; - } - - @Override - public void analyze(Analyzer analyzer) throws AnalysisException { - if (!Env.getCurrentEnv().getAccessManager().checkGlobalPriv(ConnectContext.get(), - PrivPredicate.of(PrivBitSet.of(Privilege.ADMIN_PRIV, Privilege.NODE_PRIV), Operator.OR))) { - ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, "ADMIN"); - } - } - - @Override - public String toSql() { - // TODO Auto-generated method stub - return super.toSql(); - } - -} diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowMigrationsStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowMigrationsStmt.java deleted file mode 100644 index 
73bc859ad0..0000000000 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowMigrationsStmt.java +++ /dev/null @@ -1,67 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package org.apache.doris.analysis; - -import org.apache.doris.catalog.Column; -import org.apache.doris.catalog.Env; -import org.apache.doris.catalog.ScalarType; -import org.apache.doris.common.AnalysisException; -import org.apache.doris.common.ErrorCode; -import org.apache.doris.common.ErrorReport; -import org.apache.doris.mysql.privilege.PrivPredicate; -import org.apache.doris.qe.ConnectContext; -import org.apache.doris.qe.ShowResultSetMetaData; - -import com.google.common.collect.ImmutableList; - -public class ShowMigrationsStmt extends ShowStmt { - public static final ImmutableList TITLE_NAMES = new ImmutableList.Builder() - .add("cluster").add("srcdb").add("desdb").add("progress").build(); - - public ShowMigrationsStmt() { - - } - - @Override - public ShowResultSetMetaData getMetaData() { - ShowResultSetMetaData.Builder builder = ShowResultSetMetaData.builder(); - - ImmutableList titleNames = null; - titleNames = TITLE_NAMES; - - for (String title : titleNames) { - builder.addColumn(new Column(title, 
ScalarType.createVarchar(30))); - } - return builder.build(); - } - - @Override - public SelectStmt toSelectStmt(Analyzer analyzer) throws AnalysisException { - // TODO Auto-generated method stub - return super.toSelectStmt(analyzer); - } - - @Override - public void analyze(Analyzer analyzer) throws AnalysisException { - if (!Env.getCurrentEnv().getAccessManager().checkGlobalPriv(ConnectContext.get(), PrivPredicate.ADMIN)) { - ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, - "ADMIN"); - } - } - -} diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/Env.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/Env.java index ee6b25096e..f35d1d1fb8 100755 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/Env.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/Env.java @@ -30,7 +30,6 @@ import org.apache.doris.analysis.AdminCleanTrashStmt; import org.apache.doris.analysis.AdminCompactTableStmt; import org.apache.doris.analysis.AdminSetConfigStmt; import org.apache.doris.analysis.AdminSetReplicaStatusStmt; -import org.apache.doris.analysis.AlterClusterStmt; import org.apache.doris.analysis.AlterDatabaseQuotaStmt; import org.apache.doris.analysis.AlterDatabaseQuotaStmt.QuotaType; import org.apache.doris.analysis.AlterDatabaseRename; @@ -44,7 +43,6 @@ import org.apache.doris.analysis.CancelAlterSystemStmt; import org.apache.doris.analysis.CancelAlterTableStmt; import org.apache.doris.analysis.CancelBackupStmt; import org.apache.doris.analysis.ColumnRenameClause; -import org.apache.doris.analysis.CreateClusterStmt; import org.apache.doris.analysis.CreateDbStmt; import org.apache.doris.analysis.CreateFunctionStmt; import org.apache.doris.analysis.CreateMaterializedViewStmt; @@ -55,7 +53,6 @@ import org.apache.doris.analysis.CreateTableStmt; import org.apache.doris.analysis.CreateViewStmt; import org.apache.doris.analysis.DdlStmt; import org.apache.doris.analysis.DistributionDesc; -import 
org.apache.doris.analysis.DropClusterStmt; import org.apache.doris.analysis.DropDbStmt; import org.apache.doris.analysis.DropFunctionStmt; import org.apache.doris.analysis.DropMaterializedViewStmt; @@ -64,8 +61,6 @@ import org.apache.doris.analysis.DropTableStmt; import org.apache.doris.analysis.Expr; import org.apache.doris.analysis.FunctionName; import org.apache.doris.analysis.InstallPluginStmt; -import org.apache.doris.analysis.LinkDbStmt; -import org.apache.doris.analysis.MigrateDbStmt; import org.apache.doris.analysis.ModifyDistributionClause; import org.apache.doris.analysis.PartitionRenameClause; import org.apache.doris.analysis.RecoverDbStmt; @@ -95,8 +90,6 @@ import org.apache.doris.clone.DynamicPartitionScheduler; import org.apache.doris.clone.TabletChecker; import org.apache.doris.clone.TabletScheduler; import org.apache.doris.clone.TabletSchedulerStat; -import org.apache.doris.cluster.BaseParam; -import org.apache.doris.cluster.Cluster; import org.apache.doris.cluster.ClusterNamespace; import org.apache.doris.common.AnalysisException; import org.apache.doris.common.ClientPool; @@ -173,12 +166,8 @@ import org.apache.doris.mysql.privilege.AccessControllerManager; import org.apache.doris.mysql.privilege.Auth; import org.apache.doris.mysql.privilege.PrivPredicate; import org.apache.doris.persist.AlterMultiMaterializedView; -import org.apache.doris.persist.BackendIdsUpdateInfo; import org.apache.doris.persist.BackendReplicasInfo; import org.apache.doris.persist.BackendTabletsInfo; -import org.apache.doris.persist.ClusterInfo; -import org.apache.doris.persist.DatabaseInfo; -import org.apache.doris.persist.DropLinkDbAndUpdateDbInfo; import org.apache.doris.persist.DropPartitionInfo; import org.apache.doris.persist.EditLog; import org.apache.doris.persist.GlobalVarPersistInfo; @@ -249,7 +238,6 @@ import com.google.common.collect.Queues; import com.sleepycat.je.rep.InsufficientLogException; import com.sleepycat.je.rep.NetworkRestore; import 
com.sleepycat.je.rep.NetworkRestoreConfig; -import lombok.Setter; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; import org.apache.logging.log4j.LogManager; @@ -346,10 +334,6 @@ public class Env { private AtomicBoolean canRead = new AtomicBoolean(false); private BlockingQueue typeTransferQueue; - // false if default_cluster is not created. - @Setter - private boolean isDefaultClusterCreated = false; - // node name is used for bdbje NodeName. private String nodeName; private FrontendNodeType role; @@ -598,8 +582,6 @@ public class Env { this.metaReplayState = new MetaReplayState(); - this.isDefaultClusterCreated = false; - this.brokerMgr = new BrokerMgr(); this.resourceMgr = new ResourceMgr(); @@ -1324,10 +1306,6 @@ public class Env { initLowerCaseTableNames(); } - if (!isDefaultClusterCreated) { - initDefaultCluster(); - } - getPolicyMgr().createDefaultStoragePolicy(); // MUST set master ip before starting checkpoint thread. @@ -1718,7 +1696,9 @@ public class Env { newChecksum ^= catalogId; idGenerator.setId(catalogId); - isDefaultClusterCreated = dis.readBoolean(); + // compatible with isDefaultClusterCreated, now is deprecated. + // just read and skip it. + dis.readBoolean(); LOG.info("finished replay header from image"); return newChecksum; @@ -2021,7 +2001,9 @@ public class Env { checksum ^= id; dos.writeLong(id); - dos.writeBoolean(isDefaultClusterCreated); + // compatible with isDefaultClusterCreated value, now is deprecated, + // so just write a true value. 
+ dos.writeBoolean(true); return checksum; } @@ -2661,11 +2643,6 @@ public class Env { getInternalCatalog().unprotectCreateDb(db); } - // for test - public void addCluster(Cluster cluster) { - getInternalCatalog().addCluster(cluster); - } - public void replayCreateDb(Database db) { getInternalCatalog().replayCreateDb(db, ""); } @@ -2674,10 +2651,6 @@ public class Env { getInternalCatalog().dropDb(stmt); } - public void replayDropLinkDb(DropLinkDbAndUpdateDbInfo info) { - getInternalCatalog().replayDropLinkDb(info); - } - public void replayDropDb(String dbName, boolean isForceDrop, Long recycleTime) throws DdlException { getInternalCatalog().replayDropDb(dbName, isForceDrop, recycleTime); } @@ -4665,121 +4638,13 @@ public class Env { return functionSet.isNullResultWithOneNullParamFunctions(funcName); } - /** - * create cluster - * - * @param stmt - * @throws DdlException - */ - public void createCluster(CreateClusterStmt stmt) throws DdlException { - getInternalCatalog().createCluster(stmt); - } - - /** - * replay create cluster - * - * @param cluster - */ - public void replayCreateCluster(Cluster cluster) { - getInternalCatalog().replayCreateCluster(cluster); - } - - /** - * drop cluster and cluster's db must be have deleted - * - * @param stmt - * @throws DdlException - */ - public void dropCluster(DropClusterStmt stmt) throws DdlException { - getInternalCatalog().dropCluster(stmt); - } - - public void replayDropCluster(ClusterInfo info) throws DdlException { - getInternalCatalog().replayDropCluster(info); - } - - public void replayExpandCluster(ClusterInfo info) { - getInternalCatalog().replayExpandCluster(info); - } - - /** - * modify cluster: Expansion or shrink - * - * @param stmt - * @throws DdlException - */ - public void processModifyCluster(AlterClusterStmt stmt) throws UserException { - getInternalCatalog().processModifyCluster(stmt); - } - - /** - * @param ctx - * @param clusterName - * @throws DdlException - */ - public void changeCluster(ConnectContext 
ctx, String clusterName) throws DdlException { - getInternalCatalog().changeCluster(ctx, clusterName); - } - - /** - * migrate db to link dest cluster - * - * @param stmt - * @throws DdlException - */ - public void migrateDb(MigrateDbStmt stmt) throws DdlException { - getInternalCatalog().migrateDb(stmt); - } - - public void replayMigrateDb(BaseParam param) { - getInternalCatalog().replayMigrateDb(param); - } - - public void replayLinkDb(BaseParam param) { - getInternalCatalog().replayLinkDb(param); - } - - /** - * link src db to dest db. we use java's quotation Mechanism to realize db hard links - * - * @param stmt - * @throws DdlException - */ - public void linkDb(LinkDbStmt stmt) throws DdlException { - getInternalCatalog().linkDb(stmt); - } - - public Cluster getCluster(String clusterName) { - return getInternalCatalog().getCluster(clusterName); - } - - public List getClusterNames() { - return getInternalCatalog().getClusterNames(); - } - - /** - * get migrate progress , when finish migration, next cloneCheck will reset dbState - * - * @return - */ - public Set getMigrations() { - return getInternalCatalog().getMigrations(); - } - public long loadCluster(DataInputStream dis, long checksum) throws IOException, DdlException { return getInternalCatalog().loadCluster(dis, checksum); } - public void initDefaultCluster() { - getInternalCatalog().initDefaultCluster(); - } - - public void replayUpdateDb(DatabaseInfo info) { - getInternalCatalog().replayUpdateDb(info); - } - public long saveCluster(CountingDataOutputStream dos, long checksum) throws IOException { - return getInternalCatalog().saveCluster(dos, checksum); + // do nothing + return checksum; } public long saveBrokers(CountingDataOutputStream dos, long checksum) throws IOException { @@ -4820,9 +4685,6 @@ public class Env { return checksum; } - public void replayUpdateClusterAndBackends(BackendIdsUpdateInfo info) { - getInternalCatalog().replayUpdateClusterAndBackends(info); - } public String dumpImage() { 
LOG.info("begin to dump meta data"); diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/InfoSchemaDb.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/InfoSchemaDb.java index 5624addc6f..23a6bf3478 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/InfoSchemaDb.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/InfoSchemaDb.java @@ -19,7 +19,6 @@ package org.apache.doris.catalog; import org.apache.doris.cluster.ClusterNamespace; import org.apache.doris.common.Pair; -import org.apache.doris.common.SystemIdGenerator; import java.io.DataInput; import java.io.DataOutput; @@ -28,14 +27,15 @@ import java.io.IOException; // Information schema used for MySQL compatible. public class InfoSchemaDb extends Database { public static final String DATABASE_NAME = "information_schema"; + public static final long DATABASE_ID = 0L; public InfoSchemaDb() { - super(SystemIdGenerator.getNextId(), DATABASE_NAME); + super(DATABASE_ID, DATABASE_NAME); initTables(); } public InfoSchemaDb(String cluster) { - super(SystemIdGenerator.getNextId(), ClusterNamespace.getFullName(cluster, DATABASE_NAME)); + super(DATABASE_ID, ClusterNamespace.getFullName(cluster, DATABASE_NAME)); initTables(); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/cluster/BaseParam.java b/fe/fe-core/src/main/java/org/apache/doris/cluster/BaseParam.java deleted file mode 100644 index ca628dabd0..0000000000 --- a/fe/fe-core/src/main/java/org/apache/doris/cluster/BaseParam.java +++ /dev/null @@ -1,112 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package org.apache.doris.cluster; - -import org.apache.doris.common.io.Text; -import org.apache.doris.common.io.Writable; - -import com.google.common.collect.Lists; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; -import java.util.List; - -public class BaseParam implements Writable { - - private final List strParams = Lists.newArrayList(); - private final List longParams = Lists.newArrayList(); - private final List floatParams = Lists.newArrayList(); - // private final List intParams = Lists.newArrayList(); - // private final List doubleParams = Lists.newArrayList(); - // private final List floatParams = Lists.newArrayList(); - - public String getStringParam() { - return strParams.get(0); - } - - public String getStringParam(int index) { - return strParams.get(index); - } - - public void addStringParam(String value) { - this.strParams.add(value); - } - - public long getLongParam() { - return longParams.get(0); - } - - public long getLongParam(int index) { - return longParams.get(index); - } - - public void addLongParam(long value) { - this.longParams.add(value); - } - - public int getParamLength() { - return strParams.size() + longParams.size(); - } - - public float getFloatParam(int index) { - return floatParams.get(index); - } - - public void addFloatParam(float value) { - this.floatParams.add(value); - } - - @Override - public void write(DataOutput out) throws IOException { - out.writeInt(strParams.size()); - for (String str : strParams) { - Text.writeString(out, str); - } - - 
out.writeInt(longParams.size()); - for (long value : longParams) { - out.writeLong(value); - } - - out.writeInt(floatParams.size()); - for (float value : floatParams) { - out.writeFloat(value); - } - - } - - public void readFields(DataInput in) throws IOException { - int count = in.readInt(); - while (count-- > 0) { - strParams.add(Text.readString(in)); - } - - count = in.readInt(); - while (count-- > 0) { - longParams.add(in.readLong()); - } - - count = in.readInt(); - while (count-- > 0) { - floatParams.add(in.readFloat()); - } - - } - -} diff --git a/fe/fe-core/src/main/java/org/apache/doris/cluster/Cluster.java b/fe/fe-core/src/main/java/org/apache/doris/cluster/Cluster.java index 3d99c45636..6ac5ed14a7 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/cluster/Cluster.java +++ b/fe/fe-core/src/main/java/org/apache/doris/cluster/Cluster.java @@ -17,31 +17,19 @@ package org.apache.doris.cluster; -import org.apache.doris.catalog.InfoSchemaDb; import org.apache.doris.common.io.Text; import org.apache.doris.common.io.Writable; -import org.apache.doris.persist.LinkDbInfo; -import com.google.common.base.Preconditions; -import com.google.common.base.Strings; -import com.google.common.collect.Lists; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.locks.ReentrantLock; -/** - * cluster only save db and user's id and name - * - */ +@Deprecated public class Cluster implements Writable { private static final Logger LOG = LogManager.getLogger(Cluster.class); @@ -49,16 +37,8 @@ public class Cluster implements Writable { private String name; // backend which cluster own private Set backendIdSet = ConcurrentHashMap.newKeySet(); - - private ConcurrentHashMap linkDbNames = new 
ConcurrentHashMap<>(); - private ConcurrentHashMap linkDbIds = new ConcurrentHashMap<>(); - private Set dbIds = ConcurrentHashMap.newKeySet(); private Set dbNames = ConcurrentHashMap.newKeySet(); - private ConcurrentHashMap dbNameToIDs = new ConcurrentHashMap<>(); - - // lock to perform atomic operations - private ReentrantLock lock = new ReentrantLock(true); private Cluster() { // for persist @@ -69,119 +49,10 @@ public class Cluster implements Writable { this.id = id; } - private void lock() { - this.lock.lock(); - } - - private void unlock() { - this.lock.unlock(); - } - public Long getId() { return id; } - public String getName() { - return name; - } - - public void addLinkDb(BaseParam param) { - lock(); - try { - if (Strings.isNullOrEmpty(param.getStringParam(1)) || param.getLongParam(1) <= 0) { - return; - } - final LinkDbInfo info = new LinkDbInfo(param.getStringParam(1), param.getLongParam(1)); - linkDbNames.put(param.getStringParam(), info); - linkDbIds.put(param.getLongParam(), info); - } finally { - unlock(); - } - } - - public void removeLinkDb(BaseParam param) { - lock(); - try { - linkDbNames.remove(param.getStringParam()); - linkDbIds.remove(param.getLongParam()); - } finally { - unlock(); - } - - } - - public boolean containLink(String dest, String src) { - final LinkDbInfo info = linkDbNames.get(dest); - if (info != null && info.getName().equals(src)) { - return true; - } - return false; - } - - public void addDb(String name, long id) { - if (Strings.isNullOrEmpty(name)) { - return; - } - lock(); - try { - dbNames.add(name); - dbIds.add(id); - dbNameToIDs.put(name, id); - } finally { - unlock(); - } - } - - public List getDbNames() { - final ArrayList ret = new ArrayList(); - lock(); - try { - ret.addAll(dbNames); - ret.addAll(linkDbNames.keySet()); - } finally { - unlock(); - } - return ret; - } - - public void removeDb(String name, long id) { - lock(); - try { - dbNames.remove(name); - dbIds.remove(id); - } finally { - unlock(); - } - } - - 
public boolean containDb(String name) { - return dbNames.contains(name); - } - - public List getBackendIdList() { - return Lists.newArrayList(backendIdSet); - } - - public void setBackendIdList(List backendIdList) { - if (backendIdList == null) { - return; - } - backendIdSet = ConcurrentHashMap.newKeySet(); - backendIdSet.addAll(backendIdList); - } - - public void addBackend(long backendId) { - backendIdSet.add(backendId); - } - - public void addBackends(List backendIds) { - backendIdSet.addAll(backendIds); - } - - public void removeBackend(long removedBackendId) { - backendIdSet.remove((Long) removedBackendId); - } - public static Cluster read(DataInput in) throws IOException { Cluster cluster = new Cluster(); cluster.readFields(in); @@ -190,53 +61,10 @@ public class Cluster implements Writable { @Override public void write(DataOutput out) throws IOException { - out.writeLong(id); - Text.writeString(out, name); - - out.writeLong(backendIdSet.size()); - for (Long id : backendIdSet) { - out.writeLong(id); - } - - int dbCount = dbIds.size(); - if (dbNames.contains(ClusterNamespace.getFullName(this.name, InfoSchemaDb.DATABASE_NAME))) { - dbCount--; - } - - out.writeInt(dbCount); - // don't persist InfoSchemaDb meta - for (String name : dbNames) { - if (!name.equals(ClusterNamespace.getFullName(this.name, InfoSchemaDb.DATABASE_NAME))) { - Text.writeString(out, name); - } else { - dbIds.remove(dbNameToIDs.get(name)); - } - } - - String errMsg = String.format("%d vs %d, fatal error, Write cluster meta failed!", - dbNames.size(), dbIds.size() + 1); - // ensure we have removed InfoSchemaDb id - Preconditions.checkState(dbNames.size() == dbIds.size() + 1, errMsg); - - out.writeInt(dbCount); - for (long id : dbIds) { - out.writeLong(id); - } - - out.writeInt(linkDbNames.size()); - for (Map.Entry infoMap : linkDbNames.entrySet()) { - Text.writeString(out, infoMap.getKey()); - infoMap.getValue().write(out); - } - - out.writeInt(linkDbIds.size()); - for (Map.Entry infoMap : 
linkDbIds.entrySet()) { - out.writeLong(infoMap.getKey()); - infoMap.getValue().write(out); - } + throw new IOException("Cluster should not be persisted anymore"); } - public void readFields(DataInput in) throws IOException { + private void readFields(DataInput in) throws IOException { id = in.readLong(); name = Text.readString(in); Long len = in.readLong(); @@ -254,20 +82,9 @@ public class Cluster implements Writable { dbIds.add(in.readLong()); } - count = in.readInt(); - while (count-- > 0) { - final String key = Text.readString(in); - final LinkDbInfo value = new LinkDbInfo(); - value.readFields(in); - linkDbNames.put(key, value); - } - - count = in.readInt(); - while (count-- > 0) { - final long key = in.readLong(); - final LinkDbInfo value = new LinkDbInfo(); - value.readFields(in); - linkDbIds.put(key, value); - } + // for linkDbNames + in.readInt(); + // for linkDbIds + in.readInt(); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterClusterType.java b/fe/fe-core/src/main/java/org/apache/doris/cluster/DummyCluster.java similarity index 78% rename from fe/fe-core/src/main/java/org/apache/doris/analysis/AlterClusterType.java rename to fe/fe-core/src/main/java/org/apache/doris/cluster/DummyCluster.java index c1af4c5b1d..f302b2965f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterClusterType.java +++ b/fe/fe-core/src/main/java/org/apache/doris/cluster/DummyCluster.java @@ -15,15 +15,11 @@ // specific language governing permissions and limitations // under the License. -package org.apache.doris.analysis; +package org.apache.doris.cluster; -public enum AlterClusterType { - - ALTER_CLUSTER_PROPERTIES("delete_whiteList"); - - private String type; - - private AlterClusterType(String type) { - this.type = type; - } +/** + * This class is only use to replace the default_cluster. + * It should be removed after the default_cluster is removed. 
+ */ +public class DummyCluster { } diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/BackendsProcDir.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/BackendsProcDir.java index 252b757724..8d0c11c74b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/BackendsProcDir.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/BackendsProcDir.java @@ -19,7 +19,6 @@ package org.apache.doris.common.proc; import org.apache.doris.alter.DecommissionType; import org.apache.doris.catalog.Env; -import org.apache.doris.cluster.Cluster; import org.apache.doris.common.AnalysisException; import org.apache.doris.common.Pair; import org.apache.doris.common.util.DebugUtil; @@ -70,7 +69,7 @@ public class BackendsProcDir implements ProcDirInterface { BaseProcResult result = new BaseProcResult(); result.setNames(TITLE_NAMES); - final List> backendInfos = getClusterBackendInfos(null); + final List> backendInfos = getBackendInfos(); for (List backendInfo : backendInfos) { List oneInfo = new ArrayList<>(backendInfo.size()); oneInfo.addAll(backendInfo); @@ -80,26 +79,16 @@ public class BackendsProcDir implements ProcDirInterface { } /** - * get backends of cluster - * @param clusterName + * get backends info + * * @return */ - public static List> getClusterBackendInfos(String clusterName) { + public static List> getBackendInfos() { final SystemInfoService clusterInfoService = Env.getCurrentSystemInfo(); List> backendInfos = new LinkedList<>(); - List backendIds; - if (!Strings.isNullOrEmpty(clusterName)) { - final Cluster cluster = Env.getCurrentEnv().getCluster(clusterName); - // root not in any cluster - if (null == cluster) { - return backendInfos; - } - backendIds = cluster.getBackendIdList(); - } else { - backendIds = clusterInfoService.getBackendIds(false); - if (backendIds == null) { - return backendInfos; - } + List backendIds = clusterInfoService.getBackendIds(false); + if (backendIds == null) { + return backendInfos; } long 
start = System.currentTimeMillis(); @@ -118,17 +107,15 @@ public class BackendsProcDir implements ProcDirInterface { backendInfo.add(String.valueOf(backendId)); backendInfo.add(backend.getOwnerClusterName()); backendInfo.add(backend.getIp()); - if (Strings.isNullOrEmpty(clusterName)) { - if (backend.getHostName() != null) { - backendInfo.add(backend.getHostName()); - } else { - backendInfo.add(NetUtils.getHostnameByIp(backend.getIp())); - } - backendInfo.add(String.valueOf(backend.getHeartbeatPort())); - backendInfo.add(String.valueOf(backend.getBePort())); - backendInfo.add(String.valueOf(backend.getHttpPort())); - backendInfo.add(String.valueOf(backend.getBrpcPort())); + if (backend.getHostName() != null) { + backendInfo.add(backend.getHostName()); + } else { + backendInfo.add(NetUtils.getHostnameByIp(backend.getIp())); } + backendInfo.add(String.valueOf(backend.getHeartbeatPort())); + backendInfo.add(String.valueOf(backend.getBePort())); + backendInfo.add(String.valueOf(backend.getHttpPort())); + backendInfo.add(String.valueOf(backend.getBrpcPort())); backendInfo.add(TimeUtils.longToTimeString(backend.getLastStartTime())); backendInfo.add(TimeUtils.longToTimeString(backend.getLastUpdateMs())); backendInfo.add(String.valueOf(backend.isAlive())); diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogMgr.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogMgr.java index e1e31b68c1..40eea74fd6 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogMgr.java @@ -93,7 +93,7 @@ public class CatalogMgr implements Writable, GsonPostProcessable { private final Map lastDBOfCatalog = Maps.newConcurrentMap(); // Use a separate instance to facilitate access. 
- // internalDataSource still exists in idToDataSource and nameToDataSource + // internalDataSource still exists in idToCatalog and nameToCatalog private InternalCatalog internalCatalog; public CatalogMgr() { diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/HMSExternalCatalog.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/HMSExternalCatalog.java index 6a359f3398..f5b039a7cf 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/HMSExternalCatalog.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/HMSExternalCatalog.java @@ -56,7 +56,7 @@ public class HMSExternalCatalog extends ExternalCatalog { protected PooledHiveMetaStoreClient client; // Record the latest synced event id when processing hive events // Must set to -1 otherwise client.getNextNotification will throw exception - // Reference to https://github.com/apache/doris/issues/18251 + // Reference to https://github.com/apache/doris/issues/18251 private long lastSyncedEventId = -1L; public static final String ENABLE_SELF_SPLITTER = "enable.self.splitter"; public static final String FILE_META_CACHE_TTL_SECOND = "file.meta.cache.ttl-second"; diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalCatalog.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalCatalog.java index c317ade75f..270f928bdc 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalCatalog.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalCatalog.java @@ -17,28 +17,21 @@ package org.apache.doris.datasource; -import org.apache.doris.alter.DecommissionType; import org.apache.doris.analysis.AddPartitionClause; import org.apache.doris.analysis.AddRollupClause; import org.apache.doris.analysis.AlterClause; -import org.apache.doris.analysis.AlterClusterStmt; import org.apache.doris.analysis.AlterDatabaseQuotaStmt; import org.apache.doris.analysis.AlterDatabaseQuotaStmt.QuotaType; import
org.apache.doris.analysis.AlterDatabaseRename; -import org.apache.doris.analysis.AlterSystemStmt; import org.apache.doris.analysis.Analyzer; import org.apache.doris.analysis.ColumnDef; import org.apache.doris.analysis.ColumnDef.DefaultValue; -import org.apache.doris.analysis.CreateClusterStmt; import org.apache.doris.analysis.CreateDbStmt; import org.apache.doris.analysis.CreateTableAsSelectStmt; import org.apache.doris.analysis.CreateTableLikeStmt; import org.apache.doris.analysis.CreateTableStmt; -import org.apache.doris.analysis.CreateUserStmt; import org.apache.doris.analysis.DataSortInfo; -import org.apache.doris.analysis.DecommissionBackendClause; import org.apache.doris.analysis.DistributionDesc; -import org.apache.doris.analysis.DropClusterStmt; import org.apache.doris.analysis.DropDbStmt; import org.apache.doris.analysis.DropPartitionClause; import org.apache.doris.analysis.DropTableStmt; @@ -46,10 +39,7 @@ import org.apache.doris.analysis.Expr; import org.apache.doris.analysis.FunctionCallExpr; import org.apache.doris.analysis.HashDistributionDesc; import org.apache.doris.analysis.KeysDesc; -import org.apache.doris.analysis.LinkDbStmt; -import org.apache.doris.analysis.MigrateDbStmt; import org.apache.doris.analysis.PartitionDesc; -import org.apache.doris.analysis.PassVar; import org.apache.doris.analysis.QueryStmt; import org.apache.doris.analysis.RecoverDbStmt; import org.apache.doris.analysis.RecoverPartitionStmt; @@ -59,8 +49,6 @@ import org.apache.doris.analysis.TableName; import org.apache.doris.analysis.TableRef; import org.apache.doris.analysis.TruncateTableStmt; import org.apache.doris.analysis.TypeDef; -import org.apache.doris.analysis.UserDesc; -import org.apache.doris.analysis.UserIdentity; import org.apache.doris.catalog.BrokerTable; import org.apache.doris.catalog.ColocateGroupSchema; import org.apache.doris.catalog.ColocateTableIndex; @@ -114,7 +102,6 @@ import org.apache.doris.catalog.TabletMeta; import org.apache.doris.catalog.Type; 
import org.apache.doris.catalog.View; import org.apache.doris.clone.DynamicPartitionScheduler; -import org.apache.doris.cluster.BaseParam; import org.apache.doris.cluster.Cluster; import org.apache.doris.cluster.ClusterNamespace; import org.apache.doris.common.AnalysisException; @@ -147,14 +134,10 @@ import org.apache.doris.external.iceberg.IcebergCatalogMgr; import org.apache.doris.external.iceberg.IcebergTableCreationRecordMgr; import org.apache.doris.mtmv.MTMVJobFactory; import org.apache.doris.mtmv.metadata.MTMVJob; -import org.apache.doris.mysql.privilege.Auth; -import org.apache.doris.persist.BackendIdsUpdateInfo; -import org.apache.doris.persist.ClusterInfo; import org.apache.doris.persist.ColocatePersistInfo; import org.apache.doris.persist.DatabaseInfo; import org.apache.doris.persist.DropDbInfo; import org.apache.doris.persist.DropInfo; -import org.apache.doris.persist.DropLinkDbAndUpdateDbInfo; import org.apache.doris.persist.DropPartitionInfo; import org.apache.doris.persist.PartitionPersistInfo; import org.apache.doris.persist.RecoverInfo; @@ -162,8 +145,6 @@ import org.apache.doris.persist.ReplicaPersistInfo; import org.apache.doris.persist.TruncateTableInfo; import org.apache.doris.qe.ConnectContext; import org.apache.doris.resource.Tag; -import org.apache.doris.system.Backend; -import org.apache.doris.system.Backend.BackendState; import org.apache.doris.system.SystemInfoService; import org.apache.doris.task.AgentBatchTask; import org.apache.doris.task.AgentTaskExecutor; @@ -221,14 +202,20 @@ public class InternalCatalog implements CatalogIf { private ConcurrentHashMap idToDb = new ConcurrentHashMap<>(); private ConcurrentHashMap fullNameToDb = new ConcurrentHashMap<>(); - private ConcurrentHashMap idToCluster = new ConcurrentHashMap<>(); - private ConcurrentHashMap nameToCluster = new ConcurrentHashMap<>(); - @Getter private EsRepository esRepository = new EsRepository(); @Getter private IcebergTableCreationRecordMgr icebergTableCreationRecordMgr = 
new IcebergTableCreationRecordMgr(); + public InternalCatalog() { + // create info schema db + final InfoSchemaDb db = new InfoSchemaDb(SystemInfoService.DEFAULT_CLUSTER); + db.setClusterName(SystemInfoService.DEFAULT_CLUSTER); + // do not call unprotectedCreateDb, because it will cause loop recursive when initializing Env singleton + idToDb.put(db.getId(), db); + fullNameToDb.put(db.getFullName(), db); + } + @Override public long getId() { return INTERNAL_CATALOG_ID; @@ -406,13 +393,12 @@ public class InternalCatalog implements CatalogIf { * @throws DdlException */ public void createDb(CreateDbStmt stmt) throws DdlException { - final String clusterName = stmt.getClusterName(); String fullDbName = stmt.getFullDbName(); Map properties = stmt.getProperties(); long id = Env.getCurrentEnv().getNextId(); Database db = new Database(id, fullDbName); - db.setClusterName(clusterName); + db.setClusterName(SystemInfoService.DEFAULT_CLUSTER); // check and analyze database properties before create database db.setDbProperties(new DatabaseProperty(properties).checkAndBuildProperties()); @@ -420,9 +406,6 @@ public class InternalCatalog implements CatalogIf { throw new DdlException("Failed to acquire catalog lock. 
Try again"); } try { - if (!nameToCluster.containsKey(clusterName)) { - ErrorReport.reportDdlException(ErrorCode.ERR_CLUSTER_NO_SELECT_CLUSTER, clusterName); - } if (fullNameToDb.containsKey(fullDbName)) { if (stmt.isSetIfNotExists()) { LOG.info("create database[{}] which already exists", fullDbName); @@ -453,17 +436,9 @@ public class InternalCatalog implements CatalogIf { public void unprotectCreateDb(Database db) { idToDb.put(db.getId(), db); fullNameToDb.put(db.getFullName(), db); - final Cluster cluster = nameToCluster.get(db.getClusterName()); - cluster.addDb(db.getFullName(), db.getId()); Env.getCurrentGlobalTransactionMgr().addDatabaseTransactionMgr(db.getId()); } - // for test - public void addCluster(Cluster cluster) { - nameToCluster.put(cluster.getName(), cluster); - idToCluster.put(cluster.getId(), cluster); - } - /** * replayCreateDb. * @@ -512,38 +487,6 @@ public class InternalCatalog implements CatalogIf { + " please use \"DROP database FORCE\"."); } } - if (db.getDbState() == DbState.LINK && dbName.equals(db.getAttachDb())) { - // We try to drop a hard link. - final DropLinkDbAndUpdateDbInfo info = new DropLinkDbAndUpdateDbInfo(); - fullNameToDb.remove(db.getAttachDb()); - db.setDbState(DbState.NORMAL); - info.setUpdateDbState(DbState.NORMAL); - final Cluster cluster = nameToCluster.get( - ClusterNamespace.getClusterNameFromFullName(db.getAttachDb())); - final BaseParam param = new BaseParam(); - param.addStringParam(db.getAttachDb()); - param.addLongParam(db.getId()); - cluster.removeLinkDb(param); - info.setDropDbCluster(cluster.getName()); - info.setDropDbId(db.getId()); - info.setDropDbName(db.getAttachDb()); - Env.getCurrentEnv().getEditLog().logDropLinkDb(info); - return; - } - - if (db.getDbState() == DbState.LINK && dbName.equals(db.getFullName())) { - // We try to drop a db which other dbs attach to it, - // which is not allowed. 
- ErrorReport.reportDdlException(ErrorCode.ERR_CLUSTER_DB_STATE_LINK_OR_MIGRATE, - ClusterNamespace.getNameFromFullName(dbName)); - return; - } - - if (dbName.equals(db.getAttachDb()) && db.getDbState() == DbState.MOVE) { - ErrorReport.reportDdlException(ErrorCode.ERR_CLUSTER_DB_STATE_LINK_OR_MIGRATE, - ClusterNamespace.getNameFromFullName(dbName)); - return; - } // save table names for recycling Set tableNames = db.getTableNamesWithLock(); @@ -586,8 +529,6 @@ public class InternalCatalog implements CatalogIf { // 3. remove db from catalog idToDb.remove(db.getId()); fullNameToDb.remove(db.getFullName()); - final Cluster cluster = nameToCluster.get(db.getClusterName()); - cluster.removeDb(dbName, db.getId()); DropDbInfo info = new DropDbInfo(dbName, stmt.isForceDrop(), recycleTime); Env.getCurrentEnv().getEditLog().logDropDb(info); } finally { @@ -608,21 +549,6 @@ public class InternalCatalog implements CatalogIf { db.markDropped(); } - public void replayDropLinkDb(DropLinkDbAndUpdateDbInfo info) { - tryLock(true); - try { - final Database db = this.fullNameToDb.remove(info.getDropDbName()); - db.setDbState(info.getUpdateDbState()); - final Cluster cluster = nameToCluster.get(info.getDropDbCluster()); - final BaseParam param = new BaseParam(); - param.addStringParam(db.getAttachDb()); - param.addLongParam(db.getId()); - cluster.removeLinkDb(param); - } finally { - unlock(); - } - } - public void replayDropDb(String dbName, boolean isForceDrop, Long recycleTime) throws DdlException { tryLock(true); try { @@ -652,8 +578,6 @@ public class InternalCatalog implements CatalogIf { fullNameToDb.remove(dbName); idToDb.remove(db.getId()); - final Cluster cluster = nameToCluster.get(db.getClusterName()); - cluster.removeDb(dbName, db.getId()); } finally { unlock(); } @@ -705,9 +629,6 @@ public class InternalCatalog implements CatalogIf { } fullNameToDb.put(db.getFullName(), db); idToDb.put(db.getId(), db); - final Cluster cluster = nameToCluster.get(db.getClusterName()); - 
cluster.addDb(db.getFullName(), db.getId()); - // log RecoverInfo recoverInfo = new RecoverInfo(db.getId(), -1L, -1L, newDbName, "", ""); Env.getCurrentEnv().getEditLog().logRecoverDb(recoverInfo); @@ -830,22 +751,16 @@ public class InternalCatalog implements CatalogIf { public void renameDatabase(AlterDatabaseRename stmt) throws DdlException { String fullDbName = stmt.getDbName(); String newFullDbName = stmt.getNewDbName(); - String clusterName = stmt.getClusterName(); if (fullDbName.equals(newFullDbName)) { throw new DdlException("Same database name"); } Database db = null; - Cluster cluster = null; if (!tryLock(false)) { throw new DdlException("Failed to acquire catalog lock. Try again"); } try { - cluster = nameToCluster.get(clusterName); - if (cluster == null) { - ErrorReport.reportDdlException(ErrorCode.ERR_CLUSTER_NO_EXISTS, clusterName); - } // check if db exists db = fullNameToDb.get(fullDbName); if (db == null) { @@ -859,9 +774,6 @@ public class InternalCatalog implements CatalogIf { if (fullNameToDb.get(newFullDbName) != null) { throw new DdlException("Database name[" + newFullDbName + "] is already used"); } - - cluster.removeDb(db.getFullName(), db.getId()); - cluster.addDb(newFullDbName, db.getId()); // 1. 
rename db db.setNameWithLock(newFullDbName); @@ -882,10 +794,7 @@ public class InternalCatalog implements CatalogIf { tryLock(true); try { Database db = fullNameToDb.get(dbName); - Cluster cluster = nameToCluster.get(db.getClusterName()); - cluster.removeDb(db.getFullName(), db.getId()); db.setName(newDbName); - cluster.addDb(newDbName, db.getId()); fullNameToDb.remove(dbName); fullNameToDb.put(newDbName, db); } finally { @@ -2874,661 +2783,26 @@ public class InternalCatalog implements CatalogIf { } } - /** - * create cluster - * - * @param stmt - * @throws DdlException - */ - public void createCluster(CreateClusterStmt stmt) throws DdlException { - final String clusterName = stmt.getClusterName(); - if (!tryLock(false)) { - throw new DdlException("Failed to acquire catalog lock. Try again"); - } - try { - if (nameToCluster.containsKey(clusterName)) { - ErrorReport.reportDdlException(ErrorCode.ERR_CLUSTER_HAS_EXIST, clusterName); - } else { - List backendList = Env.getCurrentSystemInfo().createCluster(clusterName, stmt.getInstanceNum()); - // 1: BE returned is less than requested, throws DdlException. - // 2: BE returned is more than or equal to 0, succeeds. 
- if (backendList != null || stmt.getInstanceNum() == 0) { - final long id = Env.getCurrentEnv().getNextId(); - final Cluster cluster = new Cluster(clusterName, id); - cluster.setBackendIdList(backendList); - unprotectCreateCluster(cluster); - if (clusterName.equals(SystemInfoService.DEFAULT_CLUSTER)) { - for (Database db : idToDb.values()) { - if (db.getClusterName().equals(SystemInfoService.DEFAULT_CLUSTER)) { - cluster.addDb(db.getFullName(), db.getId()); - } - } - } - Env.getCurrentEnv().getEditLog().logCreateCluster(cluster); - LOG.info("finish to create cluster: {}", clusterName); - } else { - ErrorReport.reportDdlException(ErrorCode.ERR_CLUSTER_BE_NOT_ENOUGH); - } - } - } finally { - unlock(); - } - - // create super user for this cluster - UserIdentity adminUser = new UserIdentity(Auth.ADMIN_USER, "%"); - try { - adminUser.analyze(stmt.getClusterName()); - } catch (AnalysisException e) { - LOG.error("should not happen", e); - } - Env.getCurrentEnv().getAuth().createUser(new CreateUserStmt(new UserDesc(adminUser, new PassVar("", true)))); - } - - private void unprotectCreateCluster(Cluster cluster) { - for (Long id : cluster.getBackendIdList()) { - final Backend backend = Env.getCurrentSystemInfo().getBackend(id); - backend.setOwnerClusterName(cluster.getName()); - backend.setBackendState(BackendState.using); - } - - idToCluster.put(cluster.getId(), cluster); - nameToCluster.put(cluster.getName(), cluster); - - // create info schema db - final InfoSchemaDb infoDb = new InfoSchemaDb(cluster.getName()); - infoDb.setClusterName(cluster.getName()); - unprotectCreateDb(infoDb); - - // only need to create default cluster once. 
- if (cluster.getName().equalsIgnoreCase(SystemInfoService.DEFAULT_CLUSTER)) { - Env.getCurrentEnv().setDefaultClusterCreated(true); - } - } - - /** - * replay create cluster - * - * @param cluster - */ - public void replayCreateCluster(Cluster cluster) { - tryLock(true); - try { - unprotectCreateCluster(cluster); - } finally { - unlock(); - } - } - - /** - * drop cluster and cluster's db must be have deleted - * - * @param stmt - * @throws DdlException - */ - public void dropCluster(DropClusterStmt stmt) throws DdlException { - if (!tryLock(false)) { - throw new DdlException("Failed to acquire catalog lock. Try again"); - } - try { - final String clusterName = stmt.getClusterName(); - final Cluster cluster = nameToCluster.get(clusterName); - if (cluster == null) { - ErrorReport.reportDdlException(ErrorCode.ERR_CLUSTER_NO_EXISTS, clusterName); - } - final List backends = Env.getCurrentSystemInfo().getClusterBackends(clusterName); - for (Backend backend : backends) { - if (backend.isDecommissioned()) { - ErrorReport.reportDdlException(ErrorCode.ERR_CLUSTER_ALTER_BE_IN_DECOMMISSION, clusterName); - } - } - - // check if there still have databases undropped, except for information_schema db - if (cluster.getDbNames().size() > 1) { - ErrorReport.reportDdlException(ErrorCode.ERR_CLUSTER_DELETE_DB_EXIST, clusterName); - } - - Env.getCurrentSystemInfo().releaseBackends(clusterName, false /* is not replay */); - final ClusterInfo info = new ClusterInfo(clusterName, cluster.getId()); - unprotectDropCluster(info, false /* is not replay */); - Env.getCurrentEnv().getEditLog().logDropCluster(info); - } finally { - unlock(); - } - - // drop user of this cluster - // set is replay to true, not write log - Env.getCurrentEnv().getAuth().dropUserOfCluster(stmt.getClusterName(), true /* is replay */); - } - - private void unprotectDropCluster(ClusterInfo info, boolean isReplay) { - Env.getCurrentSystemInfo().releaseBackends(info.getClusterName(), isReplay); - 
idToCluster.remove(info.getClusterId()); - nameToCluster.remove(info.getClusterName()); - final Database infoSchemaDb = fullNameToDb.get(InfoSchemaDb.getFullInfoSchemaDbName(info.getClusterName())); - fullNameToDb.remove(infoSchemaDb.getFullName()); - idToDb.remove(infoSchemaDb.getId()); - } - - public void replayDropCluster(ClusterInfo info) throws DdlException { - tryLock(true); - try { - unprotectDropCluster(info, true/* is replay */); - } finally { - unlock(); - } - - Env.getCurrentEnv().getAuth().dropUserOfCluster(info.getClusterName(), true /* is replay */); - } - - public void replayExpandCluster(ClusterInfo info) { - tryLock(true); - try { - final Cluster cluster = nameToCluster.get(info.getClusterName()); - cluster.addBackends(info.getBackendIdList()); - - for (Long beId : info.getBackendIdList()) { - Backend be = Env.getCurrentSystemInfo().getBackend(beId); - if (be == null) { - continue; - } - be.setOwnerClusterName(info.getClusterName()); - be.setBackendState(BackendState.using); - } - } finally { - unlock(); - } - } - - /** - * modify cluster: Expansion or shrink - * - * @param stmt - * @throws DdlException - */ - public void processModifyCluster(AlterClusterStmt stmt) throws UserException { - final String clusterName = stmt.getAlterClusterName(); - final int newInstanceNum = stmt.getInstanceNum(); - if (!tryLock(false)) { - throw new DdlException("Failed to acquire catalog lock. 
Try again"); - } - try { - Cluster cluster = nameToCluster.get(clusterName); - if (cluster == null) { - ErrorReport.reportDdlException(ErrorCode.ERR_CLUSTER_NO_EXISTS, clusterName); - } - - // check if this cluster has backend in decommission - final List backendIdsInCluster = cluster.getBackendIdList(); - for (Long beId : backendIdsInCluster) { - Backend be = Env.getCurrentSystemInfo().getBackend(beId); - if (be.isDecommissioned()) { - ErrorReport.reportDdlException(ErrorCode.ERR_CLUSTER_ALTER_BE_IN_DECOMMISSION, clusterName); - } - } - - final int oldInstanceNum = backendIdsInCluster.size(); - if (newInstanceNum > oldInstanceNum) { - // expansion - final List expandBackendIds = Env.getCurrentSystemInfo() - .calculateExpansionBackends(clusterName, newInstanceNum - oldInstanceNum); - if (expandBackendIds == null) { - ErrorReport.reportDdlException(ErrorCode.ERR_CLUSTER_BE_NOT_ENOUGH); - } - cluster.addBackends(expandBackendIds); - final ClusterInfo info = new ClusterInfo(clusterName, cluster.getId(), expandBackendIds); - Env.getCurrentEnv().getEditLog().logExpandCluster(info); - } else if (newInstanceNum < oldInstanceNum) { - // shrink - final List decomBackendIds = Env.getCurrentSystemInfo() - .calculateDecommissionBackends(clusterName, oldInstanceNum - newInstanceNum); - if (decomBackendIds == null || decomBackendIds.size() == 0) { - ErrorReport.reportDdlException(ErrorCode.ERR_CLUSTER_BACKEND_ERROR); - } - - List hostPortList = Lists.newArrayList(); - for (Long id : decomBackendIds) { - final Backend backend = Env.getCurrentSystemInfo().getBackend(id); - hostPortList.add( - new StringBuilder().append(backend.getIp()).append(":").append(backend.getHeartbeatPort()) - .toString()); - } - - // here we reuse the process of decommission backends. but set backend's decommission type to - // ClusterDecommission, which means this backend will not be removed from the system - // after decommission is done. 
- final DecommissionBackendClause clause = new DecommissionBackendClause(hostPortList); - try { - clause.analyze(null); - clause.setType(DecommissionType.ClusterDecommission); - AlterSystemStmt alterStmt = new AlterSystemStmt(clause); - alterStmt.setClusterName(clusterName); - Env.getCurrentEnv().getAlterInstance().processAlterCluster(alterStmt); - } catch (AnalysisException e) { - Preconditions.checkState(false, "should not happened: " + e.getMessage()); - } - } else { - ErrorReport.reportDdlException(ErrorCode.ERR_CLUSTER_ALTER_BE_NO_CHANGE, newInstanceNum); - } - - } finally { - unlock(); - } - } - - /** - * @param ctx - * @param clusterName - * @throws DdlException - */ - public void changeCluster(ConnectContext ctx, String clusterName) throws DdlException { - if (!Env.getCurrentEnv().getAuth().checkCanEnterCluster(ConnectContext.get(), clusterName)) { - ErrorReport.reportDdlException(ErrorCode.ERR_CLUSTER_NO_AUTHORITY, ConnectContext.get().getQualifiedUser(), - "enter"); - } - - if (!nameToCluster.containsKey(clusterName)) { - ErrorReport.reportDdlException(ErrorCode.ERR_CLUSTER_NO_EXISTS, clusterName); - } - - ctx.setCluster(clusterName); - } - - /** - * migrate db to link dest cluster - * - * @param stmt - * @throws DdlException - */ - public void migrateDb(MigrateDbStmt stmt) throws DdlException { - final String srcClusterName = stmt.getSrcCluster(); - final String destClusterName = stmt.getDestCluster(); - final String srcDbName = stmt.getSrcDb(); - final String destDbName = stmt.getDestDb(); - - if (!tryLock(false)) { - throw new DdlException("Failed to acquire catalog lock. 
Try again"); - } - try { - if (!nameToCluster.containsKey(srcClusterName)) { - ErrorReport.reportDdlException(ErrorCode.ERR_CLUSTER_SRC_CLUSTER_NOT_EXIST, srcClusterName); - } - if (!nameToCluster.containsKey(destClusterName)) { - ErrorReport.reportDdlException(ErrorCode.ERR_CLUSTER_DEST_CLUSTER_NOT_EXIST, destClusterName); - } - - if (srcClusterName.equals(destClusterName)) { - ErrorReport.reportDdlException(ErrorCode.ERR_CLUSTER_MIGRATE_SAME_CLUSTER); - } - - final Cluster srcCluster = this.nameToCluster.get(srcClusterName); - if (!srcCluster.containDb(srcDbName)) { - ErrorReport.reportDdlException(ErrorCode.ERR_CLUSTER_SRC_DB_NOT_EXIST, srcDbName); - } - final Cluster destCluster = this.nameToCluster.get(destClusterName); - if (!destCluster.containLink(destDbName, srcDbName)) { - ErrorReport.reportDdlException(ErrorCode.ERR_CLUSTER_MIGRATION_NO_LINK, srcDbName, destDbName); - } - - final Database db = fullNameToDb.get(srcDbName); - - // if the max replication num of the src db is larger then the backends num of the dest cluster, - // the migration will not be processed. - final int maxReplicationNum = db.getMaxReplicationNum(); - if (maxReplicationNum > destCluster.getBackendIdList().size()) { - ErrorReport.reportDdlException(ErrorCode.ERR_CLUSTER_MIGRATE_BE_NOT_ENOUGH, destClusterName); - } - - if (db.getDbState() == DbState.LINK) { - final BaseParam param = new BaseParam(); - param.addStringParam(destDbName); - param.addLongParam(db.getId()); - param.addStringParam(srcDbName); - param.addStringParam(destClusterName); - param.addStringParam(srcClusterName); - fullNameToDb.remove(db.getFullName()); - srcCluster.removeDb(db.getFullName(), db.getId()); - destCluster.removeLinkDb(param); - destCluster.addDb(destDbName, db.getId()); - db.writeLock(); - try { - db.setDbState(DbState.MOVE); - // set cluster to the dest cluster. - // and Clone process will do the migration things. 
- db.setClusterName(destClusterName); - db.setName(destDbName); - db.setAttachDb(srcDbName); - } finally { - db.writeUnlock(); - } - Env.getCurrentEnv().getEditLog().logMigrateCluster(param); - } else { - ErrorReport.reportDdlException(ErrorCode.ERR_CLUSTER_MIGRATION_NO_LINK, srcDbName, destDbName); - } - } finally { - unlock(); - } - } - - public void replayMigrateDb(BaseParam param) { - final String desDbName = param.getStringParam(); - final String srcDbName = param.getStringParam(1); - final String desClusterName = param.getStringParam(2); - final String srcClusterName = param.getStringParam(3); - tryLock(true); - try { - final Cluster desCluster = this.nameToCluster.get(desClusterName); - final Cluster srcCluster = this.nameToCluster.get(srcClusterName); - final Database db = fullNameToDb.get(srcDbName); - if (db.getDbState() == DbState.LINK) { - fullNameToDb.remove(db.getFullName()); - srcCluster.removeDb(db.getFullName(), db.getId()); - desCluster.removeLinkDb(param); - desCluster.addDb(param.getStringParam(), db.getId()); - - db.writeLock(); - db.setName(desDbName); - db.setAttachDb(srcDbName); - db.setDbState(DbState.MOVE); - db.setClusterName(desClusterName); - db.writeUnlock(); - } - } finally { - unlock(); - } - } - - public void replayLinkDb(BaseParam param) { - final String desClusterName = param.getStringParam(2); - final String srcDbName = param.getStringParam(1); - final String desDbName = param.getStringParam(); - - tryLock(true); - try { - final Cluster desCluster = this.nameToCluster.get(desClusterName); - final Database srcDb = fullNameToDb.get(srcDbName); - srcDb.writeLock(); - srcDb.setDbState(DbState.LINK); - srcDb.setAttachDb(desDbName); - srcDb.writeUnlock(); - desCluster.addLinkDb(param); - fullNameToDb.put(desDbName, srcDb); - } finally { - unlock(); - } - } - - /** - * link src db to dest db. 
we use java's quotation Mechanism to realize db hard links - * - * @param stmt - * @throws DdlException - */ - public void linkDb(LinkDbStmt stmt) throws DdlException { - final String srcClusterName = stmt.getSrcCluster(); - final String destClusterName = stmt.getDestCluster(); - final String srcDbName = stmt.getSrcDb(); - final String destDbName = stmt.getDestDb(); - - if (!tryLock(false)) { - throw new DdlException("Failed to acquire catalog lock. Try again"); - } - try { - if (!nameToCluster.containsKey(srcClusterName)) { - ErrorReport.reportDdlException(ErrorCode.ERR_CLUSTER_SRC_CLUSTER_NOT_EXIST, srcClusterName); - } - - if (!nameToCluster.containsKey(destClusterName)) { - ErrorReport.reportDdlException(ErrorCode.ERR_CLUSTER_DEST_CLUSTER_NOT_EXIST, destClusterName); - } - - if (srcClusterName.equals(destClusterName)) { - ErrorReport.reportDdlException(ErrorCode.ERR_CLUSTER_MIGRATE_SAME_CLUSTER); - } - - if (fullNameToDb.containsKey(destDbName)) { - ErrorReport.reportDdlException(ErrorCode.ERR_DB_CREATE_EXISTS, destDbName); - } - - final Cluster srcCluster = this.nameToCluster.get(srcClusterName); - final Cluster destCluster = this.nameToCluster.get(destClusterName); - - if (!srcCluster.containDb(srcDbName)) { - ErrorReport.reportDdlException(ErrorCode.ERR_CLUSTER_SRC_DB_NOT_EXIST, srcDbName); - } - final Database srcDb = fullNameToDb.get(srcDbName); - - if (srcDb.getDbState() != DbState.NORMAL) { - ErrorReport.reportDdlException(ErrorCode.ERR_CLUSTER_DB_STATE_LINK_OR_MIGRATE, - ClusterNamespace.getNameFromFullName(srcDbName)); - } - - srcDb.writeLock(); - try { - srcDb.setDbState(DbState.LINK); - srcDb.setAttachDb(destDbName); - } finally { - srcDb.writeUnlock(); - } - - final long id = Env.getCurrentEnv().getNextId(); - final BaseParam param = new BaseParam(); - param.addStringParam(destDbName); - param.addStringParam(srcDbName); - param.addLongParam(id); - param.addLongParam(srcDb.getId()); - param.addStringParam(destClusterName); - 
param.addStringParam(srcClusterName); - destCluster.addLinkDb(param); - fullNameToDb.put(destDbName, srcDb); - Env.getCurrentEnv().getEditLog().logLinkCluster(param); - } finally { - unlock(); - } - } - - public Cluster getCluster(String clusterName) { - return nameToCluster.get(clusterName); - } - - public List getClusterNames() { - return new ArrayList(nameToCluster.keySet()); - } - - /** - * get migrate progress , when finish migration, next cloneCheck will reset dbState - * - * @return - */ - public Set getMigrations() { - final Set infos = Sets.newHashSet(); - for (Database db : fullNameToDb.values()) { - db.readLock(); - try { - if (db.getDbState() == DbState.MOVE) { - int tabletTotal = 0; - int tabletQuorum = 0; - final Set beIds = Sets.newHashSet( - Env.getCurrentSystemInfo().getClusterBackendIds(db.getClusterName())); - final Set tableNames = db.getTableNamesWithLock(); - for (String tableName : tableNames) { - - Table table = db.getTableNullable(tableName); - if (table == null || table.getType() != TableType.OLAP) { - continue; - } - - OlapTable olapTable = (OlapTable) table; - olapTable.readLock(); - try { - for (Partition partition : olapTable.getPartitions()) { - ReplicaAllocation replicaAlloc = olapTable.getPartitionInfo() - .getReplicaAllocation(partition.getId()); - short totalReplicaNum = replicaAlloc.getTotalReplicaNum(); - for (MaterializedIndex materializedIndex : partition.getMaterializedIndices( - IndexExtState.ALL)) { - if (materializedIndex.getState() != IndexState.NORMAL) { - continue; - } - for (Tablet tablet : materializedIndex.getTablets()) { - int replicaNum = 0; - int quorum = totalReplicaNum / 2 + 1; - for (Replica replica : tablet.getReplicas()) { - if (replica.getState() != ReplicaState.CLONE && beIds.contains( - replica.getBackendId())) { - replicaNum++; - } - } - if (replicaNum > quorum) { - replicaNum = quorum; - } - - tabletQuorum = tabletQuorum + replicaNum; - tabletTotal = tabletTotal + quorum; - } - } - } - } finally { - 
olapTable.readUnlock(); - } - } - final BaseParam info = new BaseParam(); - info.addStringParam(db.getClusterName()); - info.addStringParam(db.getAttachDb()); - info.addStringParam(db.getFullName()); - final float percentage = tabletTotal > 0 ? (float) tabletQuorum / (float) tabletTotal : 0f; - info.addFloatParam(percentage); - infos.add(info); - } - } finally { - db.readUnlock(); - } - } - - return infos; - } - public long loadCluster(DataInputStream dis, long checksum) throws IOException, DdlException { int clusterCount = dis.readInt(); checksum ^= clusterCount; - for (long i = 0; i < clusterCount; ++i) { - final Cluster cluster = Cluster.read(dis); - checksum ^= cluster.getId(); - - List latestBackendIds = Env.getCurrentSystemInfo().getClusterBackendIds(cluster.getName()); - if (latestBackendIds.size() != cluster.getBackendIdList().size()) { - LOG.warn( - "Cluster:" + cluster.getName() + ", backends in Cluster is " + cluster.getBackendIdList().size() - + ", backends in SystemInfoService is " + cluster.getBackendIdList().size()); - } - // The number of BE in cluster is not same as in SystemInfoService, when perform 'ALTER - // SYSTEM ADD BACKEND TO ...' or 'ALTER SYSTEM ADD BACKEND ...', because both of them are - // for adding BE to some Cluster, but loadCluster is after loadBackend. - cluster.setBackendIdList(latestBackendIds); - - String dbName = InfoSchemaDb.getFullInfoSchemaDbName(cluster.getName()); - // Use real Catalog instance to avoid InfoSchemaDb id continuously increment - // when checkpoint thread load image. - InfoSchemaDb db = (InfoSchemaDb) Env.getServingEnv().getInternalCatalog().getDbNullable(dbName); - if (db == null) { - db = new InfoSchemaDb(cluster.getName()); - db.setClusterName(cluster.getName()); - } - String errMsg = "InfoSchemaDb id shouldn't larger than 10000, please restart your FE server"; - // Every time we construct the InfoSchemaDb, which id will increment. 
- // When InfoSchemaDb id larger than 10000 and put it to idToDb, - // which may be overwrite the normal db meta in idToDb, - // so we ensure InfoSchemaDb id less than 10000. - Preconditions.checkState(db.getId() < Env.NEXT_ID_INIT_VALUE, errMsg); - idToDb.put(db.getId(), db); - fullNameToDb.put(db.getFullName(), db); - cluster.addDb(dbName, db.getId()); - idToCluster.put(cluster.getId(), cluster); - nameToCluster.put(cluster.getName(), cluster); + Preconditions.checkState(clusterCount <= 1, clusterCount); + if (clusterCount == 1) { + // read the old cluster + Cluster oldCluster = Cluster.read(dis); + checksum ^= oldCluster.getId(); } - LOG.info("finished replay cluster from image"); + + InfoSchemaDb db = new InfoSchemaDb(SystemInfoService.DEFAULT_CLUSTER); + db.setClusterName(SystemInfoService.DEFAULT_CLUSTER); + idToDb.put(db.getId(), db); + fullNameToDb.put(db.getFullName(), db); return checksum; } - public void initDefaultCluster() { - final List backendList = Lists.newArrayList(); - final List defaultClusterBackends = Env.getCurrentSystemInfo() - .getClusterBackends(SystemInfoService.DEFAULT_CLUSTER); - for (Backend backend : defaultClusterBackends) { - backendList.add(backend.getId()); - } - - final long id = Env.getCurrentEnv().getNextId(); - final Cluster cluster = new Cluster(SystemInfoService.DEFAULT_CLUSTER, id); - - // make sure one host hold only one backend. - Set beHost = Sets.newHashSet(); - for (Backend be : defaultClusterBackends) { - if (beHost.contains(be.getIp())) { - // we can not handle this situation automatically. - LOG.error("found more than one backends in same host: {}", be.getIp()); - System.exit(-1); - } else { - beHost.add(be.getIp()); - } - } - - // we create default_cluster to meet the need for ease of use, because - // most users have no multi tenant needs. 
- cluster.setBackendIdList(backendList); - unprotectCreateCluster(cluster); - for (Database db : idToDb.values()) { - db.setClusterName(SystemInfoService.DEFAULT_CLUSTER); - cluster.addDb(db.getFullName(), db.getId()); - } - - // no matter default_cluster is created or not, - // mark isDefaultClusterCreated as true - Env.getCurrentEnv().setDefaultClusterCreated(true); - Env.getCurrentEnv().getEditLog().logCreateCluster(cluster); - } - - public void replayUpdateDb(DatabaseInfo info) { - final Database db = fullNameToDb.get(info.getDbName()); - db.setClusterName(info.getClusterName()); - db.setDbState(info.getDbState()); - } - - public long saveCluster(CountingDataOutputStream dos, long checksum) throws IOException { - final int clusterCount = idToCluster.size(); - checksum ^= clusterCount; - dos.writeInt(clusterCount); - for (Map.Entry entry : idToCluster.entrySet()) { - long clusterId = entry.getKey(); - if (clusterId >= Env.NEXT_ID_INIT_VALUE) { - checksum ^= clusterId; - final Cluster cluster = entry.getValue(); - cluster.write(dos); - } - } - return checksum; - } - - public void replayUpdateClusterAndBackends(BackendIdsUpdateInfo info) { - for (long id : info.getBackendList()) { - final Backend backend = Env.getCurrentSystemInfo().getBackend(id); - final Cluster cluster = nameToCluster.get(backend.getOwnerClusterName()); - cluster.removeBackend(id); - backend.setDecommissioned(false); - backend.clearClusterName(); - backend.setBackendState(BackendState.free); - } - } - - public List getClusterDbNames(String clusterName) throws AnalysisException { - final Cluster cluster = nameToCluster.get(clusterName); - if (cluster == null) { - throw new AnalysisException("No cluster selected"); - } - return Lists.newArrayList(cluster.getDbNames()); - } - public long saveDb(CountingDataOutputStream dos, long checksum) throws IOException { - int dbCount = idToDb.size() - nameToCluster.keySet().size(); + // 1 is for information_schema db, which does not need to be persisted. 
+ int dbCount = idToDb.size() - 1; checksum ^= dbCount; dos.writeInt(dbCount); for (Map.Entry entry : idToDb.entrySet()) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/manager/NodeAction.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/manager/NodeAction.java index 421f63cba9..f417b3cfab 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/manager/NodeAction.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/manager/NodeAction.java @@ -615,7 +615,7 @@ public class NodeAction extends RestBaseController { } Map tagMap = PropertyAnalyzer.analyzeBackendTagsProperties(properties, Tag.DEFAULT_BACKEND_TAG); - currentSystemInfo.addBackends(hostInfos, false, "", tagMap); + currentSystemInfo.addBackends(hostInfos, tagMap); } else if ("DROP".equals(action)) { currentSystemInfo.dropBackends(hostInfos); } else if ("DECOMMISSION".equals(action)) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/restv2/MetaInfoActionV2.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/restv2/MetaInfoActionV2.java index 3dc5821d3c..53d6534fc2 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/restv2/MetaInfoActionV2.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/restv2/MetaInfoActionV2.java @@ -23,7 +23,6 @@ import org.apache.doris.catalog.Env; import org.apache.doris.catalog.OlapTable; import org.apache.doris.catalog.Table; import org.apache.doris.cluster.ClusterNamespace; -import org.apache.doris.common.AnalysisException; import org.apache.doris.common.FeConstants; import org.apache.doris.common.MetaNotFoundException; import org.apache.doris.common.Pair; @@ -91,12 +90,7 @@ public class MetaInfoActionV2 extends RestBaseController { } // 1. 
get all database with privilege - List dbNames = null; - try { - dbNames = Env.getCurrentInternalCatalog().getClusterDbNames(ns); - } catch (AnalysisException e) { - return ResponseEntityBuilder.okWithCommonError("namespace does not exist: " + ns); - } + List dbNames = Env.getCurrentInternalCatalog().getDbNames(); List dbNameSet = Lists.newArrayList(); for (String fullName : dbNames) { final String db = ClusterNamespace.getNameFromFullName(fullName); diff --git a/fe/fe-core/src/main/java/org/apache/doris/journal/JournalEntity.java b/fe/fe-core/src/main/java/org/apache/doris/journal/JournalEntity.java index 9cbcdfb2d5..1d8908e6c9 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/journal/JournalEntity.java +++ b/fe/fe-core/src/main/java/org/apache/doris/journal/JournalEntity.java @@ -31,8 +31,6 @@ import org.apache.doris.catalog.EncryptKeySearchDesc; import org.apache.doris.catalog.Function; import org.apache.doris.catalog.FunctionSearchDesc; import org.apache.doris.catalog.Resource; -import org.apache.doris.cluster.BaseParam; -import org.apache.doris.cluster.Cluster; import org.apache.doris.common.io.Text; import org.apache.doris.common.io.Writable; import org.apache.doris.common.util.SmallFileMgr.SmallFile; @@ -66,7 +64,6 @@ import org.apache.doris.persist.AlterMultiMaterializedView; import org.apache.doris.persist.AlterRoutineLoadJobOperationLog; import org.apache.doris.persist.AlterUserOperationLog; import org.apache.doris.persist.AlterViewInfo; -import org.apache.doris.persist.BackendIdsUpdateInfo; import org.apache.doris.persist.BackendReplicasInfo; import org.apache.doris.persist.BackendTabletsInfo; import org.apache.doris.persist.BatchDropInfo; @@ -74,14 +71,12 @@ import org.apache.doris.persist.BatchModifyPartitionsInfo; import org.apache.doris.persist.BatchRemoveTransactionsOperation; import org.apache.doris.persist.BatchRemoveTransactionsOperationV2; import org.apache.doris.persist.CleanLabelOperationLog; -import org.apache.doris.persist.ClusterInfo; 
import org.apache.doris.persist.ColocatePersistInfo; import org.apache.doris.persist.ConsistencyCheckInfo; import org.apache.doris.persist.CreateTableInfo; import org.apache.doris.persist.DatabaseInfo; import org.apache.doris.persist.DropDbInfo; import org.apache.doris.persist.DropInfo; -import org.apache.doris.persist.DropLinkDbAndUpdateDbInfo; import org.apache.doris.persist.DropPartitionInfo; import org.apache.doris.persist.DropResourceGroupOperationLog; import org.apache.doris.persist.DropResourceOperationLog; @@ -394,37 +389,6 @@ public class JournalEntity implements Writable { isRead = true; break; } - case OperationType.OP_CREATE_CLUSTER: { - data = Cluster.read(in); - isRead = true; - break; - } - case OperationType.OP_DROP_CLUSTER: - case OperationType.OP_EXPAND_CLUSTER: { - data = new ClusterInfo(); - ((ClusterInfo) data).readFields(in); - isRead = true; - break; - } - case OperationType.OP_LINK_CLUSTER: - case OperationType.OP_MIGRATE_CLUSTER: { - data = new BaseParam(); - ((BaseParam) data).readFields(in); - isRead = true; - break; - } - case OperationType.OP_UPDATE_DB: { - data = new DatabaseInfo(); - ((DatabaseInfo) data).readFields(in); - isRead = true; - break; - } - case OperationType.OP_DROP_LINKDB: { - data = new DropLinkDbAndUpdateDbInfo(); - ((DropLinkDbAndUpdateDbInfo) data).readFields(in); - isRead = true; - break; - } case OperationType.OP_ADD_BROKER: case OperationType.OP_DROP_BROKER: { data = new BrokerMgr.ModifyBrokerInfo(); @@ -438,12 +402,6 @@ public class JournalEntity implements Writable { isRead = true; break; } - case OperationType.OP_UPDATE_CLUSTER_AND_BACKENDS: { - data = new BackendIdsUpdateInfo(); - ((BackendIdsUpdateInfo) data).readFields(in); - isRead = true; - break; - } case OperationType.OP_UPSERT_TRANSACTION_STATE: case OperationType.OP_DELETE_TRANSACTION_STATE: { data = new TransactionState(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/mysql/MysqlProto.java 
b/fe/fe-core/src/main/java/org/apache/doris/mysql/MysqlProto.java index 089bf3db35..8514f14f24 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/mysql/MysqlProto.java +++ b/fe/fe-core/src/main/java/org/apache/doris/mysql/MysqlProto.java @@ -76,42 +76,18 @@ public class MysqlProto { return null; } - // check cluster, user name may contains cluster name or cluster id. - // eg: - // user_name@cluster_name - String clusterName = ""; - String[] strList = tmpUser.split("@", 2); - if (strList.length > 1) { - tmpUser = strList[0]; - clusterName = strList[1]; - try { - // if cluster does not exist and it is not a valid cluster id, authenticate failed - if (Env.getCurrentEnv().getCluster(clusterName) == null - && Integer.valueOf(strList[1]) != context.getEnv().getClusterId()) { - ErrorReport.report(ErrorCode.ERR_UNKNOWN_CLUSTER_ID, strList[1]); - return null; - } - } catch (Throwable e) { - ErrorReport.report(ErrorCode.ERR_UNKNOWN_CLUSTER_ID, strList[1]); - return null; - } - } - if (Strings.isNullOrEmpty(clusterName)) { - clusterName = SystemInfoService.DEFAULT_CLUSTER; - } - context.setCluster(clusterName); + context.setCluster(SystemInfoService.DEFAULT_CLUSTER); // check resource group level. user name may contains resource group level. 
// eg: // ...@user_name#HIGH // set resource group if it is valid, or just ignore it - strList = tmpUser.split("#", 2); + String[] strList = tmpUser.split("#", 2); if (strList.length > 1) { tmpUser = strList[0]; } - LOG.debug("parse cluster: {}", clusterName); - String qualifiedUser = ClusterNamespace.getFullName(clusterName, tmpUser); + String qualifiedUser = ClusterNamespace.getFullName(SystemInfoService.DEFAULT_CLUSTER, tmpUser); context.setQualifiedUser(qualifiedUser); return qualifiedUser; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/persist/DropLinkDbAndUpdateDbInfo.java b/fe/fe-core/src/main/java/org/apache/doris/persist/DropLinkDbAndUpdateDbInfo.java deleted file mode 100644 index 807f9a9c40..0000000000 --- a/fe/fe-core/src/main/java/org/apache/doris/persist/DropLinkDbAndUpdateDbInfo.java +++ /dev/null @@ -1,81 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package org.apache.doris.persist; - -import org.apache.doris.catalog.Database.DbState; -import org.apache.doris.common.io.Text; -import org.apache.doris.common.io.Writable; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; - -public class DropLinkDbAndUpdateDbInfo implements Writable { - - private DbState state; - private LinkDbInfo linkDbInfo; - - public DropLinkDbAndUpdateDbInfo() { - state = DbState.NORMAL; - linkDbInfo = new LinkDbInfo(); - } - - public void setUpdateDbState(DbState dbState) { - this.state = dbState; - } - - public DbState getUpdateDbState() { - return state; - } - - public void setDropDbCluster(String cluster) { - this.linkDbInfo.setCluster(cluster); - } - - public String getDropDbCluster() { - return linkDbInfo.getCluster(); - } - - public void setDropDbName(String db) { - this.linkDbInfo.setName(db); - } - - public String getDropDbName() { - return linkDbInfo.getName(); - } - - public void setDropDbId(long id) { - this.linkDbInfo.setId(id); - } - - public long getDropDbId() { - return linkDbInfo.getId(); - } - - @Override - public void write(DataOutput out) throws IOException { - Text.writeString(out, state.toString()); - linkDbInfo.write(out); - } - - public void readFields(DataInput in) throws IOException { - state = DbState.valueOf(Text.readString(in)); - linkDbInfo.readFields(in); - } - -} diff --git a/fe/fe-core/src/main/java/org/apache/doris/persist/EditLog.java b/fe/fe-core/src/main/java/org/apache/doris/persist/EditLog.java index 094087f4fe..f491253ea8 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/persist/EditLog.java +++ b/fe/fe-core/src/main/java/org/apache/doris/persist/EditLog.java @@ -33,8 +33,6 @@ import org.apache.doris.catalog.Env; import org.apache.doris.catalog.Function; import org.apache.doris.catalog.FunctionSearchDesc; import org.apache.doris.catalog.Resource; -import org.apache.doris.cluster.BaseParam; -import org.apache.doris.cluster.Cluster; import 
org.apache.doris.common.Config; import org.apache.doris.common.FeConstants; import org.apache.doris.common.MetaNotFoundException; @@ -59,7 +57,6 @@ import org.apache.doris.load.DeleteHandler; import org.apache.doris.load.DeleteInfo; import org.apache.doris.load.ExportJob; import org.apache.doris.load.ExportMgr; -import org.apache.doris.load.LoadErrorHub; import org.apache.doris.load.LoadJob; import org.apache.doris.load.StreamLoadRecordMgr.FetchStreamLoadRecord; import org.apache.doris.load.loadv2.LoadJob.LoadJobStateUpdateInfo; @@ -485,41 +482,6 @@ public class EditLog { MetaContext.get().setMetaVersion(version); break; } - case OperationType.OP_CREATE_CLUSTER: { - final Cluster value = (Cluster) journal.getData(); - env.replayCreateCluster(value); - break; - } - case OperationType.OP_DROP_CLUSTER: { - final ClusterInfo value = (ClusterInfo) journal.getData(); - env.replayDropCluster(value); - break; - } - case OperationType.OP_EXPAND_CLUSTER: { - final ClusterInfo info = (ClusterInfo) journal.getData(); - env.replayExpandCluster(info); - break; - } - case OperationType.OP_LINK_CLUSTER: { - final BaseParam param = (BaseParam) journal.getData(); - env.replayLinkDb(param); - break; - } - case OperationType.OP_MIGRATE_CLUSTER: { - final BaseParam param = (BaseParam) journal.getData(); - env.replayMigrateDb(param); - break; - } - case OperationType.OP_UPDATE_DB: { - final DatabaseInfo param = (DatabaseInfo) journal.getData(); - env.replayUpdateDb(param); - break; - } - case OperationType.OP_DROP_LINKDB: { - final DropLinkDbAndUpdateDbInfo param = (DropLinkDbAndUpdateDbInfo) journal.getData(); - env.replayDropLinkDb(param); - break; - } case OperationType.OP_ADD_BROKER: { final BrokerMgr.ModifyBrokerInfo param = (BrokerMgr.ModifyBrokerInfo) journal.getData(); env.getBrokerMgr().replayAddBrokers(param.brokerName, param.brokerAddresses); @@ -540,11 +502,6 @@ public class EditLog { // ignore load error hub break; } - case OperationType.OP_UPDATE_CLUSTER_AND_BACKENDS: { - 
final BackendIdsUpdateInfo info = (BackendIdsUpdateInfo) journal.getData(); - env.replayUpdateClusterAndBackends(info); - break; - } case OperationType.OP_UPSERT_TRANSACTION_STATE: { final TransactionState state = (TransactionState) journal.getData(); Env.getCurrentGlobalTransactionMgr().replayUpsertTransactionState(state); @@ -1354,10 +1311,6 @@ public class EditLog { logEdit(OperationType.OP_RENAME_DB, databaseInfo); } - public void logUpdateDatabase(DatabaseInfo databaseInfo) { - logEdit(OperationType.OP_UPDATE_DB, databaseInfo); - } - public void logTableRename(TableInfo tableInfo) { logEdit(OperationType.OP_RENAME_TABLE, tableInfo); } @@ -1378,30 +1331,6 @@ public class EditLog { logEdit(OperationType.OP_RENAME_COLUMN, info); } - public void logCreateCluster(Cluster cluster) { - logEdit(OperationType.OP_CREATE_CLUSTER, cluster); - } - - public void logDropCluster(ClusterInfo info) { - logEdit(OperationType.OP_DROP_CLUSTER, info); - } - - public void logExpandCluster(ClusterInfo ci) { - logEdit(OperationType.OP_EXPAND_CLUSTER, ci); - } - - public void logLinkCluster(BaseParam param) { - logEdit(OperationType.OP_LINK_CLUSTER, param); - } - - public void logMigrateCluster(BaseParam param) { - logEdit(OperationType.OP_MIGRATE_CLUSTER, param); - } - - public void logDropLinkDb(DropLinkDbAndUpdateDbInfo info) { - logEdit(OperationType.OP_DROP_LINKDB, info); - } - public void logAddBroker(BrokerMgr.ModifyBrokerInfo info) { logEdit(OperationType.OP_ADD_BROKER, info); } @@ -1414,10 +1343,6 @@ public class EditLog { logEdit(OperationType.OP_DROP_ALL_BROKER, new Text(brokerName)); } - public void logSetLoadErrorHub(LoadErrorHub.Param param) { - logEdit(OperationType.OP_SET_LOAD_ERROR_HUB, param); - } - public void logExportCreate(ExportJob job) { logEdit(OperationType.OP_EXPORT_CREATE, job); } @@ -1427,10 +1352,6 @@ public class EditLog { logEdit(OperationType.OP_EXPORT_UPDATE_STATE, transfer); } - public void logUpdateClusterAndBackendState(BackendIdsUpdateInfo info) { 
- logEdit(OperationType.OP_UPDATE_CLUSTER_AND_BACKENDS, info); - } - // for TransactionState public void logInsertTransactionState(TransactionState transactionState) { logEdit(OperationType.OP_UPSERT_TRANSACTION_STATE, transactionState); diff --git a/fe/fe-core/src/main/java/org/apache/doris/persist/LinkDbInfo.java b/fe/fe-core/src/main/java/org/apache/doris/persist/LinkDbInfo.java deleted file mode 100644 index 2c7cb57dbd..0000000000 --- a/fe/fe-core/src/main/java/org/apache/doris/persist/LinkDbInfo.java +++ /dev/null @@ -1,81 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package org.apache.doris.persist; - -import org.apache.doris.common.io.Text; -import org.apache.doris.common.io.Writable; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; - -public class LinkDbInfo implements Writable { - private String cluster; - private String name; - private long id; - - public LinkDbInfo() { - this.cluster = ""; - this.name = ""; - } - - public LinkDbInfo(String name, long id) { - this.name = name; - this.id = id; - this.cluster = ""; - } - - public String getName() { - return name; - } - - public void setName(String name) { - this.name = name; - } - - public long getId() { - return id; - } - - public void setId(long id) { - this.id = id; - } - - @Override - public void write(DataOutput out) throws IOException { - Text.writeString(out, cluster); - Text.writeString(out, name); - out.writeLong(id); - - } - - public void readFields(DataInput in) throws IOException { - cluster = Text.readString(in); - name = Text.readString(in); - id = in.readLong(); - - } - - public String getCluster() { - return cluster; - } - - public void setCluster(String cluster) { - this.cluster = cluster; - } -} diff --git a/fe/fe-core/src/main/java/org/apache/doris/persist/meta/MetaReader.java b/fe/fe-core/src/main/java/org/apache/doris/persist/meta/MetaReader.java index 8379850de1..62e02bde42 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/persist/meta/MetaReader.java +++ b/fe/fe-core/src/main/java/org/apache/doris/persist/meta/MetaReader.java @@ -99,10 +99,10 @@ public class MetaReader { LOG.info("Skip {} module since empty meta length in the end.", metaIndex.name); continue; } - // Should skip some bytes because ignore some meta, such as load job - if (metaIndex.name.equals("loadJob") - || metaIndex.name.equals("cooldownJob")) { - LOG.info("Skip {} module", metaIndex.name); + // skip deprecated modules + if (PersistMetaModules.DEPRECATED_MODULE_NAMES.contains(metaIndex.name)) { + LOG.warn("meta modules {} is deprecated, 
ignore and skip it"); + // If this is the last module, nothing need to do. if (i < metaFooter.metaIndices.size() - 1) { IOUtils.skipFully(dis, metaFooter.metaIndices.get(i + 1).offset - metaIndex.offset); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/persist/meta/PersistMetaModules.java b/fe/fe-core/src/main/java/org/apache/doris/persist/meta/PersistMetaModules.java index 257fe982f0..5f1851fde4 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/persist/meta/PersistMetaModules.java +++ b/fe/fe-core/src/main/java/org/apache/doris/persist/meta/PersistMetaModules.java @@ -36,10 +36,14 @@ public class PersistMetaModules { public static final ImmutableList MODULE_NAMES = ImmutableList.of( "masterInfo", "frontends", "backends", "datasource", "db", "alterJob", "recycleBin", - "globalVariable", "cluster", "broker", "resources", "exportJob", "syncJob", "backupHandler", + "globalVariable", "broker", "resources", "exportJob", "syncJob", "backupHandler", "paloAuth", "transactionState", "colocateTableIndex", "routineLoadJobs", "loadJobV2", "smallFiles", "plugins", "deleteHandler", "sqlBlockRule", "policy", "mtmvJobManager", "globalFunction", "resourceGroups"); + // Modules in this list is deprecated and will not be saved in meta file. 
(also should not be in MODULE_NAMES) + public static final ImmutableList DEPRECATED_MODULE_NAMES = ImmutableList.of( + "cluster", "loadJob", "cooldownJob"); + static { MODULES_MAP = Maps.newHashMap(); MODULES_IN_ORDER = Lists.newArrayList(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/DdlExecutor.java b/fe/fe-core/src/main/java/org/apache/doris/qe/DdlExecutor.java index cfdbf467c3..fa7ea7324d 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/DdlExecutor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/DdlExecutor.java @@ -28,7 +28,6 @@ import org.apache.doris.analysis.AdminSetConfigStmt; import org.apache.doris.analysis.AdminSetReplicaStatusStmt; import org.apache.doris.analysis.AlterCatalogNameStmt; import org.apache.doris.analysis.AlterCatalogPropertyStmt; -import org.apache.doris.analysis.AlterClusterStmt; import org.apache.doris.analysis.AlterColumnStatsStmt; import org.apache.doris.analysis.AlterDatabasePropertyStmt; import org.apache.doris.analysis.AlterDatabaseQuotaStmt; @@ -53,7 +52,6 @@ import org.apache.doris.analysis.CancelLoadStmt; import org.apache.doris.analysis.CleanLabelStmt; import org.apache.doris.analysis.CleanProfileStmt; import org.apache.doris.analysis.CreateCatalogStmt; -import org.apache.doris.analysis.CreateClusterStmt; import org.apache.doris.analysis.CreateDataSyncJobStmt; import org.apache.doris.analysis.CreateDbStmt; import org.apache.doris.analysis.CreateEncryptKeyStmt; @@ -76,7 +74,6 @@ import org.apache.doris.analysis.CreateViewStmt; import org.apache.doris.analysis.DdlStmt; import org.apache.doris.analysis.DeleteStmt; import org.apache.doris.analysis.DropCatalogStmt; -import org.apache.doris.analysis.DropClusterStmt; import org.apache.doris.analysis.DropDbStmt; import org.apache.doris.analysis.DropEncryptKeyStmt; import org.apache.doris.analysis.DropFileStmt; @@ -94,8 +91,6 @@ import org.apache.doris.analysis.DropUserStmt; import org.apache.doris.analysis.GrantStmt; import 
org.apache.doris.analysis.InstallPluginStmt; import org.apache.doris.analysis.KillAnalysisJobStmt; -import org.apache.doris.analysis.LinkDbStmt; -import org.apache.doris.analysis.MigrateDbStmt; import org.apache.doris.analysis.PauseRoutineLoadStmt; import org.apache.doris.analysis.PauseSyncJobStmt; import org.apache.doris.analysis.RecoverDbStmt; @@ -130,18 +125,7 @@ public class DdlExecutor { * Execute ddl. **/ public static void execute(Env env, DdlStmt ddlStmt) throws Exception { - if (ddlStmt instanceof CreateClusterStmt) { - CreateClusterStmt stmt = (CreateClusterStmt) ddlStmt; - env.createCluster(stmt); - } else if (ddlStmt instanceof AlterClusterStmt) { - env.processModifyCluster((AlterClusterStmt) ddlStmt); - } else if (ddlStmt instanceof DropClusterStmt) { - env.dropCluster((DropClusterStmt) ddlStmt); - } else if (ddlStmt instanceof MigrateDbStmt) { - env.migrateDb((MigrateDbStmt) ddlStmt); - } else if (ddlStmt instanceof LinkDbStmt) { - env.linkDb((LinkDbStmt) ddlStmt); - } else if (ddlStmt instanceof CreateDbStmt) { + if (ddlStmt instanceof CreateDbStmt) { env.createDb((CreateDbStmt) ddlStmt); } else if (ddlStmt instanceof DropDbStmt) { env.dropDb((DropDbStmt) ddlStmt); diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/ShowExecutor.java b/fe/fe-core/src/main/java/org/apache/doris/qe/ShowExecutor.java index 690eab65c3..12596c7f6b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/ShowExecutor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/ShowExecutor.java @@ -34,7 +34,6 @@ import org.apache.doris.analysis.ShowBackupStmt; import org.apache.doris.analysis.ShowBrokerStmt; import org.apache.doris.analysis.ShowCatalogRecycleBinStmt; import org.apache.doris.analysis.ShowCatalogStmt; -import org.apache.doris.analysis.ShowClusterStmt; import org.apache.doris.analysis.ShowCollationStmt; import org.apache.doris.analysis.ShowColumnHistStmt; import org.apache.doris.analysis.ShowColumnStatsStmt; @@ -67,7 +66,6 @@ import 
org.apache.doris.analysis.ShowLoadStmt; import org.apache.doris.analysis.ShowLoadWarningsStmt; import org.apache.doris.analysis.ShowMTMVJobStmt; import org.apache.doris.analysis.ShowMTMVTaskStmt; -import org.apache.doris.analysis.ShowMigrationsStmt; import org.apache.doris.analysis.ShowPartitionIdStmt; import org.apache.doris.analysis.ShowPartitionsStmt; import org.apache.doris.analysis.ShowPluginsStmt; @@ -137,7 +135,6 @@ import org.apache.doris.catalog.TabletMeta; import org.apache.doris.catalog.View; import org.apache.doris.catalog.external.HMSExternalTable; import org.apache.doris.clone.DynamicPartitionScheduler; -import org.apache.doris.cluster.BaseParam; import org.apache.doris.cluster.ClusterNamespace; import org.apache.doris.common.AnalysisException; import org.apache.doris.common.CaseSensibility; @@ -322,10 +319,6 @@ public class ShowExecutor { handleShowBackup(); } else if (stmt instanceof ShowRestoreStmt) { handleShowRestore(); - } else if (stmt instanceof ShowClusterStmt) { - handleShowCluster(); - } else if (stmt instanceof ShowMigrationsStmt) { - handleShowMigrations(); } else if (stmt instanceof ShowBrokerStmt) { handleShowBroker(); } else if (stmt instanceof ShowResourcesStmt) { @@ -677,40 +670,7 @@ public class ShowExecutor { resultSet = new ShowResultSet(metaData, finalRows); } - // Show clusters - private void handleShowCluster() throws AnalysisException { - final ShowClusterStmt showStmt = (ShowClusterStmt) stmt; - final List> rows = Lists.newArrayList(); - final List clusterNames = ctx.getEnv().getClusterNames(); - - final Set clusterNameSet = Sets.newTreeSet(); - for (String cluster : clusterNames) { - clusterNameSet.add(cluster); - } - - for (String clusterName : clusterNameSet) { - rows.add(Lists.newArrayList(clusterName)); - } - - resultSet = new ShowResultSet(showStmt.getMetaData(), rows); - } - - // Show migrations - private void handleShowMigrations() throws AnalysisException { - final ShowMigrationsStmt showStmt = (ShowMigrationsStmt) 
stmt; - final List> rows = Lists.newArrayList(); - final Set infos = ctx.getEnv().getMigrations(); - - for (BaseParam param : infos) { - final int percent = (int) (param.getFloatParam(0) * 100f); - rows.add(Lists.newArrayList(param.getStringParam(0), param.getStringParam(1), param.getStringParam(2), - String.valueOf(percent + "%"))); - } - - resultSet = new ShowResultSet(showStmt.getMetaData(), rows); - } - - private void handleShowDbId() throws AnalysisException { + private void handleShowDbId() { ShowDbIdStmt showStmt = (ShowDbIdStmt) stmt; long dbId = showStmt.getDbId(); List> rows = Lists.newArrayList(); @@ -1834,7 +1794,7 @@ public class ShowExecutor { private void handleShowBackends() { final ShowBackendsStmt showStmt = (ShowBackendsStmt) stmt; - List> backendInfos = BackendsProcDir.getClusterBackendInfos(showStmt.getClusterName()); + List> backendInfos = BackendsProcDir.getBackendInfos(); backendInfos.sort(new Comparator>() { @Override diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/StmtExecutor.java b/fe/fe-core/src/main/java/org/apache/doris/qe/StmtExecutor.java index d77d21e8b1..6f7b9326ed 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/StmtExecutor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/StmtExecutor.java @@ -25,7 +25,6 @@ import org.apache.doris.analysis.DdlStmt; import org.apache.doris.analysis.DecimalLiteral; import org.apache.doris.analysis.DeleteStmt; import org.apache.doris.analysis.DropTableStmt; -import org.apache.doris.analysis.EnterStmt; import org.apache.doris.analysis.ExecuteStmt; import org.apache.doris.analysis.ExplainOptions; import org.apache.doris.analysis.ExportStmt; @@ -631,8 +630,6 @@ public class StmtExecutor { handleQueryWithRetry(queryId); } else if (parsedStmt instanceof SetStmt) { handleSetStmt(); - } else if (parsedStmt instanceof EnterStmt) { - handleEnterStmt(); } else if (parsedStmt instanceof SwitchStmt) { handleSwitchStmt(); } else if (parsedStmt instanceof UseStmt) { @@ -2033,20 +2030,6 
@@ public class StmtExecutor { } } - // Process enter cluster - private void handleEnterStmt() { - final EnterStmt enterStmt = (EnterStmt) parsedStmt; - try { - context.getEnv().changeCluster(context, enterStmt.getClusterName()); - context.setDatabase(""); - } catch (DdlException e) { - LOG.warn("", e); - context.getState().setError(e.getMysqlErrorCode(), e.getMessage()); - return; - } - context.getState().setOk(); - } - private void handleExportStmt() throws Exception { ExportStmt exportStmt = (ExportStmt) parsedStmt; context.getEnv().getExportMgr().addExportJob(exportStmt); diff --git a/fe/fe-core/src/main/java/org/apache/doris/system/SystemInfoService.java b/fe/fe-core/src/main/java/org/apache/doris/system/SystemInfoService.java index e710665640..d2e8044964 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/system/SystemInfoService.java +++ b/fe/fe-core/src/main/java/org/apache/doris/system/SystemInfoService.java @@ -23,7 +23,6 @@ import org.apache.doris.catalog.Database; import org.apache.doris.catalog.DiskInfo; import org.apache.doris.catalog.Env; import org.apache.doris.catalog.ReplicaAllocation; -import org.apache.doris.cluster.Cluster; import org.apache.doris.common.AnalysisException; import org.apache.doris.common.Config; import org.apache.doris.common.DdlException; @@ -181,17 +180,14 @@ public class SystemInfoService { // for deploy manager public void addBackends(List hostInfos, boolean isFree) throws UserException { - addBackends(hostInfos, isFree, "", Tag.DEFAULT_BACKEND_TAG.toMap()); + addBackends(hostInfos, Tag.DEFAULT_BACKEND_TAG.toMap()); } /** * @param hostInfos : backend's ip, hostName and port - * @param isFree : if true the backend is not owned by any cluster - * @param destCluster : if not null or empty backend will be added to destCluster * @throws DdlException */ - public void addBackends(List hostInfos, boolean isFree, String destCluster, - Map tagMap) throws UserException { + public void addBackends(List hostInfos, Map tagMap) throws 
UserException { for (HostInfo hostInfo : hostInfos) { if (Config.enable_fqdn_mode && StringUtils.isEmpty(hostInfo.getHostName())) { throw new DdlException("backend's hostName should not be empty while enable_fqdn_mode is true"); @@ -205,7 +201,7 @@ public class SystemInfoService { } for (HostInfo hostInfo : hostInfos) { - addBackend(hostInfo.getIp(), hostInfo.getHostName(), hostInfo.getPort(), isFree, destCluster, tagMap); + addBackend(hostInfo.getIp(), hostInfo.getHostName(), hostInfo.getPort(), tagMap); } } @@ -218,16 +214,12 @@ public class SystemInfoService { } private void setBackendOwner(Backend backend, String clusterName) { - final Cluster cluster = Env.getCurrentEnv().getCluster(clusterName); - Preconditions.checkState(cluster != null); - cluster.addBackend(backend.getId()); backend.setOwnerClusterName(clusterName); backend.setBackendState(BackendState.using); } // Final entry of adding backend - private void addBackend(String ip, String hostName, int heartbeatPort, boolean isFree, String destCluster, - Map tagMap) { + private void addBackend(String ip, String hostName, int heartbeatPort, Map tagMap) { Backend newBackend = new Backend(Env.getCurrentEnv().getNextId(), ip, hostName, heartbeatPort); // update idToBackend Map copiedBackends = Maps.newHashMap(idToBackendRef); @@ -241,15 +233,8 @@ public class SystemInfoService { ImmutableMap newIdToReportVersion = ImmutableMap.copyOf(copiedReportVersions); idToReportVersionRef = newIdToReportVersion; - if (!Strings.isNullOrEmpty(destCluster)) { - // add backend to destCluster - setBackendOwner(newBackend, destCluster); - } else if (!isFree) { - // add backend to DEFAULT_CLUSTER - setBackendOwner(newBackend, DEFAULT_CLUSTER); - } else { - // backend is free - } + // add backend to DEFAULT_CLUSTER + setBackendOwner(newBackend, DEFAULT_CLUSTER); // set tags newBackend.setTagMap(tagMap); @@ -305,13 +290,6 @@ public class SystemInfoService { ImmutableMap newIdToReportVersion = 
ImmutableMap.copyOf(copiedReportVersions); idToReportVersionRef = newIdToReportVersion; - // update cluster - final Cluster cluster = Env.getCurrentEnv().getCluster(droppedBackend.getOwnerClusterName()); - if (null != cluster) { - cluster.removeBackend(droppedBackend.getId()); - } else { - LOG.error("Cluster " + droppedBackend.getOwnerClusterName() + " no exist."); - } // log Env.getCurrentEnv().getEditLog().logDropBackend(droppedBackend); LOG.info("finished to drop {}", droppedBackend); @@ -1215,18 +1193,6 @@ public class SystemInfoService { copiedReportVersions.put(newBackend.getId(), new AtomicLong(0L)); ImmutableMap newIdToReportVersion = ImmutableMap.copyOf(copiedReportVersions); idToReportVersionRef = newIdToReportVersion; - - // to add be to DEFAULT_CLUSTER - if (newBackend.getBackendState() == BackendState.using) { - final Cluster cluster = Env.getCurrentEnv().getCluster(DEFAULT_CLUSTER); - if (null != cluster) { - // replay log - cluster.addBackend(newBackend.getId()); - } else { - // This happens in loading image when fe is restarted, because loadCluster is after loadBackend, - // cluster is not created. Be in cluster will be updated in loadCluster. 
- } - } } public void replayDropBackend(Backend backend) { @@ -1242,14 +1208,6 @@ public class SystemInfoService { copiedReportVersions.remove(backend.getId()); ImmutableMap newIdToReportVersion = ImmutableMap.copyOf(copiedReportVersions); idToReportVersionRef = newIdToReportVersion; - - // update cluster - final Cluster cluster = Env.getCurrentEnv().getCluster(backend.getOwnerClusterName()); - if (null != cluster) { - cluster.removeBackend(backend.getId()); - } else { - LOG.error("Cluster " + backend.getOwnerClusterName() + " no exist."); - } } public void updateBackendState(Backend be) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/tablefunction/MetadataGenerator.java b/fe/fe-core/src/main/java/org/apache/doris/tablefunction/MetadataGenerator.java index f4dad26a99..0f77a98740 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/tablefunction/MetadataGenerator.java +++ b/fe/fe-core/src/main/java/org/apache/doris/tablefunction/MetadataGenerator.java @@ -19,7 +19,6 @@ package org.apache.doris.tablefunction; import org.apache.doris.alter.DecommissionType; import org.apache.doris.catalog.Env; -import org.apache.doris.cluster.Cluster; import org.apache.doris.common.MetaNotFoundException; import org.apache.doris.common.util.TimeUtils; import org.apache.doris.datasource.HMSExternalCatalog; @@ -146,17 +145,7 @@ public class MetadataGenerator { } TBackendsMetadataParams backendsParam = params.getBackendsMetadataParams(); final SystemInfoService clusterInfoService = Env.getCurrentSystemInfo(); - List backendIds = null; - if (!Strings.isNullOrEmpty(backendsParam.cluster_name)) { - final Cluster cluster = Env.getCurrentEnv().getCluster(backendsParam.cluster_name); - // root not in any cluster - if (null == cluster) { - return errorResult("Cluster is not existed."); - } - backendIds = cluster.getBackendIdList(); - } else { - backendIds = clusterInfoService.getBackendIds(false); - } + List backendIds = clusterInfoService.getBackendIds(false); 
TFetchSchemaTableDataResult result = new TFetchSchemaTableDataResult(); long start = System.currentTimeMillis(); diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/AccessTestUtil.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/AccessTestUtil.java index 7cb83baa11..27d0bf30a2 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/AccessTestUtil.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/AccessTestUtil.java @@ -148,10 +148,6 @@ public class AccessTestUtil { catalog.getDbNames(); minTimes = 0; result = Lists.newArrayList("testCluster:testDb"); - - catalog.getClusterDbNames("testCluster"); - minTimes = 0; - result = Lists.newArrayList("testCluster:testDb"); } }; @@ -194,10 +190,6 @@ public class AccessTestUtil { minTimes = 0; result = new Load(); - catalog.getClusterDbNames("testCluster"); - minTimes = 0; - result = Lists.newArrayList("testCluster:testDb"); - env.changeDb((ConnectContext) any, "blockDb"); minTimes = 0; result = new DdlException("failed"); @@ -378,10 +370,6 @@ public class AccessTestUtil { minTimes = 0; result = Lists.newArrayList("testCluster:testDb"); - catalog.getClusterDbNames("testCluster"); - minTimes = 0; - result = Lists.newArrayList("testCluster:testDb"); - catalog.getDbNullable("emptyCluster"); minTimes = 0; result = null; diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/AdminCancelRebalanceDiskStmtTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/AdminCancelRebalanceDiskStmtTest.java index 322f6b25fd..453ea5dc17 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/AdminCancelRebalanceDiskStmtTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/AdminCancelRebalanceDiskStmtTest.java @@ -20,7 +20,6 @@ package org.apache.doris.analysis; import org.apache.doris.catalog.Env; import org.apache.doris.clone.RebalancerTestUtil; import org.apache.doris.common.AnalysisException; -import org.apache.doris.common.Config; import 
org.apache.doris.mysql.privilege.AccessControllerManager; import org.apache.doris.mysql.privilege.MockedAuth; import org.apache.doris.qe.ConnectContext; @@ -44,7 +43,6 @@ public class AdminCancelRebalanceDiskStmtTest { @Before() public void setUp() { - Config.disable_cluster_feature = false; analyzer = AccessTestUtil.fetchAdminAnalyzer(true); MockedAuth.mockedAccess(accessManager); MockedAuth.mockedConnectContext(ctx, "root", "192.168.1.1"); diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/AdminRebalanceDiskStmtTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/AdminRebalanceDiskStmtTest.java index 79745ecc02..268126b1ca 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/AdminRebalanceDiskStmtTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/AdminRebalanceDiskStmtTest.java @@ -20,7 +20,6 @@ package org.apache.doris.analysis; import org.apache.doris.catalog.Env; import org.apache.doris.clone.RebalancerTestUtil; import org.apache.doris.common.AnalysisException; -import org.apache.doris.common.Config; import org.apache.doris.mysql.privilege.AccessControllerManager; import org.apache.doris.mysql.privilege.MockedAuth; import org.apache.doris.qe.ConnectContext; @@ -44,7 +43,6 @@ public class AdminRebalanceDiskStmtTest { @Before public void setUp() { - Config.disable_cluster_feature = false; analyzer = AccessTestUtil.fetchAdminAnalyzer(true); MockedAuth.mockedAccess(accessManager); MockedAuth.mockedConnectContext(ctx, "root", "192.168.1.1"); diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/AlterClusterStmtTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/AlterClusterStmtTest.java deleted file mode 100644 index 79c4f515db..0000000000 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/AlterClusterStmtTest.java +++ /dev/null @@ -1,79 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. 
See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package org.apache.doris.analysis; - -import org.apache.doris.common.AnalysisException; -import org.apache.doris.common.Config; -import org.apache.doris.common.UserException; -import org.apache.doris.mysql.privilege.AccessControllerManager; -import org.apache.doris.mysql.privilege.MockedAuth; -import org.apache.doris.qe.ConnectContext; - -import mockit.Mocked; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import java.util.HashMap; -import java.util.Map; - -public class AlterClusterStmtTest { - - private static Analyzer analyzer; - - @Mocked - private AccessControllerManager accessManager; - @Mocked - private ConnectContext ctx; - - @Before() - public void setUp() { - Config.disable_cluster_feature = false; - analyzer = AccessTestUtil.fetchAdminAnalyzer(true); - MockedAuth.mockedAccess(accessManager); - MockedAuth.mockedConnectContext(ctx, "root", "192.168.1.1"); - } - - @Test - public void testAnalyzeNormal() throws UserException, AnalysisException { - final Map properties = new HashMap(); - properties.put("instance_num", "2"); - final AlterClusterStmt stmt = new AlterClusterStmt("testCluster", properties); - stmt.analyze(analyzer); - Assert.assertEquals("testCluster", stmt.getAlterClusterName()); - String sql = "ALTER CLUSTER " + 
"testCluster" + " PROPERTIES(\"instance_num\"=" + "\"" + "2" + "\")"; - Assert.assertEquals(sql, stmt.toSql()); - } - - @Test(expected = AnalysisException.class) - public void testNoPropertiesFail() throws UserException, AnalysisException { - final AlterClusterStmt stmt = new AlterClusterStmt("testCluster", null); - stmt.analyze(analyzer); - Assert.fail("no exception"); - } - - @Test(expected = AnalysisException.class) - public void testParamNumberFormatError() throws UserException, AnalysisException { - final Map properties = new HashMap(); - properties.put("instance_num", "0xfffffff"); - final AlterClusterStmt stmt = new AlterClusterStmt("testCluster", properties); - stmt.analyze(analyzer); - Assert.fail("no exception"); - } - -} diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/BackendStmtTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/BackendStmtTest.java index 990c463423..f57a4271f4 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/BackendStmtTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/BackendStmtTest.java @@ -78,7 +78,7 @@ public class BackendStmtTest { public void initBackendsTest4() throws Exception { BackendClause stmt = createStmt(4); stmt.analyze(analyzer); - Assert.assertEquals("ADD FREE BACKEND \"192.168.1.1:12345\"", stmt.toSql()); + Assert.assertEquals("ADD BACKEND \"192.168.1.1:12345\"", stmt.toSql()); } @Test diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/CreateClusterStmtTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/CreateClusterStmtTest.java deleted file mode 100644 index b3a2d8d583..0000000000 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/CreateClusterStmtTest.java +++ /dev/null @@ -1,74 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. 
The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package org.apache.doris.analysis; - -import org.apache.doris.common.AnalysisException; -import org.apache.doris.common.Config; -import org.apache.doris.common.UserException; -import org.apache.doris.mysql.privilege.AccessControllerManager; -import org.apache.doris.mysql.privilege.MockedAuth; -import org.apache.doris.qe.ConnectContext; - -import mockit.Mocked; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import java.util.HashMap; -import java.util.Map; - -public class CreateClusterStmtTest { - - private static Analyzer analyzer; - - @Mocked - private AccessControllerManager accessManager; - @Mocked - private ConnectContext ctx; - - static { - // Startup.initializeIfPossible(); - } - - @Before() - public void setUp() { - Config.disable_cluster_feature = false; - analyzer = AccessTestUtil.fetchAdminAnalyzer(true); - MockedAuth.mockedAccess(accessManager); - MockedAuth.mockedConnectContext(ctx, "root", "192.168.1.1"); - } - - @Test - public void testAnalyzeNormal() throws UserException, AnalysisException { - final Map properties = new HashMap(); - properties.put("instance_num", "2"); - final CreateClusterStmt stmt = new CreateClusterStmt("testCluster", properties, "password"); - stmt.analyze(analyzer); - Assert.assertEquals("testCluster", stmt.getClusterName()); - String sql = "CREATE CLUSTER " + "testCluster" + " PROPERTIES(\"instance_num\"=" + "\"" + 
"2" + "\")" - + "IDENTIFIED BY '" + "password" + "'"; - Assert.assertEquals(sql, stmt.toSql()); - } - - @Test(expected = AnalysisException.class) - public void testAnalyzeWithException() throws UserException, AnalysisException { - final CreateClusterStmt stmt = new CreateClusterStmt("testCluster", null, "password"); - stmt.analyze(analyzer); - Assert.fail("no exception"); - } -} diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/DropClusterStmtTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/DropClusterStmtTest.java deleted file mode 100644 index af67064dbe..0000000000 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/DropClusterStmtTest.java +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package org.apache.doris.analysis; - -import org.apache.doris.common.AnalysisException; -import org.apache.doris.common.Config; -import org.apache.doris.common.UserException; -import org.apache.doris.mysql.privilege.AccessControllerManager; -import org.apache.doris.mysql.privilege.PrivPredicate; -import org.apache.doris.qe.ConnectContext; - -import mockit.Expectations; -import mockit.Mocked; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -public class DropClusterStmtTest { - - private static Analyzer analyzer; - - @Mocked - private AccessControllerManager accessManager; - - @Before - public void setUp() { - Config.disable_cluster_feature = false; - analyzer = AccessTestUtil.fetchAdminAnalyzer(true); - - new Expectations() { - { - accessManager.checkGlobalPriv((ConnectContext) any, (PrivPredicate) any); - minTimes = 0; - result = true; - - accessManager.checkDbPriv((ConnectContext) any, anyString, (PrivPredicate) any); - minTimes = 0; - result = true; - - accessManager.checkTblPriv((ConnectContext) any, anyString, anyString, (PrivPredicate) any); - minTimes = 0; - result = true; - } - }; - } - - @Test - public void testNormal() throws UserException, AnalysisException { - final DropClusterStmt stmt = new DropClusterStmt(true, "testCluster"); - - stmt.analyze(analyzer); - Assert.assertEquals("testCluster", stmt.getName()); - Assert.assertEquals("DROP CLUSTER testCluster", stmt.toSql()); - } - - @Test(expected = AnalysisException.class) - public void testFailed() throws UserException, AnalysisException { - DropClusterStmt stmt = new DropClusterStmt(false, ""); - - stmt.analyze(analyzer); - Assert.fail("no exception"); - } -} diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/LinkDbStmtTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/LinkDbStmtTest.java deleted file mode 100644 index fca8174d16..0000000000 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/LinkDbStmtTest.java +++ /dev/null @@ -1,68 +0,0 
@@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package org.apache.doris.analysis; - -import org.apache.doris.common.AnalysisException; -import org.apache.doris.common.Config; -import org.apache.doris.common.UserException; -import org.apache.doris.mysql.privilege.AccessControllerManager; -import org.apache.doris.mysql.privilege.MockedAuth; -import org.apache.doris.qe.ConnectContext; - -import mockit.Mocked; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -public class LinkDbStmtTest { - - private static Analyzer analyzer; - - @Mocked - private AccessControllerManager accessManager; - @Mocked - private ConnectContext ctx; - - @Before - public void setUp() { - Config.disable_cluster_feature = false; - analyzer = AccessTestUtil.fetchAdminAnalyzer(true); - MockedAuth.mockedAccess(accessManager); - MockedAuth.mockedConnectContext(ctx, "root", "192.168.1.1"); - } - - @Test - public void testNormal() throws UserException, AnalysisException { - final ClusterName cn1 = new ClusterName("testCluster1", "testDb1"); - final ClusterName cn2 = new ClusterName("testCluster2", "testDb2"); - final LinkDbStmt stmt = new LinkDbStmt(cn1, cn2); - stmt.analyze(analyzer); - - Assert.assertEquals("LINK 
DATABASE " + stmt.getSrcCluster() + "." + stmt.getSrcDb() - + " " + stmt.getDestCluster() + "." + stmt.getDestDb(), stmt.toString()); - } - - @Test(expected = AnalysisException.class) - public void testParamError() throws UserException, AnalysisException { - final ClusterName cn1 = new ClusterName("testCluster1", ""); - final ClusterName cn2 = new ClusterName("testCluster2", "testDb2"); - final LinkDbStmt stmt = new LinkDbStmt(cn1, cn2); - stmt.analyze(AccessTestUtil.fetchBlockAnalyzer()); - Assert.fail("no exception"); - } -} diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/MigrateDbStmtTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/MigrateDbStmtTest.java deleted file mode 100644 index bce3bb7d25..0000000000 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/MigrateDbStmtTest.java +++ /dev/null @@ -1,68 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package org.apache.doris.analysis; - -import org.apache.doris.common.AnalysisException; -import org.apache.doris.common.Config; -import org.apache.doris.common.UserException; -import org.apache.doris.mysql.privilege.AccessControllerManager; -import org.apache.doris.mysql.privilege.MockedAuth; -import org.apache.doris.qe.ConnectContext; - -import mockit.Mocked; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -public class MigrateDbStmtTest { - - private static Analyzer analyzer; - - @Mocked - private AccessControllerManager accessManager; - @Mocked - private ConnectContext ctx; - - @Before - public void setUp() { - Config.disable_cluster_feature = false; - analyzer = AccessTestUtil.fetchAdminAnalyzer(true); - MockedAuth.mockedAccess(accessManager); - MockedAuth.mockedConnectContext(ctx, "root", "192.168.1.1"); - } - - @Test - public void testNormal() throws UserException, AnalysisException { - final ClusterName cn1 = new ClusterName("testCluster1", "testDb1"); - final ClusterName cn2 = new ClusterName("testCluster2", "testDb2"); - final MigrateDbStmt stmt = new MigrateDbStmt(cn1, cn2); - stmt.analyze(analyzer); - final String sql = "MIGRATE DATABASE " + stmt.getSrcCluster() + "." + stmt.getSrcDb() + " " - + stmt.getDestCluster() + "." 
+ stmt.getDestDb(); - Assert.assertEquals(sql, stmt.toSql()); - } - - @Test(expected = AnalysisException.class) - public void testParamError() throws UserException, AnalysisException { - final ClusterName cn1 = new ClusterName("testCluster1", ""); - final ClusterName cn2 = new ClusterName("testCluster2", "testDb2"); - final MigrateDbStmt stmt = new MigrateDbStmt(cn1, cn2); - stmt.analyze(AccessTestUtil.fetchBlockAnalyzer()); - Assert.fail("no exception"); - } -} diff --git a/fe/fe-core/src/test/java/org/apache/doris/catalog/CatalogTestUtil.java b/fe/fe-core/src/test/java/org/apache/doris/catalog/CatalogTestUtil.java index 5e022a56e2..66c63b346a 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/catalog/CatalogTestUtil.java +++ b/fe/fe-core/src/test/java/org/apache/doris/catalog/CatalogTestUtil.java @@ -118,7 +118,6 @@ public class CatalogTestUtil { Env.getCurrentSystemInfo().addBackend(backend1); Env.getCurrentSystemInfo().addBackend(backend2); Env.getCurrentSystemInfo().addBackend(backend3); - env.initDefaultCluster(); Database db = createSimpleDb(testDbId1, testTableId1, testPartitionId1, testIndexId1, testTabletId1, testStartVersion); diff --git a/fe/fe-core/src/test/java/org/apache/doris/cluster/SystemInfoServiceTest.java b/fe/fe-core/src/test/java/org/apache/doris/cluster/SystemInfoServiceTest.java index 153dd9687a..9d48b4ac61 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/cluster/SystemInfoServiceTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/cluster/SystemInfoServiceTest.java @@ -109,10 +109,6 @@ public class SystemInfoServiceTest { minTimes = 0; result = table; - env.getCluster(anyString); - minTimes = 0; - result = new Cluster("cluster", 1); - env.clear(); minTimes = 0; diff --git a/fe/fe-core/src/test/java/org/apache/doris/http/DorisHttpTestCase.java b/fe/fe-core/src/test/java/org/apache/doris/http/DorisHttpTestCase.java index 5385b6ed7f..6900788f4b 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/http/DorisHttpTestCase.java 
+++ b/fe/fe-core/src/test/java/org/apache/doris/http/DorisHttpTestCase.java @@ -245,10 +245,6 @@ public abstract class DorisHttpTestCase { internalCatalog.getDbNames(); minTimes = 0; result = Lists.newArrayList("default_cluster:testDb"); - - internalCatalog.getClusterDbNames("default_cluster"); - minTimes = 0; - result = Lists.newArrayList("default_cluster:testDb"); } }; @@ -301,9 +297,6 @@ public abstract class DorisHttpTestCase { env.changeDb((ConnectContext) any, anyString); minTimes = 0; - env.initDefaultCluster(); - minTimes = 0; - env.getCatalogMgr(); minTimes = 0; result = dsMgr; diff --git a/fe/fe-core/src/test/java/org/apache/doris/qe/ShowExecutorTest.java b/fe/fe-core/src/test/java/org/apache/doris/qe/ShowExecutorTest.java index da1af7702e..f19722d373 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/qe/ShowExecutorTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/qe/ShowExecutorTest.java @@ -181,14 +181,6 @@ public class ShowExecutorTest { catalog.getDbNullable("testCluster:emptyDb"); minTimes = 0; result = null; - - catalog.getClusterDbNames("testCluster"); - minTimes = 0; - result = Lists.newArrayList("testCluster:testDb"); - - catalog.getClusterDbNames(""); - minTimes = 0; - result = Lists.newArrayList(""); } };